repo_name (string, 6-103 chars) | path (string, 4-209 chars) | copies (325 classes) | size (string, 4-7 chars) | content (string, 838-1.04M chars) | license (15 classes)
---|---|---|---|---|---|
dimkal/mne-python | mne/simulation/tests/test_evoked.py | 3 | 3170 | # Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import (read_label, read_forward_solution, pick_types_forward,
read_evokeds, read_cov)
from mne.time_frequency import morlet
from mne.simulation import generate_sparse_stc, generate_evoked
from mne.io import Raw
from mne.utils import run_tests_if_main
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
""" Test simulation of evoked data """
raw = Raw(raw_fname)
fwd = read_forward_solution(fwd_fname, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
'%s.label' % label)) for label in label_names]
evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
snr = 6 # dB
tmin = -0.1
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series from 2 Morlet wavelets
stc_data = np.zeros((len(labels), len(times)))
Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
stc_data[0][:len(Ws[0])] = np.real(Ws[0])
stc_data[1][:len(Ws[1])] = np.real(Ws[1])
stc_data *= 100 * 1e-9 # use nAm as unit
# time translation
stc_data[1] = np.roll(stc_data[1], 80)
stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
random_state=0)
# Generate noisy evoked data
iir_filter = [1, -0.9]
with warnings.catch_warnings(record=True):
warnings.simplefilter('always') # positive semidefinite warning
evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
tmin=0.0, tmax=0.2, iir_filter=iir_filter)
assert_array_almost_equal(evoked.times, stc.times)
assert_true(len(evoked.data) == len(fwd['sol']['data']))
# make a vertex that doesn't exist in fwd, should throw error
stc_bad = stc.copy()
mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
stc_bad.vertices[0][0] = mv + 1
assert_raises(RuntimeError, generate_evoked, fwd, stc_bad,
evoked_template, cov, snr, tmin=0.0, tmax=0.2)
run_tests_if_main()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/cluster/__init__.py | 359 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
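# Minimal usage sketch (illustrative only, not part of this module): the estimator
# classes listed above follow scikit-learn's usual fit/predict pattern, e.g. KMeans:
#
#   from sklearn.cluster import KMeans
#   import numpy as np
#   X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
#   km = KMeans(n_clusters=2, random_state=0).fit(X)
#   km.labels_           # cluster index assigned to each sample
#   km.cluster_centers_  # coordinates of the two centroids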
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 296 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/examples/learn/iris_custom_decay_dnn.py | 29 | 2039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
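  # With staircase left at its default (False), tf.train.exponential_decay gives a
  # continuously decayed rate: lr(step) = 0.1 * 0.001 ** (step / 100).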
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
andrewcmyers/tensorflow | tensorflow/contrib/imperative/examples/mnist.py | 68 | 4576 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST training in imperative mode TensorFlow."""
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.contrib.imperative as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
NUM_CLASSES = 10
BATCH_SIZE = 100
NUM_EPOCHS = 2
LEARNING_RATE = 0.1
class Model(object):
"""Fully connected model for MNIST."""
def __init__(self, hidden1_units, hidden2_units):
"""Create the model parameters."""
self.params = []
# Hidden 1
with tf.name_scope('hidden1'):
self.weights1 = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(IMAGE_PIXELS)),
size=[IMAGE_PIXELS, hidden1_units]),
dtype=tf.float32,
name='weights')
self.biases1 = tf.Variable(
np.zeros([hidden1_units]),
dtype=tf.float32,
name='biases')
# Hidden 2
with tf.name_scope('hidden2'):
self.weights2 = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(hidden1_units)),
size=[hidden1_units, hidden2_units]),
dtype=tf.float32,
name='weights')
self.biases2 = tf.Variable(
np.zeros([hidden2_units]),
dtype=tf.float32,
name='biases')
# Linear
with tf.name_scope('softmax_linear'):
self.sm_w = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(hidden2_units)),
size=[hidden2_units, NUM_CLASSES]),
dtype=tf.float32,
name='weights')
self.sm_b = tf.Variable(
np.zeros([NUM_CLASSES]),
dtype=tf.float32,
name='biases')
self.params = [self.weights1, self.biases1,
self.weights2, self.biases2,
self.sm_w, self.sm_b]
def __call__(self, images):
"""Run the model's forward prop on `images`."""
hidden1 = tf.nn.relu(tf.matmul(images, self.weights1) + self.biases1)
hidden2 = tf.nn.relu(tf.matmul(hidden1, self.weights2) + self.biases2)
logits = tf.matmul(hidden2, self.sm_w) + self.sm_b
return logits
model = Model(128, 32)
data = read_data_sets('/tmp/mnist_train')
def get_test_accuracy():
"""Gets the model's classification accuracy on test data."""
num_examples = data.test.num_examples
test_images = np.split(data.test.images, num_examples/BATCH_SIZE)
test_labels = np.split(data.test.labels.astype(np.int32),
num_examples/BATCH_SIZE)
num_correct = 0
for _, (images, labels) in enumerate(zip(test_images, test_labels)):
with tf.new_step():
logits = model(images)
predictions = tf.argmax(tf.nn.softmax(logits), axis=1)
num_correct += np.sum(predictions.value == labels)
return float(num_correct) / float(num_examples)
num_examples = data.train.num_examples
train_images = np.split(data.train.images, num_examples/BATCH_SIZE)
train_labels = np.split(data.train.labels.astype(np.int32),
num_examples/BATCH_SIZE)
for epoch in range(NUM_EPOCHS):
for i, (images, labels) in enumerate(zip(train_images, train_labels)):
with tf.new_step() as step:
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
gradients = tf.gradients(loss, model.params)
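      # Manual SGD step: each parameter v is updated in place as v <- v - LEARNING_RATE * dloss/dv.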
step.run([v.assign_sub(LEARNING_RATE * g)
for g, v in zip(gradients, model.params)])
if i % 10 == 0:
print('Loss after {} steps = {}'.format(i, loss))
if i % 100 == 0:
print('Test accuracy after {} steps = {}'
.format(i, get_test_accuracy()))
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/spatial/_procrustes.py | 41 | 4466 | """
This module provides functions to perform full Procrustes analysis.
This code was originally written by Justin Kuczynski and ported over from
scikit-bio by Yoshiki Vazquez-Baeza.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.linalg import orthogonal_procrustes
__all__ = ['procrustes']
def procrustes(data1, data2):
r"""Procrustes analysis, a similarity test for two data sets.
Each input matrix is a set of points or vectors (the rows of the matrix).
The dimension of the space is the number of columns of each matrix. Given
two identically sized matrices, procrustes standardizes both such that:
- :math:`tr(AA^{T}) = 1`.
- Both sets of points are centered around the origin.
Procrustes ([1]_, [2]_) then applies the optimal transform to the second
matrix (including scaling/dilation, rotations, and reflections) to minimize
:math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
pointwise differences between the two input datasets.
This function was not designed to handle datasets with different numbers of
datapoints (rows). If two data sets have different dimensionality
(different number of columns), simply add columns of zeros to the smaller
of the two.
Parameters
----------
data1 : array_like
        Matrix, n rows represent points in k (columns) space. `data1` is the
        reference data; after it is standardized, the data from `data2` will be
        transformed to fit the pattern in `data1` (must have >1 unique points).
data2 : array_like
n rows of data in k space to be fit to `data1`. Must be the same
shape ``(numrows, numcols)`` as data1 (must have >1 unique points).
Returns
-------
mtx1 : array_like
A standardized version of `data1`.
mtx2 : array_like
The orientation of `data2` that best fits `data1`. Centered, but not
necessarily :math:`tr(AA^{T}) = 1`.
disparity : float
:math:`M^{2}` as defined above.
Raises
------
ValueError
If the input arrays are not two-dimensional.
If the shape of the input arrays is different.
If the input arrays have zero columns or zero rows.
See Also
--------
scipy.linalg.orthogonal_procrustes
scipy.spatial.distance.directed_hausdorff : Another similarity test
for two data sets
Notes
-----
- The disparity should not depend on the order of the input matrices, but
the output matrices will, as only the first output matrix is guaranteed
to be scaled such that :math:`tr(AA^{T}) = 1`.
    - Duplicate data points are generally ok; duplicating a data point will
increase its effect on the procrustes fit.
- The disparity scales as the number of points per input matrix.
References
----------
.. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
.. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
Examples
--------
>>> from scipy.spatial import procrustes
The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
``a`` here:
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
"""
mtx1 = np.array(data1, dtype=np.double, copy=True)
mtx2 = np.array(data2, dtype=np.double, copy=True)
if mtx1.ndim != 2 or mtx2.ndim != 2:
raise ValueError("Input matrices must be two-dimensional")
if mtx1.shape != mtx2.shape:
raise ValueError("Input matrices must be of same shape")
if mtx1.size == 0:
raise ValueError("Input matrices must be >0 rows and >0 cols")
# translate all the data to the origin
mtx1 -= np.mean(mtx1, 0)
mtx2 -= np.mean(mtx2, 0)
norm1 = np.linalg.norm(mtx1)
norm2 = np.linalg.norm(mtx2)
if norm1 == 0 or norm2 == 0:
raise ValueError("Input matrices must contain >1 unique points")
# change scaling of data (in rows) such that trace(mtx*mtx') = 1
mtx1 /= norm1
mtx2 /= norm2
# transform mtx2 to minimize disparity
R, s = orthogonal_procrustes(mtx1, mtx2)
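    # R is the orthogonal (rotation/reflection) matrix and s the scaling returned
    # by orthogonal_procrustes; applying R.T and s maps mtx2 onto mtx1.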
mtx2 = np.dot(mtx2, R.T) * s
# measure the dissimilarity between the two datasets
disparity = np.sum(np.square(mtx1 - mtx2))
return mtx1, mtx2, disparity
| mit |
yonglehou/scikit-learn | benchmarks/bench_plot_svd.py | 322 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/Online_PassiveAggressive_ReservoirHashKernels_notanh.py | 1 | 10477 | # -*- coding: utf-8 -*-
"""
python -m scripts/Online_PassiveAggressive_countmeansketch LMdata 3 1 a ODDST 0.01
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from copy import copy
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier as PAC
from skgraph.datasets import load_graph_datasets
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.utils import compute_class_weight
from scipy.sparse import csr_matrix
from skgraph.utils.countminsketch_randomprojection_notanh import CountMinSketch
from itertools import izip
import time
if __name__=='__main__':
start_time = time.time()
    if len(sys.argv)<9:
sys.exit("python ODDKernel_example.py dataset r l filename kernel C m seed")
dataset=sys.argv[1]
max_radius=int(sys.argv[2])
la=float(sys.argv[3])
#hashs=int(sys.argv[3])
njobs=1
name=str(sys.argv[4])
kernel=sys.argv[5]
C=float(sys.argv[6])
m=int(sys.argv[7])
rs=int(sys.argv[8])
#lr=float(sys.argv[7])
#FIXED PARAMETERS
normalization=False
#working with Chemical
g_it=load_graph_datasets.dispatch(dataset)
f=open(name,'w')
#At this point, one_hot_encoding contains the encoding for each symbol in the alphabet
if kernel=="WL":
print "Lambda ignored"
print "Using WL fast subtree kernel"
Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
elif kernel=="ODDST":
print "Using ST kernel"
Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
elif kernel=="NSPDK":
print "Using NSPDK kernel, lambda parameter interpreted as d"
Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
else:
print "Unrecognized kernel"
#TODO the C parameter should probably be optimized
#print zip(_letters, _one_hot)
#exit()
features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
print "examples, features", features.shape
features_time=time.time()
print("Computed features in %s seconds ---" % (features_time - start_time))
errors=0
tp=0
fp=0
tn=0
fn=0
predictions=[0]*50
correct=[0]*50
#print ESN
#netDataSet=[]
#netTargetSet=[]
#netKeyList=[]
BERtotal=[]
bintargets=[1,-1]
#print features
#print list_for_deep.keys()
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
sizes=[5000]*50
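    # The CountMinSketch object here acts as a random projection (as the module
    # name suggests): it maps each sparse feature vector into an m-dimensional
    # dense vector, and the weight vector WCMS is learned in that sketched space.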
transformer=CountMinSketch(m,features.shape[1],rs)
WCMS=np.zeros(shape=(m,1))
cms_creation=0.0
for i in xrange(features.shape[0]):
time1=time.time()
ex=features[i][0].T
exCMS=transformer.transform(ex)
#print "exCMS", type(exCMS), exCMS.shape
target=g_it.target[i]
#W=csr_matrix(ex)
#dot=0.0
module=np.dot(exCMS.T,exCMS)[0,0]
#print "module", module
time2=time.time()
cms_creation+=time2 - time1
dot=np.dot(WCMS.T,exCMS)
#print "dot", dot
#print "dot:", dot, "dotCMS:",dot1
if (np.sign(dot) != target ):
#print "error on example",i, "predicted:", dot, "correct:", target
errors+=1
if target==1:
fn+=1
else:
fp+=1
else:
#print "correct classification", target
if target==1:
tp+=1
else:
tn+=1
if(target==1):
coef=(part_minus+1.0)/(part_plus+part_minus+1.0)
part_plus+=1
else:
coef=(part_plus+1.0)/(part_plus+part_minus+1.0)
part_minus+=1
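        # Passive-Aggressive (PA-I) step size: the hinge loss (1 - y*w.x), weighted by
        # the class-balance coefficient computed above, divided by ||x||^2 (module)
        # and capped at C.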
tao = min (C, max (0.0,( (1.0 - target*dot )*coef) / module ) );
if (tao > 0.0):
WCMS+=(exCMS*(tao*target))
# for row,col in zip(rows,cols):
# ((row,col), ex[row,col])
# #print col, ex[row,col]
# WCMS.add(col,target*tao*ex[row,col])
#print "Correct prediction example",i, "pred", score, "target",target
if i%50==0 and i!=0:
#output performance statistics every 50 examples
if (tn+fp) > 0:
pos_part= float(fp) / (tn+fp)
else:
pos_part=0
if (tp+fn) > 0:
neg_part=float(fn) / (tp+fn)
else:
neg_part=0
BER = 0.5 * ( pos_part + neg_part)
print "1-BER Window esempio ",i, (1.0 - BER)
f.write("1-BER Window esempio "+str(i)+" "+str(1.0 - BER)+"\n")
#print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
BERtotal.append(1.0 - BER)
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
end_time=time.time()
print("Learning phase time %s seconds ---" % (end_time - features_time )) #- cms_creation
print("Total time %s seconds ---" % (end_time - start_time))
print "BER AVG", str(np.average(BERtotal)),"std", np.std(BERtotal)
f.write("BER AVG "+ str(np.average(BERtotal))+" std "+str(np.std(BERtotal))+"\n")
f.close()
#print "N_features", ex.shape
#generate explicit W from CountMeanSketch
#print W
#raw_input("W (output)")
#==============================================================================
#
# tao = /*(double)labels->get_label(idx_a) **/ min (C, max (0.0,(1.0 - (((double)labels->get_label(idx_a))*(classe_mod) )) * c_plus ) / modulo_test);
#
# #W=W_old #dump line
#
#
# #set the weights of PA to the predicted values
# PassiveAggressive.coef_=W
# pred=PassiveAggressive.predict(ex)
#
# score=PassiveAggressive.decision_function(ex)
#
# bintargets.append(target)
# if pred!=target:
# errors+=1
# print "Error",errors," on example",i, "pred", score, "target",target
# if target==1:
# fn+=1
# else:
# fp+=1
#
# else:
# if target==1:
# tp+=1
# else:
# tn+=1
# #print "Correct prediction example",i, "pred", score, "target",target
#
# else:
# #first example is always an error!
# pred=0
# score=0
# errors+=1
# print "Error",errors," on example",i
# if g_it.target[i]==1:
# fn+=1
# else:
# fp+=1
# #print i
# if i%50==0 and i!=0:
# #output performance statistics every 50 examples
# if (tn+fp) > 0:
# pos_part= float(fp) / (tn+fp)
# else:
# pos_part=0
# if (tp+fn) > 0:
# neg_part=float(fn) / (tp+fn)
# else:
# neg_part=0
# BER = 0.5 * ( pos_part + neg_part)
# print "1-BER Window esempio ",i, (1.0 - BER)
# print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
# BERtotal.append(1.0 - BER)
# tp = 0
# fp = 0
# fn = 0
# tn = 0
# bintargets=[1,-1]
# #print features[0][i]
# #print features[0][i].shape
# #f=features[0][i,:]
# #print f.shape
# #print f.shape
# #print g_it.target[i]
# #third parameter is compulsory just for the first call
# print "prediction", pred, score
# #print "intecept",PassiveAggressive.intercept_
# #raw_input()
# if abs(score)<1.0 or pred!=g_it.target[i]:
#
# ClassWeight=compute_class_weight('auto',np.asarray([1,-1]),bintargets)
# #print "class weights", {1:ClassWeight[0],-1:ClassWeight[1]}
# PassiveAggressive.class_weight={1:ClassWeight[0],-1:ClassWeight[1]}
#
# PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# #PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# W_old=PassiveAggressive.coef_
#
#
# #ESN target---#
# netTargetSet=[]
# for key,rowDict in list_for_deep[i].iteritems():
#
#
# target=np.asarray( [np.asarray([W_old[0,key]])]*len(rowDict))
#
#
# netTargetSet.append(target)
#
#
#
#
# #------------ESN TargetSetset--------------------#
# # ESN Training
#
# #for ftDataset,ftTargetSet in zip(netDataSet,netTargetSet):
# #print "Input"
# #print netDataSet
# #raw_input("Output")
# #print netTargetSet
# #raw_input("Target")
# model.OnlineTrain(netDataSet,netTargetSet,lr)
# #raw_input("TR")
# #calcolo statistiche
#
# print "BER AVG", sum(BERtotal) / float(len(BERtotal))
# print>>f,"BER AVG "+str(sum(BERtotal) / float(len(BERtotal)))
# f.close()
#==============================================================================
| gpl-3.0 |
pkruskal/scikit-learn | sklearn/linear_model/bayes.py | 219 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
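        # The SVD of X is computed once; inside the loop only alpha_ and lambda_
        # change, so coef_ and the log-determinant can be recomputed cheaply from
        # U, S and Vh.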
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
Leminen/project_template_deeplearning | src/data/datasets/psd.py | 1 | 9623 | """
Methods for downloading and converting the PSD dataset to TF-records.
The implementation is heavily inspired by the slim.datasets implementation (https://github.com/tensorflow/models/tree/master/research/slim/datasets)
"""
import os
import sys
import numpy as np
from six.moves import urllib
import gzip
import zipfile
import tensorflow as tf
import src.utils as utils
import src.data.util_data as util_data
# The URLs where the PSD data can be downloaded.
_DATA_URL = 'https://vision.eng.au.dk/?download=/data/WeedData/'
_NONSEGMENTED = 'NonsegmentedV2.zip'
_SEGMENTED = 'Segmented.zip'
_DATA_URL_NONSEGMENTED = 'https://vision.eng.au.dk/?download=/data/WeedData/NonsegmentedV2.zip'
_DATA_URL_SEGMENTED = 'https://vision.eng.au.dk/?download=/data/WeedData/Segmented.zip'
# Local directories to store the dataset
_DIR_RAW = 'data/raw/PSD'
_DIR_PROCESSED = 'data/processed/PSD'
_DIR_RAW_NONSEGMENTED = 'data/raw/PSD_Nonsegmented/NonsegmentedV2.zip'
_DIR_PROCESSED_NONSEGMENTED = 'data/processed/PSD_Nonsegmented/'
_DIR_RAW_SEGMENTED = 'data/raw/PSD_Segmented/Segmented.zip'
_DIR_PROCESSED_SEGMENTED = 'data/processed/PSD_Segmented/'
_EXCLUDED_GRASSES = True
_EXCLUDE_LARGE_IMAGES = True
_LARGE_IMAGE_DIM = 400
_NUM_SHARDS = 10
def chunkify(lst,n):
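    """Splits lst into n round-robin chunks (lst[i::n]); used to spread files across shards."""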
return [lst[i::n] for i in iter(range(n))]
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB PNG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
self._encode_png = tf.image.encode_png(self._decode_png)
def truncate_image(self, sess, image_data):
image, reencoded_image = sess.run(
[self._decode_png, self._encode_png],
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return reencoded_image, image.shape[0], image.shape[1], image.shape[2]
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1], image.shape[2]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def encode_png(self, sess, image_data):
image_data = sess.run(self._encode_png,
feed_dict={self._decode_png_data: image_data})
def _get_filenames_and_classes(dataset_dir, setname, exclude_list):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
      A list with one sublist of image file paths per shard, plus the sorted
      list of subdirectory names, representing class names.
"""
data_root = os.path.join(dataset_dir, *setname)
directories = []
class_names = []
for filename in os.listdir(data_root):
path = os.path.join(data_root, filename)
if os.path.isdir(path):
if not any(x in filename for x in exclude_list):
directories.append(path)
class_names.append(filename)
photo_filenames = []
photo_filenames2 = []
for _ in range(_NUM_SHARDS):
photo_filenames2.append([])
for directory in directories:
if not any(x in directory for x in exclude_list):
filenames = os.listdir(directory)
paths = [os.path.join(directory, filename) for filename in filenames]
paths_split = chunkify(paths,_NUM_SHARDS)
for shard_n in range(_NUM_SHARDS):
photo_filenames2[shard_n].extend(paths_split[shard_n])
for filename in filenames:
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames2, sorted(class_names)
def _convert_to_tfrecord(filenames, class_dict, tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
num_images = len(filenames)
image_reader = ImageReader()
with tf.Session('') as sess:
for i in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (i + 1, num_images))
sys.stdout.flush()
# Read the filename:
encoded_img = tf.gfile.FastGFile(filenames[i], 'rb').read()
encoded_img, height, width, channels = image_reader.truncate_image(sess, encoded_img)
if _EXCLUDE_LARGE_IMAGES and (height > _LARGE_IMAGE_DIM or width > _LARGE_IMAGE_DIM):
pass
else:
class_name = os.path.basename(os.path.dirname(filenames[i]))
label = class_dict[class_name]
example = util_data.encode_image(
image_data = encoded_img,
image_format = 'png'.encode(),
class_lbl = label,
class_text = class_name.encode(),
height = height,
width = width,
channels = channels,
origin = filenames[i].encode()
)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, shard_id):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
      shard_id: The zero-based index of the shard being written.
Returns:
An absolute file path.
"""
return '%s/PSD-data_%03d-of-%03d.tfrecord' % (dataset_dir, shard_id+1, _NUM_SHARDS)
def download(dataset_part):
"""Downloads PSD locally
"""
if dataset_part == 'Nonsegmented':
_data_url = _DATA_URL_NONSEGMENTED
filepath = os.path.join(_DIR_RAW_NONSEGMENTED)
else:
_data_url = _DATA_URL_SEGMENTED
filepath = os.path.join(_DIR_RAW_SEGMENTED)
if not os.path.exists(filepath):
print('Downloading dataset...')
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_data_url, filepath, _progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', size, 'bytes.')
def process(dataset_part):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if dataset_part == 'Nonsegmented':
_dir_raw = _DIR_RAW_NONSEGMENTED
_dir_processed = _DIR_PROCESSED_NONSEGMENTED
setname = 'Nonsegmented'
#training_filename = _get_output_filename(_DIR_PROCESSED_NONSEGMENTED, 'train')
# testing_filename = _get_output_filename(_DIR_PROCESSED_NONSEGMENTED, 'test')
else:
_dir_raw = _DIR_RAW_SEGMENTED
_dir_processed = _DIR_PROCESSED_SEGMENTED
setname = 'Segmented'
#training_filename = _get_output_filename(_DIR_PROCESSED_SEGMENTED, 'train')
# testing_filename = _get_output_filename(_DIR_PROCESSED_SEGMENTED, 'test')
#if tf.gfile.Exists(training_filename): #and tf.gfile.Exists(testing_filename):
# print('Dataset files already exist. Exiting without re-creating them.')
# return
if _EXCLUDED_GRASSES:
exclude_list = ['Black-grass', 'Common wheat', 'Loose Silky-bent']
else:
exclude_list = []
# First, process training data:
data_filename = os.path.join(_dir_raw)
archive = zipfile.ZipFile(data_filename)
archive.extractall(_dir_processed)
filenames, class_names = _get_filenames_and_classes(_dir_processed, [setname], exclude_list)
class_dict = dict(zip(class_names, range(len(class_names))))
utils.save_dict(class_dict, _dir_processed, 'class_dict.json')
for shard_n in range(_NUM_SHARDS):
utils.show_message('Processing shard %d/%d' % (shard_n+1,_NUM_SHARDS))
tf_filename = _get_output_filename(_dir_processed, shard_n)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
_convert_to_tfrecord(filenames[shard_n], class_dict, tfrecord_writer)
tmp_dir = os.path.join(_dir_processed, setname)
tf.gfile.DeleteRecursively(tmp_dir)
# # First, process test data:
# with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
# data_filename = os.path.join(_dir_raw)
# archive = zipfile.ZipFile(data_filename)
# archive.extractall(_dir_processed)
# # filenames, class_names = _get_filenames_and_classes(_dir_processed, [setname, 'test'], exclude_list)
# class_dict = dict(zip(class_names, range(len(class_names))))
# _convert_to_tfrecord(filenames, class_dict, tfrecord_writer)
# tmp_dir = os.path.join(_dir_processed, setname)
# tf.gfile.DeleteRecursively(tmp_dir)
print('\nFinished converting the PSD %s dataset!' % setname)
| mit |
yonglehou/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 248 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso makes it possible to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same across tasks. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/IPython/core/prefilter.py | 9 | 25512 | # encoding: utf-8
"""
Prefiltering components.
Prefilters transform user input before it is exec'd by Python. These
transforms are used to implement additional syntax such as !ls and %magic.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from keyword import iskeyword
import re
from IPython.core.autocall import IPyAutocall
from traitlets.config.configurable import Configurable
from IPython.core.inputsplitter import (
ESC_MAGIC,
ESC_QUOTE,
ESC_QUOTE2,
ESC_PAREN,
)
from IPython.core.macro import Macro
from IPython.core.splitinput import LineInfo
from traitlets import (
List, Integer, Unicode, Bool, Instance, CRegExp
)
#-----------------------------------------------------------------------------
# Global utilities, errors and constants
#-----------------------------------------------------------------------------
class PrefilterError(Exception):
pass
# RegExp to identify potential function names
re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
# RegExp to exclude strings with this start from autocalling. In
# particular, all binary operators should be excluded, so that if foo is
# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
# characters '!=()' don't need to be checked for, as the checkPythonChars
# routine explicitely does so, to catch direct calls and rebindings of
# existing names.
# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
# it affects the rest of the group in square brackets.
re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
r'|^is |^not |^in |^and |^or ')
# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
# Handler Check Utilities
def is_shadowed(identifier, ip):
"""Is the given identifier defined in one of the namespaces which shadow
the alias and magic namespaces? Note that an identifier is different
than ifun, because it can not contain a '.' character."""
# This is much safer than calling ofind, which can change state
return (identifier in ip.user_ns \
or identifier in ip.user_global_ns \
or identifier in ip.ns_table['builtin']\
or iskeyword(identifier))
#-----------------------------------------------------------------------------
# Main Prefilter manager
#-----------------------------------------------------------------------------
class PrefilterManager(Configurable):
"""Main prefilter component.
    The IPython prefilter is run on all user input before it is executed. The
prefilter consumes lines of input and produces transformed lines of
input.
    The implementation consists of two phases:
1. Transformers
2. Checkers and handlers
Over time, we plan on deprecating the checkers and handlers and doing
everything in the transformers.
The transformers are instances of :class:`PrefilterTransformer` and have
a single method :meth:`transform` that takes a line and returns a
transformed line. The transformation can be accomplished using any
tool, but our current ones use regular expressions for speed.
After all the transformers have been run, the line is fed to the checkers,
which are instances of :class:`PrefilterChecker`. The line is passed to
the :meth:`check` method, which either returns `None` or a
:class:`PrefilterHandler` instance. If `None` is returned, the other
checkers are tried. If an :class:`PrefilterHandler` instance is returned,
the line is passed to the :meth:`handle` method of the returned
handler and no further checkers are tried.
    Both transformers and checkers have a `priority` attribute that determines
    the order in which they are called. Smaller priorities are tried first.
    Both transformers and checkers also have an `enabled` attribute, which is
    a boolean that determines if the instance is used.
Users or developers can change the priority or enabled attribute of
transformers or checkers, but they must call the :meth:`sort_checkers`
or :meth:`sort_transformers` method after changing the priority.
"""
multi_line_specials = Bool(True).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(PrefilterManager, self).__init__(shell=shell, **kwargs)
self.shell = shell
self.init_transformers()
self.init_handlers()
self.init_checkers()
#-------------------------------------------------------------------------
# API for managing transformers
#-------------------------------------------------------------------------
def init_transformers(self):
"""Create the default transformers."""
self._transformers = []
for transformer_cls in _default_transformers:
transformer_cls(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_transformers(self):
"""Sort the transformers by priority.
This must be called after the priority of a transformer is changed.
The :meth:`register_transformer` method calls this automatically.
"""
self._transformers.sort(key=lambda x: x.priority)
@property
def transformers(self):
"""Return a list of checkers, sorted by priority."""
return self._transformers
def register_transformer(self, transformer):
"""Register a transformer instance."""
if transformer not in self._transformers:
self._transformers.append(transformer)
self.sort_transformers()
def unregister_transformer(self, transformer):
"""Unregister a transformer instance."""
if transformer in self._transformers:
self._transformers.remove(transformer)
#-------------------------------------------------------------------------
# API for managing checkers
#-------------------------------------------------------------------------
def init_checkers(self):
"""Create the default checkers."""
self._checkers = []
for checker in _default_checkers:
checker(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_checkers(self):
"""Sort the checkers by priority.
This must be called after the priority of a checker is changed.
The :meth:`register_checker` method calls this automatically.
"""
self._checkers.sort(key=lambda x: x.priority)
@property
def checkers(self):
"""Return a list of checkers, sorted by priority."""
return self._checkers
def register_checker(self, checker):
"""Register a checker instance."""
if checker not in self._checkers:
self._checkers.append(checker)
self.sort_checkers()
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker)
#-------------------------------------------------------------------------
# API for managing handlers
#-------------------------------------------------------------------------
def init_handlers(self):
"""Create the default handlers."""
self._handlers = {}
self._esc_handlers = {}
for handler in _default_handlers:
handler(
shell=self.shell, prefilter_manager=self, parent=self
)
@property
def handlers(self):
"""Return a dict of all the handlers."""
return self._handlers
def register_handler(self, name, handler, esc_strings):
"""Register a handler instance by name with esc_strings."""
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler
def unregister_handler(self, name, handler, esc_strings):
"""Unregister a handler instance by name with esc_strings."""
try:
del self._handlers[name]
except KeyError:
pass
for esc_str in esc_strings:
h = self._esc_handlers.get(esc_str)
if h is handler:
del self._esc_handlers[esc_str]
def get_handler_by_name(self, name):
"""Get a handler by its name."""
return self._handlers.get(name)
def get_handler_by_esc(self, esc_str):
"""Get a handler by its escape string."""
return self._esc_handlers.get(esc_str)
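    # For example (a sketch of the default registrations below): after init,
    # get_handler_by_name('magic') and get_handler_by_esc(ESC_MAGIC) both
    # resolve to the registered MagicHandler instance.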
#-------------------------------------------------------------------------
# Main prefiltering API
#-------------------------------------------------------------------------
def prefilter_line_info(self, line_info):
"""Prefilter a line that has been converted to a LineInfo object.
This implements the checker/handler part of the prefilter pipe.
"""
# print "prefilter_line_info: ", line_info
handler = self.find_handler(line_info)
return handler.handle(line_info)
def find_handler(self, line_info):
"""Find a handler for the line_info by trying checkers."""
for checker in self.checkers:
if checker.enabled:
handler = checker.check(line_info)
if handler:
return handler
return self.get_handler_by_name('normal')
def transform_line(self, line, continue_prompt):
"""Calls the enabled transformers in order of increasing priority."""
for transformer in self.transformers:
if transformer.enabled:
line = transformer.transform(line, continue_prompt)
return line
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
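        # Sketch: for lines == "x=1\ny=2", prefilter_line is called with
        # continue_prompt=False for "x=1" and continue_prompt=True for "y=2".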
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out
#-----------------------------------------------------------------------------
# Prefilter transformers
#-----------------------------------------------------------------------------
class PrefilterTransformer(Configurable):
"""Transform a line of user input."""
priority = Integer(100).tag(config=True)
# Transformers don't currently use shell or prefilter_manager, but as we
# move away from checkers and handlers, they will need them.
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterTransformer, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_transformer(self)
def transform(self, line, continue_prompt):
"""Transform a line, returning the new one."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
#-----------------------------------------------------------------------------
# Prefilter checkers
#-----------------------------------------------------------------------------
class PrefilterChecker(Configurable):
"""Inspect an input line and return a handler for that line."""
priority = Integer(100).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterChecker, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_checker(self)
def check(self, line_info):
"""Inspect line_info and return a handler instance or None."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
class EmacsChecker(PrefilterChecker):
priority = Integer(100).tag(config=True)
enabled = Bool(False).tag(config=True)
def check(self, line_info):
"Emacs ipython-mode tags certain input lines."
if line_info.line.endswith('# PYTHON-MODE'):
return self.prefilter_manager.get_handler_by_name('emacs')
else:
return None
class MacroChecker(PrefilterChecker):
priority = Integer(250).tag(config=True)
def check(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
if isinstance(obj, Macro):
return self.prefilter_manager.get_handler_by_name('macro')
else:
return None
class IPyAutocallChecker(PrefilterChecker):
priority = Integer(300).tag(config=True)
def check(self, line_info):
"Instances of IPyAutocall in user_ns get autocalled immediately"
obj = self.shell.user_ns.get(line_info.ifun, None)
if isinstance(obj, IPyAutocall):
obj.set_ip(self.shell)
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
class AssignmentChecker(PrefilterChecker):
priority = Integer(600).tag(config=True)
def check(self, line_info):
"""Check to see if user is assigning to a var for the first time, in
which case we want to avoid any sort of automagic / autocall games.
        This allows users to assign true python variables to names that would
        otherwise be aliases or magics (the magic/alias systems always take a
        back seat to true python code). E.g. ls='hi', or ls,that=1,2"""
if line_info.the_rest:
if line_info.the_rest[0] in '=,':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutoMagicChecker(PrefilterChecker):
priority = Integer(700).tag(config=True)
def check(self, line_info):
"""If the ifun is magic, and automagic is on, run it. Note: normal,
non-auto magic would already have been triggered via '%' in
check_esc_chars. This just checks for automagic. Also, before
triggering the magic handler, make sure that there is nothing in the
user namespace which could shadow it."""
if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
return None
# We have a likely magic method. Make sure we should actually call it.
if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
return None
head = line_info.ifun.split('.',1)[0]
if is_shadowed(head, self.shell):
return None
return self.prefilter_manager.get_handler_by_name('magic')
class PythonOpsChecker(PrefilterChecker):
priority = Integer(900).tag(config=True)
def check(self, line_info):
"""If the 'rest' of the line begins with a function call or pretty much
any python operator, we should simply execute the line (regardless of
whether or not there's a possible autocall expansion). This avoids
        spurious (and very confusing) getattr() accesses."""
if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutocallChecker(PrefilterChecker):
priority = Integer(1000).tag(config=True)
function_name_regexp = CRegExp(re_fun_name,
help="RegExp to identify potential function names."
).tag(config=True)
exclude_regexp = CRegExp(re_exclude_auto,
help="RegExp to exclude strings with this start from autocalling."
).tag(config=True)
def check(self, line_info):
"Check if the initial word/function is callable and autocall is on."
if not self.shell.autocall:
return None
oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
if not oinfo['found']:
return None
if callable(oinfo['obj']) \
and (not self.exclude_regexp.match(line_info.the_rest)) \
and self.function_name_regexp.match(line_info.ifun):
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
#-----------------------------------------------------------------------------
# Prefilter handlers
#-----------------------------------------------------------------------------
class PrefilterHandler(Configurable):
handler_name = Unicode('normal')
esc_strings = List([])
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterHandler, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_handler(
self.handler_name,
self,
self.esc_strings
)
def handle(self, line_info):
# print "normal: ", line_info
"""Handle normal input lines. Use as a template for handlers."""
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if (continue_prompt and
self.shell.autoindent and
line.isspace() and
0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = ''
return line
def __str__(self):
return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
class MacroHandler(PrefilterHandler):
handler_name = Unicode("macro")
def handle(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
pre_space = line_info.pre_whitespace
line_sep = "\n" + pre_space
return pre_space + line_sep.join(obj.value.splitlines())
class MagicHandler(PrefilterHandler):
handler_name = Unicode('magic')
esc_strings = List([ESC_MAGIC])
def handle(self, line_info):
"""Execute magic functions."""
ifun = line_info.ifun
the_rest = line_info.the_rest
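        # Sketch: with pre_whitespace="", ifun="timeit" and the_rest="f(x)",
        # cmd becomes "get_ipython().magic('timeit f(x)')".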
cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace,
(ifun + " " + the_rest))
return cmd
class AutoHandler(PrefilterHandler):
handler_name = Unicode('auto')
esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
def handle(self, line_info):
"""Handle lines which can be auto-executed, quoting if requested."""
line = line_info.line
ifun = line_info.ifun
the_rest = line_info.the_rest
esc = line_info.esc
continue_prompt = line_info.continue_prompt
obj = line_info.ofind(self.shell)['obj']
# This should only be active for single-line input!
if continue_prompt:
return line
force_auto = isinstance(obj, IPyAutocall)
# User objects sometimes raise exceptions on attribute access other
# than AttributeError (we've seen it in the past), so it's safest to be
# ultra-conservative here and catch all.
try:
auto_rewrite = obj.rewrite
except Exception:
auto_rewrite = True
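        # Illustrative rewrites performed below (a sketch, assuming ifun="f"
        # and the_rest="a b"):
        #   ESC_QUOTE   ->  f("a", "b")   # auto-quote, splitting on whitespace
        #   ESC_QUOTE2  ->  f("a b")      # auto-quote the whole remainder
        #   ESC_PAREN   ->  f(a,b)        # auto-parenthesize the arguments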
if esc == ESC_QUOTE:
# Auto-quote splitting on whitespace
newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
elif esc == ESC_QUOTE2:
# Auto-quote whole string
newcmd = '%s("%s")' % (ifun,the_rest)
elif esc == ESC_PAREN:
newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
else:
# Auto-paren.
if force_auto:
# Don't rewrite if it is already a call.
do_rewrite = not the_rest.startswith('(')
else:
if not the_rest:
# We only apply it to argument-less calls if the autocall
# parameter is set to 2.
do_rewrite = (self.shell.autocall >= 2)
elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
# Don't autocall in this case: item access for an object
# which is BOTH callable and implements __getitem__.
do_rewrite = False
else:
do_rewrite = True
# Figure out the rewritten command
if do_rewrite:
if the_rest.endswith(';'):
newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
else:
newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
else:
normal_handler = self.prefilter_manager.get_handler_by_name('normal')
return normal_handler.handle(line_info)
# Display the rewritten call
if auto_rewrite:
self.shell.auto_rewrite_input(newcmd)
return newcmd
class EmacsHandler(PrefilterHandler):
handler_name = Unicode('emacs')
esc_strings = List([])
def handle(self, line_info):
"""Handle input lines marked by python-mode."""
# Currently, nothing is done. Later more functionality can be added
# here if needed.
# The input cache shouldn't be updated
return line_info.line
#-----------------------------------------------------------------------------
# Defaults
#-----------------------------------------------------------------------------
_default_transformers = [
]
_default_checkers = [
EmacsChecker,
MacroChecker,
IPyAutocallChecker,
AssignmentChecker,
AutoMagicChecker,
PythonOpsChecker,
AutocallChecker
]
_default_handlers = [
PrefilterHandler,
MacroHandler,
MagicHandler,
AutoHandler,
EmacsHandler
]
| mit |
pkruskal/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 248 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 129 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
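    # Sketch of the identity being checked: with K = X X^T and
    # dual_coef = (K + alpha*I)^{-1} y, the primal coefficients satisfy
    # coef = X^T dual_coef (ridge without an intercept).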
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
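                # Sketch: sum_i w_i*(y_i - x_i.b)^2 equals
                # sum_i (sqrt(w_i)*y_i - (sqrt(w_i)*x_i).b)^2, so rescaling the
                # rows of X and y by sqrt(w) reproduces the weighted fit.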
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
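        # (Sketch: the effective per-sample weight is sample_weight *
        #  class_weight[y], so fitting with sample_weight**2 should match
        #  class_weight combined with sample_weight, both being 100 for class 1.)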
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
cpacker/GraphZip | test/test_expr.py | 1 | 23327 | """ Test file that exercises the GraphZip compressor on example graphs and SUBGEN-generated datasets. """
import cProfile
import os
import unittest
import sys
import time
from operator import itemgetter
from pstats import Stats
from timeit import default_timer as timer
try:
import cPickle as pickle
except:
import pickle
from compressor.compress import Compressor
from .utils import import_insts, parse_subdue_output
DEBUG = True # enable for debug print output
SAVE = False # save the SVGs from each example
PROFILE = False
GRAPH_DIR = "data/" # root dir for graph (eg. *.g, *.graph) files
IMAGE_DIR = "images/" # root dir for SVG images
SUBDUE_DIR = "../SUBDUE/subdue-5.2.2/bin/" # location of SUBDUE exe
SUBGEN_DIR = "data/SUBGEN/" # location of SUBGEN .graph and .insts files
def get_gt_patterns_found(groundtruth, patterns):
""" Returns an error metric using the groundtruth and returned patterns
Error = #gt_patterns missed / total #gt_patterns
"""
hits = [0 for g in groundtruth] # 1 if hit, 0 if miss (on gt)
# For each ground_truth pattern, check if we found it with our algorithm
for i, gt in enumerate(groundtruth):
c1 = gt.vs["label"]
c1_edge = gt.es["label"]
for p in patterns:
if len(p.es) == 0:
continue
c2 = p.vs["label"]
c2_edge = p.es["label"]
if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):
continue
try:
if gt.isomorphic_vf2(p, color1=c1, color2=c2,
edge_color1=c1_edge, edge_color2=c2_edge):
if(hits[i] >= 1):
print("Warning: ground-truth pattern already found")
else:
hits[i] = 1
# print("hit:",p)
break
            except Exception as e:
                print('Error during isomorphism check: %s' % e)
print(c1_edge)
print(c2_edge)
return (sum(hits), len(hits)) # hits, total
def get_patterns_also_in_gt(groundtruth, patterns):
""" Returns an error metric using the groundtruth and returned patterns
Error = #patterns not in gt / total #patterns
"""
hits = [0 for p in patterns] # 1 if hit, 0 if miss
# For each ground_truth pattern, check if we found it with our algorithm
for i, p in enumerate(patterns):
if len(p.es) == 0:
continue
c1 = p.vs["label"]
c1_edge = p.es["label"]
for gt in groundtruth:
c2 = gt.vs["label"]
c2_edge = gt.es["label"]
if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):
continue
if gt.isomorphic_vf2(p, color1=c1, color2=c2,
edge_color1=c1_edge, edge_color2=c2_edge):
if(hits[i] >= 1):
print("Warning: ground-truth pattern already found")
else:
hits[i] = 1
break # consider multiple instances of same pattern?
return (sum(hits), len(hits)) # hits,total
def print_top_n_graphs(C, n):
""" Print (repr) the iGraph representation and count of the top-N patterns
Args:
C (Compressor object): Has member field p (list of (Graph,c) tuples)
n (int): Number of patterns to print
"""
ps = sorted(C.P, key=itemgetter(2), reverse=True)
for i in range(n):
if i >= len(ps):
break
p, c, s = ps[i]
print(p)
print("Appeared %d times" % c)
class TestExamples(unittest.TestCase):
""" Test the main compression algorithm using small example graphs
The following tests only test that (>0) patterns are captured, ensuring
basic functionality of the compressor
"""
def setUp(self):
batch_size = 50
dict_size = 5000
if DEBUG:
print("Setting up compressor with batch_size=%d, dict_size=%d ..."
% (batch_size, dict_size))
self.c = Compressor(batch_size, dict_size)
if PROFILE:
self.pr = cProfile.Profile()
self.pr.enable()
def tearDown(self):
if PROFILE:
p = Stats(self.pr)
p.strip_dirs()
p.sort_stats('cumtime')
p.print_stats()
if DEBUG:
print('\n{}>>>'.format('-'*77))
def test_nonempty_basic(self):
if DEBUG:
print("Running compression on basic1.graph ...")
self.c.compress_file(GRAPH_DIR + "basic1.graph")
# No "correct" patterns, however we should extract at least ONE pattern
self.assertNotEqual(self.c.P, [])
def test_nonempty_groups(self):
if DEBUG:
print("Running compression on groups.graph ...")
self.c.compress_file(GRAPH_DIR + "groups.graph")
self.assertNotEqual(self.c.P, [])
def test_nonempty_diabetes(self):
if DEBUG:
print("Running compression on diabetes_0.graph ...")
self.c.compress_file(GRAPH_DIR + "diabetes_0.graph")
self.assertNotEqual(self.c.P, [])
# @unittest.skip("Non-standard test")
class TestGraphZipSubgen(unittest.TestCase):
""" Test GraphZip on graphs and ground-truth files created via Subgen
Idea is to test GraphZip on synthetic graphs containing:
cliques, cycles, paths and trees
The synthetic graphs are created via Subgen.
    Each Subgen graph has a certain pattern embedded in it (e.g. 3-cliques) and
the specific instances of those patterns are specified in the *.insts files
Subgen generates.
For each test, we check how many of the patterns from the .insts file were
captured in the compressor dictionary.
"""
def setUp(self):
batch_size = 10
dict_size = 1000
if DEBUG:
print("Setting up compressor with batch_size=%d, dict_size=%d ..."
% (batch_size, dict_size))
self.c = Compressor(batch_size, dict_size)
self.c.add_implicit_vertices = True # since batch_size < file_size
if PROFILE:
self.pr = cProfile.Profile()
self.pr.enable()
def tearDown(self):
if DEBUG:
print("\nCompression was run on a total of %d times\n"
% self.c._compress_count)
if PROFILE:
p = Stats(self.pr)
p.strip_dirs()
p.sort_stats('cumtime')
p.print_stats()
if(DEBUG):
print('\n{}>>>'.format('-'*77))
def _test_graphzip_subgen(self, fin_graphzip, fin_insts, n=None):
""" Run compress on the Subgen file, then checks against GT """
print('Running compression on %s...' % fin_graphzip)
start = time.perf_counter()
# run compression to get pattern dictionary
self.c.compress_file(fin_graphzip)
elapsed = time.perf_counter()-start
print('Compression took:')
print(elapsed)
# collect y and y_hat
gt_gs = import_insts(fin_insts)
graphzip_gs = [g for (g, _, _) in self.c.P]
# trim the pattern dictionary e.g. to match the #patterns Subdue found
if n is not None:
graphzip_gs = graphzip_gs[:n]
print('Succesfully imported %d graphs from the pattern dictionary'
% len(graphzip_gs))
# error metric 1
hits1, total1 = get_gt_patterns_found(gt_gs, graphzip_gs)
print('%d/%d GT patterns in the insts file were found by GraphZip.' %
(hits1, total1))
# error metric 2
hits2, total2 = get_patterns_also_in_gt(gt_gs, graphzip_gs)
print('%d/%d patterns in the dictionary were in the insts file.' %
(hits2, total2))
def _test_multiple(self, fin_graphzip, fin_insts, T, n=None):
""" Run the test multiple times to get iterative pattern growth """
for t in range(T+1)[1:]:
self._test_graphzip_subgen(fin_graphzip, fin_insts, n)
def test_3CLIQ_20(self):
print('20pc coverage:')
self._test_multiple("%s3CLIQ/3CLIQ_1_5_20cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_3CLIQ_50(self):
print('50pc coverage:')
self._test_multiple("%s3CLIQ/3CLIQ_1_5_50cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_3CLIQ_80(self):
print('80pc coverage:')
self._test_multiple("%s3CLIQ/3CLIQ_1_5_80cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_80c.insts" % SUBGEN_DIR,
1)
def test_4PATH_20(self):
print('20pc coverage:')
self._test_multiple("%s4PATH/4PATH_1_5_20cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_4PATH_50(self):
print('50pc coverage:')
self._test_multiple("%s4PATH/4PATH_1_5_50cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_4PATH_80(self):
print('80pc coverage:')
self._test_multiple("%s4PATH/4PATH_1_5_80cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_80c.insts" % SUBGEN_DIR,
1)
def test_4STAR_20(self):
print('20pc coverage:')
self._test_multiple("%s4STAR/4STAR_1_5_20cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_4STAR_50(self):
print('50pc coverage:')
self._test_multiple("%s4STAR/4STAR_1_5_50cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_4STAR_80(self):
print('80pc coverage:')
self._test_multiple("%s4STAR/4STAR_1_5_80cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_80c.insts" % SUBGEN_DIR,
1)
def test_5PATH_20(self):
print('20pc coverage:')
self._test_multiple("%s5PATH/5PATH_1_5_20cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_5PATH_50(self):
print('50pc coverage:')
self._test_multiple("%s5PATH/5PATH_1_5_50cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_5PATH_80(self):
print('80pc coverage:')
self._test_multiple("%s5PATH/5PATH_1_5_80cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_80c.insts" % SUBGEN_DIR,
1)
def test_8TREE_20(self):
print('20pc coverage:')
self._test_multiple("%s8TREE/8TREE_1_5_20cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_8TREE_50(self):
print('50pc coverage:')
self._test_multiple("%s8TREE/8TREE_1_5_50cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_8TREE_80(self):
print('80pc coverage:')
self._test_multiple("%s8TREE/8TREE_1_5_80cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_80c.insts" % SUBGEN_DIR,
1)
def test_4CLIQ_20(self):
print('20pc coverage:')
self._test_multiple("%s4CLIQ/4CLIQ_1_5_20cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_20c.insts" % SUBGEN_DIR,
1)
def test_4CLIQ_50(self):
print('50pc coverage:')
self._test_multiple("%s4CLIQ/4CLIQ_1_5_50cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_50c.insts" % SUBGEN_DIR,
1)
def test_4CLIQ_80(self):
print('80pc coverage:')
self._test_multiple("%s4CLIQ/4CLIQ_1_5_80cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_80c.insts" % SUBGEN_DIR,
1)
@unittest.skip("Non-standard test")
class TestSubdueSubgen(unittest.TestCase):
""" Test SUBDUE on graphs and ground-truth files created via Subgen """
def _test_subdue_subgen(self, fin_subdue, fin_insts, n=100):
"""
Use GraphZip and Subdue on the same example graph to compare the error
rates and runtime on each
When comparing run-time, we only count the runtime of the compression
part, as opposed to the overall time for the entire test (reported by
the profiler). This means starting and stopping the clock before and
after the Compressor.compress_file() method and the `./subdue' system
call.
Subdue outputs its patterns to a file in the .graph format. After Subdue
is finished running we can parse the file into iGraph objects then
compare with the ground-truth using the same functions.
Iterate once (no compression) to find bottom-level structures in the
graph
"""
if not SUBDUE_DIR:
pass
# fout = "subdue_patterns_output_latest.out"
fout = "subdue_patterns_output_{}.out".format(fin_subdue[-20:-6])
# XXX change to subprocess.call
# e.g. './subdue -nsubs 100 ../data/3clique.graph > example_out.txt'
cmd = "{}subdue -nsubs {} {} > {}".format(
SUBDUE_DIR, n, GRAPH_DIR + fin_subdue, fout)
print(cmd)
start = time.perf_counter()
status = os.system(cmd) # run cmd
elapsed = time.perf_counter()-start
if status:
raise Exception("Error occured while attempting to run Subdue")
print(elapsed)
gt_gs = import_insts(GRAPH_DIR+fin_insts)
subdue_gs = parse_subdue_output(fout)
# error metric 1
hits1, total1 = get_gt_patterns_found(gt_gs, subdue_gs)
print('%d/%d GT patterns in the insts file were found by Subdue.' %
(hits1, total1))
# error metric 2
hits2, total2 = get_patterns_also_in_gt(gt_gs, subdue_gs)
print('%d/%d patterns found by Subdue were in the insts file.' %
(hits2, total2))
def test_3CLIQ_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s3CLIQ/3CLIQ_1_5_20cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_20c.insts" % SUBGEN_DIR)
def test_3CLIQ_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s3CLIQ/3CLIQ_1_5_50cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_50c.insts" % SUBGEN_DIR)
def test_3CLIQ_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s3CLIQ/3CLIQ_1_5_80cx.graph" % SUBGEN_DIR,
"%s3CLIQ/3CLIQ_1_5_80c.insts" % SUBGEN_DIR)
def test_4PATH_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s4PATH/4PATH_1_5_20cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_20c.insts" % SUBGEN_DIR)
def test_4PATH_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s4PATH/4PATH_1_5_50cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_50c.insts" % SUBGEN_DIR)
def test_4PATH_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s4PATH/4PATH_1_5_80cx.graph" % SUBGEN_DIR,
"%s4PATH/4PATH_1_5_80c.insts" % SUBGEN_DIR)
def test_4STAR_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s4STAR/4STAR_1_5_20cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_20c.insts" % SUBGEN_DIR)
def test_4STAR_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s4STAR/4STAR_1_5_50cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_50c.insts" % SUBGEN_DIR)
def test_4STAR_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s4STAR/4STAR_1_5_80cx.graph" % SUBGEN_DIR,
"%s4STAR/4STAR_1_5_80c.insts" % SUBGEN_DIR)
def test_5PATH_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s5PATH/5PATH_1_5_20cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_20c.insts" % SUBGEN_DIR)
def test_5PATH_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s5PATH/5PATH_1_5_50cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_50c.insts" % SUBGEN_DIR)
def test_5PATH_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s5PATH/5PATH_1_5_80cx.graph" % SUBGEN_DIR,
"%s5PATH/5PATH_1_5_80c.insts" % SUBGEN_DIR)
def test_8TREE_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s8TREE/8TREE_1_5_20cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_20c.insts" % SUBGEN_DIR)
def test_8TREE_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s8TREE/8TREE_1_5_50cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_50c.insts" % SUBGEN_DIR)
def test_8TREE_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s8TREE/8TREE_1_5_80cx.graph" % SUBGEN_DIR,
"%s8TREE/8TREE_1_5_80c.insts" % SUBGEN_DIR)
def test_4CLIQ_20(self):
print('20pc coverage:')
self._test_subdue_subgen("%s4CLIQ/4CLIQ_1_5_20cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_20c.insts" % SUBGEN_DIR)
def test_4CLIQ_50(self):
print('50pc coverage:')
self._test_subdue_subgen("%s4CLIQ/4CLIQ_1_5_50cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_50c.insts" % SUBGEN_DIR)
def test_4CLIQ_80(self):
print('80pc coverage:')
self._test_subdue_subgen("%s4CLIQ/4CLIQ_1_5_80cx.graph" % SUBGEN_DIR,
"%s4CLIQ/4CLIQ_1_5_80c.insts" % SUBGEN_DIR)
@unittest.skip("Non-standard test")
class TestLarge(unittest.TestCase):
""" GraphZip with larger (real-world) .graph datasets """
def setUp(self):
batch_size = 5
dict_size = 50
if DEBUG:
print("Setting up compressor with batch_size=%d, dict_size=%d ..."
% (batch_size, dict_size))
self.c = Compressor(batch_size, dict_size)
if PROFILE:
self.pr = cProfile.Profile()
self.pr.enable()
def tearDown(self):
if DEBUG:
print("\nCompression was run a total of %d times\n"
% self.c._compress_count)
print("%d lines read" % self.c._lines_read)
print("Dictionary trimmed %d times" % self.c._dict_trimmed)
print("Compressor: batch_size=%d, dict_size=%d ..."
% (self.c.batch_size, self.c.dict_size))
if PROFILE:
p = Stats(self.pr)
p.strip_dirs()
p.sort_stats('cumtime')
p.print_stats()
if(DEBUG):
print('\n{}>>>'.format('-'*77))
def testHetRec(self):
times = []
try:
for i in range(1, 99):
f = '%sHetRec/hetrec_year_vfirst/%d.graph' % (GRAPH_DIR, i)
start = timer()
self.c.compress_file(f)
end = timer()
elapsed = end - start
times.append((i, elapsed))
print('\nTook %.2f seconds' % elapsed)
finally:
self.c.P = sorted(self.c.P, key=itemgetter(2), reverse=True)
print('Printing top 50 patterns for reference:')
for g, c, s in self.c.P[:49]:
print('\ncount: %d, score: %d\n' % (c, s))
print(g)
print(g.vs['label'])
# Save the dictionary, etc.
print('Saving the latest state of GraphZip..')
self.c.save_state('latest_HetRec_state.p')
# Save the time measurements for plotting
print('Saving time measurements..')
print(times)
with open('latest_HetRec_times.p', 'wb') as pfile:
pickle.dump((times), pfile)
def testHiggs(self):
times = []
try:
for i in range(1, 169):
# f = '../datasets/Twitter_Higgs/higgs_hour_vfirst/%d.g' % i
f = '%sTwitter_Higgs/higgs_hour_vfirst_unilabel/%d.g' %\
(GRAPH_DIR, i)
start = timer()
self.c.compress_file(f)
end = timer()
elapsed = end - start
times.append((i, elapsed))
print('\nTook %.2f seconds' % elapsed)
finally:
self.c.P = sorted(self.c.P, key=itemgetter(2), reverse=True)
print('Printing top 50 patterns for reference:')
for g, c, s in self.c.P[:49]:
print('\ncount: %d, score: %d\n' % (c, s))
print(g)
print('Vertex labels:')
print(g.vs['label'])
print('Edge labels:')
print(g.es['label'])
# Save the dictionary, etc
print('Saving the latest state of GraphZip..')
self.c.save_state('latest_Higgs_state.p')
# Save the time measurements for plotting
print('Saving time measurements..')
print(times)
with open('latest_Higgs_times.p', 'wb') as pfile:
pickle.dump((times), pfile)
def testNBER(self):
times = []
try:
for i in range(1, 301):
f = '%sNBER/cite75_99_month_clabels/%d.graph' % (GRAPH_DIR, i)
#f = '../datasets/NBER/cite75_99_month_clabels_v0/%d.graph' % i
# f = '../datasets/NBER/cite75_99_month_clabels_v0_vfirst/%d.graph' % i
start = timer()
self.c.compress_file(f)
end = timer()
elapsed = end - start
times.append((i, elapsed))
print('\nTook %d seconds' % elapsed)
finally:
self.c.P = sorted(self.c.P, key=itemgetter(2), reverse=True)
for g, c, s in self.c.P[:49]:
print('\ncount: %d, score: %d\n' % (c, s))
print(g)
print(g.vs['label'])
# Save the dictionary, etc
print('Saving the latest state of GraphZip..')
self.c.save_state('latest_NBER_state.p')
# Save the time measurements for plotting
print('Saving time measurements..')
print(times)
with open('latest_NBER_times.p', 'wb') as pfile:
pickle.dump((times), pfile)
def main(out=sys.stderr, verbosity=2):
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(sys.modules[__name__])
unittest.TextTestRunner(out, verbosity=verbosity).run(suite)
if __name__ == '__main__':
with open('testing.out', 'w') as f:
main(f)
| mit |
ageron/tensorflow | tensorflow/contrib/data/python/ops/enumerate_ops.py | 20 | 1903 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import enumerate_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.enumerate_dataset(...)`.")
def enumerate_dataset(start=0):
"""A transformation that enumerate the elements of a dataset.
  It is similar to Python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.apply(tf.contrib.data.enumerate(start=5)) == { (5, 1), (6, 2), (7, 3) }
b.apply(tf.contrib.data.enumerate()) == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start
value for enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return enumerate_ops.enumerate_dataset(start)
| apache-2.0 |
ageron/tensorflow | tensorflow/python/debug/examples/debug_tflearn_iris.py | 17 | 7249 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debug the tf-learn iris example, based on the tf-learn tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tempfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python import debug as tf_debug
# URLs to download data sets from, if necessary.
IRIS_TRAINING_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_training.csv"
IRIS_TEST_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_test.csv"
def maybe_download_data(data_dir):
"""Download data sets if necessary.
Args:
data_dir: Path to where data should be downloaded.
Returns:
Paths to the training and test data files.
"""
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
training_data_path = os.path.join(data_dir,
os.path.basename(IRIS_TRAINING_DATA_URL))
if not os.path.isfile(training_data_path):
train_file = open(training_data_path, "wt")
urllib.request.urlretrieve(IRIS_TRAINING_DATA_URL, train_file.name)
train_file.close()
print("Training data are downloaded to %s" % train_file.name)
test_data_path = os.path.join(data_dir, os.path.basename(IRIS_TEST_DATA_URL))
if not os.path.isfile(test_data_path):
test_file = open(test_data_path, "wt")
urllib.request.urlretrieve(IRIS_TEST_DATA_URL, test_file.name)
test_file.close()
print("Test data are downloaded to %s" % test_file.name)
return training_data_path, test_data_path
_IRIS_INPUT_DIM = 4
def iris_input_fn():
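  # Load the full iris dataset as constant tensors: a [N, 4] float feature
  # matrix and a length-N integer label vector, returned as a single batch.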
iris = base.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def main(_):
# Load datasets.
if FLAGS.fake_data:
def training_input_fn():
return ({"features": tf.random_normal([128, 4])},
tf.random_uniform([128], minval=0, maxval=3, dtype=tf.int32))
def test_input_fn():
return ({"features": tf.random_normal([32, 4])},
tf.random_uniform([32], minval=0, maxval=3, dtype=tf.int32))
feature_columns = [
tf.feature_column.numeric_column("features", shape=(4,))]
else:
training_data_path, test_data_path = maybe_download_data(FLAGS.data_dir)
column_names = [
"sepal_length", "sepal_width", "petal_length", "petal_width", "label"]
batch_size = 32
def training_input_fn():
return tf.data.experimental.make_csv_dataset([training_data_path],
batch_size,
column_names=column_names,
label_name="label")
def test_input_fn():
return tf.data.experimental.make_csv_dataset([test_data_path],
batch_size,
column_names=column_names,
label_name="label")
feature_columns = [tf.feature_column.numeric_column(feature)
for feature in column_names[:-1]]
# Build 3 layer DNN with 10, 20, 10 units respectively.
model_dir = FLAGS.model_dir or tempfile.mkdtemp(prefix="debug_tflearn_iris_")
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
hooks = []
if FLAGS.debug:
hooks.append(tf_debug.LocalCLIDebugHook(ui_type=FLAGS.ui_type,
dump_root=FLAGS.dump_root))
elif FLAGS.tensorboard_debug_address:
hooks.append(tf_debug.TensorBoardDebugHook(FLAGS.tensorboard_debug_address))
# Train model, using tfdbg hook.
classifier.train(training_input_fn,
steps=FLAGS.train_steps,
hooks=hooks)
# Evaluate accuracy, using tfdbg hook.
accuracy_score = classifier.evaluate(test_input_fn,
steps=FLAGS.eval_steps,
hooks=hooks)["accuracy"]
print("After training %d steps, Accuracy = %f" %
(FLAGS.train_steps, accuracy_score))
# Make predictions, using tfdbg hook.
predict_results = classifier.predict(test_input_fn, hooks=hooks)
print("A prediction result: %s" % next(predict_results))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/iris_data",
help="Directory to save the training and test data in.")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Directory to save the trained model in.")
parser.add_argument(
"--train_steps",
type=int,
default=10,
help="Number of steps to run training for.")
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of steps to run evaluation foir.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--fake_data",
type="bool",
nargs="?",
const=True,
default=False,
help="Use fake MNIST data for unit testing")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--dump_root",
type=str,
default="",
help="Optional custom root directory for temporary debug dump data")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
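# Example invocations (illustrative; the flag values below are assumptions):
#   python debug_tflearn_iris.py --debug
#   python debug_tflearn_iris.py --tensorboard_debug_address=localhost:6064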
| apache-2.0 |
yonglehou/scikit-learn | sklearn/svm/tests/test_svm.py | 115 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other thread is
    # calling `srand` concurrently through this wrapper).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
mxjl620/scikit-learn | examples/linear_model/plot_omp.py | 379 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
dimkal/mne-python | examples/inverse/plot_compute_mne_inverse_raw_in_label.py | 19 | 1614 | """
=============================================
Compute sLORETA inverse solution on raw data
=============================================
Compute the sLORETA inverse solution on a raw dataset restricted
to a brain label and store the solution in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
snr = 1.0 # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2
method = "sLORETA" # use sLORETA method (could also be MNE or dSPM)
# Load data
raw = Raw(fname_raw)
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
# Compute inverse solution
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop, pick_ori=None)
# Save result in stc files
stc.save('mne_%s_raw_inverse_%s' % (method, label_name))
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
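# Illustrative note (not part of the original example): the saved result can be
# reloaded later with mne.read_source_estimate(), passing the same file name
# stem used in stc.save() above, e.g.:
#   stc = mne.read_source_estimate('mne_sLORETA_raw_inverse_Aud-lh')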
| bsd-3-clause |
hammerlab/immuno_research | Feb7_tumor_specific_antigens.py | 1 | 9469 | import numpy as np
import sklearn
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.linear_model
from epitopes import cri_tumor_antigens, iedb, features, reduced_alphabet
import eval_dataset
cancer_peptides = cri_tumor_antigens.load_peptides(mhc_class = 1)
def run(x,y, f):
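    # Transform the CRI tumor-specific antigen peptides with the same
    # featurizer `f` used for the IEDB training data, label them all as
    # positives, and hand both sets to eval_dataset.eval_split for scoring.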
x_test = f.transform(cancer_peptides)
y_test = np.array([True] * len(cancer_peptides))
eval_dataset.eval_split(x,y,x_test,y_test)
ASSAY = 'cytotoxicity'
print
print "---"
print "aromatic unigram"
X, Y, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= reduced_alphabet.aromatic2,
return_transformer = True)
eval_dataset.eval_cv(X, Y)
print "Tumor-specific antigens"
run(X,Y,f)
print
print "---"
print "aromatic bigram"
X, Y, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= reduced_alphabet.aromatic2,
return_transformer = True)
eval_dataset.eval_cv(X, Y)
print "Tumor-specific antigens"
run(X, Y, f)
print
print "---"
print "aromatic trigram"
X, Y, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 3,
reduced_alphabet= reduced_alphabet.aromatic2,
return_transformer = True)
eval_dataset.eval_cv(X, Y)
print "Tumor-specific antigens"
run(X, Y, f)
print
print "---"
print "6-letter unigram"
X6, Y6, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= reduced_alphabet.alex6,
return_transformer = True)
eval_dataset.eval_cv(X6, Y6)
print "Tumor-specific antigens"
run(X6,Y6,f)
print
print "---"
print "6-letter bigram"
X6, Y6, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= reduced_alphabet.alex6,
return_transformer = True)
eval_dataset.eval_cv(X6, Y6)
print "Tumor-specific antigens"
run(X6, Y6, f)
print
print "---"
print "6-letter trigram"
X6, Y6, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 3,
reduced_alphabet= reduced_alphabet.alex6,
return_transformer = True)
eval_dataset.eval_cv(X6, Y6)
print "Tumor-specific antigens"
run(X6, Y6, f)
print
print "---"
print "2-letter unigram"
X2, Y2, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet = reduced_alphabet.hp2,
return_transformer = True)
eval_dataset.eval_cv(X2, Y2)
print "Tumor-specific antigens"
run(X2, Y2, f)
print
print "---"
print "2-letter bigram"
X2, Y2, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority',
assay_group = ASSAY,
subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet = reduced_alphabet.hp2,
return_transformer = True)
eval_dataset.eval_cv(X2, Y2)
print "Tumor-specific antigens"
run(X2, Y2, f)
print
print "---"
print "2-letter trigram"
X2, Y2, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 3,
reduced_alphabet = reduced_alphabet.hp2,
return_transformer = True)
eval_dataset.eval_cv(X2, Y2)
print "Tumor-specific antigens"
run(X2, Y2, f)
print
print "---"
print "2-letter 4-gram"
X2, Y2, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 4,
reduced_alphabet = reduced_alphabet.hp2,
return_transformer = True)
eval_dataset.eval_cv(X2, Y2)
print "Tumor-specific antigens"
run(X2, Y2, f)
print
print "---"
print "3-letter unigram"
X3, Y3, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= reduced_alphabet.gbmr4,
return_transformer = True)
eval_dataset.eval_cv(X3, Y3)
print "Tumor-specific antigens"
run(X3, Y3, f)
print
print "---"
print "3-letter bigram"
X3, Y3, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= reduced_alphabet.gbmr4,
return_transformer = True)
eval_dataset.eval_cv(X3, Y3)
print "Tumor-specific antigens"
run(X3, Y3, f)
print
print "---"
print "3-letter trigram"
X3, Y3, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 3,
reduced_alphabet= reduced_alphabet.gbmr4,
return_transformer = True)
eval_dataset.eval_cv(X3, Y3)
print "Tumor-specific antigens"
run(X3, Y3, f)
print
print "---"
print "3-letter 4-gram"
X3, Y3, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 4,
reduced_alphabet= reduced_alphabet.gbmr4,
return_transformer = True)
eval_dataset.eval_cv(X3, Y3)
print "Tumor-specific antigens"
run(X3, Y3, f)
print
print "---"
print "12-letter unigram"
X12, Y12, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= reduced_alphabet.sdm12,
return_transformer = True)
eval_dataset.eval_cv(X12, Y12)
print "Tumor-specific antigens"
run(X12, Y12, f)
print
print "---"
print "12-letter bigram"
X12, Y12, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= reduced_alphabet.sdm12,
return_transformer = True)
eval_dataset.eval_cv(X12, Y12)
print "Tumor-specific antigens"
run(X12, Y12, f)
print
print "---"
print "17-letter unigram"
X17, Y17, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= reduced_alphabet.hsdm17,
return_transformer = True)
eval_dataset.eval_cv(X17, Y17)
print "Tumor-specific antigens"
run(X17, Y17, f)
print
print "---"
print "17-letter bigram"
X17, Y17, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= reduced_alphabet.hsdm17,
return_transformer = True)
eval_dataset.eval_cv(X17, Y17)
print "Tumor-specific antigens"
run(X17, Y17, f)
print
print "---"
print "AA unigram"
X, Y, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 1,
reduced_alphabet= None,
return_transformer = True)
eval_dataset.eval_cv(X, Y)
print "Tumor-specific antigens"
run(X,Y,f)
print
print "---"
print "AA bigram"
X, Y, f = iedb.load_tcell_ngrams(
noisy_labels = 'majority', assay_group = ASSAY, subsample_bigger_class = True,
human = True,
mhc_class = 1,
max_ngram = 2,
reduced_alphabet= None,
return_transformer = True)
eval_dataset.eval_cv(X, Y)
print "Tumor-specific antigens"
run(X, Y, f)
| gpl-2.0 |
GoogleCloudPlatform/public-datasets-pipelines | datasets/covid19_govt_response/pipelines/oxford_policy_tracker/oxford_policy_tracker_dag.py | 2 | 22798 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="covid19_govt_response.oxford_policy_tracker",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
oxford_policy_tracker_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="oxford_policy_tracker_transform_csv",
startup_timeout_seconds=600,
name="oxford_policy_tracker",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.covid19_govt_response.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv",
"SOURCE_FILE": "files/data.csv",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/covid19_govt_response/oxford_policy_tracker/data_output.csv",
"PIPELINE_NAME": "oxford_policy_tracker",
"CSV_HEADERS": '["country_name","alpha_3_code","region_name","region_code","date","school_closing","school_closing_flag","school_closing_notes","workplace_closing","workplace_closing_flag","workplace_closing_notes","cancel_public_events","cancel_public_events_flag","cancel_public_events_notes","restrictions_on_gatherings","restrictions_on_gatherings_flag","restrictions_on_gatherings_notes","close_public_transit","close_public_transit_flag","close_public_transit_notes","stay_at_home_requirements","stay_at_home_requirements_flag","stay_at_home_requirements_notes","restrictions_on_internal_movement","restrictions_on_internal_movement_flag","restrictions_on_internal_movement_notes","international_travel_controls","international_travel_controls_notes","income_support","income_support_flag","income_support_notes","debt_contract_relief","debt_contract_relief_notes","fiscal_measures","fiscal_measures_notes","international_support","international_support_notes","public_information_campaigns","public_information_campaigns_flag","public_information_campaigns_notes","testing_policy","testing_policy_notes","contact_tracing","contact_tracing_notes","emergency_healthcare_investment","emergency_healthcare_investment_notes","vaccine_investment","vaccine_investment_notes","misc_wildcard","misc_wildcard_notes","confirmed_cases","deaths","strintgency_index"]',
"RENAME_MAPPINGS": '{"CountryName":"country_name","CountryCode":"alpha_3_code","RegionName":"region_name","RegionCode":"region_code","Date":"date","C1_School closing":"school_closing","C1_Flag":"school_closing_flag","C1_Notes":"school_closing_notes","C2_Workplace closing":"workplace_closing","C2_Flag":"workplace_closing_flag","C2_Notes":"workplace_closing_notes","C3_Cancel public events":"cancel_public_events","C3_Flag":"cancel_public_events_flag","C3_Notes":"cancel_public_events_notes","C4_Restrictions on gatherings":"restrictions_on_gatherings","C4_Flag":"restrictions_on_gatherings_flag","C4_Notes":"restrictions_on_gatherings_notes","C5_Close public transport":"close_public_transit","C5_Flag":"close_public_transit_flag","C5_Notes":"close_public_transit_notes","C6_Stay at home requirements":"stay_at_home_requirements","C6_Flag":"stay_at_home_requirements_flag","C6_Notes":"stay_at_home_requirements_notes","C7_Restrictions on internal movement":"restrictions_on_internal_movement","C7_Flag":"restrictions_on_internal_movement_flag","C7_Notes":"restrictions_on_internal_movement_notes","C8_International travel controls":"international_travel_controls","C8_Notes":"international_travel_controls_notes","E1_Income support":"income_support","E1_Flag":"income_support_flag","E1_Notes":"income_support_notes","E2_Debt/contract relief":"debt_contract_relief","E2_Notes":"debt_contract_relief_notes","E3_Fiscal measures":"fiscal_measures","E3_Notes":"fiscal_measures_notes","E4_International support":"international_support","E4_Notes":"international_support_notes","H1_Public information campaigns":"public_information_campaigns","H1_Flag":"public_information_campaigns_flag","H1_Notes":"public_information_campaigns_notes","H2_Testing policy":"testing_policy","H2_Notes":"testing_policy_notes","H3_Contact tracing":"contact_tracing","H3_Notes":"contact_tracing_notes","H4_Emergency investment in healthcare":"emergency_healthcare_investment","H4_Notes":"emergency_healthcare_investment_notes","H5_Investment in vaccines":"vaccine_investment","H5_Notes":"vaccine_investment_notes","M1_Wildcard":"misc_wildcard","M1_Notes":"misc_wildcard_notes","ConfirmedCases":"confirmed_cases","ConfirmedDeaths":"deaths","StringencyIndexForDisplay":"strintgency_index"}',
},
resources={"request_memory": "2G", "request_cpu": "1"},
)
# Task to load CSV data to a BigQuery table
load_oxford_policy_tracker_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_oxford_policy_tracker_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=[
"data/covid19_govt_response/oxford_policy_tracker/data_output.csv"
],
source_format="CSV",
destination_project_dataset_table="covid19_govt_response.oxford_policy_tracker",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "country_name",
"type": "string",
"description": "Name of the country",
"mode": "nullable",
},
{
"name": "alpha_3_code",
"type": "string",
"description": "3-letter alpha code abbreviation of the country/region. See `bigquery-public-data.utility_us.country_code_iso` for more details",
"mode": "nullable",
},
{
"name": "region_name",
"type": "string",
"description": "Name of the region within the country",
"mode": "nullable",
},
{
"name": "region_code",
"type": "string",
"description": "Code of the region within the country",
"mode": "nullable",
},
{
"name": "date",
"type": "date",
"description": "Date of the measured policy action status",
"mode": "nullable",
},
{
"name": "school_closing",
"type": "string",
"description": "C1 - Ordinal scale record closings of schools and universities; 0 - No measures 1 - recommend closing 2 - Require closing (only some levels or categories eg just high school or just public schools) 3 - Require closing all levels No data - blank",
"mode": "nullable",
},
{
"name": "school_closing_flag",
"type": "string",
"description": "Are C1 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "school_closing_notes",
"type": "string",
"description": "Additional details about C1 policy actions",
"mode": "nullable",
},
{
"name": "workplace_closing",
"type": "string",
"description": "C2 - Ordinal scale record closings of workplace; 0 - No measures 1 - recommend closing (or work from home) 2 - require closing (or work from home) for some sectors or categories of workers 3 - require closing (or work from home) all-but-essential workplaces (eg grocery stores doctors) No data - blank",
"mode": "nullable",
},
{
"name": "workplace_closing_flag",
"type": "string",
"description": "Are C2 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "workplace_closing_notes",
"type": "string",
"description": "Additional details about C2 policy actions",
"mode": "nullable",
},
{
"name": "cancel_public_events",
"type": "string",
"description": "C3 - Ordinal scale record cancellations of public events;0- No measures 1 - Recommend cancelling 2 - Require cancelling No data - blank",
"mode": "nullable",
},
{
"name": "cancel_public_events_flag",
"type": "string",
"description": "Are C3 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "cancel_public_events_notes",
"type": "string",
"description": "Additional details about C3 policy actions",
"mode": "nullable",
},
{
"name": "restrictions_on_gatherings",
"type": "string",
"description": "C4 - Ordinal scale to record the cut-off size for bans on private gatherings; 0 - No restrictions 1 - Restrictions on very large gatherings (the limit is above 1000 people) 2 - Restrictions on gatherings between 100-1000 people 3 - Restrictions on gatherings between 10-100 people 4 - Restrictions on gatherings of less than 10 people No data - blank",
"mode": "nullable",
},
{
"name": "restrictions_on_gatherings_flag",
"type": "string",
"description": "Are C4 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "restrictions_on_gatherings_notes",
"type": "string",
"description": "Additional details about C4 policy actions",
"mode": "nullable",
},
{
"name": "close_public_transit",
"type": "string",
"description": "C5 - Ordinal scale to record closing of public transportation; 0 - No measures 1 - Recommend closing (or significantly reduce volume/route/means of transport available) 2 - Require closing (or prohibit most citizens from using it)",
"mode": "nullable",
},
{
"name": "close_public_transit_flag",
"type": "string",
"description": "Are C5 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "close_public_transit_notes",
"type": "string",
"description": "Additional details about C5 policy actions",
"mode": "nullable",
},
{
"name": "stay_at_home_requirements",
"type": "string",
"description": "C6 - Ordinal scale record of orders to “shelter-in- place” and otherwise confine to home.",
"mode": "nullable",
},
{
"name": "stay_at_home_requirements_flag",
"type": "string",
"description": 'Are C6 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank"\\',
"mode": "nullable",
},
{
"name": "stay_at_home_requirements_notes",
"type": "string",
"description": "Additional details about C6 policy actions",
"mode": "nullable",
},
{
"name": "restrictions_on_internal_movement",
"type": "string",
"description": "C7 - Ordinal scale of restrictions on internal movement; 0 - No measures 1 - Recommend closing (or significantly reduce volume/route/means of transport) 2 - Require closing (or prohibit most people from using it)",
"mode": "nullable",
},
{
"name": "restrictions_on_internal_movement_flag",
"type": "string",
"description": "Are C7 actions targeted at specific areas or general:0 - Targeted 1- General No data - blank",
"mode": "nullable",
},
{
"name": "restrictions_on_internal_movement_notes",
"type": "string",
"description": "Additional details about C7 policy actions",
"mode": "nullable",
},
{
"name": "international_travel_controls",
"type": "string",
"description": "C8 - Ordinal scale record of restrictions on international travel; 0 - No measures 1 - Screening 2 - Quarantine arrivals from high-risk regions 3 - Ban on high-risk regions 4 - Total border closure No data - blank",
"mode": "nullable",
},
{
"name": "international_travel_controls_notes",
"type": "string",
"description": "Additional details about C8 policy actions",
"mode": "nullable",
},
{
"name": "income_support",
"type": "string",
"description": "E1 - Ordinal scale record if the government is covering the salaries or providing direct cash payments universal basic income or similar of people who lose their jobs or cannot work. (Includes payments to firms if explicitly linked to payroll/ salaries)",
"mode": "nullable",
},
{
"name": "income_support_flag",
"type": "string",
"description": "Sector scope of E1 actions; 0 - formal sector workers only 1 - transfers to informal sector workers too No data - blank",
"mode": "nullable",
},
{
"name": "income_support_notes",
"type": "string",
"description": "Additional details about E1 policy actions",
"mode": "nullable",
},
{
"name": "debt_contract_relief",
"type": "string",
"description": "E2 - Record if govt. is freezing financial obligations (eg stopping loan repayments preventing services like water from stopping or banning evictions)",
"mode": "nullable",
},
{
"name": "debt_contract_relief_notes",
"type": "string",
"description": "Additional details about E2 policy actions",
"mode": "nullable",
},
{
"name": "fiscal_measures",
"type": "float",
"description": "E3 - What economic stimulus policies are adopted (in USD); Record monetary value USD of fiscal stimuli including spending or tax cuts NOT included in S10 (see below) -If none enter 0 No data - blank Please use the exchange rate of the date you are coding not the current date.",
"mode": "nullable",
},
{
"name": "fiscal_measures_notes",
"type": "string",
"description": "Additional details about E3 policy actions",
"mode": "nullable",
},
{
"name": "international_support",
"type": "float",
"description": "E4 - Announced offers of COVID-19 related aid spending to other countries (in USD); Record monetary value announced if additional to previously announced spending -if none enter 0 No data - blank Please use the exchange rate of the date you are coding not the current date.",
"mode": "nullable",
},
{
"name": "international_support_notes",
"type": "string",
"description": "Additional details about E4 policy actions",
"mode": "nullable",
},
{
"name": "public_information_campaigns",
"type": "string",
"description": "H1 - Ordinal scale record presence of public info campaigns; 0 -No COVID-19 public information campaign 1 - public officials urging caution about COVID-19 2 - coordinated public information campaign (e.g. across traditional and social media) No data - blank",
"mode": "nullable",
},
{
"name": "public_information_campaigns_flag",
"type": "string",
"description": "Sector scope of H1 actions; 0 - formal sector workers only 1 - transfers to informal sector workers too No data - blank",
"mode": "nullable",
},
{
"name": "public_information_campaigns_notes",
"type": "string",
"description": "Additional details about H1 policy actions",
"mode": "nullable",
},
{
"name": "testing_policy",
"type": "string",
"description": "H2 - Ordinal scale record of who can get tested; 0 – No testing policy 1 – Only those who both (a) have symptoms AND (b) meet specific criteria (eg key workers admitted to hospital came into contact with a known case returned from overseas) 2 – testing of anyone showing COVID-19 symptoms 3 – open public testing (eg “drive through” testing available to asymptomatic people) No data Nb we are looking for policies about testing for having an infection (PCR tests) - not for policies about testing for immunity (antibody tests).",
"mode": "nullable",
},
{
"name": "testing_policy_notes",
"type": "string",
"description": "Additional details about H2 policy actions",
"mode": "nullable",
},
{
"name": "contact_tracing",
"type": "string",
"description": "H3 - Ordinal scale record if governments doing contact tracing; 0 - No contact tracing 1 - Limited contact tracing - not done for all cases 2 - Comprehensive contact tracing - done for all cases No data",
"mode": "nullable",
},
{
"name": "contact_tracing_notes",
"type": "string",
"description": "Additional details about H3 policy actions",
"mode": "nullable",
},
{
"name": "emergency_healthcare_investment",
"type": "float",
"description": "H4 - Short-term spending on e.g hospitals masks etc in USD; Record monetary value in USD of new short-term spending on health. If none enter 0. No data - blank Please use the exchange rate of the date you are coding not the current date.",
"mode": "nullable",
},
{
"name": "emergency_healthcare_investment_notes",
"type": "string",
"description": "Additional details about H4 policy actions",
"mode": "nullable",
},
{
"name": "vaccine_investment",
"type": "float",
"description": "H5 - Announced public spending on vaccine development in USD; Record monetary value in USD of new short-term spending on health. If none enter 0. No data - blank Please use the exchange rate of the date you are coding not the current date.",
"mode": "nullable",
},
{
"name": "vaccine_investment_notes",
"type": "string",
"description": "Additional details about H5 policy actions",
"mode": "nullable",
},
{
"name": "misc_wildcard",
"type": "string",
"description": "M1 - Record policy announcements that do not fit anywhere else",
"mode": "nullable",
},
{
"name": "misc_wildcard_notes",
"type": "string",
"description": "Additional details about M1 policy actions",
"mode": "nullable",
},
{
"name": "confirmed_cases",
"type": "integer",
"description": "Number of confirmed COVID-19 cases",
"mode": "nullable",
},
{
"name": "deaths",
"type": "integer",
"description": "Number of confirmed COVID-19 deaths",
"mode": "nullable",
},
{
"name": "stringency_index",
"type": "float",
"description": "Used after April 28 2020. Nine-point aggregation of the eight containment and closure indicators as well as H1 (public information campaigns). It reports a number between 0 to 100 that reflects the overall stringency of the governments response. This is a measure of how many of the these nine indicators (mostly around social isolation) a government has acted upon and to what degree.",
"mode": "nullable",
},
],
)
oxford_policy_tracker_transform_csv >> load_oxford_policy_tracker_to_bq
| apache-2.0 |
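The stringency_index field described in the schema above is a nine-point aggregation of the containment and closure indicators plus H1. The pandas sketch below is only a rough illustration of that idea: it averages a few of the ordinal columns after normalising each by an assumed maximum value. It is not the official OxCGRT formula (which also uses the flag variables and a different rescaling), and both the input file name and the maxima in ORDINAL_MAX are assumptions rather than values taken from this pipeline.

import pandas as pd

# Assumed maximum ordinal value per indicator (the C6 maximum of 3 is taken
# from the OxCGRT codebook, not from this schema); used only for this toy
# normalisation.
ORDINAL_MAX = {
    "stay_at_home_requirements": 3,
    "restrictions_on_internal_movement": 2,
    "international_travel_controls": 4,
    "public_information_campaigns": 2,
}

def toy_stringency(row):
    """Average each ordinal indicator as a percentage of its assumed maximum."""
    parts = []
    for col, max_val in ORDINAL_MAX.items():
        value = row.get(col)
        if pd.notna(value):
            parts.append(100.0 * float(value) / max_val)
    return sum(parts) / len(parts) if parts else float("nan")

df = pd.read_csv("oxford_policy_tracker.csv")  # hypothetical input file
df["toy_stringency"] = df.apply(toy_stringency, axis=1)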
CI-WATER/tethys | tethys_apps/models.py | 1 | 32674 | """
********************************************************************************
* Name: models.py
* Author: Nathan Swain
* Created On: 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
import sqlalchemy
import logging
import uuid
import django.dispatch
from django.db import models
from django.core.exceptions import ValidationError
from model_utils.managers import InheritanceManager
from tethys_apps.exceptions import TethysAppSettingNotAssigned, PersistentStorePermissionError, \
PersistentStoreInitializerError
from django.contrib.postgres.fields import ArrayField
from sqlalchemy.orm import sessionmaker
from tethys_apps.base.mixins import TethysBaseMixin
from tethys_services.models import validate_url
from tethys_sdk.testing import is_testing_environment, get_test_db_name
from tethys_apps.base.function_extractor import TethysFunctionExtractor
log = logging.getLogger('tethys')
try:
from tethys_services.models import (DatasetService, SpatialDatasetService,
WebProcessingService, PersistentStoreService)
except RuntimeError: # pragma: no cover
log.exception('An error occurred while trying to import tethys service models.')
class TethysApp(models.Model, TethysBaseMixin):
"""
DB Model for Tethys Apps
"""
# The package is enforced to be unique by the file system
package = models.CharField(max_length=200, unique=True, default='')
# Portal admin first attributes
name = models.CharField(max_length=200, default='')
description = models.TextField(max_length=1000, blank=True, default='')
enable_feedback = models.BooleanField(default=False)
feedback_emails = ArrayField(
models.CharField(max_length=200, null=True, blank=True),
default=list,
)
tags = models.CharField(max_length=200, blank=True, default='')
# Developer first attributes
index = models.CharField(max_length=200, default='')
icon = models.CharField(max_length=200, default='')
root_url = models.CharField(max_length=200, default='')
color = models.CharField(max_length=10, default='')
# Portal admin only attributes
enabled = models.BooleanField(default=True)
show_in_apps_library = models.BooleanField(default=True)
class Meta:
verbose_name = 'Tethys App'
verbose_name_plural = 'Installed Apps'
def __str__(self):
return self.name
def add_settings(self, setting_list):
"""
Associate setting with app in database
"""
if setting_list is not None:
for setting in setting_list:
# Don't add the same setting twice
if self.settings_set.filter(name=setting.name):
return
# Associate setting with this app
setting.tethys_app = self
setting.save()
@property
def settings(self):
return self.settings_set.select_subclasses()
@property
def custom_settings(self):
return self.settings_set.exclude(customsetting__isnull=True) \
.select_subclasses('customsetting')
@property
def dataset_service_settings(self):
return self.settings_set.exclude(datasetservicesetting__isnull=True) \
.select_subclasses('datasetservicesetting')
@property
def spatial_dataset_service_settings(self):
return self.settings_set.exclude(spatialdatasetservicesetting__isnull=True) \
.select_subclasses('spatialdatasetservicesetting')
@property
def wps_services_settings(self):
return self.settings_set.exclude(webprocessingservicesetting__isnull=True) \
.select_subclasses('webprocessingservicesetting')
@property
def persistent_store_connection_settings(self):
return self.settings_set.exclude(persistentstoreconnectionsetting__isnull=True) \
.select_subclasses('persistentstoreconnectionsetting')
@property
def persistent_store_database_settings(self):
return self.settings_set.exclude(persistentstoredatabasesetting__isnull=True) \
.select_subclasses('persistentstoredatabasesetting')
@property
def configured(self):
required_settings = [s for s in self.settings if s.required]
for setting in required_settings:
try:
setting.get_value()
except TethysAppSettingNotAssigned:
return False
return True
class TethysExtension(models.Model, TethysBaseMixin):
"""
DB Model for Tethys Extension
"""
# The package is enforced to be unique by the file system
package = models.CharField(max_length=200, unique=True, default='')
# Portal admin first attributes
name = models.CharField(max_length=200, default='')
description = models.TextField(max_length=1000, blank=True, default='')
# Developer first attributes
root_url = models.CharField(max_length=200, default='')
# Portal admin only attributes
enabled = models.BooleanField(default=True)
class Meta:
verbose_name = 'Tethys Extension'
verbose_name_plural = 'Installed Extensions'
def __str__(self):
return self.name
class TethysAppSetting(models.Model):
"""
DB Model for Tethys App Settings
"""
objects = InheritanceManager()
tethys_app = models.ForeignKey(TethysApp, on_delete=models.CASCADE,
related_name='settings_set')
name = models.CharField(max_length=200, default='')
description = models.TextField(max_length=1000, blank=True, default='')
required = models.BooleanField(default=True)
initializer = models.CharField(max_length=1000, default='')
initialized = models.BooleanField(default=False)
def __str__(self):
return self.name
@property
def initializer_function(self):
"""
The function pointed to by the initializer attribute.
Returns:
A handle to a Python function that will initialize the database or None if function is not valid.
"""
func_ext = TethysFunctionExtractor(self.initializer)
return func_ext.function
def initialize(self):
"""
Initialize.
"""
self.initializer_function(self.initialized)
self.initialized = True
def get_value(self, *args, **kwargs):
raise NotImplementedError()
class CustomSetting(TethysAppSetting):
"""
Used to define a Custom Setting.
Attributes:
name(str): Unique name used to identify the setting.
type(enum): The type of the custom setting. Either CustomSetting.TYPE_STRING, CustomSetting.TYPE_INTEGER, CustomSetting.TYPE_FLOAT, CustomSetting.TYPE_BOOLEAN, CustomSetting.TYPE_UUID
description(str): Short description of the setting.
required(bool): A value will be required if True.
default(str): Value as a string that may be provided as a default.
**Example:**
::
from tethys_sdk.app_settings import CustomSetting
default_name_setting = CustomSetting(
name='default_name',
            type=CustomSetting.TYPE_STRING,
description='Default model name.',
required=True,
default="Name_123"
)
max_count_setting = CustomSetting(
name='max_count',
type=CustomSetting.TYPE_INTEGER,
description='Maximum allowed count in a method.',
required=False
)
change_factor_setting = CustomSetting(
name='change_factor',
type=CustomSetting.TYPE_FLOAT,
description='Change factor that is applied to some process.',
required=True
)
enable_feature_setting = CustomSetting(
name='enable_feature',
type=CustomSetting.TYPE_BOOLEAN,
description='Enable this feature when True.',
required=True
)
feature_id_setting = CustomSetting(
name='feature_id',
type=CustomSetting.TYPE_UUID,
description='Feature ID.',
required=True
)
""" # noqa: E501
TYPE_STRING = 'STRING'
TYPE_INTEGER = 'INTEGER'
TYPE_FLOAT = 'FLOAT'
TYPE_BOOLEAN = 'BOOLEAN'
TYPE_UUID = 'UUID'
VALID_TYPES = (TYPE_STRING, TYPE_INTEGER, TYPE_FLOAT, TYPE_BOOLEAN, TYPE_UUID)
VALID_BOOL_STRINGS = ('true', 'false', 'yes', 'no', 't', 'f', 'y', 'n', '1', '0')
TRUTHY_BOOL_STRINGS = ('true', 'yes', 't', 'y', '1')
TYPE_CHOICES = (
(TYPE_STRING, 'String'),
(TYPE_INTEGER, 'Integer'),
(TYPE_FLOAT, 'Float'),
(TYPE_BOOLEAN, 'Boolean'),
(TYPE_UUID, 'UUID'),
)
value = models.CharField(max_length=1024, blank=True, default='')
default = models.CharField(max_length=1024, blank=True, default='')
type = models.CharField(max_length=200, choices=TYPE_CHOICES, default=TYPE_STRING)
def clean(self):
"""
Validate prior to saving changes.
"""
if self.default != '':
if self.value == '':
self.value = self.default
else:
if self.value == '' and self.required:
raise ValidationError('Required.')
if self.value != '' and self.type == self.TYPE_FLOAT:
try:
float(self.value)
except Exception:
raise ValidationError('Value must be a float.')
elif self.value != '' and self.type == self.TYPE_INTEGER:
try:
int(self.value)
except Exception:
raise ValidationError('Value must be an integer.')
elif self.value != '' and self.type == self.TYPE_BOOLEAN:
if self.value.lower() not in self.VALID_BOOL_STRINGS:
raise ValidationError('Value must be a boolean.')
elif self.value != '' and self.type == self.TYPE_UUID:
try:
uuid.UUID(self.value)
except Exception:
raise ValidationError('Value must be a uuid.')
def get_value(self):
"""
Get the value, automatically casting it to the correct type.
"""
if self.default != '':
if self.value == '':
self.value = self.default
if self.value == '' or self.value is None:
if self.required:
raise TethysAppSettingNotAssigned(
f'The required setting "{self.name}" for app "{self.tethys_app.package}":'
                    f' has not been assigned.')
# None is a valid value to return in the case the value has not been set for this setting type
return None
if self.type == self.TYPE_STRING:
return self.value
if self.type == self.TYPE_FLOAT:
return float(self.value)
if self.type == self.TYPE_INTEGER:
return int(self.value)
if self.type == self.TYPE_BOOLEAN:
return self.value.lower() in self.TRUTHY_BOOL_STRINGS
if self.type == self.TYPE_UUID:
return uuid.UUID(self.value)
@django.dispatch.receiver(models.signals.post_init, sender=CustomSetting)
def set_default_value(sender, instance, *args, **kwargs):
"""
Set the default value for `value` on the `instance` of Setting.
This signal receiver will process it as soon as the object is created for use
Attributes:
sender(CustomSetting): The `CustomSetting` class that sent the signal.
instance(CustomSetting): The `CustomSetting` instance that is being initialised.
Returns:
None
"""
if not instance.value or instance.value == '':
instance.value = instance.default
class DatasetServiceSetting(TethysAppSetting):
"""
Used to define a Dataset Service Setting.
Attributes:
name(str): Unique name used to identify the setting.
description(str): Short description of the setting.
engine(enum): Either DatasetServiceSetting.CKAN or DatasetServiceSetting.HYDROSHARE
required(bool): A value will be required if True.
**Example:**
::
from tethys_sdk.app_settings import DatasetServiceSetting
primary_ckan_setting = DatasetServiceSetting(
name='primary_ckan',
description='Primary CKAN service for app to use.',
engine=DatasetServiceSetting.CKAN,
required=True,
)
hydroshare_setting = DatasetServiceSetting(
name='hydroshare',
description='HydroShare service for app to use.',
engine=DatasetServiceSetting.HYDROSHARE,
required=False
)
"""
CKAN = DatasetService.CKAN
HYDROSHARE = DatasetService.HYDROSHARE
dataset_service = models.ForeignKey(DatasetService, on_delete=models.CASCADE, blank=True, null=True)
engine = models.CharField(max_length=200,
choices=DatasetService.ENGINE_CHOICES,
default=DatasetService.CKAN)
def clean(self):
"""
Validate prior to saving changes.
"""
if not self.dataset_service and self.required:
raise ValidationError('Required.')
def get_value(self, as_public_endpoint=False, as_endpoint=False, as_engine=False):
if not self.dataset_service:
raise TethysAppSettingNotAssigned(f'Cannot create engine or endpoint for DatasetServiceSetting '
f'"{self.name}" for app "{self.tethys_app.package}": '
f'no DatasetService assigned.')
# Order here matters. Think carefully before changing.
if as_engine:
return self.dataset_service.get_engine()
if as_endpoint:
return self.dataset_service.endpoint
if as_public_endpoint:
return self.dataset_service.public_endpoint
return self.dataset_service
class SpatialDatasetServiceSetting(TethysAppSetting):
"""
Used to define a Spatial Dataset Service Setting.
Attributes:
name(str): Unique name used to identify the setting.
description(str): Short description of the setting.
engine(enum): One of SpatialDatasetServiceSetting.GEOSERVER or SpatialDatasetServiceSetting.THREDDS at this time.
required(bool): A value will be required if True.
**Example:**
::
from tethys_sdk.app_settings import SpatialDatasetServiceSetting
primary_geoserver_setting = SpatialDatasetServiceSetting(
name='primary_geoserver',
description='spatial dataset service for app to use',
engine=SpatialDatasetServiceSetting.GEOSERVER,
required=True,
)
""" # noqa: E501
GEOSERVER = SpatialDatasetService.GEOSERVER
THREDDS = SpatialDatasetService.THREDDS
spatial_dataset_service = models.ForeignKey(SpatialDatasetService, on_delete=models.CASCADE, blank=True, null=True)
engine = models.CharField(max_length=200,
choices=SpatialDatasetService.ENGINE_CHOICES,
default=SpatialDatasetService.GEOSERVER)
def clean(self):
"""
Validate prior to saving changes.
"""
if not self.spatial_dataset_service and self.required:
raise ValidationError('Required.')
def get_value(self, as_public_endpoint=False, as_endpoint=False, as_wms=False,
as_wfs=False, as_engine=False, as_wcs=False):
if not self.spatial_dataset_service:
raise TethysAppSettingNotAssigned(f'Cannot create engine or endpoint for SpatialDatasetServiceSetting '
f'"{self.name}" for app "{self.tethys_app.package}": '
f'no SpatialDatasetService assigned.')
# Order here matters. Think carefully before changing.
if as_engine:
return self.spatial_dataset_service.get_engine()
if as_endpoint:
return self.spatial_dataset_service.endpoint
if as_public_endpoint:
return self.spatial_dataset_service.public_endpoint
if self.engine == self.GEOSERVER:
if as_wms:
return self.spatial_dataset_service.endpoint.split('/rest')[0] + '/wms'
if as_wfs:
return self.spatial_dataset_service.endpoint.split('/rest')[0] + '/ows'
if as_wcs:
return self.spatial_dataset_service.endpoint.split('/rest')[0] + '/wcs'
elif self.engine == self.THREDDS:
if as_wms:
return self.spatial_dataset_service.endpoint.rstrip('/') + '/wms'
if as_wcs:
return self.spatial_dataset_service.endpoint.rstrip('/') + '/wcs'
if as_wfs:
raise ValueError('THREDDS does not support the WFS interface.')
return self.spatial_dataset_service
class WebProcessingServiceSetting(TethysAppSetting):
"""
Used to define a Web Processing Service Setting.
Attributes:
name(str): Unique name used to identify the setting.
description(str): Short description of the setting.
required(bool): A value will be required if True.
**Example:**
::
from tethys_sdk.app_settings import WebProcessingServiceSetting
primary_52n_setting = WebProcessingServiceSetting(
name='primary_52n',
description='WPS service for app to use',
required=True,
)
"""
web_processing_service = models.ForeignKey(WebProcessingService, on_delete=models.CASCADE, blank=True, null=True)
def clean(self):
"""
Validate prior to saving changes.
"""
if not self.web_processing_service and self.required:
raise ValidationError('Required.')
def get_value(self, as_public_endpoint=False, as_endpoint=False, as_engine=False):
wps_service = self.web_processing_service
if not wps_service:
raise TethysAppSettingNotAssigned(f'Cannot create engine or endpoint for WebProcessingServiceSetting '
f'"{self.name}" for app "{self.tethys_app.package}": '
f'no WebProcessingService assigned.')
# Order here matters. Think carefully before changing.
if as_engine:
return wps_service.get_engine()
if as_endpoint:
return wps_service.endpoint
if as_public_endpoint:
return wps_service.public_endpoint
return wps_service
class PersistentStoreConnectionSetting(TethysAppSetting):
"""
    Used to define a Persistent Store Connection Setting.
Attributes:
name(str): Unique name used to identify the setting.
description(str): Short description of the setting.
required(bool): A value will be required if True.
**Example:**
::
from tethys_sdk.app_settings import PersistentStoreConnectionSetting
primary_db_conn_setting = PersistentStoreConnectionSetting(
name='primary',
description='Connection with superuser role needed.',
required=True
)
"""
persistent_store_service = models.ForeignKey(
PersistentStoreService, on_delete=models.CASCADE, blank=True, null=True)
def clean(self):
"""
Validate prior to saving changes.
"""
if not self.persistent_store_service and self.required:
raise ValidationError('Required.')
def get_value(self, as_url=False, as_sessionmaker=False, as_engine=False):
"""
Get the SQLAlchemy engine from the connected persistent store service
"""
ps_service = self.persistent_store_service
# Validate connection service
if ps_service is None:
raise TethysAppSettingNotAssigned(f'Cannot create engine or endpoint for PersistentStoreConnectionSetting '
f'"{self.name}" for app "{self.tethys_app.package}": '
f'no PersistentStoreService assigned.')
# Order here matters. Think carefully before changing.
if as_engine:
return ps_service.get_engine()
if as_sessionmaker:
return sessionmaker(bind=ps_service.get_engine())
if as_url:
return ps_service.get_url()
return ps_service
class PersistentStoreDatabaseSetting(TethysAppSetting):
"""
    Used to define a Persistent Store Database Setting.
Attributes:
name(str): Unique name used to identify the setting.
description(str): Short description of the setting.
initializer(str): Dot-notation path to function used to initialize the database.
spatial(bool): Enable the PostGIS extension on the database during creation when True.
required(bool): A value will be required if True.
**Example:**
::
from tethys_sdk.app_settings import PersistentStoreDatabaseSetting
spatial_db_setting = PersistentStoreDatabaseSetting(
name='spatial_db',
description='for storing important spatial stuff',
required=True,
initializer='appsettings.init_stores.init_spatial_db',
spatial=True,
        )
temp_db_setting = PersistentStoreDatabaseSetting(
name='temp_db',
description='for storing temporary stuff',
required=False,
initializer='appsettings.init_stores.init_temp_db',
spatial=False,
)
"""
spatial = models.BooleanField(default=False)
dynamic = models.BooleanField(default=False)
persistent_store_service = models.ForeignKey(
PersistentStoreService, on_delete=models.CASCADE, blank=True, null=True)
def clean(self):
"""
Validate prior to saving changes.
"""
if not self.persistent_store_service and self.required:
raise ValidationError('Required.')
def initialize(self):
"""
Initialize persistent store database setting.
"""
self.create_persistent_store_database()
def get_namespaced_persistent_store_name(self):
"""
Return the namespaced persistent store database name (e.g. my_first_app_db).
"""
# Convert name given by user to database safe name
safe_name = self.name.lower().replace(' ', '_')
# If testing environment, the engine for the "test" version of the persistent store should be fetched
if is_testing_environment():
safe_name = get_test_db_name(safe_name)
return '_'.join((self.tethys_app.package, safe_name))
def get_value(self, with_db=False, as_url=False, as_sessionmaker=False, as_engine=False):
"""
Get the SQLAlchemy engine from the connected persistent store service
"""
ps_service = self.persistent_store_service
# Validate connection service
if ps_service is None:
raise TethysAppSettingNotAssigned(f'Cannot create engine or endpoint for PersistentStoreDatabaseSetting '
f'"{self.name}" for app "{self.tethys_app.package}": '
f'no PersistentStoreService assigned.')
if with_db:
ps_service.database = self.get_namespaced_persistent_store_name()
# Order here matters. Think carefully before changing.
if as_engine:
return ps_service.get_engine()
if as_sessionmaker:
return sessionmaker(bind=ps_service.get_engine())
if as_url:
return ps_service.get_url()
return ps_service
def persistent_store_database_exists(self):
"""
Returns True if the persistent store database exists.
"""
# Get the database engine
engine = self.get_value(as_engine=True)
namespaced_name = self.get_namespaced_persistent_store_name()
# Cannot create databases in a transaction: connect and commit to close transaction
connection = engine.connect()
# Check for Database
existing_query = """
SELECT d.datname as name
FROM pg_catalog.pg_database d
LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid
WHERE d.datname = '{0}';
""".format(namespaced_name)
existing_dbs = connection.execute(existing_query)
connection.close()
for existing_db in existing_dbs:
if existing_db.name == namespaced_name:
return True
return False
def drop_persistent_store_database(self):
"""
Drop the persistent store database.
"""
if not self.persistent_store_database_exists():
return
# Provide update for user
log = logging.getLogger('tethys')
log.info('Dropping database "{0}" for app "{1}"...'.format(
self.name,
self.tethys_app.package
))
# Get the database engine
engine = self.get_value(as_engine=True)
# Connection
drop_connection = None
namespaced_ps_name = self.get_namespaced_persistent_store_name()
# Drop db
drop_db_statement = 'DROP DATABASE IF EXISTS "{0}"'.format(namespaced_ps_name)
try:
drop_connection = engine.connect()
drop_connection.execute('commit')
drop_connection.execute(drop_db_statement)
except Exception as e:
if 'being accessed by other users' in str(e):
# Force disconnect all other connections to the database
disconnect_sessions_statement = '''
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '{0}'
AND pg_stat_activity.pid <> pg_backend_pid();
'''.format(namespaced_ps_name)
if drop_connection:
drop_connection.execute(disconnect_sessions_statement)
# Try again to drop the database
drop_connection.execute('commit')
drop_connection.execute(drop_db_statement)
else:
raise e
finally:
drop_connection and drop_connection.close()
def create_persistent_store_database(self, refresh=False, force_first_time=False):
"""
Provision all persistent stores for all apps or for only the app name given.
"""
        # Get logger
log = logging.getLogger('tethys')
# Connection engine
url = self.get_value(as_url=True)
engine = self.get_value(as_engine=True)
namespaced_ps_name = self.get_namespaced_persistent_store_name()
db_exists = self.persistent_store_database_exists()
# -------------------------------------------------------------------------------------------------------------#
# 1. Drop database if refresh option is included
# -------------------------------------------------------------------------------------------------------------#
if db_exists and refresh:
self.drop_persistent_store_database()
self.initialized = False
self.save()
db_exists = False
# -------------------------------------------------------------------------------------------------------------#
# 2. Create the database if it does not already exist
# -------------------------------------------------------------------------------------------------------------#
if not db_exists:
# Provide Update for User
log.info('Creating database "{0}" for app "{1}"...'.format(
self.name,
self.tethys_app.package
))
# Cannot create databases in a transaction: connect and commit to close transaction
create_connection = engine.connect()
# Create db
create_db_statement = '''
CREATE DATABASE "{0}"
WITH OWNER {1}
TEMPLATE template0
ENCODING 'UTF8'
'''.format(namespaced_ps_name, url.username)
# Close transaction first and then execute
create_connection.execute('commit')
try:
create_connection.execute(create_db_statement)
except sqlalchemy.exc.ProgrammingError:
raise PersistentStorePermissionError('Database user "{0}" has insufficient permissions to create '
'the persistent store database "{1}": must have CREATE DATABASES '
'permission at a minimum.'.format(url.username, self.name))
finally:
create_connection.close()
# -------------------------------------------------------------------------------------------------------------#
# 3. Enable PostGIS extension
# -------------------------------------------------------------------------------------------------------------#
if self.spatial:
# Connect to new database
new_db_engine = self.get_value(with_db=True, as_engine=True)
new_db_connection = new_db_engine.connect()
# Notify user
log.info('Enabling PostGIS on database "{0}" for app "{1}"...'.format(
self.name,
self.tethys_app.package,
))
enable_postgis_statement = 'CREATE EXTENSION IF NOT EXISTS postgis'
# Execute postgis statement
try:
new_db_connection.execute(enable_postgis_statement)
except sqlalchemy.exc.ProgrammingError:
raise PersistentStorePermissionError('Database user "{0}" has insufficient permissions to enable '
'spatial extension on persistent store database "{1}": must be a '
'superuser.'.format(url.username, self.name))
finally:
new_db_connection.close()
# -------------------------------------------------------------------------------------------------------------#
# 4. Run initialization function
# -------------------------------------------------------------------------------------------------------------#
if self.initializer:
log.info('Initializing database "{0}" for app "{1}" with initializer "{2}"...'.format(
self.name,
self.tethys_app.package,
self.initializer
))
try:
if force_first_time:
self.initializer_function(self.get_value(with_db=True, as_engine=True), True)
else:
self.initializer_function(self.get_value(with_db=True, as_engine=True), not self.initialized)
except Exception as e:
raise PersistentStoreInitializerError(e)
# Update initialization
self.initialized = True
self.save()
class ProxyApp(models.Model):
"""
DB model for Proxy Apps which allows you to redirect an app to another host.
"""
name = models.CharField(max_length=100, unique=True)
endpoint = models.CharField(max_length=1024, validators=[validate_url])
logo_url = models.CharField(max_length=100, validators=[validate_url], blank=True)
description = models.TextField(max_length=2048, blank=True)
tags = models.CharField(max_length=200, blank=True, default='')
enabled = models.BooleanField(default=True)
show_in_apps_library = models.BooleanField(default=True)
class Meta:
verbose_name = 'Proxy App'
verbose_name_plural = 'Proxy Apps'
def __str__(self):
return self.name
| bsd-2-clause |
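CustomSetting.get_value in the model above stores every value as a string and casts it on read according to the type field. The snippet below is a minimal standalone mirror of those casting rules, written only to illustrate the behaviour; it is not part of the Tethys API, and the real method additionally handles defaults and raises TethysAppSettingNotAssigned for missing required values.

import uuid

TRUTHY_BOOL_STRINGS = ('true', 'yes', 't', 'y', '1')

def cast_custom_setting(value, type_):
    """Cast a stored string the way CustomSetting.get_value does."""
    if type_ == 'STRING':
        return value
    if type_ == 'FLOAT':
        return float(value)
    if type_ == 'INTEGER':
        return int(value)
    if type_ == 'BOOLEAN':
        return value.lower() in TRUTHY_BOOL_STRINGS
    if type_ == 'UUID':
        return uuid.UUID(value)
    raise ValueError('Unsupported setting type: %s' % type_)

assert cast_custom_setting('Yes', 'BOOLEAN') is True
assert cast_custom_setting('0', 'BOOLEAN') is False
assert cast_custom_setting('3.5', 'FLOAT') == 3.5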
davidam/python-examples | scikit/pca-choosing-components.py | 1 | 1682 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--csv')
args = parser.parse_args()
#filepath = 'pulsar_stars.csv' #your path here
data = np.genfromtxt(args.csv, delimiter=',', dtype='float64')
scaler = MinMaxScaler(feature_range=[0, 1])
data_rescaled = scaler.fit_transform(data[1:, 0:8])
#Fitting the PCA algorithm with our Data
pca = PCA().fit(data_rescaled)
#Plotting the Cumulative Summation of the Explained Variance
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Dataset Explained Variance')
plt.show()
| gpl-3.0 |
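The script above plots the cumulative explained variance so the number of components can be read off the curve. scikit-learn can also make that choice directly: passing a float between 0 and 1 as n_components keeps just enough components to reach that fraction of variance. The 0.95 threshold below is only an example value, and data_rescaled is assumed to be the array prepared earlier in the script.

from sklearn.decomposition import PCA

# Keep enough components to explain at least 95% of the variance.
# A fractional n_components requires the 'full' SVD solver.
pca_95 = PCA(n_components=0.95, svd_solver='full')
reduced = pca_95.fit_transform(data_rescaled)
print('Components kept for 95% variance:', pca_95.n_components_)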
mxjl620/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 352 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
TNick/pyl2extra | pyl2extra/scripts/datasets/tests/test_imagenet.py | 1 | 13084 | """
Tests for the imagenet dataset download script.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "[email protected]"
import functools
import unittest
from mock import patch, Mock
import os
import shutil
import tempfile
from xml.dom import minidom
from xml.parsers.expat import ExpatError
from pyl2extra.scripts.datasets import imagenet
TEST_SYNSETS = """
n04386664
n10731013
n03002948
n07609632
n03003091
n10562968
n07586179
n09929577
n07933530
n04136161
n03602194
n03703075
n12990597
"""
RELEASE_STATUS_SAMPLE = """<ReleaseStatus>
<releaseData>fall2011</releaseData>
<images>
<synsetInfos>
<synset wnid="n10801802" released="1" version="winter11" numImages="269"/>
<synset wnid="n10772937" released="1" version="winter11" numImages="58"/>
<synset wnid="n10028541" released="1" version="winter11" numImages="201"/>
<synset wnid="n10712374" released="1" version="winter11" numImages="175"/>
<synset wnid="n09878921" released="1" version="winter11" numImages="46"/>
<synset wnid="n10789415" released="1" version="winter11" numImages="48"/>
<synset wnid="n10370955" released="1" version="winter11" numImages="502"/>
</synsetInfos>
</images>
</ReleaseStatus>"""
GET_MAPPING_SAMPLE = """
n02109150_5962 http://1.jpg
n02109150_5969 http://2.jpg
n02109150_5976 http://3.jpg
n02109150_5981 http://4.jpg
n02109150_307 http://www.scbeacon.com/beacon_issues/03_09_18/images/Guidedog_pjh_091803.jpg
n02109150_323 http://www.braille.be/content/lig_braille/rapport_2005/img_05.jpg
"""
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestListFromUrl(unittest.TestCase):
"""
Tests for list_from_url().
"""
def test_simple(self, mock_urlopen):
"""
testing list_from_url().
"""
mok = Mock()
mok.read.side_effect = ['resp1', 'resp1\nresp2', '', ' a ']
mock_urlopen.return_value = mok
lst = imagenet.list_from_url('some_url')
self.assertListEqual(lst, ['resp1'])
lst = imagenet.list_from_url('some_url')
self.assertListEqual(lst, ['resp1', 'resp2'])
lst = imagenet.list_from_url('some_url')
self.assertListEqual(lst, [''])
lst = imagenet.list_from_url('some_url')
self.assertListEqual(lst, [' a '])
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestDenseListFromUrl(unittest.TestCase):
"""
Tests for dense_list_from_url().
"""
def test_simple(self, mock_urlopen):
"""
testing dense_list_from_url().
"""
mok = Mock()
mok.read.side_effect = ['resp1', 'resp1\nresp2', '',
' ', ' a ', ' a \n b \n c ',
'\n\na\n\nb\n\n c']
mock_urlopen.return_value = mok
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, ['resp1'])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, ['resp1', 'resp2'])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, [])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, [])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, ['a'])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, ['a', 'b', 'c'])
lst = imagenet.dense_list_from_url('some_url')
self.assertListEqual(lst, ['a', 'b', 'c'])
class TestXmlElemByPath(unittest.TestCase):
"""
Tests for xml_elem_by_path().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.doc = minidom.Document()
root = self.doc.createElement('root')
self.doc.appendChild(root)
self.lv1 = self.doc.createElement('level1-1')
root.appendChild(self.lv1)
self.lv11 = self.doc.createElement('level2-1')
self.lv1.appendChild(self.lv11)
lv111 = self.doc.createElement('level3-1')
self.lv11.appendChild(lv111)
root.appendChild(self.doc.createElement('level1-2'))
root.appendChild(self.doc.createElement('level1-3'))
lv4 = self.doc.createElement('level1-4')
root.appendChild(lv4)
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
del self.doc
def test_simple(self):
"""
testing xml_elem_by_path().
"""
elm = imagenet.xml_elem_by_path(self.doc, [])
self.assertEqual(elm, self.doc.documentElement)
self.assertRaises(IndexError, imagenet.xml_elem_by_path,
self.doc, ['nonexisting'])
self.assertRaises(IndexError, imagenet.xml_elem_by_path,
self.doc, ['level1-1', 'nonexisting'])
elm = imagenet.xml_elem_by_path(self.doc, ['level1-1'])
self.assertEqual(elm, self.lv1)
elm = imagenet.xml_elem_by_path(self.doc, ['level1-1', 'level2-1'])
self.assertEqual(elm, self.lv11)
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestXmlFromUrl(unittest.TestCase):
"""
Tests for xml_from_url().
"""
def test_simple(self, mock_urlopen):
"""
testing xml_from_url().
"""
mok = Mock()
mok.read.side_effect = ['<root></root>',
'<root><el>test text</el></root>',
'',
' a ']
mock_urlopen.return_value = mok
doc = imagenet.xml_from_url('some_url')
self.assertEqual(doc.documentElement.tagName, 'root')
doc = imagenet.xml_from_url('some_url')
self.assertEqual(doc.documentElement.tagName, 'root')
self.assertRaises(ExpatError, imagenet.xml_from_url, 'some_url')
self.assertRaises(ExpatError, imagenet.xml_from_url, 'some_url')
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetSynsets(unittest.TestCase):
"""
Tests for get_synsets().
"""
def test_simple(self, mock_urlopen):
"""
testing get_synsets().
"""
mok = Mock()
mok.read.side_effect = [TEST_SYNSETS]
mock_urlopen.return_value = mok
lst = imagenet.get_synsets()
self.assertListEqual(lst, ['n04386664', 'n10731013', 'n03002948',
'n07609632', 'n03003091', 'n10562968',
'n07586179', 'n09929577', 'n07933530',
'n04136161', 'n03602194', 'n03703075',
'n12990597'])
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetWords(unittest.TestCase):
"""
Tests for get_words().
"""
def test_simple(self, mock_urlopen):
"""
testing get_words().
"""
mok = Mock()
mok.read.side_effect = ["chickeree\nDouglas squirrel\n"
"Tamiasciurus douglasi"]
mock_urlopen.return_value = mok
lst = imagenet.get_words('some_url/%s', 'n07609632')
self.assertListEqual(lst, ['chickeree',
'Douglas squirrel',
'Tamiasciurus douglasi'])
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetHypos(unittest.TestCase):
"""
Tests for get_hypos().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
pass
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
pass
def test_simple(self, mock_urlopen):
"""
testing get_hypos().
"""
mok = Mock()
mok.read.side_effect = [TEST_SYNSETS]
mock_urlopen.return_value = mok
lst = imagenet.get_hypos('some_url/%s-%s', 'n07609632', True)
self.assertListEqual(lst, ['n04386664', 'n10731013', 'n03002948',
'n07609632', 'n03003091', 'n10562968',
'n07586179', 'n09929577', 'n07933530',
'n04136161', 'n03602194', 'n03703075',
'n12990597'])
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetImageCount(unittest.TestCase):
"""
Tests for get_image_count().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.sample = RELEASE_STATUS_SAMPLE
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
pass
def test_simple(self, mock_urlopen):
"""
testing get_image_count().
"""
mok = Mock()
mok.read.side_effect = [self.sample]
mock_urlopen.return_value = mok
lst = imagenet.get_image_count('some_url', True)
self.assertDictEqual(lst, {'n10801802': 269,
'n10772937': 58,
'n10028541': 201,
'n10712374': 175,
'n09878921': 46,
'n10789415': 48,
'n10370955': 502})
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetImageSynsets(unittest.TestCase):
"""
Tests for get_image_synsets().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.sample = RELEASE_STATUS_SAMPLE
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
pass
def test_simple(self, mock_urlopen):
"""
testing get_image_synsets().
"""
mok = Mock()
mok.read.side_effect = [self.sample]
mock_urlopen.return_value = mok
lst = imagenet.get_image_synsets('some_url', True)
self.assertListEqual(lst, ['n10801802', 'n10772937', 'n10028541',
'n10712374', 'n09878921', 'n10789415',
'n10370955'])
@patch('pyl2extra.scripts.datasets.imagenet.urllib2.urlopen')
class TestGetImageUrls(unittest.TestCase):
"""
Tests for get_image_urls().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
pass
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
pass
def test_simple(self, mock_urlopen):
"""
testing get_image_urls().
"""
mok = Mock()
mok.read.side_effect = [GET_MAPPING_SAMPLE]
mock_urlopen.return_value = mok
lst = imagenet.get_image_urls('some_url/%s', 'n02109150')
self.assertDictEqual(lst, {'n02109150_5962': 'http://1.jpg',
'n02109150_5969': 'http://2.jpg',
'n02109150_5976': 'http://3.jpg',
'n02109150_5981': 'http://4.jpg',
'n02109150_307': 'http://www.scbeacon.com/beacon_issues/03_09_18/images/Guidedog_pjh_091803.jpg',
'n02109150_323': 'http://www.braille.be/content/lig_braille/rapport_2005/img_05.jpg'})
class TestHashFile(unittest.TestCase):
"""
Tests for hashfile().
"""
@functools.wraps(unittest.TestCase.setUp)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.file_empty = os.path.join(self.tmp_dir, 'file_empty.txt')
with open(self.file_empty, 'wt') as fhnd:
fhnd.write('')
self.file_a = os.path.join(self.tmp_dir, 'file_a.txt')
with open(self.file_a, 'wt') as fhnd:
fhnd.write('a')
self.file_line = os.path.join(self.tmp_dir, 'file_line.txt')
with open(self.file_line, 'wt') as fhnd:
fhnd.write('abcdefghij')
self.file_mlines = os.path.join(self.tmp_dir, 'file_mlines.txt')
with open(self.file_mlines, 'wt') as fhnd:
fhnd.write('abcdefghij\nabcdefghij\nabcdefghij\n')
@functools.wraps(unittest.TestCase.tearDown)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
def test_simple(self):
"""
testing hashfile().
"""
self.assertEqual(imagenet.hashfile(self.file_empty),
'd41d8cd98f00b204e9800998ecf8427e')
self.assertEqual(imagenet.hashfile(self.file_a),
'0cc175b9c0f1b6a831c399e269772661')
self.assertEqual(imagenet.hashfile(self.file_line),
'a925576942e94b2ef57a066101b48876')
self.assertEqual(imagenet.hashfile(self.file_mlines),
'f90932f561733ea4558ada7ac7d27527')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 129 | 50966 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
        # np.unique sorts in ascending order; the largest class id is the positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
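# Illustrative sketch (not part of the upstream module): the ValueError in
# ``partial_fit`` above describes how to emulate class_weight='balanced' for
# incremental learning -- compute the weights once from a representative
# sample of y and pass them explicitly. ``clf``, ``batches`` and ``y_sample``
# are hypothetical placeholders; ``compute_class_weight`` is already imported
# at the top of this module.
def _example_balanced_partial_fit(clf, batches, y_sample):
    # Weights computed once from a representative sample of the targets.
    classes = np.unique(y_sample)
    weights = compute_class_weight('balanced', classes, y_sample)
    clf.set_params(class_weight=dict(zip(classes, weights)))
    # Stream the data through partial_fit, always passing the full class list.
    for X_batch, y_batch in batches:
        clf.partial_fit(X_batch, y_batch, classes=classes)
    return clf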
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
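# Illustrative sketch (not part of the upstream module): probability
# estimates as documented in ``predict_proba`` above are only defined for
# loss='log' and loss='modified_huber'. The dataset and seeds below are
# arbitrary.
def _example_probability_estimates():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=100, random_state=0)
    # Log loss yields a proper probabilistic classifier.
    clf_log = SGDClassifier(loss="log", random_state=0).fit(X, y)
    proba_log = clf_log.predict_proba(X[:5])  # each row sums to 1
    # Modified Huber maps the decision function through
    # (clip(decision_function(X), -1, 1) + 1) / 2 in the binary case.
    clf_mh = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    proba_mh = clf_mh.predict_proba(X[:5])
    return proba_log, proba_mh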
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
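# Illustrative sketch (not part of the upstream module): SGD is sensitive to
# feature scaling, so inputs are typically standardized first; the ``average``
# parameter documented above switches to averaged SGD. Dataset and seeds are
# arbitrary.
def _example_averaged_sgd_regression():
    from sklearn.datasets import make_regression
    from sklearn.preprocessing import StandardScaler
    X, y = make_regression(n_samples=200, n_features=10, random_state=0)
    X = StandardScaler().fit_transform(X)  # zero mean, unit variance features
    reg = SGDRegressor(average=10, random_state=0).fit(X, y)
    # With average=10, coef_ holds the averaged weights once more than ten
    # samples have been seen.
    return reg.coef_, reg.intercept_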
| bsd-3-clause |
deniszgonjanin/ckanext-bcgov | ckanext/bcgov/controllers/user.py | 2 | 8360 | # Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
import logging
from ckan.controllers.user import UserController
from ckan.common import OrderedDict, _, g, request
import ckan.lib.base as base
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
from urllib import urlencode
from ckan.logic import get_action
import ckan.lib.maintain as maintain
import ckan.plugins.toolkit as toolkit
from ckanext.bcgov.util.util import (get_user_orgs, get_user_toporgs)
c = toolkit.c
render = base.render
abort = base.abort
redirect = base.redirect
check_access = logic.check_access
NotAuthorized = logic.NotAuthorized
render = base.render
log = logging.getLogger('ckanext.edc_schema')
def _encode_params(params):
return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v))
for k, v in params]
class EDCUserController(UserController):
def dashboard_unpublished(self):
user_id = c.userobj.id
fq = ' +edc_state:("DRAFT" OR "PENDING PUBLISH" OR "REJECTED")'
#Get the list of organizations that this user is the admin
if not c.userobj.sysadmin :
user_orgs = ['"' + org.id + '"' for org in get_user_orgs(user_id, 'admin')]
user_orgs += ['"' + org.id + '"' for org in get_user_orgs(user_id, 'editor')]
if len(user_orgs) > 0 :
fq += ' +owner_org:(' + ' OR '.join(user_orgs) + ')'
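        # The resulting Solr filter query for a non-sysadmin user looks like
        # (organization ids below are hypothetical):
        #   +edc_state:("DRAFT" OR "PENDING PUBLISH" OR "REJECTED")
        #   +owner_org:("org-id-1" OR "org-id-2")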
self._user_datasets('dashboard_unpublished', c.userobj.id, fq)
return render('user/dashboard_unpublished.html')
def dashboard_datasets(self):
fq = ' +author:("%s")' % (c.userobj.id)
self._user_datasets('dashboard_datasets', c.userobj.id, fq)
return render('user/dashboard_datasets.html')
def read(self, id=None):
        if c.userobj and c.userobj.sysadmin:
fq = ''
else:
fq = ' +(edc_state:("PUBLISHED" OR "PENDING ARCHIVE")'
if c.userobj:
user_id = c.userobj.id
user_orgs = ['"' + org.id + '"' for org in get_user_orgs(user_id, 'admin')]
user_orgs += ['"' + org.id + '"' for org in get_user_orgs(user_id, 'editor')]
if len(user_orgs) > 0:
fq += ' OR owner_org:(' + ' OR '.join(user_orgs) + ')'
fq += ')'
self._user_datasets('read',id, fq)
return render('user/read.html')
def _user_datasets(self, action, id=None, filter_query=None):
from ckan.lib.search import SearchError
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True}
user_dict = {'id': id,
'user_obj': c.userobj}
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', u'')
# q += ' author:"%s"' %c.userobj.id
context['return_query'] = True
try:
page = int(request.params.get('page', 1))
except ValueError, e:
abort(400, ('"page" parameter must be an integer'))
limit = g.datasets_per_page
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
sort_by = request.params.get('sort', None)
def search_url(params):
if action == 'read':
url = h.url_for(controller='user', action=action, id=id)
else:
url = h.url_for(controller='user', action=action)
params = [(k, v.encode('utf-8') if isinstance(v, basestring)
else str(v)) for k, v in params]
return url + u'?' + urlencode(params)
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(alternative_url=alternative_url,
controller='user', action=action,
extras=dict(id=c.userobj.id),
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='user', action=action,
extras=dict(id=c.userobj.id))
c.remove_field = remove_field
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params)
try:
c.fields = []
search_extras = {}
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
q += ' %s:"%s"' % (param, value)
else:
search_extras[param] = value
facets = OrderedDict()
default_facet_titles = {
'organization': _('Organizations'),
'edc_state': _('States'),
'tags': _('Tags'),
'res_format': _('Formats'),
}
for facet in default_facet_titles:
facets[facet] = default_facet_titles[facet]
c.facet_titles = facets
fq = filter_query or ''
data_dict = {
'q': q,
'fq': fq.strip(),
'facet.field': facets.keys(),
'rows': limit,
'start': (page - 1) * limit,
'sort': sort_by,
'extras': search_extras
}
query = get_action('package_search')(context, data_dict)
c.page = h.Page(
collection=query['results'],
page=page,
url=pager_url,
item_count=query['count'],
items_per_page=limit
)
user_dict['package_count'] = query['count']
c.facets = query['facets']
maintain.deprecate_context_item('facets',
'Use `c.search_facets` instead.')
c.search_facets = query['search_facets']
c.search_facets_limits = {}
for facet in c.facets.keys():
limit = int(request.params.get('_%s_limit' % facet,
g.facets_default_number))
c.search_facets_limits[facet] = limit
c.page.items = query['results']
c.sort_by_selected = sort_by
except SearchError, se:
log.error('User search error: %r', se.args)
c.query_error = True
c.facets = {}
c.page = h.Page(collection=[])
self._setup_template_variables(context, user_dict)
def dashboard_organizations(self):
context = {'model': model, 'session': model.Session,
'for_view': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'user_obj': c.userobj}
self._setup_template_variables(context, data_dict)
(user_orgs, usr_suborgs) = get_user_toporgs(c.userobj.id)
facets = OrderedDict()
#Add the organization facet to get the number of records for each organization
facets['organization'] = _('Organizations')
data_dict = {
'facet.field': facets.keys(),
}
query = get_action('package_search')(context, data_dict)
c.org_pkg_count = query['facets'].get('organization')
c.top_orgs_items = user_orgs
c.suborgs_items = usr_suborgs
return render('user/dashboard_organizations.html')
| agpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/datasets/base_test.py | 132 | 3072 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import test
mock = test.mock
_TIMEOUT = IOError(110, "timeout")
class BaseTest(test.TestCase):
"""Test load csv functions."""
def testUrlretrieveRetriesOnIOError(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, None
]
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesAfterRetriesAreExhausted(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesOnNonRetriableErrorWithoutRetry(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
IOError(2, "No such file or directory"),
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert no retries
self.assertFalse(mock_time.called)
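# Illustrative sketch (not part of the upstream test file): the tests above
# exercise ``base.urlretrieve_with_retry``, which retries timed-out downloads
# with exponential backoff (1, 2, 4, 8, 16 second sleeps). A simplified
# stand-in with the behaviour the tests expect -- an approximation, not
# TensorFlow's actual implementation:
def _retry_with_backoff(fn, retries=5):
  import time
  delay = 1
  for attempt in range(retries + 1):
    try:
      return fn()
    except IOError as e:
      # Only timeouts (errno 110) are retried; other errors and exhausted
      # retries are re-raised immediately.
      if e.errno != 110 or attempt == retries:
        raise
      time.sleep(delay)
      delay *= 2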
if __name__ == "__main__":
test.main()
| apache-2.0 |
yonglehou/scikit-learn | sklearn/metrics/regression.py | 174 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
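# Worked check of the weighted-average example above: with per-output errors
# [0.5, 1.0] and multioutput=[0.3, 0.7], the result is
# (0.3 * 0.5 + 0.7 * 1.0) / (0.3 + 0.7) = 0.85, printed as 0.849... in the
# doctest because of floating-point rounding.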
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
        will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
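# Illustrative sketch (not part of the upstream module): as stated in the
# r2_score docstring, a constant model that always predicts the mean of y
# scores exactly 0.0. Values below are arbitrary.
def _example_constant_model_r2():
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_const = np.full_like(y_true, y_true.mean())  # always predict the mean
    return r2_score(y_true, y_const)  # -> 0.0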
| bsd-3-clause |
andrewcmyers/tensorflow | tensorflow/contrib/keras/api/keras/datasets/__init__.py | 129 | 1271 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.datasets import boston_housing
from tensorflow.contrib.keras.api.keras.datasets import cifar10
from tensorflow.contrib.keras.api.keras.datasets import cifar100
from tensorflow.contrib.keras.api.keras.datasets import imdb
from tensorflow.contrib.keras.api.keras.datasets import mnist
from tensorflow.contrib.keras.api.keras.datasets import reuters
del absolute_import
del division
del print_function
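# Typical entry point into these modules, e.g. MNIST (sketch only;
# ``load_data()`` downloads the dataset on first use):
#   from tensorflow.contrib.keras.api.keras.datasets import mnist
#   (x_train, y_train), (x_test, y_test) = mnist.load_data()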
| apache-2.0 |
pkruskal/scikit-learn | sklearn/tests/test_learning_curve.py | 224 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
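    # (Added note) Taken together, these checks imply the accepted ranges:
    # fractional train_sizes must lie in the half-open interval (0, 1], and
    # absolute sizes must lie in [1, n_max_training_samples] (here 20, i.e.
    # 30 samples with 2/3 of the data available for training when cv=3).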
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 142 | 22295 | """
Todo: cross-check the F-values with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
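# (Added note) Example of the invariant checked above: if scores_ were
# [0.1, 0.9, 0.5, 0.7] and two features were kept, get_support() must pick
# the features scoring 0.9 and 0.7, so the sorted kept scores equal the two
# largest values of the sorted score array.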
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
yonglehou/scikit-learn | examples/manifold/plot_manifold_sphere.py | 257 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/manifold/plot_manifold_sphere.py | 257 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances closely respect the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly well
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | examples/cluster/plot_lena_segmentation.py | 269 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
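# (Added note) For example, with beta=5 an edge whose gradient equals one
# standard deviation of the image gets weight exp(-5) + 1e-6 ~= 6.7e-3,
# while a zero-gradient edge gets exp(0) + 1e-6 ~= 1.0.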
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
dimkal/mne-python | examples/decoding/plot_linear_model_patterns.py | 13 | 3098 | """
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Decoding, a.k.a. MVPA or supervised machine learning applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources which are extracted by the filters.
Note that patterns/filters in MEG data are more similar than in EEG data
because the noise is less spatially correlated in MEG than in EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
"""
# Authors: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.pick_types(meg=True, eeg=False, copy=True)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.pick_types(meg=False, eeg=True, copy=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
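# Added sketch (not part of the original example): the Haufe et al. (2014)
# relation behind these plots. For a linear filter w (the classifier weights),
# the corresponding activation pattern is proportional to the feature
# covariance applied to w. This assumes the fitted LinearModel exposes a
# `filters_` attribute holding the filters from the EEG fit above.
import numpy as np
w = np.ravel(model.filters_)              # filter (classifier weight vector)
pattern = np.cov(X, rowvar=False).dot(w)  # pattern ~ cov(X) @ filter
print('Recomputed EEG pattern vector of length %d' % pattern.size)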
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/factorization/python/ops/kmeans.py | 12 | 20349 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
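    # (Added note) For example, with self._prev_loss = 100.0 and loss = 99.9,
    # relative_change = abs(99.9 - 100.0) / (1 + 100.0) ~= 9.9e-4, so the hook
    # requests a stop only when the configured tolerance is larger than that.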
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
    feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.feature_column.input_layer`. If this is None,
all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
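# (Added illustration, not part of the original module) For a dict input such
# as {'b': <Tensor [[1.], [2.]]>, 'a': <Tensor [[3.], [4.]]>} with
# feature_columns=None, the keys are sorted to ('a', 'b') and the result is a
# Tensor of shape (2, 2) equal to [[3., 1.], [4., 2.]].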
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See @{tf.estimator.Estimator}.
mode: See @{tf.estimator.Estimator}.
config: See @{tf.estimator.Estimator}.
Returns:
A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# cluster_centers_var: a Variable containing the cluster centers.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
for _ in xrange(num_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
if previous_centers is not None:
print 'delta:', cluster_centers - previous_centers
previous_centers = cluster_centers
print 'score:', kmeans.score(input_fn)
print 'cluster centers:', cluster_centers
# map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
for i, point in enumerate(points):
cluster_index = cluster_indices[i]
center = cluster_centers[cluster_index]
print 'point:', point, 'is in cluster', cluster_index, 'centered at', center
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
cluster centers. However, the cluster centers may be retrieved by the
latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers
from `0` to `k`. It will be invoked on successive input batches
as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the
entire batch is chosen to be initial cluster centers and the
remaining centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less
than `num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
      * `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: The squared Euclidean
        distance between vectors `u` and `v`, defined as \\(||u - v||_2^2\\),
        i.e. the sum of the absolute squares of the elements' differences.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See @{tf.estimator.Estimator}.
      feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
is None, all features will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(
num_clusters, initial_clusters, distance_metric, random_seed,
use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
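  # (Added usage sketch, hedged) A mini-batch configuration built only from the
  # documented constructor arguments above might look like:
  #
  #   kmeans = tf.contrib.factorization.KMeansClustering(
  #       num_clusters=5,
  #       use_mini_batch=True,
  #       mini_batch_steps_per_iteration=10)
  #
  # Each call to `train` then accumulates mini-batch statistics and syncs the
  # cluster centers every 10 steps, as described in the docstring above.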
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.evaluate}. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
this
function returns the squared Euclidean distance while the corresponding
sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
| apache-2.0 |
yonglehou/scikit-learn | sklearn/linear_model/omp.py | 126 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
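# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  It shows how the (gamma, idx) pair returned by
# _cholesky_omp maps back to a dense coefficient vector.  The toy dictionary,
# the chosen support and the helper name `_example_cholesky_omp` are invented
# for the example; unit-norm columns are a precondition of the helper.
def _example_cholesky_omp():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 20)
    X /= np.sqrt(np.sum(X ** 2, axis=0))  # enforce unit-norm columns
    true_coef = np.zeros(20)
    true_coef[[2, 5, 11]] = [1.0, -2.0, 0.5]
    y = np.dot(X, true_coef)
    gamma, idx, n_active = _cholesky_omp(X, y, n_nonzero_coefs=3)
    coef = np.zeros(20)
    coef[idx] = gamma  # scatter the active coefficients back into place
    return coef, n_active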
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
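# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  Calling the Gram-based helper directly: when `tol` is
# used, `tol_0` must be the squared norm of y, as documented above.  The data
# and the helper name are invented for the example.
def _example_gram_omp():
    rng = np.random.RandomState(1)
    X = rng.randn(40, 15)
    X /= np.sqrt(np.sum(X ** 2, axis=0))  # unit-norm columns
    w = np.zeros(15)
    w[[0, 3, 7]] = [2.0, -1.0, 0.5]
    y = np.dot(X, w)
    gamma, idx, n_active = _gram_omp(np.dot(X.T, X), np.dot(X.T, y),
                                     n_nonzero_coefs=5,
                                     tol_0=np.dot(y, y), tol=1e-6)
    return gamma, idx, n_active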
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
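# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  It exercises the two parametrisations described in the
# docstring of `orthogonal_mp`: a fixed number of atoms versus a residual
# tolerance.  The toy problem and the helper name are invented for the example.
def _example_orthogonal_mp():
    rng = np.random.RandomState(42)
    X = rng.randn(100, 30)
    X /= np.sqrt(np.sum(X ** 2, axis=0))  # columns are assumed unit-norm
    w = np.zeros(30)
    w[[4, 9, 21]] = [1.5, -1.0, 2.0]
    y = np.dot(X, w)
    coef_k = orthogonal_mp(X, y, n_nonzero_coefs=3)  # constrain ||gamma||_0 <= 3
    coef_tol = orthogonal_mp(X, y, tol=1e-8)         # constrain ||y - X gamma||^2 <= tol
    return coef_k, coef_tol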
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
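# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  Solving the same toy problem directly and through the
# precomputed Gram matrix; the two coefficient vectors are expected to agree
# for this well-posed case.  Names and data are invented for the example.
def _example_orthogonal_mp_gram():
    rng = np.random.RandomState(7)
    X = rng.randn(80, 25)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    w = np.zeros(25)
    w[[1, 6, 18]] = [1.0, 2.0, -0.5]
    y = np.dot(X, w)
    coef_direct = orthogonal_mp(X, y, n_nonzero_coefs=3)
    coef_gram = orthogonal_mp_gram(np.dot(X.T, X), np.dot(X.T, y),
                                   n_nonzero_coefs=3)
    return coef_direct, coef_gram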
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
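# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  Typical estimator-style usage of the class defined
# above; the data and the helper name are invented for the example.
def _example_omp_estimator():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 12)
    w = np.zeros(12)
    w[[2, 8]] = [3.0, -1.5]
    y = np.dot(X, w) + 0.01 * rng.randn(60)
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=2)
    model.fit(X, y)
    return model.coef_, model.intercept_, model.n_iter_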
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
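# --- Illustrative sketch (added for exposition; not part of the original
# scikit-learn module).  Letting cross-validation pick the number of atoms with
# the class defined above; the data and the helper name are invented.
def _example_omp_cv():
    rng = np.random.RandomState(3)
    X = rng.randn(120, 20)
    w = np.zeros(20)
    w[[0, 5, 11, 17]] = [1.0, -2.0, 0.5, 1.5]
    y = np.dot(X, w) + 0.05 * rng.randn(120)
    model = OrthogonalMatchingPursuitCV()
    model.fit(X, y)
    return model.n_nonzero_coefs_, model.coef_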
| bsd-3-clause |
opendatadurban/scoda | scoda/public.py | 1 | 45401 | import itertools
import operator
from sqlalchemy_searchable import search
from scoda.app import app
from flask import request, url_for, redirect, flash, make_response, session, render_template, jsonify, Response, \
send_file
from flask_security import current_user
from itertools import zip_longest
from sqlalchemy.sql import select
from sqlalchemy import func, extract, desc,cast,Date
from .models import db
from .models import *
from .models.user import UserAnalysis
from .models.datasets import ExploreForm
from .models.maps import MapForm, NightFormETH, NightFormJHB
from pandas import read_sql_query
import gviz_api
import geojson, json
import pandas as pd
from .app import csrf
from werkzeug.datastructures import MultiDict
from urllib.parse import urlencode, urlparse, parse_qsl, urlsplit, parse_qs
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
@app.route('/help')
def help():
return render_template('help/help.html')
@app.route('/demographics/<region_id>/<city_ward_code>/download', methods=['GET'])
def demographics_download(region_id, city_ward_code):
region = Region.query.get(region_id).re_name
if city_ward_code == 'None':
query = db.session.query(Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == region_id).all()
df = pd.DataFrame()
df['Year'] = range(1996, 2031)
for g in query:
df['%s - Ward %s' % (region, g[1])] = list(g[0])
else:
query = db.session.query(Ward.data, Ward.city_ward_code) \
.filter(Ward.city_ward_code == city_ward_code) \
.filter(Ward.region_id == region_id).all()
df = pd.DataFrame()
df['Year'] = range(1996, 2031)
for g in query:
df['%s - Ward %s' % (region, g[1])] = list(g[0])
return Response(df.to_csv(index=False), mimetype="text/csv",
headers={"Content-disposition": "attachment; filename=demographics.csv"})
@app.route('/demographics', methods=['GET', 'POST'])
def demographics():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['demo'] = []
if 'maps' not in session.keys():
session['maps'] = {0: {}, 1: {}}
form1 = MapForm(prefix='form1', region_id='1', year=1)
print(form1.city_ward_code.choices)
status = 200
tour = 1
geometries1 = {}
forms = [form1]
if request.method == 'POST':
if all(f.validate() for f in forms):
for f, F in enumerate(forms):
for field in F:
if str(field.data) == 'None':
field.data = session['maps'][str(f)][field.name[6:]]
else:
session['maps'][str(f)][field.name[6:]] = field.data
tour = 0
# query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data)
year1 = int(form1.year.data)
year_ind1 = range(1996, 2031)
if form1.city_ward_code.data == '':
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == form1.region_id.data)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Ward %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == form1.region_id.data).all()
region = db.session.query(Region.re_name).filter(Region.id == form1.region_id.data).first()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s' % str(region[0])]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
else:
query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data, Area.city_ward_code) \
.filter(Area.city_ward_code == form1.city_ward_code.data) \
.filter(Area.region_id == form1.region_id.data)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Area %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.city_ward_code == form1.city_ward_code.data). \
filter(Ward.region_id == form1.region_id.data).first()
region = db.session.query(Region.re_name).filter(Region.id == form1.region_id.data).first()
region2 = db.session.query(Ward.city_ward_code).filter(Ward.city_ward_code == form1.city_ward_code.data) \
.first()
results = []
for r in query:
row = [val for val in list(r)]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s - Ward %s' % (str(region[0]), str(region2[0]))]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == form1.region_id.data).order_by(
Ward.city_ward_code).distinct()
form1.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form1.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics.html', form1=form1, geometries1=geometries1,
table1=table1, tour=tour, max1=m1, region1=form1.region_id.data,
ward1=form1.city_ward_code.data, analyses=analyses)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
session['maps'][0] = {'city_ward_code': '', 'region_id': 1, 'year': 1}
session['maps'][1] = {'city_ward_code': '', 'region_id': 4, 'year': 1}
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 4)
for g in query:
d = json.loads(g[0])
geometries2['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 4).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table2 = [['Year', 'EThekwini']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table2.append([str(y), val])
m2 = 1.05 * max(df.sum(axis=0).tolist())
return render_template('demographics/demographics.html', form1=form1, geometries1=geometries1,
tour=tour, table1=table1, max1=m, region1=1, ward1=None, ward2=None, analyses=analyses
)
if not request.is_xhr:
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
geometries2['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = make_response(render_template('demographics/demographics.html', form1=form1,
geometries1=geometries1, table1=table1,
tour=tour, max1=m, region1=1,
ward1=None, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/api/demographics', methods=['GET', 'POST'])
@csrf.exempt
def api_demographics():
analyses = []
session['demo'] = []
if 'maps' not in session.keys():
session['maps'] = {0: {}, 1: {}}
form1 = MapForm(prefix='form1', region_id='1', year=1)
geometries1 = {}
if request.method == 'POST':
data = request.get_json()
print(data)
#data = request.data.decode('utf-8')
#object = parse_qs(urlsplit('?' + data).query)
#object = {key: str(value[0]) for key, value in object.items()}
#if 'csrf_token' in object: del object['csrf_token']
#form1 = MapForm(MultiDict(object))
form1 = data
print(form1['year'])
#if form1.validate():
if form1:
tour = 0
# query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data)
#year1 = int(form1.year)
year1 = int(form1['year'])
year_ind1 = range(1996, 2031)
#if form1.city_ward_code.data == '':
if form1['city_ward_code'] == '':
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == form1['region_id'])
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Ward %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == form1['region_id']).all()
region = db.session.query(Region.re_name).filter(Region.id == form1['region_id']).first()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s' % str(region[0])]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
else:
query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data, Area.city_ward_code) \
.filter(Area.city_ward_code == int(form1['city_ward_code'])) \
.filter(Area.region_id == int(form1['region_id']))
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Area %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.city_ward_code == int(form1['city_ward_code'])). \
filter(Ward.region_id == int(form1['region_id'])).first()
region = db.session.query(Region.re_name).filter(Region.id == int(form1['region_id'])).first()
region2 = db.session.query(Ward.city_ward_code).filter(Ward.city_ward_code == int(form1['city_ward_code'])) \
.first()
results = []
for r in query:
row = [val for val in list(r)]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s - Ward %s' % (str(region[0]), str(region2[0]))]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == int(form1['region_id'])).order_by(
Ward.city_ward_code).distinct()
#form1.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
#, start=1)]
#form1.city_ward_code.choices.insert(0, ('', 'View All'))
resp = jsonify({'success': True, 'geometries1': geometries1,'table1':table1,
'tour':tour, 'max1':m1, 'region1':form1['region_id'],'ward1':form1['city_ward_code']})
resp.status_code = 200
return resp
else:
message = 'Please correct the problems below and try again.'
resp = jsonify(message=message)
resp.status_code = 500
return resp
else:
session['maps'][0] = {'city_ward_code': '', 'region_id': 1, 'year': 1}
session['maps'][1] = {'city_ward_code': '', 'region_id': 4, 'year': 1}
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = jsonify({'success': True, 'table1': table1,
'max1': m, 'region1': 1, 'ward1': None,'ward2':None, 'geometries1': geometries1,
'form_year':form1.year.choices,'form_ward':form1.city_ward_code.choices,'form_city':form1.region_id.choices})
resp.status_code = 200
return resp
@app.route('/nightlights_jhb', methods=['GET', 'POST'])
def demographics_night_jhb():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['night'] = []
form = NightFormJHB()
status = 200
tour = 1
if request.method == 'POST':
if form.validate():
tour = 0
if form.city_ward_code.data == '':
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 1)
geometries = {"type": "FeatureCollection",
"features": []}
bias_ind = [x / 10.0 for x in range(5, 21, 1)].index(float(form.grid_bias.data))
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][bias_ind] - g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 1).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in
enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night.html', form=form, geometries=geometries,
bias_val=form.grid_bias.data)
else:
w = db.session.query(Ward.id).filter(Ward.city_ward_code == form.city_ward_code.data)\
.filter(Ward.region_id == 1).first()
w = Ward.query.get(w[0])
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference) \
.filter(Grid.geom.intersects(w.geom))
geometries = {"type": "FeatureCollection",
"features": []}
bias_ind = [x / 10.0 for x in range(5, 21, 1)].index(float(form.grid_bias.data))
for g in query:
d = json.loads(g[0])
geometries['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][bias_ind] - g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code)\
.filter(Ward.city_ward_code == form.city_ward_code.data).filter(Ward.region_id == 1)
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries2['features'].append(
{"type": "Feature", "properties": {"density": 0,
"name": 'Ward %s' % form.city_ward_code.data,
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 1).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night.html', form=form, geometries=geometries,
bias_val=form.grid_bias.data, geometries2=geometries2, ward=form.city_ward_code.data)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 1)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": g[1][0] - g[3],
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
return render_template('demographics/demographics_night.html', form=form, bias_val=0.5, geometries=geometries,
analyses=analyses)
    if not request.is_xhr:
        # POST validation failed for a regular (non-AJAX) request: fall back to
        # the default full-city night-lights view for Johannesburg.
        query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
            filter(Grid.region_id == 1)
        geometries = {"type": "FeatureCollection",
                      "features": []}
        for g in query:
            d = json.loads(g[0])
            geometries['features'].append({"type": "Feature", "properties": {"density": g[1][0] - g[3],
                                                                             "name": 'Grid %s' % g[2],
                                                                             "year": 2016},
                                           "geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
        resp = make_response(render_template('demographics/demographics_night.html', form=form, bias_val=0.5,
                                             geometries=geometries, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/nightlights_eth', methods=['GET', 'POST'])
def demographics_night_eth():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['night'] = []
form = NightFormETH()
status = 200
tour = 1
if request.method == 'POST':
if form.validate():
tour = 0
if form.city_ward_code.data == '':
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 4).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in
enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries)
else:
w = db.session.query(Ward.id).filter(Ward.city_ward_code == form.city_ward_code.data)\
.filter(Ward.region_id == 4).first()
w = Ward.query.get(w[0])
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference) \
.filter(Grid.geom.intersects(w.geom)).filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code)\
.filter(Ward.city_ward_code == form.city_ward_code.data).filter(Ward.region_id == 4)
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries2['features'].append(
{"type": "Feature", "properties": {"density": 0,
"name": 'Ward %s' % form.city_ward_code.data,
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 4).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries,
geometries2=geometries2, ward=form.city_ward_code.data)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries,
analyses=analyses)
    if not request.is_xhr:
        # POST validation failed for a regular (non-AJAX) request: fall back to
        # the default full-city night-lights view for eThekwini.
        query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
            filter(Grid.region_id == 4)
        geometries = {"type": "FeatureCollection",
                      "features": []}
        for g in query:
            d = json.loads(g[0])
            geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][0] - g[3]),
                                                                             "name": 'Grid %s' % g[2],
                                                                             "year": 2016},
                                           "geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
        resp = make_response(render_template('demographics/demographics_night_ETH.html', form=form,
                                             geometries=geometries, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/return-land/')
def land_gen():
return send_file('data/Ethekwini_Region_Data.xlsx', as_attachment=True)
@app.route('/_parse_data', methods=['GET'])
def parse_data():
kwargs = {}
for i in ['dataset_id', 'indicator_id', 'region_id', 'type_id', 'theme_id', 'year']:
param = request.args.get(i)
if (i == 'year'):
if (str(param) != 'Empty') and (param is not None) and (str(param) != ''):
kwargs[i] = int(param)
else:
pass
elif (param is not None) and (str(param) != ''):
kwargs[i] = param
session['explore'] = [i for i in kwargs]
datasets = db.session.query(DataPoint.dataset_id).filter_by(**kwargs).distinct()
indicators = db.session.query(DataPoint.indicator_id).filter_by(**kwargs).distinct()
regions = db.session.query(DataPoint.region_id).filter_by(**kwargs).distinct()
types = db.session.query(DataPoint.type_id).filter_by(**kwargs).distinct()
themes = db.session.query(DataPoint.theme_id).filter_by(**kwargs).distinct()
years = db.session.query(DataPoint.year).filter_by(**kwargs).distinct()
response = {}
remove_list = ['Poverty rate', 'Gini Coefficient', 'Gross Value Add', 'Exports', 'Multiple deprivation index',
'Human Development Index']
dataset_list = [(i[0], str(DataSet.query.filter_by(id=i).first().ds_name)) for i in datasets if
str(DataSet.query.filter_by(id=i).first().ds_name) not in remove_list]
if 'dataset_id' not in session['explore']:
dataset_list.insert(0, ('', 'Empty'))
else:
dataset_list.insert(1, ('', 'Empty'))
response['dataset'] = dataset_list
indicator_list = [[i[0], str(Indicator.query.filter_by(id=i).first().in_name)] for i in indicators if
str(Indicator.query.filter_by(id=i).first().in_name) not in remove_list]
if 'indicator_id' not in session['explore']:
indicator_list.insert(0, ('', 'Empty'))
response['ind_ready'] = 0
else:
indicator_list.insert(1, ('', 'Empty'))
response['ind_ready'] = 1
response['indicator'] = indicator_list
region_list = [(i[0], str(Region.query.filter_by(id=i).first().re_name)) for i in regions]
if 'region_id' not in session['explore']:
region_list.insert(0, ('', 'Empty'))
else:
region_list.insert(1, ('', 'Empty'))
response['region'] = region_list
type_list = [(i[0], str(Type.query.filter_by(id=i).first().ty_name)) for i in types]
if 'type_id' not in session['explore']:
type_list.insert(0, ('', 'Empty'))
else:
type_list.insert(1, ('', 'Empty'))
response['type'] = type_list
theme_list = [(i[0], str(Theme.query.filter_by(id=i).first().th_name)) for i in themes]
if 'theme_id' not in session['explore']:
theme_list.insert(0, ('', 'Empty'))
else:
theme_list.insert(1, ('', 'Empty'))
response['theme'] = theme_list
year_list = [(str(i), str(y[0])) for i, y in enumerate(sorted(years))]
if 'year' not in session['explore']:
year_list.insert(0, ('', 'Empty'))
else:
year_list.insert(1, ('', 'Empty'))
response['year'] = year_list
return jsonify(response)
@app.route('/_parse_demo', methods=['GET'])
def parse_demo():
kwargs = {}
for i in ['region_id', 'ward_id']:
param = request.args.get(i)
if (param is not None) and (str(param) != ''):
kwargs[i] = param
session['demo'] = [i for i in kwargs]
wards = db.session.query(Ward.city_ward_code).filter_by(**kwargs).distinct().order_by(Ward.city_ward_code)
response = {}
ward_list = [(str(i[0]), 'Ward %s' % Ward.query.filter_by(id=i).first().city_ward_code) for i in wards]
if 'ward_id' not in session['demo']:
ward_list.insert(0, ('', 'View All'))
else:
ward_list.insert(1, ('', 'View All'))
response['wards'] = ward_list
return jsonify(response)
@app.route('/api/codebook', methods=['GET', 'POST'])
@app.route('/api/codebook/<int:page>', methods=['GET', 'POST'])
@csrf.exempt
def api_codebook(page=1):
query = db.session.query(CbIndicator). \
outerjoin(CbTheme, CbTheme.id == CbIndicator.theme_id). \
outerjoin(CbSource, CbSource.id == CbIndicator.source_id). \
outerjoin(CbUnit, CbUnit.id == CbIndicator.unit_id)
if request.method == 'POST':
data = request.get_json()
print(f'data: {data}')
if data['c88']:
query = query.filter(CbIndicator.c88_theme.in_(data['c88']))
if data['socr']:
query = query.filter(CbIndicator.socr_theme.in_(data['socr']))
if data['sdg']:
query = query.filter(CbIndicator.sdg_theme.in_(data['sdg']))
if data['search']:
query = search(query, data['search'], sort=True)
# else:
# query = query.limit(150).offset((page - 1) * 20)
row_count = query.count()
query = query.all()
# query.sort(key=lambda x: x.code)
result_list = [row_count]
for day, dicts_for_group_code in itertools.groupby(query, key=lambda x:x.group_code):
dicts_for_group_code = list(dicts_for_group_code)
day_dict = {
"id": str(dicts_for_group_code[0].id),
"varCode": dicts_for_group_code[0].code,
"groupCode": dicts_for_group_code[0].group_code,
"indicator": dicts_for_group_code[0].name,
"c88": dicts_for_group_code[0].c88_theme,
"socr": dicts_for_group_code[0].socr_theme,
"sdg": dicts_for_group_code[0].sdg_theme,
"definition": dicts_for_group_code[0].definition,
"source": dicts_for_group_code[0].source.name if dicts_for_group_code[0].source else None,
"reportingResponsibility": dicts_for_group_code[0].reporting_responsibility,
"notesOnCalculation": dicts_for_group_code[0].notes_on_calculation,
"variableType": dicts_for_group_code[0].unit.name if dicts_for_group_code[0].unit else None,
"frequencyOfCollection": dicts_for_group_code[0].frequency_of_collection,
"automatibility": dicts_for_group_code[0].automatable,
"granulity": dicts_for_group_code[0].granularity,
"gathering_method": dicts_for_group_code[0].gathering_method,
"expandability": dicts_for_group_code[0].expandable,
"period": dicts_for_group_code[0].period,
"unit_of_measurement": dicts_for_group_code[0].unit.name if dicts_for_group_code[0].unit else None,
"source_link": dicts_for_group_code[0].url_link,
"data_check":True if dicts_for_group_code[0].indicator_data else False
}
children = []
dicts_for_group_code.pop(0)
for d in dicts_for_group_code:
child = {
"id": str(d.id),
"varCode": d.code,
"groupCode": d.group_code,
"indicator": d.name,
"c88": d.c88_theme,
"socr": d.socr_theme,
"sdg": d.sdg_theme,
"definition": d.definition,
"source": d.source.name if d.source else None,
"reportingResponsibility": d.reporting_responsibility,
"notesOnCalculation": d.notes_on_calculation,
"variableType": d.unit.name if d.unit else None,
"frequencyOfCollection": d.frequency_of_collection,
"automatibility": d.automatable,
"granulity": d.granularity,
"gathering_method": d.gathering_method,
"expandability": d.expandable,
"period": d.period,
"unit_of_measurement": d.unit.name if d.unit else None,
"source_link": d.url_link,
"data_check": bool(d.indicator_data),
}
children.append(child)
day_dict.update({"children": children})
result_list.append(day_dict)
return jsonify(result_list)
| apache-2.0 |
mxjl620/scikit-learn | sklearn/datasets/tests/test_base.py | 204 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
Borisvl/amazon-dsstne | benchmarks/tf/autoencoder.py | 3 | 7363 | #!/usr/bin/env python
"""Trains sparse auto-encoder
"""
import argparse
import math
import numpy as np
import sys
import tensorflow as tf
import time
class FeedForwardNetwork(object):
"""Constructs a basic multi-layer neural network.
"""
def __init__(self, dim_x, dim_y, hidden_units, layers,
gpu_mrr=True,
activation=tf.nn.sigmoid):
with tf.variable_scope("FFN"):
# Create input/output variables
self.x = x = tf.placeholder("float", shape=[None, dim_x])
self.y_ = y_ = tf.placeholder("float", shape=[None, dim_y])
# Create model: parameterized for k deep FF layers
Hsize = [dim_x] + [hidden_units]*layers + [dim_y]
print "Layers: %s" % str(Hsize)
k = len(Hsize)-1
Wall = [None] * k
ball = [None] * k
for (layer, d1) in enumerate(Hsize[:-1]):
d2 = Hsize[layer+1]
Wall[layer] = tf.Variable(tf.random_normal(shape=[d1,d2],stddev=0.1))
ball[layer] = tf.Variable(tf.constant(0.1,shape=[d2]))
Hact = [None] * (k+1)
Hact[0] = x
for layer in range(k):
Hact[layer+1] = activation(tf.matmul(Hact[layer],Wall[layer]) + ball[layer])
# output is the last activation
self.output = y = Hact[k]
# Loss: numerically stable cross-entropy
self.loss = loss = -tf.reduce_mean(y_*tf.log(y) +
(tf.sub(1.0,y_)*tf.log(tf.sub(1.000001,y))))
# Optimizer
self.lr = tf.Variable(1e-4, trainable=False)
#self.train_step = tf.train.MomentumOptimizer(self.lr,momentum=0.9).minimize(loss)
# Momentum gives very poor results in my experience here.
#self.train_step = tf.train.AdamOptimizer(self.lr).minimize(loss)
self.train_step = tf.train.RMSPropOptimizer(self.lr,decay=0.9).minimize(loss)
self.avgloss = tf.reduce_mean(loss)
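# Illustrative driver (added for clarity, not part of the original script):
# main() below uses this class roughly as
#   dnn = FeedForwardNetwork(dim_x, dim_y, hidden_units=8192, layers=1)
#   sess.run(tf.initialize_all_variables())
#   dnn.train_step.run(feed_dict={dnn.x: batch_x, dnn.y_: batch_y, dnn.lr: 1e-4})
# (TF 0.x-era API, matching the calls used elsewhere in this file).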
class DataManager(object):
"""Encapsulates low-level data loading.
"""
def __init__(self, width):
"""Initialize loader
width: the possible number of bits, which is the dimensionality of the
vectors
"""
self.width = width # number of dimensions
self.word_assignments = {} # maps from word to vector index
self.W = None
def index_for_word(self, word):
"""returns a list of k indices into the output vector
corresponding to the bits for this word
"""
if not self.word_assignments.has_key(word):
idx = len(self.word_assignments)
self.word_assignments[word] = idx
return self.word_assignments[word]
def set_bit(self,row,word):
bit = self.index_for_word(word)
row[0,bit] = 1
return row
def parse_line_into_words(self, line):
"""This is specific to the ReMo AIV format, but can be overridden
"""
line = line.split("\t")[1] # strip first column, which is customer id
words = [x[:x.find(",")] for x in line.split(":")]
return words
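    # Illustrative input (format inferred from the parsing above, not from any
    # spec): a line such as
    #   "cust123\tITEM_A,1:ITEM_B,3"
    # drops the customer-id column and yields words == ["ITEM_A", "ITEM_B"].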
def parse_cust_id(self, line):
cust_id = line.split("\t")[0]
return cust_id
def load(self, filename):
W_list = []
with open(filename,"r") as f:
for line in f.readlines():
words = self.parse_line_into_words(line)
row = np.zeros((1,self.width))
for word in words:
row = self.set_bit(row,word)
W_list.append(row)
self.W = np.concatenate(W_list)
return self.W
class MiniBatcher(object):
"""Iterable set of input/output matrices for training or testing
"""
def __init__(self, x, y):
self.batch_pos = 0
self.x = x
self.y = y
self.size = x.shape[0]
if y.shape[0] != self.size:
raise RuntimeError("X & Y must have same number of entries")
def next(self, n):
"""Generates the next minibatch of n items.
Returns a tuple of (x,y)
"""
if self.batch_pos + n > self.size:
# We could be cleaner about wrapping
self.batch_pos = 0
b = []
p1 = self.batch_pos
p2 = p1 + n
b.append( self.x[p1:p2] )
b.append( self.y[p1:p2] )
self.batch_pos = p2
return b
class AutoencoderParser(object):
"""Responsible for loading a directory of data files
(train/validate,input/output/etc).
"""
def __init__(self, cmd):
"""Takes a argparse command as configuration.
Loads data, and makes it accessible as member variables:
Accessible members:
train: MiniBatcher object for training
"""
# Parse config from command
dims = cmd.vocab_size
# Set up loader
mgr = DataManager(dims)
# Load train data
train_x = mgr.load(cmd.datafile)
train_y = train_x
self.train = MiniBatcher(train_x,train_y)
def main(cmd):
print("Loading datasets")
all_data = AutoencoderParser(cmd)
dims = cmd.vocab_size
print("Constructing neural network")
dnn = FeedForwardNetwork(dims, dims, cmd.hidden_units, cmd.layers)
print("Initializing TensorFlow")
# train the model
sess = tf.Session()
sess.run(tf.initialize_all_variables())
print("Starting training")
with sess.as_default():
start_time = time.time()
for i in range(cmd.max_iters):
batch = all_data.train.next(cmd.batch_size)
train_dict = {
dnn.x: batch[0],
dnn.y_: batch[1],
dnn.lr: cmd.learning_rate,
}
dnn.train_step.run(feed_dict=train_dict)
if i%cmd.eval_iters == 0:
spd = (i+1) / (time.time() - start_time)
print("Iter %d. %giter/s" % (i,spd))
print "Done training\n"
def get_parser():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-l","--layers",
help="Number of hidden layers",
type=int,
default=1)
parser.add_argument("--vocab_size",
help="Number of unique items to auto-encode",
type=int,
default=30000)
parser.add_argument("-u","--hidden_units",
help="Size of hidden layer",
type=int,
default=8192)
parser.add_argument("-i","--max_iters",
help="Maximum number of iterations",
type=int,
default=1000)
parser.add_argument("-b","--batch_size",
help="minibatch size",
type=int,
default=512)
parser.add_argument("-f","--datafile",
help="file with input/output data for autoencoder",
required=True)
parser.add_argument("-v","--eval_iters",
help="how often to print speed",
type=int,
default=5)
parser.add_argument("--learning_rate",
help="learning rate",
type=float,
default=1e-4)
return parser
if __name__ == "__main__":
cmd = get_parser().parse_args()
main(cmd)
| apache-2.0 |
LucidAi/nlcd | husky/textutil.py | 1 | 10826 | # coding: utf-8
# Author: Vova Zaytsev <[email protected]>
import re
import lxml
import nltk
import ftfy
import string
import langid
import difflib
import logging
import newspaper
import readability
import textblob.tokenizers
from lxml.etree import ParserError
from readability.readability import Unparseable
from husky.markup import Markup
from husky.markup import MarkupChunk
from husky.markup import EntityReference
class TextUtil(object):
"""
    Text-processing helpers: article body extraction from HTML, sentence
    tokenization, and fuzzy matching of quoted segments.
"""
RE_MULTIPLE_SPACE = re.compile(u"\s+", re.UNICODE)
RE_WHITESPACE = re.compile(u" +", re.UNICODE)
RE_EMPTY_STR = re.compile(u"^\s*$", re.UNICODE)
RE_HTML_SPECIAL_CHARS = re.compile(u"&#?[a-z0-9]+;", re.UNICODE)
RE_QUOTED_PHRASE = re.compile(u"\"([^\"]*)\"", re.UNICODE)
RE_L_SPACE = re.compile(u"^\s+", re.UNICODE)
RE_R_SPACE = re.compile(u"\s+$", re.UNICODE)
RE_LQ = re.compile(u"^\s*\"\s*", re.UNICODE)
RE_RQ = re.compile(u"\s*\"\s*$", re.UNICODE)
def __init__(self):
self.np_config = newspaper.configuration.Configuration()
self.np_config.fetch_images = False
self.seq_matcher = difflib.SequenceMatcher(None)
def simplified_text(self, text, remove_punct=True):
if text is None:
return None
text = text.lower()
if isinstance(text, unicode):
text = text.encode("utf-8")
if remove_punct:
text = text.translate(None, string.punctuation)
text = self.RE_MULTIPLE_SPACE.sub(" ", text)
text = " ".join(nltk.word_tokenize(text))
return text
def get_pretty_markup(self, html):
        clean_html = readability.Document(html).summary()
paragraphs = lxml.html.fromstring(clean_html).xpath("//p")
return [p.text_content() for p in paragraphs]
def extract_body(self, url, html):
try:
document = readability.Document(html)
summary = document.summary()
lang_id, _ = langid.classify(summary)
try:
doc_title = document.title()
except TypeError:
doc_title = None
except ParserError:
lang_id = "en"
document = None
summary = ""
doc_title = None
except Unparseable:
lang_id = "en"
document = None
summary = ""
doc_title = None
try:
article = newspaper.Article(url, language=lang_id, config=self.np_config)
article.set_html(html)
article.parse()
except IOError:
# If language is not found, try to use English parser.
article = newspaper.Article(url, language="en", config=self.np_config)
article.set_html(html)
article.parse()
a_text = "" if article.text is None or len(article.text) == 0 else article.text
r_text = "" if summary is None or len(summary) == 0 else summary
r_text = nltk.clean_html(r_text)
if len(a_text) > len(r_text):
text = a_text
else:
text = r_text
a_title = "" if article.title is None or len(article.title) == 0 else article.title
r_title = "" if doc_title is None or len(doc_title) == 0 else doc_title
if len(r_title) == 0:
title = a_title
else:
title = r_title
if len(title) > 0:
title = self.norm_sentence(title)
if title[-1] not in string.punctuation:
title += "."
text = title + "\n" + text
text = text.replace(".\n", " ")
try:
text = ftfy.fix_text(text,
fix_entities=True,
remove_terminal_escapes=True,
uncurl_quotes=True,
fix_line_breaks=True)
except UnicodeError:
logging.error("Error while parsing HTML from %r" % url)
return "", "en"
return text, lang_id
def sent_tokenize(self, text):
lines = text.split("\n")
sentences = []
for line in lines:
sentences.extend(textblob.tokenizers.sent_tokenize(line))
sentences = [sent for sent in sentences if not self.RE_EMPTY_STR.match(sent)]
return [self.RE_WHITESPACE.sub(" ", sent) for sent in sentences]
def extract_quoted(self, text):
return self.RE_QUOTED_PHRASE.findall(text)
def norm_sentence(self, sentence):
sentence = self.RE_L_SPACE.sub("", sentence)
sentence = self.RE_R_SPACE.sub("", sentence)
return sentence
def remove_lr_quotes(self, sentence):
sentence = self.RE_LQ.sub("", sentence)
sentence = self.RE_RQ.sub("", sentence)
return sentence
def words_count(self, sentence):
if isinstance(sentence, unicode):
sentence = sentence.encode("utf-8")
no_punct = sentence.translate(None, string.punctuation)
tokens = nltk.word_tokenize(no_punct)
return len(tokens)
def select_segments(self, sentences, quoted, min_length=10, min_size=5):
sentences = map(self.remove_lr_quotes, map(self.norm_sentence, sentences))
quoted = map(self.remove_lr_quotes, map(self.norm_sentence, quoted))
segments = set()
for segm in sentences + quoted:
if len(segm) < min_length:
continue
if segm in segments:
continue
if self.words_count(segm) < min_size:
continue
segments.add(segm.replace("\"", ""))
return list(segments)
def compile_fuzzy_patterns(self, queries):
patterns = {}
for query_text in queries:
query_re = self.compile_fuzzy_pattern(query_text)
patterns[query_text] = query_re
return patterns
def compile_fuzzy_pattern(self, query_text):
query_tokens = query_text.split(" ")
query_re_tokens = ["(?:%s)?" % re.escape(query_tokens[0])]
for i in xrange(1, len(query_tokens)):
re_token = "(?: %s)?" % re.escape(query_tokens[i])
query_re_tokens.append(re_token)
query_re = "(%s)" % ".*?".join(query_re_tokens)
query_re = re.compile(query_re, re.UNICODE | re.IGNORECASE)
return query_re
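    # Example (illustrative, not in the original source): for the query
    # "foo bar" this produces the pattern
    #   ((?:foo)?.*?(?: bar)?)
    # i.e. every token is optional and tokens may be separated by arbitrary
    # text, which is what ffs/fuzzy_search below rely on for fuzzy matching.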
def ffs(self, text, query_text, fuzzy_pattern, min_threshold=0.5, max_threshold=1.5, min_m_size=5, min_ratio=0.5):
if query_text in text:
return True
min_len = max(len(query_text) * min_threshold, min_m_size)
max_len = len(query_text) * max_threshold
matches = [m for m in fuzzy_pattern.findall(text) if min_len < len(m) < max_len]
if len(matches) == 0:
return False
self.seq_matcher.set_seq1(query_text)
for m in matches:
self.seq_matcher.set_seq2(m)
ratio = self.seq_matcher.ratio()
if ratio > min_ratio:
return True
return False
def fuzzy_search(self, text, query_text, fuzzy_pattern, min_threshold=0.5, max_threshold=1.5, min_m_size=5):
min_len = max(len(query_text) * min_threshold, min_m_size)
max_len = len(query_text) * max_threshold
mm = fuzzy_pattern.findall(text)
matches = [m for m in mm if min_len < len(m) < max_len]
if len(matches) == 0:
return 0.0, None
best_ratio = 0.0
best_match = None
self.seq_matcher.set_seq1(query_text)
for m in matches:
self.seq_matcher.set_seq2(m)
ratio = self.seq_matcher.ratio()
if ratio > best_ratio:
best_ratio = ratio
best_match = m
return best_ratio, best_match
def fuzzy_group_search(self, text, query_text, fuzzy_pattern, min_threshold=0.5, max_threshold=1.5, min_m_size=5):
min_len = max(len(query_text) * min_threshold, min_m_size)
max_len = len(query_text) * max_threshold
match_groups = [m for m in fuzzy_pattern.finditer(text)]
matches = []
for m_g in match_groups:
if min_len < len(m_g.group()) < max_len:
matches.append(m_g)
if len(match_groups) == 0:
return 0.0, None
best_ratio = 0.0
best_match = None
self.seq_matcher.set_seq1(query_text)
for m in matches:
self.seq_matcher.set_seq2(m.group())
ratio = self.seq_matcher.ratio()
if ratio > best_ratio:
best_ratio = ratio
best_match = m
return best_ratio, best_match
def generate_markup(self, title, text, paragraphs, bodies, trim=32, min_ratio=0.00):
paragraphs = [title] + paragraphs
markup = Markup.blank()
for i, paragraph in enumerate(paragraphs):
paragraph_markup = MarkupChunk(text=paragraph)
if i == 0:
markup.set_title(paragraph_markup)
else:
markup.add_body_element(paragraph_markup)
sentences = self.sent_tokenize(paragraph)
quotes = self.extract_quoted(paragraph)
segments = self.select_segments(sentences, quotes, min_size=5)
segments = [self.simplified_text(s) for s in segments]
if trim is not None and trim > 0:
for i in xrange(len(segments)):
segments[i] = " ".join(segments[i].split()[:trim])
triggered_regexes = []
for segment in segments:
fuzzy_pattern = self.compile_fuzzy_pattern(segment)
found_refs = []
for body, body_id in bodies:
if self.ffs(body, segment, fuzzy_pattern, min_ratio=0.3):
found_refs.append(body_id)
if len(found_refs) > 0:
triggered_regexes.append((segment, fuzzy_pattern, found_refs))
for segment, fuzzy_pattern, found_refs in triggered_regexes:
ratio, match = self.fuzzy_group_search(paragraph,
segment,
fuzzy_pattern,
min_threshold=0,
max_threshold=10)
if ratio >= min_ratio:
ref_object = EntityReference(span=match.span(),
match=match.group(),
references=found_refs,
extra_attr={"ratio": ratio})
paragraph_markup.add_ref(ref_object)
return markup
| mit |
bdoner/SickRage | lib/guessit/transfo/guess_video_rexps.py | 40 | 3082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from guessit.patterns import _psep
from guessit.containers import PropertiesContainer
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.patterns.numeral import parse_numeral
class GuessVideoRexps(Transformer):
def __init__(self):
Transformer.__init__(self, 25)
self.container = PropertiesContainer(canonical_from_pattern=False)
self.container.register_property(None, 'cd' + _psep + '(?P<cdNumber>[0-9])(?:' + _psep + 'of' + _psep + '(?P<cdNumberTotal>[0-9]))?', confidence=1.0, enhance=False, global_span=True, formatter=parse_numeral)
self.container.register_property('cdNumberTotal', '([1-9])' + _psep + 'cds?', confidence=0.9, enhance=False, formatter=parse_numeral)
self.container.register_property('bonusNumber', 'x([0-9]{1,2})', enhance=False, global_span=True, formatter=parse_numeral)
self.container.register_property('filmNumber', 'f([0-9]{1,2})', enhance=False, global_span=True, formatter=parse_numeral)
self.container.register_property('edition', 'collector', 'collector-edition', 'edition-collector', canonical_form='Collector Edition')
self.container.register_property('edition', 'special-edition', 'edition-special', canonical_form='Special Edition')
self.container.register_property('edition', 'criterion', 'criterion-edition', 'edition-criterion', canonical_form='Criterion Edition')
self.container.register_property('edition', 'deluxe', 'cdeluxe-edition', 'edition-deluxe', canonical_form='Deluxe Edition')
self.container.register_property('edition', 'director\'?s?-cut', 'director\'?s?-cut-edition', 'edition-director\'?s?-cut', canonical_form='Director\'s cut')
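        # Illustrative matches (added for clarity): with the separator pattern
        # _psep, the 'cd' property above is intended to match strings such as
        # "cd1" or "cd1of2" (filling cdNumber/cdNumberTotal), and the
        # 'cdNumberTotal' property strings such as "2cd" or "2 cds".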
def supported_properties(self):
return self.container.get_supported_properties()
def guess_video_rexps(self, string, node=None, options=None):
found = self.container.find_properties(string, node, options)
return self.container.as_guess(found, string)
def process(self, mtree, options=None):
GuessFinder(self.guess_video_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves())
| gpl-3.0 |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 12 | 6775 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
    (such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
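  # Example (illustrative, not part of the original file): for a wrapper
  # estimator holding a sub-estimator under the attribute name 'base',
  # est.set_params(base__C=10) routes C=10 to the contained object via the
  # '<component>__<parameter>' convention handled above.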
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
USE OF THIS EXCEPTION IS DEPRECATED.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
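# Illustrative call (mirrors the sklearn function this shim stands in for):
#   X_train, X_test, y_train, y_test = _train_test_split(X, y, test_size=0.25)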
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
IONISx/edx-platform | lms/djangoapps/course_blocks/transformers/tests/test_helpers.py | 9 | 12710 | """
Test helpers for testing course block transformers.
"""
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from lms.djangoapps.courseware.access import has_access
from ...api import get_course_blocks
class CourseStructureTestCase(ModuleStoreTestCase):
"""
Helper for test cases that need to build course structures.
"""
def setUp(self):
"""
Create users.
"""
super(CourseStructureTestCase, self).setUp()
# Set up users.
self.password = 'test'
self.user = UserFactory.create(password=self.password)
self.staff = UserFactory.create(password=self.password, is_staff=True)
def create_block_id(self, block_type, block_ref):
"""
Returns the block id (display name) that is used in the test
course structures for the given block type and block reference
string.
"""
return '{}_{}'.format(block_type, block_ref)
def build_xblock(self, block_hierarchy, block_map, parent):
"""
Build an XBlock, add it to block_map, and call build_xblock on
the children defined in block_dict.
Arguments:
block_hierarchy (BlockStructureDict): Definition of
hierarchy, from this block down.
block_map (dict[str: XBlock]): Mapping from '#ref' values to
their XBlocks.
parent (XBlock): Parent block for this xBlock.
"""
block_type = block_hierarchy['#type']
block_ref = block_hierarchy['#ref']
factory = (CourseFactory if block_type == 'course' else ItemFactory)
kwargs = {key: value for key, value in block_hierarchy.iteritems() if key[0] != '#'}
if block_type != 'course':
kwargs['category'] = block_type
if parent:
kwargs['parent'] = parent
xblock = factory.create(
display_name=self.create_block_id(block_type, block_ref),
publish_item=True,
**kwargs
)
block_map[block_ref] = xblock
for child_hierarchy in block_hierarchy.get('#children', []):
self.build_xblock(child_hierarchy, block_map, xblock)
def add_parents(self, block_hierarchy, block_map):
"""
Recursively traverse the block_hierarchy and add additional
parents. This method is expected to be called only after all
blocks have been created.
The additional parents are obtained from the '#parents' field
and is expected to be a list of '#ref' values of the parents.
Note: if a '#parents' field is found, the block is removed from
the course block since it is expected to not belong to the root.
If the block is meant to be a direct child of the course as
well, the course should be explicitly listed in '#parents'.
Arguments:
block_hierarchy (BlockStructureDict):
Definition of block hierarchy.
block_map (dict[str: XBlock]):
Mapping from '#ref' values to their XBlocks.
"""
parents = block_hierarchy.get('#parents', [])
if parents:
block_key = block_map[block_hierarchy['#ref']].location
# First remove the block from the course.
# It would be re-added to the course if the course was
# explicitly listed in parents.
course = modulestore().get_item(block_map['course'].location)
course.children.remove(block_key)
block_map['course'] = update_block(course)
# Add this to block to each listed parent.
for parent_ref in parents:
parent_block = modulestore().get_item(block_map[parent_ref].location)
parent_block.children.append(block_key)
block_map[parent_ref] = update_block(parent_block)
# recursively call the children
for child_hierarchy in block_hierarchy.get('#children', []):
self.add_parents(child_hierarchy, block_map)
def build_course(self, course_hierarchy):
"""
Build a hierarchy of XBlocks.
Arguments:
course_hierarchy (BlockStructureDict): Definition of course
hierarchy.
where a BlockStructureDict is a list of dicts in the form {
'key1': 'value1',
...
'keyN': 'valueN',
'#type': block_type,
'#ref': short_string_for_referencing_block,
'#children': list[BlockStructureDict],
'#parents': list['#ref' values]
}
Special keys start with '#'; the rest just get passed as
kwargs to Factory.create.
Note: the caller has a choice of whether to create
(1) a nested block structure with children blocks embedded
within their parents, or
(2) a flat block structure with children blocks defined
alongside their parents and attached via the #parents
field, or
(3) a combination of both #1 and #2 used for whichever
blocks.
Note 2: When the #parents field is used in addition to the
nested pattern for a block, it specifies additional parents
that aren't already implied by having the block exist within
another block's #children field.
Returns:
dict[str: XBlock]:
Mapping from '#ref' values to their XBlocks.
"""
block_map = {}
# build the course tree
for block_hierarchy in course_hierarchy:
self.build_xblock(block_hierarchy, block_map, parent=None)
# add additional parents if the course is a DAG or built
# linearly (without specifying '#children' values)
for block_hierarchy in course_hierarchy:
self.add_parents(block_hierarchy, block_map)
return block_map
def get_block_key_set(self, blocks, *refs):
"""
Gets the set of usage keys that correspond to the list of
#ref values as defined on blocks.
Returns: set[UsageKey]
"""
xblocks = (blocks[ref] for ref in refs)
return set([xblock.location for xblock in xblocks])
class BlockParentsMapTestCase(ModuleStoreTestCase):
"""
Test helper class for creating a test course of
a graph of vertical blocks based on a parents_map.
"""
# Tree formed by parent_map:
# 0
# / \
# 1 2
# / \ / \
# 3 4 / 5
# \ /
# 6
# Note the parents must always have lower indices than their
# children.
parents_map = [[], [0], [0], [1], [1], [2], [2, 4]]
def setUp(self, **kwargs):
super(BlockParentsMapTestCase, self).setUp(**kwargs)
# create the course
self.course = CourseFactory.create()
# an ordered list of block locations, where the index
# corresponds to the block's index in the parents_map.
self.xblock_keys = [self.course.location]
# create all other blocks in the course
for i, parents_index in enumerate(self.parents_map):
if i == 0:
continue # course already created
# create the block as a vertical
self.xblock_keys.append(
ItemFactory.create(
parent=self.get_block(parents_index[0]),
category="vertical",
).location
)
# add additional parents
if len(parents_index) > 1:
for index in range(1, len(parents_index)):
parent_index = parents_index[index]
parent_block = self.get_block(parent_index)
parent_block.children.append(self.xblock_keys[i])
update_block(parent_block)
self.password = 'test'
self.student = UserFactory.create(is_staff=False, username='test_student', password=self.password)
self.staff = UserFactory.create(is_staff=True, username='test_staff', password=self.password)
CourseEnrollmentFactory.create(is_active=True, mode='honor', user=self.student, course_id=self.course.id)
def assert_transform_results(
self,
test_user,
expected_user_accessible_blocks,
blocks_with_differing_access,
transformers=None,
):
"""
Verifies the results of transforming the blocks in the course.
Arguments:
test_user (User): The non-staff user that is being tested.
For example, self.student.
expected_user_accessible_blocks (set(int)): Set of blocks
(indices) that a student user is expected to have access
to after the transformers are executed.
blocks_with_differing_access (set(int)): Set of
blocks (indices) whose access will differ from the
transformers result and the current implementation of
has_access.
transformers (BlockStructureTransformer): An optional list
of transformer that are to be executed. If not
provided, the default value used by get_course_blocks
is used.
"""
def check_results(user, expected_accessible_blocks, blocks_with_differing_access):
"""
Verifies the results of transforming the blocks in the
course for the given user.
"""
self.client.login(username=user.username, password=self.password)
block_structure = get_course_blocks(user, self.course.location, transformers=transformers)
# Enumerate through all the blocks that were created in the
# course
for i, xblock_key in enumerate(self.xblock_keys):
# verify existence of the block
block_structure_result = block_structure.has_block(xblock_key)
has_access_result = bool(has_access(user, 'load', self.get_block(i), course_key=self.course.id))
# compare with expected value
self.assertEquals(
block_structure_result,
i in expected_accessible_blocks,
"block_structure return value {0} not equal to expected value for block {1} for user {2}".format(
block_structure_result, i, user.username
)
)
# compare with has_access result
if i in blocks_with_differing_access:
self.assertNotEqual(
block_structure_result,
has_access_result,
"block structure ({0}) & has_access ({1}) results are equal for block {2} for user {3}".format(
block_structure_result, has_access_result, i, user.username
)
)
else:
self.assertEquals(
block_structure_result,
has_access_result,
"block structure ({0}) & has_access ({1}) results not equal for block {2} for user {3}".format(
block_structure_result, has_access_result, i, user.username
)
)
self.client.logout()
# verify given test user has access to expected blocks
check_results(
test_user,
expected_user_accessible_blocks,
blocks_with_differing_access
)
# verify staff has access to all blocks
check_results(self.staff, set(range(len(self.parents_map))), {})
def get_block(self, block_index):
"""
Helper method to retrieve the requested block (index) from the
modulestore
"""
return modulestore().get_item(self.xblock_keys[block_index])
def update_block(block):
"""
Helper method to update the block in the modulestore
"""
return modulestore().update_item(block, 'test_user')
def create_location(org, course, run, block_type, block_id):
"""
Returns the usage key for the given key parameters using the
default modulestore
"""
return modulestore().make_course_key(org, course, run).make_usage_key(block_type, block_id)
| agpl-3.0 |
pkruskal/scikit-learn | sklearn/linear_model/randomized_l1.py | 33 | 23358 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
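# Note (added for clarity): each entry of the returned ``scores_`` is the
# fraction of the ``n_resampling`` randomized fits in which that feature was
# selected, i.e. the stability-selection score later thresholded via
# ``selection_threshold``.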
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
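# Note (added for clarity): the boolean array returned above marks, for each
# feature (and each requested alpha), whether its Lasso coefficient on the
# subsampled, randomly re-weighted design was non-zero.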
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
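        # A feature counts as selected if any class coefficient of the fitted
        # L1-penalised model is (numerically) non-zero.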
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
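    # alpha_max is the smallest penalty for which all Lasso coefficients are
    # zero; alphas along the path are later rescaled relative to it.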
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
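# Illustrative usage (added for clarity; the 0.75 threshold is arbitrary):
#   alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0)
#   stable = scores_path.max(axis=1) > 0.75  # features stable along the path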
| bsd-3-clause |
GoogleCloudPlatform/public-datasets-pipelines | datasets/irs_990/pipelines/irs_990_ez_2014/irs_990_ez_2014_dag.py | 1 | 20827 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="irs_990.irs_990_ez_2014",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
irs_990_ez_2014_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="irs_990_ez_2014_transform_csv",
startup_timeout_seconds=600,
name="irs_990_ez_2014",
service_account_name="datasets",
namespace="composer",
image_pull_policy="Always",
image="{{ var.json.irs_990.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.irs.gov/pub/irs-soi/14eofinextract990ez.zip",
"SOURCE_FILE": "files/data.dat",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/irs_990/irs_990_ez_2014/data_output.csv",
"PIPELINE_NAME": "irs_990_ez_2014",
"CSV_HEADERS": '["ein","tax_pd","subseccd","totcntrbs","prgmservrev","duesassesmnts","othrinvstinc","grsamtsalesastothr","basisalesexpnsothr","gnsaleofastothr","grsincgaming","grsrevnuefndrsng","direxpns","netincfndrsng","grsalesminusret","costgoodsold","grsprft","othrevnue","totrevnue","totexpns","totexcessyr","othrchgsnetassetfnd","networthend","totassetsend","totliabend","totnetassetsend","actvtynotprevrptcd","chngsinorgcd","unrelbusincd","filedf990tcd","contractioncd","politicalexpend","filedf1120polcd","loanstoofficerscd","loanstoofficers","initiationfee","grspublicrcpts","s4958excessbenefcd","prohibtdtxshltrcd","nonpfrea","totnooforgscnt","totsupport","gftgrntsrcvd170","txrevnuelevied170","srvcsval170","pubsuppsubtot170","exceeds2pct170","pubsupplesspct170","samepubsuppsubtot170","grsinc170","netincunreltd170","othrinc170","totsupp170","grsrcptsrelated170","totgftgrntrcvd509","grsrcptsadmissn509","grsrcptsactivities509","txrevnuelevied509","srvcsval509","pubsuppsubtot509","rcvdfrmdisqualsub509","exceeds1pct509","subtotpub509","pubsupplesub509","samepubsuppsubtot509","grsinc509","unreltxincls511tx509","subtotsuppinc509","netincunrelatd509","othrinc509","totsupp509"]',
"RENAME_MAPPINGS": '{"EIN": "ein","a_tax_prd": "tax_pd","taxpd": "tax_pd","taxprd": "tax_pd","subseccd": "subseccd","prgmservrev": "prgmservrev","duesassesmnts": "duesassesmnts","othrinvstinc": "othrinvstinc","grsamtsalesastothr": "grsamtsalesastothr","basisalesexpnsothr": "basisalesexpnsothr","gnsaleofastothr": "gnsaleofastothr","grsincgaming": "grsincgaming","grsrevnuefndrsng": "grsrevnuefndrsng","direxpns": "direxpns","netincfndrsng": "netincfndrsng","grsalesminusret": "grsalesminusret","costgoodsold": "costgoodsold","grsprft": "grsprft","othrevnue": "othrevnue","totrevnue": "totrevnue","totexpns": "totexpns","totexcessyr": "totexcessyr","othrchgsnetassetfnd": "othrchgsnetassetfnd","networthend": "networthend","totassetsend": "totassetsend","totliabend": "totliabend","totnetassetsend": "totnetassetsend","actvtynotprevrptcd": "actvtynotprevrptcd","chngsinorgcd": "chngsinorgcd","unrelbusincd": "unrelbusincd","filedf990tcd": "filedf990tcd","contractioncd": "contractioncd","politicalexpend": "politicalexpend","filedfYYN0polcd": "filedf1120polcd","loanstoofficerscd": "loanstoofficerscd","loanstoofficers": "loanstoofficers","initiationfee": "initiationfee","grspublicrcpts": "grspublicrcpts","s4958excessbenefcd": "s4958excessbenefcd","prohibtdtxshltrcd": "prohibtdtxshltrcd","nonpfrea": "nonpfrea","totnoforgscnt": "totnooforgscnt","totsupport": "totsupport","gftgrntrcvd170": "gftgrntsrcvd170","txrevnuelevied170": "txrevnuelevied170","srvcsval170": "srvcsval170","pubsuppsubtot170": "pubsuppsubtot170","excds2pct170": "exceeds2pct170","pubsupplesspct170": "pubsupplesspct170","samepubsuppsubtot170": "samepubsuppsubtot170","grsinc170": "grsinc170","netincunrelatd170": "netincunreltd170","othrinc170": "othrinc170","totsupport170": "totsupp170","grsrcptsrelatd170": "grsrcptsrelated170","totgftgrntrcvd509": "totgftgrntrcvd509","grsrcptsadmiss509": "grsrcptsadmissn509","grsrcptsactvts509": "grsrcptsactivities509","txrevnuelevied509": "txrevnuelevied509","srvcsval509": "srvcsval509","pubsuppsubtot509": "pubsuppsubtot509","rcvdfrmdisqualsub509": "rcvdfrmdisqualsub509","excds1pct509": "exceeds1pct509","subtotpub509": "subtotpub509","pubsupplesssub509": "pubsupplesub509","samepubsuppsubtot509": "samepubsuppsubtot509","grsinc509": "grsinc509","unreltxincls511tx509": "unreltxincls511tx509","subtotsuppinc509": "subtotsuppinc509","netincunreltd509": "netincunrelatd509","othrinc509": "othrinc509","totsupp509": "totsupp509","elf": "elf","totcntrbs": "totcntrbs"}',
},
resources={"request_memory": "2G", "request_cpu": "1"},
)
# Task to load CSV data to a BigQuery table
load_irs_990_ez_2014_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_irs_990_ez_2014_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/irs_990/irs_990_ez_2014/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="irs_990.irs_990_ez_2014",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "ein",
"type": "string",
"description": "Employer Identification Number",
"mode": "required",
},
{
"name": "tax_pd",
"type": "integer",
"description": "Tax period",
"mode": "nullable",
},
{
"name": "subseccd",
"type": "integer",
"description": "Subsection code",
"mode": "nullable",
},
{
"name": "totcntrbs",
"type": "integer",
"description": "Contributions gifts grants etc received",
"mode": "nullable",
},
{
"name": "prgmservrev",
"type": "integer",
"description": "Program service revenue",
"mode": "nullable",
},
{
"name": "duesassesmnts",
"type": "integer",
"description": "Membership dues and assessments",
"mode": "nullable",
},
{
"name": "othrinvstinc",
"type": "integer",
"description": "Investment income",
"mode": "nullable",
},
{
"name": "grsamtsalesastothr",
"type": "integer",
"description": "Gross amount from sale of assets",
"mode": "nullable",
},
{
"name": "basisalesexpnsothr",
"type": "integer",
"description": "Cost or other basis and sales expenses",
"mode": "nullable",
},
{
"name": "gnsaleofastothr",
"type": "integer",
"description": "Gain or (loss) from sale of assets",
"mode": "nullable",
},
{
"name": "grsincgaming",
"type": "integer",
"description": "Gross income from gaming",
"mode": "nullable",
},
{
"name": "grsrevnuefndrsng",
"type": "integer",
"description": "Special events gross revenue",
"mode": "nullable",
},
{
"name": "direxpns",
"type": "integer",
"description": "Special events direct expenses",
"mode": "nullable",
},
{
"name": "netincfndrsng",
"type": "integer",
"description": "Special events net income (or loss)",
"mode": "nullable",
},
{
"name": "grsalesminusret",
"type": "integer",
"description": "Gross sales of inventory",
"mode": "nullable",
},
{
"name": "costgoodsold",
"type": "integer",
"description": "Less: cost of goods sold",
"mode": "nullable",
},
{
"name": "grsprft",
"type": "integer",
"description": "Gross profit (or loss) from sales of inventory",
"mode": "nullable",
},
{
"name": "othrevnue",
"type": "integer",
"description": "Other revenue - total",
"mode": "nullable",
},
{
"name": "totrevnue",
"type": "integer",
"description": "Total revenue",
"mode": "nullable",
},
{
"name": "totexpns",
"type": "integer",
"description": "Total expenses",
"mode": "nullable",
},
{
"name": "totexcessyr",
"type": "integer",
"description": "Excess or deficit",
"mode": "nullable",
},
{
"name": "othrchgsnetassetfnd",
"type": "integer",
"description": "Other changes in net assets",
"mode": "nullable",
},
{
"name": "networthend",
"type": "integer",
"description": "Net assets EOY",
"mode": "nullable",
},
{
"name": "totassetsend",
"type": "integer",
"description": "Total assets e-o-y",
"mode": "nullable",
},
{
"name": "totliabend",
"type": "integer",
"description": "Total liabilities e-o-y",
"mode": "nullable",
},
{
"name": "totnetassetsend",
"type": "integer",
"description": "Total net worth e-o-y",
"mode": "nullable",
},
{
"name": "actvtynotprevrptcd",
"type": "string",
"description": "Activity not previously reported?",
"mode": "nullable",
},
{
"name": "chngsinorgcd",
"type": "string",
"description": "Significant changes to governing docs?",
"mode": "nullable",
},
{
"name": "unrelbusincd",
"type": "string",
"description": "UBI over $1000?",
"mode": "nullable",
},
{
"name": "filedf990tcd",
"type": "string",
"description": "Organization Filed 990T",
"mode": "nullable",
},
{
"name": "contractioncd",
"type": "string",
"description": "Liquidation dissolution termination or contraction",
"mode": "nullable",
},
{
"name": "politicalexpend",
"type": "integer",
"description": "Direct or indirect political expenditures",
"mode": "nullable",
},
{
"name": "filedf1120polcd",
"type": "string",
"description": "File Form 1120-POL?",
"mode": "nullable",
},
{
"name": "loanstoofficerscd",
"type": "string",
"description": "Loans to/from officers directors or trustees?",
"mode": "nullable",
},
{
"name": "loanstoofficers",
"type": "integer",
"description": "Amount of loans to/from officers",
"mode": "nullable",
},
{
"name": "initiationfee",
"type": "integer",
"description": "Initiation fees and capital contributions",
"mode": "nullable",
},
{
"name": "grspublicrcpts",
"type": "integer",
"description": "Gross receipts for public use of club facilities",
"mode": "nullable",
},
{
"name": "s4958excessbenefcd",
"type": "string",
"description": "Section 4958 excess benefit transactions?",
"mode": "nullable",
},
{
"name": "prohibtdtxshltrcd",
"type": "string",
"description": "Party to a prohibited tax shelter transaction?",
"mode": "nullable",
},
{
"name": "nonpfrea",
"type": "integer",
"description": "Reason for non-PF status",
"mode": "nullable",
},
{
"name": "totnooforgscnt",
"type": "integer",
"description": "Number of organizations supported",
"mode": "nullable",
},
{
"name": "totsupport",
"type": "integer",
"description": "Sum of amounts of support",
"mode": "nullable",
},
{
"name": "gftgrntsrcvd170",
"type": "integer",
"description": "Gifts grants membership fees received (170)",
"mode": "nullable",
},
{
"name": "txrevnuelevied170",
"type": "integer",
"description": "Tax revenues levied (170)",
"mode": "nullable",
},
{
"name": "srvcsval170",
"type": "integer",
"description": "Services or facilities furnished by gov (170)",
"mode": "nullable",
},
{
"name": "pubsuppsubtot170",
"type": "integer",
"description": "Public support subtotal (170)",
"mode": "nullable",
},
{
"name": "exceeds2pct170",
"type": "integer",
"description": "Amount support exceeds total (170)",
"mode": "nullable",
},
{
"name": "pubsupplesspct170",
"type": "integer",
"description": "Public support (170)",
"mode": "nullable",
},
{
"name": "samepubsuppsubtot170",
"type": "integer",
"description": "Public support from line 4 (170)",
"mode": "nullable",
},
{
"name": "grsinc170",
"type": "integer",
"description": "Gross income from interest etc (170)",
"mode": "nullable",
},
{
"name": "netincunreltd170",
"type": "integer",
"description": "Net UBI (170)",
"mode": "nullable",
},
{
"name": "othrinc170",
"type": "integer",
"description": "Other income (170)",
"mode": "nullable",
},
{
"name": "totsupp170",
"type": "integer",
"description": "Total support (170)",
"mode": "nullable",
},
{
"name": "grsrcptsrelated170",
"type": "integer",
"description": "Gross receipts from related activities (170)",
"mode": "nullable",
},
{
"name": "totgftgrntrcvd509",
"type": "integer",
"description": "Gifts grants membership fees received (509)",
"mode": "nullable",
},
{
"name": "grsrcptsadmissn509",
"type": "integer",
"description": "Receipts from admissions merchandise etc (509)",
"mode": "nullable",
},
{
"name": "grsrcptsactivities509",
"type": "integer",
"description": "Gross receipts from related activities (509)",
"mode": "nullable",
},
{
"name": "txrevnuelevied509",
"type": "integer",
"description": "Tax revenues levied (509)",
"mode": "nullable",
},
{
"name": "srvcsval509",
"type": "integer",
"description": "Services or facilities furnished by gov (509)",
"mode": "nullable",
},
{
"name": "pubsuppsubtot509",
"type": "integer",
"description": "Public support subtotal (509)",
"mode": "nullable",
},
{
"name": "rcvdfrmdisqualsub509",
"type": "integer",
"description": "Amounts from disqualified persons (509)",
"mode": "nullable",
},
{
"name": "exceeds1pct509",
"type": "integer",
"description": "Amount support exceeds total (509)",
"mode": "nullable",
},
{
"name": "subtotpub509",
"type": "integer",
"description": "Public support subtotal (509)",
"mode": "nullable",
},
{
"name": "pubsupplesub509",
"type": "integer",
"description": "Public support (509)",
"mode": "nullable",
},
{
"name": "samepubsuppsubtot509",
"type": "integer",
"description": "Public support from line 6 (509)",
"mode": "nullable",
},
{
"name": "grsinc509",
"type": "integer",
"description": "Gross income from interest etc (509)",
"mode": "nullable",
},
{
"name": "unreltxincls511tx509",
"type": "integer",
"description": "Net UBI (509)",
"mode": "nullable",
},
{
"name": "subtotsuppinc509",
"type": "integer",
"description": "Subtotal total support (509)",
"mode": "nullable",
},
{
"name": "netincunrelatd509",
"type": "integer",
"description": "Net income from UBI not in 10b (509)",
"mode": "nullable",
},
{
"name": "othrinc509",
"type": "integer",
"description": "Other income (509)",
"mode": "nullable",
},
{
"name": "totsupp509",
"type": "integer",
"description": "Total support (509)",
"mode": "nullable",
},
],
)
irs_990_ez_2014_transform_csv >> load_irs_990_ez_2014_to_bq
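# Hedged local sanity check, not part of the deployed pipeline: it assumes this
# module is importable by an Airflow installation that has the google and
# cncf.kubernetes providers installed. Printing the task ids confirms the
# transform -> load ordering declared above.
if __name__ == "__main__":
    print([task.task_id for task in dag.tasks])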
| apache-2.0 |
mekhod/Pandas-Multi-Colomn-Processor | MultiColProcessor/MultiColProcessor.py | 1 | 4999 | import pandas as pd
import numpy as np
from sklearn import preprocessing
##
class MultiColomnLabelEncoder:
##
def __init__(self):
self.dataTypes = {}
self.__catColumns = []
self.__MultiLE = {}
## Later, self.dataTypes will be used to convert dtypes to the original ones.
def __Get_Dtypes(self, data=pd.DataFrame()):
        ## to get original data datatypes
for colomn in data.columns:
self.dataTypes[colomn] = data[colomn].dtypes
return self
##
def fit(self, data):
##
self.__Get_Dtypes(data)
##
self.__catColumns = [cat for cat in self.dataTypes.keys()
if (self.dataTypes[cat].name == 'category')]
##
for col in self.__catColumns:
le = preprocessing.LabelEncoder()
le.fit(data.loc[:, col])
self.__MultiLE[col] = le
##
return self
##
def transform(self, data):
##
catData = data[self.__catColumns]
data = data.drop(self.__catColumns, axis=1)
##
def Transform_Rec(dta=catData):
##
nCol = dta.shape[1]
##
if nCol == 1:
col = dta.columns[0]
le = self.__MultiLE[col]
transformed = le.transform(dta.iloc[:, 0])
transformed = pd.DataFrame({col: transformed})
##
return transformed
else:
##
if (nCol % 2 == 0):
middle_index = int(nCol / 2)
else:
middle_index = int(nCol / 2 - 0.5)
##
left = dta.iloc[:, :middle_index]
right = dta.iloc[:, middle_index:]
##
return pd.concat([Transform_Rec(dta=left), Transform_Rec(dta=right)], axis=1)
##
catData = Transform_Rec(dta=catData)
catData.set_index(data.index, inplace=True)
##
data = pd.concat([data, catData], axis=1)
##
for i, j in self.dataTypes.items():
try:
data[i] = data[i].astype(j)
except:
pass
##
return data
##
class MultiColomnOneHotEncoder:
##
def __init__(self):
self.__catColumns = []
self.__MultiOHE = {}
##
def __getCategoryColomns(self, data=pd.DataFrame()):
catColumns = []
for i, j in enumerate(data):
if (data.dtypes[i].name == 'category'):
catColumns.append(j)
else:
continue
##
self.__catColumns = catColumns
##
return
##
def fit(self, data):
##
self.__getCategoryColomns(data)
##
for col in self.__catColumns:
OneHotEncoder = preprocessing.OneHotEncoder(sparse=False)
OneHotEncoder.fit(np.array(data.loc[:, col]).reshape(-1, 1))
self.__MultiOHE[col] = OneHotEncoder
##
return self
def transform(self, data):
##
catData = data[self.__catColumns]
data = data.drop(self.__catColumns, axis=1)
##
def Transform_Rec(dta=catData):
##
nCol = dta.shape[1]
##
if nCol == 1:
##
col = dta.columns[0]
OneHotEncoder = self.__MultiOHE[col]
transformed = OneHotEncoder.transform(np.array(dta.loc[:, col]).reshape(-1, 1))
transformed = pd.DataFrame(transformed)
transformed.columns = [str(col) + '_' + str(c) for c in transformed.columns]
##
return transformed
else:
##
if (nCol % 2 == 0):
middle_index = int(nCol / 2)
else:
middle_index = int(nCol / 2 - 0.5)
##
left = dta.iloc[:, :middle_index]
right = dta.iloc[:, middle_index:]
##
return pd.concat([Transform_Rec(dta=left), Transform_Rec(dta=right)], axis=1)
##
transformedCatData = Transform_Rec(dta=catData)
transformedCatData.set_index(data.index, inplace=True)
##
return pd.concat([data, transformedCatData], axis=1)
##
class MultiColomnScaler:
##
def __init__(self):
self.scaler = object()
##
def fit(self, data):
##
self.scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
self.scaler.fit(data)
##
return self
##
def transform(self, data):
##
columns = data.columns.tolist()
##
        data = pd.DataFrame(self.scaler.transform(data.values))
##
data.columns = columns
##
return data | mit |
dimkal/mne-python | mne/time_frequency/tfr.py | 1 | 51415 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <[email protected]>
# Hari Bharadwaj <[email protected]>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import check_fname
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar
from ..externals.h5io import write_hdf5, read_hdf5
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
    sigma : float, (optional)
        It controls the width of the wavelet, i.e., its temporal
        resolution. If sigma is None the temporal resolution
        is adapted to the frequency, as for any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed the temporal resolution is fixed,
        as for the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
        W = oscillation * gaussian_envelope
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
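def _example_morlet_usage():
    """Hedged usage sketch (not part of the public API): build Morlet wavelets
    for a few made-up frequencies at a 1 kHz sampling rate. Lower frequencies
    yield longer wavelets because the temporal window scales with n_cycles / f.
    """
    freqs = np.array([5., 10., 20.])
    Ws = morlet(sfreq=1000., freqs=freqs, n_cycles=3., zero_mean=True)
    return [len(W) for W in Ws]  # wavelet lengths, longest first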
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
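def _example_cwt_morlet_usage():
    """Hedged usage sketch (not part of the public API): time-frequency
    decomposition of two made-up noisy signals. The output is complex, so
    power is taken as the squared magnitude."""
    rng = np.random.RandomState(0)
    X = rng.randn(2, 1000)                     # 2 signals, 1000 samples
    freqs = np.array([8., 12., 20.])
    tfr = cwt_morlet(X, sfreq=500., freqs=freqs, n_cycles=4.)
    power = np.abs(tfr) ** 2                   # shape (2, 3, 1000)
    return power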
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) the whole time
        interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
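def _example_single_trial_power_usage():
    """Hedged usage sketch (not part of the public API): per-epoch power on
    made-up data, baseline-corrected with a ratio over the first 200 ms."""
    rng = np.random.RandomState(0)
    data = rng.randn(5, 2, 500)                # epochs x channels x times
    times = np.arange(500) / 500.              # 0 .. 1 s at 500 Hz
    power = single_trial_power(data, sfreq=500.,
                               frequencies=np.array([10., 20.]),
                               n_cycles=2., times=times, baseline=(0., 0.2),
                               baseline_mode='ratio')
    return power.shape                         # (5, 2, 2, 500)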
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
            If False the TFR instance is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) the whole time
            interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
        vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != len(picks):
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
if show:
plt.show()
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) the whole time
            interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
        vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
        font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
import matplotlib.pyplot as plt
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
layout=layout, colorbar=colorbar, vmin=vmin,
vmax=vmax, cmap=cmap, layout_scale=layout_scale,
title=title, border=border, x_label='Time (ms)',
y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
font_color=font_color)
if show:
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
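def _example_tfr_io_roundtrip(tfr, fname='example-tfr.h5'):
    """Hedged illustration (not part of the public API): save an existing
    AverageTFR instance to disk with the '-tfr.h5' suffix that check_fname
    expects, then read it back. `tfr` and `fname` are placeholders."""
    tfr.save(fname, overwrite=True)
    # With a single unnamed TFR, write_tfrs stores it under condition 0.
    condition = tfr.comment if tfr.comment is not None else 0
    return read_tfrs(fname, condition=condition)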
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Use FFT-based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
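def _example_tfr_morlet_usage(epochs):
    """Hedged usage sketch (not part of the public API). `epochs` is assumed
    to be an existing Epochs instance with MEG and/or EEG channels; the
    frequency band and decimation below are made-up choices."""
    freqs = np.arange(6., 30., 3.)             # 6-27 Hz
    n_cycles = freqs / 2.                      # frequency-dependent window
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True, decim=3)
    return power, itc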
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
sampling Frequency
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
decim: int
Temporal decimation factor. Defaults to 1.
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Use FFT-based convolution or not.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
Note than this is brute force decimation, no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
method='multitaper-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='multitaper-itc'))
return out
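# A minimal usage sketch (illustrative only, not part of the module API).
# It assumes `epochs` is an existing mne.Epochs instance and that numpy is
# imported as `np`; the frequency grid is an arbitrary example choice.
#
#     freqs = np.arange(5., 50., 3.)
#     power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
#                                 time_bandwidth=4.0, return_itc=True)
#     power.plot([0])  # TFR of the first picked channel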
| bsd-3-clause |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/datasets/synthetic.py | 40 | 7451 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synthetic dataset generators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets.base import Dataset
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def circles(n_samples=100,
noise=None,
seed=None,
factor=0.8,
n_classes=2,
*args,
**kwargs):
"""Create circles separated by some value
Args:
n_samples: int, number of datapoints to generate
noise: float or None, standard deviation of the Gaussian noise added
seed: int or None, seed for the noise
factor: float, size factor of the inner circles with respect to the outer
ones
n_classes: int, number of classes to generate
Returns:
Shuffled features and labels for 'circles' synthetic dataset of type
`base.Dataset`
Note:
The multi-class support might not work as expected if `noise` is enabled
TODO:
- Generation of unbalanced data
Credit goes to (under BSD 3 clause):
B. Thirion,
G. Varoquaux,
A. Gramfort,
V. Michel,
O. Grisel,
G. Louppe,
J. Nothman
"""
if seed is not None:
np.random.seed(seed)
# Algo: 1) Generate initial circle, 2) For every class generate a smaller-radius circle
linspace = np.linspace(0, 2 * np.pi, n_samples // n_classes)
circ_x = np.empty(0, dtype=np.int32)
circ_y = np.empty(0, dtype=np.int32)
base_cos = np.cos(linspace)
base_sin = np.sin(linspace)
y = np.empty(0, dtype=np.int32)
for label in range(n_classes):
circ_x = np.append(circ_x, base_cos)
circ_y = np.append(circ_y, base_sin)
base_cos *= factor
base_sin *= factor
y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
# Add more points if n_samples is not divisible by n_classes (unbalanced!)
extras = n_samples % n_classes
circ_x = np.append(circ_x, np.cos(np.random.rand(extras) * 2 * np.pi))
circ_y = np.append(circ_y, np.sin(np.random.rand(extras) * 2 * np.pi))
y = np.append(y, np.zeros(extras, dtype=np.int32))
# Reshape the features/labels
X = np.vstack((circ_x, circ_y)).T
y = np.hstack(y)
# Shuffle the data
indices = np.random.permutation(range(n_samples))
if noise is not None:
X += np.random.normal(scale=noise, size=X.shape)
return Dataset(data=X[indices], target=y[indices])
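# A minimal usage sketch (illustrative only): generate 200 noisy points on two
# concentric circles. `Dataset` is the namedtuple imported above, so features
# and labels are available as `.data` and `.target`; the argument values below
# are arbitrary example choices.
#
#     ds = circles(n_samples=200, noise=0.05, seed=42, factor=0.5)
#     ds.data.shape    # (200, 2) -> 2-D features
#     ds.target.shape  # (200,)   -> integer class labels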
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def spirals(n_samples=100,
noise=None,
seed=None,
mode='archimedes',
n_loops=2,
*args,
**kwargs):
"""Create spirals
Currently only binary classification is supported for spiral generation
Args:
n_samples: int, number of datapoints to generate
noise: float or None, standard deviation of the Gaussian noise added
seed: int or None, seed for the noise
n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
mode: str, how the spiral should be generated. Current implementations:
'archimedes': a spiral with equal distances between branches
'bernoulli': logarithmic spiral with branch distances increasing
'fermat': a spiral with branch distances decreasing (sqrt)
Returns:
Shuffled features and labels for 'spirals' synthetic dataset of type
`base.Dataset`
Raises:
ValueError: If the generation `mode` is not valid
TODO:
- Generation of unbalanced data
"""
n_classes = 2 # I am not sure how to make it multiclass
_modes = {
'archimedes': _archimedes_spiral,
'bernoulli': _bernoulli_spiral,
'fermat': _fermat_spiral
}
if mode is None or mode not in _modes:
raise ValueError('Cannot generate spiral with mode %s' % mode)
if seed is not None:
np.random.seed(seed)
linspace = np.linspace(0, 2 * n_loops * np.pi, n_samples // n_classes)
spir_x = np.empty(0, dtype=np.int32)
spir_y = np.empty(0, dtype=np.int32)
y = np.empty(0, dtype=np.int32)
for label in range(n_classes):
base_cos, base_sin = _modes[mode](linspace, label * np.pi, *args, **kwargs)
spir_x = np.append(spir_x, base_cos)
spir_y = np.append(spir_y, base_sin)
y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
# Add more points if n_samples is not divisible by n_classes (unbalanced!)
extras = n_samples % n_classes
if extras > 0:
x_extra, y_extra = _modes[mode](np.random.rand(extras) * 2 * np.pi, *args,
**kwargs)
spir_x = np.append(spir_x, x_extra)
spir_y = np.append(spir_y, y_extra)
y = np.append(y, np.zeros(extras, dtype=np.int32))
# Reshape the features/labels
X = np.vstack((spir_x, spir_y)).T
y = np.hstack(y)
# Shuffle the data
indices = np.random.permutation(range(n_samples))
if noise is not None:
X += np.random.normal(scale=noise, size=X.shape)
return Dataset(data=X[indices], target=y[indices])
def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Archimedes spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = theta * np.cos(theta + theta_offset), theta * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale * theta) * np.cos(theta + theta_offset), np.exp(
exp_scale * theta) * np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Parabolic (Fermat's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = np.sqrt(theta) * np.cos(theta + theta_offset), np.sqrt(theta) * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
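# The three helpers above differ only in the polar radius assigned to each
# angle theta: r = theta (Archimedes), r = exp(exp_scale * theta) (Bernoulli)
# and r = sqrt(theta) (Fermat), each normalized to the unit square. A minimal
# usage sketch (illustrative only; argument values are arbitrary examples):
#
#     ds = spirals(n_samples=500, noise=0.02, seed=7, mode='fermat', n_loops=2)
#     ds.data.shape    # (500, 2)
#     ds.target.shape  # (500,)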
| apache-2.0 |
dimkal/mne-python | examples/datasets/plot_brainstorm_data.py | 8 | 2004 | """
============================
Brainstorm tutorial datasets
============================
Here we compute the evoked response from raw data for the Brainstorm
tutorial dataset. For comparison, see:
http://neuroimage.usc.edu/brainstorm/Tutorials/MedianNerveCtf
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID 879716,
13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets.brainstorm import bst_raw
from mne.io import Raw
print(__doc__)
tmin, tmax, event_id = -0.1, 0.3, 2 # take right-hand somato
reject = dict(mag=4e-12, eog=250e-6)
data_path = bst_raw.data_path()
raw_fname = data_path + '/MEG/bst_raw/' + \
'subj001_somatosensory_20111109_01_AUX-f_raw.fif'
raw = Raw(raw_fname, preload=True)
raw.plot()
# set EOG channel
raw.set_channel_types({'EEG058': 'eog'})
# show power line interference and remove it
raw.plot_psd()
raw.notch_filter(np.arange(60, 181, 60))
events = mne.find_events(raw, stim_channel='UPPT001')
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Compute epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False)
# compute evoked
evoked = epochs.average()
# remove physiological artifacts (eyeblinks, heartbeats) using SSP on baseline
evoked.add_proj(mne.compute_proj_evoked(evoked.crop(tmax=0, copy=True)))
evoked.apply_proj()
# fix stim artifact
mne.preprocessing.fix_stim_artifact(evoked)
# correct delays due to hardware (stim artifact is at 4 ms)
evoked.shift_time(-0.004)
# plot the result
evoked.plot()
# show topomaps
evoked.plot_topomap(times=np.array([0.016, 0.030, 0.060, 0.070]))
| bsd-3-clause |
yingzha/blocks-examples | markov_chain/dataset.py | 9 | 1480 | """Defines the dataset for a Markov chain.
Has to be in a separate module from the main script in order to be
unpicklable by a third party.
"""
import numpy
import copy
from fuel.datasets import Dataset
class MarkovChainDataset(Dataset):
"""Training data generator."""
num_states = 3
trans_prob = numpy.array([[0.1, 0.5, 0.4],
[0.1, 0.9, 0.0],
[0.3, 0.3, 0.4]])
values, vectors = numpy.linalg.eig(trans_prob.T)
equilibrium = vectors[:, values.argmax()]
equilibrium = equilibrium / equilibrium.sum()
trans_entropy = trans_prob * numpy.log(trans_prob + 1e-6)
entropy = equilibrium.dot(trans_entropy).sum()
provides_sources = ("data",)
def __init__(self, rng, seq_len, **kwargs):
self.rng = rng
self.seq_len = seq_len
super(MarkovChainDataset, self).__init__(**kwargs)
def open(self):
return copy.deepcopy(self.rng)
def _next_single(self, rng):
states = [0]
while len(states) != self.seq_len:
states.append(rng.multinomial(
1, self.trans_prob[states[-1]]).argmax())
return states
def get_data(self, state, request):
"""Generate random sequences from the family."""
assert isinstance(request, int)
x = numpy.zeros((self.seq_len, request), dtype='int64')
for i in range(request):
x[:, i] = self._next_single(state)
return (x,)
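# A minimal usage sketch (illustrative only, not part of the module): draw a
# batch of 5 sequences of length 10 from the chain; the RNG seed is arbitrary.
#
#     rng = numpy.random.RandomState(1)
#     dataset = MarkovChainDataset(rng, seq_len=10)
#     state = dataset.open()
#     (batch,) = dataset.get_data(state, request=5)
#     batch.shape  # (10, 5): one column per generated sequence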
| mit |
AnonymousBee/bitexchange | libs/coinkit/coinkit/words.py | 11 | 726962 | # -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
TOP_ENGLISH_WORDS = ["the", "of", "and", "to", "a", "in", "for", "is", "on", "that", "by", "this", "with", "i", "you", "it", "not", "or", "be", "are", "from", "at", "as", "your", "all", "have", "new", "more", "an", "was", "we", "will", "home", "can", "us", "about", "if", "page", "my", "has", "search", "free", "but", "our", "one", "other", "do", "no", "information", "time", "they", "site", "he", "up", "may", "what", "which", "their", "news", "out", "use", "any", "there", "see", "only", "so", "his", "when", "contact", "here", "business", "who", "web", "also", "now", "help", "get", "view", "online", "c", "e", "first", "am", "been", "would", "how", "were", "me", "s", "services", "some", "these", "click", "its", "like", "service", "x", "than", "find", "price", "date", "back", "top", "people", "had", "list", "name", "just", "over", "state", "year", "day", "into", "email", "two", "health", "n", "world", "re", "next", "used", "go", "b", "work", "last", "most", "products", "music", "buy", "data", "make", "them", "should", "product", "system", "post", "her", "city", "t", "add", "policy", "number", "such", "please", "available", "copyright", "support", "message", "after", "best", "software", "then", "jan", "good", "well", "d", "where", "rights", "public", "books", "high", "school", "through", "m", "each", "links", "she", "review", "years", "order", "very", "privacy", "book", "items", "company", "r", "read", "group", "sex", "need", "many", "user", "said", "de", "does", "set", "under", "general", "research", "university", "january", "mail", "full", "map", "reviews", "program", "life", "know", "games", "way", "days", "management", "p", "part", "could", "great", "united", "hotel", "real", "f", "item", "international", "center", "must", "store", "travel", "comments", "made", "development", "report", "off", "member", "details", "line", "terms", "before", "hotels", "did", "send", "right", "type", "because", "local", "those", "using", "results", "office", "education", "national", "car", "design", "take", "posted", "internet", "address", "community", "within", "states", "area", "want", "phone", "shipping", "reserved", "subject", "between", "forum", "family", "l", "long", "based", "w", "code", "show", "o", "even", "black", "check", "special", "prices", "index", "being", "women", "much", "sign", "file", "link", "open", "today", "technology", "south", "case", "project", "same", "pages", "uk", "version", "section", "own", "found", "sports", "house", "related", "security", "both", "g", "county", "american", "photo", "game", "members", "power", "while", "care", "network", "down", "computer", "systems", "three", "total", "place", "end", "following", "download", "h", "him", "without", "per", "access", "think", "north", "resources", "current", "posts", "big", "media", "law", "control", "water", "history", "pictures", "size", "art", "personal", "since", "including", "guide", "shop", "directory", "board", "location", "change", "white", "text", "small", "rating", "rate", "government", "children", "during", "usa", "return", "students", "v", "shopping", "account", "times", "sites", "level", "digital", "profile", "previous", "form", "events", "love", "old", "john", "main", "call", "hours", "image", "department", "title", "description", "non", "k", "y", "insurance", "another", "why", "shall", "property", "class", "cd", "still", "money", "quality", "every", "listing", "content", "country", "private", "little", "visit", "save", "tools", "low", "reply", "customer", "december", "compare", "movies", "include", "college", "value", 
"article", "york", "man", "card", "jobs", "provide", "j", "food", "source", "author", "different", "press", "u", "learn", "sale", "around", "print", "course", "job", "canada", "process", "teen", "room", "stock", "training", "too", "credit", "point", "join", "science", "men", "categories", "advanced", "west", "sales", "look", "english", "left", "team", "estate", "box", "conditions", "select", "windows", "gay", "thread", "week", "category", "note", "live", "large", "gallery", "table", "register", "however", "june", "october", "november", "market", "library", "really", "action", "start", "series", "model", "features", "air", "industry", "plan", "human", "provided", "tv", "yes", "required", "second", "hot", "accessories", "cost", "movie", "march", "la", "september", "better", "say", "questions", "july", "going", "medical", "test", "friend", "come", "dec", "study", "application", "cart", "staff", "articles", "san", "again", "play", "looking", "issues", "april", "never", "users", "complete", "street", "topic", "comment", "financial", "things", "working", "against", "standard", "tax", "person", "below", "mobile", "less", "got", "party", "payment", "equipment", "login", "student", "let", "programs", "offers", "legal", "above", "recent", "park", "stores", "side", "act", "problem", "red", "give", "memory", "performance", "social", "q", "august", "quote", "language", "story", "sell", "experience", "rates", "create", "key", "body", "young", "america", "important", "field", "few", "east", "paper", "single", "ii", "age", "activities", "club", "example", "girls", "additional", "password", "z", "latest", "something", "road", "gift", "question", "changes", "night", "ca", "hard", "texas", "oct", "pay", "four", "poker", "status", "browse", "issue", "range", "building", "seller", "court", "february", "always", "result", "light", "write", "war", "nov", "offer", "blue", "groups", "al", "easy", "given", "files", "event", "release", "analysis", "request", "china", "making", "picture", "needs", "possible", "might", "professional", "yet", "month", "major", "star", "areas", "future", "space", "committee", "hand", "sun", "cards", "problems", "london", "washington", "meeting", "become", "interest", "id", "child", "keep", "enter", "california", "share", "similar", "garden", "schools", "million", "added", "reference", "companies", "listed", "baby", "learning", "energy", "run", "delivery", "net", "popular", "term", "film", "stories", "put", "computers", "journal", "reports", "co", "try", "welcome", "central", "images", "president", "notice", "god", "original", "head", "radio", "until", "cell", "color", "self", "council", "away", "includes", "track", "australia", "discussion", "archive", "once", "others", "entertainment", "agreement", "format", "least", "society", "months", "log", "safety", "friends", "sure", "trade", "edition", "cars", "messages", "marketing", "tell", "further", "updated", "association", "able", "having", "provides", "david", "fun", "already", "green", "studies", "close", "common", "drive", "specific", "several", "gold", "feb", "living", "collection", "called", "short", "arts", "lot", "ask", "display", "limited", "solutions", "means", "director", "daily", "beach", "past", "natural", "whether", "due", "et", "five", "upon", "period", "planning", "says", "official", "weather", "mar", "land", "average", "done", "technical", "window", "france", "pro", "region", "island", "record", "direct", "conference", "environment", "records", "st", "district", "calendar", "costs", "style", "front", "statement", "parts", 
"aug", "ever", "early", "miles", "sound", "resource", "present", "applications", "either", "ago", "document", "word", "works", "material", "bill", "written", "talk", "federal", "rules", "final", "adult", "tickets", "thing", "centre", "requirements", "via", "cheap", "nude", "kids", "finance", "true", "minutes", "else", "mark", "third", "rock", "gifts", "europe", "reading", "topics", "bad", "individual", "tips", "plus", "auto", "cover", "usually", "edit", "together", "percent", "fast", "function", "fact", "unit", "getting", "global", "meet", "far", "economic", "en", "player", "projects", "lyrics", "often", "subscribe", "submit", "germany", "amount", "watch", "included", "feel", "though", "bank", "risk", "thanks", "everything", "deals", "various", "words", "jul", "production", "commercial", "james", "weight", "town", "heart", "advertising", "received", "choose", "treatment", "newsletter", "archives", "points", "knowledge", "magazine", "error", "camera", "girl", "currently", "construction", "toys", "registered", "clear", "golf", "receive", "domain", "methods", "chapter", "makes", "protection", "policies", "loan", "wide", "beauty", "manager", "india", "position", "taken", "sort", "models", "michael", "known", "half", "cases", "step", "engineering", "florida", "simple", "quick", "none", "wireless", "license", "paul", "friday", "lake", "whole", "annual", "published", "later", "basic", "shows", "corporate", "church", "method", "purchase", "customers", "active", "response", "practice", "hardware", "figure", "materials", "fire", "holiday", "chat", "enough", "designed", "along", "among", "death", "writing", "speed", "html", "countries", "loss", "face", "brand", "discount", "higher", "effects", "created", "remember", "standards", "oil", "bit", "yellow", "political", "increase", "advertise", "kingdom", "base", "near", "thought", "stuff", "french", "storage", "oh", "japan", "doing", "loans", "shoes", "entry", "stay", "nature", "orders", "availability", "africa", "summary", "turn", "mean", "growth", "notes", "agency", "king", "monday", "european", "activity", "copy", "although", "drug", "western", "income", "force", "cash", "employment", "overall", "bay", "river", "commission", "ad", "package", "contents", "seen", "players", "engine", "port", "album", "regional", "stop", "supplies", "started", "administration", "bar", "institute", "views", "plans", "double", "dog", "build", "screen", "exchange", "types", "soon", "lines", "electronic", "continue", "across", "benefits", "needed", "season", "apply", "someone", "held", "ny", "anything", "printer", "condition", "effective", "believe", "organization", "effect", "asked", "mind", "sunday", "selection", "casino", "lost", "tour", "menu", "volume", "cross", "anyone", "mortgage", "hope", "silver", "corporation", "wish", "inside", "solution", "mature", "role", "rather", "weeks", "addition", "came", "supply", "nothing", "certain", "executive", "running", "lower", "necessary", "union", "jewelry", "according", "dc", "clothing", "mon", "com", "particular", "fine", "names", "robert", "hour", "gas", "skills", "six", "bush", "islands", "advice", "career", "military", "rental", "decision", "leave", "british", "teens", "pre", "huge", "sat", "woman", "facilities", "zip", "bid", "kind", "sellers", "middle", "move", "cable", "opportunities", "taking", "values", "division", "coming", "tuesday", "object", "appropriate", "machine", "length", "actually", "nice", "score", "statistics", "client", "ok", "returns", "capital", "follow", "sample", "investment", "sent", "shown", 
"saturday", "christmas", "england", "culture", "band", "flash", "ms", "lead", "george", "choice", "went", "starting", "registration", "fri", "thursday", "courses", "consumer", "hi", "foreign", "artist", "outside", "furniture", "levels", "channel", "letter", "mode", "ideas", "wednesday", "structure", "fund", "summer", "allow", "degree", "contract", "button", "releases", "wed", "homes", "super", "male", "matter", "custom", "virginia", "almost", "took", "located", "multiple", "asian", "distribution", "editor", "inn", "industrial", "cause", "potential", "song", "ltd", "los", "focus", "late", "fall", "featured", "idea", "rooms", "female", "responsible", "inc", "communications", "win", "associated", "thomas", "primary", "cancer", "numbers", "reason", "tool", "browser", "spring", "foundation", "answer", "voice", "friendly", "schedule", "documents", "communication", "purpose", "feature", "bed", "comes", "police", "everyone", "independent", "approach", "brown", "physical", "operating", "hill", "maps", "medicine", "deal", "hold", "chicago", "forms", "glass", "happy", "tue", "smith", "wanted", "developed", "thank", "safe", "unique", "survey", "prior", "telephone", "sport", "ready", "feed", "animal", "sources", "mexico", "population", "pa", "regular", "secure", "navigation", "operations", "therefore", "ass", "simply", "evidence", "station", "christian", "round", "favorite", "understand", "option", "master", "valley", "recently", "probably", "sea", "built", "publications", "blood", "cut", "improve", "connection", "publisher", "hall", "larger", "networks", "earth", "parents", "impact", "transfer", "introduction", "kitchen", "strong", "tel", "carolina", "wedding", "properties", "hospital", "ground", "overview", "ship", "accommodation", "owners", "disease", "excellent", "paid", "italy", "perfect", "hair", "opportunity", "kit", "classic", "basis", "command", "cities", "william", "express", "award", "distance", "tree", "peter", "assessment", "ensure", "thus", "wall", "ie", "involved", "el", "extra", "especially", "pussy", "partners", "budget", "rated", "guides", "success", "maximum", "ma", "operation", "existing", "quite", "selected", "boy", "amazon", "patients", "restaurants", "beautiful", "warning", "wine", "locations", "horse", "vote", "forward", "flowers", "stars", "significant", "lists", "owner", "retail", "animals", "useful", "directly", "manufacturer", "ways", "est", "son", "providing", "rule", "mac", "housing", "takes", "iii", "bring", "catalog", "searches", "max", "trying", "mother", "authority", "considered", "told", "traffic", "programme", "joined", "strategy", "feet", "agent", "valid", "bin", "modern", "senior", "ireland", "teaching", "door", "grand", "testing", "trial", "charge", "units", "instead", "canadian", "cool", "normal", "wrote", "enterprise", "ships", "entire", "educational", "md", "leading", "metal", "positive", "fl", "fitness", "chinese", "opinion", "asia", "football", "abstract", "uses", "output", "funds", "mr", "greater", "likely", "develop", "employees", "artists", "alternative", "processing", "responsibility", "resolution", "java", "guest", "seems", "publication", "pass", "relations", "trust", "van", "contains", "session", "photography", "republic", "fees", "components", "vacation", "century", "academic", "assistance", "completed", "skin", "indian", "mary", "il", "expected", "ring", "grade", "dating", "pacific", "mountain", "organizations", "pop", "filter", "mailing", "vehicle", "longer", "consider", "int", "northern", "behind", "panel", "floor", "german", "buying", "match", 
"proposed", "default", "require", "iraq", "boys", "outdoor", "deep", "morning", "otherwise", "allows", "rest", "protein", "plant", "reported", "hit", "transportation", "mm", "pool", "politics", "partner", "disclaimer", "authors", "boards", "faculty", "parties", "fish", "membership", "mission", "eye", "string", "sense", "modified", "pack", "released", "stage", "internal", "goods", "recommended", "born", "unless", "richard", "detailed", "japanese", "race", "approved", "background", "target", "except", "character", "maintenance", "ability", "maybe", "functions", "ed", "moving", "brands", "places", "pretty", "spain", "southern", "yourself", "etc", "winter", "rape", "battery", "youth", "pressure", "submitted", "boston", "incest", "debt", "medium", "television", "interested", "core", "break", "purposes", "throughout", "sets", "dance", "wood", "itself", "defined", "papers", "playing", "awards", "fee", "studio", "reader", "virtual", "device", "established", "answers", "rent", "las", "remote", "dark", "external", "apple", "le", "regarding", "instructions", "min", "offered", "theory", "enjoy", "remove", "aid", "surface", "minimum", "visual", "host", "variety", "teachers", "martin", "manual", "block", "subjects", "agents", "increased", "repair", "fair", "civil", "steel", "understanding", "songs", "fixed", "wrong", "beginning", "hands", "associates", "finally", "classes", "paris", "ohio", "gets", "sector", "capacity", "requires", "jersey", "un", "fat", "fully", "father", "electric", "saw", "instruments", "quotes", "officer", "driver", "businesses", "dead", "respect", "unknown", "specified", "restaurant", "mike", "trip", "worth", "mi", "procedures", "poor", "teacher", "xxx", "eyes", "relationship", "workers", "farm", "georgia", "peace", "traditional", "campus", "tom", "showing", "creative", "coast", "benefit", "progress", "funding", "devices", "lord", "grant", "sub", "agree", "fiction", "hear", "sometimes", "watches", "careers", "beyond", "goes", "families", "led", "museum", "themselves", "fan", "transport", "interesting", "wife", "accepted", "former", "ten", "hits", "zone", "complex", "th", "cat", "galleries", "references", "die", "presented", "jack", "flat", "flow", "agencies", "literature", "respective", "parent", "spanish", "michigan", "columbia", "setting", "dr", "scale", "stand", "economy", "highest", "helpful", "monthly", "critical", "frame", "musical", "definition", "secretary", "path", "employee", "chief", "gives", "bottom", "magazines", "packages", "detail", "francisco", "laws", "changed", "pet", "heard", "begin", "individuals", "colorado", "royal", "clean", "switch", "russian", "largest", "african", "guy", "titles", "relevant", "guidelines", "justice", "bible", "cup", "basket", "applied", "weekly", "vol", "installation", "described", "demand", "pp", "suite", "na", "square", "chris", "attention", "advance", "skip", "diet", "army", "auction", "gear", "lee", "os", "difference", "allowed", "correct", "charles", "nation", "selling", "lots", "piece", "sheet", "firm", "seven", "older", "illinois", "regulations", "elements", "species", "jump", "cells", "resort", "facility", "random", "certificate", "minister", "motion", "looks", "fashion", "directions", "visitors", "monitor", "trading", "forest", "calls", "whose", "couple", "giving", "chance", "vision", "ball", "ending", "clients", "actions", "listen", "discuss", "accept", "naked", "goal", "successful", "sold", "wind", "communities", "clinical", "situation", "sciences", "markets", "lowest", "highly", "publishing", "appear", "emergency", "lives", 
"currency", "leather", "determine", "temperature", "palm", "announcements", "patient", "actual", "historical", "stone", "bob", "commerce", "perhaps", "persons", "difficult", "scientific", "satellite", "fit", "tests", "village", "accounts", "amateur", "ex", "met", "pain", "particularly", "factors", "coffee", "cum", "buyer", "cultural", "steve", "easily", "oral", "ford", "poster", "edge", "functional", "root", "au", "fi", "closed", "holidays", "ice", "pink", "zealand", "balance", "graduate", "replies", "shot", "architecture", "initial", "label", "thinking", "scott", "sec", "recommend", "canon", "league", "waste", "minute", "bus", "optional", "dictionary", "cold", "accounting", "manufacturing", "sections", "chair", "fishing", "effort", "phase", "fields", "bag", "fantasy", "po", "letters", "motor", "va", "professor", "context", "install", "shirt", "apparel", "generally", "continued", "foot", "mass", "crime", "count", "breast", "ibm", "johnson", "sc", "quickly", "dollars", "religion", "claim", "driving", "permission", "surgery", "patch", "heat", "wild", "measures", "generation", "kansas", "miss", "chemical", "doctor", "task", "reduce", "brought", "himself", "nor", "component", "enable", "exercise", "bug", "santa", "mid", "guarantee", "leader", "diamond", "israel", "se", "processes", "soft", "alone", "meetings", "seconds", "jones", "arizona", "interests", "flight", "congress", "fuel", "walk", "produced", "italian", "wait", "supported", "pocket", "saint", "rose", "freedom", "argument", "competition", "creating", "jim", "drugs", "joint", "premium", "fresh", "characters", "attorney", "di", "factor", "growing", "thousands", "km", "stream", "apartments", "pick", "hearing", "eastern", "entries", "dates", "generated", "signed", "upper", "administrative", "serious", "prime", "limit", "began", "louis", "steps", "errors", "shops", "bondage", "del", "efforts", "informed", "ga", "ac", "thoughts", "creek", "ft", "worked", "quantity", "urban", "practices", "sorted", "reporting", "essential", "myself", "tours", "platform", "load", "labor", "immediately", "nursing", "defense", "machines", "tags", "heavy", "covered", "recovery", "joe", "guys", "configuration", "cock", "merchant", "comprehensive", "expert", "universal", "protect", "drop", "solid", "presentation", "languages", "became", "orange", "compliance", "vehicles", "prevent", "theme", "rich", "im", "campaign", "marine", "improvement", "vs", "guitar", "finding", "pennsylvania", "examples", "saying", "spirit", "ar", "claims", "challenge", "acceptance", "mo", "seem", "affairs", "touch", "intended", "towards", "sa", "goals", "hire", "election", "suggest", "branch", "charges", "serve", "reasons", "magic", "mount", "smart", "talking", "gave", "ones", "latin", "avoid", "certified", "manage", "corner", "rank", "computing", "oregon", "element", "birth", "virus", "abuse", "requests", "separate", "quarter", "procedure", "leadership", "tables", "define", "racing", "religious", "facts", "breakfast", "kong", "column", "plants", "faith", "chain", "identify", "avenue", "missing", "died", "approximately", "domestic", "recommendations", "moved", "houston", "reach", "comparison", "mental", "viewed", "moment", "extended", "sequence", "inch", "attack", "sorry", "centers", "opening", "damage", "reserve", "recipes", "plastic", "produce", "snow", "placed", "truth", "counter", "failure", "follows", "eu", "dollar", "camp", "ontario", "automatically", "des", "minnesota", "films", "bridge", "native", "fill", "williams", "movement", "printing", "baseball", "owned", "approval", "draft", 
"chart", "played", "contacts", "cc", "jesus", "readers", "clubs", "wa", "jackson", "equal", "adventure", "matching", "offering", "shirts", "profit", "leaders", "posters", "institutions", "assistant", "variable", "ave", "advertisement", "expect", "headlines", "yesterday", "compared", "determined", "wholesale", "workshop", "russia", "gone", "codes", "kinds", "extension", "seattle", "statements", "golden", "completely", "teams", "fort", "cm", "wi", "lighting", "senate", "forces", "funny", "brother", "gene", "turned", "portable", "tried", "electrical", "applicable", "disc", "returned", "pattern", "boat", "named", "theatre", "earlier", "manufacturers", "sponsor", "classical", "warranty", "dedicated", "indiana", "direction", "harry", "objects", "ends", "delete", "evening", "assembly", "nuclear", "taxes", "mouse", "signal", "criminal", "issued", "brain", "sexual", "wisconsin", "powerful", "dream", "obtained", "false", "da", "cast", "flower", "felt", "personnel", "passed", "supplied", "identified", "falls", "pic", "soul", "aids", "opinions", "promote", "stated", "professionals", "appears", "carry", "flag", "decided", "covers", "hr", "em", "advantage", "hello", "designs", "maintain", "tourism", "priority", "newsletters", "adults", "savings", "iv", "graphic", "atom", "payments", "estimated", "binding", "brief", "ended", "winning", "eight", "anonymous", "iron", "straight", "script", "served", "wants", "miscellaneous", "prepared", "void", "dining", "alert", "integration", "atlanta", "dakota", "tag", "interview", "mix", "framework", "disk", "installed", "queen", "credits", "clearly", "fix", "handle", "sweet", "desk", "dave", "massachusetts", "diego", "hong", "vice", "associate", "ne", "truck", "behavior", "enlarge", "ray", "frequently", "revenue", "measure", "changing", "votes", "du", "duty", "looked", "discussions", "bear", "gain", "festival", "laboratory", "ocean", "flights", "experts", "signs", "lack", "depth", "iowa", "whatever", "vintage", "train", "exactly", "dry", "explore", "maryland", "spa", "concept", "nearly", "eligible", "reality", "forgot", "handling", "origin", "knew", "gaming", "feeds", "billion", "destination", "scotland", "faster", "intelligence", "dallas", "bought", "con", "ups", "nations", "route", "followed", "specifications", "broken", "frank", "alaska", "blow", "battle", "residential", "speak", "decisions", "industries", "protocol", "query", "clip", "partnership", "editorial", "nt", "expression", "es", "equity", "provisions", "speech", "wire", "principles", "suggestions", "rural", "shared", "sounds", "replacement", "tape", "strategic", "judge", "economics", "acid", "cent", "forced", "compatible", "fight", "apartment", "height", "null", "zero", "speaker", "filed", "netherlands", "obtain", "recreation", "offices", "designer", "remain", "managed", "pr", "failed", "marriage", "roll", "korea", "banks", "fr", "participants", "secret", "bath", "kelly", "leads", "negative", "austin", "favorites", "toronto", "theater", "springs", "missouri", "andrew", "var", "perform", "healthy", "translation", "estimates", "font", "assets", "injury", "mt", "joseph", "ministry", "drivers", "lawyer", "figures", "married", "protected", "proposal", "sharing", "philadelphia", "portal", "waiting", "birthday", "beta", "fail", "gratis", "banking", "officials", "brian", "toward", "won", "slightly", "assist", "conduct", "contained", "legislation", "calling", "serving", "bags", "miami", "comics", "matters", "houses", "doc", "postal", "relationships", "tennessee", "wear", "controls", "breaking", "combined", 
"ultimate", "wales", "representative", "frequency", "introduced", "minor", "finish", "departments", "residents", "noted", "displayed", "reduced", "physics", "rare", "spent", "performed", "extreme", "samples", "davis", "daniel", "bars", "reviewed", "row", "oz", "forecast", "removed", "helps", "administrator", "cycle", "contain", "accuracy", "dual", "rise", "sleep", "bird", "brazil", "creation", "static", "scene", "hunter", "addresses", "lady", "crystal", "famous", "writer", "chairman", "violence", "fans", "oklahoma", "speakers", "drink", "academy", "dynamic", "gender", "eat", "permanent", "agriculture", "dell", "cleaning", "portfolio", "practical", "delivered", "exclusive", "seat", "concerns", "colour", "vendor", "originally", "utilities", "philosophy", "regulation", "officers", "reduction", "aim", "bids", "referred", "supports", "nutrition", "recording", "regions", "junior", "toll", "les", "cape", "ann", "rings", "meaning", "tip", "secondary", "wonderful", "mine", "ladies", "henry", "ticket", "announced", "guess", "agreed", "prevention", "whom", "ski", "import", "posting", "presence", "instant", "mentioned", "automatic", "viewing", "maintained", "ch", "increasing", "majority", "connected", "christ", "dan", "dogs", "sd", "directors", "aspects", "austria", "ahead", "moon", "participation", "scheme", "utility", "fly", "manner", "matrix", "containing", "combination", "amendment", "despite", "strength", "guaranteed", "turkey", "libraries", "proper", "distributed", "degrees", "singapore", "enterprises", "delta", "fear", "seeking", "inches", "phoenix", "convention", "shares", "principal", "daughter", "standing", "comfort", "colors", "wars", "ordering", "kept", "alpha", "appeal", "cruise", "bonus", "previously", "hey", "buildings", "beat", "disney", "household", "batteries", "adobe", "smoking", "becomes", "drives", "arms", "alabama", "tea", "improved", "trees", "achieve", "positions", "dress", "subscription", "dealer", "contemporary", "sky", "utah", "nearby", "rom", "carried", "happen", "exposure", "hide", "signature", "gambling", "refer", "miller", "provision", "outdoors", "clothes", "caused", "luxury", "babes", "frames", "certainly", "indeed", "newspaper", "toy", "circuit", "layer", "printed", "slow", "removal", "easier", "liability", "trademark", "hip", "printers", "nine", "adding", "kentucky", "mostly", "eric", "spot", "taylor", "prints", "spend", "factory", "interior", "grow", "americans", "optical", "promotion", "relative", "amazing", "clock", "dot", "hiv", "identity", "suites", "conversion", "feeling", "hidden", "reasonable", "victoria", "serial", "relief", "revision", "influence", "ratio", "importance", "rain", "onto", "planet", "copies", "recipe", "zum", "permit", "seeing", "proof", "tennis", "bass", "prescription", "bedroom", "empty", "instance", "hole", "pets", "ride", "licensed", "orlando", "specifically", "tim", "bureau", "maine", "represent", "conservation", "pair", "ideal", "recorded", "don", "pieces", "finished", "parks", "dinner", "lawyers", "sydney", "stress", "cream", "runs", "trends", "discover", "ap", "patterns", "boxes", "louisiana", "hills", "fourth", "nm", "advisor", "mn", "marketplace", "nd", "evil", "aware", "wilson", "shape", "evolution", "irish", "certificates", "objectives", "stations", "suggested", "op", "remains", "greatest", "firms", "concerned", "operator", "structures", "generic", "usage", "cap", "ink", "charts", "continuing", "mixed", "census", "peak", "competitive", "exist", "wheel", "transit", "dick", "salt", "compact", "poetry", "lights", "tracking", "angel", 
"bell", "keeping", "preparation", "attempt", "receiving", "matches", "accordance", "width", "noise", "engines", "forget", "array", "discussed", "accurate", "stephen", "elizabeth", "climate", "reservations", "pin", "alcohol", "greek", "instruction", "managing", "sister", "raw", "differences", "walking", "explain", "smaller", "newest", "establish", "happened", "expressed", "jeff", "extent", "sharp", "ben", "lane", "paragraph", "kill", "mathematics", "compensation", "ce", "export", "managers", "aircraft", "sweden", "conflict", "conducted", "versions", "employer", "occur", "percentage", "knows", "mississippi", "describe", "concern", "requested", "citizens", "connecticut", "heritage", "immediate", "holding", "trouble", "spread", "coach", "agricultural", "expand", "supporting", "audience", "assigned", "jordan", "collections", "ages", "participate", "plug", "specialist", "cook", "affect", "virgin", "experienced", "investigation", "raised", "hat", "institution", "directed", "dealers", "searching", "sporting", "helping", "affected", "lib", "totally", "plate", "expenses", "indicate", "blonde", "ab", "proceedings", "favourite", "transmission", "anderson", "characteristics", "der", "lose", "organic", "seek", "experiences", "cheats", "extremely", "contracts", "guests", "diseases", "concerning", "equivalent", "chemistry", "tony", "neighborhood", "nevada", "thailand", "anyway", "continues", "tracks", "advisory", "cam", "curriculum", "logic", "prince", "circle", "soil", "grants", "anywhere", "psychology", "responses", "atlantic", "wet", "circumstances", "edward", "identification", "ram", "leaving", "appliances", "matt", "cooking", "speaking", "fox", "respond", "sizes", "plain", "exit", "entered", "iran", "arm", "keys", "launch", "wave", "checking", "costa", "belgium", "holy", "acts", "guidance", "mesh", "trail", "enforcement", "symbol", "crafts", "highway", "buddy", "observed", "dean", "poll", "glossary", "fiscal", "celebrity", "styles", "denver", "unix", "filled", "bond", "channels", "appendix", "notify", "blues", "chocolate", "pub", "portion", "scope", "hampshire", "cables", "cotton", "controlled", "requirement", "authorities", "biology", "dental", "killed", "border", "ancient", "debate", "representatives", "starts", "pregnancy", "causes", "arkansas", "biography", "leisure", "attractions", "learned", "transactions", "notebook", "explorer", "historic", "attached", "opened", "husband", "disabled", "authorized", "crazy", "britain", "concert", "retirement", "financing", "efficiency", "sp", "comedy", "adopted", "efficient", "linear", "commitment", "specialty", "bears", "jean", "hop", "carrier", "edited", "constant", "visa", "mouth", "jewish", "meter", "linked", "portland", "interviews", "concepts", "gun", "reflect", "pure", "deliver", "wonder", "hell", "lessons", "fruit", "begins", "qualified", "reform", "lens", "treated", "discovery", "draw", "classified", "relating", "assume", "confidence", "alliance", "fm", "confirm", "warm", "neither", "lewis", "howard", "leaves", "engineer", "consistent", "replace", "clearance", "connections", "inventory", "suck", "organisation", "babe", "checks", "reached", "becoming", "objective", "indicated", "sugar", "crew", "legs", "sam", "stick", "securities", "allen", "relation", "enabled", "genre", "slide", "montana", "volunteer", "tested", "rear", "democratic", "enhance", "switzerland", "exact", "bound", "formal", "dimensions", "contribute", "lock", "storm", "colleges", "mile", "showed", "challenges", "editors", "mens", "threads", "bowl", "supreme", "brothers", "recognition", 
"presents", "ref", "tank", "submission", "dolls", "estimate", "encourage", "navy", "kid", "inspection", "consumers", "cancel", "limits", "territory", "transaction", "manchester", "weapons", "paint", "delay", "pilot", "outlet", "contributions", "continuous", "czech", "resulting", "cambridge", "initiative", "novel", "pan", "execution", "disability", "increases", "ultra", "winner", "idaho", "contractor", "episode", "examination", "potter", "dish", "plays", "bulletin", "ia", "pt", "indicates", "modify", "oxford", "adam", "truly", "painting", "committed", "extensive", "universe", "candidate", "patent", "slot", "outstanding", "ha", "eating", "perspective", "planned", "watching", "lodge", "messenger", "mirror", "tournament", "consideration", "sterling", "sessions", "kernel", "stocks", "buyers", "journals", "gray", "catalogue", "ea", "antonio", "charged", "broad", "taiwan", "und", "chosen", "greece", "swiss", "sarah", "clark", "labour", "hate", "terminal", "publishers", "nights", "behalf", "caribbean", "liquid", "rice", "nebraska", "loop", "salary", "reservation", "foods", "guard", "properly", "orleans", "saving", "remaining", "empire", "resume", "twenty", "newly", "raise", "prepare", "gary", "depending", "illegal", "expansion", "vary", "hundreds", "rome", "arab", "lincoln", "helped", "premier", "tomorrow", "purchased", "milk", "decide", "consent", "drama", "visiting", "performing", "downtown", "keyboard", "contest", "collected", "nw", "bands", "boot", "suitable", "ff", "absolutely", "millions", "lunch", "audit", "push", "chamber", "guinea", "findings", "muscle", "iso", "implement", "clicking", "scheduled", "polls", "typical", "tower", "yours", "sum", "significantly", "chicken", "temporary", "attend", "shower", "alan", "sending", "jason", "tonight", "dear", "sufficient", "shell", "province", "catholic", "oak", "vat", "vancouver", "governor", "beer", "seemed", "contribution", "measurement", "swimming", "formula", "constitution", "solar", "jose", "catch", "jane", "pakistan", "ps", "reliable", "consultation", "northwest", "sir", "doubt", "earn", "finder", "unable", "periods", "classroom", "tasks", "democracy", "attacks", "kim", "merchandise", "const", "resistance", "doors", "symptoms", "resorts", "memorial", "visitor", "twin", "forth", "insert", "baltimore", "gateway", "ky", "dont", "drawing", "candidates", "charlotte", "ordered", "biological", "fighting", "transition", "happens", "preferences", "spy", "romance", "instrument", "bruce", "split", "themes", "powers", "heaven", "br", "bits", "pregnant", "twice", "classification", "focused", "egypt", "physician", "bargain", "cellular", "norway", "vermont", "asking", "blocks", "normally", "lo", "spiritual", "hunting", "suit", "shift", "chip", "res", "sit", "bodies", "photographs", "cutting", "simon", "writers", "marks", "flexible", "loved", "favourites", "numerous", "relatively", "birds", "satisfaction", "represents", "char", "pittsburgh", "superior", "preferred", "saved", "paying", "cartoon", "shots", "intellectual", "moore", "granted", "choices", "carbon", "spending", "comfortable", "magnetic", "interaction", "listening", "effectively", "registry", "crisis", "outlook", "massive", "denmark", "employed", "bright", "treat", "header", "cs", "poverty", "formed", "piano", "echo", "que", "sheets", "patrick", "experimental", "puerto", "revolution", "consolidation", "displays", "allowing", "earnings", "mystery", "landscape", "dependent", "mechanical", "journey", "delaware", "bidding", "risks", "banner", "applicant", "charter", "fig", "barbara", "cooperation", 
"counties", "acquisition", "ports", "directories", "recognized", "dreams", "notification", "licensing", "stands", "teach", "occurred", "rapid", "pull", "hairy", "diversity", "cleveland", "ut", "reverse", "deposit", "investments", "wheels", "specify", "dutch", "sensitive", "formats", "depends", "boots", "holds", "si", "editing", "poland", "completion", "pulse", "universities", "technique", "contractors", "voting", "courts", "notices", "subscriptions", "calculate", "detroit", "alexander", "broadcast", "converted", "anniversary", "improvements", "strip", "specification", "pearl", "accident", "nick", "accessible", "accessory", "resident", "plot", "possibly", "typically", "representation", "regard", "pump", "exists", "arrangements", "smooth", "conferences", "strike", "consumption", "birmingham", "flashing", "narrow", "afternoon", "threat", "surveys", "sitting", "putting", "controller", "ownership", "committees", "penis", "legislative", "vietnam", "trailer", "anne", "castle", "gardens", "missed", "malaysia", "antique", "labels", "willing", "molecular", "acting", "heads", "stored", "residence", "attorneys", "antiques", "density", "hundred", "ryan", "operators", "strange", "philippines", "statistical", "beds", "breasts", "mention", "innovation", "employers", "grey", "parallel", "amended", "operate", "bills", "bold", "bathroom", "stable", "opera", "definitions", "von", "doctors", "lesson", "asset", "scan", "elections", "drinking", "reaction", "blank", "enhanced", "entitled", "severe", "generate", "stainless", "newspapers", "hospitals", "vi", "humor", "aged", "exception", "lived", "duration", "bulk", "successfully", "indonesia", "pursuant", "fabric", "visits", "primarily", "tight", "domains", "capabilities", "contrast", "recommendation", "flying", "sin", "berlin", "cute", "organized", "ba", "para", "adoption", "improving", "cr", "expensive", "meant", "capture", "pounds", "buffalo", "organisations", "plane", "pg", "explained", "seed", "programmes", "desire", "mechanism", "camping", "ee", "jewellery", "meets", "welfare", "peer", "caught", "eventually", "marked", "driven", "measured", "bottle", "agreements", "considering", "marshall", "massage", "rubber", "conclusion", "closing", "thousand", "meat", "legend", "grace", "susan", "ing", "adams", "monster", "alex", "bang", "villa", "bone", "columns", "disorders", "bugs", "collaboration", "hamilton", "detection", "ftp", "cookies", "inner", "formation", "med", "engineers", "entity", "gate", "holder", "proposals", "sw", "settlement", "portugal", "lawrence", "roman", "duties", "valuable", "erotic", "tone", "ethics", "forever", "dragon", "busy", "captain", "fantastic", "imagine", "brings", "heating", "leg", "neck", "hd", "wing", "governments", "purchasing", "appointed", "taste", "dealing", "commit", "tiny", "rail", "liberal", "jay", "trips", "gap", "sides", "tube", "turns", "corresponding", "descriptions", "cache", "belt", "jacket", "determination", "animation", "oracle", "er", "matthew", "lease", "productions", "aviation", "proud", "excess", "disaster", "console", "commands", "jr", "instructor", "giant", "achieved", "injuries", "shipped", "seats", "approaches", "alarm", "anthony", "usual", "loading", "stamps", "appeared", "franklin", "angle", "rob", "mining", "melbourne", "worst", "betting", "scientists", "liberty", "wyoming", "argentina", "era", "convert", "possibility", "commissioner", "dangerous", "garage", "exciting", "thongs", "unfortunately", "respectively", "volunteers", "attachment", "finland", "morgan", "derived", "pleasure", "honor", "asp", "eagle", 
"pants", "columbus", "nurse", "prayer", "appointment", "workshops", "hurricane", "quiet", "luck", "postage", "producer", "represented", "mortgages", "dial", "responsibilities", "cheese", "comic", "carefully", "jet", "productivity", "investors", "crown", "par", "underground", "diagnosis", "maker", "crack", "principle", "picks", "vacations", "gang", "calculated", "fetish", "appearance", "smoke", "apache", "incorporated", "craft", "cake", "apart", "fellow", "blind", "lounge", "mad", "coins", "andy", "gross", "strongly", "cafe", "valentine", "hilton", "ken", "horror", "su", "familiar", "capable", "douglas", "till", "involving", "pen", "investing", "christopher", "admission", "shoe", "elected", "carrying", "victory", "sand", "madison", "joy", "editions", "mainly", "ethnic", "ran", "parliament", "actor", "finds", "seal", "situations", "fifth", "citizen", "vertical", "corrections", "structural", "municipal", "describes", "prize", "sr", "occurs", "jon", "absolute", "disabilities", "consists", "substance", "prohibited", "addressed", "lies", "pipe", "soldiers", "guardian", "lecture", "simulation", "ill", "concentration", "classics", "lbs", "lay", "interpretation", "horses", "dirty", "deck", "wayne", "donate", "taught", "bankruptcy", "worker", "alive", "temple", "substances", "prove", "discovered", "wings", "breaks", "restrictions", "participating", "waters", "promise", "thin", "exhibition", "prefer", "ridge", "cabinet", "harris", "bringing", "sick", "dose", "tiffany", "tropical", "collect", "bet", "composition", "streets", "definitely", "shaved", "turning", "buffer", "purple", "existence", "commentary", "larry", "developments", "def", "immigration", "lets", "mutual", "necessarily", "syntax", "li", "attribute", "prison", "skill", "chairs", "nl", "everyday", "apparently", "surrounding", "mountains", "moves", "popularity", "inquiry", "checked", "exhibit", "throw", "trend", "sierra", "visible", "cats", "desert", "ya", "oldest", "rhode", "obviously", "mercury", "steven", "handbook", "greg", "navigate", "worse", "summit", "victims", "spaces", "fundamental", "burning", "escape", "somewhat", "receiver", "substantial", "tr", "progressive", "boats", "glance", "scottish", "championship", "arcade", "richmond", "sacramento", "impossible", "russell", "tells", "obvious", "fiber", "depression", "graph", "covering", "platinum", "judgment", "bedrooms", "talks", "filing", "foster", "passing", "awarded", "testimonials", "trials", "tissue", "nz", "clinton", "masters", "bonds", "cartridge", "explanation", "folk", "commons", "cincinnati", "subsection", "fraud", "electricity", "permitted", "spectrum", "arrival", "pottery", "emphasis", "roger", "aspect", "awesome", "mexican", "confirmed", "counts", "priced", "hist", "crash", "lift", "desired", "inter", "closer", "assumes", "heights", "shadow", "riding", "infection", "lisa", "expense", "grove", "venture", "korean", "healing", "princess", "mall", "entering", "packet", "spray", "studios", "dad", "buttons", "observations", "thompson", "winners", "extend", "roads", "subsequent", "pat", "dublin", "rolling", "fell", "yard", "disclosure", "establishment", "memories", "nelson", "te", "arrived", "creates", "faces", "tourist", "cocks", "av", "mayor", "murder", "sean", "adequate", "senator", "yield", "grades", "cartoons", "pour", "digest", "reg", "lodging", "tion", "dust", "hence", "entirely", "replaced", "rescue", "undergraduate", "losses", "combat", "reducing", "stopped", "occupation", "lakes", "butt", "donations", "associations", "closely", "radiation", "diary", "seriously", 
"kings", "shooting", "kent", "adds", "ear", "flags", "baker", "launched", "elsewhere", "pollution", "conservative", "shock", "effectiveness", "walls", "abroad", "ebony", "tie", "ward", "drawn", "arthur", "ian", "visited", "roof", "walker", "demonstrate", "atmosphere", "suggests", "kiss", "beast", "ra", "operated", "experiment", "targets", "overseas", "purchases", "dodge", "counsel", "federation", "invited", "yards", "assignment", "chemicals", "gordon", "mod", "farmers", "queries", "rush", "ukraine", "absence", "nearest", "cluster", "vendors", "whereas", "yoga", "serves", "woods", "surprise", "lamp", "rico", "partial", "phil", "everybody", "couples", "nashville", "ranking", "jokes", "http", "simpson", "sublime", "palace", "acceptable", "satisfied", "glad", "wins", "measurements", "verify", "globe", "trusted", "copper", "milwaukee", "rack", "warehouse", "ec", "rep", "kerry", "receipt", "supposed", "ordinary", "nobody", "ghost", "violation", "stability", "mit", "applying", "southwest", "boss", "pride", "expectations", "independence", "knowing", "reporter", "keith", "champion", "cloudy", "linda", "ross", "personally", "chile", "anna", "plenty", "solo", "sentence", "throat", "ignore", "maria", "uniform", "excellence", "wealth", "tall", "somewhere", "vacuum", "dancing", "attributes", "recognize", "brass", "writes", "plaza", "survival", "quest", "publish", "screening", "toe", "trans", "jonathan", "whenever", "nova", "lifetime", "pioneer", "booty", "forgotten", "plates", "acres", "venue", "athletic", "essays", "behaviour", "vital", "telling", "fairly", "coastal", "cf", "charity", "intelligent", "edinburgh", "vt", "excel", "modes", "obligation", "campbell", "wake", "stupid", "harbor", "hungary", "traveler", "segment", "realize", "lan", "enemy", "puzzle", "rising", "aluminum", "wells", "opens", "insight", "restricted", "republican", "secrets", "lucky", "latter", "merchants", "thick", "repeat", "philips", "attendance", "penalty", "drum", "glasses", "enables", "nec", "builder", "vista", "jessica", "chips", "terry", "flood", "ease", "arguments", "amsterdam", "orgy", "arena", "adventures", "pupils", "stewart", "announcement", "outcome", "xx", "appreciate", "expanded", "casual", "grown", "polish", "lovely", "extras", "centres", "jerry", "clause", "smile", "lands", "ri", "troops", "indoor", "bulgaria", "armed", "broker", "charger", "regularly", "believed", "pine", "cooling", "tend", "gulf", "rick", "trucks", "cp", "mechanisms", "divorce", "laura", "tokyo", "partly", "tradition", "candy", "pills", "tiger", "donald", "folks", "exposed", "hunt", "angels", "deputy", "sealed", "physicians", "loaded", "fred", "complaint", "scenes", "experiments", "balls", "afghanistan", "scholarship", "governance", "mill", "founded", "chronic", "moral", "den", "finger", "keeps", "pound", "locate", "pl", "trained", "burn", "roses", "ourselves", "bread", "tobacco", "wooden", "motors", "tough", "roberts", "incident", "gonna", "lie", "conversation", "decrease", "chest", "pension", "billy", "revenues", "emerging", "worship", "capability", "ak", "fe", "craig", "herself", "producing", "churches", "precision", "damages", "reserves", "contributed", "solve", "reproduction", "minority", "diverse", "ingredients", "sb", "ah", "johnny", "sole", "franchise", "recorder", "complaints", "facing", "nancy", "promotions", "tones", "passion", "rehabilitation", "maintaining", "sight", "laid", "clay", "defence", "patches", "weak", "refund", "towns", "divided", "reception", "wise", "cyprus", "odds", "correctly", "consequences", "makers", "hearts", 
"geography", "appearing", "integrity", "worry", "discrimination", "eve", "carter", "legacy", "marc", "pleased", "danger", "widely", "phrase", "genuine", "raising", "implications", "paradise", "hybrid", "reads", "roles", "emotional", "sons", "leaf", "pad", "glory", "platforms", "ja", "bigger", "versus", "combine", "overnight", "geographic", "exceed", "rod", "saudi", "fault", "cuba", "hrs", "preliminary", "districts", "introduce", "silk", "kate", "babies", "bi", "karen", "compiled", "romantic", "revealed", "specialists", "generator", "albert", "examine", "jimmy", "graham", "suspension", "bristol", "margaret", "sad", "correction", "wolf", "slowly", "communicate", "rugby", "supplement", "cal", "portions", "infant", "promoting", "samuel", "fluid", "grounds", "fits", "kick", "regards", "meal", "ta", "hurt", "machinery", "unlike", "equation", "baskets", "probability", "pot", "dimension", "wright", "barry", "proven", "admissions", "warren", "slip", "studied", "reviewer", "involves", "quarterly", "profits", "devil", "grass", "comply", "marie", "illustrated", "cherry", "continental", "alternate", "deutsch", "achievement", "limitations", "kenya", "cuts", "funeral", "earrings", "enjoyed", "chapters", "charlie", "quebec", "passenger", "convenient", "dennis", "mars", "francis", "sized", "noticed", "socket", "silent", "literary", "egg", "signals", "caps", "pill", "theft", "childhood", "swing", "symbols", "lat", "meta", "humans", "facial", "choosing", "talent", "dated", "flexibility", "seeker", "wisdom", "shoot", "boundary", "mint", "offset", "philip", "elite", "gi", "spin", "holders", "believes", "swedish", "poems", "jurisdiction", "displaying", "witness", "collins", "equipped", "stages", "encouraged", "sur", "winds", "powder", "broadway", "acquired", "wash", "cartridges", "stones", "entrance", "roots", "declaration", "losing", "attempts", "noble", "glasgow", "rev", "gospel", "advantages", "shore", "loves", "induced", "ll", "knight", "preparing", "loose", "aims", "recipient", "linking", "extensions", "appeals", "earned", "illness", "islamic", "athletics", "southeast", "ho", "alternatives", "pending", "parker", "determining", "lebanon", "kennedy", "sh", "soap", "ae", "triple", "cooper", "vincent", "jam", "secured", "unusual", "answered", "destruction", "increasingly", "migration", "disorder", "routine", "rocks", "conventional", "titans", "applicants", "wearing", "axis", "sought", "mounted", "habitat", "median", "guns", "herein", "animated", "horny", "judicial", "rio", "adjustment", "hero", "bachelor", "attitude", "engaged", "falling", "montreal", "carpet", "lenses", "binary", "attended", "difficulty", "collective", "coalition", "pi", "dropped", "duke", "walter", "ai", "pace", "besides", "wage", "producers", "ot", "collector", "arc", "hosts", "moments", "atlas", "strings", "dawn", "representing", "observation", "feels", "torture", "carl", "coat", "mitchell", "mrs", "restoration", "convenience", "returning", "ralph", "opposition", "container", "yr", "defendant", "warner", "confirmation", "app", "embedded", "supervisor", "wizard", "corps", "actors", "liver", "liable", "morris", "petition", "recall", "picked", "assumed", "departure", "minneapolis", "belief", "killing", "memphis", "shoulder", "texts", "brokers", "roy", "ion", "diameter", "ottawa", "doll", "ic", "tit", "seasons", "peru", "refine", "bidder", "singer", "evans", "herald", "literacy", "fails", "aging", "intervention", "fed", "attraction", "diving", "invite", "modification", "alice", "suppose", "reed", "involve", "moderate", "terror", "younger", 
"thirty", "mice", "opposite", "understood", "rapidly", "ban", "mercedes", "assurance", "clerk", "happening", "vast", "mills", "outline", "amendments", "holland", "receives", "metropolitan", "compilation", "verification", "ent", "odd", "wrap", "refers", "mood", "favor", "veterans", "gr", "attractive", "occasion", "jefferson", "victim", "demands", "sleeping", "careful", "beam", "gardening", "obligations", "arrive", "orchestra", "sunset", "tracked", "moreover", "lottery", "tops", "framed", "aside", "licence", "essay", "discipline", "amy", "dialogue", "identifying", "alphabetical", "camps", "declared", "dispatched", "aaron", "trace", "disposal", "shut", "packs", "ge", "switches", "romania", "voluntary", "thou", "consult", "greatly", "mask", "midnight", "ng", "commonly", "pe", "photographer", "inform", "turkish", "coal", "cry", "quantum", "murray", "intent", "tt", "zoo", "largely", "pleasant", "announce", "constructed", "additions", "requiring", "spoke", "arrow", "engagement", "rough", "weird", "tee", "lion", "inspired", "holes", "weddings", "blade", "suddenly", "oxygen", "meals", "canyon", "meters", "merely", "arrangement", "conclusions", "passes", "bibliography", "pointer", "stretch", "durham", "furthermore", "permits", "cooperative", "muslim", "xl", "neil", "sleeve", "cleaner", "cricket", "beef", "feeding", "stroke", "township", "cad", "hats", "robin", "robinson", "jacksonville", "strap", "headquarters", "sharon", "crowd", "transfers", "surf", "olympic", "transformation", "remained", "attachments", "dir", "entities", "customs", "administrators", "personality", "rainbow", "hook", "roulette", "decline", "gloves", "cord", "cloud", "facilitate", "subscriber", "valve", "val", "explains", "proceed", "feelings", "knife", "jamaica", "shelf", "liked", "adopt", "denied", "incredible", "donation", "outer", "crop", "deaths", "rivers", "commonwealth", "manhattan", "tales", "katrina", "islam", "tu", "fy", "thumbs", "seeds", "cited", "lite", "hub", "realized", "twelve", "founder", "decade", "dispute", "portuguese", "tired", "adverse", "everywhere", "eng", "steam", "discharge", "ef", "drinks", "ace", "voices", "acute", "climbing", "stood", "sing", "tons", "perfume", "carol", "honest", "albany", "hazardous", "restore", "stack", "somebody", "sue", "ep", "reputation", "democrats", "hang", "curve", "creator", "amber", "qualifications", "museums", "variation", "passage", "transferred", "trunk", "lb", "damn", "pierre", "photograph", "oakland", "colombia", "waves", "camel", "lamps", "underlying", "hood", "wrestling", "suicide", "chi", "arabia", "gathering", "projection", "juice", "chase", "mathematical", "logical", "sauce", "fame", "extract", "specialized", "panama", "indianapolis", "af", "payable", "corporations", "courtesy", "criticism", "automobile", "confidential", "statutory", "accommodations", "athens", "northeast", "judges", "retired", "remarks", "detected", "decades", "paintings", "walked", "arising", "bracelet", "ins", "eggs", "juvenile", "injection", "yorkshire", "populations", "protective", "afraid", "railway", "indicator", "pointed", "causing", "mistake", "norton", "locked", "eliminate", "fusion", "mineral", "ruby", "steering", "beads", "fortune", "preference", "canvas", "threshold", "parish", "claimed", "screens", "cemetery", "croatia", "flows", "venezuela", "exploration", "fewer", "nurses", "stem", "proxy", "astronomy", "lanka", "edwards", "drew", "contests", "translate", "announces", "costume", "berkeley", "voted", "killer", "gates", "adjusted", "rap", "tune", "bishop", "pulled", "corn", "shaped", 
"compression", "seasonal", "establishing", "farmer", "counters", "puts", "constitutional", "grew", "perfectly", "tin", "slave", "instantly", "cultures", "norfolk", "coaching", "examined", "trek", "encoding", "litigation", "heroes", "painted", "ir", "horizontal", "resulted", "portrait", "ethical", "carriers", "mobility", "floral", "builders", "ties", "struggle", "schemes", "suffering", "neutral", "fisher", "rat", "spears", "prospective", "bedding", "ultimately", "joining", "heading", "equally", "artificial", "bearing", "spectacular", "seniors", "worlds", "guilty", "affiliated", "naturally", "haven", "tablet", "jury", "dos", "tail", "subscribers", "charm", "lawn", "violent", "underwear", "basin", "soup", "potentially", "ranch", "crossing", "inclusive", "cottage", "drunk", "considerable", "crimes", "resolved", "byte", "nose", "branches", "delhi", "holdings", "alien", "selecting", "processors", "broke", "nepal", "zimbabwe", "difficulties", "juan", "complexity", "constantly", "browsing", "resolve", "barcelona", "presidential", "documentary", "cod", "territories", "melissa", "moscow", "thesis", "thru", "jews", "discs", "rocky", "bargains", "frequent", "nigeria", "ceiling", "ensuring", "legislature", "hospitality", "gen", "anybody", "diamonds", "fleet", "bunch", "singing", "theoretical", "afford", "exercises", "surveillance", "quit", "distinct", "lung", "substitute", "inclusion", "hopefully", "brilliant", "turner", "sucking", "cents", "ti", "todd", "spoken", "stayed", "civic", "manuals", "sees", "termination", "watched", "thereof", "households", "redeem", "rogers", "grain", "authentic", "regime", "wishes", "bull", "montgomery", "architectural", "louisville", "depend", "differ", "movements", "ranging", "monica", "repairs", "breath", "amenities", "virtually", "cole", "mart", "candle", "hanging", "colored", "authorization", "tale", "verified", "lynn", "formerly", "bp", "situated", "comparative", "seeks", "loving", "strictly", "routing", "docs", "stanley", "psychological", "surprised", "elegant", "gains", "renewal", "genealogy", "opposed", "deemed", "scoring", "expenditure", "brooklyn", "liverpool", "sisters", "critics", "spots", "oo", "hacker", "madrid", "similarly", "margin", "coin", "solely", "fake", "salon", "norman", "excluding", "headed", "voters", "cure", "madonna", "commander", "arch", "ni", "murphy", "thinks", "suggestion", "soldier", "phillips", "aimed", "justin", "bomb", "harm", "interval", "mirrors", "tricks", "brush", "investigate", "thy", "panels", "repeated", "assault", "spare", "deer", "tongue", "bowling", "tri", "pal", "monkey", "proportion", "filename", "skirt", "florence", "invest", "honey", "um", "analyses", "drawings", "significance", "ye", "lovers", "atomic", "arabic", "gauge", "essentials", "junction", "protecting", "faced", "mat", "rachel", "solving", "transmitted", "produces", "oven", "ted", "intensive", "chains", "kingston", "sixth", "engage", "noon", "switching", "quoted", "correspondence", "farms", "imports", "supervision", "cheat", "bronze", "expenditures", "sandy", "separation", "testimony", "suspect", "celebrities", "sender", "boundaries", "crucial", "celebration", "adjacent", "filtering", "tuition", "spouse", "exotic", "threats", "luxembourg", "puzzles", "reaching", "vb", "damaged", "laugh", "joel", "surgical", "destroy", "citation", "pitch", "yo", "premises", "perry", "proved", "offensive", "imperial", "dozen", "benjamin", "teeth", "cloth", "studying", "colleagues", "stamp", "lotus", "salmon", "olympus", "separated", "cargo", "tan", "salem", "mate", "likes", "butter", 
"pepper", "weapon", "luggage", "burden", "chef", "zones", "races", "isle", "stylish", "slim", "maple", "luke", "grocery", "offshore", "depot", "kenneth", "comp", "alt", "pie", "blend", "harrison", "julie", "occasionally", "attending", "emission", "pete", "finest", "janet", "bow", "penn", "recruiting", "apparent", "autumn", "traveling", "probe", "midi", "toilet", "ranked", "jackets", "routes", "packed", "excited", "helen", "mounting", "recover", "tied", "balanced", "prescribed", "catherine", "timely", "talked", "delayed", "chuck", "reproduced", "hon", "dale", "explicit", "calculation", "villas", "ebook", "consolidated", "occasions", "brooks", "newton", "oils", "sept", "exceptional", "anxiety", "whilst", "unto", "prompt", "precious", "minds", "annually", "considerations", "pays", "cox", "fingers", "sunny", "ebooks", "delivers", "je", "queensland", "necklace", "musicians", "leeds", "composite", "cedar", "arranged", "lang", "theaters", "advocacy", "raleigh", "stud", "fold", "essentially", "designing", "threaded", "uv", "qualify", "fingering", "blair", "hopes", "mason", "diagram", "burns", "pumps", "slut", "ejaculation", "sg", "vic", "peoples", "victor", "mario", "pos", "attach", "licenses", "removing", "advised", "brunswick", "spider", "ranges", "pairs", "trails", "preservation", "hudson", "isolated", "interim", "assisted", "divine", "streaming", "approve", "chose", "compound", "intensity", "technological", "syndicate", "abortion", "venues", "blast", "calcium", "newport", "addressing", "pole", "discounted", "indians", "shield", "harvest", "membrane", "prague", "bangladesh", "constitute", "locally", "concluded", "desperate", "mothers", "iceland", "demonstration", "governmental", "manufactured", "candles", "graduation", "bend", "sailing", "variations", "sacred", "morocco", "tommy", "springfield", "refused", "brake", "exterior", "greeting", "oliver", "congo", "glen", "delays", "synthesis", "olive", "undefined", "unemployment", "scored", "newcastle", "velocity", "relay", "composed", "tears", "performances", "oasis", "cab", "angry", "fa", "societies", "brazilian", "identical", "petroleum", "compete", "ist", "norwegian", "lover", "belong", "honolulu", "lips", "escort", "retention", "exchanges", "pond", "rolls", "thomson", "barnes", "wondering", "malta", "daddy", "ferry", "rabbit", "profession", "seating", "dam", "separately", "physiology", "collecting", "das", "exports", "omaha", "tire", "dominican", "chad", "loads", "friendship", "heather", "passport", "unions", "treasury", "warrant", "frozen", "occupied", "josh", "royalty", "scales", "rally", "observer", "sunshine", "strain", "drag", "ceremony", "somehow", "arrested", "expanding", "provincial", "investigations", "ripe", "rely", "hebrew", "gained", "rochester", "dying", "laundry", "stuck", "solomon", "placing", "stops", "adjust", "assessed", "enabling", "filling", "sophisticated", "imposed", "silence", "soviet", "possession", "cu", "laboratories", "treaty", "vocal", "trainer", "organ", "stronger", "volumes", "advances", "vegetables", "lemon", "darkness", "nuts", "nail", "vienna", "implied", "span", "stanford", "stockings", "joke", "respondent", "packing", "statute", "rejected", "satisfy", "destroyed", "shelter", "chapel", "manufacture", "layers", "guided", "accredited", "appliance", "compressed", "bahamas", "powell", "mixture", "bench", "tub", "rider", "radius", "perspectives", "mortality", "logging", "hampton", "christians", "borders", "pads", "butts", "inns", "bobby", "impressive", "sheep", "accordingly", "architect", "railroad", "lectures", 
"challenging", "wines", "nursery", "harder", "cups", "ash", "microwave", "cheapest", "accidents", "stuart", "contributors", "salvador", "ali", "salad", "monroe", "tender", "violations", "foam", "temperatures", "paste", "clouds", "discretion", "tanzania", "preserve", "poem", "unsigned", "staying", "easter", "theories", "repository", "praise", "jeremy", "venice", "jo", "christianity", "veteran", "streams", "landing", "signing", "executed", "katie", "negotiations", "realistic", "integral", "asks", "relax", "namibia", "generating", "christina", "congressional", "synopsis", "hardly", "prairie", "reunion", "composer", "bean", "sword", "absent", "photographic", "sells", "ecuador", "hoping", "accessed", "spirits", "modifications", "coral", "float", "colin", "bias", "imported", "paths", "bubble", "por", "acquire", "contrary", "millennium", "tribune", "vessel", "acids", "cheaper", "admitted", "dairy", "admit", "mem", "fancy", "equality", "samoa", "achieving", "tap", "fisheries", "exceptions", "reactions", "beliefs", "ci", "companion", "squad", "analyze", "ashley", "scroll", "relate", "divisions", "swim", "wages", "suffer", "forests", "fellowship", "invalid", "concerts", "martial", "males", "victorian", "retain", "colours", "execute", "tunnel", "genres", "cambodia", "patents", "yn", "chaos", "lithuania", "wheat", "chronicles", "obtaining", "beaver", "distribute", "readings", "decorative", "confused", "compiler", "enlargement", "eagles", "bases", "vii", "accused", "bee", "campaigns", "unity", "loud", "bride", "rats", "defines", "airports", "instances", "indigenous", "begun", "brunette", "packets", "anchor", "socks", "parade", "corruption", "stat", "trigger", "incentives", "gathered", "essex", "notified", "differential", "beaches", "dramatic", "surfaces", "terrible", "cruz", "pendant", "dresses", "baptist", "scientist", "hiring", "clocks", "females", "wallace", "nevertheless", "reflects", "taxation", "fever", "cuisine", "surely", "practitioners", "transcript", "inflation", "thee", "ruth", "pray", "compounds", "pope", "drums", "contracting", "arnold", "reasonably", "chicks", "bare", "hung", "cattle", "radical", "graduates", "rover", "recommends", "controlling", "treasure", "flame", "tanks", "assuming", "monetary", "elderly", "pit", "arlington", "floating", "extraordinary", "tile", "indicating", "bolivia", "spell", "hottest", "stevens", "kuwait", "exclusively", "emily", "alleged", "limitation", "compile", "webster", "struck", "illustration", "plymouth", "warnings", "construct", "inquiries", "bridal", "annex", "mag", "inspiration", "tribal", "curious", "affecting", "freight", "eclipse", "sudan", "downloading", "shuttle", "aggregate", "stunning", "cycles", "affects", "detect", "actively", "knee", "prep", "pb", "complicated", "fastest", "butler", "injured", "decorating", "expressions", "ton", "courier", "shakespeare", "hints", "collapse", "unlikely", "oe", "gif", "pros", "conflicts", "beverage", "tribute", "wired", "immune", "travelers", "forestry", "barriers", "cant", "rarely", "infected", "offerings", "martha", "genesis", "barrier", "argue", "incorrect", "trains", "metals", "bicycle", "furnishings", "letting", "arise", "guatemala", "celtic", "thereby", "jamie", "particle", "perception", "minerals", "advise", "humidity", "bottles", "boxing", "wy", "renaissance", "pathology", "sara", "bra", "ordinance", "hughes", "bitch", "jeffrey", "chess", "operates", "survive", "oscar", "festivals", "menus", "joan", "possibilities", "duck", "reveal", "canal", "phi", "contributing", "herbs", "cow", "manitoba", 
"analytical", "missions", "watson", "lying", "costumes", "strict", "dive", "circulation", "drill", "offense", "bryan", "cet", "protest", "assumption", "jerusalem", "hobby", "tries", "invention", "nickname", "fiji", "enquiries", "washing", "exploring", "trick", "enquiry", "raid", "timber", "intense", "showers", "supporters", "ruling", "steady", "dirt", "statutes", "withdrawal", "myers", "drops", "predicted", "wider", "saskatchewan", "enrolled", "screw", "ministers", "publicly", "hourly", "blame", "geneva", "veterinary", "handed", "suffered", "informal", "incentive", "butterfly", "mechanics", "heavily", "fifty", "mistakes", "numerical", "ons", "uncle", "defining", "counting", "reflection", "sink", "accompanied", "assure", "invitation", "devoted", "princeton", "jacob", "sodium", "randy", "spirituality", "meanwhile", "proprietary", "timothy", "brick", "grip", "naval", "medieval", "porcelain", "bridges", "captured", "watt", "decent", "casting", "dayton", "translated", "shortly", "cameron", "pins", "carlos", "reno", "donna", "andreas", "warrior", "diploma", "cabin", "innocent", "scanning", "consensus", "polo", "copying", "delivering", "patricia", "horn", "eddie", "uganda", "fired", "journalism", "perth", "frog", "grammar", "intention", "syria", "disagree", "klein", "harvey", "tires", "logs", "undertaken", "hazard", "leo", "gregory", "episodes", "circular", "anger", "mainland", "illustrations", "suits", "chances", "snap", "happiness", "arg", "substantially", "bizarre", "glenn", "ur", "auckland", "fruits", "geo", "ribbon", "calculations", "doe", "conducting", "trinidad", "kissing", "wal", "handy", "swap", "exempt", "crops", "reduces", "accomplished", "geometry", "impression", "guild", "correlation", "gorgeous", "capitol", "sim", "dishes", "barbados", "nervous", "refuse", "extends", "fragrance", "mcdonald", "replica", "brussels", "tribe", "neighbors", "trades", "superb", "buzz", "transparent", "rid", "trinity", "charleston", "handled", "legends", "boom", "calm", "champions", "floors", "selections", "inappropriate", "exhaust", "comparing", "shanghai", "speaks", "burton", "vocational", "davidson", "copied", "scotia", "farming", "gibson", "fork", "troy", "roller", "batch", "organize", "appreciated", "alter", "ghana", "edges", "mixing", "handles", "skilled", "fitted", "albuquerque", "harmony", "distinguished", "projected", "assumptions", "shareholders", "twins", "rip", "triangle", "amend", "anticipated", "oriental", "reward", "windsor", "zambia", "completing", "hydrogen", "comparable", "chick", "advocate", "sims", "confusion", "copyrighted", "tray", "warranties", "escorts", "thong", "medal", "coaches", "vessels", "harbour", "walks", "sucks", "sol", "sage", "knives", "vulnerable", "arrange", "artistic", "bat", "honors", "booth", "reflected", "unified", "bones", "breed", "ignored", "polar", "fallen", "precise", "sussex", "respiratory", "invoice", "lip", "sap", "gather", "maternity", "backed", "alfred", "colonial", "carey", "forming", "embassy", "cave", "journalists", "danny", "rebecca", "slight", "proceeds", "indirect", "amongst", "wool", "foundations", "arrest", "horizon", "nu", "deeply", "marina", "liabilities", "prizes", "bosnia", "decreased", "patio", "tolerance", "lloyd", "describing", "optics", "pursue", "lightning", "overcome", "eyed", "ou", "quotations", "grab", "inspector", "attract", "brighton", "beans", "bookmarks", "ellis", "disable", "snake", "succeed", "leonard", "lending", "reminder", "xi", "searched", "riverside", "plains", "raymond", "abilities", "initiated", "sullivan", "za", "trap", 
"lonely", "fool", "ve", "lancaster", "suspended", "observe", "julia", "attitudes", "karl", "berry", "collar", "simultaneously", "racial", "bermuda", "amanda", "sociology", "exhibitions", "confident", "retrieved", "exhibits", "officially", "dies", "terrace", "bacteria", "replied", "novels", "recipients", "ought", "delicious", "traditions", "jail", "safely", "finite", "kidney", "periodically", "fixes", "sends", "durable", "allied", "throws", "moisture", "hungarian", "referring", "spencer", "uruguay", "transform", "tablets", "tuning", "gotten", "educators", "tyler", "futures", "vegetable", "verse", "humanities", "independently", "wanting", "custody", "scratch", "launches", "henderson", "bk", "britannica", "ellen", "competitors", "rocket", "bullet", "towers", "racks", "lace", "nasty", "latitude", "consciousness", "ste", "tumor", "ugly", "deposits", "beverly", "mistress", "encounter", "trustees", "watts", "duncan", "hart", "bernard", "resolutions", "ment", "forty", "tubes", "attempted", "col", "priest", "floyd", "ronald", "queue", "trance", "nicholas", "yu", "bundle", "hammer", "invasion", "witnesses", "runner", "rows", "administered", "notion", "sq", "skins", "mailed", "spelling", "arctic", "rewards", "beneath", "strengthen", "defend", "frederick", "seventh", "gods", "une", "welsh", "belly", "aggressive", "advertisements", "quarters", "stolen", "soonest", "haiti", "disturbed", "determines", "sculpture", "ears", "fist", "fitting", "fixtures", "mere", "agrees", "passengers", "quantities", "petersburg", "consistently", "cons", "elder", "cheers", "dig", "taxi", "punishment", "appreciation", "subsequently", "om", "nat", "gravity", "providence", "thumb", "restriction", "incorporate", "backgrounds", "treasurer", "essence", "flooring", "ethiopia", "mighty", "athletes", "humanity", "transcription", "holmes", "complications", "scholars", "remembered", "galaxy", "chester", "loc", "worn", "synthetic", "shaw", "vp", "segments", "testament", "twist", "stomach", "partially", "buried", "minimize", "darwin", "ranks", "wilderness", "debut", "generations", "tournaments", "bradley", "deny", "anatomy", "judy", "fraction", "trio", "proceeding", "cube", "defects", "uncertainty", "breakdown", "milton", "reconstruction", "subsidiary", "clarity", "rugs", "sandra", "adelaide", "encouraging", "furnished", "monaco", "settled", "folding", "comparisons", "beneficial", "belize", "fate", "promised", "penny", "robust", "threatened", "republicans", "discusses", "porter", "gras", "jungle", "ver", "responded", "rim", "zen", "ivory", "alpine", "dis", "prediction", "fabulous", "alias", "individually", "battlefield", "literally", "newer", "kay", "spice", "oval", "implies", "soma", "ser", "cooler", "consisting", "periodic", "submitting", "overhead", "ascii", "prospect", "shipment", "breeding", "citations", "geographical", "donor", "mozambique", "tension", "trash", "shapes", "tier", "earl", "manor", "envelope", "diane", "disclaimers", "excluded", "andrea", "breeds", "rapids", "sheffield", "bailey", "aus", "finishing", "emotions", "wellington", "incoming", "prospects", "bulgarian", "eternal", "cite", "aboriginal", "remarkable", "rotation", "nam", "productive", "boulevard", "eugene", "ix", "gdp", "pig", "metric", "minus", "penalties", "bennett", "imagination", "joshua", "armenia", "varied", "grande", "closest", "actress", "mess", "assign", "armstrong", "politicians", "lit", "accommodate", "tigers", "aurora", "una", "slides", "milan", "premiere", "lender", "villages", "shade", "chorus", "christine", "rhythm", "digit", "argued", 
"dietary", "symphony", "clarke", "sudden", "accepting", "precipitation", "lions", "ada", "pools", "tb", "lyric", "claire", "isolation", "speeds", "sustained", "matched", "approximate", "rope", "carroll", "rational", "fighters", "chambers", "dump", "greetings", "inherited", "warming", "incomplete", "chronicle", "fountain", "chubby", "grave", "legitimate", "biographies", "burner", "investigator", "plaintiff", "finnish", "gentle", "prisoners", "deeper", "muslims", "hose", "mediterranean", "worthy", "reveals", "architects", "saints", "carries", "sig", "duo", "excessive", "devon", "helena", "saves", "regarded", "valuation", "unexpected", "cigarette", "fog", "characteristic", "marion", "lobby", "egyptian", "tunisia", "outlined", "consequently", "treating", "punch", "appointments", "gotta", "cowboy", "narrative", "enormous", "karma", "consist", "betty", "queens", "quantitative", "lucas", "subdivision", "tribes", "defeat", "distinction", "honduras", "naughty", "hazards", "insured", "harper", "livestock", "exemption", "tenant", "cabinets", "tattoo", "shake", "algebra", "shadows", "holly", "silly", "yea", "mercy", "hartford", "freely", "marcus", "sunrise", "wrapping", "mild", "fur", "nicaragua", "tar", "belongs", "readily", "soc", "fence", "infinite", "diana", "relatives", "lindsay", "clan", "legally", "shame", "satisfactory", "revolutionary", "bracelets", "civilian", "mesa", "fatal", "remedy", "breathing", "briefly", "thickness", "adjustments", "genius", "discussing", "fighter", "flesh", "retreat", "adapted", "barely", "wherever", "estates", "rug", "democrat", "borough", "maintains", "failing", "ka", "retained", "pamela", "andrews", "marble", "extending", "jesse", "hull", "surrey", "dem", "blackberry", "highland", "meditation", "macedonia", "combining", "brandon", "instrumental", "giants", "organizing", "shed", "balloon", "winston", "ham", "solved", "tide", "hawaiian", "partition", "invisible", "consoles", "funk", "magnet", "translations", "jaguar", "reel", "sheer", "commodity", "posing", "wang", "kilometers", "bind", "thanksgiving", "rand", "hopkins", "urgent", "guarantees", "infants", "gothic", "cylinder", "witch", "buck", "indication", "eh", "congratulations", "cohen", "sie", "puppy", "acre", "cigarettes", "revenge", "expires", "enemies", "aqua", "chen", "emma", "finances", "accepts", "enjoying", "conventions", "eva", "patrol", "smell", "pest", "coordinates", "carnival", "roughly", "promises", "responding", "reef", "physically", "divide", "consecutive", "satin", "bon", "deserve", "attempting", "representations", "chan", "worried", "tunes", "garbage", "competing", "combines", "mas", "beth", "bradford", "len", "phrases", "kai", "peninsula", "chelsea", "boring", "reynolds", "dom", "jill", "accurately", "speeches", "reaches", "considers", "sofa", "ministries", "vacancies", "parliamentary", "prefix", "lucia", "savannah", "barrel", "typing", "nerve", "dans", "planets", "deficit", "boulder", "pointing", "renew", "coupled", "viii", "harold", "circuits", "texture", "jar", "somerset", "acknowledge", "thoroughly", "antigua", "nottingham", "thunder", "tent", "caution", "identifies", "qualification", "locks", "modelling", "namely", "miniature", "hack", "dare", "interstate", "pirates", "aerial", "hawk", "consequence", "rebel", "systematic", "perceived", "origins", "hired", "textile", "lamb", "madagascar", "nathan", "tobago", "presenting", "cos", "centuries", "magnitude", "richardson", "hindu", "vocabulary", "licking", "earthquake", "fundraising", "weights", "albania", "geological", "lasting", "wicked", 
"introduces", "kills", "pushed", "ro", "participated", "junk", "wax", "lucy", "answering", "hans", "impressed", "slope", "failures", "poet", "conspiracy", "surname", "theology", "nails", "evident", "epic", "saturn", "organizer", "nut", "sake", "twisted", "combinations", "preceding", "merit", "cumulative", "planes", "edmonton", "tackle", "disks", "arbitrary", "prominent", "retrieve", "lexington", "vernon", "sans", "irs", "fairy", "builds", "shaft", "lean", "bye", "occasional", "leslie", "deutsche", "ana", "innovations", "kitty", "drain", "monte", "fires", "algeria", "blessed", "luis", "reviewing", "cardiff", "cornwall", "favors", "potato", "panic", "explicitly", "sticks", "leone", "ez", "citizenship", "excuse", "reforms", "basement", "onion", "strand", "sandwich", "uw", "lawsuit", "alto", "cheque", "hierarchy", "influenced", "banners", "reject", "eau", "abandoned", "bd", "circles", "italic", "merry", "mil", "gore", "complement", "cult", "dash", "passive", "mauritius", "valued", "cage", "requesting", "courage", "verde", "extraction", "elevation", "coleman", "hugh", "lap", "utilization", "beverages", "jake", "efficiently", "textbook", "dried", "entertaining", "luther", "frontier", "settle", "stopping", "refugees", "knights", "hypothesis", "palmer", "medicines", "flux", "derby", "peaceful", "altered", "doctrine", "scenic", "intersection", "sewing", "consistency", "collectors", "conclude", "recognised", "munich", "oman", "propose", "azerbaijan", "lighter", "rage", "uh", "prix", "astrology", "pavilion", "tactics", "trusts", "occurring", "supplemental", "travelling", "talented", "annie", "pillow", "induction", "derek", "precisely", "shorter", "harley", "spreading", "provinces", "relying", "paraguay", "steal", "parcel", "refined", "bo", "fifteen", "widespread", "incidence", "fears", "predict", "boutique", "rolled", "avon", "incidents", "peterson", "rays", "shannon", "enhancing", "flavor", "alike", "walt", "homeless", "horrible", "hungry", "metallic", "blocked", "interference", "warriors", "palestine", "undo", "atmospheric", "wm", "dana", "halo", "curtis", "parental", "strikes", "lesser", "publicity", "marathon", "ant", "proposition", "pressing", "gasoline", "apt", "dressed", "scout", "belfast", "dealt", "niagara", "inf", "eos", "charms", "trader", "bucks", "allowance", "denial", "uri", "designation", "thrown", "raises", "gem", "duplicate", "criterion", "badge", "wrist", "civilization", "analyzed", "heath", "tremendous", "ballot", "varying", "remedies", "validity", "trustee", "weighted", "angola", "performs", "realm", "corrected", "jenny", "helmet", "salaries", "elephant", "yemen", "encountered", "scholar", "nickel", "surrounded", "geology", "creatures", "coating", "commented", "wallet", "cleared", "accomplish", "boating", "drainage", "corners", "broader", "vegetarian", "rouge", "yeast", "yale", "newfoundland", "sn", "pas", "clearing", "investigated", "ambassador", "coated", "intend", "stephanie", "contacting", "vegetation", "doom", "louise", "kenny", "specially", "owen", "hitting", "yukon", "beings", "bite", "aquatic", "reliance", "habits", "striking", "myth", "infectious", "singh", "gig", "gilbert", "continuity", "brook", "fu", "phenomenon", "ensemble", "assured", "biblical", "weed", "conscious", "accent", "eleven", "wives", "utilize", "mileage", "auburn", "unlock", "pledge", "vampire", "angela", "relates", "nitrogen", "dice", "dock", "differently", "framing", "organised", "musician", "blocking", "sorts", "limiting", "dispatch", "revisions", "papua", "restored", "hint", "armor", "riders", 
"chargers", "remark", "dozens", "varies", "reasoning", "rendered", "picking", "charitable", "guards", "annotated", "convinced", "openings", "buys", "replacing", "watershed", "councils", "occupations", "acknowledged", "nudity", "pockets", "granny", "pork", "zu", "equilibrium", "inquire", "pipes", "characterized", "laden", "cottages", "merge", "privilege", "edgar", "develops", "qualifying", "estimation", "barn", "pushing", "fleece", "fare", "pierce", "allan", "dressing", "sperm", "bald", "frost", "leon", "institutes", "mold", "dame", "fo", "sally", "yacht", "tracy", "prefers", "drilling", "herb", "ate", "breach", "whale", "traveller", "appropriations", "suspected", "tomatoes", "beginners", "instructors", "bedford", "stationery", "idle", "mustang", "unauthorized", "clusters", "competent", "momentum", "fin", "io", "pastor", "mud", "calvin", "uni", "shark", "contributor", "demonstrates", "phases", "grateful", "emerald", "gradually", "laughing", "grows", "cliff", "desirable", "tract", "ballet", "ol", "journalist", "abraham", "bumper", "afterwards", "religions", "garlic", "shine", "senegal", "explosion", "banned", "briefs", "signatures", "cove", "casa", "mu", "daughters", "conversations", "radios", "tariff", "opponent", "simplified", "muscles", "wrapped", "swift", "vagina", "eden", "distant", "champagne", "ala", "decimal", "deviation", "superintendent", "dip", "hostel", "housewives", "employ", "mongolia", "penguin", "magical", "influences", "irrigation", "miracle", "reprint", "reid", "hydraulic", "centered", "robertson", "yearly", "penetration", "wound", "belle", "rosa", "conviction", "hash", "omissions", "writings", "hamburg", "lazy", "qualities", "fathers", "charging", "cas", "marvel", "lined", "cio", "dow", "prototype", "petite", "apparatus", "terrain", "pens", "explaining", "yen", "strips", "gossip", "rangers", "nomination", "empirical", "rotary", "worm", "dependence", "beginner", "boxed", "lid", "cubic", "deaf", "commitments", "suggesting", "sapphire", "skirts", "mats", "remainder", "crawford", "labeled", "privileges", "marking", "commodities", "serbia", "sheriff", "griffin", "declined", "guyana", "spies", "neighbor", "elect", "highways", "concentrate", "intimate", "reproductive", "preston", "deadly", "molecules", "rounds", "refrigerator", "intervals", "sentences", "exclusion", "holocaust", "keen", "peas", "receivers", "disposition", "variance", "navigator", "investigators", "cameroon", "baking", "computed", "needle", "baths", "cathedral", "brakes", "og", "nirvana", "ko", "owns", "til", "sticky", "destiny", "generous", "madness", "climb", "blowing", "fascinating", "landscapes", "heated", "lafayette", "wto", "computation", "hay", "salvation", "dover", "adrian", "predictions", "accompanying", "vatican", "brutal", "selective", "arbitration", "token", "editorials", "zinc", "sacrifice", "seekers", "isa", "removable", "yields", "gibraltar", "levy", "suited", "anthropology", "skating", "aberdeen", "emperor", "grad", "bras", "belts", "blacks", "educated", "reporters", "burke", "proudly", "necessity", "rendering", "inserted", "pulling", "curves", "suburban", "touring", "clara", "tomato", "waterproof", "expired", "travels", "flush", "pale", "hayes", "humanitarian", "invitations", "functioning", "delight", "survivor", "garcia", "economies", "alexandria", "moses", "counted", "undertake", "declare", "continuously", "johns", "valves", "gaps", "impaired", "achievements", "donors", "tear", "jewel", "teddy", "convertible", "teaches", "ventures", "nil", "stranger", "tragedy", "julian", "nest", "painful", 
"velvet", "tribunal", "ruled", "pensions", "prayers", "nowhere", "cop", "paragraphs", "gale", "joins", "adolescent", "nominations", "wesley", "dim", "lately", "cancelled", "mattress", "likewise", "banana", "introductory", "cakes", "stan", "reservoir", "occurrence", "idol", "bloody", "remind", "worcester", "charming", "mai", "tooth", "disciplinary", "annoying", "respected", "stays", "disclose", "affair", "drove", "upset", "restrict", "beside", "mines", "portraits", "rebound", "logan", "mentor", "interpreted", "fought", "baghdad", "elimination", "metres", "hypothetical", "immigrants", "complimentary", "pencil", "freeze", "performer", "abu", "titled", "commissions", "sphere", "moss", "concord", "graduated", "endorsed", "ty", "surprising", "walnut", "lance", "ladder", "italia", "unnecessary", "dramatically", "liberia", "sherman", "cork", "hansen", "senators", "mali", "yugoslavia", "bleeding", "characterization", "colon", "likelihood", "lanes", "purse", "fundamentals", "contamination", "endangered", "compromise", "masturbation", "stating", "dome", "caroline", "expiration", "bless", "engaging", "negotiation", "crest", "opponents", "triumph", "nominated", "electoral", "welding", "deferred", "alternatively", "heel", "alloy", "plots", "polished", "yang", "gently", "locking", "casey", "controversial", "draws", "blanket", "bloom", "lou", "elliott", "recovered", "fraser", "justify", "blades", "loops", "surge", "aw", "tahoe", "advert", "possess", "demanding", "defensive", "sip", "forbidden", "vanilla", "deutschland", "picnic", "souls", "arrivals", "practitioner", "dumb", "smithsonian", "hollow", "vault", "securely", "examining", "groove", "revelation", "pursuit", "delegation", "wires", "dictionaries", "mails", "backing", "greenhouse", "sleeps", "blake", "transparency", "dee", "travis", "endless", "orbit", "niger", "bacon", "survivors", "colony", "cannon", "circus", "forbes", "mae", "mel", "descending", "spine", "trout", "enclosed", "feat", "cooked", "transmit", "fatty", "gerald", "pressed", "scanned", "reflections", "hunger", "sic", "municipality", "joyce", "detective", "surgeon", "cement", "experiencing", "fireplace", "endorsement", "disputes", "textiles", "missile", "closes", "seq", "persistent", "deborah", "marco", "assists", "summaries", "glow", "gabriel", "auditor", "violin", "prophet", "bracket", "isaac", "oxide", "oaks", "magnificent", "erik", "colleague", "naples", "promptly", "adaptation", "hu", "harmful", "sexually", "enclosure", "dividend", "newark", "kw", "paso", "phantom", "westminster", "turtle", "distances", "absorption", "treasures", "warned", "ware", "fossil", "mia", "badly", "apollo", "wan", "disappointed", "persian", "continually", "communist", "greene", "grenada", "creations", "jade", "scoop", "acquisitions", "foul", "earning", "excitement", "somalia", "verbal", "blink", "presently", "seas", "carlo", "mysterious", "novelty", "bryant", "tiles", "librarian", "switched", "stockholm", "pose", "grams", "richards", "promising", "relaxation", "goat", "render", "carmen", "ira", "sen", "thereafter", "hardwood", "temporal", "sail", "forge", "commissioners", "dense", "brave", "forwarding", "awful", "nightmare", "reductions", "southampton", "impose", "organisms", "telescope", "asbestos", "portsmouth", "meyer", "enters", "pod", "savage", "advancement", "wu", "willow", "resumes", "bolt", "gage", "throwing", "existed", "whore", "generators", "lu", "wagon", "dat", "favour", "knock", "urge", "generates", "potatoes", "thorough", "inexpensive", "kurt", "peers", "roland", "quilt", "huntington", 
"creature", "ours", "mounts", "syracuse", "lone", "refresh", "aluminium", "michel", "subtle", "notre", "shipments", "stripes", "antarctica", "cope", "shepherd", "cradle", "chancellor", "lime", "kirk", "flour", "controversy", "legendary", "sympathy", "choir", "avoiding", "beautifully", "blond", "expects", "fabrics", "hygiene", "wit", "poultry", "virtue", "burst", "examinations", "surgeons", "bouquet", "promotes", "mandate", "departmental", "ind", "corpus", "johnston", "terminology", "gentleman", "fibre", "reproduce", "shades", "jets", "qui", "threatening", "spokesman", "frankfurt", "prisoner", "daisy", "halifax", "encourages", "assembled", "earliest", "donated", "insects", "terminals", "crude", "morrison", "maiden", "sufficiently", "examines", "viking", "myrtle", "bored", "yarn", "knit", "conditional", "mug", "bother", "budapest", "knitting", "attacked", "mating", "compute", "arrives", "translator", "automobiles", "allah", "continent", "ob", "fares", "longitude", "resist", "challenged", "hoped", "pike", "insertion", "hugo", "wagner", "constraint", "touched", "strengthening", "cologne", "wishing", "ranger", "smallest", "insulation", "newman", "marsh", "scared", "infringement", "bent", "laos", "subjective", "monsters", "asylum", "robbie", "stake", "cocktail", "outlets", "varieties", "arbor", "poison", "dominated", "costly", "derivatives", "prevents", "stitch", "rifle", "severity", "notable", "warfare", "judiciary", "embroidery", "mama", "inland", "greenland", "interpret", "accord", "modest", "countryside", "sorting", "liaison", "unused", "bulbs", "consuming", "tourists", "sandals", "seconded", "waist", "attributed", "seychelles", "fatigue", "owl", "patriot", "sewer", "crystals", "kathleen", "bosch", "forthcoming", "num", "treats", "marino", "detention", "carson", "exceeds", "complementary", "gallon", "coil", "battles", "traders", "carlton", "bitter", "memorandum", "burned", "cardinal", "dragons", "converting", "romeo", "din", "incredibly", "delegates", "turks", "roma", "balancing", "att", "vet", "sided", "claiming", "courtyard", "presidents", "offenders", "depart", "cuban", "tenants", "expressly", "distinctive", "lily", "brackets", "unofficial", "oversight", "privately", "minded", "resin", "allies", "twilight", "preserved", "crossed", "kensington", "monterey", "linen", "rita", "ascending", "seals", "nominal", "alicia", "decay", "weaknesses", "quartz", "registers", "eighth", "usher", "herbert", "authorised", "improves", "advocates", "phenomena", "buffet", "deciding", "skate", "joey", "hackers", "tilt", "granite", "repeatedly", "lynch", "masses", "transformed", "athlete", "franc", "bead", "enforce", "similarity", "landlord", "leak", "timor", "assorted", "implements", "adviser", "flats", "compelling", "vouchers", "expecting", "heels", "voter", "urine", "capri", "towel", "ginger", "suburbs", "imagery", "sears", "als", "flies", "competence", "inadequate", "crying", "matthews", "amateurs", "crane", "defendants", "deployed", "governed", "considerably", "investigating", "rotten", "habit", "bulb", "scattered", "honour", "useless", "protects", "northwestern", "audiences", "iris", "coupe", "hal", "benin", "bach", "manages", "erosion", "abundance", "carpenter", "khan", "insufficient", "highlands", "peters", "fertility", "clever", "primer", "che", "lords", "bu", "tends", "enjoyable", "crescent", "freshman", "playground", "negotiate", "sixty", "exploit", "orgies", "permanently", "concentrated", "distinguish", "ei", "projections", "spark", "illustrate", "lin", "patience", "securing", "pathway", "shallow", 
"stir", "spike", "plated", "jacques", "drawer", "ingredient", "togo", "lifting", "judith", "curtain", "disclosed", "davies", "tactical", "pilots", "copenhagen", "expedition", "pile", "operative", "humour", "maturity", "caller", "distortion", "prosecution", "het", "tonga", "imprint", "natalie", "receipts", "assisting", "shirley", "sanctions", "goodbye", "emerged", "defect", "poorly", "goddess", "backs", "observers", "magnets", "formulas", "spacious", "shoulders", "nas", "argues", "wade", "soils", "chapman", "organs", "det", "loyalty", "beloved", "sometime", "ballard", "beating", "faithful", "libya", "offence", "invested", "whatsoever", "numbered", "terminated", "expands", "sedan", "pony", "comprises", "leap", "bolton", "founding", "swan", "covenant", "dropping", "archaeology", "sailor", "fittings", "lining", "banquet", "cares", "sanctuary", "flora", "statue", "hilary", "quotation", "equals", "hardy", "caravan", "diagrams", "harness", "manipulation", "bells", "vascular", "alongside", "impressions", "yankees", "forwarded", "gal", "transmitter", "dorothy", "freeman", "andre", "ems", "puppies", "relaxing", "delphi", "trophy", "emotion", "nets", "sights", "uniforms", "disasters", "asterisk", "versatile", "liquor", "kindergarten", "profitable", "wounded", "clayton", "derivative", "suffolk", "necklaces", "tot", "occupancy", "doses", "educate", "baked", "glove", "prejudice", "herzegovina", "probable", "baldwin", "incorporation", "rem", "evolutionary", "arriving", "decoration", "trojan", "assistants", "counselor", "spinal", "eliminated", "sooner", "struggling", "enacted", "tenure", "plush", "weber", "unstable", "elk", "nelly", "fulfill", "urged", "reflecting", "brent", "gaining", "definitive", "appropriately", "shifts", "inactive", "lansing", "traveled", "adapt", "extracted", "accession", "patterson", "carriage", "therein", "terminate", "rex", "fuels", "traditionally", "withdraw", "soy", "brett", "anchorage", "paula", "landmark", "greens", "neat", "naming", "stern", "bentley", "bud", "slaves", "dentist", "utilizing", "mis", "burkina", "tutor", "idiot", "comprised", "winnipeg", "charities", "mickey", "sebastian", "aliens", "domino", "raven", "defeated", "strains", "dwelling", "slice", "tanning", "gambia", "aspen", "lacking", "symbolic", "cest", "objectionable", "angles", "pressures", "webb", "mediation", "venus", "bump", "cowboys", "flames", "primitive", "auf", "stocking", "esp", "balloons", "malcolm", "georgetown", "norwich", "halls", "decorations", "pause", "simplicity", "postscript", "dividends", "relaxed", "periodicals", "pearson", "demon", "welcomed", "infinity", "gabon", "notation", "chandler", "aunt", "interviewed", "crow", "dia", "discontinued", "concurrent", "decides", "caption", "bargaining", "complain", "pulmonary", "adhesive", "toledo", "asses", "altitude", "compass", "closet", "couch", "evolved", "downs", "exceeding", "rogue", "unfair", "electronically", "augusta", "infantry", "renowned", "corridor", "philosophical", "scripture", "celebrating", "sahara", "justification", "rebuild", "vacant", "manuscript", "fixing", "gram", "hiding", "methodist", "dye", "sits", "alphabet", "shelves", "toes", "cleaned", "honored", "optic", "hannah", "telephones", "insect", "frances", "diaries", "chili", "grief", "leicester", "sweat", "dolphin", "pendants", "wonders", "ventilation", "masks", "bust", "lateral", "quake", "alley", "gardner", "sanders", "pathways", "telegraph", "pertaining", "memorable", "professors", "monument", "formally", "twain", "ile", "nevis", "dew", "lavender", "justified", "withdrawn", 
"breeze", "debates", "gems", "outgoing", "mann", "yankee", "outs", "deficiency", "gum", "progression", "adv", "saddle", "malaria", "loyal", "torrent", "odyssey", "spite", "nero", "capita", "imply", "inaccuracies", "tendency", "caledonia", "wholly", "chill", "utilized", "embrace", "ein", "liner", "manila", "auxiliary", "initiate", "ua", "elevated", "purely", "fry", "lifts", "vivid", "allegations", "stationary", "corresponds", "foil", "whitney", "celebrated", "alarms", "hunters", "roi", "allison", "stairs", "kt", "acted", "byron", "critique", "honestly", "skull", "continuation", "carnegie", "servant", "falcon", "jointly", "canadians", "avoided", "comprising", "tick", "terrier", "listened", "explanations", "renewed", "incorporating", "variant", "riley", "equatorial", "critic", "sediment", "translators", "squares", "deg", "bot", "lea", "vans", "od", "honeymoon", "percussion", "glue", "cone", "margins", "sands", "survived", "spinning", "adequately", "spectral", "prevalence", "dominica", "contaminated", "fragment", "finishes", "lecturer", "embroidered", "bucket", "steak", "commits", "cobra", "threw", "sutton", "djibouti", "authorize", "decorated", "credited", "cherokee", "apo", "ao", "recruit", "simmons", "gals", "hoc", "wherein", "appearances", "performers", "dessert", "dissertation", "walsh", "nos", "marry", "blankets", "enthusiasm", "confusing", "celebrations", "approaching", "bounce", "ivan", "spiral", "governors", "weakness", "wills", "katherine", "atoms", "jacobs", "mauritania", "tissues", "reminded", "drake", "cynthia", "roosevelt", "practicing", "schmidt", "nicely", "surprisingly", "expressing", "della", "laurel", "carolyn", "rails", "fried", "cairo", "ambulance", "practically", "traded", "signaling", "vivo", "domination", "shrimp", "chords", "molecule", "dedication", "desires", "woody", "dismissed", "cried", "psychic", "cracks", "analyzing", "sincerely", "beaten", "piercing", "antilles", "establishments", "marginal", "visions", "efficacy", "prestige", "cocaine", "accelerated", "pinnacle", "tucker", "recognizes", "plugs", "responsive", "supra", "omitted", "molly", "proximity", "ku", "belonging", "unbiased", "pear", "chiefs", "franz", "collision", "supplementary", "clue", "scandal", "lodges", "dangers", "lys", "travellers", "gia", "scream", "discrepancies", "pirate", "senses", "repeats", "willie", "rival", "slower", "simulated", "culinary", "fairfax", "beck", "huh", "accountant", "propaganda", "offender", "waterloo", "warwick", "rounded", "boarding", "vanity", "mitigation", "tome", "prof", "homer", "daylight", "macdonald", "gases", "dependency", "dioxide", "fireworks", "genus", "approached", "catching", "cutter", "connects", "ont", "liberals", "aperture", "roofing", "dixon", "elastic", "melody", "sins", "cousin", "hath", "recalls", "consultations", "debts", "phillip", "burial", "balcony", "prescriptions", "prop", "avril", "willis", "myths", "camden", "coupling", "knees", "neglect", "emerge", "winchester", "clutch", "shy", "poets", "auditorium", "pedro", "maid", "sid", "carrie", "towels", "canterbury", "trent", "barber", "intuitive", "rigid", "sta", "degradation", "ret", "orthodox", "erin", "ferguson", "fragments", "mariana", "qualitative", "claude", "minorities", "blown", "diffusion", "baton", "polynesia", "barton", "umbrella", "rods", "stimulation", "abbey", "pigs", "olivia", "refugee", "straps", "maya", "discourse", "lancashire", "headache", "stained", "marital", "socialist", "bruno", "attracted", "undertaking", "slavery", "notwithstanding", "feasible", "romans", "credibility", 
"shores", "fest", "thames", "flowing", "montenegro", "deed", "whirlpool", "perfumes", "sustain", "mechanic", "bauer", "eliminating", "rejection", "bowls", "dissemination", "cardinals", "cosmic", "dawson", "defective", "lengths", "beacon", "hoover", "politically", "elective", "forensic", "botanical", "quartet", "suspense", "drafting", "cruel", "observing", "advertised", "commencement", "southwestern", "conform", "helmets", "firing", "eager", "denise", "touching", "vacancy", "papa", "settlements", "strawberry", "chang", "gloria", "elevator", "pupil", "feast", "maggie", "redemption", "profound", "canton", "nina", "registering", "seth", "warn", "conservatives", "bonnie", "laying", "provisional", "compiling", "strive", "releasing", "martinique", "shells", "painter", "ankle", "peso", "leagues", "monkeys", "historically", "transitions", "prevented", "digits", "err", "banker", "sup", "easiest", "borrow", "bamboo", "lv", "denotes", "communicating", "ki", "decks", "vibration", "stepped", "vent", "blunt", "protector", "aux", "react", "understands", "rises", "issuing", "accents", "insane", "buddha", "voyage", "een", "colonel", "transitional", "mozart", "acceleration", "sketch", "hoffman", "balances", "firearms", "nightly", "pitt", "deduction", "dancer", "coats", "pol", "capsules", "hyde", "firmly", "doo", "dots", "pursuing", "aston", "mugs", "washed", "resonance", "mosaic", "rhodes", "fiesta", "vase", "forcing", "fairs", "flute", "durability", "meadows", "hindi", "harsh", "outfit", "substitution", "burma", "cease", "deserves", "aboard", "irving", "perfection", "joints", "overwhelming", "poles", "bounds", "lyon", "santiago", "vera", "advising", "altogether", "devils", "dignity", "europa", "wondered", "cheshire", "boyd", "sliding", "accumulation", "descriptive", "inst", "feasibility", "negotiating", "homo", "pier", "sioux", "cote", "premiums", "lutheran", "fellows", "valencia", "superman", "perkins", "ideally", "splash", "equip", "saga", "probation", "ast", "gran", "commissioned", "hedge", "ke", "fender", "violet", "dancers", "mutation", "envelopes", "alle", "compulsory", "favorable", "rue", "preparations", "maxwell", "illustrates", "inheritance", "curry", "oblique", "pearls", "worms", "satisfying", "succeeded", "apples", "elf", "dewey", "surviving", "pouch", "advent", "proposes", "hooks", "ces", "exploitation", "singers", "mayo", "tasmania", "mansion", "cha", "surrender", "schneider", "accumulated", "arsenal", "dub", "screws", "pyramid", "enjoys", "hacking", "stripe", "averages", "peaks", "tai", "como", "lisp", "limousine", "churchill", "affirmative", "keynote", "planted", "petitioner", "spoon", "bombs", "niche", "fortunately", "cigar", "vis", "calculating", "erie", "berkshire", "proportional", "credentials", "deprecated", "municipalities", "chin", "locker", "jenkins", "squash", "expectation", "severely", "spotted", "curse", "ajax", "coconut", "interrupt", "conductor", "wont", "liberation", "grandfather", "removes", "luxurious", "titan", "booked", "anita", "indirectly", "nile", "blessing", "lumber", "pillows", "portals", "illustrator", "asleep", "potassium", "prompted", "shout", "presidency", "abnormal", "delicate", "convince", "whoever", "straw", "lifted", "mankind", "uncertain", "paramount", "upright", "breakfasts", "inspectors", "emergencies", "ernest", "shocked", "alcoholic", "bakery", "lieutenant", "orchid", "histories", "loses", "atkins", "variability", "observatory", "soda", "waited", "preventive", "peach", "calculus", "stefan", "breathe", "dunn", "smiling", "ounces", "economically", "uncut", 
"intact", "noting", "shifting", "samurai", "moines", "ivy", "delegate", "lightly", "negotiated", "herman", "congestion", "runners", "stove", "accidental", "talents", "nixon", "refuge", "brady", "guadeloupe", "walton", "carved", "ark", "freak", "obstacles", "preferably", "bluff", "jasper", "sed", "newborn", "sadly", "laughed", "avail", "emerson", "regulate", "orchard", "mythology", "trousers", "hatch", "replaces", "tomb", "regina", "stein", "shortage", "privileged", "spill", "goodness", "drift", "extracts", "professions", "explored", "mysteries", "fuller", "decreases", "crisp", "cor", "keeper", "reinforced", "johannesburg", "spells", "specifying", "buddhist", "inevitable", "etiquette", "environ", "nic", "coloured", "births", "kr", "cubs", "wheeler", "ritual", "miguel", "pulp", "onset", "interpreter", "specimens", "initiation", "assay", "reconciliation", "pots", "recognizing", "leigh", "slam", "respects", "tents", "plaque", "accounted", "deposited", "lowe", "beavers", "crib", "defending", "pulls", "autonomous", "granting", "motoring", "appropriation", "condensed", "philippine", "theological", "quietly", "scenery", "drying", "assemblies", "collateral", "learner", "welcomes", "swallow", "tara", "transplant", "usenet", "marines", "lighthouse", "proves", "crab", "jen", "brightness", "maurice", "brooke", "consumed", "maxim", "bore", "depreciation", "technically", "enjoyment", "cows", "austrian", "correspond", "slate", "suzanne", "confined", "inhabitants", "straightforward", "delighted", "morton", "peel", "cue", "jupiter", "simultaneous", "monopoly", "debris", "han", "intentions", "pagan", "widow", "sac", "peg", "randall", "benson", "sleeves", "troubled", "footnote", "vibrant", "evolving", "sweater", "approximation", "skies", "barrett", "burners", "alison", "fitzgerald", "kicks", "disappeared", "canoe", "sovereign", "reminds", "organism", "corrupt", "violated", "correspondent", "drought", "bake", "hurricanes", "symptom", "laughter", "propagation", "ignorance", "explosive", "inventor", "scaling", "juicy", "moody", "fashioned", "grains", "vicinity", "thyroid", "purification", "heal", "southeastern", "wizards", "horoscope", "prosperity", "rainfall", "mum", "launching", "pedal", "plantation", "storing", "asa", "tote", "jumped", "seemingly", "tuned", "passionate", "staples", "mayer", "backward", "sour", "combustion", "scrap", "administer", "bilateral", "bella", "blondes", "disposable", "williamson", "sock", "gentlemen", "terra", "literal", "questioned", "guiding", "charcoal", "vapor", "beware", "aloud", "glorious", "overlap", "handsome", "grounded", "bail", "goose", "fn", "judgement", "cruiser", "cumberland", "gifted", "esteem", "cascade", "endorse", "strokes", "shelby", "hen", "ancestry", "dolphins", "adopting", "landed", "nucleus", "detached", "scouts", "warsaw", "ib", "mist", "verb", "chic", "objection", "phosphate", "noisy", "abide", "sentinel", "birthdays", "preserving", "vest", "neal", "economist", "meridian", "marriages", "regret", "stakes", "rotating", "brigade", "movable", "doubles", "bliss", "humiliation", "tens", "litter", "reflective", "abbreviations", "executing", "greenwich", "flooding", "rugged", "jelly", "grandmother", "renovation", "puma", "appoint", "panthers", "perceptions", "greenwood", "ignition", "humble", "petrol", "midway", "mania", "edwin", "ax", "clare", "recognise", "hostile", "aphrodite", "establishes", "whites", "rant", "trapped", "bolts", "diplomatic", "fringe", "linguistic", "internally", "planetary", "laurent", "ego", "manuel", "gaza", "influenza", "gill", "rude", 
"sang", "steele", "citing", "viewpoint", "nay", "servants", "meanings", "conception", "unemployed", "heavenly", "exeter", "amusement", "middlesex", "curl", "albanian", "overflow", "hastings", "subsidies", "thirds", "willingness", "implicit", "patriotic", "simplify", "darling", "schwartz", "satan", "ornaments", "oppose", "terrific", "definite", "congregation", "regiment", "cheer", "everett", "reviewers", "misleading", "marty", "vine", "vale", "whereby", "deceased", "sparks", "simpler", "captures", "capitalism", "hancock", "falkland", "cur", "mammals", "grape", "russ", "peppers", "deeds", "lively", "inequality", "educator", "premature", "tripod", "immigrant", "demonstrations", "obsolete", "rust", "lon", "interfere", "traps", "shuffle", "wardrobe", "vin", "successes", "racer", "fabrication", "guilt", "sweep", "nash", "exploited", "bladder", "inflammatory", "iss", "immunity", "bets", "doyle", "ducks", "paints", "neighbourhood", "cheating", "carr", "fade", "tastes", "storms", "smiled", "jurisdictions", "scrutiny", "regeneration", "lunar", "differentiation", "shields", "nonsense", "invented", "elaine", "posed", "subjected", "tasting", "gwen", "mob", "expose", "borrowing", "arises", "imf", "precautions", "branded", "manning", "lisbon", "forks", "monk", "boxer", "shining", "weigh", "clerical", "voyager", "hobart", "moose", "dorset", "buenos", "conscience", "crush", "mystic", "solicitation", "rectangular", "fischer", "pooh", "enthusiast", "positively", "shaping", "ich", "afghan", "inspire", "paulo", "torn", "meantime", "pumping", "patented", "revival", "disappear", "lever", "redundant", "regency", "tasty", "gag", "mccarthy", "heck", "civilians", "bark", "carts", "wasted", "cocoa", "invites", "cushion", "reversed", "lynx", "goa", "specimen", "ancestors", "panther", "mixes", "graves", "branding", "examiner", "vineyard", "meadow", "feeder", "mercer", "roms", "goodman", "listener", "chloride", "awaiting", "kane", "becker", "bulls", "orion", "councillor", "hurry", "clarkson", "beneficiary", "hanson", "offspring", "panorama", "roth", "odor", "demanded", "wastes", "clash", "fidelity", "sis", "castro", "flew", "holden", "ale", "sem", "rhapsody", "trumpet", "solitaire", "decreasing", "freezing", "kaiser", "wallis", "criminals", "retire", "rumors", "accomplishments", "emergence", "theatres", "apex", "crimson", "compassion", "needing", "twentieth", "pronounced", "extensively", "stain", "conrad", "wished", "transient", "kicked", "coloring", "curb", "reign", "trivial", "coke", "clauses", "baron", "sensible", "unlawful", "bates", "webs", "swinging", "accountable", "thrust", "proving", "opposing", "novice", "hewitt", "dei", "delightful", "cane", "cruising", "fury", "personalities", "stiff", "todo", "noah", "wore", "christchurch", "traces", "rabbi", "puffy", "weston", "headings", "enthusiasts", "ridiculous", "scattering", "secretaries", "contracted", "elbow", "fights", "scholarly", "detailing", "stark", "roberto", "strongest", "hammond", "padded", "circa", "revise", "contributes", "surroundings", "proficiency", "uranium", "honours", "consolidate", "daniels", "billions", "hut", "stafford", "labrador", "refusal", "lima", "suppression", "weaver", "readiness", "secular", "majesty", "fishery", "teresa", "distributing", "estimating", "outdated", "dues", "pewter", "distress", "pumpkin", "notably", "intends", "trevor", "homosexual", "garment", "supplying", "secondly", "razor", "cough", "cerebral", "grandma", "oceans", "displacement", "backwards", "arrows", "volunteering", "presumably", "plea", "constructive", "bundles", 
"tibet", "pres", "isles", "stretching", "ovens", "garrett", "esther", "abundant", "deductible", "priests", "accompany", "compares", "hesitate", "inspiring", "prey", "deposition", "laurie", "tas", "zodiac", "pavement", "keller", "pedestrian", "fencing", "artery", "inlet", "rub", "violate", "stimulate", "realise", "fluids", "conveniently", "lick", "gov", "stealth", "ter", "ness", "repayment", "canopy", "gloss", "whip", "porch", "pertinent", "lifelong", "promoter", "collegiate", "construed", "interchange", "remotely", "fletcher", "concise", "fibers", "handful", "brains", "curtains", "eaten", "indigo", "retaining", "kelley", "autobiography", "conditioned", "prohibition", "motions", "emphasize", "excite", "rebels", "believing", "hilarious", "salisbury", "gu", "quoting", "sinks", "steep", "dynasty", "creed", "nan", "raiders", "spreads", "elegance", "volatile", "pointers", "sensory", "throne", "chartered", "slopes", "socially", "unfortunate", "seized", "territorial", "leases", "consisted", "randolph", "memoirs", "alkaline", "expire", "och", "midst", "borne", "forgive", "competitor", "mansfield", "neighbours", "marvin", "conversions", "usable", "tempo", "mutations", "readable", "almanac", "conway", "ay", "gail", "responds", "denote", "slayer", "payne", "purchaser", "relies", "inserting", "tibetan", "prepares", "concludes", "waterford", "rodney", "cylinders", "mus", "selects", "fulton", "directing", "nationality", "torch", "zurich", "stretched", "depressed", "encounters", "haunted", "spares", "symmetry", "bout", "salons", "olympia", "hank", "negligence", "screened", "helper", "carlisle", "rancho", "transferring", "stepping", "hacks", "attic", "appetite", "sensation", "piper", "morality", "honorable", "wealthy", "handicap", "skinny", "sewage", "endowment", "demonstrating", "avec", "sonoma", "esta", "defender", "amos", "wretch", "sunlight", "stems", "wo", "ventura", "convey", "ang", "evergreen", "bearings", "govern", "feather", "fond", "sore", "fiat", "sixteen", "blinds", "traits", "tightly", "graded", "successor", "intrusion", "sickness", "guiana", "underneath", "prohibit", "noel", "cans", "abused", "avery", "brushes", "tenth", "anthology", "prosecutor", "smiles", "merged", "auditors", "grandchildren", "desks", "capsule", "aided", "suspend", "eternity", "introductions", "weighing", "currents", "aide", "kindly", "nes", "protests", "sharks", "notch", "minors", "dances", "revealing", "reprinted", "fernando", "mapped", "resurrection", "lieu", "decree", "tor", "discovering", "tuberculosis", "lacks", "horizons", "daytime", "elaborate", "contour", "gamble", "fra", "descent", "gravel", "analyse", "disturbing", "judged", "shutter", "illusion", "ambitious", "ole", "notorious", "ibid", "residue", "reds", "enlarged", "stephens", "transforming", "stripping", "bart", "assert", "fluctuations", "bowie", "archaeological", "inspect", "thrice", "babylon", "edison", "casualty", "musings", "poses", "noir", "eli", "evan", "mushroom", "designate", "scent", "sequel", "gymnastics", "titanic", "knob", "wolves", "exquisite", "upward", "sentenced", "dundee", "principe", "acquiring", "judging", "unchanged", "kicking", "meg", "fines", "grasp", "streak", "ounce", "thirteen", "tragic", "theodore", "buena", "irrelevant", "professionally", "liberties", "sounding", "milano", "toast", "happily", "hooked", "shrink", "knox", "unesco", "mutually", "beaded", "remembering", "boca", "exodus", "compartment", "brittany", "dove", "testified", "iis", "cunningham", "derive", "affinity", "presbyterian", "pretend", "buddhism", "amnesty", 
"borrower", "gloucester", "warrants", "owens", "fairness", "needles", "coll", "quota", "discreet", "versa", "imp", "oi", "mack", "pu", "sung", "lowell", "whichever", "starr", "elliot", "uae", "chooses", "tuscany", "crowded", "tickling", "wee", "unreal", "wounds", "advisers", "manufactures", "physiological", "addison", "charters", "generalized", "unprecedented", "flint", "dummy", "financially", "awake", "sanitation", "swivel", "ally", "dissolved", "cleanliness", "kung", "collectively", "inhibition", "burnt", "solidarity", "frustrated", "muhammad", "alma", "ger", "hanover", "inverse", "clifton", "holt", "isis", "verdict", "nominee", "medals", "dickinson", "christi", "lister", "recurring", "studs", "rhetoric", "modifying", "incubus", "impulse", "surveyed", "creditors", "dull", "tis", "cabins", "commenced", "ballroom", "employing", "satellites", "ignoring", "stevenson", "coherent", "beetle", "converts", "majestic", "bicycles", "omni", "clifford", "critically", "cy", "composers", "localities", "owe", "reciprocal", "accelerate", "hatred", "questioning", "manifest", "indications", "petty", "permitting", "som", "behave", "bees", "zeppelin", "felix", "shiny", "carmel", "encore", "smash", "angelina", "braun", "destructive", "sockets", "claimant", "psa", "ample", "countless", "energies", "repealed", "listeners", "abusive", "merits", "scarf", "strangers", "garland", "voor", "riviera", "apprentice", "obscure", "napoleon", "glamour", "hated", "sigh", "trolley", "principals", "sidney", "spicy", "frankly", "chronological", "itinerary", "fools", "beard", "discoveries", "economical", "miniatures", "wedge", "adjusting", "mock", "peggy", "bats", "patriots", "ruins", "sheila", "dependencies", "benton", "chateau", "denis", "homestead", "changer", "sergeant", "melt", "syrian", "ned", "cypress", "courtney", "cites", "prospectus", "protectors", "interiors", "encouragement", "disadvantages", "abbott", "tailor", "chocolates", "faux", "supervised", "interpreting", "pascal", "tha", "serenity", "ore", "pant", "sheridan", "gallons", "attainment", "sanitary", "cooperate", "dreaming", "fortunate", "mushrooms", "interpretations", "geoffrey", "faults", "silva", "grease", "diablo", "cairns", "premise", "epidemic", "prima", "rite", "cinnamon", "lac", "discharged", "alba", "underworld", "variants", "palms", "lawsuits", "seated", "lattice", "realization", "absorbed", "sirius", "chord", "vous", "turf", "asphalt", "improper", "dilemma", "rebuilding", "livingston", "commenting", "shifted", "tangible", "smoked", "hawks", "irons", "comet", "berg", "baltic", "corrective", "competency", "muse", "probing", "teachings", "tyne", "fowler", "xv", "youngest", "contingent", "refreshing", "syrup", "xii", "warmth", "hawkins", "lust", "correlated", "augustine", "dominion", "verses", "astronomical", "solvent", "luna", "amplitude", "aesthetic", "commercially", "dion", "wolfgang", "completeness", "irregular", "barker", "solids", "capturing", "certify", "consulted", "realised", "jude", "eighteen", "singular", "jennings", "demons", "unacceptable", "redistribute", "coping", "baxter", "outbreak", "abdominal", "deficiencies", "curved", "milestone", "erase", "lien", "nip", "bites", "prose", "marx", "incidental", "toni", "arguing", "vein", "hale", "swear", "bel", "clown", "spontaneous", "summers", "taboo", "equestrian", "malicious", "consume", "amazed", "fourteen", "legislators", "volcano", "capacities", "skeleton", "tsp", "suspects", "displaced", "sounded", "honesty", "dwarf", "bis", "northeastern", "shocks", "rewarding", "battalion", "candid", 
"schooling", "thornton", "schoolgirl", "caesar", "pines", "stellar", "davenport", "locating", "monogram", "philippe", "aix", "ornament", "urges", "sophie", "attacking", "microscope", "threaten", "bait", "badges", "kitten", "brides", "dent", "stealing", "bullets", "emphasized", "glossy", "informations", "haired", "alterations", "pablo", "biographical", "confirms", "cavity", "molded", "vladimir", "ida", "probate", "terrestrial", "completes", "beams", "props", "incense", "formulated", "dough", "stool", "towing", "welch", "rosemary", "millionaire", "turquoise", "exposures", "boone", "substituted", "horde", "paperwork", "nanny", "suburb", "hutchinson", "cohort", "succession", "alliances", "sums", "averaged", "glacier", "pueblo", "rigorous", "relieve", "clarion", "override", "angus", "enthusiastic", "lame", "squeeze", "sar", "burgundy", "struggles", "farewell", "soho", "ashes", "vanguard", "natal", "locus", "evenings", "misses", "troubles", "elton", "purity", "shaking", "witnessed", "cellar", "friction", "prone", "valerie", "enclosures", "mer", "equitable", "fuse", "lobster", "judaism", "atlantis", "amid", "onions", "corinthians", "crosses", "uncomfortable", "sylvia", "furnace", "poisoning", "doubled", "clues", "inflammation", "rabbits", "icc", "transported", "crews", "goodwill", "anxious", "tariffs", "norris", "ly", "baptism", "cutlery", "overlooking", "knot", "rad", "gut", "staffordshire", "factories", "swords", "advancing", "timed", "evolve", "yuan", "esa", "suspicious", "leased", "subscribed", "tate", "dartmouth", "brewing", "coop", "blossom", "scare", "confessions", "bergen", "lowered", "thief", "prisons", "pictured", "feminine", "grabbed", "rocking", "nichols", "blackwell", "fulfilled", "sweets", "nautical", "imprisonment", "employs", "gutenberg", "bubbles", "ashton", "pitcher", "judgments", "muscular", "motif", "illnesses", "plum", "saloon", "prophecy", "loft", "historian", "elm", "facsimile", "hurts", "folded", "sofia", "comprise", "lump", "disposed", "chestnut", "engraved", "halt", "alta", "pastoral", "unpaid", "ghosts", "doubts", "locality", "substantive", "bulletins", "worries", "hug", "rejects", "spear", "nigel", "referee", "transporter", "jolie", "broadly", "ethereal", "crossroads", "aero", "constructing", "smoothly", "parsons", "bury", "blanc", "autonomy", "bounded", "insist", "birch", "slash", "exercised", "detecting", "howell", "digestive", "entertain", "cinderella", "sesame", "duct", "touches", "joanne", "housewife", "pursued", "lend", "corvette", "yachts", "stacy", "christie", "unrelated", "lois", "levi", "stimulating", "mont", "misuse", "cosmos", "speculation", "dixie", "pans", "enforced", "legion", "fulfillment", "assertion", "shook", "lincolnshire", "dismissal", "mah", "shocking", "overland", "prolonged", "isaiah", "backbone", "unanimously", "sausage", "neighboring", "uncommon", "centralized", "stratford", "heidi", "objections", "unpublished", "ames", "slaughter", "enlightenment", "pistol", "juniors", "rockets", "seymour", "arithmetic", "supposedly", "bombay", "originals", "enrichment", "milford", "buckle", "bartlett", "fetch", "kitchens", "wat", "rey", "divers", "townsend", "blackburn", "founders", "sundays", "upside", "admiral", "patron", "sandwiches", "sinclair", "boiler", "anticipate", "induce", "annapolis", "padding", "diagonal", "unite", "cracked", "debtor", "polk", "mets", "shear", "mortal", "sovereignty", "franchises", "rams", "cleansing", "gown", "ponds", "archery", "excludes", "sabbath", "ruin", "trump", "nate", "escaped", "precursor", "mates", "stella", 
"passages", "vu", "cereal", "comprehension", "sy", "tow", "resolving", "drills", "alexandra", "champ", "agreeing", "rented", "deductions", "harrisburg", "brushed", "augmentation", "otto", "annuity", "assortment", "credible", "ik", "cultured", "importing", "deliberately", "openly", "crawl", "theo", "sparkling", "bindings", "convincing", "flaws", "este", "tracing", "deviations", "incomes", "fragile", "jeremiah", "sapiens", "nyt", "olsen", "serbian", "hai", "restoring", "sanchez", "rushing", "behold", "amherst", "alteration", "murdered", "hazel", "ledger", "scarlet", "crushed", "laughs", "connie", "referendum", "modulation", "statues", "depths", "spices", "communion", "uncertainties", "colonies", "followers", "caldwell", "squadron", "bei", "rupee", "subsidy", "demolition", "irene", "felony", "lungs", "monuments", "veronica", "filtered", "growers", "vinci", "adj", "haul", "acknowledgement", "duly", "roasted", "tenders", "inviting", "rig", "ov", "mick", "mustard", "strait", "masterpiece", "obey", "donkey", "jacks", "conceived", "boasts", "praying", "oss", "multiply", "intercourse", "radial", "mare", "instructed", "stole", "kirby", "armour", "summarized", "avalanche", "northampton", "manuscripts", "cary", "exhibited", "disciples", "shaving", "bishops", "kite", "destroying", "humorous", "faa", "corona", "heap", "griffith", "erection", "quasi", "energetic", "disturbance", "saunders", "ribbons", "jew", "exile", "bilder", "reside", "cashier", "jaw", "butterflies", "eats", "knots", "flea", "offences", "anton", "pals", "celebrates", "hail", "armenian", "longitudinal", "historians", "realities", "mentions", "samson", "jumps", "fleming", "optimistic", "wasting", "acclaimed", "seldom", "morrow", "glitter", "giovanni", "lasted", "awhile", "scaled", "contingency", "wiltshire", "vague", "wraps", "constituents", "herd", "handicapped", "exported", "lag", "warns", "harmless", "sting", "bravo", "believers", "dispersion", "curiosity", "resting", "missiles", "persistence", "coarse", "continents", "carpets", "recovering", "submarine", "blessings", "prevailing", "originated", "axe", "sculptures", "intrinsic", "thoughtful", "nicht", "archer", "hertfordshire", "warmer", "calf", "basil", "grouped", "dominate", "orient", "contra", "damaging", "populated", "renee", "boiling", "journeys", "parsing", "splitting", "derbyshire", "abandon", "rave", "ej", "dy", "cigars", "nicolas", "inference", "ras", "recalled", "transformer", "weiss", "declarations", "rib", "chattanooga", "giles", "drafts", "excursions", "jerk", "shack", "marrow", "tavern", "bathing", "lambert", "epilepsy", "allowances", "goggles", "ses", "unhappy", "foregoing", "certainty", "sleek", "gerard", "antarctic", "ord", "successive", "neglected", "ariel", "monty", "cafes", "classmates", "hitch", "fracture", "ama", "foremost", "nineteenth", "chesapeake", "mahogany", "actresses", "clarence", "ernst", "buster", "moderated", "mal", "nassau", "flap", "ignorant", "allowable", "compositions", "sings", "marcos", "sorrow", "carte", "canned", "collects", "treaties", "endurance", "teaspoon", "insulated", "dupont", "harriet", "philosopher", "rectangle", "woo", "queer", "pains", "decatur", "wrapper", "ahmed", "buchanan", "drummer", "sobre", "ceremonies", "satisfies", "appellate", "comma", "conformity", "avant", "supper", "fulfilling", "hooded", "instability", "seminary", "presenter", "offenses", "emulation", "lengthy", "sonata", "fortress", "contiguous", "perez", "inaccurate", "explanatory", "settlers", "stools", "ministerial", "xavier", "torah", "fao", "publishes", "stacks", 
"owning", "andersen", "sermon", "facilitating", "complained", "ferdinand", "taps", "thrill", "lagoon", "undoubtedly", "withheld", "insisted", "reluctant", "headaches", "ramsey", "oath", "pigeon", "rivals", "freed", "constrained", "parrot", "magnum", "invoked", "invaluable", "keystone", "inclined", "gala", "cheek", "traction", "utterly", "gavin", "illuminated", "lasts", "gloucestershire", "psychologist", "dane", "claudia", "perpetual", "solicitor", "clustering", "glimpse", "verbatim", "innocence", "quicker", "grandparents", "cardboard", "attributable", "sketches", "angelo", "tertiary", "exhausted", "smarter", "shelters", "attain", "dora", "inconvenience", "tang", "vaccination", "farther", "chats", "riot", "fats", "mandarin", "dungeon", "germans", "lilly", "shire", "mosquito", "kashmir", "lyons", "putnam", "corpse", "speedy", "ming", "lush", "barrels", "transformations", "analogue", "werner", "clyde", "honorary", "irwin", "brewer", "exchanged", "adhere", "fran", "rafael", "ccc", "enquire", "toilets", "mains", "whales", "lindsey", "parity", "partitions", "grim", "hubbard", "prism", "chasing", "flop", "aggregation", "shelley", "batting", "borrowed", "rests", "toss", "depicted", "grapes", "proposing", "winding", "ripped", "cobalt", "pity", "downward", "catalogues", "aspire", "harvesting", "garfield", "groom", "jewels", "saturated", "georges", "quincy", "doughty", "weeds", "stripped", "clive", "fixture", "canary", "steadily", "imagined", "darby", "woke", "fills", "proportions", "grips", "clergy", "solicitors", "moderately", "altar", "salvage", "stanton", "creators", "kilometres", "cuff", "repeating", "empires", "oyster", "sturdy", "massacre", "undergo", "risen", "blended", "imperative", "beg", "digging", "lantern", "catches", "evangelical", "eaton", "ruler", "henri", "tokens", "piping", "swept", "staring", "seventy", "troop", "arose", "decomposition", "chatham", "becky", "elders", "interpreters", "supporter", "klaus", "conquest", "repairing", "assemble", "whistle", "dresden", "diversified", "fertilizer", "analytic", "predominantly", "amethyst", "woodward", "rewritten", "concerto", "adorable", "ambition", "torres", "apologize", "restraint", "eddy", "condemned", "berger", "parole", "corey", "kendall", "slips", "trays", "stewardship", "esq", "kisses", "kerr", "regulating", "flock", "exporting", "arabian", "bending", "boris", "ammunition", "vega", "pleasures", "shortest", "denying", "shave", "sexe", "disruption", "galway", "colt", "artillery", "furnish", "precedence", "grinding", "rubbish", "missionary", "knocked", "swamp", "pitching", "bordeaux", "manifold", "wf", "tornado", "possessed", "upstairs", "turtles", "vauxhall", "welcoming", "learns", "manipulate", "dividing", "hickory", "renovated", "inmates", "slices", "cody", "lawson", "quo", "damned", "beethoven", "faint", "rebuilt", "proceeded", "lei", "tentative", "peterborough", "fierce", "jars", "authenticity", "hips", "rene", "gland", "wigs", "resignation", "striped", "zion", "blends", "garments", "fraternity", "tapestry", "originating", "stu", "chap", "blows", "inevitably", "converse", "gardener", "winnie", "ita", "higgins", "warwickshire", "penguins", "attracting", "jeeves", "harp", "wes", "denton", "anthem", "tack", "whitman", "nowadays", "woodstock", "sack", "inferior", "abuses", "inspected", "deb", "jockey", "indicative", "incumbent", "ithaca", "edmund", "upholstery", "aggression", "practiced", "ella", "casualties", "monarch", "housed", "administering", "temptation", "havana", "roe", "nasal", "restrictive", "costing", "ranged", "hier", 
"spruce", "paradox", "billings", "jeanne", "oxidation", "marin", "halfway", "amending", "conflicting", "georgian", "compensate", "recherche", "loser", "claus", "braves", "cracking", "sued", "shoots", "interrupted", "hemisphere", "miranda", "clover", "kindness", "porto", "directs", "jolly", "snakes", "swelling", "spanning", "politician", "femme", "unanimous", "railways", "approves", "scriptures", "misconduct", "lester", "resides", "wording", "obliged", "perceive", "rockies", "siege", "exercising", "voluntarily", "atkinson", "nord", "truths", "grouping", "wolfe", "thereto", "authorizing", "enamel", "toby", "radiant", "virgins", "firstly", "martini", "butte", "reeves", "suspicion", "disadvantage", "bastard", "spends", "hicks", "pratt", "pedigree", "fraudulent", "sherwood", "forgiveness", "almond", "har", "petitions", "francais", "trenton", "chalk", "omar", "alexis", "axle", "puppet", "cultivation", "surveying", "grazing", "pillar", "mirage", "questionable", "seaside", "precinct", "renamed", "cobb", "unbelievable", "soluble", "piracy", "rowing", "siding", "hardest", "forrest", "reminders", "negro", "blanca", "equivalents", "johann", "pineapple", "wrath", "opal", "simplest", "patrons", "peculiar", "toon", "europeans", "commence", "descendants", "redmond", "safeguard", "lars", "obsession", "grind", "albeit", "billiards", "clint", "bankers", "righteous", "eo", "redistribution", "freaks", "tra", "sincere", "intentionally", "blitz", "tended", "censorship", "cactus", "viva", "attained", "blew", "howe", "nap", "splendid", "janice", "lava", "leonardo", "sucked", "scissors", "cooks", "sharply", "granada", "laurence", "rebellion", "rainy", "tho", "regent", "evelyn", "vinegar", "vie", "pluto", "gil", "vail", "fisherman", "misery", "undergoing", "limerick", "envy", "sweeping", "healthier", "ussr", "preface", "jameson", "grievance", "unread", "sentiment", "pencils", "galloway", "forged", "viola", "disclosures", "provence", "computerized", "rustic", "rumor", "dillon", "shah", "eleanor", "deception", "conducts", "divorced", "rushed", "weighs", "magnolia", "diver", "disappointment", "castles", "notions", "plateau", "dexter", "palette", "blaze", "wreck", "threatens", "strengthened", "sammy", "wakefield", "devastating", "centro", "arabs", "bild", "robbery", "eine", "jasmine", "crochet", "brock", "crowds", "hoops", "macon", "stamped", "increment", "ju", "ideals", "chloe", "ape", "gee", "apologies", "malignant", "dismiss", "preceded", "lawful", "stag", "crosby", "rash", "gateways", "collapsed", "horns", "diversion", "fantasies", "beginnings", "reversal", "lex", "presses", "ordination", "oxfordshire", "yves", "tandem", "boil", "deliberate", "gagged", "surprises", "abe", "roc", "barley", "potent", "vo", "amusing", "mastering", "nerves", "retains", "chimney", "naomi", "proverbs", "risky", "mistaken", "carving", "miracles", "clair", "slipped", "realism", "crete", "fractions", "bloodhound", "sherry", "desperately", "indies", "tulip", "madame", "remedial", "vain", "bert", "dalton", "bologna", "departing", "maze", "barefoot", "remuneration", "bohemian", "imposing", "damon", "tivoli", "rode", "amen", "marching", "evacuation", "owing", "warp", "catholics", "imo", "faculties", "denies", "reinforce", "inception", "draper", "bowman", "subversion", "benny", "spires", "barney", "homosexuality", "declares", "masonry", "medicinal", "accrued", "temples", "realizing", "annum", "cemeteries", "indoors", "telescopes", "magellan", "champs", "averaging", "salads", "addicted", "flashlight", "disappointing", "eighty", "unlocked", 
"scarce", "roche", "ropes", "spiders", "obedience", "plague", "diluted", "canine", "gladly", "brewery", "lineage", "mehr", "brew", "vaughan", "kern", "julius", "coup", "cannes", "morse", "dominance", "piston", "itu", "cords", "revisited", "cass", "sealing", "topped", "rag", "despair", "fore", "absorb", "injected", "alps", "commodore", "enlisted", "prophets", "supernatural", "overlooked", "ditch", "feared", "prelude", "rowe", "slick", "limestone", "commentaries", "manpower", "lec", "chunk", "reels", "lob", "slept", "gregg", "drafted", "chalet", "hopper", "sus", "specialization", "abstraction", "ludwig", "scandinavian", "detained", "luncheon", "zenith", "browns", "waits", "tenor", "softly", "plenary", "scrub", "wilkinson", "limb", "intestinal", "poe", "refusing", "suffers", "occupy", "gan", "bethlehem", "caves", "authoritative", "celestial", "immense", "audrey", "merlin", "aiming", "seizure", "stuttgart", "diplomacy", "differing", "foreigners", "limp", "capitalist", "mute", "prescott", "protestant", "metre", "tricky", "ordinances", "koch", "topaz", "ans", "imaginary", "albion", "sutherland", "dar", "dart", "wrought", "robe", "theresa", "heidelberg", "multitude", "tutors", "ezra", "housekeeping", "captive", "kettle", "visitation", "chr", "gibbs", "baggage", "dusty", "patty", "serena", "satire", "tortured", "pioneers", "crate", "episcopal", "moonlight", "mast", "unfinished", "goth", "cared", "affection", "sworn", "bowen", "vicious", "educating", "kin", "cozy", "mackenzie", "slippers", "earthquakes", "hayward", "wandering", "comb", "liquids", "beech", "vineyards", "amer", "zur", "frogs", "consequential", "unreasonable", "osborne", "stimulus", "economists", "miners", "agnes", "constituency", "rocker", "acknowledges", "alas", "sawyer", "maori", "tense", "predicting", "filipino", "cooled", "prudential", "basel", "migrant", "devotion", "invoke", "arte", "leaning", "paddle", "watkins", "oxley", "anterior", "chop", "rooted", "onyx", "benches", "illumination", "freedoms", "foolish", "finale", "weaker", "foley", "fir", "stirling", "moran", "compose", "nausea", "comfortably", "hoop", "temps", "clearer", "floods", "fritz", "mover", "modeled", "erica", "malaga", "sustaining", "repaired", "diocese", "francois", "obituary", "painters", "thistle", "tem", "sleepy", "footnotes", "rupert", "shrine", "purified", "striving", "dire", "attendant", "gull", "jour", "mir", "northumberland", "memoir", "betsy", "meredith", "fauna", "cliffs", "hayden", "roadside", "smells", "dispose", "waking", "feathers", "reflex", "falcons", "spurs", "sion", "crashed", "travelled", "urgency", "gould", "brit", "eliza", "graduating", "rims", "harmonic", "darts", "shin", "intriguing", "flaw", "tails", "emulator", "discarded", "bibles", "hangs", "joanna", "synonyms", "stranded", "horton", "dolce", "hercules", "pane", "browning", "angular", "veins", "folds", "sneak", "incorrectly", "avoidance", "sauces", "conquer", "probabilities", "immortal", "mariners", "endeavor", "creole", "mateo", "teas", "settling", "badger", "mohammed", "saturdays", "partisan", "pri", "gratitude", "impress", "willy", "anon", "eminent", "ribs", "communicated", "exceptionally", "quilts", "splits", "subscribing", "companions", "cheques", "edith", "screwed", "magna", "sectional", "fashionable", "polly", "tidal", "ballots", "hog", "testify", "poole", "boycott", "vitality", "clerks", "crust", "bothered", "traverse", "vengeance", "dolly", "garrison", "sal", "barb", "huns", "miner", "fashions", "barr", "analogy", "insomnia", "constituent", "aura", "cecil", "sponge", "sect", 
"diner", "anticipation", "enduring", "scarborough", "regis", "winters", "nous", "explosives", "mound", "xiv", "backgammon", "ox", "snatch", "mole", "obs", "owed", "ethan", "kissed", "buff", "butcher", "psalms", "rum", "chefs", "engraving", "constituted", "hamlet", "clad", "excursion", "inverness", "orb", "grange", "resigned", "fled", "enriched", "harrington", "brandy", "swings", "scion", "elle", "reptiles", "vortex", "swallowing", "purses", "bodily", "xiii", "awe", "beaumont", "australasia", "mandy", "hoods", "fireplaces", "requisite", "retrospective", "emphasizes", "lizard", "hawthorne", "bouquets", "wears", "shropshire", "baja", "regal", "safeguards", "cabbage", "cub", "spectator", "arrests", "circumstance", "numbering", "sliced", "reproductions", "byrd", "sidewalk", "prob", "breaker", "curly", "alberto", "asserted", "jealous", "refinement", "durban", "learnt", "hound", "squirrel", "concealed", "wharf", "rhythms", "departures", "shotgun", "stimulated", "chickens", "langley", "briggs", "cheyenne", "lug", "surveyor", "maize", "extinction", "unaware", "discretionary", "ry", "psalm", "scented", "gowns", "spying", "nicholson", "lied", "ek", "bloc", "recurrent", "talbot", "leaks", "tam", "swell", "obstacle", "ville", "mantle", "chico", "driveway", "irony", "gesture", "fairbanks", "parfum", "armies", "hy", "hugs", "greenfield", "santos", "owls", "cutters", "acquires", "ceased", "merging", "plaques", "breadth", "mammoth", "convictions", "intentional", "sophia", "prohibits", "innings", "reorganization", "pronunciation", "concession", "measurable", "ami", "parcels", "pastry", "manners", "phosphorus", "viper", "hid", "volcanic", "gypsy", "thieves", "preaching", "repeal", "uncovered", "hemp", "eileen", "proficient", "pelican", "apocalypse", "cousins", "discharges", "giorgio", "admire", "nk", "poured", "usefulness", "unsolicited", "binds", "unveiled", "burt", "titus", "suffix", "installment", "spindle", "heavens", "wink", "mister", "rounding", "inorganic", "flare", "scholastic", "wight", "withholding", "foliage", "nod", "ocr", "fife", "generals", "crank", "goats", "autographs", "stub", "fundamentally", "creamy", "exposition", "rains", "buckley", "middleton", "organise", "tort", "brace", "novelties", "gigantic", "abdul", "sheldon", "ryder", "octave", "struts", "ud", "suppress", "harding", "dams", "deserved", "violates", "rutherford", "separates", "proofs", "precedent", "confirming", "garth", "nolan", "mach", "facilitated", "paolo", "metaphor", "bridget", "infusion", "jessie", "organising", "argus", "mango", "spur", "jubilee", "landmarks", "polite", "sith", "thigh", "paving", "cyclone", "perennial", "jacqueline", "seventeen", "meats", "wie", "bulldog", "cleavage", "analysed", "uma", "gradual", "brethren", "embodiment", "violating", "recruited", "toilette", "trailing", "pact", "honourable", "lulu", "windy", "punished", "chronology", "mastery", "thermometer", "cranberry", "kan", "downhill", "vita", "steer", "nesting", "vogue", "aired", "outward", "whisper", "ipswich", "compromised", "confession", "deprived", "benedict", "vodka", "molding", "zaire", "bricks", "communism", "leopard", "flowering", "wig", "jingle", "bounty", "arcadia", "fishes", "ringing", "knobs", "taurus", "whiskey", "absurd", "tolerant", "stoves", "enactment", "embryo", "ska", "nora", "salts", "marietta", "furious", "iteration", "vida", "ceilings", "dispenser", "respecting", "approving", "unsafe", "separating", "soups", "residing", "richie", "markings", "moist", "trina", "drained", "mule", "cummings", "cessation", "append", "motive", 
"pests", "seasoned", "sunflower", "duel", "bernardino", "stocked", "bethel", "entre", "sunderland", "doris", "motives", "reinforcement", "dwight", "provost", "guessing", "tal", "mead", "harlem", "throttle", "gong", "ber", "sympathetic", "fridays", "isolate", "unconscious", "bays", "faulty", "affidavit", "messiah", "infamous", "pleasing", "seizures", "appealed", "surveyors", "tenacious", "waterfall", "sensual", "persecution", "petit", "burgess", "gaze", "chlorine", "freshly", "saxon", "cabo", "rye", "isabella", "monies", "assassination", "remarkably", "pointe", "stall", "deere", "entirety", "destined", "marcel", "lad", "hulk", "ora", "bal", "flores", "olivier", "portage", "dwellings", "informing", "yellowstone", "characterize", "ricardo", "yourselves", "rotterdam", "hostage", "cracker", "anglican", "monks", "compliment", "camino", "storey", "scotch", "sermons", "remembers", "freddie", "contention", "juliet", "adjunct", "guernsey", "bangor", "persia", "axes", "stirring", "wil", "haze", "pits", "utter", "bottled", "ants", "gastric", "influencing", "rents", "christy", "theirs", "mattresses", "donovan", "lax", "colts", "rehearsal", "strauss", "reputable", "wei", "tuck", "rei", "slab", "lure", "ren", "archbishop", "ling", "incompatible", "emblem", "roadway", "overlapping", "walters", "dunes", "murders", "miserable", "unsuccessful", "decorate", "appleton", "bottoms", "revocation", "vomiting", "chesterfield", "exposing", "pea", "tubs", "simulate", "medina", "thankful", "alaskan", "friedrich", "elephants", "pinch", "flynn", "braces", "calhoun", "deficient", "annotations", "filth", "moderation", "worrying", "outrageous", "kraft", "blackboard", "nitrate", "skates", "comstock", "hers", "grin", "footprint", "tunnels", "crises", "trillion", "comforter", "cashmere", "heavier", "meteorological", "spit", "labelled", "darker", "salomon", "globes", "dissent", "daly", "choral", "unrestricted", "happenings", "leicestershire", "neu", "contempt", "socialism", "hem", "edible", "anarchy", "arden", "clicked", "ineffective", "drawers", "byrne", "acme", "leakage", "shady", "chemist", "evenly", "reclamation", "rove", "lionel", "praised", "rhymes", "blizzard", "erect", "refining", "concessions", "commandments", "malone", "confront", "vests", "lydia", "coyote", "breeder", "electrode", "pollen", "drunken", "mot", "avis", "valet", "cheng", "shrubs", "watering", "barrow", "eliot", "jung", "transporting", "rifles", "posterior", "aria", "elgin", "excise", "poetic", "mortar", "blamed", "rae", "recommending", "inmate", "dirk", "posture", "thereon", "valleys", "declaring", "commencing", "armada", "wrench", "thanked", "arranging", "thrilled", "bas", "amelia", "jonah", "discomfort", "scar", "indictment", "apology", "collars", "andover", "pudding", "plato", "examiners", "salzburg", "rot", "possesses", "squared", "needless", "pies", "palma", "barnett", "ther", "heterogeneous", "aspirations", "fences", "excavation", "luckily", "rutland", "lighted", "pneumonia", "monastery", "erected", "expresses", "migrate", "carton", "lorraine", "councillors", "hague", "transforms", "ammonia", "roxy", "outlaw", "saws", "bovine", "dislike", "systematically", "ogden", "interruption", "demi", "imminent", "madam", "tights", "compelled", "criticized", "hypertext", "electra", "communal", "landlords", "emu", "libby", "seite", "dynamite", "tease", "motley", "aroma", "pierced", "translates", "mais", "cognition", "cain", "verona", "syn", "delegated", "chatting", "punish", "fishermen", "conforming", "causal", "stringent", "rowan", "assigning", "dwell", 
"hacked", "inaugural", "awkward", "weaving", "metropolis", "psychologists", "diligence", "stair", "dine", "enforcing", "struggled", "lookout", "arterial", "injustice", "mystical", "ironing", "commanded", "woodlands", "guardians", "manifesto", "slap", "jaws", "finn", "pedestal", "widening", "underwood", "saline", "sonny", "longevity", "paw", "isabel", "sterile", "botany", "dissolution", "pauline", "quart", "bison", "suppressed", "allegro", "materially", "cit", "amor", "xvi", "fungi", "phyllis", "bengal", "scrolls", "awakening", "fairies", "prescribe", "greed", "nominate", "sparkle", "autograph", "migrating", "refrain", "lastly", "overcoming", "wander", "kona", "relieved", "luc", "elena", "intermittent", "ante", "vols", "revolving", "bundled", "covert", "crater", "leah", "favored", "bred", "fractional", "fostering", "thence", "birthplace", "bleed", "reverend", "transmitting", "serie", "neptune", "caucasian", "goblet", "inventions", "dea", "practicable", "fronts", "ancestor", "russians", "incur", "canonical", "nodded", "confronted", "believer", "australians", "declines", "peacock", "utmost", "yates", "leroy", "helpers", "elapsed", "academies", "tout", "gre", "imitation", "harvested", "dab", "hopeful", "furnishing", "negatively", "residences", "spinach", "liquidation", "predecessor", "cheeks", "hare", "beasts", "philanthropy", "peanuts", "discovers", "discard", "cavalry", "breakers", "quorum", "forwards", "prevalent", "plat", "exploits", "dukes", "offended", "trimmed", "py", "worcestershire", "bonn", "prostitution", "mosque", "horseback", "vested", "terribly", "earnest", "homme", "clancy", "tory", "rossi", "oldham", "gonzales", "vor", "confederate", "presumed", "annette", "climax", "blending", "weave", "postponed", "philosophers", "speeding", "creditor", "exits", "pardon", "oder", "abby", "teller", "mandates", "siena", "veil", "peck", "custodian", "dante", "lange", "quarry", "seneca", "oceanic", "tres", "helm", "burbank", "festive", "rosen", "alla", "preserves", "ingram", "jess", "secretion", "insult", "scraps", "waived", "cured", "buggy", "kennel", "drilled", "souvenirs", "prescribing", "slack", "gin", "differentiate", "jays", "pilgrim", "vines", "susceptibility", "ambiguous", "disputed", "scouting", "royale", "instinct", "gorge", "righteousness", "carrot", "opaque", "bullying", "saul", "flaming", "apis", "marian", "liens", "caterpillar", "remington", "chew", "benefited", "prevail", "musik", "undermine", "omission", "boyle", "mio", "diminished", "jonas", "locke", "cages", "jolla", "capitals", "correctness", "implication", "pap", "banjo", "shaker", "natives", "tive", "stout", "rewarded", "athena", "deepest", "matthias", "duane", "sane", "climbed", "corrupted", "relays", "hanna", "husbands", "fading", "colchester", "persuade", "roaming", "determinations", "weighed", "ashamed", "concierge", "gorilla", "gatherings", "endure", "nom", "cheltenham", "dickens", "juniper", "repetition", "siberian", "preparatory", "fielding", "dune", "hee", "adler", "yosemite", "cursed", "youths", "migrants", "massey", "tumble", "stare", "unlocking", "missy", "meade", "contradiction", "helium", "wonderfully", "dug", "congenital", "trojans", "insanity", "embraced", "finely", "authenticated", "reformed", "tolerate", "lest", "adhesion", "tic", "noticeable", "cette", "aesthetics", "smoker", "benign", "hypotheses", "afforded", "aisle", "dunno", "blur", "evidently", "limbs", "unforgettable", "punt", "tanned", "altering", "bunker", "multiplication", "paved", "fabricated", "pasture", "richest", "cruelty", "mormon", "scots", 
"genuinely", "neighbouring", "plugged", "tyson", "souvenir", "mifflin", "cucumber", "occurrences", "marshal", "anders", "seize", "decisive", "spawn", "blanks", "dungeons", "sailors", "stony", "fayette", "shelving", "annals", "sadness", "periodical", "moe", "dime", "losers", "punta", "flavour", "crypt", "accomplishment", "onwards", "bogus", "carp", "prompts", "witches", "skinner", "dusk", "nouveau", "customary", "vertically", "crashing", "cautious", "possessions", "urging", "passions", "faded", "counterpart", "utensils", "secretly", "tying", "lent", "magician", "indulgence", "johan", "melted", "lund", "fam", "nel", "extremes", "puff", "galileo", "bloomfield", "obsessed", "flavored", "groceries", "motto", "singled", "alton", "staple", "pathetic", "craftsman", "irritation", "rulers", "collisions", "militia", "eis", "conservatory", "bananas", "adherence", "defended", "grille", "elisabeth", "claw", "pushes", "alain", "flagship", "kittens", "illegally", "deter", "tyre", "furry", "cubes", "transcribed", "bouncing", "wand", "cavalier", "ish", "rinse", "outfits", "charlton", "respectfully", "ulster", "tides", "chu", "weld", "venom", "writ", "patagonia", "dispensing", "puppets", "tapping", "immersion", "explode", "toulouse", "escapes", "berries", "happier", "mummy", "punjab", "stacked", "brighter", "cries", "speciality", "warranted", "ruined", "damp", "sanity", "ether", "suction", "crusade", "rumble", "correcting", "shattered", "heroic", "retreats", "formulate", "sheds", "anomalies", "homogeneous", "humphrey", "spheres", "belonged", "assigns", "sofas", "croix", "cushions", "fern", "defenders", "odessa", "lore", "whipped", "vox", "dinners", "rosie", "genealogical", "terre", "selfish", "eventual", "nach", "mitigate", "jamestown", "elisa", "shelton", "boiled", "neville", "natasha", "endeavour", "roswell", "haute", "herring", "unfamiliar", "expectancy", "deterioration", "proclaimed", "arid", "coincidence", "idiots", "mona", "muddy", "nuevo", "hitchcock", "cid", "neighbour", "raspberry", "illusions", "spikes", "enumeration", "suche", "permissible", "yielded", "nuisance", "siam", "latent", "marcia", "drowning", "spun", "shalt", "ric", "loch", "commanding", "sparrow", "poorest", "hector", "brotherhood", "milling", "sinking", "sulphur", "wicker", "balm", "figs", "browne", "nephew", "confess", "chit", "chaotic", "alexandre", "lays", "principally", "visor", "mundo", "jarvis", "drip", "traced", "outright", "melodies", "myriad", "stains", "sandal", "rubbing", "naive", "wien", "skeptical", "remembrance", "detects", "dragged", "foreman", "allegiance", "conduit", "dependable", "echoes", "ladders", "prudent", "glowing", "alchemy", "linden", "sven", "geographically", "alternating", "tristan", "audible", "folio", "presiding", "mans", "waterways", "aff", "fractures", "apprenticeship", "childbirth", "dumped", "barre", "rama", "johannes", "fiery", "convex", "richer", "mop", "urn", "soleil", "connor", "northamptonshire", "biscuits", "disclaims", "sich", "restless", "unanswered", "paired", "vaults", "ahmad", "tossed", "caucus", "cooke", "pillars", "katy", "zoe", "overwhelmed", "salute", "parody", "compensated", "lacked", "circulated", "soo", "maltese", "acorn", "bosses", "pint", "ascension", "ply", "mornings", "mentioning", "flagstaff", "pretoria", "thrive", "rightly", "paragon", "basal", "persist", "wilde", "indispensable", "illicit", "liar", "pledged", "pictorial", "curling", "ares", "smoky", "opus", "aromatic", "flirt", "slang", "emporium", "princes", "restricting", "promoters", "soothing", "freshmen", "departed", 
"aristotle", "finch", "inherently", "krishna", "forefront", "largo", "amazingly", "plural", "dominic", "skipped", "hereinafter", "nur", "extracting", "analogous", "hebrews", "tally", "unpleasant", "uno", "tempted", "blindness", "creep", "staining", "shaded", "cot", "plaster", "novo", "hearted", "obstruction", "agility", "complying", "otis", "overture", "newcomers", "noteworthy", "agile", "sacks", "ionic", "stray", "runaway", "slowing", "watchers", "supplemented", "poppy", "monmouth", "frenzy", "jargon", "kangaroo", "sleeper", "elemental", "unnamed", "doncaster", "particulars", "jerking", "bungalow", "bazaar", "predicate", "recurrence", "recruits", "sharper", "tablespoons", "supervise", "termed", "frauen", "stamping", "coolest", "reilly", "basque", "ire", "pegasus", "silhouette", "dorado", "daring", "realms", "maestro", "turin", "gus", "forte", "tipping", "holster", "fiddle", "crunch", "leipzig", "bard", "kellogg", "reap", "exemplary", "caliber", "apostle", "playful", "icelandic", "multiplied", "enchanted", "belgrade", "styled", "commanders", "thor", "waive", "bethany", "vance", "soprano", "polishing", "marquis", "wen", "translating", "frontiers", "adjoining", "greet", "acclaim", "hardship", "hast", "miriam", "cavaliers", "rollers", "carleton", "pumped", "differentiated", "sonia", "verifying", "almighty", "vel", "intuition", "revoked", "openness", "circulating", "bryce", "ilo", "latch", "verbs", "drank", "darlington", "slippery", "galerie", "outpost", "seville", "mira", "chatter", "santo", "lettuce", "raging", "tidy", "jong", "oppression", "bows", "yielding", "torso", "occult", "expeditions", "nok", "hooker", "lorenzo", "beau", "subordinate", "lilies", "articulate", "ecstasy", "sweetheart", "fulfil", "calcutta", "hobbs", "mediator", "tad", "cultivated", "rang", "disconnected", "consulate", "wilkes", "disagreement", "strands", "sicily", "compost", "adjourned", "familiarity", "erroneous", "pulses", "theses", "stuffing", "jeux", "wilton", "flooded", "reverted", "crackers", "greyhound", "corsair", "ironic", "wards", "unsupported", "hinge", "ultima", "cockpit", "venetian", "sew", "carrots", "faire", "laps", "memorials", "resumed", "conversely", "emory", "stunt", "excuses", "vitae", "hustle", "stimuli", "upwards", "witty", "transcend", "loosely", "anchors", "hun", "atheist", "capped", "oro", "liking", "preacher", "complied", "intangible", "compassionate", "substitutes", "flown", "frau", "dubbed", "silky", "vows", "macy", "distorted", "nathaniel", "attracts", "bern", "qualifies", "grizzly", "micah", "hurting", "homicide", "await", "sparse", "corridors", "sont", "mcdowell", "fossils", "victories", "chemically", "compliments", "cider", "crooked", "gangs", "segregation", "nemo", "overcast", "inverted", "lenny", "achieves", "forehead", "skye", "percy", "scratches", "conan", "lilac", "intellect", "charmed", "denny", "harman", "hears", "wilhelm", "nationalism", "pervasive", "auch", "enfield", "nie", "clears", "knowingly", "pivot", "undergraduates", "digestion", "mixtures", "soaring", "dragging", "virtues", "flushing", "deprivation", "delights", "foreword", "glide", "transverse", "engagements", "withstand", "newbury", "authorizes", "blooms", "soar", "uniformly", "todos", "piedmont", "empowered", "asi", "lena", "outlying", "slogan", "subdivisions", "deducted", "ezekiel", "totaling", "elijah", "compton", "vigorous", "flee", "biscuit", "creme", "submits", "woes", "waltz", "menace", "emerges", "classify", "paige", "downstairs", "statesman", "cheerful", "blush", "leaflet", "monde", "weymouth", "spherical", 
"favourable", "informs", "dramas", "cher", "billiard", "aut", "malay", "unseen", "optimism", "silica", "kara", "unusually", "widest", "impotence", "medley", "cadet", "redskins", "temper", "asserts", "stew", "hereafter", "retiring", "smashing", "accumulate", "tahiti", "mariner", "collier", "hush", "whispered", "generosity", "vibrating", "lama", "artisan", "akin", "raphael", "lola", "embarrassing", "aqueous", "pembroke", "stockholders", "lillian", "splinter", "ibn", "preferable", "juices", "ironically", "morale", "morales", "solder", "trench", "persuasion", "practise", "lodged", "revolt", "renders", "pristine", "francaise", "shines", "catalan", "auditory", "applause", "trait", "popped", "busted", "basins", "farmhouse", "pounding", "picturesque", "ottoman", "eater", "utopia", "insists", "willard", "lettering", "marlborough", "pouring", "concentrating", "soak", "buckingham", "hides", "goodwin", "manure", "savior", "dade", "secrecy", "wesleyan", "duplicated", "dreamed", "fertile", "hinges", "plausible", "creepy", "narrator", "augustus", "fahrenheit", "hillside", "standpoint", "nationalist", "piazza", "denoted", "oneself", "royalties", "abbreviation", "blanco", "critiques", "stroll", "anomaly", "thighs", "boa", "expressive", "infect", "pers", "dotted", "frontal", "havoc", "ubiquitous", "arsenic", "synonym", "yer", "doomed", "francs", "ballad", "sling", "contraction", "devised", "explorers", "billie", "ravens", "underline", "obscene", "mes", "hymn", "continual", "slowed", "aladdin", "tolerated", "quay", "outing", "instruct", "wilcox", "overhaul", "peruvian", "indemnity", "lev", "imaginative", "weir", "remarked", "portrayed", "clarendon", "ferris", "julio", "spelled", "epoch", "mourning", "phelps", "aft", "plaid", "fable", "rescued", "exploded", "padres", "scars", "whisky", "tes", "uptown", "susie", "batter", "reyes", "vivian", "nuggets", "silently", "pesos", "shakes", "dram", "impartial", "punctuation", "initials", "spans", "pallet", "pistols", "mara", "tanner", "avenues", "dun", "compress", "apostles", "sober", "tread", "legitimacy", "zoology", "steals", "unwilling", "lis", "paddy", "plunge", "pearce", "vos", "sinister", "burr", "arteries", "formations", "vantage", "texans", "diffuse", "boredom", "norma", "crosse", "mondo", "helpless", "wyatt", "spades", "slug", "visionary", "coffin", "otter", "navajo", "earns", "amplified", "recess", "dispersed", "shouted", "shilling", "resemble", "carbonate", "mimi", "discriminate", "stared", "crocodile", "ratification", "vases", "advises", "sind", "coward", "inequalities", "garde", "dyes", "viz", "turbulence", "yell", "fins", "ritchie", "dresser", "rake", "ornamental", "riches", "resign", "injunction", "intervene", "poised", "barking", "josephine", "dread", "dag", "handwriting", "serpent", "tapped", "articulated", "pitched", "wisely", "accustomed", "bremen", "steaks", "playhouse", "superficial", "suns", "josef", "casts", "bunk", "stab", "sanction", "dyer", "effected", "tubular", "moi", "ode", "avoids", "richter", "evidenced", "heinz", "argos", "dit", "larvae", "dyke", "cassidy", "kernels", "mobilization", "amt", "wilkins", "manipulated", "alleviate", "seam", "riddle", "comedies", "fainter", "respectful", "cabaret", "recession", "awaited", "nozzle", "externally", "needy", "wheeled", "booksellers", "darn", "diners", "greeks", "reich", "armored", "weary", "solitary", "photographed", "tweed", "snowy", "pianist", "emmanuel", "acapulco", "surrounds", "knocking", "cosmopolitan", "magistrate", "everlasting", "pigment", "faction", "tous", "argentine", "scandinavia", 
"minnie", "genie", "linn", "handel", "microscopic", "clarified", "coherence", "sensations", "orphan", "conferred", "acp", "disturbances", "chandelier", "embryonic", "carver", "paterson", "delle", "graceful", "intercept", "shouts", "ascertain", "veto", "exhaustive", "annoyed", "bureaucracy", "paz", "stalls", "fined", "bien", "inward", "reflector", "greeted", "hartley", "defenses", "meaningless", "clam", "francesco", "hes", "georg", "negligible", "starch", "melinda", "godfather", "apron", "guts", "ros", "pragmatic", "tyranny", "warehouses", "regimen", "axel", "antony", "hahn", "fluffy", "marianne", "slender", "hereford", "aides", "forma", "absorbing", "cherries", "gaelic", "gomez", "alec", "distinguishing", "glazed", "judd", "dashed", "libyan", "dickson", "distressed", "shouting", "bullock", "villagers", "acknowledgments", "ethiopian", "mermaid", "buds", "sexes", "wilder", "sire", "centred", "confinement", "islanders", "ding", "uncover", "contested", "coma", "husky", "conserve", "bland", "abatement", "originator", "whipping", "skipping", "routed", "rudolph", "abigail", "missionaries", "householder", "plotting", "yan", "succeeding", "elmer", "sails", "schuster", "overlook", "robes", "sham", "fungus", "astonishing", "graveyard", "chunks", "bourne", "revert", "ignores", "popping", "captains", "loaf", "pandora", "gabrielle", "stad", "abel", "enigma", "glands", "militant", "jug", "inferno", "torrents", "outset", "confuse", "yvonne", "attaching", "adept", "doubtful", "ratified", "insecure", "explosions", "trunks", "gareth", "versatility", "lothian", "fem", "intricate", "strata", "depository", "hubert", "proclamation", "beauties", "hybrids", "gillian", "darrell", "irrespective", "imposition", "ensured", "kidnapped", "sai", "cereals", "outrage", "poop", "scrubs", "orchestral", "bellingham", "dripping", "afterward", "devote", "facets", "musique", "frightened", "noises", "ambiguity", "booths", "discourage", "elusive", "speculative", "madeira", "intimacy", "hallway", "whey", "ripping", "mei", "hob", "reloaded", "garry", "ester", "annan", "thriving", "hampers", "bragg", "gracious", "snail", "curt", "demise", "theoretically", "grooves", "sutra", "conveyed", "swine", "typographical", "ellison", "ado", "trophies", "quicken", "werden", "heron", "graft", "moth", "crossings", "derrick", "mash", "germ", "envoy", "breckenridge", "pug", "antoine", "domingo", "resembles", "doorway", "grandson", "tat", "catalina", "redding", "accompaniment", "derivation", "warden", "voir", "tug", "margarita", "clans", "instituted", "notary", "thi", "sociological", "offending", "forgetting", "macedonian", "votre", "reservoirs", "barlow", "tyrone", "halle", "edged", "encompass", "spade", "hermes", "glare", "metaphysical", "insignificant", "exchanging", "pledges", "mentality", "turbulent", "pip", "pup", "fortunes", "sultan", "masked", "casing", "plotted", "haley", "generously", "amounted", "icy", "repression", "reaper", "honoring", "facto", "climatic", "broaden", "begging", "wharton", "sui", "freddy", "bushes", "contend", "restraints", "truncated", "gibbons", "nitric", "atop", "glover", "railroads", "unicorn", "normandy", "floats", "justices", "orderly", "wafer", "puck", "roofs", "reefs", "hover", "quarantine", "detrimental", "molds", "elias", "hou", "subsistence", "chilled", "foe", "citadel", "topography", "leaflets", "wrinkle", "contemplated", "adolescence", "nun", "harmon", "indulge", "bernhard", "hearth", "edna", "embarrassed", "aggressively", "coincide", "maynard", "genoa", "enlightened", "clippings", "radicals", "penetrate", 
"stride", "catastrophe", "greatness", "archie", "parasites", "entertained", "inventors", "ferret", "louisa", "agony", "marseille", "taller", "doubling", "stupidity", "moor", "stephenson", "enrich", "foreground", "revelations", "replying", "incapable", "parte", "acknowledgment", "labyrinth", "africans", "sway", "undergone", "lacey", "preach", "triangular", "disabling", "cones", "inversion", "thankfully", "taxed", "presumption", "excitation", "salesman", "hatfield", "constantine", "confederation", "petals", "imprisoned", "heller", "docks", "landowners", "sul", "juno", "deux", "defiance", "bully", "valiant", "constructions", "youngsters", "toad", "breasted", "banging", "vertigo", "unsatisfactory", "fluent", "rhyme", "eros", "aan", "mcintosh", "suffice", "convened", "nah", "accusations", "debated", "stallion", "equipments", "necessities", "camelot", "deserted", "keepers", "logically", "caravans", "oranges", "bum", "presse", "olga", "contends", "snort", "occupants", "organiser", "vim", "luminous", "crowe", "unparalleled", "anyhow", "waterfalls", "obtains", "antwerp", "ulrich", "hardened", "primal", "straits", "upheld", "wir", "malt", "sinai", "endowed", "cameo", "attire", "blaine", "typewriter", "pomona", "goddard", "fanny", "plagiarism", "milky", "combs", "upland", "unconstitutional", "adopts", "macao", "snaps", "defends", "depicts", "pilgrimage", "elevators", "ohne", "narrowed", "eighteenth", "hurst", "inscription", "ascent", "pisa", "tedious", "pods", "universally", "chewing", "accommodated", "tendencies", "rowland", "welded", "conforms", "reggie", "refreshments", "depict", "coils", "callers", "navel", "arbitrator", "prolific", "nurseries", "footsteps", "indefinitely", "sucker", "bumps", "frightening", "wildly", "sable", "retarded", "neatly", "singleton", "spaniel", "somerville", "worthless", "git", "spool", "jeopardy", "rovers", "voiced", "annoy", "clap", "aspiring", "dazzling", "cornelius", "scientifically", "grandpa", "cornish", "guessed", "kennels", "sera", "axiom", "stamina", "hardness", "abound", "curing", "socrates", "aztec", "confer", "vents", "mater", "oneida", "aiken", "crowned", "sandstone", "adapting", "cranes", "rooster", "proctor", "prehistoric", "balkans", "dictate", "joker", "wiped", "contours", "abdomen", "baden", "tudor", "paws", "villains", "poke", "prayed", "inefficient", "heirs", "parasite", "shortcomings", "cures", "concentrates", "preclude", "fasting", "loudly", "horseshoe", "zeus", "constellation", "recital", "utrecht", "freud", "bedtime", "thinkers", "hume", "reminiscent", "rapport", "ephesians", "dope", "truss", "kiln", "peaches", "depressing", "strangely", "narratives", "sud", "skipper", "gy", "drains", "maxima", "unification", "sous", "testimonial", "khaki", "distributes", "navigating", "slough", "prodigy", "embossed", "mould", "jock", "blasts", "poorer", "anglia", "dyed", "dissatisfied", "bourbon", "staggering", "bismarck", "hoe", "rubbed", "wasp", "bookseller", "fuss", "muir", "uterus", "chimes", "webber", "aggregated", "pico", "exhibiting", "gimme", "nee", "beaufort", "radically", "terminating", "platter", "chamberlain", "steamboat", "brewster", "inferred", "croft", "ism", "uplifting", "penal", "exclusions", "pageant", "henley", "purchasers", "pitchers", "tracts", "morally", "hosiery", "yt", "reptile", "overdue", "cowan", "mohawk", "riots", "hassan", "schwarz", "persuaded", "teasing", "rejecting", "emphasizing", "unbound", "quentin", "shepard", "sacrifices", "delinquent", "contrasting", "nestle", "correspondents", "guthrie", "imperfect", "disguise", "eleventh", 
"embassies", "lapse", "wally", "phenomenal", "civilizations", "friendships", "marjorie", "shrub", "kindred", "reconsider", "sanctioned", "parfums", "condemn", "renegade", "awaits", "hue", "augmented", "amends", "fullest", "shafts", "finer", "ys", "burdens", "invocation", "gillespie", "brooch", "motifs", "nineteen", "griffiths", "invaders", "edmond", "volunteered", "swollen", "liste", "grasses", "scatter", "steward", "ito", "cherished", "smack", "incidentally", "sine", "depleted", "holiness", "divinity", "campaigning", "tougher", "sherlock", "comprehend", "cloak", "pamphlet", "clipper", "umbrellas", "priceless", "mig", "assassin", "exploiting", "cynical", "toro", "etched", "bray", "choke", "underwent", "comforts", "appoints", "keene", "rachael", "swallowed", "imperialism", "mouths", "halter", "ley", "ike", "pumpkins", "shrinking", "roar", "novelist", "potomac", "arroyo", "tipped", "amidst", "insurgents", "wanda", "etching", "discouraged", "gall", "oblivion", "gravy", "inherit", "sprinkle", "stitching", "advisable", "loi", "meme", "gladstone", "jugs", "congregations", "handing", "payer", "ze", "beforehand", "laborer", "watcher", "vibrations", "apes", "strawberries", "abbas", "moods", "dobson", "ives", "soaked", "abridged", "palate", "thierry", "masculine", "realizes", "kahn", "petitioners", "constable", "sayings", "unconditional", "vue", "progressively", "topping", "baird", "chilling", "translucent", "glaze", "newcomer", "branching", "unmarried", "unexpectedly", "funniest", "bona", "scorpion", "mirrored", "sel", "anatomical", "misdemeanor", "tobias", "salle", "infra", "strasbourg", "commemorative", "implicitly", "ewing", "austen", "assurances", "comedian", "rascal", "nid", "roberta", "dizzy", "outbreaks", "annuities", "slit", "whitening", "occupying", "depicting", "ordnance", "verge", "ransom", "nomad", "dagger", "thorn", "preamble", "mor", "spins", "solicit", "provoking", "orchids", "buckets", "spoil", "blazing", "palermo", "snapped", "alligator", "detectives", "rochelle", "nomenclature", "abdullah", "invade", "regulates", "rendezvous", "strives", "trapping", "gardeners", "clemens", "deuteronomy", "diminish", "britannia", "manifestations", "tak", "stitches", "promulgated", "mediocre", "passports", "ayrshire", "invent", "eagerly", "damascus", "reformation", "hypocrisy", "parishes", "trooper", "bun", "compendium", "disappears", "hymns", "monotone", "palsy", "propositions", "locomotive", "debating", "cuffs", "prosperous", "famine", "orally", "elliptical", "grabbing", "jogging", "stipulated", "persuasive", "horrors", "bearer", "pastors", "acquainted", "dependents", "dizziness", "ture", "brilliance", "nicky", "originate", "respectable", "horace", "prohibiting", "disappearance", "morals", "invaded", "spoiled", "monet", "pickle", "quaker", "haunting", "manipulating", "tangent", "tempest", "petra", "dominique", "waving", "dai", "uneven", "plata", "plurality", "warrington", "adventurous", "luigi", "bayou", "accueil", "confluence", "blossoms", "succeeds", "orphans", "louder", "boilers", "reunions", "yelling", "trough", "leaned", "quadrant", "discrepancy", "slid", "antioch", "tonic", "magnus", "harrow", "jig", "reckless", "raining", "peasant", "vader", "qua", "figuring", "crushing", "thorpe", "ordained", "hodges", "saucer", "chinook", "passover", "byzantine", "tomas", "triangles", "curvature", "rites", "sideways", "devious", "dreamer", "acknowledging", "estuary", "burglary", "pouches", "thrilling", "spectacle", "sentiments", "ditto", "nana", "waiter", "oddly", "suchen", "raft", "cul", "nutshell", 
"arrogant", "hermann", "induces", "thrift", "sae", "admired", "stunts", "iaea", "youthful", "stumbled", "emitted", "sufficiency", "tempered", "slipping", "solitude", "cylindrical", "destroyer", "fide", "undesirable", "mongolian", "weakly", "parsley", "undue", "stunned", "smiths", "magyar", "hostility", "groves", "pursuits", "reflux", "adaptations", "jurisprudence", "invariably", "lecturers", "progressed", "brow", "elves", "kearney", "graeme", "kimball", "chant", "turnkey", "sprays", "tighten", "revolver", "crowns", "intermediary", "matted", "apricot", "tufts", "cuckold", "unreliable", "rosewood", "parry", "existent", "tongues", "dictator", "jehovah", "fanatics", "coeur", "perpendicular", "fay", "hedgehog", "raves", "mamma", "entails", "folly", "wheeling", "sharpe", "hawthorn", "mural", "bankrupt", "wager", "purge", "interpolation", "adjournment", "pitfalls", "stationed", "ambrose", "nightmares", "aggravated", "deem", "melville", "cavern", "ene", "sumner", "descended", "disgusting", "flax", "weakened", "imposes", "withdrew", "tart", "guerrilla", "spoons", "persona", "poser", "tram", "distinctions", "peabody", "alia", "iced", "faulkner", "scarcely", "excused", "fused", "madeleine", "roaring", "witchcraft", "stopper", "fibres", "cullen", "crested", "stump", "scalp", "gunn", "erwin", "conductors", "criticisms", "hadley", "diplomat", "sylvester", "melon", "tablespoon", "manganese", "siren", "clasp", "olives", "nino", "summons", "lucrative", "porous", "shrewsbury", "bile", "siegel", "cara", "ese", "ils", "hinduism", "elevations", "thirst", "endeavors", "sportsman", "scratching", "iodine", "phoebe", "wipes", "fro", "krone", "urgently", "exposes", "natures", "liberalism", "meer", "derry", "suisse", "frankenstein", "parc", "heir", "phy", "successors", "eccentric", "yarmouth", "transports", "amour", "illustrative", "prosecuted", "sailed", "craving", "advocating", "titel", "leaking", "escaping", "possessing", "suicidal", "cruisers", "masonic", "forage", "loco", "hellenic", "kwh", "ethel", "distinctly", "assertions", "baba", "pebble", "staffs", "ets", "hoo", "denomination", "patched", "patriotism", "battling", "tickle", "bandit", "acquaintance", "lambs", "loom", "blouse", "heightened", "chests", "ambitions", "feline", "grub", "ulcer", "slew", "menstrual", "canals", "negatives", "threading", "duet", "intolerance", "ammonium", "zephyr", "tearing", "muffins", "naar", "autor", "fannie", "foothills", "atrium", "thine", "superiority", "gestures", "nemesis", "engel", "confessional", "cardigan", "taunton", "evaporation", "devise", "abolished", "sorrento", "blanchard", "uns", "toying", "parma", "wreath", "plight", "opium", "irrational", "arches", "naturalist", "encompassing", "penetrating", "destroys", "prussia", "lowers", "cookery", "nal", "beatrice", "policeman", "cartilage", "turnpike", "migratory", "jurors", "mea", "enumerated", "sheltered", "doctrines", "seams", "pleaded", "pca", "elasticity", "cel", "gutter", "ulcers", "sloppy", "flannel", "volcanoes", "ridden", "contradictory", "misunderstood", "steamer", "cong", "barometer", "exclaimed", "diem", "barge", "spartan", "nea", "crystalline", "rumours", "famed", "brandt", "riga", "bengali", "respite", "grimm", "shetland", "provocative", "guido", "tasted", "licked", "banged", "rufus", "hopeless", "henrik", "safest", "daphne", "ame", "pollock", "meteor", "granville", "veneer", "anonymously", "manageable", "slant", "disciplined", "pollard", "comme", "chops", "broom", "plainly", "ibrahim", "snare", "shank", "uphold", "revising", "insignia", "nurture", "leash", 
"hunts", "faber", "plantations", "factions", "falmouth", "humility", "commentators", "impeachment", "acton", "engages", "carbide", "pullman", "characterised", "kinder", "deems", "outsiders", "dodd", "dissolve", "adrienne", "deduct", "crawling", "modifier", "muck", "colombo", "hoax", "cohesion", "reconnaissance", "antagonists", "bachelors", "observes", "corporal", "ligne", "wary", "locust", "condenser", "articulation", "villain", "tre", "oft", "secures", "leviticus", "impending", "rejoice", "pickering", "poisson", "bursts", "versailles", "hurdles", "lucie", "geese", "condemnation", "candies", "sidewalks", "formidable", "pun", "autres", "mecca", "rested", "paused", "macbeth", "abandonment", "nada", "bertrand", "broth", "wentworth", "seduction", "fertilizers", "maison", "contrasts", "giuseppe", "tae", "improperly", "nebula", "crows", "blooming", "mace", "seminole", "taper", "synagogue", "sugars", "burnham", "allure", "intestine", "ambassadors", "reclaim", "isla", "kingdoms", "richness", "converge", "pianos", "dol", "workings", "penelope", "extinct", "ponder", "revue", "lunches", "fooled", "smear", "rigging", "derives", "praises", "detachment", "luca", "caracas", "lids", "pore", "ey", "radiance", "oily", "quitting", "ina", "grover", "screams", "masking", "patchwork", "heinrich", "breton", "assures", "joys", "involuntary", "allegation", "infinitely", "dorchester", "serge", "morphine", "gymnasium", "waldo", "diese", "chiefly", "judah", "conjecture", "mich", "restitution", "indicted", "blasting", "confronting", "mastered", "powders", "debtors", "grit", "slain", "nearer", "ancestral", "mujeres", "faithfully", "revolutions", "sei", "quail", "tanker", "administrations", "sho", "rector", "ballast", "immature", "recognises", "taxing", "icing", "substituting", "executes", "originality", "pinned", "gables", "discontinue", "bantam", "bianca", "zimmer", "earthly", "conceive", "forfeiture", "disastrous", "gladiator", "poplar", "ence", "recourse", "martian", "equinox", "hinder", "fredericksburg", "presume", "weil", "armchair", "cecilia", "strut", "kari", "pavel", "appropriateness", "tame", "solstice", "oats", "italien", "wolff", "plume", "sparta", "calypso", "pantry", "etienne", "italics", "reversing", "murderer", "courteous", "wilt", "smoothing", "billet", "pretending", "hammock", "receptions", "revoke", "intruder", "wagons", "jennie", "platte", "plank", "paddling", "ting", "interrogation", "neue", "longing", "irresistible", "pilgrims", "disappearing", "sau", "enact", "inertia", "misunderstanding", "deity", "pruning", "agra", "mandolin", "rolf", "swiftly", "claws", "brightly", "manly", "emit", "shortened", "fearful", "potency", "ifc", "flawless", "peril", "alessandro", "breaches", "resultant", "nestled", "hairs", "dumfries", "drastic", "guarded", "celery", "reconcile", "grammatical", "collin", "ven", "admiration", "zanzibar", "offend", "severance", "somali", "combating", "numb", "retina", "maids", "tempting", "bureaus", "voyages", "galatians", "flo", "planters", "rocco", "sheath", "louie", "chaplain", "benefiting", "dubious", "occupies", "mammal", "shielded", "degeneration", "listens", "swirl", "emery", "twists", "scot", "intrigue", "blanche", "dialect", "nominating", "fanatic", "upton", "pave", "coverings", "danced", "slightest", "libre", "bromley", "revive", "corolla", "predominant", "abode", "savoy", "vogel", "insecurity", "trustworthy", "uniformity", "conquered", "alarming", "dur", "amused", "horizontally", "knitted", "exploding", "narrowly", "campo", "rampant", "suitcase", "embarrassment", 
"spectators", "coronado", "retaliation", "inquirer", "dreadful", "metaphysics", "drifting", "ritter", "attends", "nicer", "mellow", "boast", "gents", "respiration", "absentee", "duplicates", "dubois", "corollary", "tighter", "predetermined", "asparagus", "airy", "progresses", "canister", "stiffness", "thrifty", "canning", "workmanship", "complexities", "shan", "wrinkles", "illustrating", "perch", "craven", "divergence", "homage", "atrocities", "londonderry", "hops", "emmy", "chez", "admittedly", "ruiz", "angst", "liturgy", "nativity", "surety", "tranquil", "disseminated", "staircase", "cutler", "cradles", "electorate", "airs", "reconstructed", "resent", "opposes", "silvia", "distraction", "dominates", "kimberley", "despatch", "fugitive", "tucked", "jericho", "turmoil", "gilles", "dietrich", "haines", "unjust", "markedly", "fascinated", "disturb", "terminates", "exempted", "bounced", "rankin", "brightest", "saddles", "scotsman", "fitzpatrick", "gushing", "distracted", "secluded", "criticize", "bog", "livelihood", "godfrey", "minerva", "superseded", "iceberg", "caleb", "christening", "jealousy", "plumber", "hagen", "squeezed", "judas", "valle", "dole", "wick", "gertrude", "communists", "owes", "scents", "bertha", "levied", "sag", "barns", "covenants", "peat", "proprietor", "lizzie", "raids", "solos", "compartments", "maj", "foi", "importation", "mss", "planter", "ici", "metz", "immaculate", "pur", "reindeer", "telegram", "ruben", "shaken", "wares", "rivalry", "verve", "charley", "carpenters", "spree", "sunk", "morley", "bespoke", "inflicted", "abbreviated", "drowned", "escorted", "brute", "barracks", "kidneys", "warbler", "onward", "kidnapping", "inducing", "lancet", "antelope", "terminus", "castings", "flanders", "pellets", "enclosing", "starred", "deacon", "kabul", "sweeps", "butch", "mercure", "bookcase", "assembling", "diaphragm", "questo", "chores", "consignment", "yarns", "liv", "seedlings", "fortified", "reconsideration", "barnard", "profoundly", "bartender", "mayfair", "jag", "maneuver", "ridder", "vanished", "lair", "enclose", "sinners", "lille", "calves", "defer", "desmond", "liars", "els", "sod", "lacy", "pharaoh", "advocated", "itching", "alles", "devotional", "taft", "comparatively", "spartans", "tourney", "reasoned", "lawton", "degli", "saith", "astral", "ach", "parallels", "yelled", "wren", "terence", "hamper", "balkan", "blurred", "smuggling", "instincts", "hutton", "masquerade", "deans", "duality", "sensational", "kites", "smoother", "expulsion", "withhold", "romano", "grievances", "betrayed", "dumps", "buckles", "joyful", "generalization", "hin", "pancakes", "crave", "cordova", "focussed", "ripple", "claimants", "consolidating", "goldsmith", "inclination", "measles", "arcs", "portman", "baptized", "expelled", "rupees", "betrayal", "flourish", "heed", "mein", "graf", "hawking", "divides", "composing", "handicrafts", "healed", "burmese", "boon", "valor", "pedestrians", "gathers", "pawn", "stitched", "camille", "ceases", "dorsal", "collie", "hereditary", "exaggerated", "buccaneers", "spleen", "allotment", "jeu", "multiplying", "empress", "orbits", "whence", "bois", "trusting", "sabre", "stigma", "abduction", "attaches", "tartan", "twisting", "tore", "eth", "mimic", "shielding", "stormy", "vulgar", "pathological", "hodge", "trimming", "emanuel", "serene", "obligatory", "corrugated", "queenstown", "forbid", "unhealthy", "felicity", "ticks", "fascination", "sono", "experimenting", "splendor", "vigil", "robbed", "rebirth", "winona", "progressing", "fragrant", "defeating", 
"hotter", "instantaneous", "operatives", "carmichael", "bulky", "exponent", "desperation", "parlor", "setter", "monumental", "olaf", "fer", "stirred", "toughest", "fil", "facade", "frankfort", "monograph", "booze", "widen", "adjective", "disciple", "cipher", "arrears", "rhythmic", "unaffected", "starving", "vide", "lennox", "sil", "hearty", "triton", "deus", "devine", "adore", "entertainer", "colds", "dependant", "thicker", "weeping", "chandeliers", "moneys", "infancy", "dips", "honoured", "yachting", "cleanse", "chilly", "digs", "bolivar", "womb", "irritating", "monarchy", "corset", "hinged", "attendants", "cummins", "robins", "booming", "artikel", "scandals", "screamed", "cramps", "enid", "herrera", "digger", "espionage", "pups", "avenged", "norte", "glade", "pendulum", "bounces", "nehemiah", "thinner", "noch", "licks", "soto", "caste", "jus", "daft", "sampson", "psyche", "rudolf", "angling", "stubborn", "diplomats", "physicist", "tagalog", "coo", "requiem", "bleu", "redeemed", "sighed", "lures", "bavaria", "devastation", "heroine", "bingham", "achilles", "flaps", "indifferent", "cadence", "frosted", "schubert", "rhine", "manifested", "denominations", "interrupts", "rattle", "insults", "oatmeal", "marta", "distilled", "stricken", "unrest", "cascades", "druid", "dunbar", "outsider", "ris", "abstinence", "nag", "poodle", "wunder", "stefano", "sitter", "colder", "laborers", "whispers", "swarm", "elise", "ledge", "winthrop", "historia", "peasants", "nectar", "anecdotes", "gilt", "masterpieces", "symbolism", "monsoon", "drown", "strife", "esprit", "attaining", "consular", "treason", "reckon", "gaston", "prosper", "napier", "supremacy", "capillary", "germain", "islington", "anchored", "yong", "vers", "mulberry", "sinful", "cheeses", "bradshaw", "mythical", "abyss", "whitehall", "malachi", "ble", "clipping", "niece", "irresponsible", "pleas", "softer", "paralysis", "devastated", "tarzan", "shutters", "flask", "arisen", "femmes", "relentless", "ribbed", "omnibus", "stables", "inhabited", "hereof", "untold", "observable", "gretchen", "lanterns", "tulips", "vigorously", "interfering", "idols", "designating", "nugget", "reminding", "gusts", "xviii", "magistrates", "procession", "spiritually", "attentive", "rupture", "trad", "assimilation", "lyrical", "concorde", "angelica", "braided", "wooded", "intensely", "propelled", "artisans", "bastards", "bassett", "aspiration", "appended", "slammed", "aviator", "implicated", "seriousness", "conformation", "intimidation", "paladin", "ihr", "nests", "civilized", "marched", "cassandra", "cath", "sighted", "hopping", "destin", "rosary", "platoon", "andres", "loneliness", "pulley", "alleging", "synonymous", "confectionery", "regrets", "consciously", "cours", "footprints", "priscilla", "stimulates", "darkest", "implying", "conducive", "uncontrolled", "ballads", "mathew", "hugely", "sevilla", "hostages", "rosario", "fruitful", "franks", "indemnify", "satisfactorily", "thinker", "contestants", "sia", "influx", "convoy", "sled", "pyramids", "depended", "conveyance", "tortoise", "milo", "cultivate", "crocker", "dialogues", "abolition", "coax", "padre", "lees", "mari", "quattro", "foresight", "peppermint", "tod", "castillo", "remnants", "nailed", "alum", "frantic", "zachary", "comrades", "cocoon", "doth", "gladys", "bowers", "strengthens", "qual", "dictatorship", "breezy", "plow", "mundane", "douglass", "barclay", "foes", "cloths", "clowns", "lombard", "barren", "histoire", "plead", "behaved", "embargo", "condensation", "yokohama", "vow", "claudio", "blot", "primera", 
"commentator", "patterned", "sheen", "specter", "imam", "assent", "hove", "shading", "scrubbed", "warts", "roundabout", "harmed", "paternity", "conceal", "starvation", "appointing", "seine", "flowed", "sewn", "zulu", "rin", "barnet", "rift", "saviour", "lapel", "turk", "cupboard", "archipelago", "peep", "deceptive", "undertakings", "tinted", "congratulate", "constance", "vanishing", "legislator", "notifying", "aches", "kitchener", "leaked", "genera", "idioms", "gardiner", "gli", "poisonous", "chime", "spence", "mischief", "argent", "delinquency", "cou", "sentimental", "unsuitable", "mildly", "forging", "pew", "waitress", "caribou", "merced", "expansive", "footing", "manu", "sligo", "remit", "bonnet", "stumble", "undertook", "promenade", "exhaustion", "unborn", "wendell", "hammers", "coasts", "emitting", "concur", "exert", "madeline", "sanskrit", "torre", "worldly", "wedges", "corded", "heirloom", "pleasantly", "portray", "pero", "esoteric", "luxe", "messengers", "landings", "graphically", "shameless", "communicates", "bourgeois", "yeh", "napkins", "unloading", "bakers", "selma", "pears", "heats", "lucid", "lobe", "canaan", "oppressed", "infer", "prosecute", "thatcher", "bret", "hauling", "inconsistencies", "indebtedness", "scramble", "adversary", "elsa", "quaint", "oswald", "dipping", "revere", "troopers", "domaine", "olde", "guerra", "solemn", "eruption", "celeste", "gentry", "enchanting", "preached", "mica", "cadets", "lads", "endured", "ensuite", "fermentation", "careless", "chemists", "inca", "fad", "julien", "dandy", "narcotic", "moulin", "paine", "incompetent", "ain", "predecessors", "lancer", "sorcerer", "fishers", "invoking", "muffin", "motherhood", "wexford", "ihre", "dressings", "partridge", "synod", "noticing", "inte", "newmarket", "amigo", "discerning", "caddy", "burrows", "furnaces", "zee", "occupant", "livingstone", "juggling", "wildfire", "seductive", "scala", "pamphlets", "rambling", "kidd", "bedside", "lausanne", "legality", "arbitrarily", "heb", "luz", "regulars", "robson", "mysticism", "accompanies", "summed", "chopin", "torches", "dominating", "joiner", "viejo", "explorations", "guaranty", "procure", "stillwater", "sunsets", "cropping", "anastasia", "arrogance", "diverted", "forgiven", "bleak", "christophe", "wenn", "drudge", "dolores", "tramp", "saliva", "chichester", "artemis", "lessen", "weller", "syringe", "diversions", "admiralty", "powdered", "granger", "prevailed", "glacial", "alleges", "shredded", "antiquity", "zeal", "valparaiso", "blaming", "embark", "manned", "porte", "johanna", "granular", "sant", "orkney", "bah", "vero", "oscillations", "sphinx", "spiegel", "mujer", "ceremonial", "sonnet", "constituencies", "sprung", "hedges", "inflated", "crooks", "prospecting", "quilted", "walled", "immensely", "trafalgar", "relapse", "descend", "jakob", "bolster", "nietzsche", "fol", "rocked", "rancid", "disparity", "malice", "vom", "knapp", "swimmers", "syllable", "painfully", "sweating", "demolished", "catholicism", "trident", "lemonade", "absences", "andes", "ciudad", "josie", "persists", "propeller", "dents", "anarchist", "submerged", "entrusted", "essen", "calming", "intending", "cromwell", "drummond", "dissertations", "highlander", "solicitations", "lar", "punto", "survives", "darcy", "funnel", "moons", "gent", "thirsty", "freshness", "lathe", "shabby", "punched", "petri", "virgil", "gaa", "marbles", "cottonwood", "mildred", "deletions", "cleopatra", "undecided", "startling", "inductive", "inadvertently", "bursting", "wird", "halves", "moulding", "melancholy", 
"observance", "leaps", "halen", "galvanized", "hoy", "teapot", "conveys", "lends", "squire", "ache", "counterfeit", "waller", "duval", "yoke", "resonant", "mak", "outskirts", "expedite", "grayson", "sweetness", "crook", "rearing", "davison", "tins", "deliberations", "indifference", "xix", "invading", "dives", "loot", "coyotes", "stale", "cosmo", "levers", "cog", "incarnation", "strained", "putty", "reacted", "admissible", "sunless", "puzzled", "unexplained", "patsy", "thermometers", "fourteenth", "compounded", "chippewa", "eldest", "terrifying", "climbs", "uprising", "gasp", "swans", "tories", "hap", "remnant", "immoral", "sacrificed", "unequal", "weaken", "braxton", "categorical", "cupid", "stalking", "sturgeon", "jap", "piers", "ensuing", "mitigating", "tint", "dykes", "revived", "joachim", "eet", "earle", "hosea", "sua", "haste", "flakes", "alfalfa", "corfu", "argyll", "emil", "joking", "rhetorical", "simmer", "vert", "smallpox", "overwhelmingly", "waterway", "migrated", "reacts", "bain", "norbert", "complication", "aubrey", "adaptable", "sainte", "bitte", "fleur", "muy", "berth", "uninterrupted", "lint", "chalmers", "crabs", "tuscan", "lingo", "einer", "budding", "roam", "resemblance", "hackney", "toto", "hebron", "saber", "cataract", "midday", "fait", "innate", "medallion", "prominently", "kant", "nazareth", "nadia", "glanced", "calais", "rapture", "sunbeam", "abruptly", "beetles", "caspian", "impair", "stun", "shepherds", "susanna", "philosophies", "lager", "projecting", "goblin", "bluffs", "parrots", "anthems", "terrified", "nocturnal", "nueva", "emulate", "accuse", "hunted", "diminishing", "lew", "ridley", "produits", "zipped", "intrepid", "babel", "clustered", "primate", "eyebrows", "compromising", "willingly", "harlequin", "revisit", "insulting", "prominence", "cuckoo", "parrish", "inspires", "acacia", "fang", "netting", "contemplating", "erasmus", "sop", "recalling", "practising", "hermitage", "starlight", "foyer", "palaces", "brood", "azure", "compel", "contradictions", "festivities", "trenches", "sabine", "doorstep", "sniff", "dangling", "negligent", "gliding", "woe", "meditations", "tranquility", "halted", "liza", "drawback", "smyrna", "hostess", "weep", "posse", "mosquitoes", "commun", "weldon", "frying", "hesitation", "imprinted", "bereavement", "surrendered", "iam", "bestand", "westward", "converged", "leopold", "recognizable", "ludlow", "sprague", "saba", "embraces", "gustav", "waxing", "gael", "sinner", "auspices", "coles", "ergo", "dissenting", "melee", "radcliffe", "countess", "pleading", "crafty", "llama", "montague", "troubling", "vowel", "reuben", "cob", "fearing", "coronation", "isabelle", "reluctance", "inconsistency", "apostolic", "summoned", "treble", "galley", "shovel", "kam", "entail", "mashed", "aire", "pacing", "moan", "opec", "jimmie", "henson", "unfolding", "tottenham", "deserts", "milking", "wilbur", "suitably", "enormously", "aber", "cicero", "scribe", "nellie", "sleigh", "formulae", "fen", "sank", "frontage", "blister", "ration", "humid", "portrayal", "guile", "lacquer", "unfold", "hammered", "tutti", "mined", "caucasus", "intervening", "bale", "astronomers", "thrills", "therefor", "sores", "fel", "pastures", "unattended", "playwright", "carthage", "zechariah", "selves", "naturalization", "whispering", "dissipation", "sprite", "keel", "leighton", "atheism", "gripping", "cellars", "tainted", "remission", "praxis", "affirmation", "perturbation", "wandered", "reeds", "angler", "astounding", "cosy", "resend", "augment", "flares", "shedding", "glastonbury", 
"funerals", "eucalyptus", "conservatism", "questa", "bumped", "fortuna", "cripple", "lofty", "proclaim", "cropped", "merton", "ere", "richly", "ravi", "dogma", "priori", "vaguely", "yam", "ple", "siberia", "melons", "farley", "seer", "evils", "spontaneously", "unavoidable", "ruthless", "almonds", "ecclesiastes", "aptitude", "vial", "chao", "sharpening", "seniority", "prompting", "objected", "equator", "guilds", "blatant", "favoured", "ridges", "oysters", "gust", "cate", "receptacle", "mendoza", "haus", "puberty", "shorten", "shawl", "samaritan", "bends", "grimes", "unison", "tabular", "amir", "dormant", "nell", "restrained", "tropics", "concerted", "avenir", "refrigerated", "crouch", "pence", "formulating", "lamentations", "napkin", "emile", "contagious", "inaccessible", "administers", "crockett", "conspicuous", "barbarian", "soaking", "reforming", "gar", "intrusive", "thyme", "parasitic", "abusing", "receptive", "capt", "uwe", "xvii", "vulcan", "musk", "lucille", "executions", "refreshed", "guarding", "atwood", "windmill", "lice", "garter", "footed", "dedicate", "libros", "renewing", "burroughs", "ioc", "skim", "touche", "welt", "veal", "perpetrators", "embarked", "quickest", "euclid", "tremendously", "anglais", "smashed", "oscillation", "thunderstorm", "retrospect", "jog", "hailed", "bahia", "miraculous", "hounds", "tightening", "draining", "paroles", "sensibility", "rags", "punching", "distinguishes", "poi", "dazzle", "dangle", "eaters", "exceedingly", "inauguration", "inquired", "repentance", "unprotected", "merle", "savory", "evacuated", "reclaimed", "prefecture", "accented", "crawley", "baum", "racket", "hannibal", "sickle", "violently", "attest", "untouched", "comforting", "creeping", "kerosene", "appraised", "restorative", "chet", "peacefully", "stature", "sentry", "pel", "assaults", "berwick", "vices", "amo", "tolls", "degrading", "forster", "fireman", "maniac", "antics", "deze", "formative", "recognising", "wordsworth", "wrongly", "cree", "physicists", "falsely", "abbot", "officio", "consul", "plagued", "lahore", "aiding", "kunst", "suckers", "swallows", "patronage", "canoes", "matilda", "fodder", "impetus", "peeled", "whining", "arson", "hirsch", "tapestries", "transatlantic", "jak", "freeing", "kilkenny", "redress", "settles", "seaman", "skulls", "cayenne", "treatise", "defeats", "testimonies", "kali", "weitere", "itch", "withdrawing", "solicited", "jai", "gard", "brilliantly", "deja", "mccann", "spalding", "dill", "reopen", "potts", "erased", "resisting", "congregational", "antiquities", "dunham", "monsieur", "inhaled", "fuses", "britt", "blinded", "madras", "sacrificing", "faiths", "tinker", "sonora", "echoed", "elisha", "gazing", "skepticism", "zane", "eighties", "groupe", "freehold", "braid", "ance", "forester", "resisted", "alp", "munro", "agar", "arundel", "shiraz", "disgrace", "mediate", "rein", "realisation", "irritable", "cunning", "fists", "pennies", "jos", "hemorrhage", "awning", "ointment", "spilled", "tripping", "occidental", "vigor", "chariot", "buoy", "geraldine", "matrimonial", "squads", "niet", "tenn", "disclosing", "masthead", "ursula", "disbursements", "boucher", "chadwick", "candidacy", "hypnotic", "adultery", "fis", "seventeenth", "temperament", "prostitutes", "healer", "hive", "circulate", "glued", "sycamore", "belinda", "westmoreland", "shuts", "tenderness", "ocular", "smelling", "dung", "keine", "scratched", "conclusive", "alder", "polluted", "undersigned", "lark", "oda", "carlyle", "restores", "lullaby", "sanderson", "hoes", "lawns", "midas", "choking", 
"castor", "plentiful", "bonner", "stately", "raced", "deuce", "oma", "squirrels", "paddington", "drawbacks", "evoked", "dictates", "studded", "individuality", "spared", "anticipating", "californian", "brownie", "undressing", "quits", "ensign", "restraining", "blockade", "girard", "nearing", "ruff", "burglar", "warped", "tributes", "freezes", "knoll", "thinning", "reddy", "primrose", "parting", "humber", "michelangelo", "corduroy", "torpedo", "muffler", "troublesome", "eucharist", "wadsworth", "magnetism", "hodgson", "inventive", "speculate", "craze", "dispatches", "craftsmen", "desiring", "felipe", "hoffmann", "texan", "nombre", "grated", "submarines", "provoke", "romana", "accommodating", "grenoble", "calvary", "banded", "deportation", "harald", "cuttings", "invests", "sculptor", "kildare", "commended", "roper", "narrowing", "sergey", "mechanically", "profanity", "playmate", "scum", "seasoning", "adolf", "adjourn", "widows", "conveying", "precincts", "volta", "mediums", "discern", "bran", "fumes", "futile", "disqualified", "fenced", "eel", "animate", "faro", "resembling", "buren", "totem", "experimentally", "drinkers", "hermione", "indus", "harms", "asserting", "affluent", "ell", "protesting", "dix", "lonesome", "liberated", "unconventional", "amore", "reckoning", "fabian", "concurrence", "closets", "carve", "metaphors", "muster", "labourer", "heartfelt", "pertain", "democracies", "gideon", "mallory", "gauntlet", "martyrs", "cots", "victorious", "sylvan", "beverley", "unnatural", "swish", "confessed", "nae", "drumming", "patching", "fret", "abiding", "luscious", "sighting", "relic", "slipper", "augsburg", "bil", "argyle", "cling", "prophetic", "commune", "agatha", "tut", "haut", "gesellschaft", "circumcision", "neutrality", "aqui", "snoring", "trembling", "reproducing", "comets", "unitarian", "governs", "gums", "delaying", "mainz", "reconstruct", "toned", "erred", "modelled", "expiring", "mabel", "whistles", "jewellers", "kann", "caron", "understandings", "dared", "herndon", "nudge", "seeming", "rosebud", "alf", "andromeda", "sixteenth", "origination", "uso", "doves", "landowner", "preachers", "leiden", "ramona", "glib", "brutality", "fictitious", "francesca", "rumour", "immortality", "saffron", "ragged", "peerless", "constitutions", "improbable", "reiterated", "jesuit", "excessively", "mounds", "extraordinarily", "parted", "munster", "sufferers", "skunk", "interruptions", "placer", "lingering", "brooches", "heaps", "hydra", "anvil", "blinking", "sweetest", "noe", "dishonest", "stalk", "kun", "inert", "favorably", "vocation", "tribunals", "cedric", "favours", "witnessing", "eject", "seventies", "rayon", "dryden", "foreigner", "policemen", "unfavorable", "anomalous", "katharine", "barter", "rowley", "modifies", "frugal", "starry", "thanking", "nouns", "consequent", "entrances", "danube", "evasion", "filenames", "mayors", "gospels", "wicket", "cora", "lazarus", "vile", "misguided", "reunited", "conversational", "inspirations", "blasted", "shingles", "gresham", "cumbersome", "immersed", "philemon", "roasting", "accrue", "loire", "vented", "pont", "consolation", "cer", "frazer", "outlay", "dreaded", "airing", "alternately", "gracefully", "intrigued", "antagonist", "exalted", "cadre", "serb", "jaeger", "overthrow", "patiently", "cabot", "controversies", "narrated", "squat", "illuminating", "artificially", "saucepan", "freshest", "noi", "martyr", "hacienda", "koran", "quito", "tiara", "elegantly", "temptations", "skinned", "irrigated", "hives", "groundwork", "cyril", "kew", "resentment", 
"glaciers", "peri", "manfred", "gaping", "infringe", "porta", "inferences", "abrupt", "gambler", "dissection", "nightingale", "landau", "contemplate", "amigos", "putt", "colonization", "coon", "crock", "ailments", "disagreed", "boldly", "narration", "unopened", "insisting", "yeas", "brushing", "resolves", "sacrament", "cram", "shortening", "cloves", "marketable", "presto", "hiram", "broadening", "hens", "bowed", "whimsical", "harden", "molten", "repaid", "warmly", "hogs", "sporadic", "eyebrow", "strickland", "unnecessarily", "iom", "tess", "trois", "painless", "serbs", "verdi", "annexation", "dissatisfaction", "alpes", "applaud", "haben", "primo", "abolish", "climates", "uneasy", "busiest", "fray", "florian", "clogs", "flank", "cartel", "numerically", "perforated", "intensified", "sexton", "postmaster", "washes", "shrugged", "electors", "departs", "mindful", "lurking", "hitherto", "egyptians", "looms", "spectre", "downright", "refractory", "counsellor", "inexperienced", "outraged", "belgique", "smother", "frosty", "mules", "sash", "truro", "moaning", "ponies", "originates", "blight", "physique", "independents", "contentious", "cheering", "archibald", "emancipation", "duchess", "commemorate", "spout", "perish", "hoist", "narrower", "captivity", "peyton", "overloaded", "shorthand", "ceres", "bravery", "lizards", "einen", "fergus", "sincerity", "calder", "oar", "mullins", "flagged", "relics", "relish", "imagining", "belongings", "lire", "legislatures", "unchecked", "knocks", "alfonso", "contradict", "fleurs", "scarcity", "ashby", "fleeing", "filament", "abingdon", "theorists", "hof", "southwark", "celia", "disguised", "implanted", "thrash", "antiquarian", "dina", "fluency", "uniting", "behaves", "slabs", "conceivable", "agate", "incline", "hartmann", "bai", "soliciting", "thoroughbred", "calle", "oneness", "climber", "commonplace", "intellectually", "casanova", "himalayan", "downfall", "bookcases", "strides", "vanish", "ute", "transmits", "adair", "impatient", "aforesaid", "elbows", "truce", "bette", "stairway", "woodrow", "sou", "boar", "vertebrate", "laird", "multiplicity", "objectively", "resigns", "anguish", "petal", "perfected", "tomlinson", "odors", "mite", "blackstone", "clipped", "lago", "jed", "dries", "mejor", "sikh", "annoyance", "grating", "prostitute", "mina", "elixir", "guardianship", "gamblers", "autre", "peeps", "rol", "reverence", "sardinia", "outweigh", "verne", "gaylord", "bunting", "avenger", "spar", "waugh", "captivating", "tiers", "centurion", "propagate", "prosecuting", "montpellier", "willem", "slavic", "nutritious", "marguerite", "vapour", "pluck", "cautiously", "prick", "contingencies", "coercion", "picard", "rubble", "scrambled", "agitation", "chas", "truthful", "woodpecker", "herds", "corsica", "penetrated", "sein", "adder", "weakest", "weakening", "nome", "thorne", "anticipates", "poignant", "germs", "frees", "punishable", "fractured", "waterman", "brat", "uranus", "salient", "gabe", "censor", "semitic", "wits", "perverted", "bordering", "widowed", "tombstone", "begged", "flushed", "cautions", "lavish", "roscoe", "brighten", "vixen", "whips", "marches", "xxi", "anew", "commandment", "undetermined", "horner", "yah", "conceded", "circumference", "postpone", "disproportionate", "pheasant", "alonso", "bally", "zijn", "guillaume", "marrying", "carvings", "complains", "resided", "terriers", "weasel", "venerable", "preis", "toasted", "admirable", "illuminate", "holbrook", "fades", "bulge", "eller", "lucinda", "brittle", "bandits", "politely", "desde", "watermelon", 
"ingenious", "carols", "pensioners", "obadiah", "mannheim", "hepburn", "fetched", "alderman", "lockwood", "coughing", "hiatus", "upholstered", "evangelist", "louvre", "spurious", "gloom", "severn", "angelic", "astrological", "nobility", "bayern", "afternoons", "ramifications", "wakes", "ashore", "workman", "swimmer", "sitio", "unload", "loon", "marge", "wanderers", "sips", "badness", "undertakes", "miscarriage", "vulgate", "stoned", "provoked", "herr", "fables", "crumbs", "wort", "palisades", "confidently", "commences", "dispense", "dangerously", "figaro", "sadie", "protested", "capitalists", "accusing", "stink", "convent", "valdez", "childish", "adhered", "priesthood", "jagged", "dispersal", "overt", "verbally", "squeak", "constituting", "nuns", "pronounce", "scorpions", "incompleteness", "thurston", "dearly", "suggestive", "osa", "electrified", "unbalanced", "gypsum", "slime", "baroness", "winnings", "imaginable", "bromide", "lui", "crusaders", "summing", "lament", "gregor", "terraces", "canyons", "predatory", "towne", "descendant", "disgust", "banked", "rationality", "screwing", "dismal", "ranches", "cochin", "wipo", "prologue", "whaling", "patrols", "stumbling", "swung", "outlaws", "sinn", "waved", "libel", "ellipse", "alarmed", "justine", "jest", "garda", "eskimo", "caesars", "luce", "strapped", "reluctantly", "woodwork", "centrifugal", "authorship", "cavities", "buxton", "cravings", "decidedly", "pau", "apathy", "mercantile", "stalled", "infused", "peaked", "stronghold", "huxley", "moritz", "bearded", "greasy", "vowed", "carnage", "asher", "ingenuity", "mort", "infested", "creeks", "bessie", "adele", "ota", "rattan", "coroner", "irregularities", "tiled", "elaboration", "hectic", "lun", "snuff", "convene", "vai", "calmly", "horribly", "dilute", "contemplation", "sino", "uhr", "carta", "gaseous", "afflicted", "gloomy", "kirkwood", "orchards", "prophecies", "marques", "septuagint", "pertains", "clothed", "plummer", "italians", "talon", "repellent", "laval", "sorcery", "abstain", "elsie", "barring", "undermined", "tid", "bestowed", "habeas", "inactivity", "crewe", "grassy", "aprons", "clumsy", "columbian", "ayr", "pounded", "carrington", "stint", "rousseau", "sarcasm", "accomplishing", "overturned", "uphill", "maximus", "warmed", "parable", "jolt", "affords", "deadlock", "deriving", "quadrangle", "elects", "liebe", "eradicate", "likeness", "ral", "jem", "unter", "alpaca", "degrade", "flemish", "shred", "conseil", "steamed", "aroused", "remittance", "sieve", "bloch", "alienation", "reddish", "impulses", "interpol", "pleads", "whitby", "goliath", "caprice", "hors", "horned", "fowl", "janus", "hester", "benevolent", "superstition", "cohorts", "camilla", "rarity", "limbo", "shove", "accusation", "bernardo", "flake", "hating", "pate", "sewers", "spores", "mahmoud", "shears", "mucho", "flutes", "tabernacle", "minced", "westerly", "despatched", "munitions", "symmetrical", "ornate", "midwife", "uniformed", "snug", "coveted", "prohibitions", "moulded", "deceived", "convict", "nai", "tossing", "regularity", "criticised", "lawfully", "goethe", "slade", "dumas", "jester", "notifies", "recount", "dearest", "nook", "commensurate", "schiller", "bowler", "wiser", "gallant", "disbelief", "gon", "unqualified", "cautioned", "recollection", "locomotives", "condemns", "fastening", "jeweler", "nuremberg", "ostrich", "maud", "flirting", "misplaced", "prosecutions", "dido", "poisoned", "researches", "chou", "discriminating", "exclamation", "collingwood", "intercepted", "ascendant", "flung", "clovis", "eam", 
"railing", "cremation", "banter", "balconies", "awaken", "pigeons", "singularity", "signify", "granddaughter", "subdirectory", "bancroft", "progeny", "alters", "gratefully", "divergent", "fleets", "dorian", "juli", "tackled", "shoals", "tributary", "clique", "rosy", "satanic", "stubbs", "durch", "torment", "mussels", "emigration", "howl", "wel", "iglesias", "hir", "ecclesiastical", "crippled", "hilltop", "tabor", "peut", "tenet", "fifteenth", "chute", "bohemia", "mountainous", "fonds", "ogre", "unforeseen", "pickles", "submissive", "curses", "stampede", "utilised", "trieste", "whine", "nus", "fatality", "tierra", "looming", "zo", "sped", "ankles", "mosques", "fuchs", "guerilla", "squeezing", "fisk", "canes", "follower", "euler", "alumina", "degenerate", "spiked", "cru", "misrepresentation", "strung", "chanting", "wrestler", "officiating", "hermit", "behaving", "colbert", "josiah", "deepen", "acadia", "eso", "remy", "pats", "valentin", "mora", "cri", "enrico", "reciprocity", "crease", "wis", "ook", "bartholomew", "perseverance", "catalonia", "yorktown", "impede", "clasps", "tilted", "vicar", "confines", "prank", "dass", "repent", "dio", "agreeable", "riddles", "bennington", "pulpit", "appreciates", "marshes", "bellies", "corrosive", "ambush", "palazzo", "franciscan", "figurative", "gait", "emphasised", "bonfire", "aversion", "vicente", "stiles", "stewards", "chauffeur", "elicit", "henrietta", "slapped", "bitten", "lind", "salamanca", "martyn", "dynamo", "hobson", "stow", "summon", "skeletons", "parchment", "lingua", "distractions", "forfeit", "pepe", "paddles", "unpopular", "republics", "inspecting", "retainer", "hardening", "loosen", "beowulf", "undiscovered", "einem", "imputed", "cabs", "cheated", "willows", "hump", "delft", "communicative", "grieving", "chastity", "faust", "fright", "harbors", "adorned", "obnoxious", "diligently", "decays", "mortimer", "marvellous", "nouvelle", "easing", "mathieu", "picket", "thrones", "emilia", "eyre", "maturing", "seu", "illogical", "awakened", "beet", "suing", "brine", "lorna", "waning", "cartwright", "armoire", "piled", "twinkle", "lodgings", "maitland", "supple", "geld", "soi", "fabio", "unfit", "uttered", "rumanian", "shaggy", "elongated", "ordeal", "pegs", "astronomer", "incompetence", "flicker", "ramsay", "relieving", "towering", "operas", "slaughtered", "assaulted", "mena", "rouse", "appel", "armand", "spiel", "impurities", "stemming", "inscriptions", "hos", "tentatively", "tragedies", "interlude", "oates", "dialects", "vas", "ovid", "carcass", "casually", "scamp", "freedman", "reprise", "zig", "lash", "ills", "simms", "danes", "pebbles", "quicksilver", "sacked", "omen", "forfeited", "stipend", "conceptions", "lii", "amulet", "informally", "sarcastic", "indemnification", "hawke", "complexion", "daisies", "informant", "sorrows", "ite", "aegean", "andere", "sluggish", "brig", "tiempo", "marsden", "coy", "grouse", "reginald", "wierd", "pasted", "moths", "batavia", "evoke", "dispositions", "haywood", "staunton", "nit", "amorphous", "tributaries", "townships", "nantes", "assam", "mousse", "shameful", "chiffon", "archaic", "elevate", "deafness", "bec", "sala", "laureate", "contemporaries", "syphilis", "vigilance", "appalling", "palmyra", "foxes", "davie", "affixed", "ticking", "pantheon", "gully", "bitterness", "brill", "defy", "stor", "consumes", "lovingly", "agua", "thrush", "bribery", "smokes", "ventilated", "kettles", "ascend", "nutmeg", "chained", "magnify", "precautionary", "travail", "livres", "fiddler", "wholesome", "wrists", "severed", 
"mites", "puddle", "azores", "vegetative", "agora", "sob", "elaborated", "reeve", "embellishments", "willful", "grandeur", "plough", "pritchard", "mansions", "macpherson", "overheard", "persisted", "whereabouts", "haydn", "symphonies", "reclining", "rodrigo", "bounding", "annexed", "atheists", "umpire", "orthodoxy", "kilt", "doubtless", "keyed", "esquire", "cryptic", "primus", "wherefore", "cholera", "midsummer", "colouring", "intoxicated", "mysore", "jerks", "mise", "darius", "bullion", "deflection", "hateful", "propensity", "journalistic", "essences", "dispensed", "lemons", "stratum", "vendetta", "lod", "felicia", "restrain", "clutches", "cults", "whit", "amaze", "manassas", "rembrandt", "estado", "easel", "reisen", "potion", "ovation", "paddock", "numerals", "surpassed", "vino", "gable", "johnnie", "thirteenth", "laced", "quill", "saa", "mares", "enthusiastically", "fetching", "chaps", "tendon", "bellows", "keats", "deceit", "caro", "unmarked", "joyous", "boswell", "venting", "infringing", "blythe", "chisholm", "gunner", "verso", "samoan", "absorbent", "grossly", "cleft", "clog", "hongkong", "impoverished", "stabbed", "teaspoons", "comedians", "awnings", "sill", "lucknow", "bleaching", "isolde", "startled", "mathematician", "untrue", "algonquin", "hurried", "vir", "dieser", "staggered", "vacated", "vente", "fitz", "dura", "fingered", "apprentices", "cerca", "booted", "allie", "sens", "sprouts", "bower", "moab", "wolcott", "extremity", "orphaned", "requisites", "prudence", "kaufmann", "bij", "gingerbread", "biggs", "tasteful", "puritan", "osiris", "affirming", "salud", "excavations", "forearm", "distract", "seaport", "flashed", "longs", "dawes", "buns", "deceive", "civilisation", "starved", "amico", "colosseum", "stipulation", "emptiness", "maddox", "shoemaker", "cushioned", "dada", "osborn", "hastily", "ful", "invader", "patriarch", "consents", "nils", "polynesian", "swain", "lain", "groningen", "emilio", "mourn", "abandoning", "oddities", "soften", "troupe", "blacksmith", "suicides", "powerfully", "compromises", "helene", "thirdly", "classifying", "deepening", "unfairly", "connexions", "calico", "wrongs", "pores", "johnstone", "undermining", "burnside", "colossus", "frivolous", "indecent", "dishonesty", "oiled", "turnbull", "microbes", "sharpen", "phonetic", "oppressive", "coined", "tito", "moray", "simeon", "onslaught", "nationale", "noses", "treasured", "sharpness", "corral", "fortnight", "lia", "plunged", "reals", "modulated", "defiant", "brisk", "meath", "jena", "ponce", "perjury", "mua", "generality", "vigilant", "pronto", "vistas", "eerie", "arne", "stonewall", "wrestlers", "jackass", "geometrical", "priory", "epsom", "corpses", "wiping", "mercenaries", "bronchitis", "therese", "whirlwind", "howling", "apprehension", "raisins", "turkeys", "tio", "hora", "bobbie", "shale", "diligent", "nachrichten", "dann", "adversity", "wiggins", "torts", "egress", "adjectives", "crepe", "dum", "sheepskin", "concave", "heresy", "armory", "forthwith", "avert", "oat", "guise", "curiously", "fullness", "culminating", "kipling", "vomit", "compounding", "afar", "ebb", "shaky", "brutally", "pennant", "nicest", "willoughby", "necks", "lak", "mathias", "levee", "hindus", "powerless", "populace", "deliberation", "soles", "jetty", "luster", "overrun", "undone", "delia", "habitual", "alhambra", "mee", "uplift", "causeway", "murderers", "reopened", "guid", "inhabit", "lorenz", "conglomerate", "fastened", "tompkins", "extradition", "geschichte", "perils", "jerky", "proportionate", "compte", "algo", 
"boroughs", "deliverance", "resists", "lovell", "discourses", "subdued", "adhering", "falk", "suspicions", "hampered", "bruxelles", "detriment", "prejudices", "purported", "tron", "ine", "mangrove", "gab", "fawn", "scaffolding", "prin", "narrows", "sensed", "insuring", "babcock", "rhys", "boasting", "norah", "ascertained", "fluctuation", "jeannie", "ond", "twenties", "monstrous", "stetson", "accuses", "calibre", "nobles", "fumble", "attrition", "atherton", "lassen", "proverb", "darin", "mercenary", "clams", "reis", "tightened", "levies", "speck", "gutters", "murderous", "rudder", "amusements", "scares", "deformed", "wretched", "decadent", "incarcerated", "unsurpassed", "surpass", "annihilation", "pietro", "memoranda", "steaming", "magnifying", "serra", "hideous", "abreast", "intuitively", "extremities", "tyrant", "decency", "papal", "sprang", "palais", "obscured", "duets", "mountaineers", "blount", "butchers", "apologise", "geologist", "piccadilly", "axioms", "mogul", "fiercely", "varnish", "hysteria", "nei", "insistence", "aer", "clockwork", "mecklenburg", "intelligently", "fuer", "vials", "imputation", "albrecht", "densely", "droit", "odin", "colton", "distrust", "ulm", "assassins", "hatton", "fraternal", "refinements", "eloquent", "cwt", "silas", "wondrous", "decrees", "touchstone", "etext", "drayton", "grieve", "reigns", "pleasurable", "dobbs", "tunis", "olin", "bustling", "galt", "flue", "lucerne", "fiasco", "emir", "deacons", "slings", "dwarfs", "apportionment", "thoreau", "reins", "anson", "broadest", "scrambling", "misfortune", "drenched", "astonished", "kiel", "subconscious", "agi", "incandescent", "disappoint", "mobs", "cris", "rehearsals", "massa", "firewood", "serenade", "weathered", "truffles", "anno", "kepler", "teatro", "lawless", "gout", "coincides", "inhuman", "gentiles", "jardin", "fag", "rubs", "irritated", "despise", "floated", "fresco", "auteur", "custard", "prius", "dias", "hasan", "branched", "shipbuilding", "mildew", "tombs", "frown", "fulfilment", "accords", "privy", "caretaker", "antonia", "feeble", "gentile", "contractions", "combatants", "annuals", "champlain", "valence", "deteriorated", "droits", "disobedience", "gat", "unpack", "divination", "haw", "nationalities", "cultivating", "triumphant", "superbly", "hombres", "constrain", "magicians", "gra", "hobbes", "contended", "nazarene", "potsdam", "genevieve", "shiloh", "damper", "afrika", "forgiving", "yahweh", "madman", "sor", "slumber", "shimmering", "rigidity", "bane", "marius", "inventing", "chipped", "ane", "forts", "tumbling", "interprets", "surat", "dormitory", "confiscated", "discharging", "unnoticed", "ridicule", "thaw", "vandals", "reinstated", "lizzy", "unpacking", "darien", "intersect", "finden", "janvier", "garnish", "designates", "peeling", "levis", "blindly", "unintentional", "durant", "repertory", "toi", "disagreements", "gatt", "bene", "fifties", "goody", "dugout", "battleship", "talisman", "eels", "shun", "blackwood", "giggle", "worden", "deforestation", "streaks", "roderick", "bor", "corinth", "perverse", "glittering", "jails", "casket", "brigitte", "detour", "husbandry", "visibly", "defunct", "unveil", "circulars", "merciful", "ines", "tun", "tipperary", "kinship", "springtime", "philipp", "blouses", "hemlock", "sniffing", "uncanny", "stork", "concede", "combustible", "fallacy", "nicknames", "noxious", "tunic", "farce", "drowsiness", "chants", "ashe", "rhone", "lunatic", "pyrenees", "auctioneer", "recovers", "haggard", "manger", "chills", "whack", "drone", "breezes", "esteemed", "godly", 
"spire", "distillation", "edging", "langdon", "mathematicians", "soe", "cymbals", "antidote", "emblems", "caricature", "shroud", "stead", "recoil", "reconciled", "daze", "raisin", "amb", "amounting", "schon", "boer", "poisons", "nameless", "trot", "musically", "intensify", "voltaire", "harmonies", "benito", "accumulating", "indebted", "wald", "breathed", "misled", "mani", "culprit", "transact", "billig", "spiced", "berne", "pron", "puncture", "nella", "lighten", "practised", "canteen", "fein", "hysterical", "fick", "darkened", "requisition", "shrug", "boils", "enchantment", "greta", "covey", "donne", "pena", "loathing", "duc", "woof", "ominous", "parlour", "hammocks", "quieter", "poking", "tallest", "wrestle", "entrenched", "rectify", "virtuous", "ous", "davy", "snails", "decipher", "incapacity", "mittens", "ferns", "curls", "ens", "wrecked", "wince", "friendliness", "invincible", "healthiest", "prometheus", "rushes", "deities", "wor", "comanche", "melts", "trickle", "disapprove", "erratic", "familiarize", "insufficiency", "drifted", "propagated", "hardships", "sabres", "foraging", "wasps", "chien", "mitre", "tonnage", "corals", "mille", "continuance", "unrecognized", "premieres", "affectionate", "baptiste", "unimportant", "ferrara", "greener", "bowles", "endowments", "grudge", "zoological", "norse", "wetting", "bosom", "bales", "blackbird", "causation", "persecuted", "deciduous", "straighten", "convocation", "merrick", "precaution", "playmates", "philanthropic", "maneuvers", "stratified", "critter", "begs", "emphasise", "uit", "adresse", "connell", "busts", "cutaneous", "porters", "forgery", "pereira", "infrequent", "mull", "ort", "brandenburg", "incision", "jumble", "cognac", "wading", "imitate", "grasping", "borneo", "mortuary", "bode", "thorns", "rightful", "scarecrow", "mosaics", "pious", "utterance", "undeveloped", "basalt", "undisputed", "distracting", "urns", "unfolds", "brocade", "seaweed", "prevails", "candlelight", "votive", "wafers", "messina", "schumann", "tarts", "cuthbert", "nance", "babble", "pessimistic", "niches", "untill", "quid", "cadiz", "shortwave", "overlooks", "diversify", "hugging", "postman", "oas", "overboard", "goddesses", "faithless", "regained", "coolidge", "ephraim", "foggy", "shone", "criticizing", "leafy", "passionately", "stroking", "matured", "dolor", "procured", "excellency", "camels", "partie", "tou", "justifying", "eased", "slay", "deprive", "kremlin", "thea", "lusty", "virtuoso", "buzzing", "dauphin", "steed", "cowley", "paraffin", "unites", "stimulant", "realising", "millet", "invert", "vermilion", "grinned", "marche", "thelma", "enlightening", "endlessly", "hasty", "dexterity", "puzzling", "nods", "dieses", "sumatra", "nigger", "scrape", "kendrick", "prized", "arresting", "bewitched", "resumption", "irma", "intimidated", "traitor", "clove", "illiterate", "widened", "bordered", "mallet", "leech", "giver", "discontent", "gaz", "punishing", "seedling", "dwellers", "mouthpiece", "nymph", "reassuring", "astor", "myles", "prematurely", "frail", "adventurer", "irradiated", "awfully", "mayflower", "arched", "enlist", "vedic", "exemplified", "profane", "ubi", "cornelia", "romney", "macaroni", "electing", "dictation", "tage", "robber", "evacuate", "tus", "conveniences", "roving", "drinker", "softened", "peking", "fillet", "maar", "churn", "nimbus", "nog", "smartest", "neale", "ett", "madre", "impart", "feats", "concomitant", "donner", "scaffold", "oui", "ano", "millie", "libro", "leisurely", "loki", "dislikes", "mayonnaise", "dra", "limitless", "knopf", 
"hangman", "sloping", "mitt", "constitutionally", "disapproval", "bavarian", "crucified", "pocahontas", "masons", "surges", "literatures", "unlucky", "yawn", "distort", "mun", "wahl", "loosing", "canopies", "handicraft", "buscar", "piling", "basilica", "amine", "robbers", "juliana", "lowland", "sausages", "spake", "feud", "subordinated", "awoke", "unheard", "prune", "endanger", "cairn", "nomadic", "disgusted", "olfactory", "prolong", "fontaine", "knits", "thinly", "tant", "garnett", "galen", "arable", "parallelism", "brut", "vernacular", "latitudes", "alkali", "mowing", "foreseen", "palmerston", "sever", "expend", "stahl", "gist", "auntie", "afghans", "blames", "subdivided", "happiest", "lucca", "francine", "reserving", "nagasaki", "wid", "indented", "humming", "disclaim", "frans", "diameters", "exerted", "justifies", "freiburg", "regenerate", "titre", "tumbler", "bonne", "improvised", "flocks", "bothering", "garnered", "fling", "comrade", "ascended", "juliette", "porcupine", "chopping", "enacting", "stabbing", "metamorphosis", "hilda", "wanderer", "flattened", "dawkins", "spitting", "inconvenient", "seacoast", "imperfections", "lewes", "chancery", "raving", "hed", "executor", "anglesey", "choirs", "wreaths", "tasteless", "tomahawk", "tact", "projet", "instructive", "absorbs", "susannah", "toutes", "mathematically", "godwin", "drier", "bothers", "parades", "shoved", "invokes", "cannons", "hamish", "chromatic", "rife", "rallying", "enoch", "carriages", "dales", "polled", "agnostic", "emptied", "denounced", "delusion", "rimini", "verity", "turret", "precede", "huts", "betts", "domes", "eras", "wildest", "foodstuffs", "wessex", "priming", "vowels", "sulphate", "clandestine", "migrations", "hovering", "texte", "tamper", "pugh", "punishments", "dagen", "heathen", "unduly", "rigged", "domicile", "chargeable", "fanning", "meu", "spurred", "broughton", "wha", "osage", "peregrine", "tabitha", "puede", "crumb", "fostered", "culmination", "revolves", "mend", "theoretic", "softening", "glimpses", "hattie", "tastefully", "capo", "grate", "lourdes", "diseased", "kenilworth", "margot", "socialists", "deduced", "buttocks", "unmanned", "rainbows", "gunnar", "burials", "eunice", "bountiful", "salazar", "mesopotamia", "jetzt", "poseidon", "ratify", "mexicans", "fiend", "drapery", "bernice", "deported", "muzzle", "entrant", "schoolhouse", "retribution", "yusuf", "stallman", "slander", "basing", "baits", "fireside", "disposing", "herzog", "suffrage", "triumphs", "fortifying", "sleepless", "schiff", "watered", "lass", "fleas", "tully", "ventured", "recite", "kneeling", "negation", "dismay", "smelled", "jute", "heals", "prim", "trespass", "conciliation", "compasses", "groomed", "leaping", "impunity", "sunken", "inaugurated", "encountering", "infernal", "sewell", "pang", "swag", "reared", "pampered", "inquiring", "numero", "praising", "momentary", "commemoration", "favre", "poli", "holstein", "serpentine", "hangings", "lugar", "sundry", "protestants", "therefrom", "espace", "wrecking", "cristo", "pique", "swore", "novembre", "fawcett", "journeyman", "enlighten", "descartes", "flashy", "prowess", "abstractions", "enriching", "trampling", "signet", "bello", "iroquois", "digested", "rothschild", "trumpets", "embodies", "messe", "manhood", "kincaid", "cannibal", "nephews", "oblivious", "icao", "atmospheres", "stricter", "jeter", "memes", "roughness", "ancients", "snapping", "jethro", "cauliflower", "feudal", "unbearable", "perpetrated", "basses", "juni", "boarded", "olympian", "sedgwick", "livre", "mano", 
"interferes", "devotions", "myra", "devotees", "acquaintances", "sectarian", "fathom", "cools", "segundo", "appreciative", "innumerable", "parramatta", "noticeably", "furs", "atonement", "extant", "ignacio", "unmask", "chisel", "mysteriously", "wayward", "redness", "dreamland", "wands", "illustrious", "fishy", "nao", "pauses", "intoxication", "glimmer", "blooded", "slamming", "syllables", "whim", "filmy", "timid", "ismail", "tampering", "weavers", "magically", "pied", "thyself", "rooting", "pretended", "nigh", "therewith", "interment", "partitioned", "aller", "populous", "modesty", "veils", "frei", "zest", "sumptuous", "wayside", "spotless", "wich", "summits", "ner", "banc", "barbed", "legions", "dona", "lustre", "wer", "sunflowers", "sommer", "ecstatic", "campania", "blasphemy", "wisp", "countenance", "skinning", "sift", "ooze", "recounts", "adventurers", "oktober", "bigotry", "leaky", "contradicts", "leven", "pagans", "dinars", "diesem", "fume", "afloat", "bruised", "flattering", "brigades", "leur", "engrossed", "dashes", "impeach", "atrophy", "hur", "brag", "earls", "confers", "totality", "circumvent", "boulders", "negotiator", "yolanda", "muff", "maude", "odour", "bellamy", "snag", "fringes", "gough", "excavated", "smoothed", "affirms", "gulch", "irrevocable", "wieder", "moaned", "axles", "graciously", "radiated", "bribe", "propel", "outspoken", "verily", "ardent", "forcibly", "presided", "shimmer", "tremor", "gnp", "loaned", "violins", "extravagant", "ghent", "astute", "jamieson", "pemberton", "inflict", "invalidate", "ridiculously", "legible", "towed", "disregarded", "auguste", "puc", "salted", "attractiveness", "calamity", "brewed", "aristocrats", "fiance", "sprawling", "vulture", "mislead", "ventral", "twa", "retard", "medio", "platters", "canto", "germanic", "harassed", "discriminated", "estelle", "sponges", "cavendish", "receptacles", "jacinto", "revered", "harassing", "dislocation", "shingle", "timbers", "undergoes", "tilting", "conquering", "harass", "meditate", "hues", "alsace", "denominated", "ostensibly", "lumps", "facie", "emploi", "cretaceous", "fished", "drizzle", "bracing", "mesure", "blackmail", "corte", "remorse", "navarre", "clout", "jours", "wag", "fella", "mountaineer", "pondering", "purposely", "worshipped", "lucifer", "unholy", "spectacles", "dulce", "muttered", "aquila", "hoff", "mme", "spat", "henceforth", "argo", "strapping", "expedient", "unconditionally", "ices", "secreted", "buch", "chaucer", "livery", "recapture", "chevalier", "incompatibility", "anchoring", "navigable", "personas", "milieu", "stonehenge", "injure", "knuckles", "zoeken", "intermission", "amazement", "medusa", "pagoda", "manifests", "primed", "keg", "recited", "reformers", "ensued", "justly", "throats", "aron", "barrage", "pis", "pari", "buoyancy", "aussi", "curled", "raoul", "peeping", "paces", "heaviest", "walnuts", "ena", "broadened", "lashes", "esplanade", "prairies", "mandel", "conical", "tricked", "etymology", "cheaply", "allege", "draped", "subtly", "manslaughter", "consort", "shad", "fleeting", "sibley", "plumb", "needlework", "caballero", "annoyances", "uti", "bacchus", "chuckle", "unfolded", "israelites", "rit", "briar", "wavy", "moulds", "hindered", "bloated", "pranks", "mantel", "languedoc", "fatima", "disordered", "belated", "englishman", "winder", "paralyzed", "junta", "shrunk", "crammed", "aar", "hatchet", "unsuspecting", "dismissing", "cetera", "windfall", "filaments", "jocelyn", "companionship", "creeper", "cuando", "epidemics", "illegitimate", "slag", "undisturbed", 
"transcendental", "georgina", "chantilly", "farmed", "fuentes", "malo", "complicate", "alston", "indistinguishable", "skillful", "groot", "compensating", "overrated", "reasonableness", "nuances", "knuckle", "bastion", "scraping", "gypsies", "concurring", "assemblage", "watery", "tro", "juanita", "coiled", "yucatan", "sipping", "beatrix", "cheerfully", "sledge", "gilded", "murdering", "dijon", "unbroken", "sages", "tropic", "capella", "beim", "condemning", "entourage", "travers", "familia", "iota", "realist", "suppressing", "scorn", "crusades", "pris", "whirl", "pervert", "defences", "humiliating", "circled", "withers", "sprout", "elicited", "swirling", "campos", "clinging", "bunches", "bagged", "negotiators", "deviate", "blackened", "whereupon", "muriel", "hostilities", "atelier", "penned", "conte", "horatio", "cheered", "bled", "throbbing", "sleepers", "seiten", "zeit", "sallie", "solace", "lucien", "havre", "moles", "unloaded", "projectile", "transplanted", "bandages", "handcuffs", "beacons", "stucco", "intrinsically", "geschichten", "impervious", "shams", "shawls", "aos", "flourishing", "precedes", "bruises", "instructs", "palatine", "lor", "carnation", "kangaroos", "slum", "ruffle", "knack", "rivet", "aragon", "aggie", "tilly", "sonya", "haue", "grunt", "talmud", "grammars", "overalls", "doubted", "ravaged", "whistling", "upholding", "ailing", "obeyed", "tattooed", "ghostly", "mutiny", "delusions", "foresee", "rations", "bitterly", "windmills", "perpetrator", "cleverly", "misunderstandings", "amerika", "counsellors", "amis", "sisterhood", "lightening", "overturn", "doit", "thoughtfully", "mortem", "rencontre", "risking", "proprietors", "tatiana", "ingress", "gros", "barbers", "retires", "duro", "commotion", "deduce", "bolted", "materialism", "eternally", "senseless", "rabid", "reassure", "recollections", "probed", "pox", "hamlets", "unwritten", "jammed", "moveable", "housekeeper", "agrarian", "humana", "lovable", "sawmill", "abram", "catharine", "consented", "perseus", "styx", "congested", "banished", "terraced", "buttermilk", "laces", "toil", "hugged", "flurry", "gower", "warmest", "horrified", "walpole", "cada", "alte", "bertram", "perturbations", "adversaries", "aunts", "mau", "vapors", "skylight", "gemma", "constantinople", "monarchs", "unsolved", "strenuous", "roost", "unreasonably", "shuffling", "ludicrous", "tenets", "albanians", "pius", "garb", "steadfast", "reckoned", "promissory", "overflows", "queried", "squarely", "softness", "crayon", "rotting", "exhilarating", "excepted", "flavoured", "marque", "ditches", "millionaires", "evade", "pars", "scourge", "twig", "lapis", "bandage", "detach", "virginity", "mala", "doctrinal", "adaptability", "cramped", "wept", "ganz", "racking", "corrects", "avignon", "servicio", "vanishes", "obedient", "selkirk", "mur", "sects", "modo", "anxiously", "ascribed", "strikers", "optimist", "gratification", "seashore", "automaton", "otros", "pierson", "unskilled", "brigadier", "consonant", "acetic", "unarmed", "dyeing", "intolerable", "republished", "tawny", "absinthe", "hygienic", "sufferings", "tahitian", "propagating", "sacraments", "layman", "vellum", "ignatius", "emperors", "ferro", "stalks", "stanza", "londres", "terminations", "novices", "grasped", "bequest", "deo", "beggars", "redeemer", "florin", "quixote", "chaise", "paternal", "dey", "rained", "indigent", "trellis", "trabajo", "mythic", "crystallization", "marries", "echoing", "recitation", "aptly", "alleviation", "liege", "remittances", "romances", "nieces", "characterizes", "papyrus", 
"fop", "candlestick", "circling", "hellas", "sheik", "pints", "girdle", "siamese", "veiled", "blotting", "intimates", "eruptions", "javelin", "ipsum", "stares", "eastward", "tecumseh", "yon", "entree", "desist", "grasshopper", "rheumatic", "autobiographical", "piety", "embody", "petites", "gris", "crawled", "soiled", "dich", "froze", "superfluous", "gai", "disarm", "sot", "tacit", "chansons", "parenthesis", "reorganized", "daybreak", "rallied", "quakers", "pentecost", "beulah", "unveiling", "burg", "astray", "blisters", "infirmary", "hinted", "sanctity", "gad", "modus", "pedantic", "beste", "dennison", "grandes", "bullies", "notoriously", "lucius", "kirsty", "caustic", "rook", "gleaming", "dominoes", "tua", "parochial", "bertie", "moreau", "precedents", "exiled", "howells", "pall", "mustered", "pretext", "whisk", "flared", "kleine", "deference", "artful", "eld", "audacity", "margate", "judson", "downwards", "moat", "inasmuch", "plotters", "caress", "hails", "swam", "wilfred", "mauve", "hazy", "twitch", "alegre", "glorified", "combed", "reclaiming", "baptists", "paraphrase", "flounder", "crept", "fibrous", "swamps", "epilogue", "hoof", "epistle", "exiles", "wheatley", "clapping", "finesse", "sociale", "cordelia", "infrequently", "favoring", "converging", "cour", "firma", "inquisition", "reputed", "dinah", "seduce", "bearers", "kimono", "guesses", "foote", "impossibility", "ceylon", "courant", "invasions", "eminence", "canna", "liberate", "gracie", "gunther", "hanged", "flatter", "acquitted", "dimmer", "sola", "cauldron", "dredge", "tingling", "preferring", "cordial", "reassurance", "superintendents", "nervousness", "delineated", "imaginations", "quarrel", "bess", "aryan", "tendering", "transitive", "furthering", "connoisseur", "idealism", "separable", "awa", "liqueur", "spokes", "pastime", "pursues", "bugle", "luxemburg", "disperse", "incoherent", "fours", "treffen", "devout", "strom", "alva", "unfurnished", "blinding", "inaction", "northward", "trotter", "subversive", "contre", "impediments", "armoured", "breathless", "intertwined", "steen", "corkscrew", "trop", "affections", "inherits", "mortals", "purgatory", "vise", "comer", "tillage", "pere", "discloses", "easterly", "lagged", "hawker", "vertebrates", "toughness", "disrespect", "lagging", "uncovering", "indeterminate", "refreshment", "momentarily", "festa", "langer", "lute", "rosette", "changeable", "tragically", "waverley", "clapham", "trumps", "justifiable", "twofold", "sicilian", "marlowe", "unearned", "thwart", "potted", "chanson", "amelie", "incurring", "gracias", "convalescent", "terme", "mackerel", "goings", "brim", "clinch", "provident", "leprosy", "chum", "cometh", "fitter", "glut", "fasten", "locksmith", "interrupting", "sulla", "daggers", "pleases", "moors", "arno", "geranium", "kendal", "revolve", "choc", "waged", "waxed", "concourse", "confine", "jaded", "mingle", "purify", "desolate", "withdraws", "choked", "whereof", "pape", "gruesome", "pleadings", "defying", "sacs", "perished", "erskine", "tentacles", "britons", "pringle", "outcast", "faraday", "oblong", "ophelia", "wearer", "propriety", "attainable", "hearsay", "roomy", "brutus", "obscurity", "heros", "colonists", "matting", "overflowing", "capers", "entice", "lasso", "soot", "yonder", "virulence", "heretic", "draught", "comical", "generalizations", "waiters", "gasped", "geologists", "caverns", "boarder", "bumping", "eines", "greets", "ova", "waxes", "whiz", "bevel", "straining", "seduced", "angrily", "croquet", "vacate", "stanislaus", "soundness", "marquise", 
"bonjour", "xxiii", "protracted", "siegfried", "affaires", "digby", "eyelid", "undeniable", "taming", "precluded", "repressed", "perforce", "barons", "boundless", "hopelessly", "grandchild", "sucre", "pasteur", "valuables", "indignation", "sprinkled", "menstruation", "stuffs", "antichrist", "emptying", "reiterate", "himalayas", "monopolies", "sowing", "frills", "wad", "shearing", "ruining", "pinion", "yew", "windward", "hermosa", "haunts", "unsere", "brawl", "delirium", "unfounded", "heroism", "gillis", "rutledge", "barrister", "neglecting", "saxony", "karel", "vane", "alienated", "tum", "synagogues", "entangled", "mane", "reise", "liberating", "embarking", "tonneau", "cynicism", "bayonet", "considerate", "extraneous", "janitor", "environs", "reverses", "reunite", "hawkeye", "steers", "ravenna", "crockery", "juries", "presidente", "nang", "gare", "legacies", "tial", "theologians", "arnaud", "enticing", "embankment", "quadruple", "crazed", "xxii", "equipping", "fondly", "whither", "counteract", "sighs", "discouraging", "flasks", "preservative", "tribulation", "bridesmaids", "rhea", "raided", "salaried", "mises", "intolerant", "rarities", "battled", "obstructions", "discredit", "grotesque", "artistes", "perugia", "gij", "spoils", "monasteries", "crucible", "modena", "generalize", "hasta", "pronouns", "misconception", "rudimentary", "sown", "protege", "vulgaris", "beak", "settler", "prag", "rabble", "rung", "piped", "orpheus", "retour", "insurgent", "rightfully", "hilfe", "medici", "fabrice", "marshals", "nue", "crumbling", "relegated", "allotments", "immer", "stagnant", "giacomo", "follies", "dells", "cleanly", "unclean", "seizing", "molasses", "tablecloth", "hutchins", "purifying", "delineation", "schooner", "dignified", "numbness", "papier", "machinist", "anima", "apologized", "meshes", "grotto", "marais", "loam", "politique", "carnations", "rivets", "jeune", "hatching", "leveled", "graces", "corinne", "adheres", "collusion", "rawhide", "propos", "knotted", "agitated", "sorter", "misused", "relieves", "linguist", "rigorously", "erroneously", "especial", "betray", "dario", "cui", "heywood", "suspending", "mormons", "davids", "bennet", "proclaiming", "purposeful", "undress", "procrastination", "hemel", "gauze", "precepts", "constellations", "gazed", "skips", "forceful", "fuente", "magdalena", "rut", "sehr", "hera", "subterranean", "rumored", "galicia", "amuse", "villager", "fixer", "condensing", "emanating", "assassinated", "brodie", "untimely", "associating", "romp", "idiom", "tangle", "legitimately", "congratulated", "couriers", "unwelcome", "concurred", "upsets", "sceptre", "confederacy", "matinee", "snatched", "plunder", "maa", "impromptu", "searchers", "gamut", "czar", "putney", "shattering", "refute", "amphibious", "mush", "shudder", "eyesight", "parson", "infidelity", "firemen", "contrived", "exhausts", "opposites", "dreamers", "foal", "hesse", "hesitated", "precarious", "hodder", "pease", "testifying", "topographical", "instructing", "dreary", "crispin", "horrid", "dryness", "wreckage", "paras", "captives", "despised", "conqueror", "innocents", "unprepared", "dost", "treacherous", "filet", "infidel", "volley", "carnal", "larceny", "versed", "confronts", "parliaments", "mitigated", "youngster", "enigmatic", "bridle", "stretcher", "cosa", "enfants", "leila", "berliner", "effecting", "hallucinations", "unravel", "smugglers", "intimidate", "rubens", "galilee", "frenchman", "tiller", "orifice", "bragging", "hordes", "beryl", "ferre", "forerunner", "grinning", "slashed", "watchful", 
"appalled", "silenced", "vanities", "evaporated", "affliction", "zag", "intestines", "saute", "iba", "schuyler", "idyllic", "satchel", "peruse", "revel", "alleys", "crucifixion", "hearn", "madly", "stiller", "experimented", "comming", "steeped", "gripe", "summa", "eyelids", "thereupon", "archers", "steamers", "bubbling", "forbids", "disdain", "exhausting", "absurdity", "magnified", "horsemen", "alabaster", "reigning", "deane", "georgie", "zara", "bribes", "kidnap", "coercive", "romanticism", "luo", "forme", "reinstate", "unthinkable", "lowly", "outburst", "scant", "mattered", "fitzroy", "ove", "raspberries", "sorely", "pail", "obtainable", "elvira", "mastiff", "drummers", "reformer", "solemnly", "liberally", "dahlia", "concentric", "loin", "ved", "unwarranted", "marmalade", "sandoval", "applauded", "ravine", "exponents", "brice", "ressources", "californians", "procuring", "pours", "leer", "nave", "arranges", "valhalla", "adoration", "amity", "superiors", "decanter", "starve", "leek", "shortness", "fronted", "lightest", "banquets", "picnics", "compulsion", "prerogative", "abscess", "paraphernalia", "heretofore", "memento", "lina", "tumbled", "masterful", "insoluble", "cockburn", "harwich", "casas", "semper", "repressive", "clos", "sweeter", "mattie", "deutscher", "spilling", "saucers", "gondola", "elizabethan", "hein", "spines", "reiter", "amphitheatre", "stupendous", "flutter", "acumen", "absolut", "shiver", "lumiere", "shatter", "pickled", "nieuwe", "hades", "superimposed", "burdened", "randal", "dandelion", "nuance", "classmate", "catechism", "driftwood", "rosalind", "giorni", "juin", "bigelow", "anointed", "mythological", "interspersed", "horseman", "nervously", "intruders", "chaparral", "nya", "decaying", "vez", "muses", "padlock", "oars", "gilead", "classed", "informer", "freer", "toute", "calabria", "dismantled", "overcame", "exertion", "solidly", "affidavits", "weaves", "chimera", "handkerchief", "foaming", "tailors", "barbarians", "splendour", "niveau", "sheriffs", "tassel", "admiring", "harmonized", "khartoum", "leans", "frankreich", "baffled", "wasteful", "hertford", "tripoli", "refraction", "grainger", "penzance", "fillets", "aztecs", "consults", "hoi", "foils", "retract", "inaudible", "nurtured", "frantically", "buoys", "tait", "disintegration", "theologian", "aquitaine", "sigmund", "individualism", "starboard", "precludes", "burdensome", "brest", "renown", "murky", "truthfully", "deutschen", "tongs", "perpetuate", "vigo", "cabal", "musa", "materia", "interwoven", "beggar", "pard", "extinguished", "silhouettes", "abundantly", "declination", "excesses", "mucous", "poked", "caricatures", "artiste", "bogen", "repose", "hasten", "tendered", "temperance", "risque", "resembled", "helpfulness", "omitting", "earthy", "adored", "embellished", "feathered", "aggrieved", "hacer", "assisi", "aggravating", "insulted", "fugitives", "passe", "anecdote", "partake", "pseudonym", "altitudes", "carolinas", "strikingly", "zy", "rancher", "morn", "bodyguard", "gnats", "solon", "eduard", "detract", "portraying", "pitted", "enlarging", "wrecks", "bombardment", "buckner", "dares", "tems", "eigen", "siesta", "satirical", "paar", "antoinette", "ugo", "cynic", "amenable", "runways", "frowned", "sass", "rout", "pus", "rubies", "checkered", "hatched", "sketching", "hypocritical", "trample", "courtship", "cupboards", "tolerable", "magi", "brescia", "alonzo", "tutto", "attenuated", "inefficiency", "merci", "booms", "demented", "eri", "bonaparte", "musketeers", "twickenham", "glee", "forgets", "grapple", 
"lowlands", "stimulants", "greenery", "proverbial", "tranquillity", "numa", "monastic", "uncles", "eph", "soared", "householders", "nestor", "impediment", "hel", "anarchists", "freund", "perilous", "devonshire", "tanto", "violets", "nouvelles", "nether", "nomads", "ramble", "ambulances", "natura", "hams", "idiotic", "parti", "cerberus", "bering", "formosa", "erg", "bough", "hoot", "herewith", "workmen", "grist", "penrose", "duster", "pronoun", "signer", "sloth", "steely", "pulleys", "fates", "stews", "nourishment", "gravitation", "loophole", "drags", "retrograde", "sade", "exaggeration", "shadowy", "liquors", "archangel", "fenwick", "creases", "primordial", "nourish", "vit", "uplifted", "percival", "gingham", "batterie", "gossamer", "hairdresser", "plover", "weg", "mow", "disliked", "leinster", "impurity", "worshipping", "chasm", "nuovo", "greenish", "regiments", "adel", "selfishness", "reactionary", "adriatic", "ejected", "grappling", "hammering", "mingling", "earnestly", "scribes", "leed", "monologue", "amphitheater", "vive", "signaled", "clem", "littered", "acutely", "razors", "masse", "legumes", "speculated", "worded", "quant", "fleshy", "desirability", "sundown", "persistently", "decoy", "balsam", "baruch", "verdicts", "authorise", "outcry", "eyeglass", "waterside", "grime", "extortion", "cordon", "colorless", "idealistic", "cutlass", "rigor", "greyhounds", "amalgamation", "preponderance", "cowardly", "pretentious", "cervantes", "wielding", "gusto", "maidens", "weimar", "mijn", "humbly", "langue", "unworthy", "expectant", "laurens", "azalea", "jeannette", "fruition", "florentine", "dwelt", "vlaanderen", "oberon", "enslaved", "vil", "cathay", "jura", "correspondingly", "legalized", "predicament", "hilly", "aisles", "trusty", "gratuitous", "fatally", "caged", "ephemeral", "radium", "dissimilar", "mutilation", "kon", "waging", "infringed", "overwhelm", "cognizant", "profil", "andalusia", "rowdy", "popes", "bravely", "sportsmen", "stumbles", "clematis", "slashing", "leger", "incomprehensible", "suez", "clogged", "gabriella", "fluctuating", "demeanor", "shipboard", "labourers", "paganism", "fido", "sounder", "mest", "caledonian", "hegel", "stench", "cursing", "pmb", "wickedness", "crouching", "attila", "emits", "culminated", "thefts", "sturm", "weiter", "auld", "spanned", "ebenezer", "closeness", "redeeming", "polity", "scriptural", "transylvania", "obscenity", "gaul", "heartache", "reigned", "entitles", "exacting", "wanton", "pelle", "enforces", "necessitate", "locket", "aver", "commemorating", "reconciling", "desolation", "gander", "bastille", "traceable", "voila", "savor", "darkly", "faithfulness", "resourceful", "heraldry", "incomparable", "dilated", "angered", "condone", "ahora", "mademoiselle", "constitutionality", "viscount", "preliminaries", "devolved", "liquefied", "alcatraz", "streamed", "resorting", "garters", "adamant", "pontoon", "tableau", "vernal", "napoleonic", "tennyson", "rubicon", "disorderly", "tala", "ivanhoe", "destroyers", "analogies", "frigate", "instalment", "dazed", "sentient", "entrust", "iti", "puffs", "burying", "dispatching", "cyclops", "veritable", "posterity", "keenly", "healthful", "nem", "meine", "repealing", "gourd", "groaned", "ferocious", "voicing", "mons", "sacrificial", "defies", "abnormally", "resuming", "bruising", "flogging", "religiously", "mundi", "encroachment", "demande", "seaboard", "laplace", "southerly", "humiliated", "unearthed", "sut", "cataracts", "subordinates", "vagabond", "consecrated", "oscillating", "jib", "bodice", "foray", 
"opiate", "cristal", "unmistakable", "filly", "rhubarb", "silencing", "aesop", "hab", "diminishes", "tidings", "sneaking", "unassisted", "insidious", "dike", "immutable", "croton", "depots", "nodding", "jasmin", "libri", "misrepresented", "amici", "substantiate", "algiers", "ocho", "templar", "cedars", "fortitude", "aloft", "mated", "wart", "tribus", "hollander", "ruffled", "armament", "plums", "tien", "revisiting", "fairer", "enterprising", "prides", "grafting", "smoothness", "trinket", "neutralize", "vasco", "playwrights", "wishful", "fal", "herod", "trailed", "habitation", "rogues", "speechless", "expanse", "preside", "arles", "colette", "delightfully", "oeuvres", "concealment", "unruly", "uncompromising", "moriarty", "obstruct", "unbounded", "coincided", "encased", "undertaker", "flickering", "sive", "gush", "saddened", "bathe", "scarred", "ignited", "crowding", "tew", "vrouw", "gladiators", "krebs", "stoddard", "scrooge", "aeroplane", "nagging", "contemporaneous", "precipitated", "hiss", "outlawed", "injuring", "bellow", "girth", "poppies", "inlaid", "notched", "baldness", "didactic", "lillie", "irritability", "provocation", "lustrous", "reeling", "desertification", "rennes", "crests", "molto", "loafers", "slapping", "tiene", "squires", "insures", "slaying", "mie", "frauds", "lobes", "dios", "thundering", "remus", "coals", "succulent", "heartily", "hic", "yellowish", "unsuccessfully", "moderne", "moustache", "geen", "lobsters", "eventful", "feasts", "stiletto", "teacup", "rebekah", "kein", "alvarado", "secession", "countered", "instinctively", "conspiracies", "chapels", "grado", "minions", "brunt", "infraction", "gory", "glens", "strangest", "stagnation", "displace", "countrymen", "perishable", "lyra", "gustave", "proteus", "denoting", "apiece", "jeanie", "strasse", "gammon", "storming", "islet", "conduits", "cinco", "headway", "friars", "maples", "alluring", "ikke", "edouard", "buzzard", "bony", "halting", "sana", "halley", "cranks", "headwaters", "reviving", "burrow", "universality", "veranda", "underrated", "insatiable", "exquisitely", "unfriendly", "hatches", "christened", "actuality", "teased", "murad", "attica", "flatten", "savant", "appreciating", "stinging", "membres", "gulls", "prescribes", "sultry", "sinned", "globular", "asiatic", "macaulay", "depositing", "engravings", "showering", "fanatical", "caper", "yann", "predicated", "montezuma", "lentils", "quack", "bruges", "grooms", "ousted", "cask", "grocer", "speedily", "auberge", "negroes", "chases", "intervened", "mezzo", "incarnate", "chimneys", "hela", "preoccupied", "hither", "diggers", "glances", "tyrants", "constantin", "giddy", "denounce", "entertainments", "oaths", "furness", "ripples", "herz", "bloodshed", "maw", "viento", "upsetting", "durante", "oxen", "nascent", "toda", "reinforcements", "precept", "salerno", "pavements", "murmured", "propellers", "violinist", "himalaya", "gibbon", "gratifying", "delirious", "excepting", "unlawfully", "spanien", "urchin", "polygamy", "utterances", "devising", "sustains", "woodman", "gravely", "errands", "hells", "cartes", "impulsive", "spasms", "rationally", "psychologie", "uproar", "savages", "craters", "wilmot", "mockery", "railings", "paulina", "northerly", "tenths", "quench", "passer", "projekt", "encompassed", "broil", "hurrah", "modestly", "epitaph", "allahabad", "insurrection", "brugge", "alger", "emigrated", "barges", "nota", "tremblant", "antennae", "fermented", "enfant", "headmaster", "walrus", "secretive", "grievous", "generative", "assyrian", "repetitions", 
"pensioner", "spellbound", "bretagne", "tengo", "domenico", "fend", "sapphires", "compressing", "intoxicating", "crumble", "resorted", "lecturing", "retreated", "senza", "magdalene", "veer", "netted", "dispel", "warships", "tamar", "woodbine", "straightening", "envious", "regretted", "colic", "oni", "membre", "adolph", "farthest", "iniquity", "fooling", "vaulted", "warms", "formalities", "resounding", "aku", "brazos", "saucy", "blistering", "illuminates", "masque", "kazan", "shillings", "gleaned", "decomposed", "flowery", "scandalous", "blas", "ciel", "menacing", "elector", "lili", "neurotic", "bituminous", "askew", "phipps", "groan", "dusting", "lombardy", "uncontrollable", "shackles", "shrines", "bridged", "consenting", "torturing", "toile", "relentlessly", "bracken", "couches", "decadence", "antes", "nourishing", "herschel", "reconsidered", "anche", "arduous", "morten", "assimilated", "creeps", "gripped", "sama", "unscrupulous", "nymphs", "unsettled", "inseparable", "caso", "jurist", "vestal", "dismisses", "variously", "arran", "unintentionally", "sprites", "dashing", "tiring", "abate", "piloting", "decreed", "mossy", "ores", "banque", "keyhole", "usages", "wickham", "vieux", "bowels", "cornet", "reversion", "sanctuaries", "convicts", "osman", "lodger", "santee", "thunderbolt", "claudius", "tremors", "apropos", "pitiful", "winkel", "sparrows", "bleached", "arbiter", "locomotion", "hus", "antimony", "hater", "buoyant", "expel", "martine", "combatant", "swoop", "neuter", "prejudicial", "gente", "introspection", "meister", "mariage", "benedictine", "reputations", "vitally", "mavis", "undivided", "chatted", "lured", "hurling", "brevity", "visage", "prickly", "septembre", "astonishment", "overshadowed", "rescuing", "sensibilities", "meritorious", "beheld", "martyrdom", "manna", "octobre", "moorings", "buddhists", "soars", "gnat", "housework", "gunpowder", "undressed", "southward", "liszt", "zwei", "zorn", "recounted", "denials", "prussian", "adorn", "contemplative", "awkwardly", "etta", "projets", "lik", "belles", "stipulations", "lifeless", "baffle", "pared", "sobriety", "slums", "burnet", "spaniards", "piloted", "successively", "cucumbers", "squaw", "snowdon", "pomegranate", "glas", "bouts", "transcends", "murmur", "bookkeeper", "crickets", "extinguishing", "noche", "attache", "bulging", "chemise", "epics", "smug", "flanking", "dons", "stadt", "prejudiced", "larva", "laziness", "mouldings", "tireless", "leander", "growl", "gorges", "stata", "canons", "pastimes", "diurnal", "coolness", "busca", "recumbent", "shipwreck", "fader", "unconsciously", "buffaloes", "marne", "dissolving", "osmond", "highness", "abstracted", "typhoid", "perfecting", "nez", "furtherance", "suis", "slits", "inquires", "yule", "phantasy", "sprache", "hoss", "crusty", "stillness", "precipitate", "underlie", "pharisees", "nicknamed", "drones", "minster", "sully", "bate", "pert", "depositions", "camped", "fraught", "perplexed", "replenish", "necessitated", "slowest", "unwillingness", "sehen", "trimmings", "esperanza", "divan", "lehrer", "holborn", "concours", "extraordinaire", "eloquence", "definitively", "natchez", "tripped", "strewn", "rubles", "bewildered", "beatings", "copious", "cade", "tremble", "instantaneously", "thump", "ghi", "pompeii", "alluded", "aberrations", "sojourn", "stateroom", "palacio", "adherents", "herbaceous", "distinguishable", "immaterial", "sina", "surging", "lop", "greased", "contraband", "flagging", "willed", "wounding", "inclement", "ange", "magpie", "stil", "robbing", "impartiality", 
"phosphates", "harpsichord", "capes", "impersonal", "proposer", "interpolated", "strolling", "moro", "salvo", "twigs", "furiously", "epitome", "joked", "breaths", "lilian", "glancing", "discarding", "fared", "fleck", "inflamed", "clough", "unlink", "shadowing", "wert", "regimental", "signifying", "tutte", "rectified", "savoie", "flanked", "bayonne", "primacy", "fuego", "buckland", "centrale", "eyeing", "bade", "insolvent", "mists", "nuit", "carmine", "relinquish", "emilie", "succinct", "palpable", "eton", "estar", "inhale", "dreamt", "convulsions", "snowshoes", "fiancee", "fue", "blumen", "yolk", "mediocrity", "rhyming", "sucht", "transcendent", "lichen", "lapsed", "stroked", "gallop", "cull", "unsatisfied", "wmo", "minstrel", "ewe", "contentment", "fareham", "cranium", "politic", "exchequer", "falsehood", "slugs", "carcasses", "piero", "candlesticks", "rosalie", "mingled", "rafts", "indulgent", "longed", "rammed", "wailing", "shrugs", "negros", "vertebrae", "moans", "buffets", "aristocracy", "eaves", "popularly", "brinkley", "marred", "falconer", "watchman", "venturing", "entitle", "bagley", "alibi", "ahoy", "jellies", "postponement", "brooding", "juncture", "greenleaf", "naturalized", "pikes", "haar", "meager", "commandant", "copernicus", "bourgeoisie", "plucked", "inflexible", "flowered", "bueno", "discord", "patrolling", "injurious", "voiture", "utilitarian", "compacted", "ende", "doughnuts", "reread", "stormed", "crucifix", "irreverent", "censure", "carbine", "credo", "heartless", "contented", "vultures", "forcible", "bushy", "thickening", "moins", "porches", "inoculation", "luxuries", "glorify", "abner", "maris", "admixture", "heredity", "nominally", "forza", "chloroform", "nettle", "mismanagement", "convincingly", "evangeline", "descends", "mischievous", "fateful", "complacency", "impregnated", "insular", "lagoons", "sensuality", "vere", "affix", "professed", "unrivalled", "sensuous", "owne", "sawing", "yelp", "herding", "mammalia", "hopped", "sceptical", "arma", "interfered", "halcyon", "bowing", "cogent", "parishioners", "traversing", "uninformed", "yorke", "aberration", "mollie", "nef", "conclusively", "calcareous", "tufted", "chieftain", "gestalt", "honeysuckle", "zeitschrift", "unspoken", "ishmael", "apprehended", "rhoda", "jammer", "forbidding", "sparring", "mindanao", "adonis", "domed", "distressing", "prettiest", "lif", "panes", "testifies", "filipinos", "chambre", "dainty", "crackle", "jes", "thwarted", "alban", "planks", "orville", "belcher", "spirals", "speculations", "sedentary", "extermination", "plumes", "outweighed", "transposition", "acheter", "beets", "repel", "pali", "coleridge", "anxieties", "poste", "onerous", "tenderly", "bonny", "haddock", "virginian", "pyjamas", "finns", "oftentimes", "entanglement", "miserably", "savoir", "rojas", "argosy", "elba", "stumps", "clouded", "diverting", "derogatory", "esteban", "xxiv", "sear", "rouen", "inaccuracy", "assimilate", "medea", "regenerated", "laine", "gottfried", "rapp", "credence", "welling", "patrolled", "georgette", "lovelace", "caen", "conferring", "incite", "divulge", "wardens", "scrubbing", "laughable", "momentous", "footpath", "entreprise", "harem", "fussy", "civility", "deluge", "squadrons", "ventricle", "fluted", "sweetened", "pry", "venison", "shoal", "basking", "pare", "blushing", "breathes", "lectured", "babylonian", "annonce", "morte", "bord", "skillfully", "heady", "confucius", "bombarded", "celts", "bathed", "cortes", "intractable", "corresponded", "speckled", "enumerate", "persuading", "onondaga", 
"diphtheria", "plaines", "hoard", "offre", "courting", "petrie", "lading", "woodcock", "churning", "chariots", "battalions", "unquestionably", "presque", "reproach", "viol", "vishnu", "cherub", "lieder", "trumpeter", "straws", "serrated", "puny", "emphatically", "reassured", "perceiving", "commendation", "leben", "contending", "patriarchal", "spelt", "barks", "dodging", "antiseptic", "browned", "oed", "hendrik", "highlanders", "ligaments", "wurde", "upheaval", "cringe", "crimea", "sugarcane", "mouthful", "gazelle", "gauche", "minion", "complicity", "unstrung", "tendons", "thrives", "penchant", "drab", "roared", "prospector", "unwise", "financier", "allegory", "harbours", "konstantin", "acropolis", "stifle", "tiberius", "paradoxical", "rousing", "sebastopol", "knelt", "radiating", "devour", "treachery", "petting", "inoculated", "princesses", "rossini", "portraiture", "incapacitated", "attested", "ope", "nuestra", "overcrowded", "warring", "arouse", "ticked", "purged", "repulsive", "sikkim", "seclusion", "elucidate", "fated", "frighten", "amputation", "halts", "subtlety", "creditable", "protruding", "appreciable", "delicacy", "paradis", "cinch", "futility", "dumplings", "diesen", "upholds", "enlistment", "inroads", "blissful", "boasted", "zealanders", "stirs", "platonic", "donkeys", "etna", "averse", "siempre", "afield", "endearing", "mishap", "lackey", "quod", "labors", "whooping", "sonnets", "musing", "masai", "barricade", "inquest", "snipe", "hapless", "cuenta", "polen", "ably", "montagne", "brun", "mirza", "beaux", "traversed", "sparsely", "shrinks", "channing", "fib", "ail", "innkeeper", "mistrust", "overcomes", "lordship", "egregious", "cubans", "transacted", "blaise", "chaplains", "conventionally", "nuestro", "perceptive", "haber", "lard", "destitute", "platz", "disbanded", "singly", "headless", "petrified", "emigrants", "thane", "salve", "hindustan", "marseilles", "beauchamp", "grates", "fissure", "curtail", "talker", "divorces", "vitesse", "winks", "harte", "loopholes", "soit", "novelists", "bestow", "homespun", "hulls", "complimented", "intonation", "proclaims", "dissecting", "clamped", "retracted", "friar", "hospitable", "melodrama", "creased", "preparer", "postures", "trapper", "makeshift", "tattered", "embarrass", "slanted", "plagues", "jota", "harvests", "surged", "blume", "natured", "clemency", "woolly", "blemish", "ajouter", "bushels", "tapers", "geniuses", "rind", "whiskers", "huntsman", "personne", "perpetually", "soundings", "evicted", "rara", "divisible", "accumulations", "lightness", "avoir", "quelle", "admirers", "marcello", "harbinger", "mustache", "revolutionize", "dwindling", "beaker", "arcades", "baggy", "jeweled", "rejoicing", "uomo", "ariadne", "dickie", "quiver", "sylvie", "frequented", "coronet", "agnew", "discredited", "taverns", "prodigal", "aden", "wield", "resolute", "adage", "wetter", "jeg", "conjure", "rote", "recitals", "adrift", "confiscation", "stings", "budge", "ilk", "ose", "silks", "sequins", "fringed", "goblins", "delineate", "organist", "kneel", "illuminations", "chuckled", "tacitus", "armenians", "excels", "furthest", "virulent", "masts", "garret", "commendable", "inadequacy", "barbaric", "deliciously", "ruse", "persephone", "lifelike", "culled", "muss", "presbytery", "tumblers", "gunshot", "desiree", "supposing", "sculptors", "charme", "calicut", "inde", "castilla", "zealous", "rattlesnake", "iridescent", "robberies", "elms", "excelled", "twine", "meteors", "judicious", "unaltered", "collation", "geist", "silvio", "parke", "diction", 
"unoccupied", "tigris", "pedestals", "tribulations", "colman", "sabina", "meilleurs", "buckwheat", "enshrined", "surpasses", "yearling", "agape", "wrenching", "damnation", "rapidity", "bajo", "tempus", "deleterious", "intersecting", "garibaldi", "alluvial", "xxv", "incisive", "concealing", "clutching", "drifts", "tenement", "discernment", "chalice", "hypocrite", "harrowing", "prefect", "sweetly", "cleave", "flimsy", "strada", "delilah", "bedded", "shivering", "formality", "produit", "mangroves", "suffices", "bingley", "whosoever", "comte", "tigre", "cham", "graced", "ultimo", "statuary", "moraine", "moravian", "intermittently", "armaments", "grins", "chewed", "accomplishes", "inapplicable", "bly", "pasha", "scour", "motionless", "notaries", "galant", "fallow", "indictments", "aileen", "leapt", "pelo", "widower", "quagmire", "taffy", "purging", "cleansed", "bem", "fainting", "theorist", "scaring", "serviceable", "obstructed", "indigestion", "jackal", "snowflakes", "massacres", "entailed", "curative", "bier", "traitors", "igneous", "cambio", "lull", "rinsed", "delectable", "proletariat", "lise", "fanciful", "bey", "mystics", "fresher", "consummate", "brows", "technic", "veda", "ephesus", "domesticated", "dismayed", "steered", "remitted", "shew", "miraculously", "lapses", "romagna", "freemasonry", "dwells", "penitentiary", "shrewd", "impatience", "italie", "crass", "spaulding", "jot", "gott", "benevolence", "lancelot", "suspiciously", "eugenia", "reprimand", "mangled", "staunch", "shaven", "fez", "feld", "molestation", "quarts", "yells", "lacs", "blindfolded", "premiers", "wraith", "nimble", "hyacinth", "yonge", "durst", "naturalists", "derelict", "gle", "shrouded", "clarissa", "brazen", "inundated", "joie", "brahma", "anni", "veracity", "pinocchio", "angers", "gustavus", "raps", "unwittingly", "counsels", "battlefields", "antecedent", "matty", "dorothea", "licht", "legislate", "voluptuous", "complacent", "germania", "grandmothers", "dalla", "objet", "unaccompanied", "schooled", "picts", "foresters", "hag", "guerre", "dorn", "ainsi", "orinoco", "loveless", "sharpened", "nostrils", "cambrian", "impure", "gridiron", "innermost", "wry", "pilate", "pinning", "alms", "stung", "koko", "phantoms", "retort", "congregate", "meditative", "smirking", "chestnuts", "expositions", "begotten", "gainsborough", "sparkles", "collared", "stringed", "barnabas", "weeding", "evasive", "smirk", "ancora", "pausing", "grands", "replete", "inconceivable", "antworten", "crutches", "apportioned", "pawnee", "accumulates", "failings", "otra", "bristle", "classe", "terrors", "uriah", "oblige", "visite", "panacea", "vibrate", "penetrates", "mayhew", "cathedrals", "toads", "liber", "perceives", "nubian", "stumped", "cramp", "sodom", "imitations", "mistletoe", "naam", "hallowed", "appease", "hawes", "furlong", "heralded", "linde", "clearest", "supersede", "shovels", "renaud", "phrasing", "quarries", "sensibly", "vio", "mouthed", "gills", "braids", "milder", "inexplicable", "counterfeiting", "expeditious", "intently", "chrysalis", "rechercher", "hoary", "corse", "crocodiles", "ronde", "eze", "zeno", "deceiving", "oedipus", "beamed", "scraped", "chagrin", "vill", "tickled", "hindrance", "discreetly", "sparing", "emeralds", "wanders", "disillusioned", "preoccupation", "stato", "restful", "aristocratic", "scouring", "profitably", "pinched", "purport", "plunging", "shambles", "juillet", "marten", "admittance", "stinking", "porridge", "symbolize", "standstill", "unattractive", "diffused", "firmer", "reproduces", "promulgation", 
"unshaven", "rakes", "sante", "incognito", "silliness", "burgh", "giggling", "coldest", "proviso", "quando", "barnyard", "dikes", "vento", "donal", "artifice", "dato", "glides", "allot", "witte", "vad", "progenitor", "abomination", "erste", "mote", "argumentation", "passively", "hurled", "vesta", "jacky", "wold", "habe", "straightened", "deranged", "contesting", "darwinian", "touchy", "rafters", "unintelligible", "whitworth", "hinten", "infantile", "unspeakable", "demolish", "comforted", "disgraceful", "worshippers", "servitude", "aqueduct", "framers", "streamers", "humbled", "marcella", "radiate", "stipulate", "proximate", "secretions", "attains", "gallus", "idem", "hark", "perturbed", "cemented", "dissolves", "crowning", "bettina", "smuggled", "punctuated", "blunder", "euston", "zucker", "belted", "baal", "felon", "deen", "thud", "hagar", "antlers", "doubting", "dunkirk", "libretto", "debatable", "reaping", "aborigines", "estranged", "merthyr", "ihn", "joh", "decisively", "swims", "undeniably", "spasm", "kom", "notables", "eminently", "snorting", "seguro", "mercilessly", "firs", "cobbler", "invigorating", "heinous", "dusky", "kultur", "esso", "linnaeus", "infallible", "loaves", "dieu", "heeled", "quibble", "meandering", "incessant", "baines", "blick", "namen", "cheery", "curbing", "harshly", "betterment", "rump", "oben", "sweethearts", "slush", "mutton", "coi", "blinked", "altri", "lenore", "townshend", "zigzag", "lesen", "dragoon", "sympathies", "leggings", "benefactor", "thales", "nacht", "merrily", "vouch", "pompey", "blackness", "transitory", "gales", "hypocrites", "larynx", "droughts", "ancona", "springing", "bethune", "nocturne", "perdue", "altruism", "ceasing", "dutchman", "capricious", "angelique", "harmonize", "crescendo", "gipsy", "frederik", "miserables", "amalgamated", "obeying", "gunners", "pent", "mishaps", "subsidence", "plastering", "promiscuous", "asturias", "basso", "dusted", "sago", "inlets", "fords", "pekka", "parentage", "mutter", "litters", "brothel", "rive", "shelled", "outlandish", "sneezing", "sancho", "variegated", "abysmal", "personnes", "bourse", "tenacity", "partir", "moslem", "fourths", "revolutionized", "permanence", "coincident", "inez", "minding", "permis", "enviable", "accessions", "carpeted", "zeke", "eloquently", "overtaken", "hock", "subheading", "renews", "extinguish", "oli", "lowing", "bullied", "accruing", "dirge", "actuated", "bluish", "tingle", "captivated", "parlors", "lamented", "bruise", "cesare", "perfumed", "dames", "unfettered", "imogen", "lewd", "thither", "rebuke", "collated", "occasioned", "swayed", "dupe", "bogs", "affording", "assuredly", "allusions", "shadowed", "seamen", "intelligible", "overlaid", "censors", "shakespearean", "edict", "octavia", "boyhood", "sustenance", "shrew", "freya", "disrespectful", "confounding", "dispensation", "arian", "depreciated", "diagonally", "cased", "laterally", "prays", "nonce", "lemme", "elevating", "augustin", "beresford", "loup", "likened", "bericht", "sketched", "plage", "firmness", "injustices", "longfellow", "unequivocally", "perspiration", "mirth", "serre", "pauper", "brooms", "horus", "casi", "fois", "ushered", "remedied", "vocations", "depuis", "scorched", "instep", "wilfrid", "machiavelli", "ivor", "mignon", "houseboat", "krieg", "clementine", "smokeless", "stanhope", "thorax", "recherches", "warship", "corinthian", "rattles", "esti", "garten", "dislocated", "marvels", "booby", "conceivably", "persians", "injunctions", "crunching", "exuberant", "dus", "composure", "contradicted", 
"birthright", "errant", "proofread", "rearranged", "heifer", "earthen", "uplands", "paget", "portcullis", "noose", "recur", "desirous", "exemplar", "shivers", "smitten", "rarest", "quiero", "averted", "publique", "dissipated", "gregorio", "masquerading", "discernible", "looser", "ptolemy", "lauded", "pais", "consonants", "demarcation", "miocene", "steeple", "concussion", "nailing", "deadliest", "sparingly", "penance", "priestly", "curtailed", "lovejoy", "rollo", "conspicuously", "risked", "bowled", "modernized", "blemishes", "eagerness", "pearly", "recklessly", "islets", "apothecary", "gagne", "looted", "padua", "jointed", "heyday", "voce", "pulsating", "beaming", "dore", "taint", "lounging", "predisposition", "outwardly", "tumultuous", "overseer", "chine", "crier", "decompose", "unimaginable", "briton", "glistening", "moonshine", "jurgen", "leurs", "scribble", "anselm", "fete", "puerta", "peculiarities", "lichtenstein", "favourably", "beset", "romain", "involuntarily", "swede", "discoverer", "livers", "plowing", "militarism", "glassy", "riddled", "wealthiest", "shrill", "swedes", "headland", "agitator", "utensil", "volk", "sheba", "glows", "heighten", "surpassing", "ladle", "pasa", "pinks", "rusted", "naturalistic", "dogmatic", "tristram", "ballon", "surly", "presente", "sonne", "fertilized", "admirer", "seco", "gibt", "motioned", "catastrophes", "thickened", "indra", "candor", "sabin", "wigwam", "animales", "beheaded", "postmark", "helga", "bereaved", "malin", "drugged", "motte", "volga", "rivalries", "gnomes", "denne", "affectionately", "uneducated", "necessitates", "blunders", "proportionately", "corea", "porque", "mocked", "holler", "fain", "hae", "sint", "darrin", "mois", "cruelly", "tapioca", "furrow", "fewest", "parables", "drowsy", "bushel", "beholder", "sedition", "lutherans", "examen", "ghastly", "vaudeville", "succumb", "criticise", "inquisitive", "doorways", "sirs", "overruled", "menagerie", "osgood", "teamsters", "seul", "forked", "apprehensive", "cowards", "cielo", "cowl", "captors", "fils", "laity", "prefixed", "arming", "amassed", "itinerant", "felons", "dormitories", "dearth", "palatable", "unmasked", "instinctive", "corpo", "sais", "restlessness", "baptised", "burlesque", "regaining", "perversion", "swells", "sujet", "acquaint", "tog", "altro", "havelock", "lengthening", "taut", "laa", "romulus", "sommers", "doings", "financiers", "foolishness", "unequivocal", "noire", "arriba", "silken", "stringing", "bazar", "thrusting", "pavilions", "maddy", "clung", "hie", "bist", "needlessly", "squatting", "cordially", "wilkie", "succumbed", "superstitions", "spangled", "rectory", "alli", "multum", "iliad", "graze", "looped", "unobtrusive", "judea", "currant", "underlies", "intricacies", "afoot", "oddity", "gerrit", "cornered", "auspicious", "splashing", "hotly", "puffed", "disapproved", "interlaced", "instalments", "presumptive", "comprehensible", "tempore", "fallacies", "theodor", "sawdust", "metaphorical", "leaped", "alertness", "embers", "assemblages", "searchlight", "heil", "swinton", "ize", "snob", "stave", "vertu", "snowing", "bleeds", "canaries", "semblance", "shins", "fickle", "outnumbered", "recht", "lukewarm", "quai", "rotunda", "observances", "faintly", "indiscriminate", "alphonse", "piu", "raison", "eyeballs", "barricades", "devoting", "idolatry", "decked", "introspective", "aggravation", "sedge", "nou", "pinching", "tine", "pretenders", "infidels", "dweller", "diabolic", "demonstrable", "letzte", "priestess", "nimrod", "irritate", "siguiente", "beards", "churchyard", 
"despicable", "canter", "reminiscences", "racy", "stoop", "intr", "rendu", "facile", "christiana", "coerced", "billets", "sneeze", "sian", "dignitaries", "somber", "overgrown", "statesmen", "vecchio", "advices", "coffers", "sikhs", "awry", "celt", "lode", "elia", "zora", "rages", "clumps", "tithe", "subordination", "fictions", "deposed", "trending", "disinterested", "forsake", "conspirators", "swinburne", "unresponsive", "baboon", "romani", "swamped", "ensues", "habla", "seit", "elated", "buttered", "sangre", "selfe", "stuffy", "depress", "eccentricity", "transgression", "idealized", "clings", "flamboyant", "memoria", "nachricht", "macht", "toma", "clergyman", "sociales", "scape", "francia", "pledging", "dependants", "rechte", "puddings", "partisans", "mausoleum", "idler", "dawned", "generale", "carelessly", "narcissus", "crusoe", "einfach", "skimming", "stomachs", "namesake", "slaps", "maximilian", "gratuity", "reorganize", "foothold", "reggio", "usted", "madge", "gleam", "rudyard", "supposition", "sprinkling", "besieged", "malaise", "draperies", "newby", "rococo", "brabant", "superlative", "presser", "chamois", "dwt", "voy", "seared", "tinged", "professorship", "diamant", "leeward", "fruitless", "tamer", "ticklish", "alienate", "displeasure", "connoisseurs", "mutilated", "usefully", "instituting", "balzac", "moyen", "threefold", "innocently", "deepened", "clef", "dak", "pura", "regarder", "trice", "pretense", "jungles", "imitating", "shreds", "petitioned", "thad", "archway", "danse", "loudest", "ultimatum", "shuffled", "moy", "shelling", "visita", "zeitung", "observant", "unhappiness", "cinder", "pelt", "ung", "laurels", "methodical", "engulfed", "bequests", "monotonous", "pythagoras", "operatic", "malevolent", "lessened", "stile", "reciting", "naught", "antagonism", "prisms", "debby", "coinage", "unproductive", "banqueting", "nefarious", "stoppage", "defray", "endangering", "zealots", "weighty", "oeuvre", "subsided", "sahib", "gasping", "idiocy", "frenzied", "postulate", "senor", "trespassing", "pendent", "edifice", "vermin", "loosening", "dialectic", "tantalizing", "rhinoceros", "adjutant", "otro", "sickening", "pondered", "teil", "snows", "steeper", "rangoon", "depriving", "stalwart", "verandah", "schreiben", "buttery", "deformity", "cronies", "undervalued", "invalidity", "soundly", "dank", "pinkerton", "canvases", "weakens", "paulus", "ebcdic", "politik", "lariat", "pursuance", "scapegoat", "anathema", "comptes", "trifle", "forefathers", "piraeus", "xxvi", "eradicated", "toga", "fram", "inadmissible", "strasburg", "berths", "innocuous", "heroines", "retake", "unpacked", "gonzalo", "clenched", "groupes", "evaporate", "midwinter", "compagnie", "bellini", "undoing", "communes", "cassava", "disappointments", "glace", "puns", "hilt", "devoured", "inwardly", "adeline", "smothered", "eulogy", "siva", "lond", "forsythe", "pernicious", "fenster", "continua", "babbitt", "reims", "scrimmage", "privates", "whims", "hew", "skirmish", "roan", "nonsensical", "gallows", "rheumatism", "devotee", "nieuw", "cowardice", "fabled", "fangs", "animosity", "wily", "wiles", "ensue", "jaffa", "sagging", "chemin", "crumbled", "sybil", "pekin", "defied", "hopelessness", "errand", "yeoman", "slimy", "unser", "coerce", "overhang", "ihren", "jeunes", "sobbing", "muslin", "deliberative", "gute", "tattooing", "shekels", "emigrant", "dodo", "jahr", "thorny", "epistles", "trampled", "anthracite", "meditating", "merciless", "clump", "transcribe", "atrocity", "elinor", "proportionally", "untrained", "beene", "thrusts", 
"tiresome", "splashed", "antonyms", "lune", "moccasins", "parthenon", "abounds", "salutes", "collided", "tilde", "potash", "boarders", "lapping", "chivalry", "corazon", "frustrate", "sideboard", "poaching", "montmartre", "foiled", "flocked", "connaught", "tether", "hyperbole", "borghese", "schrieb", "brahman", "charlemagne", "pulsing", "heralds", "sterility", "dynasties", "prowl", "amiable", "akt", "sittings", "undulating", "thatched", "felice", "esto", "irrevocably", "bunyan", "hinders", "tubers", "unrelenting", "expeditiously", "antiquated", "jerked", "sputtering", "opulent", "mots", "dimly", "coconuts", "confuses", "executors", "squall", "nothingness", "hebrides", "demeter", "antagonistic", "bowery", "immovable", "caterpillars", "consigned", "rhein", "fervor", "pret", "scooped", "exerts", "idling", "cursory", "dissipate", "hymen", "refuted", "ionian", "americanism", "pessimism", "vehemently", "velvety", "vedere", "wheezing", "teeming", "paradoxes", "lampe", "foolishly", "ordre", "eer", "inanimate", "panting", "comers", "romaine", "wulf", "peckham", "tacks", "veille", "effusion", "lunacy", "loathe", "notoriety", "showered", "brats", "huddle", "taxicab", "confounded", "coughs", "pretends", "faery", "eloise", "widens", "omnipotent", "gautier", "poise", "zeeland", "ringed", "cima", "huddled", "unsteady", "zwischen", "duchy", "malacca", "wol", "magda", "carrion", "summarily", "heine", "voi", "ejaculations", "leopards", "dette", "sanctified", "tradesmen", "excitedly", "pentru", "braced", "gaunt", "nourished", "cornstarch", "doch", "effie", "daffodils", "lettre", "boden", "pollute", "bara", "kamen", "neuer", "pomp", "noms", "stora", "sprouting", "summoning", "annabel", "tartar", "brownish", "rejoin", "rosettes", "etats", "volition", "crawls", "suave", "riddance", "gulp", "lottie", "hac", "lurk", "smudge", "tulle", "helplessness", "circumstantial", "dermot", "naturalism", "haga", "colle", "galloping", "indestructible", "principality", "indulging", "allusion", "bosh", "samaria", "smeared", "gouvernement", "liqueurs", "winifred", "parasol", "coloration", "stingy", "succinctly", "devotes", "manet", "anos", "vigour", "snares", "schnell", "illegible", "mortars", "didst", "curiosities", "wither", "schloss", "seamed", "calmed", "flattered", "babbling", "roch", "admirably", "vipers", "nightfall", "nul", "manos", "hurl", "loyalists", "dory", "sheltering", "forego", "castile", "klasse", "blockquote", "tyrol", "irreparable", "immunities", "broiled", "superstitious", "evangelists", "insides", "sedative", "defraud", "toothed", "bygone", "wilds", "intercession", "complet", "lettered", "mirada", "paa", "apricots", "darkening", "depressions", "mache", "toasting", "exhale", "markt", "altars", "abolishing", "chauncey", "recesses", "kinsman", "payed", "overworked", "cecile", "orbs", "aime", "mutable", "delicacies", "toujours", "scorching", "coffins", "jove", "cashed", "ushers", "jewry", "copperfield", "chapelle", "whoop", "cacao", "andra", "annoys", "heiress", "godhead", "canvassing", "portia", "shyness", "angelus", "subjecting", "momento", "escorte", "unsightly", "frayed", "criminality", "woolen", "repos", "levelling", "shrapnel", "arthurian", "burgos", "litany", "fairest", "nutter", "bristles", "larder", "ganges", "machen", "truthfulness", "atrocious", "obelisk", "valeria", "claret", "fru", "samos", "consecration", "forbearance", "acerca", "plastered", "apostrophe", "stepmother", "ruf", "lapland", "publius", "ihnen", "jesuits", "voluminous", "mottled", "plu", "tosses", "manifesting", "estella", "publics", 
"rien", "normandie", "scrip", "rocher", "inadequately", "arabella", "matti", "throng", "flemming", "shunned", "abandons", "appetites", "turnip", "juxtaposition", "crushes", "carnivorous", "berber", "mince", "banish", "flapping", "fino", "frets", "schism", "sculptured", "suivant", "jemima", "heretics", "dogged", "apparition", "barristers", "scrutinized", "earthworks", "thrashing", "salome", "thumping", "vara", "quenching", "hunch", "amaryllis", "messes", "perdition", "wintering", "topple", "chickasaw", "pungent", "discontinuance", "unbridled", "astrologer", "dut", "canvass", "manifestly", "emphatic", "susy", "outgrowth", "homeward", "withered", "baiting", "surrendering", "fortification", "mingo", "spurt", "elation", "wail", "artistically", "elma", "epileptic", "crag", "hace", "feller", "enmity", "sanctum", "mazes", "jenks", "schutz", "materialistic", "boaz", "jahre", "gud", "oncoming", "racked", "cloister", "provincia", "fancied", "spoilt", "predisposed", "hydrochloric", "filippo", "strode", "agen", "marchand", "disorganized", "shaftesbury", "littoral", "denn", "aggressor", "giggled", "consummation", "fronting", "zola", "heute", "unfaithful", "executioner", "titular", "swears", "diminutive", "paring", "damning", "matrimony", "armas", "humbug", "signalled", "granulated", "ailment", "homely", "perpetuity", "stepfather", "disprove", "dinero", "bernhardt", "incurable", "dixit", "shoving", "furnishes", "anointing", "corinna", "strictest", "domiciled", "minx", "eclipses", "prise", "misdemeanors", "hadrian", "supremely", "mensch", "hastened", "perpetuating", "prostrate", "provisionally", "cocked", "raged", "boyne", "singularly", "elam", "gobble", "preposterous", "symbolized", "breech", "ripening", "pyramidal", "shee", "choruses", "obstructing", "phosphoric", "parquet", "vint", "pasquale", "reparation", "amply", "damask", "rejoined", "impotent", "spits", "papacy", "thimble", "lacquered", "ablaze", "simmering", "nettie", "grasshoppers", "senatorial", "thawed", "unexplored", "transpired", "toulon", "fortifications", "dens", "loafer", "quin", "insurmountable", "prettier", "peu", "haystack", "komen", "chaque", "confining", "louvain", "etchings", "impenetrable", "gymnastic", "tink", "purr", "duped", "stifling", "realises", "vindicated", "bund", "invades", "oust", "suo", "dipper", "signified", "talkers", "exemplify", "inane", "byways", "ibsen", "justus", "bluntly", "bask", "mermaids", "contemplates", "inglis", "defensible", "spinster", "goblets", "interrogated", "yolks", "famille", "dello", "magdeburg", "tarnished", "deducting", "fie", "brimming", "ridiculed", "baie", "ionia", "olden", "herne", "unending", "abominable", "rattled", "basse", "farmhouses", "tambourine", "venomous", "impressively", "inextricably", "etexts", "tapering", "prinz", "unjustly", "rehearse", "apertures", "seducing", "screeching", "reedy", "ceded", "sido", "imbued", "fearsome", "bureaux", "sleds", "christendom", "biographer", "wreak", "planta", "bridegroom", "swarming", "hava", "accomplice", "vivre", "moni", "mui", "ili", "servi", "irregularity", "gash", "impeded", "gravestone", "pompous", "sunt", "subvert", "hanno", "instrumentality", "barnaby", "antwort", "impassioned", "mous", "esau", "desperado", "flavoring", "mouton", "bau", "contagion", "archimedes", "desecration", "pocketbook", "anselmo", "misinterpreted", "garlands", "varma", "mongol", "audacious", "midshipmen", "degrades", "maggiore", "protestantism", "soreness", "boldness", "schip", "inhalt", "otras", "cassius", "powdery", "exportation", "diverge", "loosened", 
"misunderstand", "virility", "inalienable", "norden", "untamed", "eben", "viel", "xxviii", "meddling", "objecting", "gib", "shoddy", "salutation", "altercation", "octagonal", "mended", "navigators", "notches", "odysseus", "unfavourable", "abject", "heretical", "riveted", "quiescent", "strangeness", "rideau", "tincture", "erecting", "tenderer", "wirtschaft", "lucian", "jaar", "persevere", "fittest", "tarnish", "isthmus", "giuliano", "wordt", "hildebrand", "feu", "treads", "lengthen", "bahn", "prodigious", "spoonful", "sociable", "requisitions", "deftly", "raucous", "toasts", "exaggerate", "odes", "blushed", "saddest", "grinds", "immorality", "addington", "marcellus", "ciencia", "wench", "celle", "spontaneity", "illusory", "sympathize", "faggot", "barrows", "tantamount", "slaughtering", "dissected", "borrows", "frigid", "hemispheres", "woollen", "musick", "speculating", "pawns", "outermost", "selwyn", "westphalia", "augmenting", "winded", "poder", "methinks", "rambles", "namur", "tyme", "dawning", "lait", "klang", "congratulating", "sempre", "flagrant", "wane", "loins", "uneventful", "quis", "scoundrels", "distraught", "assassinate", "unwavering", "confidentially", "piecemeal", "soll", "inferiority", "burnished", "clothe", "swelled", "vides", "breda", "gentleness", "staked", "rigidly", "simile", "phalanx", "hindering", "sloped", "sifting", "fixe", "isobel", "loudness", "guillotine", "reverting", "dionysus", "leanings", "groans", "herbst", "canker", "keener", "embellishment", "confesses", "mistresses", "breakwater", "smuggler", "busily", "poached", "aram", "shopkeeper", "hailing", "imparted", "traduction", "contradicting", "headlong", "captor", "indelible", "tethered", "whiteness", "grazed", "unfulfilled", "acquittal", "meilleur", "fluently", "ascribe", "stalked", "deluded", "trembled", "gens", "doon", "unobserved", "labored", "tete", "twitching", "smacks", "silber", "troughs", "unbelievers", "hungerford", "brothels", "skilful", "werk", "basta", "bolder", "omits", "endures", "heeft", "silencio", "laski", "selle", "pueden", "impersonation", "hote", "lavinia", "intents", "unconnected", "ovum", "pruned", "wedded", "lashed", "valladolid", "contentions", "bickering", "whaler", "unobstructed", "menschen", "fondling", "cref", "laissez", "ricks", "spenser", "astounded", "permanency", "smacked", "personen", "pallas", "anatole", "sleet", "disgraced", "philippa", "royaume", "grooved", "resigning", "appareil", "alcove", "termine", "ungodly", "felling", "landes", "hout", "ois", "disclaimed", "aucun", "upp", "appartement", "couleur", "montagu", "steamship", "condescending", "recounting", "breeches", "appellation", "mitglied", "abbe", "montes", "exemple", "handsomely", "fille", "segovia", "untenable", "messer", "deformities", "necktie", "huis", "xxvii", "tardy", "disregarding", "matron", "seaward", "uppermost", "adolphus", "ciphers", "nibble", "heim", "volver", "exerting", "fenn", "fleeces", "industrious", "foie", "decayed", "proprietorship", "essere", "allgemeine", "umsonst", "harps", "hedged", "cleanest", "selon", "teutonic", "viceroy", "maintenant", "ingrained", "caspar", "swordsman", "commissary", "yellows", "habitually", "naman", "maxime", "majorities", "rendus", "mummies", "conquests", "brimstone", "quand", "trowel", "tyndall", "profiting", "beseech", "hitched", "mucha", "mair", "smelt", "fatale", "margery", "yearn", "mismo", "culprits", "trinkets", "whig", "enchant", "austere", "earths", "selbst", "storehouse", "cowhide", "plumage", "antecedents", "diabolical", "tugs", "rapier", "unspoiled", 
"haughty", "relinquished", "assaulting", "admirals", "cosi", "meisjes", "esmeralda", "captivate", "terug", "deterred", "agostino", "apathetic", "uninteresting", "lyre", "yawning", "centralization", "prunes", "buller", "cossacks", "attuned", "herons", "raiding", "deft", "seething", "carne", "jardins", "alligators", "instigated", "superstructure", "husk", "grandiose", "clerkship", "concisely", "sah", "scepticism", "quatre", "constancy", "plats", "countryman", "insufficiently", "reappear", "boudoir", "affinities", "glades", "crutch", "rioting", "espoused", "mamie", "frisch", "discursive", "disputing", "unpaved", "lieber", "repudiation", "clarice", "dimples", "inhabitant", "flourishes", "colonized", "hessian", "feder", "ardour", "hing", "erat", "arbeit", "levant", "imitators", "talkative", "phonograph", "speculators", "sty", "quelques", "smelting", "cuss", "slats", "transcribing", "manoeuvre", "offends", "lumpy", "landlocked", "embattled", "wisest", "giulio", "zin", "diminution", "ging", "rencontres", "southernmost", "freckles", "civilised", "airship", "galls", "ammon", "imitated", "inflicting", "inducement", "heave", "cud", "gegen", "proclamations", "rarer", "slowness", "wrongfully", "lessening", "aurelius", "pout", "cognate", "mire", "sufferer", "mores", "raindrops", "elegy", "sanctification", "sanded", "indignant", "godless", "sloop", "politeness", "baffling", "hurriedly", "characterise", "purporting", "passo", "taunt", "ick", "hinting", "schoolboy", "bailiff", "outpouring", "deflected", "inflection", "lettres", "myrrh", "infuse", "chaff", "defaced", "mimicking", "counseled", "showy", "altruistic", "aldermen", "commends", "moorish", "etre", "bobbing", "defiantly", "colonels", "posible", "bli", "cualquier", "pathos", "battleships", "smartly", "laments", "spied", "playthings", "argumentative", "roused", "aloof", "snore", "charred", "industria", "hij", "ihrer", "dunstan", "bolshevik", "unsound", "hatter", "creepers", "recreations", "profusely", "intelligences", "sorrel", "reverie", "colloquial", "callous", "oom", "perplexing", "splashes", "homesick", "gainer", "ochre", "dois", "bystander", "quell", "repulsion", "capitan", "balk", "imagines", "softens", "harnessed", "exuberance", "flocking", "unnumbered", "outbursts", "undying", "stubble", "bande", "amie", "envie", "tle", "quivering", "ete", "euery", "wein", "sark", "commending", "sofort", "flattery", "soothes", "millstone", "mortgaged", "impossibly", "giorno", "compels", "succes", "drunkenness", "indulged", "habitable", "spn", "subtleties", "ministre", "trappings", "afterthought", "damsel", "euphrates", "schoen", "decorum", "hommes", "spoiling", "yellowing", "robs", "giselle", "earthenware", "incendiary", "selina", "lenient", "dined", "idly", "freda", "devilish", "aristocrat", "scathing", "twinkling", "nichts", "pantomime", "familie", "wanderings", "decimated", "overthrown", "moored", "peered", "bores", "regrettable", "strangled", "maxims", "cama", "engrossing", "fere", "jezebel", "lethargy", "komm", "frolic", "painstaking", "goths", "finality", "toppled", "ewes", "mending", "wrestled", "hurtful", "alternation", "receding", "gast", "laban", "neuen", "paix", "candelabra", "outposts", "treading", "hedwig", "downy", "conformed", "characteristically", "canadien", "goldsmiths", "swarms", "geographers", "somos", "evolutions", "escorting", "irregularly", "oratory", "sharpest", "palisade", "moccasin", "circumcised", "growled", "auxiliaries", "benefactors", "terse", "insistent", "peppered", "sterne", "avez", "utile", "frightful", "trite", "gentler", 
"vex", "dilapidated", "mien", "avance", "wollen", "dela", "stubby", "sixpence", "hoch", "visto", "impaled", "forays", "charon", "flanks", "pavia", "curbed", "efficacious", "philanthropist", "thaddeus", "convinces", "rede", "minder", "orator", "abet", "dien", "ropa", "sence", "steppe", "plowed", "sires", "transgressions", "lingers", "smothering", "encampment", "roque", "prophesy", "recast", "misrepresentations", "bards", "bestial", "neuf", "buddhas", "oozing", "vicenza", "richelieu", "curd", "bookish", "subdue", "raking", "denouncing", "ascertaining", "stags", "vittoria", "soldered", "privateer", "milly", "vicarious", "traverses", "seedy", "imbedded", "elysium", "quenched", "antithesis", "envoyer", "awakens", "accentuate", "squandered", "sortie", "withal", "eyelashes", "colliers", "minuten", "tilden", "asti", "blindfold", "rampart", "possessive", "feldspar", "facades", "idealist", "constables", "mourns", "solidified", "cura", "conceit", "needful", "locusts", "thatch", "cappadocia", "weathers", "grunts", "thicket", "zou", "depraved", "continence", "treatises", "renseignements", "sauvage", "prying", "rascals", "voyageurs", "rudely", "weeps", "deplorable", "smacking", "aggravate", "quoth", "snowstorm", "lacuna", "chambres", "rawson", "levelled", "incessantly", "toit", "apres", "flaring", "neues", "langton", "testa", "lye", "ditty", "pestilence", "rapide", "thoroughfare", "skiff", "belligerent", "impeached", "hight", "eclipsed", "conspired", "catacombs", "agonizing", "bottomless", "sows", "attributing", "londoners", "faut", "sardis", "excruciating", "punctual", "runaways", "boniface", "grafted", "watercourse", "propped", "beaton", "telegrams", "staking", "conversing", "acetylene", "calamities", "viennese", "fancies", "accuser", "bystanders", "minos", "ganymede", "enjoined", "animating", "mercurial", "bargained", "repugnant", "citron", "clave", "pageants", "grosses", "tacked", "zeigen", "supplant", "slates", "prue", "corroborated", "andros", "tipsy", "tabac", "recognisable", "neuralgia", "timbre", "clasped", "pecking", "womanhood", "crimean", "exorbitant", "tish", "grieved", "experimenter", "tallies", "serpents", "tampered", "severally", "bedstead", "acquis", "bostonian", "whirlpools", "sotto", "caressing", "reliefs", "tassels", "culpa", "whiter", "froth", "obliterated", "regalia", "peerage", "deceitful", "storied", "unprofitable", "doublet", "astonishingly", "dein", "cannibalism", "menos", "mera", "pretender", "mosses", "subside", "burney", "conspiring", "nostra", "retaliate", "deafening", "beleaguered", "jarring", "baptismal", "magdalen", "brackish", "direkt", "vse", "tinsel", "edel", "scrutinize", "adverb", "mumbled", "commis", "yams", "breve", "mut", "worthiness", "lazily", "disarming", "ween", "woefully", "kaj", "promontory", "eres", "paye", "smote", "taunting", "etruscan", "outwards", "rend", "hezekiah", "depravity", "wealthier", "onda", "scientifique", "disagreeable", "drei", "castes", "corrupting", "massif", "murat", "kine", "lus", "overtures", "pharaohs", "fraudulently", "plunges", "gibberish", "cela", "tammany", "boulevards", "redistributing", "darken", "dowry", "chateaux", "quam", "skirting", "adieu", "kindling", "affluence", "passable", "shouldered", "hilarity", "fulfils", "predominance", "mitten", "conquerors", "thar", "admonition", "ferdinando", "perchance", "rots", "demetrius", "precocious", "rood", "sachsen", "luzon", "moravia", "byzantium", "gaf", "altre", "repress", "domini", "loro", "moiety", "steeply", "darned", "locum", "denser", "moorland", "coincidences", "divinely", 
"skimmed", "lassie", "congratulation", "seminaries", "hotchkiss", "trotting", "ambushed", "combing", "travesty", "bewildering", "hunchback", "aback", "deepens", "griff", "enactments", "scaly", "heaped", "fantastically", "cobham", "oracles", "untied", "quince", "lage", "profusion", "conjectures", "glint", "incitement", "hansel", "figuratively", "sorceress", "stoic", "fatigued", "unconsciousness", "quarto", "improvise", "incipient", "avalanches", "cheval", "crackling", "creeds", "thro", "outrun", "extenuating", "blackberries", "amiss", "cavernous", "snodgrass", "darlings", "reprieve", "shanty", "rapping", "proffered", "rowena", "livid", "distasteful", "distinctively", "luft", "hares", "overturning", "attestation", "bravado", "overpowering", "ravings", "childless", "voix", "grecian", "proportioned", "lavishly", "smite", "forthright", "kritik", "foretold", "dado", "engraver", "saddled", "tortures", "crusts", "vamos", "loge", "presupposes", "trickery", "adherent", "fragen", "populi", "astrologers", "wuz", "vindication", "opined", "falter", "chatty", "auvergne", "philistines", "retainers", "tener", "cherbourg", "imperfection", "sorrowful", "unchanging", "predominate", "wodehouse", "molested", "titres", "hyena", "wedlock", "erstwhile", "vist", "obtuse", "caudal", "sternly", "chanted", "jonson", "klug", "savour", "stabs", "indecency", "lingered", "elke", "feasting", "suffocation", "softest", "sniffed", "lurks", "tenses", "lawlessness", "recollect", "alors", "projectiles", "heures", "larch", "interrogatories", "dess", "whet", "impatiently", "suspecting", "dessous", "aline", "disjointed", "seizes", "reine", "triomphe", "thebes", "doer", "pandemonium", "lege", "ravished", "discerned", "seulement", "icicles", "fanaticism", "flamed", "godsend", "rubbers", "eder", "anderen", "rehearsed", "alix", "outrageously", "bagdad", "petticoat", "inhabiting", "unrestrained", "injures", "botha", "pigtail", "appraising", "enthralled", "strays", "embroiled", "toussaint", "armistice", "ellery", "damped", "southerners", "fissures", "clinched", "forlorn", "apologetic", "absolution", "inordinate", "burdett", "clank", "individualistic", "conseils", "marts", "obra", "artemisia", "evermore", "engendered", "manchu", "disconcerting", "priestley", "appropriating", "shinto", "attentions", "regno", "gawd", "inhaling", "calmer", "passers", "fluttering", "irishman", "brier", "phoenician", "hundredth", "firstborn", "coves", "armes", "betraying", "screech", "fetches", "paltry", "carelessness", "threes", "broadside", "importante", "doers", "sods", "technicalities", "thais", "groaning", "beckons", "rejoiced", "quickness", "jeunesse", "onze", "entertains", "turban", "freie", "ruffles", "infatuation", "gaiters", "meisje", "geben", "nulla", "plutarch", "curving", "misrepresent", "tankard", "xxxix", "amorous", "kurz", "overflowed", "jesu", "weaned", "armchairs", "appartements", "vagueness", "grumble", "wronged", "politiques", "fireflies", "hoisting", "falsified", "dialectical", "schatz", "labours", "espagne", "flatly", "harsher", "inciting", "malleable", "indecision", "unselfish", "shem", "starke", "alight", "epochs", "nosotros", "genial", "langues", "revolved", "ifad", "snowed", "cachet", "fortify", "cherubs", "armature", "implicate", "tolling", "provisioned", "sista", "syriac", "dived", "baffles", "infamy", "dapper", "belfry", "elysian", "odious", "rehearsing", "ellipsis", "outhouse", "romanesque", "gobierno", "vanquish", "imparts", "sobs", "laudable", "thawing", "tienen", "writs", "omnipresent", "gesundheit", "hovered", "devouring", 
"renunciation", "stunted", "munching", "fumbling", "purl", "lasse", "banal", "rears", "portico", "excites", "placard", "quartermaster", "peculiarly", "placards", "transposed", "ganga", "thrace", "waistcoat", "vier", "perusal", "petrus", "childlike", "shamelessly", "saison", "tomo", "cloaked", "lichens", "brotherly", "uninhabited", "sawn", "unbelief", "overtaking", "transference", "arjuna", "pliable", "mantua", "sardines", "dictating", "studien", "crystallized", "reprisal", "blighted", "kunz", "dissect", "rumbling", "perceptible", "blazes", "encircled", "odette", "saxons", "transcending", "snout", "goodly", "philosophically", "directeur", "bigot", "bramble", "persisting", "bouillon", "scribbled", "celibacy", "beaucoup", "tooting", "gruppe", "displeased", "portant", "lather", "falstaff", "unchallenged", "strayed", "commutation", "spiritualism", "gracia", "omnia", "engender", "fini", "jurists", "cloaks", "streaked", "downe", "chieftains", "garrick", "perches", "scrapes", "silhouetted", "crouched", "juana", "gradation", "tole", "unanimity", "radnor", "tycho", "impeding", "reino", "grisly", "fornication", "contro", "sassafras", "heure", "tramps", "assis", "blossoming", "barbary", "irate", "partisanship", "wean", "omelet", "suh", "sheaf", "folios", "iban", "dictum", "refutation", "posthumous", "inclinations", "ledges", "wenig", "muchas", "enlisting", "roars", "swindle", "revolting", "candied", "plaine", "macedon", "dingy", "bons", "frieze", "staircases", "horas", "multiplies", "impressing", "twirling", "lachlan", "entwicklung", "sergeants", "overcoat", "shak", "tyrannical", "infinitesimal", "scharf", "spouting", "origine", "humbling", "truer", "limes", "katharina", "martians", "sullen", "machin", "prolonging", "battering", "superficially", "upstart", "ihm", "imps", "divulged", "shrunken", "quays", "reprehensible", "provokes", "distancia", "dedicating", "confessing", "forbade", "incursions", "viele", "pieced", "arching", "bett", "gloriously", "gourds", "worsted", "nevermore", "sanguine", "acorns", "slung", "rowers", "shockingly", "viaje", "vagrant", "empties", "bight", "entra", "fells", "morgen", "lors", "dormer", "geht", "ahab", "prolongation", "uprooted", "talons", "germaine", "dualism", "intrigues", "cannibals", "pounce", "marchant", "vedas", "panier", "mouthfuls", "instilled", "calyx", "valour", "litle", "mightily", "cuzco", "unwieldy", "perpetuated", "steht", "exaggerating", "smoldering", "peuvent", "snub", "coarsely", "voz", "withstanding", "thickens", "hissing", "crumpled", "topmost", "intrude", "behest", "pitkin", "snatching", "resto", "charmer", "escapades", "haphazard", "infirm", "pontiff", "menage", "preaches", "varios", "growling", "indescribable", "arraignment", "eugen", "kentish", "napping", "sabatini", "toppling", "sten", "astley", "bouton", "excellently", "ier", "pails", "burly", "derecho", "formule", "hillsides", "segunda", "xxix", "contenu", "divest", "mange", "unfairness", "abated", "sohn", "tiniest", "mowed", "sano", "overhauled", "caskets", "lecteur", "congenial", "lut", "fervently", "sprained", "harlot", "ravages", "choix", "superhuman", "conclave", "humanly", "altura", "livia", "causa", "dentro", "magnificence", "sacramental", "peddler", "eterna", "mystere", "fayre", "glared", "adverbs", "donc", "ugliness", "constantia", "shavings", "lusts", "nunca", "helplessly", "quintessence", "throes", "malabar", "crowbar", "blots", "nettles", "scud", "raked", "cruised", "stupidly", "lashing", "gaudy", "merriman", "swoon", "buckskin", "kommt", "recluse", "displacing", "neapolitan", 
"blacker", "haarlem", "quel", "aspires", "telegraphic", "quali", "frescoes", "patted", "puritans", "gentlewoman", "somme", "meinen", "nouveaux", "victors", "revels", "droves", "slur", "laetitia", "eisen", "phrased", "puddles", "nobleman", "kort", "assailant", "luxuriously", "flatness", "pardons", "debauchery", "wij", "extravagance", "buttress", "entrada", "junge", "rigors", "foregone", "stellung", "overjoyed", "bourgogne", "newhaven", "apologists", "fut", "allemagne", "vind", "waddington", "refilled", "whiff", "burrowing", "strolled", "estos", "regen", "encrusted", "clashed", "harpoon", "sombre", "machinations", "hearse", "libertad", "roamed", "approbation", "nen", "wut", "calmness", "confound", "lengthwise", "fatter", "abstained", "chasse", "christen", "comparaison", "valeur", "senile", "cobwebs", "tusk", "hellish", "conquers", "iglesia", "preceptor", "claro", "ugliest", "ungrateful", "renounced", "clashing", "decomposing", "sauter", "sain", "postponing", "israelite", "graver", "flees", "torrid", "absalom", "preconceived", "zug", "engrave", "dishonor", "hoarding", "bauxite", "barrack", "compatriots", "stereotyped", "conscription", "maken", "philosophie", "minna", "tradesman", "embodying", "unscathed", "moslems", "courageously", "snugly", "tarry", "fevers", "interrogate", "eocene", "muddled", "sklaven", "leonora", "militaire", "subjection", "punctuality", "hoarse", "misfortunes", "vexed", "delos", "vanquished", "ibi", "inquisitor", "floored", "inheriting", "historique", "plied", "beaters", "twang", "ombre", "conceiving", "syrians", "mij", "indivisible", "poetical", "stagger", "crusted", "heraldic", "belli", "maladies", "adjudged", "adolphe", "fou", "wissen", "turrets", "pression", "efter", "calms", "misgivings", "presumes", "juggler", "obeys", "stifled", "preposition", "vestibule", "heer", "mournful", "ameliorate", "scheming", "disarmed", "baseless", "voile", "picturing", "dismemberment", "quartered", "agrippa", "lioness", "appendages", "feverish", "pavillon", "couleurs", "neglects", "suckling", "scythe", "heaving", "homily", "pensive", "lado", "fum", "upshot", "sifted", "felder", "fuerte", "boisterous", "sate", "alleviated", "outbuildings", "icj", "decanters", "elevates", "poitiers", "goed", "ferment", "bounties", "incursion", "aurelia", "thinned", "consternation", "hoisted", "aeroplanes", "auteurs", "antigone", "chirp", "dimmed", "yore", "scurry", "growths", "thoth", "halve", "conversant", "torpedoes", "sovereigns", "unencumbered", "eliciting", "tamed", "fiends", "farmyard", "condense", "garbled", "tallow", "unforgiving", "immobile", "indisputable", "unkind", "prismatic", "aunty", "paucity", "expediency", "frisian", "lieutenants", "philology", "prophesied", "backwoods", "pheasants", "slouch", "amulets", "cargoes", "accentuated", "eddies", "kategorien", "disobey", "literatur", "bandy", "watercourses", "amicable", "prospered", "savoury", "colloquy", "retorted", "fiftieth", "joyfully", "onder", "offensively", "plausibility", "magnate", "pillage", "vengeful", "lunatics", "satis", "nol", "edom", "impracticable", "misdirected", "weer", "surrenders", "manchuria", "playfully", "barony", "leyden", "gruff", "snatches", "buxom", "deciphering", "botanist", "deine", "timidity", "musty", "silences", "guineas", "hebben", "ministering", "strangle", "swerve", "proscribed", "chattering", "esser", "franconia", "dominions", "plateaus", "berthold", "spaniard", "plummet", "transplanting", "onlookers", "wissenschaft", "phebe", "easiness", "trepidation", "squatters", "plantain", "pepys", "frailty", 
"neutralized", "tangier", "ismael", "guten", "bateau", "mourners", "twos", "passageway", "reestablish", "fondo", "parsonage", "quien", "sulphide", "outcasts", "mortally", "oot", "agni", "carbonic", "unassuming", "disillusionment", "nouvel", "knead", "wilful", "gaol", "erudite", "appreciably", "equalize", "prepositions", "petits", "tarn", "endeavoured", "enl", "attentively", "interred", "indiscriminately", "encumbered", "herodotus", "favouring", "neutrals", "conspire", "recompense", "colonnade", "unde", "eustace", "abides", "yuh", "damen", "seus", "strove", "ogni", "dissenters", "imparting", "apologizing", "coups", "verdant", "secrete", "libris", "twirl", "noo", "beadle", "denizens", "cockney", "guppy", "leeches", "convoys", "manoeuvres", "shapely", "rooks", "shuddered", "stelle", "ornamentation", "lynching", "sommes", "perdido", "dictatorial", "uncomfortably", "defenseless", "glean", "amory", "ander", "edad", "gratified", "participle", "schlegel", "watchmen", "galleon", "travaux", "eten", "enim", "chafing", "betrays", "assyria", "inwards", "corsican", "libertine", "immeasurable", "esthetic", "testator", "distaste", "offshoot", "smithson", "resolutely", "friendliest", "uttering", "jacobus", "construe", "algemeen", "mourned", "despotism", "flotilla", "fragmentary", "anjou", "omniscient", "gladness", "frisky", "generalities", "condolence", "siddhartha", "brightening", "inimitable", "ineffectual", "armorial", "poppa", "thickly", "blossomed", "cistern", "tableaux", "latins", "phaeton", "fecundity", "malle", "caliph", "dysentery", "soir", "grenier", "funnels", "pasty", "cuffed", "peau", "tumult", "defoe", "curate", "donned", "wilks", "allegorical", "monotony", "reve", "ohr", "lucile", "amazons", "manon", "unabated", "plante", "curzon", "wohl", "marksman", "philosophic", "denna", "troubadour", "volgende", "truest", "hypnotized", "voitures", "rudeness", "felled", "alleen", "tinned", "concoction", "flay", "patter", "seinen", "tortoises", "roxana", "pli", "crone", "possessor", "wintry", "gode", "admonished", "wickedly", "laver", "shamed", "eluded", "incriminating", "unsealed", "misinformed", "tambien", "journeyed", "presenta", "sett", "magnificently", "unpunished", "albatros", "apostasy", "bereft", "lucretia", "hibernian", "vitriol", "vicarage", "vestry", "gleefully", "mercies", "paralleled", "entwined", "fosse", "taille", "resplendent", "thrall", "barked", "cormac", "sju", "unum", "scorned", "relapsed", "thicken", "sanaa", "ceci", "selene", "artfully", "pilgrimages", "fides", "blazed", "edda", "wheelbarrow", "maimed", "chor", "dernier", "duda", "pater", "meno", "mused", "jamais", "puffing", "besten", "wielded", "futurity", "quicksand", "trestle", "souffle", "rebus", "proces", "sentinels", "pardoned", "wormwood", "sighing", "harz", "awed", "shrank", "conceals", "glycerine", "staub", "abolitionist", "foamy", "aventure", "meunier", "unpainted", "knolls", "unwell", "unconscionable", "wedged", "outgrown", "evading", "commemorated", "lurid", "annunciation", "rumoured", "idee", "coalesce", "brougham", "windings", "strongholds", "burglars", "shrimps", "stirrup", "seria", "creo", "dictionnaire", "finde", "flopped", "elbe", "whitewash", "subservient", "suivante", "stubbornly", "benediction", "disobedient", "seamstress", "immortals", "euripides", "uninitiated", "mikko", "mond", "zwart", "briskly", "afflictions", "buon", "zon", "weariness", "ascendancy", "affront", "telephoned", "treasuries", "energetically", "tinge", "fingal", "defection", "murmurs", "slog", "gav", "dispersing", "tractable", "lapped", "syl", 
"petitioning", "clawed", "einmal", "winsome", "presuming", "englishmen", "equaled", "flog", "notte", "deferring", "quills", "oud", "practises", "unattainable", "lengthened", "dramatist", "grayish", "hallucination", "exhortation", "arousing", "hippopotamus", "wile", "forgeries", "chartres", "recline", "maitre", "remembrances", "disturbs", "chums", "determinate", "heeded", "telephoning", "sophocles", "humiliate", "erfurt", "wasser", "tomes", "ingen", "accompaniments", "clairvoyant", "shriek", "ferocity", "quoi", "withering", "procreation", "xxxi", "exasperated", "eerste", "groping", "soule", "pinnacles", "miser", "scaffolds", "reprisals", "culpable", "unserer", "asunder", "qualms", "unharmed", "sheaves", "tritt", "godmother", "impresses", "lidia", "plusieurs", "buttoned", "sprouted", "armoury", "marshalling", "longue", "omelette", "disintegrated", "forgetfulness", "muerte", "stilts", "samaritans", "knocker", "underfoot", "roofed", "jinn", "nunc", "primeval", "sakes", "horsemanship", "aviators", "destinies", "jure", "sherbet", "nutritive", "hurrying", "helden", "tepid", "opportune", "intuitions", "dissuade", "hemmed", "personified", "cornice", "smock", "musket", "beautify", "tannery", "sooty", "buckled", "purveyor", "kindled", "provencal", "schein", "stairways", "methodists", "bourg", "pretence", "questioner", "repute", "nakedness", "scabbard", "covet", "debe", "rippling", "mony", "nelle", "rationalism", "wistful", "admires", "hissed", "overpowered", "pervades", "mele", "tirade", "elucidation", "prongs", "fumbled", "acte", "confided", "mumbling", "abstaining", "giotto", "punkte", "lancers", "heimlich", "waren", "confederates", "stretchers", "demosthenes", "warum", "avait", "devonian", "infinitum", "justo", "antti", "ointments", "tugging", "opulence", "appomattox", "bentham", "coursing", "beschreibung", "patrician", "zacharias", "melodramatic", "effet", "inexperience", "palabras", "aantal", "rime", "casement", "kalle", "serially", "gefunden", "apprised", "thoughtless", "comparer", "goad", "parle", "muddle", "levites", "christus", "blasphemous", "unaided", "candidature", "clapped", "fatherland", "evergreens", "recede", "dears", "willkommen", "spry", "objets", "toki", "maggots", "calor", "hominem", "tints", "waver", "handkerchiefs", "punishes", "salut", "acquiescence", "disaffected", "manors", "chronicled", "laure", "inundation", "earshot", "omens", "brule", "transfiguration", "punctured", "coughed", "repaying", "filial", "mocks", "niggers", "refrained", "shallower", "durer", "patriarchs", "respectability", "commode", "overbearing", "townspeople", "adoring", "trodden", "reaped", "bequeathed", "grumbling", "elude", "decently", "metaphorically", "tripe", "glitters", "ahmet", "austerity", "mitte", "informe", "enjoin", "dazu", "boyish", "egotistical", "neared", "claes", "rostov", "diverging", "estoy", "uninvited", "irkutsk", "trappers", "aniline", "tuk", "spilt", "forgetful", "conceding", "brightened", "inconveniences", "maun", "rigour", "evinced", "uneasiness", "afresh", "taal", "bunks", "ducked", "situate", "sowie", "escapade", "loomed", "egbert", "hungarians", "clamor", "abdallah", "hond", "pews", "workhouse", "handbuch", "unorganized", "whalers", "smuggle", "laboring", "nooks", "wud", "autocratic", "titania", "broder", "shyly", "stewed", "disguises", "stowed", "unmanageable", "denunciation", "squeal", "ducking", "throb", "scorch", "perusing", "duels", "villainous", "caius", "pythagorean", "steadfastly", "abstention", "genealogies", "ruthlessly", "falsify", "swagger", "flicked", "emigrate", 
"arbour", "accomplices", "nonproprietary", "gebraucht", "toothless", "frankincense", "commendations", "comprehended", "bravest", "crevice", "papel", "telltale", "typewritten", "progenitors", "forges", "loosed", "madcap", "neigh", "evie", "casimir", "persecute", "voracious", "foret", "rescuer", "massacred", "signification", "quarrels", "remoteness", "dominus", "botticelli", "balmy", "hele", "splinters", "kleiner", "epithet", "blonds", "ravenous", "mongols", "camphor", "savagery", "ober", "navigated", "dieppe", "mies", "pretensions", "thunders", "prins", "diogenes", "comings", "danke", "farthing", "crevices", "wringing", "tearful", "betwixt", "florent", "unmistakably", "unu", "massed", "plucking", "slavonic", "reprimanded", "rebelled", "thunderous", "rolle", "encloses", "sorties", "revives", "toleration", "suitors", "minutiae", "deviated", "sleight", "burman", "skirted", "coachman", "bigots", "reappeared", "comprehending", "reckons", "inexhaustible", "canny", "fainted", "pianoforte", "rifts", "winking", "firmament", "hovers", "thoroughness", "confessor", "gooseberry", "aimlessly", "pronouncing", "agassiz", "dazzled", "inborn", "manera", "ould", "consuls", "eure", "doria", "newness", "ascetic", "bearable", "russet", "specie", "hothouse", "incas", "skein", "virginie", "mettle", "ojo", "endeavored", "matin", "demonstrative", "seis", "detta", "bigoted", "discordant", "lilacs", "levying", "elles", "oriel", "buoyed", "malady", "brahmin", "grandsons", "tempers", "quinine", "thirtieth", "sige", "grog", "fester", "permeated", "retards", "resentful", "headlands", "saintly", "oude", "aught", "cornelis", "adjuncts", "jeweller", "wooing", "conjunctions", "embellish", "cordes", "moonlit", "intercepting", "denounces", "besser", "wegen", "dienst", "corks", "obscuring", "tages", "nullify", "corroborate", "envied", "chins", "runt", "nursed", "loathsome", "cosas", "althea", "dando", "icebergs", "sacking", "settee", "driest", "scipio", "stealthy", "flaunt", "mistaking", "saxe", "dyspepsia", "tryst", "cede", "annihilate", "candidly", "honorably", "shifty", "ello", "deceptions", "snorted", "signe", "shivered", "teem", "replenished", "assailants", "degeneracy", "giovanna", "consummated", "cosimo", "cotes", "obstinate", "farquhar", "retrace", "revolvers", "lurch", "gregarious", "allee", "oor", "nightgown", "bombard", "missus", "mystified", "drooping", "diable", "inconsiderate", "swirled", "darted", "warlike", "colons", "supplication", "fretted", "gauged", "suet", "overhanging", "impropriety", "maligned", "thackeray", "nought", "barbarous", "grandi", "olly", "diu", "scepter", "writhing", "enticed", "schmuck", "gasps", "exclaim", "greve", "vestiges", "rustling", "recaptured", "marauders", "spars", "howls", "answerable", "inky", "ock", "sneer", "allay", "derision", "zog", "dutifully", "octavo", "jerrold", "maddening", "plundered", "damit", "henriette", "decry", "buen", "devant", "conspirator", "luring", "gallantry", "hewn", "whisked", "pericles", "desertion", "rumania", "yow", "wherewith", "siliceous", "mund", "circulates", "signore", "coldly", "envoys", "restorer", "staves", "coldness", "existe", "friesland", "orden", "riviere", "gusty", "brazier", "bayreuth", "sonntag", "semaine", "godliness", "docile", "maliciously", "vole", "cantons", "siglo", "enveloping", "piedra", "subito", "tangles", "meanest", "hollows", "luckiest", "officiate", "mumble", "espacio", "oppress", "grandfathers", "usury", "russes", "greedily", "vizier", "ojos", "nostril", "tombstones", "wavering", "barbarism", "vienne", "alway", "surmise", 
"blanch", "inscrutable", "campagne", "syne", "xxxii", "saluted", "protectorate", "hieroglyphics", "materialist", "landlady", "blameless", "amalia", "absurdly", "garnished", "fernand", "corporeal", "passivity", "partiality", "circumscribed", "steno", "disposes", "berta", "emanate", "rummage", "headstrong", "plies", "scantily", "waar", "befriended", "professing", "nestling", "piedras", "immortalized", "leper", "animus", "dimple", "noblest", "supine", "bloodthirsty", "squint", "vitals", "lamenting", "benedetto", "vindictive", "overtook", "goe", "palast", "triumphed", "scanty", "difficile", "vagaries", "undaunted", "lucan", "hemming", "nuevas", "defiled", "faltering", "saracens", "tisch", "eke", "conceited", "denys", "naissance", "laymen", "shopkeepers", "mortification", "combats", "indulgences", "tard", "fattening", "drench", "digesting", "cupola", "hund", "kommer", "canst", "idleness", "lunge", "mahmud", "minuet", "entombed", "fers", "diverged", "spouts", "pontifical", "glided", "sleeplessness", "iago", "axed", "overdone", "socratic", "revulsion", "rosamond", "schwarze", "criticising", "porpoise", "nowe", "oligarchy", "psychical", "rives", "houten", "fanned", "berge", "wagging", "germinate", "chrysanthemums", "misdeeds", "acto", "earnestness", "wetted", "undercurrent", "steerage", "granary", "befitting", "whitish", "irreconcilable", "giveth", "concocted", "essayist", "epicurean", "blacked", "refit", "boite", "unwashed", "detaining", "shod", "oratorio", "befall", "appurtenances", "wearily", "northernmost", "trollope", "enchanter", "unscientific", "withstood", "sandhills", "heaviness", "knapsack", "animaux", "calcul", "consciences", "inflected", "linseed", "caisse", "staccato", "dels", "agamemnon", "dodged", "refusals", "outrages", "cuneiform", "footstool", "dopo", "uncircumcised", "emblazoned", "mettre", "wrangling", "dorcas", "confiscate", "bloods", "odours", "mongrel", "forewarned", "degenerated", "eventide", "impairing", "dispossessed", "meagre", "mopping", "iver", "fantastical", "dorf", "yama", "laatste", "chintz", "nebulous", "slink", "lineal", "droll", "honouring", "grenadier", "anachronism", "methodically", "stiffened", "athenians", "hautes", "aleppo", "whimper", "whomsoever", "viciously", "fiddlers", "endow", "raum", "indistinct", "counterbalance", "razed", "anzahl", "invents", "loungers", "wilberforce", "manus", "tenfold", "scoured", "schule", "carley", "knotty", "stewardess", "furthered", "chancel", "inexorably", "mitglieder", "worships", "ironed", "inhabits", "domestication", "olof", "japon", "appendage", "geographer", "omnis", "naphtha", "clairvoyance", "frente", "aeneas", "narrates", "girdles", "heartbroken", "parola", "lameness", "offal", "smithy", "dawns", "frais", "couverture", "staid", "encircling", "verte", "wove", "pithy", "caressed", "infinitive", "hysterically", "incantation", "blissfully", "shirk", "pangs", "monsignor", "fulness", "commande", "domestics", "unpretentious", "poachers", "galvanic", "narr", "joven", "parlance", "lethargic", "drunkard", "conveyances", "steinmetz", "cowper", "bronzes", "essa", "knell", "profited", "flavia", "startle", "algernon", "exterminate", "heikki", "exalt", "nein", "zal", "interludes", "jahren", "bide", "suitor", "russe", "bevy", "gravelly", "inconspicuous", "juste", "wisps", "urbane", "hoek", "nebuchadnezzar", "diffusing", "stupor", "gratuitously", "aimless", "parfait", "flit", "quietness", "accede", "sicher", "overshadow", "xli", "principale", "turnips", "statuette", "theobald", "dwindled", "dispenses", "fertilizing", "ower", 
"narcissist", "sextant", "falsehoods", "swampy", "euch", "wast", "obtenir", "donning", "cecily", "sappho", "estancia", "wurden", "fama", "lustful", "guano", "presbyterians", "worshiped", "duque", "autem", "rebuked", "cloisters", "luella", "presumptuous", "toothache", "presage", "boars", "afore", "dour", "moistened", "kegs", "unadulterated", "reciprocate", "rade", "quia", "begat", "propelling", "ripen", "suffocating", "athos", "grasse", "cinq", "xxxiii", "brawn", "frowning", "gaius", "matchless", "boatman", "unconcerned", "dood", "orthography", "conjured", "assyrians", "selv", "vaulting", "fonte", "gossiping", "freshen", "tugged", "gog", "outdone", "detest", "paraded", "trifling", "undergrowth", "enamored", "carlotta", "ceux", "cuatro", "methode", "ulterior", "puro", "heracles", "whirled", "passim", "thei", "gebruik", "vraag", "jovial", "scoundrel", "romany", "xxxviii", "duplicity", "meddle", "exaltation", "handiwork", "andras", "joyously", "heaping", "strident", "oration", "grunted", "riche", "pilote", "wampum", "dreading", "humorist", "nourishes", "vite", "cun", "combative", "winked", "unhappily", "rube", "chronometer", "squaring", "wring", "apparitions", "shrieking", "graaf", "erst", "scurvy", "peacocks", "ophir", "wouldst", "pocketed", "enormity", "coarser", "hypnotism", "oeil", "dissociated", "exclaims", "ceaseless", "emblematic", "lerwick", "fertilize", "disengage", "commonest", "daj", "unreserved", "lessens", "judicially", "vend", "smattering", "taunts", "stealthily", "ripened", "cleverness", "roped", "sorcerers", "clang", "sardinian", "waltzes", "sunlit", "attests", "parched", "peaceable", "achtung", "stanzas", "infuriated", "dismounted", "incongruous", "kindest", "stam", "intervenes", "vieles", "bonnets", "bared", "frenchmen", "callow", "edicts", "lemuel", "inattentive", "transmutation", "sweeten", "confide", "voiceless", "sombrero", "isidore", "headdress", "nuestros", "tannin", "limite", "boughs", "naturel", "overseers", "presentment", "sprigs", "amiens", "diez", "prudently", "foresees", "patronizing", "presentable", "pales", "dais", "adornment", "precipitating", "hearken", "insolence", "blockhead", "einige", "patting", "hippocrates", "elaborately", "lundi", "gaslight", "presides", "divested", "pith", "eaux", "transvaal", "gaff", "disintegrating", "folie", "frock", "bleue", "flambeau", "fuming", "veel", "chattel", "wrest", "forgives", "waterless", "effectual", "unimproved", "paddled", "inkling", "vigils", "schoenen", "garcons", "gauntlets", "patria", "blacksmiths", "menor", "ploughing", "timon", "parsimony", "typified", "darting", "ashen", "blunted", "snarl", "comptoir", "echt", "pained", "inexcusable", "laud", "mutterings", "precipice", "geschrieben", "recalcitrant", "wos", "thoughtfulness", "harshness", "ailes", "neuve", "limping", "darum", "utters", "processions", "gluttony", "kneading", "etwas", "sait", "templars", "nineveh", "mesures", "enquired", "aphorisms", "compleat", "consumptive", "dalmatia", "noisily", "readjustment", "unaccountable", "weise", "trickling", "commoner", "reminiscence", "pouvoir", "yeux", "fui", "waned", "assented", "overcharged", "pucker", "sanctify", "messrs", "insolent", "octavio", "portes", "finis", "beastly", "fortresses", "matrons", "thun", "gawain", "guinevere", "heresies", "annihilated", "tardiness", "mangan", "mose", "specks", "futur", "incredulous", "dere", "calvinist", "suas", "buckler", "peal", "asunto", "adroit", "dilettante", "georgiana", "ecstacy", "peasantry", "oppressors", "boeken", "corns", "faring", "dama", "unos", "pinkish", "blurted", 
"tutelage", "merited", "hacia", "peculiarity", "decrepit", "encroaching", "solemnity", "equivocal", "lieb", "amass", "maketh", "ihrem", "disengaged", "distilling", "effigy", "saloons", "assailed", "incensed", "zachariah", "veneration", "broach", "miseries", "personification", "partes", "scuttle", "rougher", "supplanted", "sardonic", "aghast", "raiment", "disused", "vetter", "stooped", "dower", "andalusian", "wordy", "feudalism", "achille", "magister", "bolting", "lumbering", "fourfold", "forgave", "antonius", "indien", "replenishing", "immemorial", "indwelling", "seh", "jaunt", "genere", "ipso", "quartier", "wallow", "unabashed", "haf", "homeric", "overpower", "expounded", "downpour", "dumbfounded", "cubits", "outlast", "frothy", "macedonians", "labouring", "pouvez", "nothings", "kommen", "allgemein", "colonist", "sorbonne", "rares", "colla", "philippi", "adduced", "agli", "unrequited", "mangle", "alludes", "theseus", "commuted", "medan", "saracen", "annulled", "covertly", "dalle", "rapped", "foreboding", "fortuitous", "autumnal", "xxxv", "sepulchre", "kunt", "despotic", "dicky", "beholden", "celui", "apostate", "enda", "faltered", "queda", "entrar", "sicherheit", "gorse", "louse", "wilfully", "paralysed", "tillie", "distanced", "vespers", "scylla", "vats", "urchins", "implore", "kindle", "pricks", "tenements", "tithes", "thinnest", "sipped", "mando", "pulsation", "hitching", "xxxiv", "obediently", "calvinism", "milked", "vesuvius", "disembodied", "aylmer", "scoff", "confidant", "nape", "disparaging", "impolite", "bataille", "oia", "domine", "sluice", "darke", "whistled", "furor", "austrians", "craves", "soiree", "trouver", "enslave", "dimanche", "grimly", "espouse", "casks", "conjoined", "cabled", "muchos", "lightened", "spongy", "verner", "specious", "threshing", "infliction", "frederica", "entranced", "deprives", "onde", "scimitar", "holz", "uninterested", "cavalcade", "adulation", "loitering", "dastardly", "ludovic", "avarice", "sangen", "butchered", "pointedly", "ouverture", "rustle", "excitable", "hermanos", "alluding", "frere", "insipid", "unfathomable", "ingmar", "holiest", "arbre", "effeminate", "vainly", "straying", "venereal", "mercifully", "blatt", "pansies", "acceded", "dregs", "obscures", "millicent", "foresaw", "befriend", "anker", "malign", "abortive", "embarkation", "varnished", "zarathustra", "valent", "knoweth", "anemones", "sacre", "hunched", "buzzed", "pickets", "astringent", "soothed", "vins", "premeditated", "cherche", "aucune", "pueblos", "sentimentality", "tenable", "jumbled", "triumphantly", "leva", "vergessen", "scolded", "fetters", "vulgarity", "magasin", "perpetuation", "tafel", "pliny", "sewed", "jubilant", "sangamon", "continuo", "welche", "silesia", "staat", "amputated", "reappears", "enquiring", "masha", "redden", "kreis", "faccia", "gae", "sobbed", "omnium", "copie", "snuggled", "surest", "bribed", "alarmingly", "kosten", "bloodless", "basle", "sigurd", "tute", "obliterate", "dort", "perron", "pestle", "falsity", "sapling", "elapse", "myne", "enamelled", "torments", "tortuous", "oiseaux", "seafaring", "mooted", "repented", "infirmity", "corydon", "selfishly", "drudgery", "pacha", "shrubbery", "navies", "impartially", "imperfectly", "slanderous", "interminable", "ancien", "soins", "indomitable", "unseemly", "vix", "godlike", "scrambles", "arbeiten", "merriment", "rotted", "thetis", "repulsed", "garni", "brickwork", "soulless", "abbots", "frontispiece", "vivacious", "bloodshot", "salutations", "pela", "dogmas", "forsooth", "geordie", "orestes", "deathbed", 
"indefensible", "brutish", "trill", "venetia", "melchior", "xerxes", "poudre", "ramparts", "disband", "symmetrically", "reek", "hearers", "frigates", "availed", "externals", "principales", "damsels", "spielen", "monotheism", "menelaus", "morsels", "hatte", "skirmishes", "congratulatory", "zuletzt", "melodious", "baited", "veined", "kens", "norwegians", "imitates", "conjugal", "boldest", "hafen", "flaubert", "enunciated", "strictures", "flinging", "ferme", "discouragement", "werke", "vesper", "parapet", "filles", "usurp", "gerade", "traduire", "peremptory", "unrecorded", "seiner", "gallia", "hayne", "lorsque", "fronds", "interposed", "jugglers", "veri", "dessin", "weet", "naively", "nominative", "cleaves", "doivent", "avenging", "ploughed", "severing", "ety", "hev", "cremona", "martyred", "afflict", "crags", "mimicry", "intersected", "tomkins", "winced", "literati", "trotted", "hungrily", "scold", "chirping", "utan", "tress", "vaunted", "astride", "nostro", "ruy", "emancipated", "ordain", "rapt", "wirt", "sawed", "receded", "emboldened", "pessimist", "sedate", "stammered", "supposes", "genteel", "engulf", "huguenot", "epicurus", "gouverneur", "upu", "hankering", "normans", "enumerating", "toiling", "spiteful", "governess", "alternated", "colander", "croak", "abhor", "boek", "inexorable", "chercher", "harmoniously", "bijoux", "worshiping", "gewicht", "coolly", "accompli", "wann", "vieille", "ellos", "hecho", "verry", "rowed", "elfin", "ingots", "ridding", "tegen", "troppo", "meads", "exhaled", "demolishing", "pratique", "calabash", "brigantine", "zeb", "fitzhugh", "rioters", "persecutions", "arriva", "cramming", "chuckling", "disfigured", "mers", "chios", "muro", "oreille", "transcended", "xxxvi", "cuerpo", "tiel", "faintest", "bleek", "adela", "genitive", "civile", "haupt", "testy", "physiologist", "imprison", "repelled", "abend", "eran", "quem", "plundering", "abhorrent", "rebellions", "sympathizers", "scribbling", "phineas", "emissary", "inhumanity", "wem", "belittle", "repudiated", "divina", "leonie", "sympathetically", "permet", "elis", "liddy", "dabei", "rollicking", "offhand", "geraniums", "bashful", "doze", "currants", "absolve", "conjectured", "grandest", "kinsmen", "lier", "welk", "shipwrecked", "doen", "tacitly", "dint", "reverberation", "quickening", "waal", "mistook", "apprehensions", "aunque", "celestine", "schoolmaster", "impressionable", "gingerly", "apologised", "riven", "taketh", "cornfield", "fretting", "fetter", "jeers", "manufactory", "jarred", "theorie", "armen", "bewilderment", "loveliness", "ministered", "idiomatic", "scalping", "slav", "attics", "wilhelmina", "hermits", "gullies", "prerogatives", "banishment", "tempering", "kampf", "fallacious", "vestments", "morsel", "leniency", "scrupulous", "woodsman", "bocca", "dicta", "meisten", "aubert", "richtig", "clumsily", "catholique", "turpentine", "ells", "cussed", "evaded", "thickets", "clink", "personage", "cavallo", "vender", "daar", "bouche", "delinquents", "furlough", "angleterre", "snarling", "samedi", "creaking", "bequeath", "subjugation", "gape", "clase", "unquestionable", "prendre", "irritates", "whigs", "despatches", "titian", "arras", "fathoms", "printemps", "physic", "nuptial", "thickest", "bulbous", "whist", "mieux", "darauf", "expound", "eget", "exhilaration", "ziel", "lordships", "chanced", "fastenings", "ketch", "treeless", "adores", "aground", "splendidly", "feuille", "inattention", "discolored", "traf", "sinning", "jouer", "forestall", "vater", "moselle", "gnawing", "crudely", "saplings", "profuse", 
"dispelling", "attainments", "gane", "couched", "bestows", "sone", "particularity", "knighthood", "blesses", "dure", "sickened", "tali", "canteens", "thoroughfares", "donatello", "penniless", "abrogated", "druck", "kingship", "puis", "manes", "relapsing", "arcadian", "claud", "swart", "eschew", "vastness", "precipitous", "detachments", "arsenals", "hoofd", "tramping", "vieja", "thereabouts", "bloed", "resultat", "betrothed", "pourquoi", "dispelled", "pierrot", "duca", "sameness", "scruples", "gloved", "bete", "dowdy", "clamoring", "aguas", "visitations", "recklessness", "stirrups", "intimated", "allspice", "squirming", "thunderstruck", "pleiades", "surreptitiously", "finery", "langen", "eugenie", "sequestered", "hesitating", "stoops", "stiffening", "scrutinizing", "allude", "sprawled", "interesse", "tomar", "courted", "condoned", "unsavory", "deservedly", "blackbirds", "vowing", "plying", "gangrene", "purplish", "stille", "enliven", "hollowed", "graven", "lengua", "craved", "fracas", "envelop", "dismount", "grudgingly", "quae", "bole", "believeth", "unafraid", "stamens", "omnipotence", "irresponsibility", "zelf", "seaports", "conscientiously", "boomed", "jussi", "joust", "grander", "shackled", "weedy", "sacra", "ipsa", "grope", "suomen", "echte", "brightens", "muertos", "jailer", "gleich", "gladden", "sarcastically", "tuft", "quickened", "reverent", "braved", "jaune", "joli", "beckoned", "unquestioned", "scrawled", "savagely", "usurped", "monstrosity", "certains", "ravishing", "grumbled", "disheartening", "nobis", "stolz", "unavoidably", "blest", "govinda", "menial", "clayey", "delighting", "vielen", "conjuring", "dutiful", "absurdities", "cabeza", "ony", "gordian", "edification", "flinch", "xxxvii", "despot", "affaire", "insincere", "inger", "vuelta", "beckoning", "vivant", "vendre", "ignis", "begone", "lucidity", "feuds", "toque", "wille", "primi", "hiver", "lateness", "dier", "nunnery", "forefinger", "rudiments", "erwartet", "heathens", "celibate", "simul", "clatter", "werd", "faultless", "awkwardness", "praiseworthy", "mosca", "seigneur", "ails", "frage", "vapours", "jij", "delphine", "bruder", "remiss", "languishing", "entrails", "erreur", "cossack", "thrashed", "topsail", "modicum", "malte", "solange", "ethiopians", "rajah", "persuasions", "steppes", "sheathed", "derided", "encroach", "correlative", "maire", "diametrically", "fasted", "eunuch", "algunos", "gazes", "virginians", "negligently", "sistine", "higginson", "hadden", "unmoved", "glum", "perplexity", "particulier", "sabe", "sulky", "guarda", "skyward", "woeful", "grund", "droop", "neque", "dislodge", "voyageur", "waded", "flore", "unacknowledged", "quietest", "carven", "aptitudes", "bonnes", "confusions", "fara", "alimentary", "wus", "republik", "encroachments", "ineffable", "hearer", "awakes", "republique", "generis", "zit", "probity", "formas", "grubs", "unflinching", "murmuring", "gaan", "jungen", "kop", "triumphal", "affable", "hijo", "worshipers", "avons", "flail", "adulterated", "nicodemus", "ardor", "wissenschaften", "veo", "missive", "ascends", "splintered", "transacting", "vus", "nomine", "busen", "loafing", "talus", "republicanism", "foibles", "cose", "choses", "squatter", "waldemar", "colourless", "unyielding", "flabby", "enlarges", "apace", "doktor", "harbored", "bulwark", "stringy", "seront", "sonorous", "breastplate", "draughts", "heaved", "lazare", "uel", "fashioning", "churned", "correspondance", "dappled", "gallic", "tacking", "feigned", "dross", "solidity", "doge", "indecisive", "recurs", "dripped", 
"epicure", "levity", "journeying", "dito", "oppressor", "metrical", "kopf", "immeasurably", "tussle", "fiendish", "glorification", "wayfarer", "arabians", "expanses", "nuits", "dervish", "irrepressible", "leider", "joppa", "wilted", "emoluments", "egal", "conned", "mutes", "outwit", "magnesia", "patronize", "impassable", "serf", "koning", "buries", "vobis", "signor", "phlegm", "reste", "freedmen", "obliging", "hermetically", "gravestones", "uncommonly", "nudged", "inhospitable", "dissension", "intermingled", "dwarfed", "langs", "asters", "surmounted", "elspeth", "salutary", "bringt", "frosts", "ached", "defile", "odio", "ansehen", "effectually", "unprovoked", "apocryphal", "pallid", "sulphuric", "antipathy", "atone", "douce", "storeroom", "theodora", "paler", "lhe", "wereld", "offing", "infest", "dampier", "hardens", "frisk", "alister", "expelling", "obliges", "pertained", "beneficent", "luxuriant", "mulatto", "plausibly", "concubine", "complimenting", "courtly", "dampness", "zusammen", "platitudes", "pois", "porphyry", "deviating", "taunted", "ernestine", "bubbled", "tienes", "korte", "mortified", "upturned", "cordage", "hobbled", "loath", "gagner", "nibbling", "unsophisticated", "vexing", "longa", "digression", "astonish", "dynastic", "cognizance", "piquet", "loveliest", "nearness", "vif", "procurator", "plaintive", "exult", "claps", "disreputable", "seraph", "dressmaker", "fehler", "publican", "hoar", "movimiento", "kreuz", "rebuffs", "reichstag", "woche", "handmaid", "oir", "chemises", "consuelo", "impostor", "nomen", "ponderous", "maisons", "scrupulously", "plaisir", "intruding", "baptize", "fatigues", "asaph", "princesse", "franche", "plucky", "dessins", "eusebius", "untidy", "loggia", "tribesmen", "subsist", "tuin", "augen", "beholding", "scarfs", "leve", "shallows", "ersten", "unjustifiable", "growls", "sported", "quaking", "refraining", "commingled", "coasting", "logement", "kindern", "conciliatory", "stiffen", "showman", "officiated", "distemper", "subterfuge", "jede", "aspired", "mathilde", "pues", "lazaro", "mouvement", "beispiel", "penitent", "toyed", "anglaise", "lamentation", "tunc", "extol", "patrimony", "belgians", "knave", "functionaries", "croup", "broadcloth", "disuse", "reeled", "quire", "goeth", "fascinate", "garish", "baronet", "bombastic", "francie", "scoffed", "thieving", "minde", "thinke", "snarled", "unearthly", "predestination", "verbindung", "regulus", "vidi", "trouve", "rapides", "reviled", "coverlet", "lustig", "bringen", "fearfully", "musketeer", "fiddles", "furlongs", "fens", "ancienne", "arraigned", "liquide", "tanz", "whitewashed", "gilding", "twining", "explication", "violette", "humanely", "jungfrau", "verdad", "perrine", "gaiety", "alten", "uttermost", "aristophanes", "letitia", "overthrew", "lave", "frowns", "fabricius", "sheepish", "diferentes", "antic", "abed", "edifying", "dreadfully", "aun", "sadder", "ravage", "contemptible", "unfailing", "fowls", "untoward", "gloster", "venu", "clergymen", "fiel", "endeavouring", "dislodged", "casse", "obviate", "juster", "genom", "ueber", "primero", "saluting", "beguiling", "bayonets", "trompe", "flavius", "gie", "playfulness", "confluent", "orde", "deel", "lernen", "husks", "beckon", "raved", "herren", "anfang", "jewelled", "reaps", "fatto", "traum", "premonition", "recut", "sureties", "montre", "grunting", "baubles", "personages", "actes", "exigencies", "marveled", "peloponnesian", "gotha", "tasso", "waffen", "cultivator", "nihil", "quintus", "crucify", "unsaid", "fonctions", "untie", "instigator", "girt", 
"annul", "lanky", "illa", "blushes", "shewed", "outdo", "sycamores", "truant", "shrieked", "ermine", "corroboration", "juge", "circe", "capitulation", "aspirant", "germinal", "vindicate", "repelling", "gesucht", "fallible", "pantheism", "strutting", "incalculable", "tijd", "soliloquy", "mammy", "beaks", "caresses", "quello", "indolent", "ursus", "banns", "thistles", "idiosyncrasies", "inducements", "ennui", "abetted", "expending", "ista", "sweltering", "purer", "hedgerows", "narrowest", "disapproving", "meses", "interrogative", "squealing", "feverishly", "sneaked", "obras", "drowns", "nostri", "persuasively", "walloon", "squalor", "panelled", "ossian", "chaplet", "narrate", "peleus", "ebon", "hesiod", "maman", "bleat", "glorifying", "gleamed", "valiantly", "steeds", "elli", "infallibility", "voll", "altes", "franciscans", "comport", "malheur", "overdo", "ragusa", "sette", "radishes", "deeming", "flaccid", "eum", "putrid", "unguarded", "prodded", "fasts", "sterner", "tras", "womanly", "surmised", "northwards", "tiu", "mayest", "judiciously", "worshipper", "diderot", "ruts", "regretting", "scolding", "bosphorus", "dimpled", "massing", "offen", "leathery", "hjem", "caballos", "grimace", "bribing", "unbecoming", "bridles", "rinaldo", "dejected", "vosges", "comely", "prow", "sprig", "apulia", "squander", "swarmed", "wields", "dragoons", "brune", "landholders", "cradled", "dreads", "spurring", "sollte", "plaything", "pander", "stamm", "abominations", "viene", "reestablished", "strangling", "cultivators", "insignificance", "deceiver", "helle", "sputtered", "faites", "merrier", "simples", "ruggles", "miel", "subsides", "nobler", "michaelmas", "bildung", "howled", "blanched", "allemand", "unequalled", "cicely", "temperamental", "dally", "malays", "nauseous", "brandishing", "wags", "chronicler", "allem", "fais", "disproved", "justinian", "lutte", "dobbin", "riz", "coquette", "menge", "remarking", "cobweb", "punctually", "unwillingly", "cadeau", "undoubted", "formless", "shipmates", "englische", "plaats", "shorn", "doubtfully", "typhus", "reticent", "welter", "lande", "exertions", "insel", "sprachen", "eins", "retentive", "gerda", "plodding", "deserter", "rending", "gaillard", "consign", "mantles", "neatness", "adornments", "britannic", "becher", "unbeliever", "parading", "gamin", "confederated", "lume", "overwhelms", "embankments", "quanto", "speculator", "madmen", "listless", "wheaten", "deprecating", "faggots", "ducal", "downcast", "tedium", "seamanship", "gascoigne", "pomegranates", "sooth", "knie", "sportive", "hewson", "aout", "turan", "undeserved", "principalities", "aider", "excelling", "misadventure", "meiner", "rond", "dramatists", "servile", "rickety", "enchantments", "fuori", "secondo", "figura", "prosaic", "diadem", "pani", "outa", "bedeutung", "sincerest", "sagen", "tittle", "imprudent", "keer", "trou", "nannie", "laat", "deliberated", "snubbed", "suffocate", "applauding", "epithets", "toch", "floundering", "preserver", "revolts", "espy", "deren", "hallow", "wharves", "kunde", "canvassed", "chastisement", "unmitigated", "whined", "sashes", "assail", "flirtation", "unterhaltung", "courtiers", "carboniferous", "brillant", "equanimity", "agitators", "venerated", "curs", "neer", "assimilating", "proudest", "subjunctive", "harun", "perishing", "inaugurate", "slavs", "libres", "noiseless", "cayley", "worshipful", "geh", "spurned", "selim", "chastised", "zich", "forethought", "viscera", "excitability", "madder", "exterminated", "mette", "bronzed", "grimy", "lascivious", "ille", 
"dispassionate", "bonheur", "charmingly", "glimpsed", "partaking", "firebrand", "deprecation", "intimation", "chequered", "glimmering", "alphonso", "falla", "disbelieve", "brevet", "darf", "troyes", "exterminating", "revolted", "bunched", "besoin", "scrutinised", "allez", "herded", "athanasius", "gemacht", "deliberating", "humaines", "londoner", "aeschylus", "plantagenet", "episcopalian", "zwar", "soldat", "nisi", "thucydides", "tapa", "repudiate", "advisability", "lope", "festering", "relinquishing", "dessa", "mercia", "furies", "piqued", "jinks", "biddy", "compris", "theophilus", "crony", "sambo", "stellen", "professes", "wherewithal", "shrieks", "taas", "ominously", "caer", "ablution", "demure", "athene", "jist", "ipse", "parasols", "munition", "veered", "jonge", "serfdom", "gossips", "rawlinson", "scuffle", "uncritical", "infatuated", "rhythmically", "gaat", "riotous", "tenga", "embittered", "unleavened", "veces", "stockade", "parece", "bushmen", "babylonia", "tempts", "tempel", "uur", "devolve", "satyr", "fearlessly", "ajar", "pampas", "altra", "suppers", "fluttered", "untrustworthy", "exhorted", "ravines", "yokes", "howitzer", "interjection", "stocky", "bazaars", "himmel", "greate", "strenuously", "wildness", "compensations", "laxity", "deathly", "unloved", "balked", "fairyland", "balaam", "hamar", "rekindled", "drams", "entreat", "brainless", "souci", "cessing", "cocking", "railed", "abounding", "fount", "poacher", "invisibly", "lithe", "intercede", "tusks", "hatten", "ayrton", "courtier", "blotted", "impetuous", "grammes", "shrouds", "ambergris", "hellen", "clearness", "embroider", "hubbub", "robed", "unchangeable", "wunsch", "haya", "magisterial", "boor", "recites", "anguished", "ailleurs", "meteoric", "jacopo", "equalled", "palabra", "arithmetical", "royally", "molle", "plantes", "dishonorable", "thwarting", "venise", "scurrying", "subverted", "urbino", "effets", "broadsword", "blankly", "auras", "bonfires", "allt", "cloudless", "conflagration", "xenophon", "bevis", "dethroned", "chapitre", "vestige", "courrier", "cheerfulness", "egoism", "cataclysm", "harried", "transshipment", "cuore", "fatherless", "puedo", "groen", "seers", "cretan", "roumania", "blubber", "appeased", "coaxed", "pageantry", "disparage", "triste", "chimed", "phraseology", "verdienen", "memoire", "morass", "intimes", "righting", "moder", "tasse", "dessus", "striding", "panelling", "braving", "prayerful", "raad", "transfixed", "balle", "leaven", "lout", "tucking", "unwary", "herrings", "cubit", "begets", "groundless", "prancing", "amelioration", "wark", "beeld", "bezahlen", "mightier", "enthroned", "overburdened", "dwindle", "lindau", "beter", "sujets", "acquiesce", "alacrity", "drawbridge", "gude", "overhauling", "girle", "pulverized", "holier", "mauer", "everard", "uncivil", "nondescript", "employes", "temperaments", "consulter", "simpleton", "brutes", "howsoever", "unsympathetic", "jermyn", "dico", "rejoinder", "condescension", "dilate", "rasch", "tiber", "bekanntschaft", "feuer", "secours", "skilfully", "abolitionists", "flustered", "compactly", "lasses", "fus", "corsage", "hym", "laboured", "enumerates", "decir", "relinquishment", "ohg", "sall", "cession", "liken", "forfeits", "heeding", "fata", "revenu", "helder", "verder", "caesarea", "naturelle", "wordless", "sleepily", "prowling", "harmonie", "eludes", "revelry", "deface", "propensities", "mimicked", "mete", "algunas", "uninjured", "rivage", "populaire", "lief", "toddy", "disheartened", "ruinous", "spoor", "upanishads", "eigene", "bewitching", "mihi", 
"individu", "accusers", "sunshade", "cuir", "hals", "furrows", "throngs", "sarcophagus", "dozing", "siete", "chink", "likenesses", "pervading", "caxton", "soames", "fermenting", "beiden", "blithe", "paralyze", "kazi", "tilling", "hereunto", "daad", "languish", "feathery", "reasoner", "adorning", "gaily", "weib", "samt", "jubilation", "tels", "storks", "accoutrements", "abeyance", "ciudades", "enfin", "suivi", "iniquities", "nadie", "purring", "squinting", "strolls", "encuentra", "gradations", "conocer", "vsed", "molest", "appetizing", "encamped", "trifles", "sammlung", "langage", "importantes", "suiting", "hesitates", "paralytic", "eastwards", "parsimonious", "pinafore", "alwyn", "albertine", "disposer", "politische", "foreknowledge", "galleys", "sunning", "farcical", "weel", "toiled", "incited", "rhythmical", "rippled", "tresses", "agitating", "oriana", "frankness", "castilian", "bunsen", "buenas", "susa", "sulle", "fuera", "outlived", "anny", "repulse", "basaltic", "hinter", "middling", "minstrels", "personae", "wain", "englander", "gascoyne", "knighted", "torchlight", "teniendo", "emanated", "southerner", "persevered", "hounded", "butted", "longings", "galilean", "ayant", "dominicans", "helmsman", "meditated", "shuddering", "homesteads", "abrogation", "justicia", "jutting", "deliverer", "knecht", "aeneid", "vehemence", "befell", "ette", "klar", "neige", "sneered", "chattels", "brambles", "disembark", "secede", "unmixed", "grieves", "prises", "tumbles", "sogenannten", "parnassus", "debarred", "dandelions", "abyssinian", "maler", "bulgarians", "coaxing", "marshy", "terres", "inne", "preying", "grasps", "subsisting", "freunde", "bladders", "avions", "junto", "bloemen", "latium", "shuttered", "alchemists", "morose", "poore", "regretfully", "abbeys", "dutchmen", "agitate", "vien", "abdication", "discontents", "botanists", "bohemians", "blir", "foreheads", "narrating", "gering", "pedant", "stubbornness", "distantly", "humaine", "averting", "pyre", "faubourg", "wooed", "chalky", "teamster", "beached", "fringing", "glans", "thousandth", "sacrilege", "demagogue", "demean", "changement", "stipulating", "propping", "straighter", "weirdly", "broods", "rejoices", "limber", "hablar", "mahomet", "telegraphy", "lehre", "doeth", "verschiedenen", "chrysostom", "blackfeet", "waistcoats", "chalked", "mightiest", "marvelously", "apse", "bailiffs", "infirmities", "illum", "aboot", "jolted", "manne", "jacobite", "viendo", "freckled", "plenipotentiary", "philistine", "gambled", "chaleur", "unimaginative", "joyeux", "gratify", "meuse", "certainties", "zie", "fittingly", "gelatine", "undid", "quelque", "publick", "electioneering", "nette", "ressource", "betel", "moisten", "demoralized", "peopled", "suffi", "swooped", "doctored", "soured", "quieted", "albumen", "encircle", "carmelite", "anges", "exhort", "voyagers", "tendrils", "thal", "nullification", "ostensible", "malarial", "exasperation", "stumpy", "jeden", "whereon", "entente", "nala", "mainsail", "inom", "promptness", "retraite", "excommunicated", "scalding", "storekeeper", "muskets", "uglier", "witchery", "predilection", "wavered", "climes", "firelight", "contrivance", "anoint", "scatters", "wallowing", "hindrances", "braver", "repartee", "boggy", "vragen", "termes", "chiming", "modulations", "philanthropists", "urteil", "retaliated", "founds", "poplars", "knightly", "debater", "tarde", "millinery", "appian", "irresistibly", "endeavoring", "comically", "substratum", "porpoises", "snel", "persuades", "rapports", "foreshadowed", "meekness", "audibly", 
"dewy", "obliquely", "uneasily", "meted", "liveth", "outre", "agin", "phoenicia", "boven", "jaunty", "balthazar", "squeamish", "tono", "parmi", "eccentricities", "pasar", "potentialities", "anthea", "letzten", "airships", "presuppose", "hetty", "affectation", "abdicate", "creak", "archdeacon", "haciendo", "pretension", "descents", "vicissitudes", "dupes", "larks", "tormentor", "tagen", "postilion", "weal", "grudges", "perversity", "convulsive", "inflame", "zien", "eclat", "doric", "pathetically", "bluster", "witching", "depreciate", "bellum", "gendarme", "dionysius", "imperceptible", "fattest", "atolls", "tibi", "parley", "jessamine", "palatial", "prelate", "flippant", "libations", "convivial", "trat", "adorns", "kamer", "grubbing", "commoners", "cultivates", "thankfulness", "nich", "unturned", "workroom", "zukunft", "phoebus", "censured", "sache", "relished", "boers", "toils", "salles", "enorme", "instigation", "veuve", "indefatigable", "overthrowing", "maudlin", "excusable", "craggy", "gushed", "extricate", "provocations", "deplore", "defrauded", "laut", "aplomb", "centum", "cabbages", "epee", "truism", "employe", "fervour", "babylonians", "fabius", "despondent", "ostia", "cunningly", "bathers", "turbid", "sceptics", "pollyanna", "bort", "privateers", "knowe", "preoccupations", "ludovico", "besonders", "villainy", "feuilles", "diverses", "maladie", "hurtling", "squabble", "ravin", "seest", "omnes", "methodism", "mente", "luego", "overtakes", "predominates", "phillis", "startlingly", "couplet", "falta", "inimical", "imperious", "townsmen", "sondern", "revoir", "handfuls", "gratia", "formant", "gongs", "eigenen", "larga", "pentateuch", "immobility", "purifies", "sparkled", "interchanged", "lulled", "disrepute", "rechten", "implacable", "sert", "employments", "carinthia", "attired", "uncalled", "repels", "zat", "aika", "pliant", "reappearance", "urbain", "avocat", "emaciated", "gern", "vassal", "cantos", "manse", "pining", "unknowing", "blithely", "moderns", "fashionably", "virginal", "augur", "colonizing", "bodleian", "bicameral", "chapeau", "dramatized", "bringeth", "paquet", "regle", "broomstick", "suffocated", "voulez", "marauding", "cynically", "assuage", "estrangement", "versicherung", "limped", "yearned", "fondest", "parce", "frightens", "incontinent", "amante", "perpetrate", "nombres", "mientras", "fiercest", "coining", "invective", "sueur", "depose", "pacify", "sunder", "excommunication", "grizzled", "lade", "caballo", "loathed", "florid", "fatalism", "despises", "chanter", "quacks", "arme", "wend", "blackest", "reihe", "roubles", "relented", "meinung", "tarred", "beget", "mooi", "stenographer", "nipped", "disguising", "invulnerable", "flickered", "quiere", "kummer", "hideously", "motherly", "modele", "vexatious", "coachmen", "girlish", "reddening", "foremen", "shamefully", "herculean", "tormenting", "pleura", "bragged", "pester", "deputation", "oppressing", "domineering", "obtrusive", "wrinkling", "wiry", "labyrinths", "jealously", "beare", "welches", "footman", "pense", "chafe", "tapis", "schoolboys", "alexandrian", "sinless", "manche", "nobly", "absolutism", "hause", "grosser", "gudrun", "sharer", "confidences", "wakefulness", "monopolize", "gehen", "consoled", "mayores", "contrition", "diener", "resound", "unsuspected", "archbishops", "tarpaulin", "abajo", "mustapha", "cherokees", "peaceably", "exacted", "oddest", "purposed", "evince", "hyenas", "schoolmates", "luogo", "breathlessly", "hoarded", "naturalness", "flings", "irritably", "gorgeously", "helt", "noonday", 
"courteously", "sinuous", "availing", "meekly", "briefer", "serfs", "vives", "homburg", "wailed", "ippolito", "thunderbolts", "tule", "hustling", "milanese", "foran", "bloomed", "hortense", "scrawl", "manana", "sprechen", "foamed", "refectory", "yearns", "unaccustomed", "platoons", "unbelieving", "luminary", "quitter", "purser", "pratiques", "furtive", "renouncing", "accosted", "conning", "tiempos", "incantations", "enchantress", "parallelogram", "wonderment", "pasado", "groped", "warder", "morbidly", "palfrey", "persecuting", "feign", "swooping", "jackals", "niceties", "outlive", "dereliction", "exactness", "barbarossa", "dray", "silurian", "detaching", "sunburned", "spasmodic", "interlacing", "elegante", "corne", "quietude", "roundly", "monarchies", "trost", "rhododendrons", "flirted", "vraiment", "royalist", "untroubled", "aspirants", "sheepishly", "denk", "haft", "parisienne", "russie", "warily", "cadmus", "telle", "aflame", "gits", "aright", "windlass", "studious", "fineness", "estan", "setzen", "pharisee", "devenir", "cercle", "urania", "amicably", "tureen", "nuptials", "greif", "flints", "satirist", "visiter", "pone", "camillo", "hade", "extort", "staaten", "gleeful", "sprightly", "grindstone", "speaketh", "sacredness", "menton", "petticoats", "proffer", "haply", "pronounces", "fussing", "stragglers", "scowl", "tinder", "omniscience", "vot", "leaden", "advantageously", "kinderen", "pounced", "statt", "wollte", "bayeux", "tertullian", "pompe", "fastidious", "ensconced", "cyprian", "sagacity", "nipping", "fogs", "ausbildung", "protestations", "trickled", "lungo", "erde", "fondled", "poids", "wistfully", "abounded", "heureux", "disloyal", "paralyzing", "staggers", "contorted", "polemical", "neighborly", "dabbled", "villes", "piteous", "olen", "perfunctory", "pervaded", "doorsteps", "falsetto", "tatters", "whan", "puissance", "tunics", "lepers", "gloating", "dismembered", "hierro", "perfidy", "minne", "meaner", "propounded", "valois", "insubordination", "impious", "absolved", "dishonored", "vivir", "bathsheba", "klara", "stilted", "hastening", "dines", "capon", "stiffly", "folgenden", "cacher", "festivity", "grk", "thessaly", "folgende", "ayre", "afire", "sowed", "proprio", "brahmins", "gloat", "entanglements", "clawing", "wrangle", "autour", "immensity", "squabbling", "acquiesced", "rosamund", "deinen", "consecrate", "pursuers", "predestined", "gneiss", "gevonden", "rhin", "disobeyed", "firme", "dishonour", "lavished", "courtesan", "unkempt", "bassin", "zeichen", "jeder", "interjected", "humorously", "victoriously", "ascents", "hingegen", "retarding", "indiscretion", "undertone", "adot", "decease", "stigmatized", "tactful", "friable", "palatinate", "liegen", "fawning", "decoction", "resents", "orientals", "squeaking", "tinkling", "drie", "nostrum", "masterly", "dunce", "fera", "butchery", "wresting", "treacle", "frankrijk", "foolhardy", "bristling", "boreas", "cherubim", "nightcap", "massy", "consoling", "nues", "characterises", "antiochus", "cutlets", "hoofs", "drawl", "veux", "manoeuvring", "lances", "helfen", "rivier", "imogene", "impute", "dainties", "leghorn", "directness", "glutton", "laquelle", "unnaturally", "disquiet", "deerskin", "meest", "sufficed", "extolling", "wearied", "barbe", "pitied", "hame", "sibyl", "lignes", "victoire", "erring", "geschiedenis", "acclamation", "ypres", "gigante", "solamente", "berenice", "cisterns", "kist", "panoply", "credulity", "coiling", "capuchin", "verkehr", "sympathise", "piti", "sist", "noirs", "pitying", "twitched", "clefs", "actuel", 
"vem", "panted", "midshipman", "juda", "gondolas", "swiftness", "necessaries", "nullity", "tuli", "tenemos", "relishing", "unsuited", "gurgling", "imaginings", "hvis", "boatswain", "hearthstone", "fondle", "cuddled", "superintendence", "regeln", "betters", "joab", "corruptions", "persevering", "transversely", "abelard", "illusive", "octavius", "disquieting", "ripeness", "veering", "alguna", "tiere", "junker", "vapid", "hohe", "pieds", "unremitting", "rechnung", "clenching", "cordials", "bandaged", "evanescent", "fevered", "indignity", "pinches", "aglow", "midden", "sieg", "notamment", "bullocks", "peinture", "moyenne", "valerius", "chucked", "ransacked", "bugbear", "wreaked", "hogshead", "masques", "halfpenny", "fetes", "kneels", "reticence", "iambic", "lisbeth", "deplored", "icke", "unfashionable", "jacobean", "loveth", "sceptic", "vociferous", "eunuchs", "comed", "salz", "languished", "sneering", "coitus", "churchman", "lisette", "cocoons", "deserters", "ainda", "verre", "smallness", "esas", "remotest", "retorts", "housekeepers", "farewells", "conscript", "redder", "cria", "troupes", "tiptoe", "sociability", "idealists", "xlv", "crowing", "celles", "thankless", "avers", "hochzeit", "schuld", "quale", "sublimity", "birches", "crunched", "ratifications", "ringleader", "thundered", "fumed", "feste", "thereunto", "compatriot", "discontented", "droning", "yawned", "scuttled", "wochen", "inoffensive", "erudition", "bedsteads", "perrot", "strictness", "welke", "entretien", "frivolity", "gulped", "subtler", "vestidos", "inviolable", "toten", "riflemen", "insufferable", "clasping", "landen", "interjections", "usurpation", "brimmed", "subjugated", "unlearned", "prostrated", "kaffee", "excusing", "rejoining", "subir", "etiam", "slanting", "maisie", "detested", "overal", "dauntless", "pulsations", "frugality", "apprenticed", "reflexion", "vomited", "loth", "undisciplined", "signalized", "lunged", "alii", "vergil", "wiens", "verts", "opere", "pouting", "watling", "daher", "vrij", "creer", "cranny", "springy", "perplex", "lamentable", "signes", "besuchen", "rebelling", "destitution", "rummaging", "broached", "puckered", "squalid", "shunning", "erhalten", "cinders", "interrogatory", "syndic", "cleaving", "semicircular", "montant", "trow", "overwork", "kirche", "farben", "roches", "pommel", "intermixed", "logik", "rerum", "freemen", "mellan", "linnet", "heightening", "goede", "laddie", "bellowed", "tante", "sair", "questi", "entier", "timbered", "sxi", "unrighteousness", "shrilly", "catullus", "dulled", "nuestras", "interlocutor", "kingly", "chided", "turbans", "acquit", "tota", "choisir", "hvor", "singe", "stunden", "harping", "etwa", "akimbo", "beeches", "seule", "augmenter", "hieroglyphic", "aryans", "banishing", "unicameral", "clamour", "sopra", "alvar", "punkt", "dunkel", "erle", "unadorned", "prefaced", "wijn", "gleichen", "verband", "majesties", "endearment", "fealty", "disputation", "leicht", "whoso", "thracian", "forerunners", "exhalation", "investiture", "animates", "ruffian", "turkestan", "balthasar", "ourself", "invariable", "inclines", "southey", "patronising", "deciphered", "shudders", "voie", "gerne", "ardently", "granitic", "untried", "luise", "narada", "intruded", "marmaduke", "coppice", "autocracy", "backwardness", "undiminished", "caput", "connaissance", "discomforts", "clammy", "indisputably", "rifled", "meglio", "pomerania", "fane", "latterly", "flogged", "disadvantageous", "philological", "enamoured", "unpalatable", "shrugging", "disse", "persistency", "conscripts", "chimeras", 
"befits", "instants", "denunciations", "pervade", "entrapped", "suerte", "apaches", "archduke", "myriads", "physiologists", "egotism", "motherless", "cien", "tiberias", "chaldean", "comedie", "reciprocated", "squabbles", "buffoon", "tilled", "rumbled", "mittel", "ambos", "disobeying", "drusilla", "sidon", "acrid", "dijo", "trespasses", "conversed", "ingeniously", "howitt", "counterbalanced", "undertakers", "pricked", "coppers", "reddened", "exhortations", "wohnung", "againe", "hijos", "poulet", "degenerates", "demeanour", "broadsides", "closeted", "unceremoniously", "genuineness", "bungay", "poissons", "volte", "suoi", "wirklich", "iho", "crannies", "prospering", "dearer", "familles", "minutely", "seditious", "trotz", "inarticulate", "turba", "brust", "rameau", "silvered", "youse", "seno", "poche", "neuem", "fromage", "gunboat", "drippings", "voici", "alida", "messager", "asceticism", "reconciles", "disentangle", "bestowing", "belie", "ostend", "divining", "balustrade", "fortieth", "adulterous", "slyly", "shied", "plantains", "eveline", "deferential", "enlivened", "coterie", "magnanimous", "plait", "guttural", "prided", "anciens", "capsized", "breslau", "unreality", "weiteren", "murs", "lath", "encampments", "hindenburg", "whiten", "derniers", "entendre", "cuidado", "reynard", "remarque", "katrine", "perused", "refrains", "furrowed", "tabernacles", "virile", "poignancy", "detestable", "pouce", "certaines", "sombra", "narbonne", "voisin", "jilted", "centurions", "poring", "quivers", "flaunting", "peeped", "kiu", "ellas", "quer", "wails", "gild", "debonair", "indignantly", "invigorated", "bucolic", "disaffection", "grappled", "executioners", "belial", "harde", "blessedness", "courtesies", "misericordia", "apotheosis", "jette", "bettering", "tigress", "geworden", "occhi", "chante", "bleating", "stratagem", "squatted", "dagon", "hugues", "atalanta", "partage", "authoritatively", "unpleasantness", "bettered", "imbecile", "gravest", "defilement", "butting", "gobbled", "hispaniola", "conceives", "townsfolk", "afflicts", "thinness", "counteracting", "marilla", "ramshackle", "dullness", "syllogism", "wrenched", "giovane", "usurping", "arouses", "augustinian", "scald", "rois", "rodolphe", "heliotrope", "aquiline", "reapers", "uncouth", "allein", "whimpering", "eleazar", "portent", "fatten", "crossly", "hadst", "fier", "admonish", "battlements", "transgress", "leant", "lank", "governorship", "tolled", "zealously", "aen", "dowager", "werken", "squealed", "convents", "romane", "vertrag", "usurper", "recitations", "inculcate", "olla", "encumber", "blut", "golfe", "wier", "unimpaired", "liue", "heedless", "rancor", "trots", "providential", "freiheit", "daresay", "kapitel", "liberality", "principes", "semaines", "stort", "indulges", "unthinking", "tutta", "marcelle", "flossie", "inestimable", "whiles", "henne", "distrusted", "prie", "mohawks", "ignoble", "frankish", "jeroboam", "timidly", "lurked", "greyish", "imitative", "igual", "pagodas", "ganze", "hobble", "maan", "roten", "kannst", "tills", "repentant", "comite", "meanness", "wege", "biding", "unassailable", "sidan", "mutters", "singhalese", "mammon", "cavour", "discoverable", "letty", "tombe", "beltane", "whir", "afflicting", "posto", "biographers", "escrito", "hyacinths", "demandes", "freeholders", "ventre", "facetious", "tinkle", "wormed", "histoires", "weiber", "approche", "civilly", "unhurt", "incredulity", "yawns", "croker", "liisa", "proscription", "foretell", "hoards", "boccaccio", "whimpered", "businesslike", "egypte", "juba", "frill", 
"landward", "cripples", "amusingly", "cornices", "ostentatious", "vrai", "pocketing", "bereits", "shylock", "deseo", "paymaster", "canaanites", "carnac", "gnarled", "doce", "gnashing", "preuve", "plod", "damals", "covetousness", "dammed", "piebald", "unawares", "scornful", "crosswise", "tuneful", "hache", "girolamo", "quienes", "humdrum", "distended", "faun", "parler", "folgen", "fatness", "summe", "lente", "dangled", "fixedly", "feebly", "objekt", "vexation", "bastions", "bailly", "threadbare", "emissaries", "weh", "vertue", "subsiding", "hebe", "purred", "lieve", "contingents", "squirmed", "haren", "sangue", "cringing", "saal", "kleinen", "hys", "outstrip", "demerits", "highwayman", "contes", "hussars", "fatherly", "jehu", "southwards", "swerved", "unas", "recurred", "roams", "fuhr", "hemos", "terrify", "licentiate", "periode", "innerhalb", "inflammable", "freundin", "disowned", "parlement", "surmount", "hellenes", "unheeded", "siecle", "nicholl", "magis", "wolle", "apprendre", "habitations", "warf", "cowering", "overhear", "tawdry", "doublets", "saintes", "buona", "gaspard", "skall", "canonized", "solicitous", "findet", "vorbei", "hulking", "realidad", "seconde", "carcase", "caballeros", "unwound", "whiche", "progres", "reveille", "garrisons", "professeur", "shames", "schicken", "predominated", "wilden", "pittance", "gironde", "gosse", "escutcheon", "winging", "alcibiades", "schatten", "curds", "sinfulness", "recapitulation", "trudged", "junger", "hummed", "convalescence", "verite", "spada", "priam", "unceasing", "disdainful", "cackling", "blancs", "freres", "aimer", "parsnips", "trembles", "davon", "dryly", "ingratitude", "postes", "godt", "largesse", "humped", "mooie", "rowboat", "perfections", "restive", "hackneyed", "canticle", "peine", "naivete", "circuitous", "frieden", "imploring", "erebus", "abridge", "picardy", "glisten", "clubbed", "turnings", "unblemished", "trenchant", "lilla", "volleys", "hommage", "girlhood", "freshening", "rill", "andar", "lodgment", "clumsiness", "witless", "regale", "crus", "siya", "amuses", "pallor", "unwholesome", "parsifal", "copra", "journeymen", "filipinas", "hippolyte", "marsa", "galling", "vei", "quitted", "tomba", "musta", "brawny", "quella", "fueron", "prattle", "partakers", "climat", "ilium", "livy", "incorruptible", "puritanism", "carthaginian", "assiduously", "nibbled", "appeasing", "piquant", "grond", "magno", "leute", "unreservedly", "tattle", "baste", "manier", "willst", "inseparably", "anthers", "buttonhole", "uncivilized", "insensible", "seasick", "redouble", "theodosius", "liberte", "rostrum", "ejaculated", "eux", "sables", "pian", "admonitions", "shewing", "suelo", "cower", "erfahren", "inferiors", "singed", "gird", "territoire", "pierces", "jugend", "kleidung", "erfahrungen", "solicitude", "pawnbroker", "reverently", "deign", "eher", "hominy", "doting", "fuerza", "blistered", "glittered", "hanseatic", "pestered", "preeminence", "billows", "biens", "etten", "carted", "despots", "gnaw", "bandied", "liegt", "vinden", "rijk", "perversely", "bors", "transfigured", "dauer", "quizzical", "couper", "informers", "resentments", "bartered", "sugared", "spittle", "circumspect", "demerit", "shouldst", "roundness", "acrimonious", "pulpits", "warding", "unbuttoned", "brot", "feit", "frolics", "groat", "matins", "formes", "bellowing", "platon", "abhorrence", "verbo", "osten", "blackish", "emme", "aphorism", "emanation", "miscreants", "unction", "redan", "seguir", "noblemen", "barque", "deride", "kirke", "houseman", "sedges", "pitiless", "zwarte", 
"portly", "jangle", "jarl", "beauteous", "veld", "contrive", "huguenots", "estimable", "scowled", "ministration", "willet", "wriggle", "impudent", "xlii", "petted", "meist", "prude", "heroically", "phoenicians", "enjoining", "willen", "hustled", "jinny", "surreptitious", "petulant", "unfurled", "sauf", "lits", "chinaman", "nonchalant", "disloyalty", "laconic", "westwards", "nase", "paha", "askance", "misma", "binnen", "baronial", "charrette", "denouement", "belied", "obliquity", "satiric", "quivered", "sche", "sanctimonious", "natt", "ebbs", "obed", "ezek", "heet", "stammering", "waked", "logis", "foolscap", "sorte", "oases", "brach", "limites", "calma", "unmeasured", "statuettes", "nubes", "unga", "gegeben", "satz", "twinge", "cultus", "trudging", "narcisse", "feasted", "rebukes", "colquhoun", "quadrille", "inconnu", "lucretius", "sprach", "ihres", "docteur", "meubles", "whome", "repressing", "embroideries", "booke", "ingenio", "intellects", "brawling", "veut", "tient", "gelatinous", "meilleures", "figur", "gentlemanly", "underbrush", "bemoan", "norsemen", "forsaking", "souvent", "bobbed", "diversities", "gouden", "pontus", "unintelligent", "holies", "annexing", "vriend", "amas", "asylums", "satires", "coffer", "costliest", "ravaging", "rarefied", "nebel", "gleichzeitig", "leyes", "deprecate", "lvi", "serait", "esos", "chivalrous", "overruling", "gendarmerie", "konnte", "groene", "obstinacy", "caked", "delude", "similes", "seeme", "puertas", "recedes", "wroth", "emetic", "gestellt", "holde", "capitale", "steamboats", "naturelles", "towered", "fastness", "gautama", "alsatian", "unrighteous", "torpor", "leser", "desecrated", "transgressed", "publiques", "rawdon", "endeared", "arsene", "pecked", "colonne", "dozed", "outstripped", "chaldeans", "perdu", "repast", "annee", "majestically", "shapeless", "heen", "contrite", "pursed", "principio", "entreated", "heliopolis", "chel", "righteously", "marvelled", "seductions", "taga", "propitious", "domesticity", "dashwood", "veta", "chastise", "inveterate", "peacefulness", "extolled", "absently", "promis", "breit", "copse", "espada", "highwaymen", "orators", "incorrigible", "abating", "sonore", "feigning", "passant", "liveliest", "sixtieth", "reproof", "filets", "baiser", "credulous", "inflections", "lintel", "allora", "stak", "hereupon", "clod", "alaric", "beneficence", "impregnable", "poca", "dessen", "penmanship", "dese", "girded", "bessy", "inscribe", "adelante", "serenely", "nosing", "crowed", "vnto", "cooped", "overwrought", "vivacity", "incontrovertible", "forenoon", "clotted", "jolyon", "certitude", "marshalled", "approvingly", "waif", "ruder", "suffused", "fanden", "altijd", "artless", "morne", "cowed", "longueur", "deeps", "forger", "busied", "venir", "kith", "vrouwen", "valenciennes", "komt", "noblesse", "jostling", "satiety", "tolerably", "consanguinity", "wint", "convulsion", "slumbering", "heraclitus", "semicircle", "vient", "squinted", "exaggerations", "editorship", "rapturous", "unobtrusively", "sabes", "choicest", "tempestuous", "vaillant", "bamboos", "noticia", "signora", "flitting", "laboriously", "inmost", "jehan", "vorhanden", "poesie", "snuffed", "cannot", "vache", "sere", "slighted", "keinen", "maner", "stammer", "inordinately", "fidget", "borst", "comprehends", "gleams", "sieges", "magnifique", "pollux", "sieben", "muzzles", "peleg", "punic", "oser", "saman", "epirus", "fantastique", "tilbage", "astern", "pelted", "stoutly", "insinuating", "auge", "leib", "unequally", "profligate", "sated", "acht", "apprise", "bothe", "goda", 
"beady", "oberst", "abdicated", "reveries", "hauteur", "unerring", "arter", "euer", "denizen", "elegiac", "bivouac", "owain", "doggedly", "hermano", "ladyship", "kneeled", "longe", "rire", "marcha", "problematical", "tanden", "drapeau", "crackled", "defenceless", "pricking", "invalids", "eiland", "harbouring", "droite", "fastens", "igen", "paysage", "fleshly", "striven", "lurched", "blotches", "persoon", "herre", "pistil", "legen", "northumbrian", "apprehending", "werde", "insinuate", "deadening", "froid", "angele", "dolt", "propria", "schreef", "agreeably", "scouted", "intime", "splendors", "capstan", "feint", "muscovite", "pursuer", "letto", "wrappings", "daunted", "candido", "ske", "aurore", "couplets", "socialistic", "narrowness", "dwelleth", "mogelijk", "moustaches", "manzoni", "brushwood", "arrogantly", "traurig", "lieux", "barricaded", "pillaging", "vingt", "tief", "perles", "bungling", "impel", "schlecht", "expectantly", "perching", "solum", "broiling", "gangway", "tantalus", "rapacious", "uniquement", "debased", "concubines", "jogged", "sentido", "entangle", "steepness", "franchi", "puritanical", "capacious", "prefects", "clew", "biscay", "unrolled", "tambour", "watchword", "drummed", "verging", "interdict", "geplaatst", "scamper", "devoutly", "transmigration", "deshalb", "redoubt", "meus", "kerk", "revenant", "instil", "boastful", "bilious", "orsini", "despondency", "disheveled", "exclamations", "allegories", "entonces", "trudge", "mincing", "scurried", "setzt", "homesickness", "metamorphosed", "hussy", "stoicism", "congregated", "covetous", "ewer", "grootste", "doux", "directe", "hysterics", "procures", "stimme", "aceite", "concerne", "devours", "waists", "judaea", "leden", "quidam", "potentate", "barbarity", "extirpated", "charlatan", "slouching", "susceptibilities", "plaited", "floe", "surtout", "agonies", "misjudged", "writhed", "beine", "housemaid", "eurydice", "undeserving", "untruth", "directement", "preyed", "relent", "zillah", "verba", "horsehair", "seinem", "handelt", "gien", "mandarins", "sforza", "indifferently", "nevil", "shuns", "teile", "retinue", "hulda", "impostors", "stehen", "brawls", "derangement", "mesmo", "hinaus", "epictetus", "impertinent", "ouvrir", "buffeted", "physiognomy", "hecuba", "oiseau", "behooves", "misshapen", "scrubby", "jedoch", "unpolished", "vales", "steadiness", "ceaselessly", "irishmen", "charmes", "succor", "branche", "efecto", "ague", "sodden", "helpe", "changements", "unavailing", "vagabonds", "irreverence", "ditt", "chaises", "statesmanship", "papst", "popolo", "saner", "tendre", "halla", "demoralizing", "prest", "disillusion", "frocks", "poner", "thronged", "iets", "beseeching", "irksome", "burgesses", "abbess", "minuit", "uncounted", "schoolroom", "varus", "terrasse", "teufel", "teaspoonful", "rambled", "bertin", "monta", "kneaded", "fertilised", "rosse", "emanations", "veiling", "squandering", "wahrheit", "quiescence", "gilet", "widowhood", "eut", "swarthy", "abyssinia", "populaires", "poetically", "durance", "farnese", "chid", "menaces", "desir", "ambling", "perilously", "numbed", "acteurs", "regel", "bathes", "drover", "wees", "dogmatism", "chasseur", "grudging", "reciprocally", "effusions", "snared", "brogue", "passeth", "gret", "namn", "squeaked", "seance", "stilled", "bygones", "assez", "mentre", "contentedly", "roughest", "entreaties", "ridiculing", "alternations", "penitence", "discours", "avails", "velvets", "completer", "streit", "recevoir", "tactfully", "speake", "gericht", "borde", "drunkards", "danton", "hurries", 
"smolensk", "terreno", "tweede", "ouvert", "duchesse", "mingles", "strafe", "corrals", "rectitude", "semble", "engen", "erreichen", "encircles", "garratt", "jorden", "uncleanness", "viens", "pried", "supplications", "onely", "deportment", "marchandises", "invidious", "weten", "seraphic", "gedanken", "malevolence", "wetten", "alcalde", "judicature", "vigueur", "einzelne", "exhorting", "libation", "facit", "soient", "duas", "rechts", "bagatelle", "chaine", "nonchalantly", "drenching", "verhaal", "subi", "chiens", "prance", "lapsing", "suivre", "edifices", "gruel", "fing", "exasperating", "grievously", "hauts", "partout", "hesitancy", "courte", "chafed", "kennen", "interposition", "callings", "satisfactions", "distrustful", "incredulously", "zij", "obsequious", "moyens", "dissolute", "briefest", "lamplight", "sharpshooters", "druggist", "absolu", "unprincipled", "sweated", "lieth", "flinched", "zeer", "pacification", "nitrogenous", "sackcloth", "enraptured", "indique", "boeuf", "fidgety", "disown", "sophistry", "illumined", "thir", "agonized", "pickpocket", "warbling", "shriveled", "conformable", "imprisoning", "incongruity", "uselessly", "gallantly", "bended", "drang", "poignantly", "untiring", "hostelry", "slumbers", "forfeiting", "fertig", "humphry", "numberless", "intemperance", "definiteness", "reproved", "privation", "westen", "peevish", "tapio", "pedagogue", "soothsayer", "facings", "multiform", "peuple", "herculaneum", "carthaginians", "micheline", "indelibly", "ashy", "cependant", "cruelties", "unseren", "cadences", "slavish", "bawling", "awestruck", "bluer", "felicitous", "caravel", "calles", "plaudits", "schooners", "mycket", "chacun", "demander", "weniger", "eltern", "adepts", "clefts", "kapital", "underhand", "sophist", "heimat", "idolatrous", "secundum", "smouldering", "tradespeople", "untersuchung", "polytheism", "varias", "revellers", "rebuff", "appellations", "draughtsman", "boulet", "verandas", "pwh", "pindar", "iscariot", "bombast", "soyez", "bateaux", "impulsively", "cuarto", "seeth", "milch", "depredations", "dews", "kalt", "temerity", "mlle", "eluding", "adventitious", "interdit", "corked", "deluged", "fleecy", "antelopes", "daub", "unanswerable", "darkens", "excellencies", "strahl", "isak", "gedicht", "atque", "untainted", "eigenschaften", "slays", "crees", "whirring", "miserly", "troth", "contemptuously", "frequenting", "mannes", "celerity", "grottoes", "marthe", "milliner", "komma", "blase", "hoose", "exonerate", "righted", "sayd", "travailler", "imperishable", "degen", "spurn", "famished", "romping", "oozed", "cuanto", "contient", "devrait", "bidden", "tuileries", "samen", "contraire", "vasili", "monopolized", "abstruse", "stripling", "overshadowing", "succour", "whizzing", "headman", "saat", "mellowed", "ebenso", "contiguity", "morts", "retracing", "similitude", "servent", "verdure", "sward", "exclusiveness", "anwendung", "forse", "deines", "tira", "reclined", "throbbed", "divines", "prostration", "wretchedness", "admis", "festooned", "barest", "steadfastness", "boog", "digressions", "diocletian", "fellers", "begrudge", "xliii", "coxswain", "schriften", "counselled", "sentries", "reproaches", "pediment", "hayti", "geef", "cassio", "meinem", "wanneer", "baleful", "swifter", "timotheus", "hulp", "gelten", "miroir", "promesse", "apenas", "hillock", "fearlessness", "neben", "waggon", "unalterable", "beelzebub", "inexpressible", "indios", "cherishing", "crooning", "bref", "wist", "eius", "disavow", "peals", "mariette", "backsliding", "ziehen", "whisking", "wantonly", 
"samovar", "zweifel", "oppresses", "footstep", "stewing", "schnee", "acrimony", "bristly", "soever", "ruefully", "unfavorably", "slothful", "sitt", "diep", "exhorts", "moloch", "epigram", "wafted", "keepe", "expends", "golde", "reassuringly", "thwarts", "sitz", "staats", "jedenfalls", "abhorred", "zeigt", "sollten", "mene", "worketh", "phosphorescent", "sauntered", "foundling", "illiberal", "deserting", "onlooker", "deathless", "assurer", "scandinavians", "legate", "dissuaded", "paled", "ascribes", "hearths", "duller", "discoverers", "furled", "denken", "caminos", "esdras", "typify", "ganzen", "commissariat", "seele", "abydos", "cornfields", "ebbing", "evelina", "resta", "portents", "venetians", "unnerved", "demain", "participles", "harmlessly", "purty", "possessors", "mephistopheles", "pologne", "seene", "fortes", "liveliness", "godson", "passa", "peur", "conserver", "paling", "deur", "bisher", "schwester", "autocrat", "shouldering", "hovel", "gauls", "conforme", "honneur", "stirrings", "decider", "lusitania", "rustled", "unquenchable", "foreseeing", "indolence", "profundity", "lawe", "paru", "vostro", "turgid", "exigency", "exige", "necesario", "reined", "prend", "unenviable", "genau", "unfeeling", "cooing", "haine", "bishopric", "espoir", "severest", "lesse", "beautifying", "glistened", "encroached", "corriente", "suppleness", "irascible", "eigenes", "canute", "vibrated", "denuded", "rendre", "subjugate", "commissaire", "gulden", "naturaleza", "niobe", "incorporeal", "orderlies", "thrushes", "dient", "ferried", "wriggling", "crape", "mouldy", "amant", "merest", "wordes", "perpendicularly", "expounding", "nutzen", "gestern", "swaddling", "benighted", "hysteric", "robespierre", "tillbaka", "exultation", "fand", "blanke", "selfsame", "overcoats", "calvinists", "grovel", "soberly", "therfore", "mellem", "gayest", "vais", "fetid", "boatmen", "vespasian", "singleness", "kette", "yearnings", "remise", "unquiet", "einzige", "herbage", "adduce", "twaddle", "unitarians", "unutterable", "outshine", "parisians", "stellt", "patronized", "aldus", "pommes", "inelegant", "clambered", "histrionic", "subsists", "degenerating", "recommande", "sergius", "taciturn", "sways", "bristled", "flecked", "mustering", "allemande", "sophy", "paramaribo", "betrothal", "boorish", "posa", "queste", "sinon", "devoir", "hunde", "adjoined", "soumis", "pire", "vilest", "niin", "vassals", "throttled", "fonder", "entrancing", "elope", "seid", "nehmen", "welshman", "beguiled", "besoins", "violetta", "stillen", "sinew", "mordant", "clotilde", "ascribing", "zahl", "compter", "germanicus", "declension", "fawns", "damaris", "anodyne", "dearie", "verum", "voller", "lequel", "enigmas", "kinde", "bezoek", "humored", "befalls", "endlich", "yli", "primeros", "chere", "fussed", "anabaptists", "xliv", "disembarked", "burgundian", "telles", "pente", "thumped", "superbe", "conjectural", "tendance", "idlers", "eigentlich", "hoog", "contortions", "effusive", "heilig", "cloistered", "redoubled", "choristers", "bosoms", "flapped", "supernumerary", "aqueducts", "ngon", "reprobate", "despues", "indiscretions", "riper", "forsook", "hittites", "tatler", "prelates", "unserem", "ensigns", "sauve", "miei", "spendthrift", "antipodes", "chers", "grossest", "shanties", "ploughs", "lashings", "noemi", "loue", "persecutors", "averred", "valueless", "imperceptibly", "jaren", "uden", "dise", "crevasse", "hastens", "huizen", "davantage", "brilliancy", "gushes", "marechal", "surer", "frae", "traitorous", "hacen", "levite", "quieting", "candour", "pacified", 
"drin", "gored", "remunerative", "intricacy", "coralie", "pendulous", "eare", "mourner", "enfold", "wirst", "troubadours", "amours", "reentered", "paupers", "bludgeon", "welled", "naturae", "inconsiderable", "cotyledons", "cackle", "sallow", "gemaakt", "montagnes", "reformatory", "demeure", "ostentation", "ninguna", "cherishes", "souper", "wrathful", "thuis", "partook", "ehe", "familiars", "blacken", "zorg", "possibles", "vannes", "schemer", "lika", "actuellement", "deiner", "writhe", "friendless", "proboscis", "fitful", "sicut", "genii", "intrust", "illi", "dishonoured", "unquestioning", "desultory", "fabrique", "pitifully", "egen", "menacingly", "emmeline", "linken", "disinclined", "lackeys", "codicil", "puerile", "kleber", "journaux", "worthlessness", "oblation", "franziska", "caracalla", "civilizing", "conseiller", "corneille", "merken", "dorp", "palaver", "gorgias", "tribu", "unvarnished", "overran", "folies", "wretches", "hoarsely", "bonhomme", "hellenism", "statecraft", "familien", "propia", "flout", "studiously", "reveled", "confounds", "pitiable", "countrie", "reiteration", "corsairs", "indiscreet", "duelling", "pedantry", "lugged", "debilitated", "blazon", "gars", "looseness", "neglectful", "gamla", "pillaged", "voces", "reasonings", "vestido", "agathe", "niemand", "tost", "worthily", "passy", "verfahren", "insomuch", "anneke", "scruple", "steadied", "coolie", "honeyed", "recoiled", "comprendre", "disliking", "chinks", "unripe", "shipmate", "convulsed", "noce", "cleanness", "unmolested", "insistently", "fording", "linie", "telegraphs", "coverts", "transgressors", "redolent", "impudence", "ananias", "vied", "eulogies", "weakling", "griefs", "yoked", "steeples", "tares", "detto", "tottering", "grossen", "scalps", "despaired", "quails", "satiated", "plupart", "principaux", "lightnings", "repenting", "souldiers", "manliness", "churchmen", "parthian", "knowen", "chirped", "facta", "himselfe", "derisive", "imbibed", "hanoverian", "samma", "warton", "equipage", "prophesying", "abodes", "kring", "spouted", "clanging", "windpipe", "veronese", "guiltless", "burnings", "caractere", "estaba", "distresses", "retaken", "heere", "intermingling", "foundered", "mandat", "blinde", "dispensations", "irretrievably", "thralls", "crise", "connivance", "miscreant", "bitterest", "uncertainly", "resenting", "kingdome", "familiarly", "reviens", "scowling", "swaggering", "grandly", "publicans", "graciousness", "footlights", "smarting", "pueda", "hatreds", "imperil", "salamis", "supplie", "zweite", "censer", "surfeit", "schneller", "obeisance", "whelp", "fantaisie", "monnaie", "ignominious", "entschieden", "sulking", "keenest", "ungainly", "darstellung", "bauble", "circlet", "rouses", "dormir", "consolations", "enslaving", "medes", "deale", "odorous", "indefinable", "faits", "kenne", "ironical", "sympathized", "uncultivated", "functionary", "suppositions", "jehoshaphat", "chevaux", "elegies", "carbines", "richt", "kaffir", "livelier", "gervase", "grenadiers", "bruit", "acacias", "magnanimity", "aleck", "propio", "fiesole", "gallops", "dexterous", "connaissances", "hebt", "beaute", "hoor", "modernes", "undignified", "stesso", "conocimiento", "mord", "endear", "effigies", "folge", "counteracted", "planking", "blockhouse", "confiance", "urbanity", "lawgiver", "totter", "rumpled", "scalded", "importations", "laughingly", "prefaces", "tenue", "idolaters", "seducer", "haire", "tenaciously", "moonbeams", "inculcated", "monate", "verschiedene", "wohin", "generall", "reposed", "cicerone", "mustaches", "hasard", 
"leddy", "mildest", "restlessly", "uselessness", "lezen", "doet", "oaken", "endroit", "harlots", "conduite", "rouges", "humours", "humain", "voltaic", "derriere", "xlviii", "flot", "cudgel", "aurait", "multifarious", "runneth", "tenu", "llegar", "abhors", "minarets", "wrack", "bleiben", "vividness", "beatitude", "husbandman", "procureur", "stuk", "douleur", "heaves", "xlvii", "sagt", "passi", "subaltern", "appui", "bharata", "longingly", "apud", "bandes", "roseate", "ruffians", "servir", "contralto", "tenter", "rues", "dote", "valdemar", "curtly", "resuscitated", "exemples", "confidante", "rashly", "athen", "leering", "soudan", "clearings", "pleasantries", "louer", "uomini", "atoning", "insinuated", "xlvi", "warble", "prodigies", "herbes", "phrygia", "overige", "dardanelles", "familiarized", "fakir", "rato", "divinities", "ostracism", "magasins", "buttresses", "drovers", "obelisks", "vierge", "doggerel", "existences", "farre", "extravagantly", "hauptmann", "builded", "volle", "slandered", "demagogues", "cephas", "flighty", "opposer", "ejus", "gabled", "convient", "ofta", "enrage", "sinews", "flemings", "glanz", "serjeant", "shadrach", "shallowness", "ensnared", "loyally", "sneezed", "darkling", "subservience", "nightingales", "gaped", "subduing", "apoplexy", "poorhouse", "sunbeams", "kaan", "brigand", "jahrhundert", "chasms", "jealousies", "ditties", "dignitary", "wenches", "dite", "gesicht", "improbability", "shrewdly", "sneers", "bloodhounds", "meed", "impish", "menaced", "seneschal", "deafened", "hooting", "cyrene", "dejection", "economize", "prophetess", "hatchets", "witz", "spoonfuls", "unten", "ebene", "funereal", "wrested", "deceives", "plaint", "imperio", "demesne", "briny", "nimbly", "supped", "calumny", "sigismund", "herrn", "verger", "ludicrously", "portend", "reves", "spattered", "couloir", "straggling", "cochon", "berthe", "acadians", "comtesse", "jailers", "chaud", "disastrously", "intimations", "arzt", "xlix", "heterodox", "manque", "codfish", "debility", "shirking", "rustlers", "demas", "zaken", "aloes", "obliterating", "victuals", "certo", "dully", "leonore", "exalting", "chide", "entrap", "indignities", "nombreux", "rhymed", "whirls", "compassionately", "hussar", "scow", "voorbeeld", "beide", "honora", "remorseful", "obstinately", "zei", "peste", "aggrandizement", "jotted", "unpopularity", "deluding", "boileau", "naast", "charta", "royalists", "lachen", "hennes", "nej", "achaeans", "cravat", "genug", "pinions", "mindre", "praetor", "peche", "sunburnt", "superficie", "grotesquely", "mown", "soms", "vagrants", "transept", "patois", "atlee", "seuil", "petrograd", "aveva", "bulged", "bated", "seines", "thereat", "aise", "recours", "cloven", "apollyon", "intemperate", "confiding", "fleisch", "eares", "compunction", "bonum", "unceasingly", "herdsman", "haat", "frightfully", "reprises", "fierceness", "remodelled", "unpleasantly", "szene", "bouches", "aggressions", "spectacled", "telegraphed", "resounded", "mickle", "sagacious", "moralists", "abimelech", "gehe", "valise", "prompter", "provincials", "distaff", "imbibe", "hisses", "garcon", "doel", "freude", "gnawed", "sieht", "oog", "clattering", "traite", "bleus", "tente", "reverberating", "incomparably", "bearskin", "ripens", "darunter", "benares", "recitative", "factotum", "zoon", "screeched", "quare", "anticipations", "determinedly", "calamitous", "pria", "hughie", "egli", "mopped", "sacrilegious", "fatuous", "elocution", "cilicia", "retraced", "palliation", "kunne", "misanthropy", "protruded", "hanse", "incompetency", 
"mebbe", "plainer", "chambermaid", "sapping", "perfidious", "voyaging", "humiliations", "umbrage", "fatiguing", "awaking", "presencia", "portmanteau", "moralist", "farbe", "legere", "tormentors", "distinctness", "expiation", "insinuation", "indem", "alehouse", "practicability", "swindler", "standen", "inquisitors", "dreamily", "frobisher", "digo", "motivo", "gibbet", "exactitude", "promenades", "grise", "epitaphs", "jostled", "mannen", "globules", "herdsmen", "conmigo", "reprove", "heareth", "ipsi", "inviolate", "zoroaster", "orations", "vistula", "laten", "examina", "erster", "autant", "schrift", "resemblances", "termina", "cuales", "lordly", "complexions", "despising", "assiduous", "verstehen", "epigrams", "dagny", "thenceforth", "girths", "swerving", "surpris", "frappe", "pobre", "lebens", "muerto", "enfance", "gesetz", "portentous", "conjurer", "dramatis", "receiued", "sergent", "hurls", "habt", "couronne", "dullest", "erschienen", "venal", "gebe", "grete", "lauter", "gourmand", "wearisome", "sortir", "exaggerates", "gurgle", "antislavery", "laertes", "apologetically", "clime", "poultice", "ministrations", "gendarmes", "telemachus", "sommet", "remonstrance", "capitulated", "karna", "prettily", "reeking", "cheapside", "citie", "zuerst", "persuader", "epistolary", "flutters", "elemente", "maitresse", "reappearing", "dudgeon", "pilasters", "theban", "kennis", "unwisely", "grammarian", "figlio", "peruvians", "lateran", "sente", "reverberated", "plenitude", "faim", "unpardonable", "robarts", "volgens", "bowmen", "blundering", "dishevelled", "exorcise", "scurrilous", "squalls", "parla", "vaste", "jedes", "shewn", "hiki", "vasudeva", "objetos", "briefe", "valets", "corruptible", "pedlar", "impassive", "abasement", "faints", "vicomte", "pillory", "dieux", "inquirers", "orte", "brahmana", "toren", "prostituted", "quartering", "amorites", "disavowed", "undulations", "redressed", "waifs", "cuyo", "siegmund", "steg", "harangue", "liefde", "yeomanry", "lepanto", "matilde", "passepartout", "gentil", "ablest", "faveur", "dicho", "whitest", "bastante", "handmaiden", "humors", "sollen", "cooed", "knabe", "gunboats", "comradeship", "inopportune", "exhaling", "lurching", "plumed", "poesy", "cheapness", "scythian", "proche", "backe", "sapped", "starched", "tasche", "insieme", "undistinguished", "unes", "gayer", "seceded", "belligerents", "baser", "ribald", "coursed", "habitants", "brusque", "officious", "hert", "gorka", "flannels", "contrivances", "capitulate", "wayfaring", "kammer", "dejar", "disfavor", "staden", "umgebung", "liveries", "sieur", "devez", "anatomist", "laundress", "bugles", "manie", "swindlers", "clandestinely", "sitte", "avere", "fichte", "coolies", "edra", "briars", "tarentum", "chaude", "unfitness", "annihilating", "swathed", "extorted", "tanta", "avaricious", "entfernt", "waft", "popish", "darning", "pasos", "crois", "fidgeting", "resinous", "granit", "flayed", "paramour", "enunciation", "josue", "frailties", "haunches", "morea", "chastened", "dropsy", "impositions", "wriggled", "displease", "agit", "moneyed", "halten", "peligro", "armee", "langsam", "toutefois", "cloche", "neatest", "howitzers", "mantelpiece", "proclivities", "rache", "falkenberg", "imitator", "agonising", "maximilien", "tuer", "meerschaum", "impiety", "loiter", "actuelle", "schwer", "begot", "suddenness", "baneful", "templo", "wenden", "twirled", "furtively", "betrayer", "jingling", "arrowroot", "welcher", "readjusted", "assails", "priestesses", "jostle", "admonishing", "avocations", "allons", "humblest", "haec", 
"mohammedan", "solitudes", "insurrections", "lodgers", "kunna", "cacique", "exalts", "grec", "cajole", "mhw", "swooning", "wincing", "unswerving", "enjoyments", "thirsting", "savants", "kentuckians", "monarchical", "celebes", "divans", "immodest", "perquisites", "flatters", "gedichte", "herzen", "beurre", "meni", "sayest", "lutter", "heissen", "voeux", "juges", "papists", "jeer", "premeditation", "waken", "tearfully", "sagged", "pugnacious", "companie", "bedecked", "finalmente", "soin", "oftener", "motioning", "saunter", "universelle", "firmin", "llamado", "versant", "flaxen", "pseud", "soie", "tempter", "miscarried", "rivulets", "corde", "appertaining", "nostre", "prochaine", "lohn", "partridges", "qualche", "nooit", "swum", "dunkle", "staan", "brakeman", "regretful", "coasted", "democritus", "yawl", "endast", "permettre", "drooped", "mehrere", "exacts", "licentious", "antiguo", "fermer", "deadlier", "doest", "romanus", "agog", "ponts", "liii", "yeomen", "lothario", "maal", "charybdis", "wazir", "habituated", "doff", "fede", "jests", "brandished", "jeremias", "raisons", "gouty", "twined", "comprend", "resister", "stoics", "soldiering", "viso", "tyrannies", "natuur", "greenbacks", "puesto", "sullied", "calvinistic", "abridgment", "frequents", "faite", "hoffnung", "leipsic", "bekommen", "fiercer", "entreaty", "creaked", "disconcerted", "roule", "interpose", "saan", "neveu", "hearkened", "mournfully", "surprize", "tenanted", "kerchief", "marvellously", "allerdings", "unenforceability", "moralizing", "phantasmagoria", "glutinous", "pretexts", "recollecting", "omdat", "jemand", "hundredweight", "hags", "severities", "sobered", "fournir", "coiffure", "forasmuch", "lige", "aliment", "moeten", "salir", "caprices", "laufen", "blockaded", "ignominy", "tempests", "scythia", "recriminations", "olim", "geeft", "dismally", "insinuations", "smiting", "hapsburg", "bevor", "zeiten", "lulls", "pompeius", "peux", "misrule", "unasked", "illo", "kuka", "copiously", "freien", "wildernesses", "perpetration", "transmuted", "abideth", "blaspheme", "blacking", "quelled", "threescore", "sitteth", "keenness", "quickens", "scornfully", "puerperal", "multis", "worldliness", "croaking", "ignoramus", "howbeit", "sisterly", "briers", "ouvrage", "faible", "avidity", "gascon", "bergs", "accustom", "consiste", "venez", "prouder", "pleaseth", "cottonwoods", "dienste", "superintending", "spectres", "poetess", "moluccas", "leguminous", "brigands", "quarrelsome", "moine", "damnable", "etruscans", "poeta", "tottered", "theil", "disdained", "shrivel", "ouvrages", "avaient", "firstfruits", "sinne", "daran", "untying", "slights", "throbs", "whitened", "genoese", "inclosed", "couche", "dismounting", "procede", "fattened", "planche", "vasari", "freier", "enkel", "jupe", "heaths", "enjoins", "terrestre", "insuperable", "recapitulate", "vois", "drays", "rester", "enceinte", "starlit", "wohnen", "inauspicious", "prescience", "capitaine", "magnates", "predilections", "picketed", "knaves", "sware", "scampered", "imposible", "academical", "krank", "ploughman", "heilige", "mettez", "conscientiousness", "basilio", "morceau", "splendide", "arabes", "cire", "acceptation", "schlug", "novitiate", "humoured", "idolized", "rivulet", "seethed", "geest", "etruria", "geboren", "senti", "allayed", "pored", "perceval", "wagen", "antiquary", "muscovy", "shoemakers", "zullen", "diggings", "legte", "emancipate", "achter", "burghers", "ignorantly", "ancor", "erlaubt", "diviner", "laisser", "bleibt", "discoloured", "gooseberries", "jahres", "wolde", 
"quarreling", "enterprize", "augustan", "fruitfulness", "slanders", "quelli", "embalmed", "uprightness", "stephanus", "apposite", "milles", "slaveholders", "kansan", "parlez", "nimi", "arbres", "kloster", "zulus", "limpid", "bridled", "forecastle", "statuesque", "polyphemus", "knowed", "encouragingly", "harboured", "foole", "misschien", "dolorous", "benefice", "unenlightened", "sagte", "croaked", "symbolical", "magistracy", "alighting", "schritte", "foretaste", "porthos", "incoherently", "ladylike", "iphigenia", "pleine", "allured", "jahrhunderts", "lucilla", "constitue", "sogar", "palpably", "weder", "improbably", "expressionless", "bowstring", "sickens", "jolting", "soundless", "hadde", "freest", "unspeakably", "gestalten", "unconquerable", "contemplations", "foretells", "empor", "pasteboard", "mangy", "artaxerxes", "misapprehension", "perche", "reverential", "sledges", "schoolmate", "utiles", "denke", "befinden", "infallibly", "unbidden", "callousness", "bloss", "tooke", "prefatory", "herakles", "extirpation", "pantaloons", "noiselessly", "adventuress", "fluch", "commodious", "pincers", "freshened", "artificer", "animo", "entangling", "quarrelling", "blackening", "appeareth", "partakes", "regaled", "disputants", "freundlich", "junks", "ingenuous", "floundered", "entrer", "jeered", "strabo", "assignation", "kleider", "mismos", "sheeted", "beefsteak", "undervalue", "pensar", "reden", "particuliers", "oratorical", "sacerdotal", "baying", "dikke", "dieren", "fief", "poate", "repents", "cleverer", "scheiden", "recommandation", "nimmer", "goaded", "ecke", "mislaid", "rotund", "zenobia", "pickaxe", "babbled", "gentlest", "sibi", "besiege", "blandly", "hobbling", "myn", "miletus", "scythians", "mainspring", "dinge", "slake", "drame", "dirent", "jedem", "speared", "attaque", "galleons", "sensorial", "legation", "strutted", "leafless", "deigned", "slaver", "iseult", "recommence", "giue", "aventures", "hellespont", "anciennes", "dalliance", "youthfulness", "privations", "trouvez", "monstrosities", "assai", "goest", "bonbons", "chroniclers", "vitam", "erregt", "dignities", "livings", "ferryman", "mockingly", "caisses", "devolves", "perder", "chemins", "hoeing", "debauched", "doute", "parlons", "loquacious", "vore", "saada", "annat", "displeasing", "intrusted", "prudish", "pelting", "drizzling", "soothingly", "wayfarers", "englanders", "flouted", "worthies", "courtesans", "heavenward", "theodoric", "meget", "charmian", "bezit", "ustedes", "exhilarated", "ansicht", "clanking", "repugnance", "joyless", "execrable", "lucrezia", "loftier", "stolid", "unacquainted", "simonides", "pawing", "balcon", "visigoths", "titter", "otranto", "defraying", "mondes", "charlot", "deified", "grecians", "princeps", "sumptuously", "unemotional", "coarseness", "universel", "enormes", "piedi", "flamme", "selber", "flitted", "toen", "gants", "disproportion", "counterpane", "gulfs", "gewalt", "surnamed", "logique", "deare", "venerate", "tomahawks", "scoffs", "unsavoury", "zephyrs", "exemplification", "waarom", "pleader", "lieben", "bawl", "casque", "cleverest", "convolutions", "siendo", "verloren", "foretelling", "munched", "vrienden", "receiveth", "jene", "ostler", "waddling", "pencilled", "escalier", "drachm", "colline", "plebeian", "eintritt", "ionians", "bekannt", "grammarians", "pflanzen", "undefiled", "furred", "segun", "overhearing", "puissant", "donnez", "blundered", "meines", "congealed", "pierres", "pouvoirs", "maister", "yit", "blasphemies", "covenanted", "disparagement", "anstatt", "minut", "teint", "sachen", 
"pretences", "unimpeachable", "meditates", "cheerily", "faintness", "effaced", "meself", "beguile", "revenus", "dagar", "rearguard", "saide", "inextricable", "rameses", "popery", "trustful", "lewdness", "sanat", "satiate", "sorge", "stupefied", "treu", "caire", "brasses", "lethe", "secondes", "tepee", "euphemia", "joue", "measureless", "scandalized", "jerkin", "stunde", "aforetime", "reflectively", "trackless", "patroness", "impossibilities", "inconsolable", "shouldest", "explicable", "plucks", "wreathed", "criminel", "alexius", "marksmen", "enthusiasms", "slaven", "standeth", "geven", "lesbia", "quellen", "worte", "drave", "blowed", "vare", "canting", "propitiation", "sinewy", "gamekeeper", "dulcie", "agir", "maakt", "uproarious", "gebruikt", "penitential", "glinting", "seeketh", "condescend", "terrifies", "humbler", "expence", "cavaliere", "pettiness", "slackened", "heur", "hija", "predominating", "auftrag", "endureth", "unapproachable", "boons", "vouchsafed", "lunga", "gamle", "philibert", "cordiality", "billow", "relativement", "inconstant", "effete", "storehouses", "carcases", "crestfallen", "iemand", "gloomily", "pouted", "lunching", "wakened", "eerst", "sidled", "tartars", "ebbed", "steckte", "issachar", "astir", "reasserted", "trente", "hardi", "reeked", "dispirited", "insidiously", "divined", "revelling", "mazzini", "befahl", "lovelier", "odium", "fettered", "hustings", "rasping", "besotted", "charioteer", "papered", "primum", "clamber", "adroitly", "ferne", "descente", "holte", "alders", "tache", "unformed", "ducats", "watchfulness", "gottes", "kleines", "steamships", "hvad", "cime", "sundered", "irretrievable", "roguish", "tenir", "maand", "ovat", "rapacity", "sicken", "elopement", "ardente", "worke", "folles", "besuch", "rummaged", "peons", "incontestable", "languor", "israels", "frivolities", "mantilla", "instante", "slovenly", "ambled", "celebre", "clementina", "necesidad", "hesitations", "protagoras", "curtained", "purloined", "lounged", "rustics", "purposeless", "visites", "skirmishers", "flinching", "certaine", "trumpeters", "disbelieved", "anderes", "tableland", "plaatsen", "infini", "revile", "unselfishness", "burrowed", "prussians", "buttercups", "footfall", "cocoanut", "cajoled", "sublimely", "tribunes", "kraal", "meilen", "whizzed", "dritte", "multitudinous", "javelins", "grenzen", "beatific", "bigness", "artificiality", "jeering", "maltreated", "chaperon", "consorts", "stimmen", "priester", "muckle", "vergeten", "causer", "respecter", "bornes", "propter", "churlish", "treasonable", "stowing", "twinkled", "schal", "existenz", "swindled", "vasta", "ridicules", "deres", "wechsel", "gracchus", "undine", "timorous", "soeur", "rende", "ensnare", "spurted", "quarrelled", "beggarly", "mutineers", "schwert", "inseln", "monter", "keiner", "fascinations", "suum", "unhesitatingly", "vivere", "prieur", "treacherously", "repas", "fyra", "disengaging", "propres", "moping", "obviated", "roue", "kracht", "merveilles", "fuerzas", "lunettes", "pirandello", "blare", "historiques", "comest", "sullenly", "kurze", "oppressions", "steadier", "miedo", "trebled", "demurred", "conciliate", "contenant", "ransomed", "donnant", "bedchamber", "chevaliers", "aufs", "calme", "roughs", "drawled", "niets", "ruhe", "florins", "einheit", "sechs", "tagus", "lydian", "pointes", "ehren", "remis", "vele", "imputing", "endowing", "spangles", "peterkin", "armer", "simplement", "brillante", "servia", "disunion", "shepherdess", "sextus", "linge", "lucht", "rueful", "sterk", "unbending", "ideen", "anderer", 
"beispiele", "equinoctial", "constante", "varuna", "jugement", "inheritor", "ginevra", "tarried", "remorseless", "disputations", "querido", "apennines", "gesehen", "wirkung", "redoubtable", "interessant", "antechamber", "seasonable", "clarisse", "moche", "platina", "anden", "viande", "ravish", "dubiously", "battlement", "gamester", "byword", "warded", "stygian", "referable", "rigueur", "jangling", "parfois", "doleful", "baize", "debasement", "besieging", "shrewdness", "interstices", "mayst", "parried", "demanda", "principios", "elbowed", "zahlung", "landschaft", "furze", "neighbourly", "nahe", "haast", "sensitiveness", "gelesen", "gascony", "pawned", "outen", "mendicant", "exigences", "keepeth", "beginnen", "vindt", "giddiness", "gebruiken", "warders", "senat", "retributive", "pyrrhus", "vont", "flagon", "traduit", "innere", "geste", "barefooted", "chattered", "overhung", "demoralization", "pebbly", "stellan", "abashed", "samme", "aurelian", "sacristy", "charitably", "joka", "boutons", "folle", "brooded", "sylvanus", "guter", "dandies", "oracular", "undefended", "lecteurs", "kleid", "hizo", "humorists", "unities", "papiers", "rakish", "effervescence", "enthalten", "unworthiness", "isaias", "moraines", "dorrit", "unflagging", "wur", "corroborative", "komme", "ruffling", "voet", "hardihood", "bougie", "calleth", "greenness", "recrimination", "basked", "embarrassments", "aureole", "disgusts", "nombreuses", "tiden", "sledging", "igitur", "footmen", "recoils", "quadrupeds", "tahi", "bewailed", "morceaux", "roughened", "gewoon", "thinketh", "thoughtlessly", "depute", "besteht", "returne", "savours", "edes", "bulwarks", "clods", "maoris", "mantled", "encouragements", "unfaithfulness", "fenian", "boten", "eateth", "bedraggled", "chiffres", "readier", "ineradicable", "floes", "steadying", "cowered", "monseigneur", "grotte", "verschillende", "pluie", "dispassionately", "mirar", "holen", "slacken", "disgorge", "warre", "avantages", "clamouring", "attainder", "followeth", "communing", "mischievously", "communistic", "jongens", "thys", "zweiten", "chastising", "mouvements", "derisively", "lopped", "spoliation", "pleasantness", "meilleure", "montrer", "phosphorescence", "daba", "lustily", "avantage", "antediluvian", "irreligious", "vindicating", "objeto", "ascetics", "creuse", "scorns", "laggard", "vues", "jadis", "blockheads", "saddening", "llena", "malcontents", "gentes", "nane", "satins", "danser", "unmindful", "indescribably", "unruffled", "inclining", "aquellos", "drapeaux", "animosities", "inured", "pardoning", "weshalb", "somit", "conoce", "giorgione", "enfranchisement", "rebuking", "perceptibly", "cierto", "vitiated", "wizened", "wintered", "comique", "sympathizing", "beziehungen", "townsman", "continuer", "gorged", "mildness", "luckless", "maecenas", "caracteres", "gunwale", "indigestible", "jowl", "prinzessin", "unclosed", "warten", "causas", "inclosure", "voluptuousness", "solide", "paroxysm", "merchandize", "construire", "meester", "whetted", "seraglio", "scourges", "corroding", "lejos", "leadeth", "soupe", "jongen", "guiltily", "teaspoonfuls", "acquainting", "parapets", "twittering", "augurs", "admiringly", "illumine", "selten", "awfulness", "encamp", "henceforward", "scalped", "huddling", "erfolg", "combated", "evinces", "gewinnen", "deputed", "clambering", "surplice", "factitious", "fitfully", "vrede", "ascanio", "perishes", "oncle", "laisse", "blanches", "vieilles", "skulking", "demur", "monstrously", "imposts", "diaphanous", "theodosia", "wagged", "aske", "vilka", "peradventure", 
"surmounting", "satyrs", "grandsire", "evasions", "lumbered", "cortege", "rapidement", "countenances", "beholds", "contradistinction", "scampering", "easie", "tourna", "sainted", "inglorious", "contrario", "whereat", "discuter", "defrayed", "kirchen", "kaum", "trouverez", "repudiating", "insupportable", "undisguised", "discerns", "tantum", "juden", "deaden", "victime", "unalloyed", "venial", "widger", "griselda", "hansom", "nonchalance", "frapper", "regarde", "amoureux", "cypresses", "phrygian", "lamed", "workingman", "scoffing", "hulks", "sauvages", "breede", "ruminating", "honorius", "abjured", "jacobin", "communiquer", "nere", "insincerity", "persecutor", "dichter", "cloches", "crevasses", "singen", "burgher", "ferner", "unstained", "unflinchingly", "subsisted", "notaire", "tamen", "entro", "songer", "surprized", "rehoboam", "fromme", "deputations", "ringlets", "retourne", "scourged", "survivals", "mollify", "commonwealths", "blockading", "shakspeare", "triumphing", "ecstasies", "rends", "nahm", "bilden", "bedclothes", "impertinence", "commissaries", "languidly", "sedulously", "venne", "grimaces", "neger", "loftiest", "decembre", "recommenced", "stuhl", "pochi", "depopulated", "upraised", "formen", "whereunto", "fuit", "vorst", "unfruitful", "conceits", "shrivelled", "geschenk", "jesting", "begriff", "erfahrung", "tendril", "quoque", "dayes", "entendu", "ercole", "indes", "beareth", "sleighs", "pensiero", "licentiousness", "uren", "unshaken", "englishwoman", "limply", "hereward", "ahasuerus", "pythian", "compassed", "hablando", "unsettle", "proconsul", "coarsest", "jenseits", "woord", "gentility", "assizes", "devons", "serue", "quadruped", "honourably", "insbesondere", "chivalric", "helgi", "womankind", "streng", "penknife", "copyist", "eadem", "entwickelt", "solemnized", "palpitation", "haughtily", "valentinian", "kindreds", "counterfeited", "sweetmeats", "tousled", "unfastened", "venire", "courser", "flaunted", "canopied", "dethrone", "vouchsafe", "hereabouts", "blackguard", "unitarianism", "gegenwart", "garrulous", "eftersom", "controverted", "serviette", "venga", "amiably", "schreibt", "sowohl", "nappe", "fulsome", "terribles", "gauzy", "verie", "cornes", "noires", "echter", "mangel", "marcher", "beetje", "vostra", "patrie", "lvii", "dilatory", "unco", "jagd", "debase", "hoher", "alltid", "wollten", "distil", "cinna", "splendours", "fronte", "abreve", "clinking", "apposition", "maddened", "vaster", "florentin", "slouched", "remonter", "aguinaldo", "sorrowing", "revenir", "hohenzollern", "neere", "devient", "moeder", "exultant", "pilfering", "trousseau", "frisson", "kaikki", "unconquered", "farces", "connu", "perjured", "seeke", "eloped", "corpuscles", "obscurely", "dreamless", "dadurch", "lamely", "curdled", "haie", "schoon", "wonted", "gallants", "dasein", "respectably", "fixity", "zehn", "yelping", "vaine", "croesus", "obdurate", "ofte", "tuuli", "absolue", "christabel", "ransack", "belisarius", "schlag", "taler", "piously", "quaintly", "rationalistic", "usque", "partis", "seras", "schritt", "disinclination", "eingang", "aloofness", "arminius", "dilating", "parthia", "felucca", "premisses", "glibly", "putrefaction", "unfortunates", "pottage", "ligger", "tubercles", "herzlich", "manservant", "unluckily", "plumped", "disinherited", "resounds", "crut", "anciently", "tiens", "remaineth", "ratione", "begetting", "gurgled", "scheint", "hopefulness", "poil", "voiles", "hez", "citer", "dehors", "vindictiveness", "potest", "lolling", "aboue", "extorting", "adventured", "elkaar", 
"clattered", "pouvant", "oure", "unsteadily", "sufferance", "muu", "charmant", "mede", "raptures", "dinna", "barrenness", "placidly", "bawled", "enkele", "protoplasm", "dyspeptic", "gaue", "diffident", "affianced", "communs", "zeker", "guileless", "ebbe", "wery", "opprobrium", "geheime", "imputations", "marchioness", "pferd", "capriciously", "ganske", "superintend", "bantering", "indorsement", "perspiring", "dissensions", "baseness", "blotched", "implores", "gewesen", "digne", "hillocks", "jalousie", "straat", "nogle", "solche", "fretful", "geheimnis", "dresse", "inquisitorial", "circumspection", "unsullied", "spirituous", "garrisoned", "supercilious", "soldiery", "skirmishing", "profaned", "ordinaire", "prochain", "ebullition", "avowedly", "notwendig", "remoter", "reflexions", "clamorous", "sois", "scullery", "seemeth", "etait", "blasphemed", "disconsolate", "einde", "antiquaries", "quibus", "whimsically", "spinsters", "hohen", "fahren", "exactions", "cupful", "lugger", "bestimmt", "patricians", "atoned", "tourbillon", "causeth", "unpromising", "geluid", "caissons", "surcharged", "stoff", "quarreled", "suckled", "soort", "pulpy", "militaires", "partaker", "pigmy", "censures", "morir", "digged", "fust", "confessors", "kleur", "braut", "lacerated", "promptings", "vouched", "obligingly", "puo", "yerself", "jael", "tragen", "spinifex", "unexpressed", "lunched", "scourging", "haroun", "manfully", "vidare", "revolutionist", "kennt", "tracery", "ebers", "surmises", "torno", "bedingungen", "falle", "seemly", "catched", "saura", "habet", "preso", "naughtiness", "derecha", "fastidiousness", "demoniac", "penury", "wainscot", "supernal", "impelling", "cellule", "einzelnen", "modeste", "flits", "vacillating", "jocular", "galop", "jacobins", "forsyte", "fathomless", "chiding", "savoured", "algun", "marvelling", "plentifully", "wakeful", "conter", "dicen", "homelike", "swooned", "unsociable", "puisque", "allgemeinen", "fatta", "drear", "erreurs", "buffoonery", "rashness", "pensamiento", "impels", "dissembling", "consistence", "intimating", "dieth", "missis", "appeler", "possa", "aemilius", "slunk", "deswegen", "coadjutor", "footfalls", "lombards", "jego", "jewess", "endued", "sorrowfully", "iniquitous", "tramped", "ecclesiastic", "agriculturist", "hanc", "hildegarde", "waylaid", "blustering", "blauwe", "uniforme", "granaries", "ombres", "dolch", "estaban", "deras", "dishonourable", "bespeaks", "smilingly", "avow", "whar", "certa", "assize", "ducat", "suuri", "schrijven", "nachdem", "hundredfold", "poing", "knickerbockers", "hechos", "fiers", "betook", "caressingly", "hooted", "gjort", "instanced", "shet", "corpulent", "jacobites", "stumm", "veldt", "springen", "moros", "tierras", "mystification", "eorum", "recoiling", "pshaw", "erscheint", "ruban", "apoplectic", "lingvo", "basest", "fitly", "marchands", "flirtations", "conocido", "unctuous", "enlivening", "sentir", "mauvaise", "beaumarchais", "plaints", "entfernung", "startles", "colonnades", "theatricals", "hoogte", "intimacies", "remonstrated", "leichter", "braying", "nuages", "lassitude", "leibnitz", "moonless", "changeless", "sagely", "unfavourably", "valorous", "endurable", "leid", "prolix", "trespassed", "shews", "longtemps", "sidelong", "principalement", "clamored", "einigen", "scheldt", "perte", "idiosyncrasy", "clucking", "glaube", "cualquiera", "donjon", "messieurs", "goutte", "workingmen", "paleness", "festen", "alack", "trivialities", "tristesse", "discourteous", "dimness", "besetting", "daunt", "boue", "vorm", "indisposed", "rente", "drog", 
"strategical", "thermopylae", "ivanovna", "landet", "skola", "amidships", "meete", "garder", "buiten", "beeves", "nemen", "alwayes", "looke", "preternatural", "versuch", "conduce", "sien", "centimes", "feare", "retourner", "neder", "earldom", "indubitable", "juifs", "handsomest", "decorous", "chagrined", "gemeinde", "imbecility", "ouverte", "goud", "buffeting", "doorkeeper", "absolument", "schwarzenberg", "bushrangers", "bounteous", "steine", "lulling", "toucher", "steeled", "patronised", "whisperings", "detests", "haughtiness", "ilka", "defiling", "frenchwoman", "betide", "estime", "emolument", "rivalled", "prithee", "wisse", "expedients", "beautified", "precipices", "llevar", "walketh", "mutta", "diffidence", "tablespoonful", "meum", "bestowal", "tingled", "hangen", "conduire", "unrelieved", "morgon", "ariosto", "swindling", "saragossa", "gladiatorial", "parthians", "parer", "reichen", "bacchanal", "perplexities", "ablutions", "arten", "innan", "vallen", "tulla", "unkindly", "lovest", "stratagems", "carousing", "envies", "condescended", "freighted", "gange", "compagnies", "slackening", "pardner", "wondrously", "dingen", "teilen", "shimmered", "tror", "anteroom", "agriculturists", "marins", "slechts", "watermen", "citoyens", "sorti", "megara", "mayenne", "beardless", "cheerless", "tenido", "goot", "tuch", "wacht", "moistening", "unprejudiced", "explications", "dissimulation", "restes", "pined", "inculcating", "combien", "pensando", "oorlog", "plaits", "fleuve", "agrippina", "neen", "erit", "satt", "budded", "liest", "plaintively", "devenu", "threateningly", "profligacy", "gwendolen", "subtil", "meshach", "videre", "armie", "hoffe", "hungered", "pecho", "bluntness", "kuin", "lebe", "gesticulating", "pourraient", "athwart", "hermana", "shambling", "tenderest", "ordains", "propound", "immoderate", "acuteness", "hewed", "kindnesses", "douze", "unaccountably", "neun", "plainest", "boire", "sech", "pesar", "gavest", "subtlest", "racines", "partaken", "gruffly", "etes", "welkin", "breviary", "lineaments", "unburied", "insatiate", "intolerably", "discomfiture", "puso", "mirando", "threepence", "ebenfalls", "libanus", "unmercifully", "milord", "behandlung", "velours", "tochter", "itse", "noces", "lampes", "chary", "quas", "danach", "wouldest", "primroses", "manumission", "mortifying", "gondoliers", "krijgen", "ministres", "garbed", "adelheid", "memnon", "nuo", "desperadoes", "nuage", "sesterces", "coucher", "freunden", "civilize", "phial", "faute", "arrant", "offrir", "appealingly", "multe", "declamation", "miscarry", "complacently", "unmerited", "insubordinate", "feux", "assuaged", "dukedom", "efface", "dazzlingly", "peintre", "looketh", "whalebone", "minutest", "ungovernable", "wellnigh", "meuble", "ziet", "wittily", "schmerz", "foolery", "exulting", "habitant", "craned", "ennobled", "profundo", "arbeid", "apuleius", "pourtant", "wantonness", "scenting", "beziehung", "fik", "flinty", "comanches", "ordnung", "ceremoniously", "gloire", "wobei", "hollowness", "zeggen", "jardinier", "serai", "plw", "desierto", "fancying", "protuberance", "largeur", "divin", "portait", "tersely", "deploring", "sallies", "frontiersmen", "contraries", "armful", "envers", "extricated", "dissemble", "bouteille", "impost", "countenanced", "essayed", "findeth", "gesagt", "zustand", "pandavas", "vaguest", "fenetre", "passen", "feebleness", "plodded", "lesquels", "excellente", "gik", "nieder", "brise", "facilement", "inflaming", "prete", "augury", "diabolus", "revelled", "mayhap", "humbles", "poetes", "metier", 
"personnages", "demoiselle", "unhampered", "matelas", "puisse", "indissoluble", "netta", "nicety", "tablespoonfuls", "witticisms", "enfeebled", "surveiller", "revolutionists", "cozen", "middel", "penitents", "imprudence", "tiptoed", "reicher", "magyars", "civilities", "trussed", "dulcet", "sirrah", "rapporter", "festal", "couteau", "baronne", "heartrending", "devotedly", "plancher", "amies", "steeps", "salubrious", "spearmen", "houden", "marriageable", "imposture", "mutinous", "jabbering", "tyrian", "pourra", "peremptorily", "whirlwinds", "despoiled", "lugubrious", "ringleaders", "begriffe", "listlessly", "affronted", "debout", "probablement", "daintily", "pikemen", "deinem", "partager", "exaction", "unlighted", "washstand", "overspread", "losse", "piteously", "politischen", "tager", "largess", "weightier", "plenipotentiaries", "muka", "insensibly", "snart", "contento", "parchments", "uusi", "scotchman", "repousse", "ingratiating", "bairn", "poisoner", "prodigiously", "unerringly", "qualm", "aquel", "marseillaise", "uncharitable", "bestimmung", "shiftless", "visages", "subjoined", "pierrette", "befindet", "daubed", "ostentatiously", "unvarying", "choisi", "whereto", "cottagers", "voluble", "ingratiate", "helpmate", "ligt", "soldats", "gloaming", "adamantine", "weinig", "kansa", "rudest", "forcer", "einfluss", "brunnen", "oreilles", "varit", "braucht", "gutes", "irresolute", "mogen", "aarde", "smartness", "burthen", "attente", "bekend", "lleva", "unsparing", "bewegung", "paard", "alcide", "espied", "effrontery", "vacuity", "pillared", "queerest", "impolitic", "defiles", "byles", "indubitably", "mottoes", "molti", "questioningly", "generalship", "debasing", "victimes", "demurely", "talar", "donker", "peuples", "humains", "comun", "prettiness", "usurpations", "plebeians", "habia", "meurs", "philosophique", "sloops", "regierung", "savez", "gesang", "gick", "saturnine", "trinken", "hungering", "unreasoning", "morto", "thoughtlessness", "pobres", "rasped", "celestials", "florrie", "turneth", "childishness", "glauben", "revenged", "radiantly", "gefahr", "prohibitory", "destine", "forestalled", "converses", "commonplaces", "waggons", "interet", "duenna", "outwitted", "summat", "bespeak", "pocos", "waarde", "wheresoever", "compromis", "wyth", "obwohl", "partei", "meddlesome", "bustled", "neckerchief", "brahmanas", "misgiving", "farthings", "gebiet", "disfigure", "rancorous", "forsakes", "torpid", "doctrina", "atem", "canne", "intendant", "bereit", "fiere", "swiftest", "confidants", "unwonted", "astonishes", "joues", "recondite", "sightless", "blunderbuss", "besondere", "chiselled", "unconsidered", "hottentot", "tarda", "fausta", "beholders", "quelles", "vertes", "invitingly", "gloated", "wearying", "straitened", "disdainfully", "romish", "servitor", "ingrate", "unvisited", "officier", "bairns", "bedeutet", "sorgen", "autrement", "quinze", "entreating", "longues", "voisine", "insensibility", "washerwoman", "ufer", "caldron", "offert", "summum", "reiche", "irreproachable", "quels", "penser", "sentimentalist", "tenia", "avea", "sublimate", "mitad", "deutlich", "encima", "bowsprit", "antrag", "childishly", "envying", "austerities", "largeness", "hemlocks", "chiffre", "sadden", "passionless", "haunch", "signifie", "thronging", "plainness", "wolfish", "breakfasted", "quidem", "semblant", "ressort", "intrepidity", "pferde", "affectations", "filthiness", "rayons", "sommeil", "hateth", "spitze", "fomented", "opfer", "dietro", "iesus", "conjuncture", "vivante", "docility", "moravians", "wretchedly", 
"preciso", "nosegay", "fidgeted", "trooped", "deadened", "brimful", "antwoord", "mistrusted", "florentines", "circonstances", "bedarf", "commencer", "fevrier", "vyasa", "assailing", "unseasonable", "blod", "minstrelsy", "voies", "paunch", "sobriquet", "horatius", "serapis", "soeurs", "chaffing", "wahr", "unlettered", "prowled", "uninviting", "buttoning", "agesilaus", "entender", "jaunes", "tragical", "charakter", "vesture", "spricht", "richtung", "salver", "milliers", "profoundest", "reproachful", "petulance", "grovelling", "companionable", "kindliness", "convulsively", "laudanum", "residuum", "tombeau", "servility", "strew", "dites", "unendurable", "ennen", "cassock", "khasi", "aufgabe", "excommunicate", "erwarten", "zaal", "arabesques", "avowal", "interposing", "retirer", "pathless", "revers", "juist", "trooping", "rencontrer", "marteau", "stanch", "perspicacity", "pawed", "swains", "hinzu", "undulation", "versuchen", "highroad", "wesen", "gondolier", "douleurs", "ascendency", "sammen", "hasted", "sehnsucht", "stupefying", "pealed", "stets", "citoyen", "requite", "larges", "omnibuses", "windless", "hinc", "sanguinary", "mohammedans", "tyburn", "souhaite", "firmest", "neus", "dumbly", "allemands", "inquisitiveness", "fourni", "erkennen", "bethought", "debajo", "lebt", "slipshod", "rundt", "produire", "heeds", "tevens", "doted", "overmuch", "chastening", "waxen", "cadaverous", "stroom", "spielt", "croire", "contriving", "waddled", "circassian", "especie", "whin", "greediness", "preferment", "geschreven", "ziele", "remounted", "ontvangen", "strewed", "artifices", "assenting", "anaxagoras", "unge", "cousine", "presentiment", "sturdily", "falleth", "quitte", "censorious", "ouvre", "mekka", "noontide", "ewigkeit", "tausend", "pranced", "augenblick", "pudo", "glowering", "suppliants", "heare", "personnelle", "gezien", "schemed", "disentangled", "qualite", "husbandmen", "fruitlessly", "guerrier", "huntsmen", "photoplay", "dritten", "duchies", "cuirass", "flotte", "hireling", "overweening", "joies", "abruptness", "sieh", "moed", "warred", "nourriture", "niver", "conducteur", "regicide", "dedans", "roved", "remplacer", "ajoute", "auquel", "siller", "touchingly", "hisself", "bliver", "industriously", "confusedly", "eying", "befit", "edified", "profondeur", "portier", "malignity", "revient", "sibylla", "karakter", "becometh", "poort", "halloo", "pasturage", "loisir", "puits", "voort", "soixante", "voglia", "pandu", "geval", "pouvait", "smarted", "paroxysms", "coquin", "mirthful", "vergangenheit", "coeval", "pharao", "ceinture", "galvanometer", "finna", "graceless", "slinking", "enlever", "brocades", "ennobling", "prevenir", "harten", "pleasanter", "hindoo", "falseness", "drap", "betimes", "natuurlijk", "procurer", "malefactors", "lysias", "handmaids", "gefallen", "gaar", "straten", "dommage", "bewail", "rhenish", "twitter", "erano", "schar", "irreverently", "misjudge", "revengeful", "interdicted", "suppliant", "monotonously", "benignly", "certes", "averil", "sauntering", "zusammenhang", "gebracht", "inexpedient", "confiscations", "heartiest", "untutored", "forbears", "exulted", "uninfluenced", "gallies", "omne", "taches", "tourner", "marcius", "pealing", "campagnes", "quoniam", "leathern", "ecclesiastics", "interceded", "nimmt", "intelligibly", "craftily", "chaplets", "abends", "englischen", "bestaat", "makest", "nerved", "braccio", "philosophe", "couvert", "musketry", "caribs", "enfranchised", "maer", "casements", "eatable", "dets", "meanly", "profonde", "theyr", "aspecto", "disinterestedness", 
"soumettre", "plebe", "nier", "jeta", "blaspheming", "benutzt", "pantheistic", "slumbered", "hostler", "fous", "quartette", "hoed", "stettin", "brusquely", "rankled", "nonconformists", "intonations", "scandalously", "sirup", "exercer", "reproachfully", "pauvre", "rivalling", "obtenu", "eeuw", "howat", "existencia", "delusive", "sepulchral", "sarebbe", "fuor", "pareil", "remplir", "fourscore", "teacheth", "guld", "droned", "balles", "traiter", "rapporte", "wellen", "abler", "wallowed", "recompensed", "quil", "chamberlains", "disgracefully", "brung", "manches", "quei", "atteindre", "asuras", "lamentably", "achaean", "loups", "lowliest", "braggart", "somersetshire", "indisposition", "mithridates", "reconnu", "nutriment", "unkindness", "tranquille", "froh", "gardes", "talo", "rascally", "gardien", "sanoi", "strumpet", "zigzags", "discoursed", "erreicht", "haare", "accost", "manoeuvred", "libels", "blighting", "vileness", "blessures", "soldados", "abase", "outcries", "stampeded", "bithynia", "cupidity", "soundest", "consentement", "risings", "fervid", "truculent", "illimitable", "gayly", "forbearing", "kvar", "despatching", "potentates", "putteth", "impetuosity", "jutted", "encomium", "decke", "behoves", "querulous", "mener", "manchus", "pemmican", "discomfited", "dienen", "sidste", "steden", "mollified", "sulphurous", "entierement", "parterre", "subtile", "ziemlich", "quon", "enfolded", "gedacht", "belongeth", "parian", "emot", "nowise", "vaan", "verdient", "detestation", "theophrastus", "indiens", "sallied", "infinitude", "unchristian", "nachbar", "hubo", "quaff", "scuffling", "commotions", "belang", "numidia", "craning", "indistinctly", "aldrig", "zes", "houdt", "chiefest", "casuistry", "siis", "manchmal", "purposing", "justness", "hundert", "simpering", "soothsayers", "charwoman", "mittag", "facere", "aquella", "chasseurs", "countersign", "frem", "cambric", "thron", "spluttered", "leetle", "quos", "glinted", "facon", "coupable", "lowliness", "lesquelles", "turc", "trundled", "desolated", "kindles", "shineth", "woning", "falchion", "asperity", "pousse", "dran", "secretaire", "effulgence", "banisters", "extricating", "valt", "hesitatingly", "affray", "pensively", "meretricious", "promiscuously", "overset", "chuse", "ruido", "undefinable", "scorning", "multa", "lacedaemonians", "aristoteles", "friede", "censers", "aufgenommen", "tandis", "talke", "trifled", "intelligente", "delightedly", "chimerical", "kanske", "importunate", "disgraces", "zeg", "agitations", "piratical", "indigence", "acquirement", "mutely", "billowy", "querelle", "suzerainty", "imperturbable", "milliners", "pensa", "fecit", "gleiche", "vacillation", "innocente", "toilers", "snored", "heathenism", "rancour", "apercu", "facetiously", "riband", "pecado", "slaine", "vaut", "disdains", "gedaan", "hvem", "amain", "cavil", "kohta", "huskily", "unwarrantable", "glowered", "curates", "anent", "wenigen", "konnten", "worthier", "vooral", "leered", "palmy", "religieux", "truncheon", "hovels", "milliards", "unlovely", "abjure", "plenteous", "piedmontese", "debauch", "holocausts", "imperatively", "philadelphus", "darky", "ravening", "kentuckian", "methought", "fagot", "foulest", "rills", "gaven", "treize", "leise", "dragoman", "micht", "affrighted", "unsocial", "loger", "dejectedly", "tamely", "reposing", "ausdruck", "phlegmatic", "mightest", "dispossess", "cataloguers", "gibe", "drily", "languorous", "paire", "tode", "foulness", "zelfs", "calumnies", "scythes", "shirked", "disapprobation", "propitiate", "hilft", "usurpers", "lagen", 
"estis", "inspirer", "gainsay", "ambrosial", "atteinte", "intanto", "conciencia", "provender", "schulter", "navire", "matronly", "andern", "sourire", "ungracious", "overawed", "mukaan", "relenting", "bijna", "angesehen", "coude", "dickon", "vapeur", "maintenir", "sluices", "geweest", "erziehung", "zitten", "importe", "raisonnable", "canot", "grundlage", "hessians", "undreamed", "equable", "oppressively", "chacune", "zaak", "pourront", "indorsed", "kasteel", "indulgently", "takaisin", "superfluity", "pantalon", "gossiped", "generalissimo", "coquettish", "zegt", "konung", "accepter", "expiate", "commiseration", "voudrais", "counterpoise", "sawest", "inquiringly", "betes", "romanism", "northmen", "folgt", "cuya", "schicksal", "travaille", "thae", "leitung", "unfeigned", "impalpable", "murmurings", "conjointly", "excitements", "zambesi", "vilken", "comeliness", "verra", "hambre", "indiquer", "grossness", "cuivre", "noget", "countrey", "carefulness", "blijft", "douceur", "vaporous", "oarsmen", "seigneurs", "toilsome", "proprieties", "listlessness", "waarin", "pities", "tredje", "mortify", "gipsies", "neapel", "unhallowed", "injudicious", "gesetze", "remonstrances", "uninterruptedly", "revanche", "suam", "ither", "unmanly", "mazy", "forebodings", "fickleness", "tuvo", "gelukkig", "geschlecht", "unsheathed", "freilich", "heiligen", "palest", "impulsion", "empirische", "vano", "sitten", "illis", "votaries", "factious", "braw", "verdadero", "shabbily", "hollande", "camarades", "slighter", "yere", "homewards", "trous", "achten", "rapine", "materie", "snuffing", "schwarzen", "sterben", "bezig", "abnegation", "yeare", "vostre", "kerl", "widerstand", "betrachten", "erinnern", "betake", "arbeiter", "klaar", "outspread", "thim", "sendeth", "winde", "lichaam", "zetten", "whirr", "alarum", "doigt", "daarom", "liten", "declara", "gebrauch", "jambe", "paie", "unmerciful", "apporter", "demoiselles", "reprobation", "lache", "burgomaster", "camest", "sonder", "extravagances", "esset", "fellah", "verlassen", "gewinn", "wakening", "vacantly", "discoursing", "cablegram", "tourne", "attendre", "schlechte", "lauf", "injuriously", "spluttering", "felsen", "gloried", "argives", "paarden", "japhet", "cabane", "hende", "zacht", "promontories", "mignonette", "supplicate", "joindre", "freundschaft", "pattering", "unromantic", "sophistical", "frescoed", "sauver", "nobleness", "sealskin", "bewilder", "gwine", "zeven", "consulship", "aminta", "brauchen", "fuite", "unclouded", "affability", "affright", "recantation", "threshed", "malen", "gladdened", "weisen", "fausse", "ruses", "expostulation", "faisait", "heraus", "paille", "delawares", "devait", "tirer", "reines", "galled", "esel", "verres", "atteint", "slaveholder", "fuisse", "meddled", "soldaten", "protestation", "cambyses", "enmities", "becalmed", "genou", "verbunden", "hver", "muut", "leprous", "lambent", "wolken", "sacristan", "lavishing", "wending", "disquieted", "solchen", "benedictions", "niggardly", "herte", "teki", "ankunft", "solides", "gesetzt", "dangereux", "evincing", "vraie", "fauteuil", "naturels", "eue", "buckboard", "noisome", "veinte", "malades", "impassible", "oblations", "worten", "intoxicate", "prenant", "graue", "entweder", "exasperate", "curtsey", "bestimmten", "exclusivement", "babyhood", "sojourned", "censuring", "disrespectfully", "mesmeric", "apprehensively", "roofless", "despoil", "direst", "razones", "inroad", "terminer", "vainglorious", "wenige", "benevolently", "archbishopric", "hatchway", "eigenschaft", "pinnace", "slighting", "vorher", 
"falsch", "maintien", "ellinor", "sepulchres", "extirpate", "adrianople", "imposer", "schlimmer", "wies", "imperiously", "kuu", "rhetorician", "totta", "portefeuille", "unconcern", "toucheth", "requited", "geburt", "suffit", "peloponnesus", "postern", "irremediable", "hamilcar", "quavering", "unperceived", "leonine", "botte", "wonderingly", "haversack", "liet", "ennemi", "handen", "dawdling", "spiritless", "thorwald", "rejoindre", "inutile", "signally", "loitered", "benefices", "hewing", "abysses", "beginnt", "mouldering", "schmerzen", "everlastingly", "descried", "aquellas", "vosotros", "miten", "froward", "elend", "audaciously", "indelicate", "einrichtung", "umfang", "chinamen", "prostrating", "ceremonious", "slaveholding", "unworldly", "ideality", "fece", "fathomed", "boord", "waan", "plafond", "erzeugt", "gekommen", "tranquilly", "delectation", "honoria", "couldst", "prattling", "suivent", "terram", "prate", "submissively", "whithersoever", "parcourir", "assise", "soutenir", "girdled", "abased", "versucht", "niemals", "antient", "semblables", "despairingly", "alguno", "munificence", "throwed", "gervaise", "habitude", "impetuously", "providentially", "veulent", "coom", "harangued", "provincias", "wahren", "glorying", "cockade", "unfrequently", "inconstancy", "betrifft", "ninguno", "doun", "gratifications", "impenitent", "gayety", "arriver", "sagesse", "kwam", "foule", "turm", "bildet", "blijven", "sternness", "vede", "lames", "gunst", "complot", "knapsacks", "engross", "tristes", "appelle", "gracefulness", "communed", "calmest", "glutted", "largement", "dallying", "witticism", "fatted", "blauen", "hottentots", "penances", "brengen", "glimmered", "bretons", "servitors", "refus", "fehlt", "cxar", "ewig", "airily", "gegeven", "schluss", "maudit", "autoridad", "kinsfolk", "erinnerung", "essayer", "distrusting", "tartary", "genoeg", "fremde", "droops", "blandishments", "individus", "remonstrate", "improvident", "handsomer", "blazoned", "vatten", "plainte", "damps", "machten", "bonhomie", "adverted", "soweit", "sacerdote", "productiveness", "gestes", "druse", "quaver", "trouw", "ausgang", "versuche", "wrapt", "draweth", "prit", "tampoco", "versification", "sojourning", "acclamations", "aimez", "unfaltering", "loftiness", "emendation", "behandelt", "clownish", "criado", "tellement", "fordi", "remettre", "redound", "auront", "objektive", "moodily", "discords", "outworn", "honeycombed", "gedanke", "venant", "anspruch", "drauf", "trouvent", "allers", "superannuated", "schauen", "viands", "amiability", "kaisers", "victualling", "religieuse", "wirklichkeit", "envoie", "dicha", "strenge", "unwearied", "punctilious", "turne", "entscheidung", "egotist", "jouissance", "falsche", "schier", "ursprung", "importunity", "distractedly", "zele", "vexations", "seraient", "piastres", "boche", "bewitch", "allures", "frisking", "rottenness", "rufen", "sentimentalism", "clanged", "jupes", "rechter", "privily", "ungenerous", "asketh", "eigenlijk", "absented", "euboea", "fiefs", "honom", "sympathised", "upbraided", "thermidor", "ignominiously", "mischiefs", "appertain", "joko", "perd", "enviously", "wahrscheinlich", "joyed", "gegner", "einfache", "bhishma", "clairement", "eate", "maddest", "adresser", "cabalistic", "conventionality", "italiens", "aliquid", "lidt", "whiffs", "lleno", "manufactories", "twelvemonth", "undimmed", "gjorde", "heah", "parvenir", "faithlessness", "vilain", "contrives", "wistfulness", "genannt", "geleden", "munificent", "fortement", "glaive", "maggior", "convoked", "veste", "malefactor", 
"gelangen", "dotage", "palliate", "oxus", "pedants", "quaked", "malade", "affronts", "explique", "reproaching", "excellences", "venturesome", "roues", "severer", "fremd", "fusillade", "muita", "feareth", "endroits", "maanden", "bareheaded", "girding", "anzi", "taire", "kopje", "illud", "ilman", "maxence", "wrings", "ferma", "hummocks", "detraction", "dicht", "perdre", "charbon", "foure", "subserve", "cherubims", "toilettes", "liebhaber", "lenity", "songe", "respecte", "sabots", "podia", "insolently", "blik", "dimpling", "quiconque", "ehre", "littleness", "homines", "gammal", "highnesses", "awaked", "upbraid", "unsubstantial", "muren", "dezelfde", "proselyte", "authoress", "fabel", "grandee", "pleasantry", "setteth", "chaldea", "pensioned", "yeardley", "tiefe", "considerately", "gattung", "denkt", "poursuite", "teuton", "pestilent", "sofern", "bountifully", "desisted", "senecas", "jollity", "enrica", "inexpressibly", "sunshiny", "dicitur", "handeln", "begint", "oeufs", "amanuensis", "dreariness", "animi", "comprenant", "smites", "schlacht", "schauspieler", "bezeichnet", "orisons", "reposes", "vart", "hauses", "geduld", "fieri", "mischance", "koska", "hospitably", "metaphysician", "vulgarly", "construit", "invectives", "poitrine", "perdus", "blive", "voulu", "pompously", "discourtesy", "hazarded", "curtsy", "palpitating", "marido", "plaisirs", "ennoble", "dira", "unsought", "palsied", "sartin", "panegyric", "profanation", "unfitted", "halfe", "drinken", "imprecations", "virtuously", "inconceivably", "vouloir", "assiduity", "entstehen", "abschied", "asiatics", "artificers", "ohren", "murderess", "pouvons", "radicle", "volontaires", "villany", "forded", "superintended", "abominably", "zweck", "familier", "enervating", "tumults", "philippus", "pouces", "forswear", "astuteness", "heiter", "liebes", "kenntnis", "gehn", "molte", "lediglich", "musst", "hauberk", "domestique", "geluk", "unspotted", "altname", "legt", "bounden", "declaimed", "unexampled", "todes", "tearless", "basely", "vorstellung", "labios", "vond", "hubiera", "speakest", "teemed", "killeth", "preternaturally", "genommen", "pauvres", "negress", "seien", "haranguing", "quaintness", "verser", "stoical", "tyd", "aptness", "retrouve", "mehreren", "malediction", "givest", "discreditable", "brilliants", "unseeing", "connived", "connais", "mourir", "reicht", "crabbed", "obsequies", "perverseness", "latticed", "pleadingly", "besiegers", "busying", "brazo", "cudgels", "heisst", "paroisse", "befehl", "machte", "soldierly", "musste", "richten", "exhalations", "rapturously", "forelock", "luy", "esteems", "agonised", "hirelings", "hoogste", "jauntily", "erscheinen", "declivity", "vivants", "reviling", "sixe", "altid", "retrouver", "ailed", "garlanded", "abjectly", "vernunft", "churl", "vrijheid", "guds", "rendue", "erden", "erant", "telegraphing", "archly", "statesmanlike", "souverain", "yeares", "duft", "gezegd", "kust", "woorden", "quelconque", "dunghill", "declaim", "bucklers", "stouter", "seuls", "unpractical", "sehe", "reverenced", "derfor", "hominum", "voeten", "liveried", "disfavour", "genially", "gezeigt", "modish", "plomb", "gennem", "prier", "vorn", "deigns", "careering", "thenceforward", "trug", "hasdrubal", "kanssa", "hempen", "miltiades", "growed", "decrepitude", "thinkest", "effluvia", "ordres", "figurer", "grimness", "repassed", "meditatively", "sinecure", "mettent", "stopt", "riseth", "kanzler", "invloed", "verlust", "figger", "underrate", "laune", "jederzeit", "pardonable", "vnder", "choleric", "inclose", "bided", "beggary", 
"desto", "boeotia", "pleasantest", "deil", "gashed", "exordium", "tocsin", "alcun", "spitefully", "gehalten", "tonnerre", "abbia", "brocaded", "forwardness", "drawling", "testily", "gebunden", "ruhig", "unfasten", "tyran", "precocity", "resistless", "wangen", "spasmodically", "mesdames", "resignedly", "festoons", "aboute", "varlet", "viennent", "threatenings", "erkenntnis", "prevision", "dacht", "squaws", "cesse", "mahomed", "plunderers", "navires", "tremblement", "comfortless", "incautious", "luxuriance", "petto", "creditably", "jolies", "impressiveness", "cheyennes", "finit", "needeth", "superabundance", "precipitately", "unceremonious", "sidewise", "anacreon", "lisping", "sonna", "delante", "rideaux", "prig", "gezicht", "parfaite", "vituperation", "manifeste", "cabman", "fawned", "oever", "untaught", "juley", "einiger", "voorkomen", "gelijk", "forsworn", "imperilled", "sichtbar", "promptitude", "indiaman", "cantered", "allurements", "bataillon", "lasst", "omkring", "juicio", "noin", "distressful", "justifier", "bestimmungen", "verbinden", "bestimmte", "foremast", "bestaan", "stimmung", "meeste", "accorder", "thirsted", "irruption", "professedly", "geschwind", "groweth", "stupefaction", "lanterne", "larmes", "harangues", "remorselessly", "appartient", "naturall", "stupide", "dexterously", "extempore", "viscid", "abaft", "auraient", "reproving", "ottilie", "waer", "scandale", "turnus", "helpen", "begonnen", "pestilential", "schaffen", "merchantmen", "flammen", "atter", "ensi", "circumlocution", "queenly", "livest", "grandees", "devenue", "adjure", "allant", "obstreperous", "gnaden", "olet", "heedlessly", "soif", "lolled", "flatterer", "stube", "sentimentally", "gowned", "tutelary", "hindmost", "furent", "faibles", "monkish", "zouaves", "ineffectually", "contraste", "duidelijk", "turbaned", "guillotined", "conformably", "meane", "zugleich", "disdaining", "solcher", "ouvrier", "zieht", "lowness", "annoncer", "unpleasing", "disgracing", "disant", "begon", "heartiness", "recompence", "petulantly", "prinzip", "casteth", "rhetoricians", "sulkily", "minuteness", "solemnities", "vexes", "tomando", "impecunious", "avond", "menschlichen", "loob", "aliis", "snaky", "confessedly", "slecht", "wheedle", "hushing", "gxi", "corpore", "ungraceful", "queerly", "schwere", "parfaitement", "holdeth", "straggled", "picturesquely", "mainmast", "disquisition", "tiefer", "vorgestellt", "dulness", "pistoles", "unexceptionable", "finnes", "soumission", "liebt", "maie", "centaines", "havde", "mutinied", "terwijl", "palanquin", "contenir", "milesian", "poursuivre", "lacedaemonian", "volgen", "respire", "gehad", "untrammelled", "stentorian", "flatterers", "tomber", "cantering", "minces", "foible", "questionings", "choeur", "kehrt", "manacled", "haud", "thereabout", "contenta", "soone", "hauptstadt", "daheim", "heedlessness", "coquetry", "wended", "getan", "leggen", "onkel", "barbadoes", "wifely", "tantas", "cuius", "rouler", "expliquer", "mortel", "worthiest", "pusillanimous", "personnage", "swaggered", "accepte", "forbore", "gravelled", "publikum", "opportunely", "odoriferous", "insensate", "showeth", "causeless", "partem", "dennoch", "imprudently", "drollery", "makt", "uncongenial", "feront", "noght", "philosophes", "sententious", "reconnoitre", "doigts", "eatables", "intorno", "quiera", "sabines", "catholiques", "housetops", "rostro", "descry", "zouden", "dachte", "drona", "complaisance", "tinkled", "rappelle", "bewailing", "entrenchments", "llegado", "stilte", "sternest", "vijf", "vaches", "befitted", 
"preeminently", "enervated", "profiter", "ceremonials", "sedately", "choisis", "trone", "gabble", "searchingly", "somewheres", "patriotes", "tyrannous", "wigwams", "paysan", "blevet", "ooit", "suffisamment", "monosyllables", "sluggard", "gelegen", "dissembled", "verlieren", "ieder", "impudently", "jotka", "contrariety", "unprovided", "prinzen", "ruhm", "cerveau", "inclosing", "osaa", "supping", "anteil", "diplomatist", "barefaced", "plighted", "faudrait", "unterschied", "fermes", "verborgen", "ofttimes", "neemt", "steersman", "caitiff", "thebans", "keek", "aient", "seyn", "brumaire", "embroil", "pennon", "athirst", "gnashed", "neighing", "durchaus", "glaces", "magnanimously", "compagnon", "anchorite", "boisterously", "chancing", "dagegen", "tantos", "prenez", "momente", "sterke", "provinz", "withall", "lippen", "donnent", "consorted", "miry", "hollanders", "perh", "exactement", "exacte", "entend", "gewonnen", "moindre", "humeur", "souple", "proserpina", "fluss", "conclure", "dotter", "effectivement", "feelingly", "noised", "bondmen", "unseres", "bashfulness", "vaunt", "wollt", "greatcoat", "unmeaning", "turcs", "untrodden", "nerveless", "insurrectionary", "ruisseau", "refuser", "quondam", "zimmern", "raillery", "faciles", "accordant", "mixt", "ruft", "humide", "sensibles", "prudente", "indissolubly", "teils", "treten", "geschlossen", "extenuation", "favori", "compagnons", "merriest", "loftily", "pourrez", "placidity", "hicieron", "gueule", "regne", "doffed", "herodes", "quatorze", "tegenwoordig", "usurer", "voluntad", "geniality", "twopence", "froide", "rampe", "hearkening", "flippancy", "breastworks", "ruleth", "pellucid", "couvre", "frighted", "hearest", "evadne", "kreise", "oublier", "idees", "irreligion", "bruits", "waarschijnlijk", "prodigality", "bessere", "vuol", "enveloppe", "freshet", "stoutest", "takest", "livelong", "joyeuse", "serez", "citadelle", "appeare", "schaden", "sublimes", "verfassung", "opprobrious", "cnut", "propitiatory", "voyez", "acquirements", "drearily", "grenze", "estuvo", "violences", "hideousness", "drawed", "bewegen", "satte", "appartenant", "paquets", "synes", "parecer", "mechlin", "diciendo", "collines", "cabals", "scherz", "disait", "atli", "superscription", "lieue", "filched", "suffrages", "darkies", "maitres", "swineherd", "unworthily", "disturber", "foresaid", "redoubts", "boding", "ouvriers", "benumbed", "wenigstens", "carouse", "habere", "composedly", "paleis", "nilus", "eenvoudig", "heiresses", "schien", "pistolet", "ambuscade", "repine", "thinges", "geheel", "amants", "jingled", "autrefois", "breakfasting", "noeud", "regardez", "zufall", "drowsily", "religieuses", "voisins", "verfasser", "nogen", "engraven", "nahrung", "gaoler", "bancs", "waarop", "jolis", "evasively", "draps", "weisheit", "habitantes", "brouillard", "resentfully", "acquaintanceship", "declamatory", "elate", "juif", "halb", "geister", "quiso", "gleicher", "supplicating", "schlaf", "zahlreichen", "trembler", "wickedest", "bekannten", "adroitness", "bestir", "helst", "multitud", "wachten", "auxquels", "dropt", "schoolmistress", "obloquy", "profitless", "mourant", "wijze", "saidst", "flucht", "unconcealed", "mettant", "coursers", "disent", "mohammedanism", "finir", "abstemious", "krankheit", "cannonade", "otti", "brume", "grandmamma", "fahrt", "moeite", "tediousness", "verdadera", "ongeveer", "horreur", "licet", "ouvertes", "warbled", "genomen", "vuestra", "clamors", "complaisant", "votary", "hesper", "flossy", "zufrieden", "geloof", "luxuriantly", "loopt", "haled", "grizel", 
"certainement", "duquel", "inharmonious", "amatory", "todavia", "hindoos", "warme", "officiers", "meaneth", "videtur", "knavery", "dije", "blivit", "prennent", "harrowed", "appris", "podido", "stod", "mussulman", "unhesitating", "sybarite", "montrent", "leaue", "fulco", "irresolution", "geschickt", "schlagen", "proverbially", "waywardness", "maturer", "nennen", "treiben", "servius", "bepaald", "daraus", "faudra", "caresse", "bijzonder", "benignant", "appartiennent", "domestiques", "trifft", "arraign", "khoja", "cawing", "fragt", "gilds", "bottes", "antipathies", "afeard", "bishoprics", "marier", "bewegt", "teutons", "whelps", "bestehen", "victual", "healths", "heutigen", "kertaa", "benignity", "whitsuntide", "gesund", "coxcomb", "shrewdest", "couverts", "hecha", "jener", "undistinguishable", "satrap", "haen", "stateliness", "copses", "richesse", "poursuit", "adown", "brokenly", "coffre", "gilberte", "eddying", "couvent", "hawser", "circumstanced", "werry", "muratori", "heartlessness", "foully", "boors", "quailed", "esquimaux", "peint", "helas", "broils", "contenting", "troublous", "nulle", "kinswoman", "puissent", "bunten", "silencieux", "gegend", "quaffed", "fervency", "schuldig", "sortes", "courbe", "bethink", "eind", "comen", "serried", "careworn", "abstractedly", "besitzen", "unbent", "frolicsome", "foudre", "overrate", "directoire", "jambes", "betweene", "stolidly", "gerechtigkeit", "throned", "feind", "gnade", "saisir", "farine", "affably", "lendemain", "aristocracies", "hexameter", "volontaire", "pracht", "cravate", "aikana", "irgendwo", "fanns", "parricide", "strewing", "prosperously", "allurement", "curtsied", "mither", "recreant", "expiated", "bedienen", "roula", "blott", "allait", "reihen", "tournant", "entgegen", "bareness", "shamefaced", "bords", "perspicuity", "gegenstand", "visitant", "mulle", "organes", "kriege", "connue", "annos", "enow", "jocund", "unutterably", "entdeckt", "winna", "brahmanism", "appius", "inextinguishable", "batavian", "remarquable", "knaben", "betokened", "griechischen", "braccia", "merchantman", "habited", "betrachtet", "sympathising", "hvide", "rejoicings", "draga", "entreats", "conciliated", "foeman", "confute", "voulait", "unexpectedness", "indispensably", "gevoel", "endearments", "interj", "wheedling", "touchant", "aliud", "coyness", "quarante", "zuvor", "tirant", "teilnahme", "dirige", "mantling", "extenuate", "interessen", "battre", "quartiers", "bracht", "vormen", "disinherit", "restent", "aufenthalt", "calomel", "ouverts", "entsteht", "disquietude", "naething", "enormities", "kerchiefs", "helft", "remercie", "beruht", "genoux", "artillerymen", "hoeren", "flatteries", "unfading", "gehabt", "dight", "jouir", "waylay", "benefactions", "angenommen", "pitilessly", "pattered", "varandra", "assister", "daies", "cacha", "moest", "uncomplaining", "tulee", "pillowed", "courtes", "sayde", "saisi", "linien", "temor", "imploringly", "unsuspicious", "picturesqueness", "kende", "unresisting", "besitzt", "yez", "tronc", "begann", "musingly", "blieb", "protract", "connus", "disconcert", "argive", "profond", "choler", "pinioned", "tarrying", "hatless", "baith", "epigrammatic", "ilmarinen", "usurers", "boded", "dallied", "seekest", "couverte", "dettes", "schoot", "messire", "vorschlag", "semblent", "geschehen", "seelen", "traversa", "vassalage", "offenen", "manasses", "zuster", "breake", "auxquelles", "designedly", "whensoever", "conciliating", "frucht", "discouragements", "gingen", "semblable", "gegensatz", "inundations", "gelegenheit", "scandalised", 
"cinquante", "pudiera", "algonquins", "comported", "bange", "fasse", "servian", "stond", "unterschiede", "propitiated", "hogsheads", "contumely", "ollut", "connaitre", "provoquer", "herrschaft", "erinnert", "clamoured", "lacedaemon", "peines", "meint", "bourgeoise", "nerfs", "aiment", "begge", "possit", "nomme", "plis", "piquancy", "unpremeditated", "desirest", "declaiming", "bestimmen", "marchesa", "dizzily", "pauperism", "samnites", "schlief", "livrer", "sobald", "nettled", "allerede", "odeur", "comprends", "peroration", "preuves", "dahin", "verbergen", "aandacht", "vertreter", "daarna", "lourd", "wilfulness", "betrekking", "grunde", "retenir", "esteeming", "fallait", "ressemble", "klage", "hauing", "prolixity", "sonner", "subterfuges", "stof", "zahlreiche", "harer", "expostulated", "barbarities", "prudery", "bivouacked", "fusil", "langt", "passagers", "firesides", "vicissitude", "salido", "allerlei", "joyousness", "vorsicht", "behoved", "porticoes", "gebirge", "tragedian", "fastnesses", "nebst", "waarvan", "ruminated", "reprend", "commonalty", "lapset", "guerres", "indorse", "suffisante", "curst", "flounces", "upbraiding", "revenging", "feebler", "venger", "miteinander", "chaffed", "overstrained", "consolatory", "houre", "einzigen", "spreken", "contemporains", "heut", "augured", "verran", "sanscrit", "halfpence", "cutlasses", "cupfuls", "tremulously", "quavered", "puir", "governesses", "besluit", "hetzelfde", "veracious", "wesentlich", "readiest", "disconsolately", "squally", "captaine", "demandez", "inzwischen", "seules", "cumbrous", "palings", "satisfait", "geschikt", "devoirs", "rappeler", "croit", "orten", "habent", "didna", "demoniacal", "voraus", "distempers", "execration", "drest", "colonnes", "tabooed", "retenue", "guicciardini", "gaed", "vuestro", "cierta", "einfachen", "hundra", "belike", "saltpetre", "forborne", "cuyas", "tardily", "satisfaire", "dicere", "verbrechen", "zichzelf", "superabundant", "vilja", "versteht", "brengt", "scudding", "verschieden", "destinee", "deprecatory", "larboard", "keinem", "manuscrit", "shrubberies", "volkes", "pertinacity", "amabel", "parme", "herrlich", "hunc", "flurried", "avevano", "deferentially", "souviens", "mazarine", "infiniment", "overborne", "rempli", "goeden", "reinen", "engager", "jocose", "shawnees", "vaterland", "blessure", "restant", "maist", "ursache", "oublie", "eminences", "obscur", "afstand", "kepe", "cailloux", "enemigo", "toits", "weite", "pm", "video", "info", "ebay", "dvd", "website", "photos", "forums", "yahoo", "server", "pc", "feedback", "blog", "options", "audio", "fax", "rss", "porn", "faq", "sep", "powered", "electronics", "database", "microsoft", "url", "update", "downloads", "apr", "hosting", "videos", "tech", "linux", "jun", "listings", "sony", "google", "environmental", "pics", "sponsored", "eur", "pdf", "usr", "homepage", "lesbian", "logo", "airport", "phones", "cnet", "hp", "eg", "ip", "cameras", "ratings", "paypal", "thu", "rentals", "worldwide", "anti", "nokia", "tx", "anal", "interface", "technologies", "gmt", "xml", "input", "sexy", "mb", "multi", "graphics", "prev", "ads", "mini", "usb", "php", "trademarks", "phentermine", "keywords", "msn", "programming", "isbn", "az", "updates", "desktop", "pst", "fucking", "blogs", "evaluation", "implementation", "angeles", "networking", "australian", "kb", "connect", "dev", "vegas", "module", "pricing", "dvds", "documentation", "coverage", "automotive", "developing", "milf", "ringtones", "xbox", "www", "settings", "monitoring", "nc", "llc", "hardcore", "provider", 
"techniques", "rd", "websites", "servers", "keyword", "username", "fuck", "paperback", "classifieds", "providers", "upgrade", "auctions", "therapy", "samsung", "affiliate", "admin", "designated", "integrated", "cds", "ipod", "porno", "motorola", "strategies", "affiliates", "multimedia", "xp", "tits", "interactive", "developer", "sitemap", "lab", "cvs", "gamma", "weekend", "lcd", "dj", "parking", "ct", "hentai", "laser", "icon", "basketball", "stats", "hawaii", "nj", "clips", "rw", "vhs", "criteria", "pubmed", "logged", "laptop", "checkout", "tripadvisor", "zoom", "anime", "spam", "bytes", "gb", "bc", "consulting", "aa", "lingerie", "shemale", "parameters", "jazz", "profiles", "mom", "singles", "amounts", "usd", "mg", "pharmacy", "constitutes", "collectibles", "infrastructure", "intel", "soccer", "math", "healthcare", "preview", "devel", "rs", "voyeur", "cisco", "certification", "bookmark", "specials", "bbc", "avg", "panasonic", "permalink", "viagra", "src", "faqs", "trackback", "revised", "broadband", "pda", "dsl", "webmaster", "dna", "diff", "sql", "specs", "ss", "yeah", "sexo", "javascript", "gps", "acc", "euro", "encyclopedia", "interracial", "tn", "suppliers", "playstation", "annotation", "gnu", "lesbians", "aol", "modules", "backup", "personals", "kevin", "perl", "bike", "utc", "albums", "verzeichnis", "hosted", "developers", "kits", "variables", "agenda", "template", "investor", "wildlife", "elementary", "sponsors", "unlimited", "printable", "hardcover", "setup", "booking", "ericsson", "supplier", "bluetooth", "tm", "upcoming", "scores", "weblog", "nh", "alerts", "mysql", "offline", "lifestyle", "converter", "blowjob", "safari", "pdt", "parameter", "adapter", "processor", "node", "hockey", "micro", "laptops", "regulatory", "db", "ph", "epinions", "affordable", "databases", "psp", "ds", "discounts", "boobs", "jennifer", "demo", "lg", "gourmet", "nfl", "avatar", "dildo", "featuring", "misc", "calculator", "holdem", "awareness", "spyware", "packaging", "wallpaper", "biggest", "alumni", "hollywood", "wikipedia", "diabetes", "ml", "wow", "mapping", "indexed", "grid", "plasma", "voip", "consultants", "implemented", "sf", "blogger", "kg", "textbooks", "seminar", "latina", "nasa", "sexcam", "accessibility", "templates", "tab", "router", "concrete", "folder", "womens", "css", "upload", "milfhunter", "mc", "metro", "toshiba", "qty", "airline", "uniprotkb", "beastiality", "lp", "consultant", "researchers", "unsubscribe", "bio", "upskirt", "exam", "logos", "milfs", "sustainable", "pcs", "honda", "cinema", "ag", "blowjobs", "deluxe", "monitors", "sci", "edt", "pmid", "recruitment", "siemens", "expertise", "medline", "innovative", "tampa", "ks", "python", "tutorial", "cruises", "moderator", "tutorials", "collectables", "scripts", "abc", "stereo", "operational", "airlines", "livecam", "hobbies", "telecommunications", "bestiality", "biz", "voltage", "nintendo", "vinyl", "highlights", "designers", "ongoing", "imaging", "blackjack", "analyst", "reliability", "gcc", "ringtone", "oriented", "desktops", "semester", "cumshot", "applies", "casinos", "filters", "nv", "notebooks", "algorithm", "semi", "proteins", "exp", "debian", "epson", "terrorism", "cpu", "allocated", "anytime", "nr", "layout", "initiatives", "lol", "mp", "optimization", "genetic", "modem", "mph", "evaluate", "toyota", "nationwide", "vector", "limousines", "destinations", "pipeline", "ethernet", "postposted", "nba", "busty", "coordinator", "epa", "coupons", "cialis", "bb", "ron", "modeling", "memorabilia", "alberta", "org", "okay", 
"workplace", "wallpapers", "firefox", "eligibility", "clinic", "involvement", "placement", "vbulletin", "funded", "motorcycle", "presentations", "wiki", "radar", "citysearch", "nsw", "pci", "guestbook", "pizza", "rc", "bmw", "mpeg", "shoppers", "cst", "ceo", "twiki", "counseling", "medication", "shareware", "dicke", "configure", "institutional", "metabolism", "rm", "pdas", "outcomes", "sri", "thumbnail", "api", "acrobat", "thermal", "config", "urw", "regardless", "wishlist", "sms", "shit", "trailers", "syndrome", "iraqi", "foto", "tabs", "gm", "rt", "shopper", "nikon", "customize", "sensor", "telecom", "indicators", "thai", "emissions", "dd", "boost", "spanking", "supplements", "icons", "tranny", "catering", "aud", "camcorder", "implementing", "labs", "dynamics", "crm", "rf", "cumshots", "bukkake", "shorts", "td", "amp", "sm", "usc", "environments", "trembl", "blvd", "amd", "emails", "wv", "insider", "seminars", "ns", "vitamin", "processed", "functionality", "intermediate", "billing", "diesel", "bs", "promotional", "chevrolet", "compaq", "authentication", "showtimes", "sectors", "bandwidth", "img", "schedules", "cached", "rpm", "florist", "webcam", "nutten", "automated", "pee", "nipples", "tvs", "manga", "mhz", "orientation", "analog", "packard", "payday", "deadline", "robot", "assess", "gnome", "gadgets", "automation", "impacts", "cl", "ieee", "corp", "personalized", "gt", "conditioning", "teenage", "nyc", "partnerships", "slots", "toolbar", "basically", "genes", "firewall", "scanner", "occupational", "hs", "integer", "treatments", "camcorders", "basics", "rv", "struct", "genetics", "punk", "enrollment", "interfaces", "advertisers", "deleted", "rica", "inkjet", "peripherals", "brochure", "bestsellers", "eminem", "antenna", "bikini", "decor", "lookup", "harvard", "podcast", "interactions", "nike", "pissing", "plugin", "latinas", "customized", "dealtime", "temp", "intro", "zus", "fisting", "tramadol", "jeans", "fonts", "quiz", "mx", "sigma", "xhtml", "recordings", "ext", "minimal", "polyphonic", "outsourcing", "adjustable", "allocation", "michelle", "ts", "demonstrated", "handheld", "florists", "installing", "ncaa", "phd", "blogging", "cycling", "messaging", "pentium", "aka", "sampling", "refinance", "cookie", "goto", "calendars", "compatibility", "netscape", "rankings", "measuring", "tcp", "dv", "israeli", "medicare", "skiing", "hewlett", "flickr", "priorities", "bookstore", "timing", "parenting", "fotos", "britney", "freeware", "fucked", "pharmaceutical", "workforce", "nodes", "ghz", "targeted", "organizational", "skype", "gamecube", "rr", "titten", "excerpt", "halloween", "methodology", "housewares", "resistant", "recycling", "gbp", "coding", "slideshow", "tracker", "hiking", "jelsoft", "headset", "distributor", "archived", "photoshop", "jp", "bt", "diagnostic", "rfc", "downloaded", "sl", "seo", "isp", "nissan", "acoustic", "cassette", "initially", "hb", "jpg", "tc", "sunglasses", "planner", "stadium", "mins", "sequences", "coupon", "ssl", "gangbang", "opt", "flu", "mlb", "tagged", "bikes", "gp", "submissions", "oem", "lycos", "zdnet", "broadcasting", "artwork", "cosmetic", "terrorist", "informational", "ecommerce", "dildos", "coordination", "connector", "brad", "combo", "activation", "mitsubishi", "constraints", "dimensional", "mozilla", "toner", "latex", "anymore", "oclc", "locator", "pantyhose", "plc", "msg", "nylon", "palestinian", "trim", "pixels", "hispanic", "cv", "cb", "procurement", "espn", "untitled", "totals", "marriott", "starring", "referral", "nhl", "optimal", "protocols", 
"highlight", "reuters", "fc", "gel", "omega", "evaluated", "assignments", "fw", "doug", "saver", "grill", "gs", "aaa", "wanna", "macintosh", "projector", "std", "herbal", "retailer", "vitamins", "vid", "panties", "connectivity", "algorithms", "bbw", "collaborative", "fda", "turbo", "thats", "hdtv", "asin", "spotlight", "reset", "expansys", "connecting", "logistics", "kodak", "danish", "scenario", "fs", "approx", "symposium", "nn", "weekends", "screenshots", "deviant", "adapters", "macro", "mandatory", "syndication", "gym", "kde", "viewer", "signup", "cams", "receptor", "piss", "autos", "deployment", "proc", "directive", "fx", "dl", "starter", "upgrades", "tapes", "governing", "retailers", "ls", "cbs", "spec", "realty", "instructional", "phpbb", "permissions", "biotechnology", "outreach", "lopez", "upskirts", "debug", "boob", "exclude", "peeing", "equations", "bingo", "spatial", "respondents", "lt", "ceramic", "scanners", "atm", "xanax", "eq", "unavailable", "assessments", "cms", "footwear", "beijing", "utils", "phys", "sensitivity", "calgary", "dialog", "wellness", "antivirus", "previews", "pickup", "nascar", "mega", "moms", "addiction", "chrome", "ecology", "botswana", "nav", "cyber", "verizon", "enhancement", "clone", "dicks", "lambda", "baseline", "silicon", "beatles", "soundtrack", "lc", "cnn", "lil", "participant", "scholarships", "recreational", "electron", "motel", "sys", "solaris", "icq", "yamaha", "medications", "homework", "advertiser", "encryption", "downloadable", "scsi", "focuses", "toxic", "dns", "thumbnails", "pty", "ws", "bizrate", "sox", "gamespot", "wordpress", "vulnerability", "accountability", "celebrate", "zoophilia", "univ", "scheduling", "therapeutic", "travesti", "relocation", "np", "competitions", "tft", "jvc", "vibrator", "cosmetics", "concentrations", "vibrators", "estonia", "dt", "cgi", "showcase", "pixel", "focusing", "viruses", "gc", "stickers", "leasing", "lauren", "macromedia", "additionally", "nano", "copyrights", "mastercard", "updating", "kijiji", "conjunction", "cfr", "validation", "cholesterol", "slovenia", "folders", "routers", "starsmerchant", "arthritis", "bios", "pmc", "myspace", "theorem", "nb", "stylus", "topless", "structured", "jeep", "mba", "reload", "distributors", "levitra", "mono", "particles", "coordinate", "widescreen", "squirting", "rx", "apps", "gsm", "rebate", "meetup", "ddr", "rec", "forecasts", "sluts", "ciao", "ampland", "chem", "shopzilla", "payroll", "cookbook", "uploaded", "americas", "connectors", "twinks", "techno", "elvis", "latvia", "jd", "gpl", "irc", "dm", "bangkok", "photographers", "infections", "brisbane", "configured", "amino", "clinics", "mls", "saddam", "threesome", "handjob", "transexuales", "technician", "inline", "executives", "audi", "staffing", "cognitive", "closure", "ppc", "volt", "div", "playlist", "registrar", "jc", "cancellation", "plugins", "sensors", "freebsd", "acer", "prostores", "reseller", "dist", "intake", "relevance", "tucson", "swingers", "headers", "geek", "xnxx", "hormone", "childrens", "thumbzilla", "avi", "pichunter", "thehun", "columnists", "bdsm", "ide", "valium", "rpg", "cordless", "pd", "prot", "trivia", "adidas", "tgp", "retro", "livesex", "statewide", "semiconductor", "boolean", "diy", "interact", "olympics", "identifier", "worldsex", "jpeg", "startup", "suzuki", "ati", "calculators", "abs", "slovakia", "flip", "rna", "chrysler", "plumbing", "nuke", "projectors", "pharmacies", "ln", "introducing", "nicole", "latino", "uc", "asthma", "developmental", "zope", "regulated", "gmbh", "buf", "ld", 
"webshots", "sprint", "inputs", "genome", "documented", "paperbacks", "keyboards", "eco", "indie", "detector", "notifications", "msgid", "transexual", "mainstream", "evaluating", "subcommittee", "suse", "mf", "motels", "msgstr", "volleyball", "mw", "adipex", "toolbox", "ict", "browsers", "dp", "surfing", "creativity", "oops", "nipple", "behavioral", "bathrooms", "sku", "ht", "insights", "midwest", "karaoke", "nonprofit", "hereby", "containers", "integrate", "mobiles", "screenshot", "kelkoo", "consortium", "pts", "seafood", "rh", "rrp", "playboy", "fg", "mazda", "roster", "symantec", "wichita", "nasdaq", "ooo", "hz", "timer", "highs", "ipaq", "alignment", "masturbating", "comm", "nhs", "aye", "visibility", "reprints", "accessing", "midlands", "analysts", "dx", "sk", "locale", "biol", "oc", "fujitsu", "exams", "aj", "medicaid", "treo", "infrared", "tex", "cia", "sublimedirectory", "poly", "dod", "wp", "naturals", "neo", "motivation", "lenders", "pharmacology", "bloggers", "powerpoint", "surplus", "sonic", "obituaries", "belarus", "zoning", "guitars", "lightweight", "tp", "jm", "dpi", "scripting", "gis", "snapshot", "caring", "expo", "dominant", "specifics", "itunes", "cn", "newbie", "bali", "sponsorship", "headphones", "volkswagen", "marker", "strengths", "emirates", "terrorists", "airfare", "distributions", "vaccine", "crap", "viewpicture", "volvo", "bookings", "minolta", "gui", "rn", "abstracts", "pharmaceuticals", "andale", "remix", "thesaurus", "ecological", "cg", "appraisal", "maritime", "href", "benz", "wifi", "fwd", "homeland", "championships", "disco", "endif", "lexmark", "cleaners", "hwy", "cashiers", "guam", "preventing", "compliant", "hotmail", "refurbished", "activated", "conferencing", "trackbacks", "marilyn", "findlaw", "programmer", "vocals", "yrs", "foo", "gba", "bm", "nightlife", "footage", "howto", "entrepreneur", "freelance", "screensaver", "metallica", "headline", "str", "bahrain", "academics", "pubs", "shemales", "screensavers", "vip", "clicks", "mardi", "sustainability", "formatting", "nutritional", "weblogs", "timeline", "rj", "affiliation", "nudist", "ensures", "sync", "telephony", "realtors", "graphical", "aerospace", "meaningful", "shortcuts", "voyeurweb", "specifies", "logitech", "briefing", "belkin", "accreditation", "wav", "modular", "microphone", "moderators", "memo", "kazakhstan", "standings", "gratuit", "fbi", "qatar", "porsche", "cayman", "rp", "tba", "usgs", "kathy", "graphs", "surround", "lows", "controllers", "consultancy", "hc", "italiano", "rca", "fp", "sticker", "stakeholders", "hydrocodone", "gst", "cornell", "mailto", "promo", "jj", "schema", "catalogs", "quizzes", "obj", "myanmar", "metadata", "floppy", "handbags", "ev", "incurred", "questionnaire", "dept", "euros", "makeup", "troubleshooting", "uzbekistan", "indexes", "pac", "rl", "erp", "gl", "ui", "dh", "fragrances", "vpn", "fcc", "markers", "assessing", "eds", "roommate", "webcams", "webmasters", "df", "computational", "acdbentity", "handhelds", "reggae", "whats", "rides", "rehab", "allergy", "enzyme", "zshops", "condo", "pokemon", "amplifier", "ambien", "worldcat", "titanium", "contacted", "cdt", "recorders", "casio", "postings", "postcards", "dude", "transsexual", "pf", "informative", "girlfriend", "bloomberg", "beats", "scuba", "checklist", "bangbus", "lauderdale", "scenarios", "gazette", "hitachi", "divx", "batman", "hearings", "calibration", "eval", "anaheim", "ping", "prerequisite", "sao", "pontiac", "regression", "trainers", "muze", "enhancements", "renewable", "passwords", "celebs", 
"gmc", "hh", "adsl", "advisors", "finals", "fd", "acrylic", "tuner", "asn", "toddler", "acne", "listprice", "libs", "cadillac", "malawi", "pk", "sagem", "knowledgestorm", "ppm", "referenced", "gays", "exec", "warcraft", "catalyst", "vcr", "prepaid", "electro", "vietnamese", "lexus", "maui", "handjobs", "squirt", "plastics", "postcard", "tsunami", "internationally", "psi", "buses", "expedia", "pct", "wb", "smilies", "vids", "shakira", "qld", "dk", "findarticles", "routines", "issn", "podcasts", "sas", "ferrari", "outputs", "insulin", "mysimon", "ambient", "oecd", "prostate", "adaptor", "hyundai", "xerox", "merger", "softball", "referrals", "quad", "firewire", "mods", "nextel", "rwanda", "integrating", "vsnet", "msie", "wn", "liz", "ccd", "sv", "burlington", "researcher", "kruger", "viral", "aruba", "realtor", "chassis", "dubai", "llp", "pediatric", "boc", "dg", "asus", "techrepublic", "vg", "filme", "craps", "fuji", "brochures", "tmp", "alot", "benchmark", "highlighted", "antibody", "wiring", "ul", "js", "webpage", "hostels", "pn", "wendy", "diffs", "mumbai", "ozone", "disciplines", "nvidia", "pasta", "serum", "motherboard", "runtime", "inbox", "focal", "bibliographic", "incl", "hq", "propecia", "nbc", "samba", "inspections", "manually", "wt", "flex", "mv", "mpg", "retrieval", "cindy", "lolita", "carb", "importantly", "rb", "upc", "dui", "mh", "discrete", "sexuality", "polyester", "kinase", "televisions", "specializing", "pvc", "blah", "mime", "motorcycles", "thinkpad", "cunt", "feof", "bunny", "chevy", "longest", "tions", "dentists", "usda", "workstation", "flyer", "dosage", "urls", "customise", "marijuana", "adaptive", "enb", "gg", "fairfield", "invision", "emacs", "jackie", "cardiovascular", "ww", "sparc", "cardiac", "learners", "gd", "configuring", "guru", "convergence", "numeric", "kinda", "malpractice", "dylan", "rebates", "pix", "mic", "basename", "kyle", "obesity", "vertex", "bw", "hepatitis", "nationally", "andorra", "mj", "waiver", "specialties", "cingular", "bacterial", "lf", "ata", "bufing", "pam", "dryer", "nato", "funky", "secretariat", "scary", "mpegs", "brunei", "slovak", "mixer", "wc", "sbjct", "demographic", "washer", "springer", "evaluations", "helicopter", "hk", "powerseller", "ratios", "maximize", "cj", "workout", "mtv", "optimize", "leu", "namespace", "align", "peripheral", "confidentiality", "changelog", "orgasm", "condos", "greensboro", "tulsa", "fridge", "qc", "simpsons", "upgrading", "pgp", "frontpage", "trauma", "flashers", "subaru", "tf", "programmers", "pj", "monitored", "installations", "spank", "cw", "motivated", "wr", "fioricet", "rg", "bl", "vc", "wx", "figured", "currencies", "positioning", "heater", "promoted", "moldova", "paxil", "temporarily", "ntsc", "thriller", "apnic", "frequencies", "mariah", "usps", "bg", "planners", "intranet", "psychiatry", "conf", "wma", "aquarium", "cir", "looksmart", "modems", "paintball", "prozac", "acm", "glucose", "norm", "playback", "supervisors", "ips", "dsc", "neural", "hometown", "transcripts", "collectible", "handmade", "entrepreneurs", "robots", "keno", "gtk", "mailman", "sanyo", "nested", "biodiversity", "movers", "workflow", "voyuer", "subsidiaries", "tamil", "garmin", "ru", "fuzzy", "indonesian", "therapist", "mrna", "budgets", "toolkit", "erotica", "dts", "qt", "airplane", "istanbul", "sega", "viewers", "cdna", "harassment", "barbie", "soa", "smtp", "replication", "receptors", "optimum", "neon", "interventions", "internship", "snowboard", "beastality", "webcast", "evanescence", "coordinated", "maldives", 
"firmware", "lm", "canberra", "mambo", "bool", "cho", "jumping", "antibodies", "polymer", "immunology", "wiley", "bbs", "spas", "convicted", "indices", "roommates", "adware", "intl", "zoloft", "activists", "ultram", "cursor", "stuffed", "restructuring", "simulations", "cz", "cleanup", "crossword", "conceptual", "hl", "bhutan", "liechtenstein", "redhead", "tractor", "unwrap", "telecharger", "safer", "instrumentation", "ids", "groundwater", "gzip", "ricky", "ctrl", "theta", "lightbox", "swaziland", "mediawiki", "configurations", "ethnicity", "lesotho", "rfid", "retailing", "oscommerce", "nonfiction", "homeowners", "racism", "vaio", "gamers", "slr", "licensee", "bisexual", "rel", "ign", "installer", "powershot", "bestselling", "insure", "packaged", "behaviors", "clarify", "activate", "tg", "pv", "sandisk", "vitro", "cosponsors", "hyatt", "burundi", "demos", "btw", "psychiatric", "tittens", "teenagers", "grading", "valentines", "vonage", "wetlands", "quicktime", "underwater", "pbs", "vanuatu", "erotik", "supportive", "vw", "targeting", "preschool", "dw", "hm", "jl", "hg", "megapixel", "booklet", "cancun", "reimbursement", "turnover", "cheryl", "radeon", "italicized", "chromosome", "optimized", "ffl", "upgraded", "colorful", "popup", "mk", "garnet", "ppp", "oceania", "formulation", "fresno", "handbag", "bypass", "ies", "logout", "boyfriend", "hogtied", "wl", "clipart", "detectors", "newsgroups", "spectra", "mailbox", "athlon", "iq", "landscaping", "mol", "korn", "directv", "viable", "deviantart", "qa", "hunks", "appellant", "xsl", "lithium", "ctr", "planting", "alphabetically", "facials", "calories", "airways", "refill", "reagan", "kazaa", "einstein", "pornstar", "vcd", "jumper", "majors", "headsets", "toxicity", "sz", "denim", "greenville", "scat", "neighborhoods", "buick", "slipknot", "mst", "residual", "bf", "bash", "ngos", "storesshop", "postgraduate", "daytona", "wastewater", "constructor", "technicians", "debbie", "issuance", "sj", "mbps", "nationals", "ij", "alito", "waterfront", "diagnosed", "biotech", "turkmenistan", "woodland", "iranian", "unsecured", "kyoto", "cis", "eb", "barcode", "xd", "regulator", "txt", "postcode", "makefile", "ansi", "vicodin", "shawn", "suv", "lacrosse", "crafted", "eritrea", "bbq", "wh", "debit", "dmx", "edits", "unwanted", "xr", "bn", "noaa", "lemma", "kyrgyzstan", "sensing", "postgresql", "kbps", "trac", "dolby", "ecosystem", "pkg", "dashboard", "nikki", "technorati", "esl", "alzheimer", "jk", "wk", "handler", "semantic", "globalization", "atv", "vga", "atari", "sch", "reebok", "mfg", "jb", "blogthis", "inspirational", "wilmington", "faso", "sdram", "motherboards", "blk", "inherent", "jw", "tailored", "vodafone", "romanian", "xt", "ucla", "celeb", "assoc", "palo", "usability", "backyard", "novell", "refunds", "newsroom", "tina", "kia", "taxpayer", "fb", "cola", "boise", "bsd", "saab", "refinancing", "cert", "buffy", "doctoral", "backpack", "npr", "identities", "tajikistan", "sheraton", "snacks", "booster", "taxable", "imc", "ufo", "linksys", "dentistry", "renal", "fedora", "nyse", "guideline", "freezer", "pcr", "bnet", "binoculars", "demographics", "enroll", "daemon", "buddies", "kc", "crashes", "outlines", "steroids", "pogo", "konica", "hotline", "amps", "accountants", "coefficient", "transvestite", "upstream", "digg", "ladyboy", "hussein", "biochemistry", "duplication", "scottsdale", "ninja", "tj", "avalon", "voucher", "tw", "wheelchair", "gw", "epidemiology", "pentagon", "diabetic", "stressed", "libdevel", "dvi", "biomedical", "gameboy", "subset", 
"gucci", "https", "websphere", "cheney", "zombie", "recycled", "followup", "nih", "hdd", "bidders", "simulator", "exporters", "ninth", "mutant", "ssh", "authoring", "specializes", "irvine", "olds", "ramp", "jakarta", "tl", "pgsql", "malls", "jensen", "impairment", "scooter", "wap", "mcgraw", "lr", "cheerleader", "edu", "lotion", "substrate", "mmc", "ashanti", "homemade", "ukrainian", "freshwater", "topical", "rms", "isdn", "coded", "alcatel", "suriname", "parkway", "femdom", "palau", "duff", "ck", "bonuses", "scam", "biking", "microsystems", "timeout", "aerosmith", "resellers", "portfolios", "ops", "semantics", "scarface", "beige", "auditing", "rolex", "amplifiers", "coli", "executable", "pentax", "restart", "overstock", "eps", "hmm", "explores", "torque", "memberships", "renting", "icann", "ticketmaster", "cdc", "meridia", "hsn", "oncology", "nf", "woven", "bloglines", "audioslave", "wikimedia", "lipitor", "remodeling", "redhat", "enom", "haha", "coordinating", "holistic", "salsa", "encarta", "childcare", "dvr", "cdn", "soundtracks", "napster", "wong", "debugging", "rechargeable", "engineered", "jerseys", "pw", "superstore", "hex", "wg", "blogroll", "evite", "micronesia", "dreamweaver", "diets", "sauna", "multiplayer", "crt", "caicos", "qaeda", "shareholder", "kitts", "tivo", "deletion", "ptr", "macau", "mudvayne", "ceramics", "freestyle", "organizers", "smartphone", "cmd", "hypertension", "searchable", "aguilera", "servicing", "counselling", "ecards", "acura", "clit", "cops", "fedex", "snowboarding", "laserjet", "cooker", "lego", "microbiology", "internships", "sgh", "vectors", "craigslist", "hamas", "shane", "heaters", "rdf", "bj", "visualization", "newswire", "hf", "spermshack", "brokerage", "overtime", "staind", "wd", "sourcing", "filings", "boeing", "sizing", "exceeded", "presley", "godsmack", "labeling", "whois", "paradigm", "msc", "linguistics", "snmp", "standardized", "liu", "gta", "nutrients", "kosovo", "barbuda", "napa", "abt", "nickelback", "lj", "nazi", "jenna", "arrays", "syllabus", "rgb", "rodriguez", "animations", "activism", "fargo", "chairperson", "reged", "leverage", "sgt", "anguilla", "radisson", "apc", "hitler", "handset", "vulnerabilities", "pga", "activist", "palestinians", "ldap", "prerequisites", "maintainer", "benq", "lx", "bv", "knoxville", "mentoring", "pak", "mos", "didnt", "classrooms", "residency", "deadlines", "tk", "bookshop", "nonetheless", "hifi", "gf", "forex", "diagnostics", "ew", "dreamcast", "tumors", "vm", "kyocera", "nudes", "rationale", "hubs", "pasadena", "bissau", "subway", "hpa", "fgets", "citrus", "cameltoe", "reuse", "sightseeing", "therapies", "widget", "renault", "comoros", "suede", "selector", "gop", "diaper", "hotwire", "ngo", "pvt", "atp", "subtotal", "coefficients", "duplex", "mvp", "jh", "analyzer", "charset", "clin", "nutrient", "zhang", "underway", "govt", "cbc", "excerpts", "formatted", "gorillaz", "inhibitors", "uu", "prestigious", "deploy", "gameplay", "autism", "taxpayers", "martinez", "bombing", "wwe", "metrics", "winxp", "inability", "goo", "coronary", "bldg", "mediated", "prom", "scans", "vaginal", "isps", "rookie", "theatrical", "interdisciplinary", "kerala", "enzymes", "analytics", "jacuzzi", "lesbianas", "parser", "razr", "jt", "styling", "snack", "weezer", "randomly", "semiconductors", "coca", "acs", "peugeot", "bollywood", "mentally", "horoscopes", "noun", "xmas", "silicone", "cpa", "dn", "scoreboard", "proliferation", "squid", "hw", "customised", "trilogy", "hike", "imdb", "clic", "ars", "pharmacist", "marley", "typepad", 
"xs", "deliveries", "recruiters", "screaming", "cygwin", "gprs", "png", "pornography", "robotics", "chopped", "contexts", "init", "svn", "oslo", "foreclosures", "audits", "pesticides", "fave", "residues", "ashlee", "viet", "orbitz", "invasive", "helsinki", "hardback", "vuitton", "nextag", "inconsistent", "narnia", "alfa", "twp", "geoff", "rename", "atx", "markup", "breakthrough", "ietf", "beneficiaries", "copier", "uncategorized", "xm", "geforce", "defaults", "foreclosure", "clarification", "espresso", "hendrix", "homeowner", "mib", "tees", "glu", "winnt", "tec", "hydro", "nonlinear", "spokane", "playa", "gh", "csi", "radioactive", "desserts", "doi", "socio", "pcmcia", "grooming", "validate", "nederlands", "bst", "filmography", "outerwear", "parse", "dsp", "implementations", "attendees", "toc", "downstream", "webcasts", "accelerator", "masterbating", "flyers", "tacoma", "radiology", "locals", "mms", "tungsten", "typed", "desc", "datasheet", "shutdown", "xenical", "computerworld", "tattoos", "peptide", "sweatshirt", "hassle", "regents", "gn", "docket", "dll", "elsevier", "nordic", "privat", "geometric", "taxonomy", "deli", "intern", "nsf", "sata", "xxxx", "megan", "allergies", "bangalore", "clutter", "predator", "xlibs", "belgian", "adolescents", "djs", "coventry", "clamp", "pricegrabber", "cloning", "args", "madden", "smugmug", "visually", "alright", "laguna", "limo", "aligned", "pesticide", "transformers", "avid", "outpatient", "lam", "encrypted", "wholesalers", "coldfusion", "dcr", "shooter", "switchboard", "vince", "fluorescent", "cookware", "lavigne", "param", "environmentally", "gradient", "ncbi", "inserts", "kvm", "programmable", "bibtex", "chemotherapy", "vr", "dysfunction", "livejournal", "diazepam", "rodeo", "sampler", "jovi", "timetable", "corrosion", "positioned", "checker", "workstations", "cathy", "darren", "cmp", "udp", "sts", "milfseeker", "sbc", "midland", "synchronization", "informatics", "oakley", "rants", "tarot", "didrex", "brenda", "purdue", "figurines", "footer", "maternal", "jedi", "seamless", "ghetto", "thr", "panty", "subunit", "aires", "commercials", "regulators", "influential", "carlson", "yy", "benchmarks", "ug", "emi", "retrieving", "reactor", "kiribati", "telnet", "biker", "parked", "financials", "peanut", "converters", "nauru", "dishwasher", "rcs", "neurons", "ios", "feminist", "yds", "ive", "ecosystems", "gadget", "cctv", "leukemia", "deco", "ticker", "habitats", "remover", "incorporates", "brasil", "unicode", "prod", "spreadsheet", "lowering", "discography", "encoded", "researching", "pediatrics", "sushi", "asap", "onsite", "mapquest", "deleting", "compilations", "therapists", "appealing", "lifestyles", "dst", "swimwear", "applet", "pricetool", "threesomes", "quinn", "daewoo", "antigen", "ultrasound", "mgmt", "procedural", "cern", "macros", "msa", "aussie", "advisories", "lendingtree", "belmont", "acad", "bilingual", "barbecue", "localization", "customization", "gigs", "indexing", "lori", "spacecraft", "ivoire", "montserrat", "telecommunication", "coatings", "eureka", "pcb", "sdk", "preparedness", "systemic", "playoffs", "adaptors", "forecasting", "specialize", "drm", "enya", "masterbation", "tubing", "bloomington", "conditioner", "plaintiffs", "vanessa", "nucleotide", "bronx", "listmania", "middot", "netgear", "panda", "crc", "symbian", "emailed", "chf", "constants", "clr", "isuzu", "webring", "redirect", "interoperability", "msrp", "tuvalu", "shampoo", "neoplasms", "artifacts", "vac", "pseudo", "dinar", "carat", "microphones", "nobel", "galaxies", 
"verlag", "scrapbook", "dummies", "magnesium", "pagina", "kenwood", "roundup", "imac", "faxes", "plump", "uss", "wwii", "methyl", "campuses", "ramada", "tesco", "dba", "architectures", "acdbline", "getty", "cdr", "msi", "prog", "firewalls", "tester", "polling", "fifa", "bins", "consumables", "highbeam", "msdn", "statistically", "mps", "agp", "cont", "adverts", "programmed", "lohan", "unclear", "aromatherapy", "nederland", "stockton", "clearwater", "trustpass", "topology", "airborne", "antennas", "sundance", "lifecycle", "dhcp", "trucking", "iraqis", "shortcut", "racist", "profitability", "unc", "fairmont", "globally", "aaliyah", "reboot", "newsgroup", "audiovox", "phuket", "jf", "metabolic", "sarasota", "billed", "lim", "toons", "danielle", "exc", "relied", "mesothelioma", "trafficking", "eff", "bizjournals", "michele", "kk", "cutie", "creampie", "seoul", "printf", "columnist", "transplantation", "jerome", "nwt", "rammstein", "scrapbooking", "sequential", "uniquely", "goodies", "auth", "gina", "sugababes", "rsa", "rcw", "whistler", "airfares", "huntsville", "ths", "layouts", "servicemagic", "herpes", "newsgator", "contractual", "akron", "bh", "rebounds", "compressor", "samantha", "khz", "webmail", "carcinoma", "taipei", "stance", "aps", "kumar", "gemini", "kinky", "supervisory", "ostg", "kl", "chiropractic", "throughput", "netbsd", "misplace", "serviced", "opener", "vaccines", "jigsaw", "jumbo", "unspecified", "jsp", "turbine", "percentages", "lett", "maths", "probes", "frustration", "americana", "complexes", "varsity", "insurer", "croatian", "multicast", "certifications", "pradesh", "px", "proton", "allegedly", "kaplan", "linens", "roast", "testers", "debuginfo", "complainant", "inhibitor", "knowledgeable", "jimi", "hummer", "telefonsex", "putative", "hyperlink", "presario", "motorsports", "getaway", "robbins", "kimberly", "unsure", "dinosaur", "tac", "ashland", "dlp", "royce", "sophomore", "antibiotics", "landfill", "warehousing", "filesize", "celebrex", "verisign", "registrations", "wavelength", "slashdot", "transvestites", "cheerleaders", "friedman", "coolpix", "blocker", "tawnee", "hud", "mov", "entrepreneurship", "percentile", "linkage", "lh", "ripper", "afp", "kd", "accomodation", "mcafee", "counselors", "competitiveness", "burger", "microscopy", "hyper", "madthumbs", "linkin", "gmail", "utf", "scooters", "reserveamerica", "organisational", "ezine", "reactive", "clipboard", "gamer", "alexa", "pollutants", "directorate", "savvy", "uploads", "terri", "norms", "implants", "alibaba", "hormones", "hype", "addr", "nfs", "urinary", "institut", "condoms", "directives", "zelda", "fetal", "dong", "reportedly", "edi", "kudoz", "replay", "flavors", "ig", "quickcheck", "ziff", "placebo", "lotto", "textures", "pid", "dep", "seagate", "nanotechnology", "toggle", "emc", "spacing", "frameworks", "mergers", "filtration", "gpa", "cpus", "incremental", "corr", "sbin", "scalable", "ji", "intra", "wetland", "olson", "methodologies", "fremont", "someday", "sha", "exporter", "mri", "hum", "ifdef", "killers", "multicultural", "lasers", "dataset", "savers", "powerpc", "steelers", "enhances", "fucks", "relational", "graffiti", "cassettes", "pussies", "doesnt", "tiff", "cnc", "refrigeration", "houghton", "countdown", "decker", "natl", "extern", "enron", "codec", "broadcasts", "checksum", "directional", "breeders", "lethal", "decals", "macs", "archival", "seismic", "baccarat", "mommy", "teenager", "smokers", "declining", "lineup", "hotspot", "bellevue", "hj", "req", "gigabit", "worksheet", "allocate", 
"aftermath", "roach", "continuum", "feng", "pep", "nylons", "chipset", "msnbc", "hillary", "factual", "carisoprodol", "tutoring", "spectroscopy", "gemstone", "psc", "phonephone", "unregistered", "moto", "gonzalez", "dior", "pops", "osha", "goldberg", "preteen", "bonding", "insurers", "prototypes", "proactive", "issuer", "sponsoring", "malaysian", "easton", "sentencing", "bulldogs", "worthwhile", "ideology", "cervical", "tallahassee", "userpic", "attribution", "acta", "yep", "iec", "differs", "starters", "uml", "bur", "kris", "sizeof", "spi", "regs", "shinedown", "standby", "arin", "unisex", "wallets", "identifiable", "ethanol", "cannabis", "rsvp", "dynamically", "grenadines", "constr", "subtitle", "librarians", "manson", "autocad", "powerbook", "swinger", "infiniti", "ppl", "williamsburg", "supp", "snyder", "budgeting", "backpacks", "resale", "mikes", "scalar", "unresolved", "hep", "seiko", "electromagnetic", "arial", "tos", "zoofilia", "hcl", "validated", "sco", "annotate", "joomla", "helix", "sx", "env", "biomass", "phs", "hierarchical", "lesions", "financed", "surnames", "reconditioned", "allergic", "rk", "abn", "eliminates", "addict", "matte", "melanie", "secunia", "metering", "genetically", "zebra", "runway", "admits", "chennai", "ions", "asshole", "faroe", "glendale", "speedway", "sweatshirts", "yay", "activex", "logon", "recruiter", "popcorn", "espanol", "disadvantaged", "trong", "niue", "ux", "supermarket", "mfr", "boo", "hmmm", "genomic", "helpdesk", "refuses", "afb", "adhd", "avian", "exe", "visas", "matrices", "anyways", "xtreme", "etiology", "tcl", "mellon", "webmd", "personalised", "hospice", "zerodegrees", "qos", "exhibitor", "sportswear", "recap", "toddlers", "astro", "chanel", "jabber", "hgh", "hx", "rotate", "fema", "subwoofer", "amortization", "neurology", "ack", "radiator", "competencies", "hotspots", "trainee", "nielsen", "podcasting", "centennial", "tuna", "bluegrass", "wipe", "acronyms", "autographed", "loader", "latency", "themed", "messy", "dmc", "ments", "empowerment", "replacements", "subtitles", "gcse", "acupuncture", "workload", "highlighting", "grassroots", "gentoo", "redevelopment", "cellphone", "sax", "triggered", "frontgate", "routinely", "asc", "uploading", "managerial", "nsu", "celine", "finepix", "wks", "tonnes", "hypermail", "thunderbird", "investigative", "letras", "bylaws", "wmv", "lao", "facesitting", "breastfeeding", "mccartney", "anglo", "kathryn", "randomized", "motivational", "gratuite", "gerry", "kappa", "neuroscience", "blender", "blaster", "remediation", "decoder", "genocide", "heathrow", "indy", "pantera", "sidebar", "authored", "snoop", "winery", "rbi", "photon", "overlay", "rusty", "pharma", "fayetteville", "champaign", "fyi", "xc", "pakistani", "ics", "apa", "bitches", "urbana", "diagnose", "secsg", "franco", "announcing", "trivium", "amature", "showroom", "cx", "swarovski", "liter", "akon", "brendan", "condosaver", "amex", "classicvacations", "blackpool", "fh", "inuyasha", "nominees", "cuz", "viewsonic", "dryers", "fujifilm", "ams", "hallmark", "counterparts", "paced", "engl", "asians", "seether", "milestones", "parkinson", "mclean", "checkboxes", "lobbying", "mgm", "cinemas", "islander", "encoder", "importers", "impressum", "phe", "maroon", "kontakt", "ers", "kawasaki", "licences", "bose", "fountains", "clones", "crossover", "situ", "specificity", "runoff", "osteoporosis", "approvals", "bea", "jukebox", "nexus", "cancers", "tango", "melting", "garner", "aba", "karate", "qb", "optimizing", "switchfoot", "coldplay", "vioxx", "tty", "bsc", 
"celexa", "guitarist", "symmetric", "kuala", "bbb", "geeks", "jg", "repec", "insightful", "unrated", "diva", "adsense", "exemptions", "integrates", "csa", "bookstores", "cimel", "hvac", "leica", "agendas", "nws", "busch", "armani", "bipolar", "menopause", "inbound", "shortlist", "gainesville", "tiava", "eclectic", "headphone", "regimes", "readme", "binder", "xemacs", "helicopters", "ngc", "intercontinental", "workspace", "customizable", "softcover", "realtime", "electrons", "subsystem", "appl", "kinetic", "caffeine", "xf", "nib", "httpd", "slac", "calorie", "graphite", "stroller", "bowel", "sweaters", "mafia", "futuna", "predictable", "susceptible", "insest", "skyline", "sulfur", "scams", "lipid", "tao", "quot", "ritz", "networked", "localhost", "cabling", "stills", "perimeter", "biased", "cardiology", "playoff", "sti", "chiang", "payload", "merrill", "oldsmobile", "grilled", "misty", "conserved", "searchsearch", "rewrite", "vending", "keygen", "janeiro", "heh", "transexuals", "prentice", "cumbria", "diaz", "vegan", "congressman", "recombinant", "ubuntu", "superstar", "closeout", "corel", "kayaking", "synergy", "eta", "backpacking", "accidentally", "bonded", "sticking", "dudley", "osama", "oprah", "inflatable", "beers", "glassware", "amc", "kos", "coursework", "kayak", "mayotte", "repetitive", "gears", "orbital", "musicals", "lithuanian", "amatuer", "profiling", "reps", "hn", "sequencing", "panoramic", "deskjet", "rhino", "polynomial", "tau", "nsa", "stakeholder", "signifies", "stochastic", "psu", "santana", "kidding", "swansea", "airmail", "problematic", "roadmap", "ogg", "lesbo", "farrell", "acknowledgements", "tnt", "skincare", "heroin", "mandated", "workbook", "xslt", "hogan", "omg", "sulfate", "timeshare", "oldies", "complaining", "debra", "cdrom", "cle", "thrillers", "fortran", "timeless", "spouses", "vv", "ninety", "tyr", "cues", "bioinformatics", "chung", "subpart", "scheduler", "hypnosis", "kat", "cornerstone", "recycle", "sos", "lsu", "gao", "applicability", "volatility", "uid", "hoteles", "fav", "disneyland", "umd", "gdb", "bro", "offs", "listserv", "fab", "cond", "tokelau", "conformance", "diecast", "bittorrent", "frankie", "oa", "iu", "vf", "alprazolam", "collaborate", "positives", "hunk", "allocations", "lymphoma", "rpc", "freebies", "frontline", "thb", "tele", "imap", "winamp", "stoke", "idg", "polymers", "grills", "phat", "zz", "escrow", "lumpur", "dds", "infospace", "surfers", "kauai", "licensors", "cpc", "stresses", "webhosting", "peoria", "peek", "alr", "ipsec", "bournemouth", "sudoku", "undef", "campground", "sars", "cme", "predictive", "vlan", "aquaculture", "sendmail", "redesign", "nitro", "jackpot", "cortex", "entitlement", "secs", "mixers", "accountancy", "policing", "michaels", "ecc", "kj", "similarities", "kv", "hipaa", "neutron", "duluth", "dogg", "folklore", "dimm", "acoustics", "pensacola", "crs", "condominium", "wildcats", "exhibitors", "ssi", "redwood", "invoices", "tyres", "westwood", "gly", "estonian", "bomber", "songwriter", "shania", "coaster", "typedef", "strippers", "macmillan", "aac", "woodworking", "cbd", "pricerunner", "afl", "catalytic", "bethesda", "privatization", "sourceforge", "sanford", "membranes", "testosterone", "nunavut", "biochemical", "lennon", "suitability", "lara", "kx", "invitational", "handcrafted", "aftermarket", "fellowships", "freeway", "digitally", "hatchback", "rfp", "coa", "subclass", "rutgers", "sampled", "deploying", "interacting", "roanoke", "treadmill", "fiberglass", "osaka", "personalize", "broncos", "jorge", 
"classifications", "diggs", "rafting", "sle", "jv", "safaris", "contaminants", "scr", "mitch", "mailer", "liners", "asheville", "quinta", "kristin", "bistro", "lw", "voodoo", "caching", "volts", "excalibur", "bots", "sinatra", "interpersonal", "traumatic", "ringer", "zipper", "meds", "briefings", "siblings", "adversely", "pitcairn", "pdb", "onboard", "nucleic", "telecoms", "hehe", "celeron", "lynne", "invariant", "challenger", "redistributed", "uptake", "newsweek", "geared", "svc", "prada", "tycoon", "maxtor", "plone", "dcp", "biochem", "pte", "ors", "compactflash", "antibiotic", "vanderbilt", "cps", "overweight", "metasearch", "taliban", "maureen", "trekking", "coordinators", "digi", "shoreline", "westin", "middleware", "mips", "roundtable", "dementia", "levine", "ripencc", "shoppy", "filesystem", "pow", "docking", "guidebook", "atreyu", "kylie", "pilates", "backstreet", "packers", "localized", "lic", "docume", "xy", "fte", "stl", "yd", "archiving", "disconnect", "multilingual", "gsa", "immunization", "ciara", "cumming", "interviewing", "categorized", "cmos", "transmissions", "receivable", "ronnie", "implant", "playlists", "thematic", "brentwood", "correctional", "katz", "jojo", "buffers", "talkback", "servings", "kobe", "baylor", "otc", "frustrating", "ssa", "zeta", "dinnerware", "sclerosis", "emotionally", "carbohydrate", "estrogen", "odbc", "ipods", "openbsd", "federated", "shui", "rockford", "staging", "statistic", "torino", "schizophrenia", "predators", "mpi", "adhesives", "inventories", "uf", "brokeback", "dumping", "ow", "econ", "footjob", "warez", "magenta", "tagging", "overly", "triggers", "constructs", "impedance", "dragonfly", "underoath", "refundable", "hbo", "billboard", "huang", "sportsbook", "layered", "neurological", "subs", "watchdog", "starbucks", "ibook", "viability", "kh", "filler", "smiley", "genomics", "yi", "yum", "researched", "copiers", "ovarian", "airplanes", "cello", "wlan", "sweepstakes", "antigens", "midtown", "stabilization", "kinetics", "cocos", "impacted", "rumsfeld", "beanie", "thurs", "spaced", "freq", "segmentation", "soaps", "courthouse", "entrepreneurial", "lebanese", "psycho", "maharashtra", "ricoh", "nrc", "chavez", "asst", "overload", "vikings", "kanye", "bootstrap", "wtf", "humane", "scm", "travelocity", "fno", "twink", "nortel", "koh", "affiliations", "pussycat", "appropriated", "escherichia", "mallorca", "reversible", "spd", "oj", "unclassified", "bookshelf", "htdocs", "fps", "initialization", "expat", "raider", "farmington", "timers", "enrolment", "glibc", "lawmakers", "larson", "photosmart", "centrally", "acl", "luv", "dealership", "eyewear", "bakersfield", "decal", "addictive", "clarinet", "fiona", "vn", "gigabyte", "dbz", "rainforest", "federally", "macos", "multinational", "pornstars", "nope", "evo", "aspirin", "spoilers", "machining", "malibu", "gatwick", "shaun", "redundancy", "emo", "detox", "skateboard", "automate", "drosophila", "branson", "ortho", "appraisals", "flashes", "lakewood", "drupal", "prac", "carers", "kramer", "usaid", "idc", "keypad", "richland", "microbial", "adc", "caregivers", "quark", "zyban", "electronica", "mitochondrial", "grinder", "angie", "octet", "wj", "cre", "dinosaurs", "mccoy", "vibe", "snapshots", "ubc", "meth", "trendy", "inpatient", "filming", "fread", "backend", "cartier", "ageing", "containment", "keynes", "protections", "aliases", "maximizing", "handsfree", "tomcat", "walmart", "interestingly", "jules", "ernie", "elem", "organisers", "pissed", "nite", "mckenzie", "lenox", "darussalam", "genital", "mcse", 
"cajun", "csu", "algebraic", "astm", "kristen", "fsa", "sgd", "chromatography", "overdose", "nad", "gallagher", "mueller", "cao", "ladyboys", "orgasms", "plantronics", "ftd", "freezers", "ibiza", "reese", "digimon", "gastrointestinal", "inspiron", "pagerank", "asm", "smb", "contrib", "blu", "matlab", "netware", "bse", "megapixels", "retriever", "svalbard", "pixar", "dhtml", "winme", "func", "gamespy", "standalone", "antitrust", "equine", "bros", "proto", "jared", "tehran", "dal", "anesthesia", "filemaker", "libtool", "wrongful", "signage", "psy", "encode", "admins", "moc", "dau", "alvin", "accolades", "raton", "stefani", "infertility", "servlet", "collage", "aces", "depeche", "benchmarking", "xxl", "teleflora", "bankruptcies", "gauges", "blueprint", "mccain", "spiderman", "bridging", "flick", "datum", "canceled", "empowering", "ymca", "facilitator", "bos", "macworld", "wwf", "galveston", "rockville", "banff", "smc", "lq", "serv", "ipo", "tek", "ipc", "timestamp", "musica", "bib", "stevie", "rivera", "dermatology", "sandbox", "mdt", "pinkworld", "cambridgeshire", "premiership", "luton", "conftest", "recursive", "registerregister", "fluorescence", "kosher", "additives", "marketed", "mandrake", "camper", "cpr", "liquidity", "lasik", "galactic", "merchandising", "ombudsman", "registrant", "firefighters", "placements", "ih", "elec", "levin", "academia", "amiga", "descriptor", "pimp", "gimp", "cyclic", "swimsuit", "morphology", "versace", "printprinter", "condom", "westerns", "dodgers", "litre", "correlations", "textual", "handsets", "gandhi", "inks", "diarrhea", "seahawks", "mondays", "insertions", "itk", "kms", "couture", "ativan", "summarize", "savesave", "laminated", "citrix", "backups", "turismo", "animalsex", "mayhem", "washers", "grep", "xeon", "polymerase", "optimisation", "easyshare", "cvsroot", "joplin", "dialup", "nx", "thn", "afro", "biosynthesis", "prosecutors", "alloys", "getaways", "miquelon", "wonderland", "zine", "conn", "truman", "jin", "asynchronous", "carla", "messageslog", "clearinghouse", "dwi", "facilitates", "specialised", "ramones", "everquest", "bernstein", "skis", "calc", "marketers", "itc", "lipstick", "brennan", "kpx", "saturation", "stamford", "alamo", "comcast", "hyderabad", "attn", "spaghetti", "tues", "boogie", "abramoff", "ean", "fla", "utilizes", "lesbos", "fasteners", "sakai", "lk", "rajasthan", "committing", "inlog", "laminate", "earring", "aggregator", "datatype", "postnuke", "ergonomic", "dma", "sme", "kp", "refills", "ibis", "yyyy", "unidentified", "atl", "ims", "tractors", "vx", "spp", "coed", "audiobooks", "sheikh", "gk", "hernandez", "kiwi", "ohm", "truste", "acreage", "mfc", "fingerprint", "sorority", "audition", "mca", "plano", "nmr", "lortab", "leveraging", "psychotherapy", "mso", "htm", "stokes", "lakers", "ats", "saxophone", "cocktails", "steroid", "communicator", "horticulture", "dhs", "resets", "util", "ordinator", "bono", "acronym", "veritas", "breathtaking", "streamline", "crowne", "brunch", "pundit", "figurine", "mutants", "cyberspace", "expiry", "exif", "goldman", "msu", "inning", "fries", "initialize", "tlc", "sybase", "foundry", "toxicology", "mpls", "bodybuilding", "fta", "nostalgia", "acetate", "pls", "bmx", "saratoga", "terminator", "badminton", "cyan", "cory", "stacey", "serif", "portability", "fsb", "yearbook", "lubricants", "cns", "hv", "alameda", "aerosol", "mlm", "clemson", "goin", "philly", "coolers", "multilateral", "costello", "audited", "galore", "aloha", "dehydrogenase", "aq", "gx", "postfix", "fj", "altavista", "exponential", 
"shi", "gev", "secretarial", "todays", "toaster", "cater", "omb", "bac", "kart", "cpl", "sbs", "putin", "questionnaires", "profileprofile", "serials", "equivalence", "vaughn", "aviv", "condominiums", "schematic", "liposuction", "swf", "apoptosis", "pneumatic", "sniper", "vertices", "additive", "professionalism", "libertarian", "rus", "washable", "normalized", "uninstall", "scopes", "fundraiser", "troll", "teamwork", "auditions", "refrigerators", "redirected", "middletown", "widgets", "ontology", "timberland", "mags", "videogames", "concluding", "vallarta", "chopper", "pinball", "pharmacists", "surcharge", "tbd", "ipb", "latvian", "asu", "installs", "malware", "tsn", "nguyen", "horsepower", "algae", "sarbanes", "alcoholism", "bdd", "csc", "maximal", "prenatal", "documenting", "scooby", "moby", "leds", "mcbride", "scorecard", "gln", "beirut", "conditioners", "culturally", "ilug", "janitorial", "propane", "appendices", "collagen", "gj", "nigerian", "ect", "sto", "makeover", "esc", "dragonball", "chow", "stp", "cookbooks", "spoiler", "ari", "avr", "lamborghini", "polarized", "baroque", "ppt", "jihad", "sharepoint", "cts", "abit", "abnormalities", "qtr", "blogshares", "motorsport", "septic", "citroen", "gz", "predicts", "palmone", "expedited", "curricula", "wmd", "pms", "raped", "configurable", "denon", "sloan", "flawed", "cfs", "checkpoint", "rosenberg", "ffi", "iriver", "callaway", "tcm", "dorm", "lakeside", "marquette", "interconnection", "gilmore", "prc", "taxis", "hates", "gamefaqs", "cookers", "ultraviolet", "afc", "haitian", "dialing", "unicef", "identifiers", "mentors", "steiner", "licensure", "tammy", "tz", "dcs", "soybean", "affirmed", "posix", "brewers", "mci", "retractable", "quickbooks", "townhouse", "stormwater", "sgi", "coco", "pipelines", "rudy", "tia", "congrats", "msds", "arafat", "srl", "splitter", "wai", "standardization", "lakeland", "thiscategory", "classy", "acxiom", "triathlon", "kbytes", "thx", "textured", "doppler", "entropy", "snooker", "unleashed", "lux", "nairobi", "importer", "isl", "orioles", "rotor", "theres", "ttl", "dreamy", "backstage", "qq", "lubbock", "suvs", "bmp", "gasket", "firearm", "dss", "bam", "closures", "participatory", "micron", "budgetary", "pcos", "ssk", "pantie", "bombers", "spongebob", "markus", "ideological", "wellbutrin", "rheumatoid", "swindon", "cabernet", "sek", "dsm", "understandable", "shea", "doctorate", "binaries", "slovenian", "showdown", "simone", "spc", "potentials", "tempe", "hklm", "cores", "borrowers", "osx", "bouvet", "multifunction", "nifty", "unveils", "skeletal", "dems", "oahu", "rollover", "infos", "lds", "thanx", "anthrax", "shockwave", "westlife", "bpm", "tamiflu", "touchdown", "planar", "adequacy", "iomega", "xa", "fetisch", "eastman", "franchising", "coppermine", "ged", "ecard", "ue", "kn", "ferries", "faqfaq", "muller", "fudge", "extractor", "usergroupsusergroups", "svenska", "pcg", "myocardial", "everytime", "callback", "encompasses", "sander", "conductivity", "atc", "vicki", "danville", "sedona", "skateboarding", "lexisnexis", "deepthroat", "outback", "reiki", "biopsy", "peptides", "awakenings", "pim", "sediments", "appraiser", "smp", "gaussian", "hustler", "tensions", "linkages", "separator", "schultz", "adr", "concordia", "recon", "fileplanet", "royals", "globalisation", "borland", "pastel", "nottinghamshire", "strollers", "uninsured", "picasso", "mcgill", "discriminatory", "headquartered", "travelodge", "empower", "hurley", "pedals", "teak", "bitmap", "migraine", "sli", "enum", "lamar", "aes", "methane", "pager", 
"snp", "aclu", "westchester", "nimh", "quilting", "campgrounds", "adm", "densities", "isd", "tional", "turnaround", "navigational", "stargate", "saskatoon", "cen", "minh", "fingertips", "sba", "rockwell", "vl", "pepsi", "rea", "oversized", "snr", "sibling", "ecs", "burberry", "nrs", "cfa", "inhibit", "pps", "screenplay", "unabridged", "ntp", "endpoint", "labelling", "synchronous", "heartland", "cafeteria", "outfitters", "opp", "homelessness", "opengl", "efficiencies", "blowout", "tickboxes", "oversee", "thresholds", "isnt", "waveform", "deficits", "flair", "applegate", "whitewater", "tableware", "bernie", "workgroup", "clement", "cli", "robotic", "mana", "mississauga", "dialysis", "filmed", "staten", "carole", "schwarzenegger", "summarizes", "sludge", "crypto", "christensen", "heavyweight", "lps", "zach", "pdp", "phantomnode", "comptroller", "scalability", "creatine", "embl", "minimizing", "gpo", "dq", "relativity", "mojo", "econo", "shapiro", "rituals", "pq", "ub", "epoxy", "watercolor", "uncensored", "trainees", "tori", "effluent", "infousa", "storytelling", "polarization", "bombings", "smes", "ionamin", "fuckin", "charlottesville", "xu", "aniston", "barred", "equities", "feeders", "jboss", "mobil", "scrolling", "diode", "kaufman", "aloe", "buckinghamshire", "medford", "underlined", "whores", "gemstones", "bmi", "viewpoints", "exim", "appalachian", "dealings", "phillies", "ramblings", "janis", "centric", "optionally", "nightclub", "geophysical", "fictional", "golfing", "rubin", "handlers", "topeka", "openoffice", "bugzilla", "linus", "taco", "mcsg", "humboldt", "scarves", "mla", "repertoire", "emeritus", "macroeconomic", "gundam", "adaptec", "tailed", "voyer", "hostname", "excl", "bx", "arr", "typo", "merchantability", "autodesk", "jn", "winged", "attacker", "catcher", "haynes", "siyabona", "inverter", "abi", "motivate", "mackay", "bridgeport", "assessor", "fullerton", "cpp", "blockbuster", "dz", "amarillo", "pixmania", "pathfinder", "bonsai", "windshield", "tomtom", "spf", "croydon", "convection", "jdbc", "debugger", "boing", "ancillary", "pointless", "alibris", "factoring", "gyms", "inhalation", "faucet", "bitpipe", "arguably", "techs", "electives", "walkman", "midget", "quan", "commissioning", "experimentation", "saltwater", "cpi", "nis", "wacky", "sgml", "anemia", "biting", "reits", "savanna", "crn", "travestis", "mmf", "cancellations", "paging", "coe", "nudists", "fac", "asean", "airsoft", "bontril", "proliant", "keeling", "zh", "accesses", "jive", "bullshit", "casper", "libstdc", "xpress", "datasets", "webdesign", "nicotine", "comeback", "gannett", "curricular", "downtime", "takeover", "lolitas", "thessalonians", "upto", "joaquin", "transistor", "spotting", "wagering", "everest", "disregard", "hanger", "outkast", "pitbull", "rtf", "fairview", "hires", "alienware", "mainframe", "indo", "compilers", "guinness", "heartbeat", "blazer", "timezone", "merck", "tanya", "bmc", "eia", "colleen", "bbbonline", "participates", "syndicated", "lexicon", "integers", "zirconia", "shortages", "plumbers", "jfk", "raf", "igor", "hama", "patton", "pei", "surfer", "diapers", "eas", "waco", "physiol", "adp", "outbound", "breakout", "fakes", "stderr", "kev", "fomit", "injections", "remortgage", "yogurt", "complies", "workaround", "polytechnic", "uber", "shoppe", "berlios", "csr", "penthouse", "synthase", "pistons", "emule", "sauvignon", "bayer", "carrera", "dvb", "cation", "scientology", "cdma", "maxi", "msm", "rac", "feminism", "topps", "webinar", "dewalt", "turnout", "bruins", "clamps", "firefly", 
"tabletop", "monoclonal", "wholesaler", "typekey", "partnering", "mage", "sqrt", "israelis", "cdp", "headlights", "monophonic", "proquest", "sergio", "swapping", "mev", "particulate", "bedfordshire", "rockport", "nist", "negotiable", "subcategories", "quarterback", "sudbury", "hectares", "upscale", "scrabble", "sdn", "mta", "docbook", "kiosk", "firstgov", "hoodie", "hoodia", "payout", "clinically", "metacritic", "obligated", "decoding", "presenters", "teal", "epstein", "weblogic", "ity", "covington", "esd", "interconnect", "chinatown", "mindless", "purifier", "kz", "greedy", "rodgers", "gloryhole", "suppl", "hotjobs", "downing", "gnd", "libc", "societal", "astros", "halogen", "wyndham", "osu", "tuesdays", "utp", "superpages", "coaxial", "jpy", "liam", "sesso", "arabidopsis", "argv", "hanoi", "ccm", "faucets", "ballistic", "payouts", "rockin", "supermarkets", "bmg", "nacional", "csv", "telstra", "contraception", "polaroid", "underage", "cardio", "timeshares", "atk", "qi", "logger", "kool", "oki", "birding", "detainees", "indi", "lymph", "barrie", "pollutant", "closeouts", "tolkien", "undp", "jbl", "weekday", "homecoming", "increments", "kurdish", "chromium", "mccormick", "pcm", "confrontation", "shreveport", "grower", "frederic", "unpredictable", "dtd", "capacitor", "burnett", "hilfiger", "mda", "litres", "moroccan", "nightwish", "hess", "wheaton", "motorized", "subgroup", "chevelle", "vets", "assays", "ramon", "longhorn", "backdrop", "aerobic", "vgroup", "thursdays", "dansk", "tenerife", "mayen", "oldmedline", "dunlop", "caa", "modernization", "xe", "fourier", "businessman", "watersports", "lucent", "commuter", "orthopedic", "hhs", "tyrosine", "shenzhen", "initiating", "grabs", "erickson", "marlin", "casserole", "canoeing", "cca", "ophthalmology", "geile", "clubhouse", "licensees", "evaluates", "svg", "protesters", "fernandez", "mvc", "sleazydream", "patti", "mz", "sennheiser", "sheehan", "maven", "commute", "staged", "transgender", "customizing", "subroutine", "pong", "hertz", "myr", "bridgewater", "firefighter", "propulsion", "westfield", "catastrophic", "fuckers", "blower", "tata", "giclee", "groovy", "reusable", "actuarial", "helpline", "erectile", "timeliness", "obstetrics", "chaired", "agri", "repay", "prognosis", "colombian", "pandemic", "mpc", "fob", "dimage", "fetus", "determinants", "durango", "noncommercial", "opteron", "superannuation", "ifs", "haas", "wimbledon", "documentaries", "mpa", "rao", "remake", "arp", "braille", "physiopathology", "seperate", "econpapers", "arxiv", "pax", "kalamazoo", "taj", "sinus", "maverick", "anabolic", "allegra", "lexar", "videotape", "educ", "amplification", "larsen", "huron", "snippets", "conserv", "dustin", "wsop", "composites", "wolverhampton", "banning", "cpt", "gauteng", "ftc", "watertown", "pathogens", "mft", "uefa", "jacking", "radiohead", "ooh", "subsections", "definately", "bod", "yin", "tiki", "homepages", "handouts", "cpm", "marvelous", "bop", "asnblock", "stretches", "biloxi", "indymedia", "clapton", "beyonce", "smf", "nabble", "intracellular", "infoworld", "boyz", "waltham", "geisha", "dblp", "briefcase", "mcmahon", "cq", "mcgregor", "modal", "marlboro", "grafton", "phishing", "addendum", "foia", "kirsten", "yorker", "memberlistmemberlist", "gam", "intravenous", "ashcroft", "loren", "newsfeed", "carbs", "yakima", "realtones", "xtc", "vdata", "interpro", "engadget", "tracey", "wac", "darfur", "fragmentation", "behavioural", "kiev", "paranormal", "glossaries", "sonyericsson", "dex", "emoticons", "carbohydrates", "hms", "norwood", 
"appetizers", "webmin", "stylesheet", "goldstein", "wnba", "englewood", "asf", "hottie", "stripper", "pfc", "adrenaline", "mammalian", "opted", "meteorology", "analyzes", "pioneering", "ctx", "spreadsheets", "regain", "resize", "medically", "tweak", "mmm", "alicante", "graders", "shrek", "universidad", "tuners", "slider", "cymru", "fprintf", "irq", "dads", "sdl", "ebusiness", "hays", "cyrus", "courtroom", "baht", "relocating", "synth", "filthy", "subchapter", "ttf", "optimizations", "infocus", "bellsouth", "sweeney", "aca", "fpo", "layup", "laundering", "fre", "nazis", "cumfiesta", "newbies", "mds", "piles", "vaginas", "bezel", "avatars", "twiztid", "facilitation", "ncr", "xb", "voc", "rts", "applets", "pdfs", "cac", "teh", "undercover", "substrates", "evansville", "joystick", "knowledgebase", "forrester", "xoops", "rican", "uptime", "dooyoo", "spammers", "nuclei", "gupta", "tummy", "axial", "aest", "topographic", "westport", "majordomo", "wednesdays", "burgers", "rai", "watchlist", "campers", "phenotype", "countrywide", "affirm", "directx", "resistor", "bhd", "audubon", "commentsblog", "snowmobile", "publ", "cpg", "subparagraph", "weighting", "rectal", "mckinney", "hershey", "embryos", "garages", "sds", "urology", "aforementioned", "rihanna", "tackling", "obese", "melvin", "collaborations", "isolates", "velcro", "worksheets", "avaya", "srs", "wigan", "hua", "abba", "qd", "orig", "huskies", "frey", "iz", "loyola", "gartner", "xda", "strapon", "chaser", "astra", "expasy", "overdrive", "ripley", "phosphorylation", "cfo", "depletion", "neonatal", "qr", "mclaren", "rowling", "vhf", "flatbed", "golfers", "lira", "technics", "damien", "clippers", "spirited", "gv", "staa", "recharge", "openid", "sassy", "demux", "ribosomal", "tdk", "filmmakers", "transnational", "paralegal", "spokesperson", "fha", "teamed", "preset", "iptables", "pocketpc", "nox", "jams", "pancreatic", "tran", "manicures", "sca", "tls", "prweb", "holloway", "cdrw", "plz", "nadu", "underwriting", "rulemaking", "valentino", "prolyte", "millenium", "collectable", "stephan", "aries", "ramps", "tackles", "dsa", "walden", "catchment", "targus", "tactic", "ess", "partitioning", "voicemail", "acct", "shimano", "lingere", "parentheses", "contextual", "qwest", "jira", "cerevisiae", "dyson", "toxins", "camaro", "cryptography", "signalling", "daycare", "murakami", "merriam", "scorpio", "attr", "emp", "ultrasonic", "ashford", "intergovernmental", "paranoid", "dino", "xvid", "dmoz", "ivtools", "barron", "snorkeling", "chilean", "avs", "suny", "gifs", "qualifier", "hannover", "fungal", "ligand", "aust", "peoplesoft", "freelists", "coastline", "omit", "flamingo", "deformation", "orf", "pfizer", "assembler", "renovations", "genbank", "broadcasters", "employability", "noodles", "retardation", "supervising", "freeport", "lyme", "corning", "prov", "dishnetwork", "amg", "claremont", "moo", "cpe", "childs", "bizkit", "blogosphere", "endocrine", "resp", "carlsbad", "ammo", "bling", "chars", "mcguire", "utilisation", "rulings", "sst", "geophysics", "slater", "broccoli", "foreach", "oakwood", "mcgee", "kissimmee", "linker", "tetris", "tds", "synchronized", "hsbc", "shellfish", "astoria", "trajectory", "epsilon", "knowles", "astrophysics", "hansard", "lai", "authorisation", "vampires", "relocate", "nerd", "dac", "glazing", "provisioning", "mnt", "expandable", "maserati", "bender", "reliably", "fas", "sendo", "hasbro", "corba", "polski", "multidisciplinary", "ventricular", "petersen", "bans", "macquarie", "pta", "poy", "mao", "transferable", "yummy", 
"momma", "lehigh", "concordance", "greenberg", "trish", "electrodes", "svcd", "cron", "darth", "cramer", "yup", "ching", "melanoma", "thug", "yugoslav", "occ", "cpan", "bizjournalshire", "tco", "shaver", "grammy", "fibrosis", "opel", "hummingbird", "ported", "eeo", "polyethylene", "parametric", "awarding", "dkk", "superbowl", "sse", "haskell", "flatware", "skid", "eyeglasses", "fenton", "polaris", "formulations", "bgp", "parenthood", "latinos", "artworks", "doherty", "dnc", "bci", "allegheny", "arenas", "aaaa", "compressors", "exclusives", "lounges", "consultative", "lst", "ais", "conveyor", "normative", "surg", "rst", "longtime", "ecm", "mckay", "spe", "solver", "ani", "lacie", "solvents", "kudos", "jens", "creams", "poo", "handbooks", "agm", "shawnee", "crowley", "butalbital", "artifact", "mdot", "coldwell", "qs", "depts", "veterinarian", "merseyside", "cso", "krona", "disseminate", "puget", "coasters", "geologic", "fleetwood", "feldman", "endocrinology", "replicas", "polygon", "mcg", "kwazulu", "servo", "riparian", "guelph", "tenuate", "curator", "jaime", "mower", "gamestats", "lvl", "faxing", "meyers", "testsuite", "stressful", "extranet", "remastered", "teac", "neg", "rma", "eastwood", "handspring", "gerber", "duran", "aquarius", "stencil", "srp", "scifi", "redirection", "showcases", "hmv", "refinery", "abort", "drs", "schroeder", "indent", "chardonnay", "removals", "antrim", "accelerating", "guesthouse", "bz", "insiders", "duvet", "decode", "looney", "brigham", "mts", "jewelers", "juneau", "dilution", "veterinarians", "colourful", "grids", "sightings", "binutils", "spacer", "microprocessor", "deloitte", "claiborne", "clie", "cdm", "spills", "assistive", "chronograph", "refunded", "sunnyvale", "spamcop", "lovin", "embracing", "minimise", "salinity", "nbsp", "specialising", "handout", "routledge", "ramirez", "haiku", "paisley", "telemarketing", "cutoff", "visuals", "ccs", "breads", "seg", "martina", "mclaughlin", "headlight", "kemp", "sla", "pipermail", "sonneries", "clinicians", "entertainers", "tripp", "peterthoeny", "blockers", "stash", "jamaican", "semen", "endogenous", "memorex", "showtime", "narcotics", "oceanfront", "flange", "realplayer", "mcc", "mpaa", "gogh", "allentown", "romero", "bnwt", "predefined", "buzznet", "melodic", "isi", "naics", "transgenic", "axim", "brookfield", "endorsements", "viscosity", "cve", "bengals", "estimator", "cls", "concurrently", "leafs", "electrician", "mayfield", "ftse", "samui", "bleach", "unauthorised", "wolverine", "individualized", "ecn", "raffle", "shredder", "embedding", "hydrology", "mascot", "lube", "launcher", "mech", "primers", "caregiver", "lupus", "sachs", "qtek", "oy", "twn", "keane", "gator", "memberlist", "utd", "nordstrom", "roseville", "dishwashers", "walla", "remixes", "cozumel", "replicate", "taped", "mcgrath", "biometric", "incubation", "aggregates", "wrangler", "asymmetric", "cytochrome", "xfm", "sps", "shure", "mcs", "donating", "antec", "giveaway", "cmc", "alyssa", "cnt", "renter", "vmware", "patel", "honeywell", "nightclubs", "barrington", "luxor", "caterers", "capacitors", "rockefeller", "checkbox", "itineraries", "reagents", "christoph", "walkers", "eek", "ensembl", "weekdays", "computations", "wineries", "vdc", "booker", "mattel", "diversification", "wsdl", "matic", "xyz", "antioxidant", "esrb", "archos", "semesters", "naruto", "storyline", "melrose", "streamlined", "analysing", "airway", "iconv", "commas", "vicky", "helvetica", "ssp", "submitter", "cambria", "icp", "manifestation", "subsets", "blazers", "jupitermedia", 
"merritt", "triad", "webpages", "yp", "clinique", "fitch", "charting", "ugm", "fixation", "bsa", "lenovo", "alamos", "leach", "gravitational", "cyrillic", "prevacid", "designee", "sunni", "netflix", "monoxide", "groupee", "hardin", "colorectal", "outage", "chunky", "raptor", "ima", "coulter", "iain", "mtn", "pbx", "quantify", "dmesg", "elfwood", "substitutions", "lancome", "galleria", "inv", "hillsborough", "booklets", "pln", "cin", "msp", "gluten", "spanked", "orthopaedic", "medi", "nrt", "obispo", "minogue", "turbines", "notepad", "crappy", "golfer", "afs", "receivables", "scripps", "livermore", "cirque", "ost", "marxism", "escondido", "diffraction", "aha", "outlining", "subtract", "bosnian", "hydration", "havent", "preferential", "dre", "interns", "quotas", "methodological", "aarp", "gettysburg", "iseries", "menlo", "walkthrough", "bikinis", "aopen", "bookcrossing", "addicts", "epithelial", "drastically", "clarks", "groupware", "matchmaking", "dict", "descriptors", "aeronautics", "radiography", "norsk", "nps", "afr", "expr", "ejb", "refereed", "afi", "toxin", "poynter", "filmmaker", "grounding", "smartphones", "calvert", "fiduciary", "bayesian", "saccharomyces", "cfp", "humps", "osi", "zimmerman", "javier", "romantics", "trimmer", "bookkeeping", "hmo", "hikes", "kickoff", "magick", "hillsboro", "blm", "fractal", "mtg", "guildford", "twill", "therapeutics", "disruptive", "kicker", "protease", "abrams", "moreno", "newsforge", "timex", "duffy", "racers", "cma", "pairing", "kirkland", "gujarat", "dkny", "catfish", "doubletree", "brink", "transex", "tdd", "hotpoint", "anthologies", "retirees", "dcc", "btu", "investigates", "chelmsford", "anonymity", "gotham", "lyle", "pinot", "responsiveness", "gazetteer", "jacobson", "kda", "imitrex", "monash", "binghamton", "connolly", "homology", "rpms", "psychedelic", "gyn", "rhinestone", "ely", "quadratic", "philharmonic", "dynamical", "cantonese", "quran", "turnovr", "keychain", "shakers", "inhibited", "lexical", "openssl", "ugg", "mathematica", "karachi", "missoula", "abilene", "fdid", "snes", "swat", "pune", "trashy", "expended", "webct", "pvr", "handycam", "zn", "strategically", "dms", "anus", "dnr", "deputies", "emergent", "erika", "authenticate", "aligning", "nautilus", "doulton", "rtp", "dracula", "umm", "modding", "eap", "shaman", "letra", "mandriva", "seti", "extracellular", "jaipur", "stockport", "eiffel", "plywood", "dnp", "morbidity", "wimax", "effexor", "binders", "custodial", "combi", "integrator", "sonnerie", "teri", "sectoral", "trombone", "postsecondary", "rbd", "ambulatory", "lookin", "xff", "camouflage", "beckham", "dispensers", "firebird", "qu", "showbiz", "hbox", "waikiki", "lng", "pds", "antiqua", "boxers", "asics", "barbeque", "workouts", "ini", "mrc", "seamlessly", "ncc", "girlfriends", "songbook", "hepatic", "copeland", "swanson", "aquifer", "ldl", "pgs", "xga", "svensk", "stereotypes", "marlins", "shelly", "exiting", "saginaw", "polyurethane", "seks", "textus", "johansson", "spraying", "hamburger", "reactivity", "lieberman", "windchill", "storefront", "eof", "codeine", "tetex", "cheerleading", "wellbeing", "pkwy", "hairdryer", "punitive", "exon", "outsource", "thier", "siebel", "captions", "kf", "chromosomes", "emailing", "manic", "novotel", "ndp", "transmitters", "nicola", "minidv", "collaborating", "tuxedo", "receptus", "michelin", "bicycling", "itt", "blueberry", "schumacher", "socioeconomic", "hamster", "bushnell", "ergonomics", "finalize", "lumens", "sudanese", "softpedia", "iff", "faceplate", "packer", "ibs", "broward", 
"globus", "pir", "reco", "softcore", "referencing", "typ", "guangzhou", "nader", "militants", "resins", "cougar", "montrose", "surreal", "irradiation", "redesigned", "raster", "credential", "checklists", "quirky", "oscillator", "finalists", "encrypt", "mgt", "sneakers", "incontinence", "pajamas", "murdoch", "dali", "lubricant", "quests", "mgr", "outsourced", "jody", "plasmid", "schiavo", "unbeatable", "upstate", "lymphocytes", "repayments", "transsexuals", "fueled", "mex", "xanga", "sverige", "extrait", "pelvic", "monochrome", "activating", "antioxidants", "gynecology", "mythtv", "probabilistic", "cooperating", "calibrated", "phased", "godzilla", "eweek", "airbus", "simplex", "webhome", "aerobics", "sabrina", "condor", "gated", "gaap", "sasha", "ebayer", "hmc", "bitrate", "karnataka", "amish", "ffm", "duh", "hyperlinks", "clitoris", "hse", "cribs", "reliant", "subcontractor", "fendi", "giveaways", "wah", "psych", "hydrochloride", "magnification", "twelfth", "proponents", "priceline", "ecco", "backpackers", "kohler", "irb", "initialized", "ava", "silverado", "amr", "ecu", "psychiatrist", "lauder", "soldering", "phono", "crd", "daryl", "trp", "lehman", "daihatsu", "grantee", "enhancer", "anglers", "rottweiler", "filefront", "visualize", "psd", "adb", "hoses", "bidpay", "ias", "turntable", "screenings", "pivotal", "pai", "heuer", "fic", "nix", "lineno", "fdi", "provo", "checkins", "plating", "lycra", "planck", "yugioh", "reactors", "npc", "kingsley", "careerbuilder", "gillette", "fluoride", "stacking", "cochran", "suomi", "sissy", "trang", "calculates", "thunderstorms", "cip", "transcriptional", "finalized", "referees", "deerfield", "lsc", "cochrane", "eldorado", "esmtp", "conservancy", "otrs", "omim", "dielectric", "anand", "electrophoresis", "sprinkler", "imbalance", "cine", "scarlett", "xen", "novak", "backcountry", "artistdirect", "outboard", "pitches", "scc", "lockheed", "raj", "iana", "elmo", "unmatched", "scranton", "ixus", "pinpoint", "gabbana", "neumann", "outta", "dieting", "andhra", "ralf", "appraisers", "xenon", "hybridization", "anh", "abercrombie", "trax", "otherosfs", "ssc", "danbury", "nofx", "sharma", "rockers", "palliative", "recieve", "cufflinks", "queues", "relisted", "beep", "dunedin", "remanufactured", "staffed", "lightspeed", "grilling", "stalin", "kaye", "bps", "camo", "shoutbox", "toms", "homeschool", "ccg", "lifehouse", "windsurfing", "pattaya", "relocated", "untreated", "mkdir", "riaa", "divisional", "chihuahua", "mcconnell", "resell", "chandigarh", "centrino", "osbourne", "burnout", "classpath", "designations", "spl", "microwaves", "coliseum", "ephedra", "spawning", "endothelial", "citrate", "eduardo", "snowman", "edmonds", "potty", "microbiol", "shooters", "norwalk", "bacillus", "fk", "cla", "spooky", "belleville", "venezuelan", "cbr", "colby", "pab", "hom", "subpoena", "hons", "interpretive", "bareback", "extender", "glucosamine", "proj", "modesto", "designjet", "typhoon", "launchcast", "referrer", "zhejiang", "ricci", "superhero", "tooling", "tomography", "berman", "vocalist", "tidbits", "cystic", "pacifica", "kostenlos", "anniversaries", "infrastructures", "littleton", "commenters", "cali", "fairway", "postdoctoral", "prs", "fairchild", "ssb", "spinner", "evanston", "homeopathic", "ordinarily", "hines", "cpd", "braking", "ece", "platelet", "messageboard", "setback", "recipezaar", "installers", "subcategory", "markov", "factbook", "tuple", "fibromyalgia", "rootsweb", "culver", "bratz", "bucharest", "ntl", "lacoste", "renters", "timberlake", "zack", "markham", 
"gels", "iframes", "thinkgeek", "nafta", "advertisment", "mountaineering", "screwdriver", "hutch", "beckett", "homeschooling", "dealerships", "sakura", "byu", "jupiterweb", "phosphatase", "mahal", "killings", "robyn", "adirondack", "casablanca", "sdp", "pulaski", "mantra", "sourced", "carousel", "mpumalanga", "thermostat", "infarction", "polypropylene", "mailboxes", "southend", "maxell", "tundra", "vars", "youngstown", "farmland", "skater", "iep", "imho", "disrupt", "rampage", "fink", "jurassic", "gpg", "gnupg", "aliasing", "comix", "solves", "hiroshima", "jiang", "oscars", "boosting", "knownsite", "macarthur", "powerhouse", "deodorant", "youre", "compulsive", "perky", "reinforcing", "extensible", "mtb", "catheter", "practicum", "photocopy", "zipcode", "mcpherson", "saharan", "pixma", "hubbell", "lesbienne", "timeframe", "disarmament", "aed", "actin", "interviewer", "vms", "wno", "dbi", "waikato", "syslog", "orr", "gastroenterology", "travelmate", "composting", "mackie", "choi", "uva", "fga", "oceanography", "vastly", "stardust", "radiological", "commando", "bathtub", "urdu", "aedst", "greer", "motorway", "repositories", "freaky", "guangdong", "merlot", "civ", "spielberg", "lesley", "thom", "phoneid", "salinas", "legged", "unilateral", "dsn", "shri", "aegis", "colloquium", "matrox", "vk", "springsteen", "uhf", "fatalities", "supplementation", "embodied", "altec", "mohammad", "verbose", "marbella", "sth", "iterator", "recieved", "slc", "cfl", "deterministic", "nci", "predictor", "salmonella", "nga", "nantucket", "viewable", "subnet", "maximise", "lotr", "isn", "chalets", "reimbursed", "lau", "watermark", "totes", "mohamed", "dyslexia", "hubble", "thugs", "organics", "dearborn", "feds", "yiddish", "dopamine", "multiplier", "winzip", "sacd", "payoff", "spv", "sonar", "monticello", "flasher", "subcontractors", "evangelism", "abortions", "lesion", "akira", "progesterone", "ethyl", "earthlink", "caramel", "immunodeficiency", "washburn", "xtra", "capitalized", "ceos", "maint", "pancreas", "octopus", "xena", "neuro", "ara", "receptionist", "cessna", "tru", "zombies", "cambodian", "interagency", "activision", "synchronize", "jenn", "juegos", "titties", "tay", "hornets", "crossfire", "ankara", "spandex", "hdmi", "tamara", "ctc", "capcom", "cato", "peachtree", "handyman", "aeg", "ethic", "harlan", "taxon", "lcs", "indefinite", "slackware", "cougars", "earch", "ambience", "genet", "photopost", "uo", "infor", "neuronal", "carrollton", "checkers", "torrance", "yuma", "spokeswoman", "baccalaureate", "tripods", "logistic", "middlesbrough", "personalization", "enema", "easement", "goalie", "darkroom", "hydrocarbons", "gpm", "hoh", "hla", "donaldson", "tiscover", "recor", "mori", "adi", "rockland", "uniqueness", "hfs", "cascading", "metros", "hangers", "broadcaster", "musculus", "degraded", "topo", "viewcvs", "eisenhower", "flashlights", "myyahoo", "rosenthal", "affordability", "latham", "jailed", "depp", "grapefruit", "trna", "motorbikes", "verdana", "bonita", "nippon", "decorators", "dwl", "jizz", "pendleton", "psoriasis", "mavericks", "dianne", "earnhardt", "amtrak", "resid", "tostring", "lessee", "goodyear", "utica", "overclocking", "kitchenaid", "cbt", "peacekeeping", "oti", "interferon", "aas", "selectable", "chechnya", "rory", "woodbridge", "jas", "intersections", "sma", "capitalization", "epi", "responder", "qv", "thoracic", "phaser", "forensics", "infiltration", "serine", "bing", "schemas", "orthogonal", "ohms", "boosts", "stabilized", "wordperfect", "msgs", "zhou", "selenium", "grinders", "mpn", 
"cse", "assn", "punches", "masturbate", "parachute", "glider", "chesney", "taos", "tong", "lotions", "adrenal", "sixties", "booting", "cunts", "dri", "ozzy", "elearning", "zx", "valuations", "kidman", "jpn", "postoperative", "cytology", "nye", "biennial", "ifndef", "bq", "circuitry", "cdw", "robb", "kinja", "tweaks", "readership", "northstar", "dif", "worthington", "groundbreaking", "transducer", "serotonin", "complements", "isc", "params", "radiators", "beagle", "cadmium", "bodoni", "speedo", "detachable", "simplifies", "sleeveless", "motorists", "tbsp", "waivers", "forsyth", "ricerca", "agilent", "plumper", "uterine", "apartheid", "bnc", "businessweek", "morphological", "windham", "ellington", "ria", "cdi", "polio", "clp", "sharm", "alvarez", "regatta", "chatroom", "polarity", "overrides", "riff", "widths", "dest", "attenuation", "kluwer", "martins", "italiana", "telford", "shuman", "grapevine", "russo", "daunting", "topples", "futuristic", "autofocus", "chai", "obsessive", "transplants", "referrers", "junkie", "admitting", "alsa", "galactica", "wkh", "rotational", "withdrawals", "pageviews", "hartman", "finalist", "pornographic", "armageddon", "smallville", "selectively", "albans", "fallout", "brownsville", "galeria", "stalker", "kathmandu", "nyu", "kristina", "dps", "icmp", "sophistication", "wrt", "messed", "oceanside", "foxpro", "taiwanese", "officejet", "helens", "ppg", "sym", "combos", "cloned", "fulham", "dahl", "pla", "nfc", "mathews", "bestseller", "enrique", "minidisc", "downside", "malvinas", "honcode", "reissue", "striker", "memos", "tensor", "whitehead", "whoa", "brookings", "accomodations", "integra", "laredo", "nntp", "logiciel", "jaguars", "mga", "tracer", "frist", "lsd", "synthesizer", "ejaculating", "biodiesel", "mcleod", "waldorf", "microfilm", "lear", "subsidized", "simons", "optimizer", "zire", "pituitary", "sow", "repeater", "teamxbox", "bytecode", "mccall", "wiz", "autopsy", "joltsearch", "ym", "itv", "colo", "ying", "bce", "inode", "glenwood", "allstate", "horticultural", "hahaha", "spamming", "ssn", "wartime", "mou", "hpv", "jain", "geriatric", "mayan", "navman", "futon", "grannies", "hairstyles", "nays", "webspace", "rds", "mellitus", "multiples", "cryptographic", "disparate", "boardwalk", "ineligible", "homeopathy", "entrants", "rallies", "simplification", "abb", "insolvency", "roleplaying", "affective", "wilma", "compusa", "histogram", "wheelchairs", "usaf", "pennington", "lesbiana", "liberalization", "insensitive", "greenpeace", "genotype", "contaminant", "informa", "collaborators", "malvern", "proxies", "rewind", "issuers", "sinh", "kerberos", "schoolgirls", "hilo", "stratton", "idx", "astronaut", "instituto", "lowry", "constipation", "aec", "sheryl", "nashua", "ikea", "oswego", "gbr", "koi", "sues", "cba", "mckenna", "eudora", "candida", "sildenafil", "adjusts", "sqft", "pickups", "squaretrade", "chandra", "cheesecake", "oth", "porting", "lubrication", "shootout", "racine", "webserver", "vnu", "fragmented", "chevron", "reinsurance", "slated", "tera", "guantanamo", "reina", "energizer", "clarksville", "vandalism", "acpi", "acetaminophen", "wolfram", "ofthe", "contraceptive", "necrosis", "iva", "bonanza", "lumbar", "disparities", "umass", "flamenco", "osprey", "flammable", "biometrics", "buspar", "wasnt", "nds", "softwares", "dbm", "alchemist", "marr", "ssw", "mcdonalds", "hormonal", "vh", "calender", "distro", "virgo", "rink", "jesolo", "unrealistic", "rhonda", "pov", "pings", "pcp", "inxs", "desy", "teaser", "impairments", "courageous", "rho", "promos", 
"transceiver", "warhammer", "iterative", "catered", "callahan", "neuron", "xlibmesa", "pulsar", "enewsletter", "dav", "pedagogy", "bcc", "afrikaans", "ecb", "cinematic", "ugh", "malik", "tshirts", "fellowes", "illus", "telefon", "maguire", "nlm", "numeracy", "caviar", "popups", "sleepwear", "quads", "grady", "kelsey", "enforceable", "bouncy", "vcrs", "retinal", "sponsorships", "textrm", "screenwriter", "vendio", "otago", "ducati", "allele", "sylvania", "optio", "purifiers", "commuting", "hiphop", "kato", "kama", "bcs", "keating", "eczema", "northland", "icu", "veg", "roadster", "confetti", "fv", "raptors", "irda", "veggie", "dharma", "chameleon", "hooper", "luciano", "grp", "abrasive", "henti", "koruna", "edp", "ensembles", "backpacker", "bainbridge", "scs", "comfy", "assuring", "gettext", "registries", "eradication", "herefordshire", "ectaco", "doh", "jodi", "quintet", "groupwise", "ambiance", "chun", "damian", "bakeries", "dmr", "fucker", "polka", "wiper", "wrappers", "giochi", "iterations", "svs", "ntfs", "namespaces", "mismatch", "fdic", "icd", "vj", "oxides", "qualifiers", "battered", "wellesley", "smokey", "passwd", "vacuums", "falun", "precip", "lagos", "rapper", "hooters", "calligraphy", "advantageous", "mustek", "monique", "fearless", "ortiz", "pref", "morningstar", "recessed", "fmt", "palladium", "totaled", "levitt", "vd", "shipper", "darryl", "hobo", "nys", "merrell", "cra", "sly", "reductase", "raul", "shenandoah", "harnesses", "wtc", "loma", "oshkosh", "multivariate", "geil", "kitchenware", "unigene", "lans", "immunoglobulin", "silverstone", "uniden", "telechargement", "remstats", "unitary", "getnetwise", "hospitalization", "clubbing", "microelectronics", "observational", "waverly", "crashers", "schwab", "deregulation", "vba", "carpentry", "steinberg", "sweetie", "mideast", "hispanics", "podium", "paranoia", "faceted", "sito", "gecko", "fullscreen", "interchangeable", "rollins", "scp", "hst", "starship", "miele", "seeded", "cyclists", "fey", "cmt", "nurturing", "enzymology", "amadeus", "usm", "galapagos", "uconn", "picker", "xls", "mulder", "lesbicas", "dialer", "mooney", "syntactic", "envision", "jetta", "downey", "codex", "lsb", "userid", "cosmology", "noodle", "gromit", "sargent", "bangle", "humping", "donnie", "privatisation", "tofu", "rq", "unhcr", "battlestar", "intuit", "adoptive", "cda", "minimized", "partnered", "twat", "filibuster", "glamorgan", "adwords", "tulane", "usp", "facet", "behaviours", "redneck", "imax", "xpath", "synthesized", "encapsulation", "samsonite", "accordion", "rooney", "minimally", "webpreferences", "skoda", "matchups", "ucc", "mailings", "ono", "beachfront", "cem", "crosswords", "pubchem", "integrative", "kelowna", "embed", "gurus", "allotted", "shutterfly", "gerhard", "watersheds", "trimester", "clickable", "spyder", "electricians", "nexium", "capricorn", "dipped", "perm", "rte", "spectrometry", "snippet", "pha", "permeability", "waukesha", "igg", "scart", "wsu", "normalization", "skillet", "neoprene", "vlc", "offeror", "thermo", "huber", "jarrett", "farechase", "maintainers", "maarten", "ginseng", "blackout", "detergent", "rosetta", "grenade", "occured", "karin", "lana", "fontana", "kang", "crafting", "ivillage", "mowers", "bratislava", "policymakers", "sienna", "watford", "misco", "givenchy", "reimburse", "esperanto", "modalities", "pcc", "lighters", "shutting", "endemic", "spr", "carly", "hydrologic", "stansted", "nep", "huddersfield", "aimee", "davey", "csp", "helpsearchmemberscalendar", "ait", "transduction", "silverman", "clarifying", 
"aortic", "drc", "hoa", "starcraft", "martens", "ficken", "structuring", "konami", "lipids", "jurisdictional", "desi", "cellphones", "cordoba", "xj", "sheppard", "dpkg", "folsom", "triggering", "mapa", "aip", "rackmount", "binocular", "eda", "specialise", "rar", "remortgages", "mckinley", "hanks", "dosing", "strobe", "waffle", "detectable", "pmi", "arrowhead", "nigga", "mcfarlane", "paycheck", "sweeper", "freelancers", "seinfeld", "tdm", "shen", "responders", "keepsake", "birthdate", "gettin", "upbeat", "ayes", "amenity", "donuts", "salty", "interacial", "cuisinart", "nautica", "estradiol", "hanes", "noticias", "gmp", "schaefer", "prototyping", "mth", "zeros", "sporty", "tumour", "fpic", "pdc", "atpase", "pooled", "bora", "shu", "stabilize", "subwoofers", "tcs", "clueless", "sofitel", "woodruff", "southport", "walkthroughs", "radiotherapy", "minifig", "transfusion", "sams", "zend", "newtown", "mcmillan", "csf", "lyn", "witt", "mcd", "unep", "newsflash", "recombination", "messing", "budgeted", "slogans", "flashback", "photometry", "sutter", "inr", "knicks", "ingestion", "mindset", "banda", "adulthood", "inject", "prolog", "dunk", "goofy", "mcintyre", "aga", "guilford", "raglan", "photonics", "cdf", "celtics", "heterosexual", "mappings", "jel", "snip", "fascism", "galerias", "audiovisual", "diagnosing", "neutrino", "wouldnt", "mq", "codecs", "certifying", "dvp", "traduzca", "csb", "subj", "asymptotic", "isotope", "moblog", "locales", "preventative", "brampton", "temperate", "lott", "srv", "meier", "crore", "deserving", "banco", "diagnoses", "thermaltake", "ultracet", "cortical", "itchy", "glaucoma", "homosexuals", "mhc", "estee", "wysiwyg", "oversees", "odp", "categorised", "thelist", "diss", "cta", "diamondbacks", "nzd", "subtype", "psx", "thessaloniki", "dmv", "leafstaff", "literate", "ayp", "bikers", "harcourt", "bubba", "mutt", "orwell", "mietwagen", "bakeware", "cleanser", "lonsdale", "velocities", "renewals", "tsx", "dnl", "mtu", "salford", "ephedrine", "longview", "closeup", "venous", "hereunder", "ouch", "teflon", "cys", "debadmin", "cleans", "fpga", "everton", "rosters", "herbicide", "marlene", "futura", "smd", "cheddar", "ql", "tucows", "regex", "bukake", "chs", "mcclellan", "gopher", "distal", "zar", "frommer", "joss", "shortfall", "harmonica", "geothermal", "texmf", "atlases", "kohl", "lorazepam", "hosp", "lewiston", "stowe", "fluke", "khi", "estes", "hdr", "caches", "stomp", "acidic", "anc", "doin", "tld", "gangster", "deliverables", "censored", "fascist", "lido", "matchbox", "trl", "businessmen", "bpo", "incubator", "experiential", "eraser", "jordanian", "jiwire", "libra", "rtl", "iea", "uniprot", "statystyki", "pkgsrc", "nonprofits", "desnudos", "czk", "ethylene", "slows", "opm", "inhibits", "exploratory", "spectrometer", "outsole", "lista", "tmc", "inset", "polynomials", "elegans", "openers", "shasta", "dob", "inet", "cov", "fallon", "sidekick", "tcb", "dmca", "rewriting", "bahama", "idl", "loretta", "lingvosoft", "dax", "allocating", "newell", "juveniles", "gamermetrics", "lcds", "ortholog", "tasmanian", "hydrocarbon", "lobbyist", "kelvin", "secondhand", "xo", "cheatscodesguides", "mdl", "clientele", "technica", "gratuito", "hts", "arkon", "hort", "bureaucratic", "cooperatives", "raceway", "sopranos", "hotties", "gq", "terrell", "yc", "closings", "registrars", "strlen", "faye", "cto", "lakeview", "ospf", "tunneling", "methamphetamine", "murals", "bangs", "asic", "knockout", "radon", "avantgo", "asl", "obi", "timelines", "roget", "cristina", "visio", "autoimmune", "coder", 
"replicated", "pom", "timetables", "kline", "anorexia", "errno", "workplaces", "harpercollins", "clk", "heartburn", "empathy", "ica", "motivating", "clockwise", "frisco", "mitzvah", "chong", "bashing", "boosters", "cyl", "grupo", "mikhail", "denominator", "changeset", "cec", "jovencitas", "texttt", "islamabad", "freestanding", "resilient", "eyewitness", "spartanburg", "hippo", "trung", "tenancy", "offsite", "realaudio", "clements", "dogsex", "ticketing", "heterogeneity", "bodied", "dudes", "maytag", "norco", "altos", "sleeved", "overs", "watercraft", "scully", "cellulose", "cathode", "monographs", "nra", "digitized", "rotated", "gaia", "motown", "pryor", "sato", "greeley", "ccr", "agro", "ramos", "quizilla", "citibank", "scotty", "pvp", "meridien", "taxa", "brunettes", "bic", "irl", "mfa", "endo", "unhelpful", "microorganisms", "twister", "krakow", "sequoia", "emt", "activator", "incredibles", "familial", "marquee", "resilience", "thermodynamics", "seton", "makita", "subgroups", "catchy", "aia", "tig", "synaptic", "bobcats", "zappa", "eec", "chicas", "swahili", "nlp", "dzwonki", "enrolling", "commercialization", "smt", "cataloging", "snowboards", "sami", "tesla", "elan", "csd", "ingrid", "longman", "unleaded", "mesquite", "kroner", "frm", "javadoc", "hotbot", "denali", "inhibitory", "phonics", "dbs", "refs", "smh", "thaliana", "meningitis", "motivations", "rees", "asteroid", "donegal", "endings", "mwf", "unlisted", "philippians", "conductive", "sooo", "echostar", "microscopes", "kenmore", "reagent", "achievable", "dla", "glamorous", "interacts", "litchfield", "lavoro", "hobbynutten", "chomsky", "venezia", "yamamoto", "zhu", "interleukin", "flashcards", "homologene", "interception", "voltages", "assignee", "kip", "bla", "algarve", "valance", "stc", "pisces", "cpanel", "orc", "hemingway", "gti", "hdl", "rendition", "danmark", "yun", "sourcebook", "hui", "matador", "smut", "nac", "dang", "bradenton", "meetups", "bilbao", "ewan", "cwa", "akai", "deletes", "adjudication", "autoconf", "rasmussen", "bibliographies", "milne", "fsc", "unplugged", "ttc", "currie", "torvalds", "neff", "tailgate", "hollis", "lanier", "overseeing", "escalation", "polymorphism", "semitism", "sevenfold", "colocation", "woodbury", "tshirt", "epidemiological", "medic", "grail", "espana", "horne", "nostalgic", "aldrich", "tabled", "farsi", "excelsior", "rial", "greenspan", "dhabi", "chobe", "tafe", "pz", "andrei", "frazier", "criminology", "jeanette", "constel", "talkin", "dup", "syd", "permittee", "hangover", "capitalize", "fsu", "motocross", "boomers", "wedgwood", "mcdermott", "youngs", "lep", "grossman", "pecan", "freshmeat", "fnal", "benzene", "mcp", "topper", "ittoolbox", "manny", "arse", "osteoarthritis", "westlake", "czechoslovakia", "addictions", "taxonomic", "judo", "mizuno", "palmetto", "telco", "ltc", "microarray", "electrolux", "elephantlist", "sparked", "qualcomm", "whitaker", "opc", "connelly", "conner", "hospitalized", "fec", "opml", "cana", "ation", "entitlements", "wingate", "healey", "jabra", "qmail", "soybeans", "awd", "electrostatic", "topological", "coz", "oversize", "westinghouse", "unk", "reb", "rios", "craftsmanship", "cic", "pyle", "seuss", "cheetah", "ldp", "competed", "fridges", "hatchery", "judgements", "msr", "zr", "corbett", "asx", "curr", "fingerprints", "conv", "cheesy", "ahmedabad", "dimlist", "winfield", "pinto", "gallerys", "jana", "martindale", "webstatistics", "dhl", "mays", "risc", "hcv", "oboe", "tzu", "hurd", "geotrack", "kolkata", "imation", "hematology", "expressway", "steelhead", 
"ahh", "turntables", "lindholm", "clooney", "facilitators", "mcnamara", "shiva", "toners", "kenyan", "wynn", "hsa", "motorbike", "niles", "zippo", "sergei", "upfront", "battlefront", "gosh", "fansite", "colossians", "addicting", "gerd", "copa", "gtp", "zlib", "whitespace", "tektronix", "doesn", "mccullough", "cnr", "microfiber", "mdc", "tsa", "deployments", "stearns", "insurgency", "boyer", "behringer", "akg", "ttm", "perceptual", "fz", "midlothian", "follando", "instr", "ott", "bsn", "rambler", "drywall", "suzy", "dekalb", "sumo", "topsites", "hsc", "tse", "refurbishment", "pfam", "tdi", "grassland", "jeffery", "councilman", "swaps", "unbranded", "astronauts", "lockers", "lookups", "attackers", "actuator", "reston", "sftp", "reinstall", "lander", "coby", "methanol", "miscellany", "simplifying", "slowdown", "bridesmaid", "transistors", "marys", "colgate", "lousy", "pharm", "foreseeable", "nutritionists", "techweb", "berkley", "resistors", "blondie", "drwxr", "cfc", "isu", "stm", "villanova", "iw", "tif", "cbi", "cesar", "heuristic", "archivist", "gallup", "valtrex", "usn", "antimicrobial", "biologist", "cobol", "homolog", "fruity", "stratus", "fips", "urea", "bumpers", "lumix", "wildcard", "rvs", "desnudas", "plextor", "oxidative", "brits", "healy", "pliers", "kayaks", "ibanez", "marxist", "couldnt", "naperville", "diplomas", "fieldwork", "damping", "immunol", "regan", "wwwroot", "bootleg", "intellectuals", "winslow", "minis", "rhs", "leftist", "tequila", "limoges", "wildwood", "oop", "germantown", "bergman", "gmac", "pulitzer", "tapered", "mollige", "toothbrush", "delegations", "plutonium", "factsheet", "squarepants", "subsurface", "guadalupe", "halliburton", "underscore", "borg", "glutamine", "slutty", "mcphee", "doa", "herbicides", "usgenweb", "inscribed", "chainsaw", "tablature", "fertilization", "glitch", "gearbox", "stang", "alejandro", "tensile", "varchar", "intercom", "ase", "osg", "mckee", "envisaged", "splice", "splicing", "campfire", "cardbus", "hubby", "graphing", "biologists", "improv", "hempstead", "exilim", "xlr", "debuts", "esi", "diskette", "ubs", "commend", "contender", "southland", "spie", "globals", "diaspora", "anu", "moratorium", "safes", "goodnight", "alcoholics", "asme", "gatlinburg", "cai", "pharmacol", "swe", "xorg", "newsquest", "wavelengths", "unclaimed", "racquet", "cout", "cytoplasmic", "qaida", "kpmg", "lanarkshire", "steakhouse", "stubs", "solarium", "sedo", "fillmore", "shox", "greenhouses", "spotlights", "perks", "harlow", "morrissey", "igp", "lutz", "capacitance", "birthstone", "primitives", "bong", "lingual", "unframed", "iter", "vibes", "tmdl", "programa", "republication", "zap", "veneto", "zhao", "hippie", "acyclovir", "benoit", "organizes", "unaudited", "rz", "summertime", "airbag", "lal", "sweetwater", "bjc", "cfm", "internationale", "krystal", "expansions", "gms", "correlate", "linkout", "poc", "pittsburg", "bylaw", "kenyon", "trims", "epiphany", "pny", "devin", "viewfinder", "homewood", "mcrae", "hind", "renaming", "plainfield", "maxon", "sprintf", "armagh", "livechat", "pdr", "bhp", "lyman", "notfound", "pho", "pathogen", "zagreb", "gayle", "ust", "overwrite", "revitalization", "camry", "postmodern", "jayne", "hci", "kuhn", "typos", "glutamate", "melton", "oneworld", "realtone", "mikey", "telephoto", "pooling", "jy", "drury", "ctw", "tbs", "sct", "custer", "borderline", "surgeries", "lobbyists", "sfo", "zionist", "gaskets", "photoblog", "cushing", "nonstop", "hummel", "corgi", "ellie", "citigroup", "seasonally", "uci", "bizwomen", "dti", "malkin", 
"adbrite", "psychosocial", "butthole", "ellsworth", "cline", "backlog", "thema", "filmmaking", "wwi", "townhomes", "usf", "instapundit", "mcmaster", "bayside", "thinkcentre", "cea", "biophys", "hodgkin", "vhosts", "laughlin", "congresses", "electrically", "ophthalmic", "yz", "prong", "unreleased", "ipa", "chaplin", "dfw", "histology", "gilman", "klamath", "atrial", "equalizer", "vbscript", "helmut", "lynda", "vax", "yak", "silt", "councilmember", "endorses", "expos", "cherish", "aap", "undead", "pto", "critters", "blob", "kurds", "ela", "ical", "macleod", "devry", "rahman", "fundamentalist", "subtraction", "superstars", "chmod", "leveling", "piggy", "stadiums", "playable", "uz", "sunos", "lancia", "perf", "interconnected", "tunning", "whitepaper", "platt", "lexis", "virology", "csm", "purcell", "vidal", "svcs", "subsystems", "oxfam", "johnstown", "beading", "robustness", "ifn", "interplay", "ayurveda", "mainline", "folic", "vallejo", "ratchet", "cee", "yl", "yee", "wicca", "cygnus", "depiction", "jpl", "tiered", "optima", "seward", "photons", "transactional", "lhc", "doggy", "anodized", "exxon", "hurdle", "donnelly", "metastatic", "encyclopaedia", "errata", "divas", "ong", "trey", "thankyou", "alerting", "insofar", "smileys", "surrogate", "breathable", "differed", "dickies", "gonzo", "programmatic", "trs", "teammates", "barrymore", "ddd", "barracuda", "accesskey", "appellants", "usergroups", "initiates", "pwd", "mation", "aiwa", "whiting", "grizzlies", "okidata", "methadone", "offsets", "tryin", "jodie", "jdk", "tallinn", "descarga", "monterrey", "harrogate", "lotteries", "bozeman", "coauthor", "cybershot", "airflow", "thur", "oper", "stn", "unattached", "maher", "karlsruhe", "yuri", "cheung", "honeymooners", "cheaptickets", "howie", "dieter", "centerpiece", "mplayer", "unwind", "outings", "crotch", "wavelet", "nothin", "pathogenesis", "diodes", "realestate", "reinstatement", "botox", "nge", "dipole", "cleo", "norge", "kata", "tangled", "giga", "walsall", "burnaby", "lilo", "adf", "majorca", "agribusiness", "validator", "jax", "pixie", "proofing", "clits", "keyring", "vehicular", "workbench", "deph", "landscaped", "aziz", "lula", "nucl", "farber", "impala", "commenter", "celsius", "flicks", "hardwear", "prefixes", "racquetball", "endl", "flavours", "pundits", "unset", "murano", "optimised", "bariatric", "hitchhiker", "isotopes", "entrez", "erich", "conduction", "grabber", "orch", "peridot", "produc", "skechers", "pacers", "salvatore", "nts", "rbc", "neurosci", "parton", "apec", "centerville", "mcl", "ebuyer", "dermatitis", "roxio", "nagoya", "sfc", "snowfall", "sss", "fundraisers", "fecal", "vorbis", "hazzard", "lbp", "gorman", "validating", "healthday", "newsstand", "dossier", "psion", "tcc", "corbin", "songwriting", "ecg", "hinton", "nighttime", "fluxes", "kombat", "finders", "dictated", "darlene", "westcott", "dca", "lua", "lpg", "opti", "proximal", "canciones", "irix", "qp", "peroxide", "bryn", "erm", "rfi", "outages", "complemented", "finley", "thanh", "backlash", "gallo", "agence", "zs", "kjv", "jonny", "biblio", "qm", "opacity", "userland", "townsville", "turing", "veggies", "centenary", "barclays", "eid", "drexel", "pedagogical", "lockhart", "fishnet", "combinatorial", "unintended", "raman", "rochdale", "prnewswire", "sthn", "smog", "ucl", "poa", "mics", "punjabi", "prem", "katalog", "kettering", "hayek", "brookline", "montpelier", "titty", "ntt", "fart", "oxidase", "qw", "caterer", "pregnancies", "fiori", "dateline", "stdout", "unassigned", "adriana", "lyndon", "groupings", 
"mems", "midterm", "campsite", "dropdown", "marketer", "huntingdon", "jcpenney", "gelatin", "qvc", "adenosine", "milliseconds", "swatch", "redefine", "backdoor", "jazeera", "envisioned", "pws", "extrem", "automating", "sempron", "cursors", "divert", "phnom", "tbc", "kanji", "vod", "recreate", "smackdown", "dropout", "jrst", "fallujah", "lockout", "moron", "tnf", "townhouses", "horrific", "abacus", "lifeline", "gto", "torquay", "dao", "conjugate", "winch", "elektra", "webtrends", "shes", "sabotage", "blueprints", "limos", "fraunhofer", "warhol", "suppressor", "dogpile", "birt", "rensselaer", "jocks", "unzip", "floss", "sarge", "endnote", "leland", "telugu", "midwifery", "huff", "pornos", "primates", "rmi", "tangerine", "amoxicillin", "graz", "basingstoke", "crawler", "angled", "comin", "longhorns", "doha", "ebsco", "lynchburg", "overriding", "wilshire", "ard", "wachovia", "groff", "ects", "lok", "invicta", "dongle", "ecumenical", "tanaka", "internacional", "kwan", "cdl", "archiv", "placid", "lenin", "marsha", "gradients", "ritalin", "retrieves", "ferrous", "dhaka", "zillion", "chino", "ltr", "caveat", "gangbangs", "toiletries", "bedrock", "clio", "zines", "multipart", "forklift", "repurchase", "orthopedics", "wsw", "vnc", "nfpa", "dnf", "badgers", "chp", "kinh", "appetizer", "disbursement", "weblinks", "telemetry", "consumable", "winn", "depressive", "stabilizer", "ovary", "rune", "accrual", "creatively", "amateure", "abd", "interfaith", "cay", "automata", "northwood", "payers", "gritty", "dewitt", "rect", "ipx", "sebring", "reborn", "bia", "lagrange", "treadmills", "bebop", "streamlining", "trainings", "seeding", "ulysses", "industrialized", "botanic", "bronco", "moodle", "chased", "cti", "intermediaries", "tei", "rotations", "knoppix", "montessori", "biomed", "murine", "entomology", "rodent", "paradigms", "lms", "putter", "fonda", "recursion", "flops", "initiator", "hsu", "pobox", "zeiss", "ferc", "tanf", "sunscreen", "llvm", "antidepressants", "decentralized", "freaking", "whittier", "elmira", "bassist", "oakville", "skaters", "luminosity", "emulators", "toefl", "keychains", "karat", "modis", "ginny", "egan", "posh", "bangles", "stereos", "submittal", "bnib", "moh", "mink", "simulators", "nagar", "zorro", "ecran", "ealing", "ozark", "pfeiffer", "miers", "vickers", "interactivity", "corso", "constructors", "doj", "ipm", "rnd", "jama", "lsi", "malfunction", "magma", "smithfield", "gtr", "canucks", "hammersmith", "sdi", "cricos", "blum", "parkland", "pcbs", "werewolf", "wnw", "midwestern", "ezboard", "charisma", "chilli", "iac", "suspensions", "nss", "smi", "malnutrition", "logcheck", "layton", "gaines", "inbred", "intercultural", "skateboards", "mainboard", "goshen", "functionally", "rabies", "catalysts", "datetime", "readability", "dakar", "dspace", "cappuccino", "modulus", "krause", "cuisines", "maclean", "tuscaloosa", "boosted", "sprayed", "gearing", "glutathione", "adoptions", "tweaking", "angina", "geeky", "rnb", "coupler", "lexapro", "aig", "paisapay", "zanussi", "minimizes", "hillsdale", "balboa", "penh", "wainwright", "agc", "guadalajara", "pinellas", "umts", "zappos", "daimler", "spo", "tadalafil", "everglades", "chipping", "montage", "geelong", "ionization", "broome", "biases", "sprawl", "marantz", "alfredo", "haunt", "hedging", "insulating", "mcclure", "vbr", "qed", "waterfowl", "adress", "reacting", "virtualization", "itat", "collide", "syst", "mankato", "segregated", "ests", "avengers", "technologist", "pigments", "impacting", "lamont", "aquariums", "rigs", "arginine", "moot", 
"pleasanton", "televised", "giftshealth", "acd", "simplistic", "hepa", "amphibians", "encapsulated", "injector", "kessler", "gardenjewelrykids", "leung", "edo", "impl", "grained", "relatos", "newsday", "gmat", "dani", "announcer", "barnsley", "cyclobenzaprine", "polycarbonate", "dvm", "marlow", "thq", "osce", "hackett", "divider", "cortez", "associative", "cmo", "rsync", "minivan", "victorinox", "chimp", "flashcoders", "giraffe", "pia", "stroud", "lefty", "cmg", "westside", "heres", "azimuth", "logistical", "firenze", "okavango", "jansen", "tween", "payback", "hydraulics", "endpoints", "perrin", "quantification", "coolant", "nanaimo", "yahooligans", "prilosec", "hutchison", "parsed", "shamrock", "schmitt", "korg", "warmers", "newt", "frontend", "itanium", "alleles", "weiner", "ola", "halftime", "frye", "albright", "wmf", "clemente", "handwritten", "whsle", "launceston", "wembley", "sandman", "mejores", "scoops", "dwg", "truetype", "eigenvalues", "airbrush", "ppb", "comms", "regexp", "quickstart", "beaverton", "trucker", "willamette", "chiropractors", "tyco", "mirroring", "massively", "aeronautical", "lasalle", "pwr", "wordlet", "hanford", "plac", "exhibitionism", "riser", "redux", "gaim", "audiobook", "compensatory", "couplings", "jeezy", "monsanto", "cleric", "rfq", "contactos", "esri", "equiv", "macrophages", "yao", "npt", "computes", "pickett", "oid", "charismatic", "lda", "teleconference", "mma", "whitepapers", "polycom", "tux", "asymmetry", "xpass", "cfd", "barbour", "tijuana", "niv", "hamiltonian", "cdg", "algebras", "quotient", "wildcat", "inlay", "peta", "paco", "avocado", "octets", "dubuque", "evaluator", "gid", "jumpers", "edmunds", "lerner", "manifolds", "awg", "napoli", "kristy", "variances", "pki", "objectivity", "sistema", "massager", "incubated", "feedster", "federer", "turnovers", "bev", "eai", "changers", "frs", "hereto", "osc", "clinician", "alltel", "gss", "curacao", "rapporteur", "arcserve", "gump", "powerline", "aspell", "avp", "safeguarding", "paxton", "herbie", "yabb", "chromosomal", "hickman", "runescape", "salesperson", "superfamily", "tupac", "cassini", "tobin", "zoos", "activates", "hibernate", "ning", "extremists", "montego", "rohs", "cyclical", "cytokines", "improvisation", "mmorpg", "toured", "tpc", "flatts", "cmf", "archiver", "rainer", "rsc", "covariance", "bobble", "vargas", "gulfport", "airfield", "flipping", "disrupted", "restocking", "lgbt", "extremetech", "citrine", "neoplasm", "rethinking", "xfn", "orientations", "calumet", "pellet", "doggie", "inflow", "msw", "lymphocyte", "weinberg", "saigon", "whiteboard", "wic", "brody", "invertebrates", "elliptic", "ffa", "agonist", "hyperion", "partypoker", "rockingham", "sandler", "schweiz", "grundig", "rethink", "musculoskeletal", "aggies", "prereq", "nikita", "aetna", "truckers", "giro", "laserdisc", "kaspersky", "dor", "determinant", "morpheus", "ayers", "junkies", "ccna", "jacquard", "assesses", "okinawa", "autoscan", "quantified", "pnp", "uppsala", "distortions", "subclasses", "glo", "condolences", "hitter", "livelihoods", "psf", "cala", "telluride", "apnea", "mkt", "floodplain", "valera", "wenger", "crusader", "backlinks", "alphabetic", "delonghi", "tailoring", "shavers", "mcdonnell", "aborted", "blenders", "symphonic", "asker", "huffman", "alistair", "navarro", "modernity", "wep", "uab", "olp", "booties", "cancels", "newsblog", "gangsta", "mgp", "foodservice", "teton", "newline", "prioritize", "clashes", "crohn", "bao", "quicklinks", "ethos", "hauppauge", "solenoid", "stis", "underdog", "fredericton", 
"tep", "bextra", "copywriting", "technol", "mdr", "asteroids", "continous", "hplc", "ovulation", "doggystyle", "quasar", "euthanasia", "schulz", "okanagan", "liters", "tarrant", "blacklist", "clermont", "rooftop", "ebert", "goldfish", "witherspoon", "slimline", "animator", "barbra", "irreversible", "flanagan", "encyclopedias", "csiro", "downtempo", "campsites", "graco", "lighthouses", "xg", "adt", "hemoglobin", "tung", "svga", "postpartum", "condi", "yoda", "jst", "dalai", "xn", "nytimes", "kenzo", "alden", "trampoline", "zi", "restricts", "gees", "intakes", "dogfart", "swearing", "ith", "montel", "ubbcode", "yw", "ninemsn", "lgpl", "jsf", "psychotic", "allyn", "higgs", "pulsed", "ignite", "hornet", "atypical", "contraceptives", "slimming", "dispatcher", "devoid", "jms", "maricopa", "mbs", "northfield", "idf", "elites", "fifo", "correlates", "casters", "heisse", "easygals", "mandalay", "haircare", "climbers", "atty", "madera", "calibex", "mailbag", "smartmedia", "vilnius", "dbl", "doping", "postwar", "strat", "bsp", "barebone", "thrombosis", "smarty", "whitley", "lse", "windermere", "curtin", "dilemmas", "cci", "gwynedd", "edwardian", "hppa", "saunas", "horowitz", "cna", "undergrad", "mocha", "escada", "knockers", "jitter", "supernova", "loughborough", "directtv", "feminization", "extremist", "tuttle", "aoc", "medway", "hobbit", "hetatm", "multipurpose", "dword", "herbalife", "ocala", "cohesive", "bjorn", "dutton", "eich", "tonne", "lifebook", "caster", "critiquer", "glycol", "manicure", "medial", "neopets", "accesories", "faxed", "bloomsbury", "mccabe", "ennis", "colossal", "karting", "mcdaniel", "aci", "brio", "baskerville", "syndromes", "kinney", "northridge", "acr", "emea", "trimble", "webinars", "triples", "boutiques", "freeview", "gro", "screener", "janine", "hanukkah", "caf", "adsorption", "sro", "underwriters", "foxx", "ppi", "noc", "brunton", "mendocino", "pima", "actuators", "internationalization", "wht", "pixies", "pancake", "transmembrane", "photostream", "guerrero", "firth", "hathaway", "emf", "beatty", "andersson", "lunchtime", "miro", "slams", "looping", "crates", "undated", "takahashi", "ramadan", "lowercase", "technologically", "anaerobic", "satelite", "pioneered", "tabloid", "pred", "solubility", "troubleshoot", "etf", "hatcher", "coders", "insecticides", "electrolyte", "watanabe", "firestone", "writeshield", "sph", "descargar", "letterhead", "polypeptide", "velour", "bachelorette", "nurs", "geospatial", "zoned", "pubic", "pizzeria", "mirc", "henning", "acf", "bae", "nitrous", "airspace", "santorini", "vdr", "tms", "convertor", "brahms", "genomes", "workable", "ordinate", "seminal", "rodents", "ytd", "xin", "precursors", "relevancy", "koala", "discus", "giftware", "realistically", "hol", "polska", "loci", "nanotech", "subunits", "awsome", "hula", "laramie", "toothpaste", "maxine", "mennonite", "subtitled", "qms", "maidstone", "abr", "sda", "jcb", "wpa", "fastener", "ctf", "foxy", "sexiest", "jupiterimages", "categorization", "inclusions", "fosters", "conc", "transsexuel", "limbaugh", "cassie", "altman", "lethbridge", "peng", "fillers", "symposia", "nia", "templeton", "stds", "hav", "typography", "ebitda", "eliminator", "accu", "saf", "gardenjewelrykidsmore", "gazebo", "preprint", "htc", "naxos", "bobbi", "cocker", "steph", "protonix", "systemax", "retry", "radford", "implantation", "telex", "humberside", "globalspec", "gsi", "kofi", "musharraf", "detoxification", "ree", "mcnally", "pma", "aureus", "informationweek", "chm", "bonneville", "hpc", "beltway", "epicor", "arrl", 
"iscsi", "grosse", "dfi", "penang", "zippered", "simi", "brownies", "lessor", "kinases", "panelists", "charlene", "autistic", "riu", "equalization", "corvallis", "reused", "volokh", "vari", "fordham", "hydroxy", "technologists", "snd", "dempsey", "httpdocs", "speakerphone", "reissues", "shalom", "khmer", "recordable", "dlt", "dredging", "dtv", "extrusion", "rtn", "preggo", "defamation", "theron", "proteomics", "spawned", "cep", "phendimetrazine", "wiener", "theorems", "samplers", "rfa", "pasco", "hilbert", "tamworth", "itmj", "msd", "etfs", "cde", "praha", "zona", "landry", "crackdown", "lifespan", "maybach", "cysteine", "responsibly", "slideshows", "aceh", "techtarget", "geotechnical", "fantasia", "camisole", "atoll", "shredders", "gags", "rips", "futurama", "hari", "ironman", "ducts", "marmot", "remand", "hawkes", "spoof", "spammer", "presets", "separations", "penicillin", "amman", "davos", "maturation", "internals", "bungalows", "beckinsale", "refractive", "grader", "ecd", "transducers", "ctxt", "doxygen", "rtd", "akc", "cgc", "intercollegiate", "zithromax", "onkyo", "niosh", "rainier", "furman", "newsfeeds", "larkin", "biztalk", "snapper", "hefty", "ipr", "valdosta", "ulead", "delaney", "hairless", "lactation", "innsbruck", "offbeat", "teenie", "protons", "machined", "holman", "eviction", "dic", "pio", "regionally", "thurman", "canaria", "showcasing", "afa", "certifies", "primes", "renton", "lambeth", "frappr", "liturgical", "easements", "aida", "openafs", "assword", "rving", "exogenous", "sram", "sault", "trolls", "flor", "rfe", "oleg", "smo", "analyzers", "scorer", "swami", "oilers", "nik", "mandela", "listers", "ordinated", "arlene", "dividers", "recoverable", "gators", "intraday", "cruces", "hollister", "enews", "lactose", "gifford", "competitively", "rockstar", "hampstead", "chrono", "nahum", "raja", "nextlast", "xinhua", "ltl", "lofts", "feral", "neurosurgery", "ringgit", "ukranian", "parmesan", "kiosks", "pnt", "hooking", "wip", "rawlings", "physiotherapy", "wrexham", "billabong", "prepayment", "jonesboro", "bangers", "handgun", "miscategorized", "itp", "desoto", "innovator", "mitochondria", "mewn", "sername", "usmc", "amicus", "vijay", "redirecting", "gma", "shih", "cervix", "biblia", "cosby", "lufthansa", "msnshopping", "sewerage", "ele", "mantis", "alerted", "lsp", "intron", "bri", "remodel", "carpal", "natalia", "cjk", "specialises", "condiments", "adventist", "eggplant", "coun", "ctv", "wycombe", "monaghan", "blogarama", "undocumented", "esb", "vaccinations", "gutierrez", "bernd", "needham", "inuit", "wordnet", "wedi", "keyes", "photocopying", "tca", "avn", "dressage", "cafepress", "phylogenetic", "kurtz", "morbid", "inno", "refresher", "freakonomics", "impreza", "cheeky", "arco", "proponent", "brasileiro", "kar", "rojo", "perscription", "aic", "streisand", "eastside", "bioethics", "redo", "piranha", "rps", "cmu", "uncompressed", "vps", "pseudomonas", "sotheby", "avionics", "minimization", "ascot", "linearly", "dolan", "titleist", "genesee", "grays", "fdc", "psychiatrists", "bom", "multiplex", "srt", "bradbury", "babysitting", "asd", "beehive", "aeon", "livin", "leblanc", "shorty", "injecting", "discontinuity", "littlewoods", "enquirer", "downturn", "fission", "modulator", "spybot", "hrc", "worldview", "choreography", "sfx", "nth", "buffering", "denison", "killarney", "scoping", "srm", "mammography", "epc", "nepalese", "communicable", "enzymatic", "melanogaster", "extravaganza", "kamloops", "spss", "tftp", "rotherham", "underestimate", "hana", "mycareer", "pra", "cooley", 
"gratuitement", "eriksson", "schaumburg", "exponentially", "chechen", "carribean", "bunnies", "choppers", "psyc", "pedersen", "earphones", "outflow", "scarab", "toasters", "skiers", "eax", "jamal", "raunchy", "biologically", "nbr", "ptc", "qe", "zyrtec", "riyadh", "pell", "quicksearch", "coates", "octane", "mtl", "krabi", "funders", "apj", "kal", "fai", "ccp", "environmentalists", "fatah", "ifa", "ackerman", "gbc", "soooo", "soapbox", "newberry", "deanna", "bestellen", "elongation", "webcrawler", "wanking", "ofsted", "yb", "dortmund", "boardroom", "nico", "taping", "mro", "atleast", "somatic", "fcs", "niki", "malloc", "lanzarote", "slump", "nerds", "laude", "mec", "simulating", "enrol", "bts", "cflags", "xps", "datafieldname", "wycliffe", "dda", "apts", "aikido", "slo", "batches", "dap", "ssr", "kournikova", "moshe", "fsbo", "shippers", "mtc", "cav", "rrr", "wildflowers", "polygons", "delimited", "noncompliance", "upi", "sna", "vidsvidsvids", "herts", "bellagio", "webapp", "haryana", "eeg", "dlls", "babysitter", "linotype", "produkte", "lesbica", "pes", "mediators", "hone", "riggs", "jockeys", "seater", "brightstor", "deliverable", "sanding", "buffered", "orton", "indesign", "lakeshore", "ctl", "aland", "clarins", "pelham", "huf", "ronin", "comps", "mgi", "greco", "kontakte", "edema", "leaderboard", "mce", "hsv", "geocities", "argc", "palos", "ori", "carotid", "citi", "squish", "cny", "gorham", "calphalon", "blasen", "midwives", "nara", "nab", "netbeans", "cyclones", "tapety", "snowflake", "blackhawk", "weinstein", "sterilization", "assessors", "chenille", "dehydration", "haircut", "fhwa", "misconceptions", "alternet", "undeclared", "bari", "songwriters", "tolerances", "incarceration", "hierarchies", "redondo", "lactating", "aquamarine", "yg", "edm", "sedimentation", "optometry", "mobilize", "attendee", "bmd", "dialogs", "rpt", "viktor", "trajectories", "federico", "openvms", "ppo", "pag", "precio", "leapfrog", "thermoplastic", "sexchat", "kingman", "deterrent", "ghraib", "duplicating", "tuba", "encodes", "garamond", "cirrus", "alanis", "kilometer", "ballarat", "wacom", "nsta", "actionscript", "ivf", "modifiers", "hijack", "thomasville", "accorded", "fryer", "namco", "xmms", "dammit", "produkter", "motorhome", "ade", "mfrs", "editable", "greats", "milosevic", "marcy", "boron", "creighton", "wolfenstein", "bolivian", "rowbox", "pauls", "phobia", "superfund", "vcc", "sadler", "piercings", "riffs", "briana", "geronimo", "tetra", "freakin", "alb", "retrofit", "cytokine", "stylesheets", "coalitions", "tactile", "cinematography", "vivitar", "wannabe", "blogwise", "amador", "skier", "storyteller", "bpa", "pelicula", "ischemia", "fms", "comput", "wristbands", "livecams", "hibiscus", "rheumatology", "edn", "somers", "cray", "iol", "waterbury", "selectivity", "carlow", "maxx", "haggai", "demonstrators", "raiser", "sanger", "mullen", "periphery", "predictors", "woodwind", "snl", "modblog", "repo", "burnley", "antispyware", "sumter", "rcd", "woodside", "tylenol", "megabytes", "backlight", "naturist", "zephaniah", "airbags", "plethora", "cabriolet", "yh", "retiree", "atol", "sonet", "anthropological", "mikasa", "iverson", "cae", "buckeye", "dollhouse", "stereotype", "uship", "ubisoft", "escalade", "breakaway", "produkt", "sealants", "montclair", "dinghy", "gnus", "melia", "feedbacks", "concurrency", "healthgrades", "hoya", "revista", "lrc", "flied", "tvr", "joliet", "ped", "chappell", "wollongong", "peo", "blowers", "doubleday", "guidant", "remodeled", "eea", "bcp", "situational", "nasd", "chakra", 
"dfa", "jammu", "wetsuits", "edc", "birkenstock", "vivendi", "emulsion", "fielder", "sorta", "courseware", "biosphere", "skb", "plumpers", "muschi", "qcd", "ollie", "gurgaon", "rwxr", "federalism", "gizmodo", "laminating", "coltrane", "colitis", "unincorporated", "liang", "blogged", "cryogenic", "antispam", "homologous", "hassles", "symptomatic", "rtc", "trademanager", "bipartisan", "rhodium", "exchanger", "preseason", "januar", "bumble", "intimidating", "randi", "placenta", "abbotsford", "upn", "dulles", "brainstorming", "wea", "dougherty", "sarcoma", "sniffer", "rotorua", "bahasa", "iona", "bioscience", "tricia", "residuals", "gforge", "copd", "homie", "leesburg", "afm", "xref", "flashpoint", "mobygames", "cortland", "mailers", "tented", "nicholls", "skew", "mahoney", "infoplease", "budd", "acn", "hollands", "muni", "modernism", "elizabethtown", "dunhill", "eee", "didn", "guidebooks", "scotts", "wye", "wsj", "biosciences", "macgregor", "atms", "habakkuk", "depaul", "binge", "cyst", "hexadecimal", "scissor", "progra", "smyth", "mott", "jazzy", "headboard", "diflucan", "bronson", "standardised", "cations", "cics", "ecole", "centos", "hysterectomy", "housings", "wrc", "movado", "mcdonough", "krista", "pharmacokinetics", "chantal", "morristown", "riverview", "loopback", "torsion", "ultrastructure", "lucida", "leftover", "sykes", "anecdotal", "rheims", "integrators", "unlv", "arboretum", "sharealike", "lowepro", "erc", "ischemic", "illustrators", "plugging", "macbook", "bjp", "arent", "vignette", "qf", "homebrew", "altoona", "pheromone", "fireball", "decorator", "franken", "netpbm", "antalya", "harmonious", "nne", "recordkeeping", "modernisation", "myx", "sdr", "muskegon", "daley", "modality", "liberalisation", "utilise", "arturo", "appellee", "granules", "multidimensional", "rollout", "homegrown", "datamonitor", "reinforces", "dirham", "leahy", "myc", "esophageal", "kira", "approximations", "forzieri", "intermediates", "kgs", "albumin", "grantees", "loveland", "maloney", "sativa", "paramedic", "trademarked", "edgewood", "stressing", "potable", "limpopo", "intensities", "oncogene", "antidepressant", "ballpark", "powys", "orca", "mascara", "proline", "molina", "nema", "wipers", "snoopy", "informationen", "esf", "riverdale", "unleash", "juelz", "bls", "noarch", "koss", "captioned", "paq", "summarizing", "ucsd", "gleason", "baritone", "independant", "chlamydia", "relativistic", "rotors", "driscoll", "andalucia", "mulher", "bagels", "subliminal", "insecticide", "segal", "spline", "undisclosed", "noni", "letterman", "almeria", "bryson", "wtb", "towson", "htaccess", "malayalam", "crue", "loo", "pinoy", "pallets", "uplink", "sheboygan", "terrence", "ghc", "gateshead", "probationary", "abducted", "warlock", "breakup", "fiche", "juror", "bowden", "goggle", "metabolites", "brainstorm", "smu", "ahl", "bateman", "egcs", "chirac", "museo", "coffeehouse", "scitech", "gcn", "trolling", "elmore", "grads", "lz", "andi", "localpref", "kayla", "ccl", "smeg", "donut", "libido", "fuselage", "diabetics", "ballerina", "crp", "morgantown", "paseo", "ptsd", "redheads", "curran", "diam", "ragnarok", "hkd", "summarised", "jx", "caitlin", "conscientious", "bandai", "hobs", "eft", "endometriosis", "cushioning", "mcneil", "belvedere", "nar", "acetyl", "boomer", "perinatal", "idm", "automake", "multichannel", "petr", "daredevil", "corcoran", "mrp", "holliday", "daimlerchrysler", "bowes", "mcgowan", "agfa", "mep", "goss", "mulch", "jvm", "harwood", "ranma", "marinas", "mobipocket", "streptococcus", "murcia", "landfills", 
"mcknight", "edd", "baud", "mcfarland", "designline", "undies", "prepay", "kodiak", "printout", "nonresident", "marysville", "curso", "palmos", "dorsey", "roo", "soulful", "websearch", "infotrac", "mpgs", "fouls", "openssh", "bravenet", "etsi", "serendipity", "tq", "sequentially", "yogi", "landslide", "howtos", "skool", "evolves", "iberia", "anakin", "duffel", "goodrich", "subfamily", "perennials", "ary", "matchmaker", "sagittarius", "locates", "dysfunctional", "maastricht", "bulletproof", "mcr", "uga", "stenosis", "chg", "recentchanges", "abrasion", "eindhoven", "opportunistic", "pcl", "analogs", "bba", "hillcrest", "cantor", "econometric", "trafford", "opie", "cro", "elkhart", "ringers", "diced", "fairgrounds", "cuyahoga", "plt", "cartons", "mustangs", "enc", "addons", "wstrict", "gow", "pharmacological", "headwear", "paediatric", "genitals", "hendricks", "ivr", "telemedicine", "judi", "icom", "academically", "chilton", "cbo", "amaya", "flickrblog", "fulbright", "foaf", "cllr", "xh", "fulltext", "centrum", "tecra", "kinks", "unisys", "preschools", "mcallen", "contoured", "aberdeenshire", "icm", "schenectady", "schematics", "dojo", "eserver", "nin", "interfacing", "borrowings", "hrt", "heparin", "universiteit", "hardcopy", "connective", "nihon", "oso", "adkins", "dunlap", "nsc", "irr", "clonazepam", "wikiname", "gaithersburg", "biophysics", "chromatin", "mathis", "bulova", "roxanne", "fca", "drg", "refurb", "wasteland", "plotter", "findlay", "cymraeg", "alc", "meek", "phonebook", "doodle", "arb", "wabash", "chronologically", "wms", "whitfield", "mchenry", "eide", "assy", "dusseldorf", "mmol", "shabbat", "nclb", "accommodates", "cmi", "stacker", "msf", "touchdowns", "plasmas", "barbell", "awk", "bibs", "sneaky", "smarts", "lankan", "synthetase", "lightwave", "alignments", "coached", "jac", "framingham", "opensource", "restroom", "videography", "lcr", "spatially", "doanh", "preprocessor", "cohn", "aon", "marginally", "ocs", "bak", "cavalli", "ddc", "grunge", "invoicing", "bigtits", "carney", "braintree", "southside", "vca", "flipped", "cabrera", "mindy", "surfaced", "glam", "cowgirl", "loginlogin", "mtr", "nakamura", "layoffs", "matures", "cty", "apm", "iggy", "margarine", "sneaker", "glycoprotein", "gcs", "queued", "sab", "hydroxide", "hanley", "cellulite", "hwang", "mtd", "mcqueen", "passat", "fluff", "shifter", "cartography", "firstprevious", "vito", "predicates", "bcl", "douay", "zeitgeist", "nickelodeon", "dru", "apar", "tending", "hernia", "preisvergleich", "britton", "stabilizing", "socom", "wsis", "anil", "midsize", "pullover", "lpn", "hoodwinked", "photoes", "beastie", "yucca", "harvester", "emmett", "shay", "obstructive", "pacman", "retroactive", "briefed", "bebe", "krusell", "clickz", "kermit", "gizmo", "atherosclerosis", "demography", "migraines", "wallingford", "newborns", "ljubljana", "restarted", "rnc", "meow", "thayer", "kilograms", "packager", "populate", "pembrokeshire", "arcane", "impractical", "tcg", "decentralization", "honeymoons", "authoritarian", "alu", "judaica", "tropicana", "tyan", "cardholder", "peavey", "gothenburg", "geocaching", "ident", "fluoxetine", "tipton", "teva", "lsa", "effortlessly", "failover", "cysts", "primetime", "kenosha", "kokomo", "penney", "snorkel", "amin", "iridium", "dwyer", "conserving", "toppers", "cfg", "tvc", "alternator", "nysgrc", "underwriter", "springhill", "panhandle", "joann", "isoform", "borden", "bombed", "elt", "halton", "guaranteeing", "fasta", "gonzaga", "boobies", "nadine", "breitling", "nutr", "ingersoll", "sandia", "pacs", 
"azur", "helms", "beos", "srcdir", "sherpa", "tuff", "ligands", "smalltalk", "sorghum", "nucleotides", "mmv", "ebi", "sbd", "lmao", "enhancers", "collaborated", "produ", "lila", "slotted", "nnw", "fila", "decking", "boz", "accelerators", "howstuffworks", "neighbourhoods", "michal", "rab", "hideaway", "dwayne", "coda", "cyanide", "kostenlose", "grotesk", "marek", "interlibrary", "provenance", "sra", "sog", "zinkle", "fanfare", "mapper", "boyce", "mlk", "dystrophy", "infomation", "footballs", "emailemail", "bathurst", "fof", "duracell", "feinstein", "magnavox", "evra", "servlets", "tss", "neill", "epithelium", "thc", "webbing", "bef", "jaya", "mame", "ppe", "emusic", "tso", "epp", "glencoe", "untested", "overviews", "affleck", "flinders", "informationhide", "hearst", "verifies", "reverb", "kays", "commuters", "rcp", "welivetogether", "crit", "sdm", "durbin", "riken", "canceling", "brookhaven", "gauss", "artistry", "phpnuke", "falkirk", "pitts", "dtp", "kwon", "rubric", "headlamp", "operand", "kristi", "yasmin", "gnl", "acdbvertex", "illini", "macho", "ningbo", "staphylococcus", "busting", "foss", "gfp", "yhoo", "sloane", "wooster", "delong", "mdi", "nilsson", "substring", "gac", "smelly", "gallatin", "hangar", "ephemera", "heli", "choo", "testicular", "miramar", "wearable", "carling", "buildup", "weaponry", "swann", "lian", "landline", "entrees", "corpora", "priv", "geeklog", "antiviral", "profiler", "lodi", "minimalist", "wolverines", "bbcode", "protagonist", "rata", "freephone", "plm", "raytheon", "refseq", "kingfisher", "numark", "moline", "esac", "takers", "gts", "amana", "worldcom", "hiroyuki", "procter", "pragma", "winkler", "walleye", "icf", "bagel", "asbury", "alpharetta", "syncmaster", "wists", "xfx", "wicklow", "tsr", "baer", "yf", "cmr", "chil", "leftfield", "lettings", "walkway", "coos", "petrochemical", "fia", "chula", "zalman", "carer", "humankind", "cmms", "hawley", "inverters", "mccormack", "pdu", "faceplates", "yeats", "motorhomes", "cie", "icts", "mcmurray", "zucchini", "lanai", "pwc", "chiral", "fermi", "newsreader", "multiculturalism", "cuddly", "listinfo", "shp", "primedia", "chl", "estrada", "pricey", "shekel", "apn", "diocesan", "readout", "clarifies", "klm", "dimes", "revlon", "dtr", "cranky", "paparazzi", "zheng", "merida", "bambi", "interceptor", "rox", "jamster", "noritake", "banding", "nonstick", "origami", "marketwatch", "yeti", "arf", "umbilical", "linz", "donates", "foursome", "lawrenceville", "azul", "springdale", "moisturizing", "loeb", "isr", "huston", "gatos", "disqualification", "suunto", "angiotensin", "spitfire", "wfp", "realnetworks", "summation", "plame", "querying", "gpc", "autonomic", "fq", "pathname", "novartis", "ufos", "manatee", "qh", "restructure", "larval", "zeu", "socal", "resettlement", "mistakenly", "radiative", "drapes", "intimately", "koreans", "realy", "womans", "groin", "greenway", "spamassassin", "mata", "gigagalleries", "algerian", "frat", "egullet", "electrics", "joni", "stencils", "reinventing", "reqs", "latte", "shaolin", "shopped", "beattie", "hrm", "hypnotherapy", "muppet", "abp", "checkpoints", "tpa", "derechos", "pieter", "timesselect", "viacom", "strcmp", "kardon", "sideshow", "classifier", "westbrook", "repro", "moser", "studi", "sdf", "colonialism", "supermicro", "scorers", "sitcom", "pastries", "aldo", "azim", "authorizations", "holsters", "neuropathy", "backorder", "humphreys", "metroid", "vcs", "nikkor", "mcf", "jacobsen", "conjugated", "lcc", "unethical", "vacances", "whos", "asr", "alphanumeric", "grumpy", "fixedhf", 
"holm", "sirens", "lfs", "benelux", "caters", "slp", "prasad", "kirkpatrick", "jamahiriya", "tol", "coagulation", "girly", "bnp", "archdiocese", "orbiter", "edgewater", "lem", "keyless", "repatriation", "tortilla", "dissociation", "industrie", "watercolour", "ucb", "waite", "madsen", "mnh", "opticians", "nop", "newmap", "mse", "bottleneck", "regressions", "linton", "sio", "buckeyes", "bodywork", "applique", "jewell", "gef", "hornby", "redefined", "empowers", "informix", "tots", "goalkeeper", "startseite", "blurb", "feedburner", "dominatrix", "norcross", "compiles", "bancorp", "encoders", "pmp", "boomerang", "temecula", "ghg", "structurally", "caveats", "homeownership", "birdie", "disseminating", "lanyard", "horst", "interlock", "pagers", "esophagus", "ocz", "sexshow", "jackpots", "optometrists", "zak", "krueger", "hickey", "erode", "unlicensed", "termite", "ibuprofen", "drugstore", "audiology", "gannon", "integrals", "fremantle", "lysine", "sizzling", "macroeconomics", "tors", "thule", "gtx", "eeprom", "kaleidoscope", "dmitry", "thawte", "busters", "officemax", "absorber", "nessus", "imager", "cebu", "kannada", "sailboat", "hectare", "netball", "furl", "holographic", "defra", "salaam", "respirator", "countertop", "gla", "installments", "hogg", "partying", "weatherford", "sav", "exited", "crispy", "coffees", "knowhere", "sequin", "bendigo", "unis", "bandwagon", "janssen", "myst", "polymerization", "byval", "nozzles", "labview", "snitz", "rpi", "hcc", "unbelievably", "pasting", "butyl", "ppd", "forested", "unrivaled", "roadways", "varna", "maidenhead", "almanacs", "gfx", "randomness", "middlebury", "muon", "ringo", "svr", "caliper", "lmb", "woolf", "innovators", "anode", "microprocessors", "tps", "stk", "siting", "misinformation", "aneurysm", "closeups", "kinsey", "prp", "cnbc", "eroded", "tris", "lonnie", "hartlepool", "bol", "alastair", "agr", "fafsa", "javac", "uclibc", "fodor", "afrikaanse", "colognes", "contestant", "snell", "prescreened", "believable", "anesthesiology", "elmhurst", "misha", "melatonin", "bongo", "rmb", "mdf", "terr", "xw", "bloke", "avc", "oxnard", "cess", "cedex", "electrochemical", "brevard", "brw", "brenner", "slalom", "waterhouse", "calif", "acces", "aquatics", "cari", "lurker", "buffett", "chews", "hoodies", "phony", "vila", "fsf", "gmake", "nikko", "grasslands", "monolithic", "polifoniczne", "bugtraq", "cpage", "engr", "subcontract", "prophylaxis", "texinfo", "ings", "cotswold", "guillermo", "unstructured", "boop", "hitman", "tla", "mercier", "restated", "nukes", "duplicator", "mehta", "macomb", "fundamentalism", "australasian", "isk", "rerun", "moda", "segmented", "cranberries", "leas", "pleated", "handshake", "digests", "innovate", "goode", "erisa", "jeb", "dismantling", "ferrell", "hellometro", "leavenworth", "snowmobiling", "fora", "fdr", "gaba", "vfs", "dlc", "byers", "codon", "webnotify", "sfr", "pylori", "loomis", "acidity", "gershwin", "formaldehyde", "welder", "cyp", "kendra", "switcher", "ocaml", "goldie", "mab", "gooshing", "mockingbird", "ponte", "xlt", "hogwarts", "juicer", "lloyds", "echelon", "gabba", "arranger", "umbro", "metallurgy", "baa", "neq", "liteon", "queuing", "vsize", "shiite", "valuing", "argon", "coheed", "hooray", "flightplan", "carefree", "souza", "kershaw", "millar", "biotin", "salter", "testicles", "morph", "econometrics", "remo", "msec", "marconi", "ote", "receiverdvb", "expatriate", "tantra", "codified", "ncs", "overlays", "thingy", "comforters", "conservatories", "ruskin", "dpf", "cyndi", "germination", "lipoprotein", 
"ayurvedic", "planetarium", "tribeca", "bihar", "keenan", "discos", "eastbourne", "robles", "gianni", "dxf", "homebuyers", "nogroup", "freescale", "wiccan", "sess", "merrimack", "groton", "billboards", "searcher", "uttar", "mailinglist", "metacrawler", "priser", "osceola", "bioterrorism", "tourmaline", "leatherman", "microns", "unifying", "anaesthesia", "videogame", "aws", "dtc", "chc", "intranets", "escalating", "bluebird", "iucn", "gls", "mahjong", "interstellar", "kenton", "underestimated", "groupsex", "loudspeakers", "flexi", "vst", "junctions", "redman", "transferase", "bvlgari", "hampden", "nls", "selby", "wausau", "stoppers", "snowshoeing", "uppercase", "cirrhosis", "publib", "metrology", "connexion", "stoneware", "moncton", "traci", "krumble", "pathogenic", "rasmus", "raritan", "riverfront", "humanist", "usefull", "pompano", "skewed", "cleary", "nepa", "ludacris", "sequenced", "xiao", "teaming", "flatshare", "aromas", "positional", "alesis", "glycine", "vee", "breakthroughs", "cashback", "throwback", "charlestown", "nexrad", "gestation", "powering", "magee", "osnews", "logins", "sadism", "emb", "muncie", "panoramas", "plenum", "ato", "aotearoa", "foro", "hydrolysis", "flac", "labia", "immunizations", "existential", "umc", "sweaty", "segond", "addis", "beasley", "breached", "rounder", "rectum", "nha", "perched", "jah", "dsr", "lta", "videoconferencing", "cytoplasm", "makin", "sedimentary", "laurier", "aachen", "wnd", "olney", "massimo", "chlorophyll", "scop", "shipyard", "centering", "manley", "sunroof", "dvorak", "etch", "answerer", "briefcases", "gwent", "bogart", "amit", "kaufen", "untranslated", "raffles", "reconnect", "teeny", "benthic", "mcmanus", "infotech", "carlin", "lithograph", "ure", "stoner", "repost", "iras", "resurfacing", "kelli", "spitzer", "jae", "dunne", "hyperbolic", "pstn", "bisque", "anzeigen", "standoff", "westbury", "solano", "kailua", "acoustical", "photovoltaic", "orchestras", "redline", "reggaeton", "qstring", "declan", "tama", "wank", "virol", "iy", "solvers", "linuxworld", "canadiens", "rockabilly", "smokin", "tumours", "loudspeaker", "handicapping", "tatu", "evangelion", "excretion", "breakage", "negra", "horsham", "jing", "petro", "notations", "midgets", "comprar", "homemaker", "neverwinter", "ddt", "categorize", "geophys", "loa", "tga", "foreskin", "jornada", "inetpub", "premierguide", "reflexology", "sophos", "helphelp", "foundries", "registrants", "sweats", "atvs", "capstone", "adecco", "sensei", "publicized", "transessuale", "federalist", "objectweb", "portrays", "postgres", "fesseln", "hidalgo", "prosthetic", "kristine", "microfiche", "dce", "watergate", "setbacks", "karan", "cdata", "kfc", "grandview", "amerisuites", "aural", "gatekeeper", "heinemann", "decommissioning", "nq", "gestion", "thermodynamic", "patrice", "profiled", "disambiguation", "mmmm", "bittersweet", "mul", "gustavo", "isolating", "xine", "bigfoot", "nrw", "mycobacterium", "yamada", "coldwater", "whitehouse", "cultivars", "santorum", "mugabe", "margo", "rundown", "carbondale", "gizmos", "effingham", "beastility", "agus", "ucd", "dowling", "mitac", "steels", "oakdale", "nda", "mystique", "cortislim", "oes", "disp", "loaders", "trouser", "oai", "hoboken", "sepia", "differentials", "sabi", "dancehall", "sarajevo", "brava", "underscores", "roadshow", "fbo", "sabah", "russel", "nephrology", "squamous", "mvn", "wz", "malden", "mita", "orissa", "ise", "vfr", "chianti", "minsk", "coffey", "domestically", "qantas", "brandi", "artefacts", "solihull", "tation", "tchaikovsky", "refineries", 
"ronan", "pricewaterhousecoopers", "swimsuits", "automates", "wylie", "whomever", "sidelines", "shaffer", "toolbars", "preservatives", "wagga", "kenai", "bobs", "mortensen", "unplanned", "characterisation", "ppa", "mip", "peering", "fopen", "vgn", "wmissing", "csn", "rudd", "bourke", "pelvis", "goodmans", "potluck", "ioffer", "cial", "davidoff", "creamer", "tsc", "gfs", "contax", "columbine", "portables", "fledged", "aquinas", "kidz", "edonkey", "hourglass", "pagetop", "paloma", "gunmen", "disables", "ssangyong", "antiretroviral", "moschino", "hoyt", "okc", "lockport", "pittsfield", "pollack", "hoyle", "arousal", "inhibiting", "reo", "mammary", "trampolines", "hillman", "trimmers", "bridgestone", "muvo", "wcities", "boi", "diddy", "conveyancing", "apl", "echinacea", "rok", "phish", "frigidaire", "oxo", "hah", "halibut", "penrith", "brno", "silverware", "teoma", "rcra", "mlo", "ideologies", "feminists", "fff", "sculpted", "uq", "rta", "embo", "rollin", "contraindications", "einai", "ssrn", "oup", "rebuttal", "underside", "alumnus", "archeology", "preise", "ontologies", "fenders", "frisbee", "hmmmm", "tipo", "hyperactivity", "seagull", "nanotubes", "polos", "bonaire", "hehehe", "fim", "reece", "elsif", "spinners", "annealing", "maximizes", "pld", "ctp", "eurasia", "dickey", "ako", "carpeting", "yorkers", "ltte", "eukaryotic", "bexley", "sions", "bremer", "marisa", "frustrations", "delgado", "resection", "dioxin", "islamist", "brant", "hss", "kubrick", "fft", "touchscreen", "layoff", "facelift", "decoded", "gry", "shitty", "dodger", "ihs", "lessig", "zaf", "revell", "sched", "rpgs", "euphoria", "acuity", "popper", "lockdown", "nsp", "transmittal", "heatsink", "assholes", "hayman", "novi", "equilibria", "requester", "allrecipes", "serialized", "hangzhou", "bjork", "stringer", "nanjing", "milligrams", "jab", "snohomish", "strathclyde", "yoko", "intramural", "curated", "finalised", "tania", "cdd", "gund", "tascam", "noam", "hardstyle", "arun", "cga", "waistband", "fibroblasts", "leandro", "metastasis", "userpics", "greenbelt", "leuven", "printk", "reachable", "pss", "radioactivity", "caine", "gyfer", "boch", "howdy", "cocksucking", "marlon", "timmy", "liga", "gregorian", "reorder", "aerosols", "archeological", "logarithmic", "sexape", "robby", "completions", "yearning", "transporters", "sandalwood", "megs", "idp", "rapidshare", "tsb", "omnibook", "gamepro", "bca", "decontamination", "tamiya", "euclidean", "salina", "woodford", "formalism", "aching", "nbs", "audigy", "libexec", "eyepiece", "bibl", "bobcat", "freehand", "guo", "ltsn", "itil", "nugent", "esr", "sce", "killeen", "jamming", "applicator", "icrc", "mezzanine", "meghan", "cupertino", "logfile", "zed", "humidifier", "padilla", "susanne", "collapses", "yung", "longwood", "krw", "mainstay", "descr", "dtm", "atcc", "tasman", "accessoires", "mucosa", "dachshund", "zf", "syringes", "breakpoint", "telus", "stoney", "nepali", "regimens", "wok", "canola", "slicing", "reproducible", "experi", "skydiving", "sof", "bogota", "discogs", "datagram", "videographers", "cag", "nicks", "platelets", "trannies", "pamper", "nineties", "bracknell", "disinfection", "perfusion", "postseason", "tigerdirect", "smoothie", "punisher", "tabbed", "tcu", "alene", "lismore", "coquitlam", "auctioneers", "somethin", "daniela", "dials", "enhydra", "kyrgyz", "iia", "bianchi", "iata", "zim", "buscador", "roadrunner", "blackhawks", "jsr", "misfits", "quiksilver", "nwn", "sqlite", "siu", "tarantino", "addi", "jkt", "buyout", "replays", "wcs", "adrenergic", "bottling", 
"caldera", "baseman", "botanicals", "techie", "farr", "vtech", "donde", "beyer", "versiontracker", "pse", "hashcode", "tradeshow", "lewisville", "aster", "transparencies", "bloomingdale", "northrop", "revo", "overkill", "nlrb", "lazio", "enr", "diag", "chiapas", "freedict", "disponible", "morissette", "effortless", "hydroelectric", "cranial", "hindsight", "orientated", "abrasives", "fpc", "brl", "vpns", "feingold", "thunderbirds", "dha", "wot", "geog", "harrah", "wxga", "nmfs", "boynton", "cashing", "spousal", "abusers", "twinlab", "vick", "aml", "sodimm", "copley", "mallard", "twikipreferences", "airman", "configurator", "clc", "neurobiology", "diamante", "dreamworks", "corsets", "dowd", "escrituras", "bureaucrats", "songtext", "wham", "phpgroupware", "cyclin", "conyers", "youll", "kowloon", "fairytale", "pickens", "bybel", "mln", "wres", "barm", "amplitudes", "nmap", "nvq", "ocd", "ryu", "microcontroller", "premiered", "institutionalized", "hamm", "gyno", "bhopal", "circulatory", "centerline", "chairmen", "guerlain", "pedo", "hussain", "portlet", "proscar", "histone", "opioid", "totalling", "pyobject", "translational", "lehmann", "keaton", "elkins", "jamison", "interstitial", "inest", "tanzanite", "helical", "redlands", "sagradas", "fondue", "windscreen", "adderall", "othello", "supersonic", "pocatello", "maniacs", "sysadmin", "foothill", "earmarked", "highspeed", "uncheck", "rapes", "vlad", "cif", "photosynthesis", "junit", "remotes", "epo", "mcm", "ucf", "nacl", "sfa", "empirically", "dfes", "addon", "pon", "feelin", "callmanager", "deteriorating", "statenvertaling", "cypriot", "entert", "fascia", "woburn", "jalan", "fryers", "cally", "layering", "geriatrics", "picky", "conley", "boces", "barth", "lvm", "mooring", "mcdonell", "expats", "bizarr", "loadavg", "perla", "micheal", "bok", "friendster", "endoscopy", "msx", "buzzwords", "lumen", "airwaves", "jagger", "setups", "inman", "schindler", "limewire", "drawstring", "midrange", "frodo", "superpower", "recliner", "trisha", "trium", "utm", "grimsby", "wyeth", "urs", "kds", "adjuster", "impeccable", "shari", "marketplaces", "tefl", "sudo", "technische", "characterizing", "gawker", "gagging", "cyclist", "atg", "generics", "richey", "magneto", "crunchy", "teletext", "drwxrwxr", "crabtree", "underfull", "hemscott", "webmasterworld", "objc", "musicmatch", "sealant", "timberwolves", "harriers", "shangri", "robo", "roto", "mnem", "nnn", "aidan", "fidel", "executables", "concertos", "vob", "extracurricular", "haverhill", "squirters", "hbp", "tonal", "atr", "ashtray", "gpu", "payton", "psychoanalysis", "hesitant", "poco", "nedstat", "rcmp", "microchip", "eroticos", "fea", "kors", "susquehanna", "userinfo", "modulo", "antler", "bangladeshi", "desking", "nikolai", "nuys", "ludhiana", "rdr", "spankings", "chatrooms", "pretreatment", "brittney", "jer", "tianjin", "qj", "winnebago", "mcfadden", "notecards", "tix", "murfreesboro", "quaternary", "subtracted", "tropez", "mcgovern", "olivetti", "hikers", "vivaldi", "cuties", "lnb", "gilchrist", "preheat", "bernadette", "microdrive", "rookies", "overton", "potpourri", "neiman", "seb", "sigs", "jarhead", "momo", "uzbek", "ttt", "dubya", "signatory", "cim", "energized", "brite", "shs", "minimums", "needlepoint", "deng", "camargo", "oems", "bolle", "webrings", "ehrlich", "azz", "firefighting", "icalendar", "disallow", "exch", "mclachlan", "zaragoza", "brixton", "efi", "kilo", "tcmseq", "moisturizer", "suonerie", "remanded", "empresa", "shoebox", "disagrees", "lowdown", "trove", "filer", "apologetics", 
"englisch", "texarkana", "threonine", "metart", "siti", "encephalitis", "tomatometer", "arias", "kenner", "anamorphic", "subspace", "cleats", "ifp", "circ", "pressured", "peppermill", "sml", "clarifications", "zionism", "pti", "retin", "klicken", "disjoint", "ema", "openldap", "koenig", "carats", "hijacked", "tch", "burlingame", "checkbook", "candice", "coworkers", "eno", "karla", "cus", "gio", "statm", "haifa", "reincarnation", "budweiser", "heuristics", "tunisian", "hologram", "macular", "eral", "refinishing", "chia", "celestron", "leyland", "reloading", "hombre", "munch", "basf", "rolleyes", "bidirectional", "ahhh", "chica", "starfish", "kurdistan", "boro", "heartbreak", "preps", "irina", "mylar", "congestive", "dmd", "schilling", "twikivariables", "battleground", "tectonic", "equate", "corbis", "inflatables", "naacp", "pathologist", "minnetonka", "langston", "memoriam", "underserved", "rectifi", "elmwood", "fukuoka", "glbt", "rsi", "parr", "pob", "ods", "welles", "gujarati", "sportsline", "leno", "healthwise", "vrml", "sida", "azres", "sapporo", "jscript", "predictability", "pajama", "paddlesports", "adenocarcinoma", "toning", "gestational", "kravitz", "ptcldy", "snowball", "adl", "travelogues", "crl", "zocor", "ecotourism", "leadtek", "hkcu", "morehead", "niro", "fueling", "orthopaedics", "crayons", "tikes", "revamped", "olap", "curfew", "hamlin", "brandeis", "bree", "stylistic", "corneal", "beckman", "crusher", "riva", "prefs", "militaria", "marshfield", "elo", "swank", "matisse", "villeroy", "proactively", "mccarty", "zas", "acdbcircle", "horney", "modeler", "progressives", "grosvenor", "linger", "creationism", "dork", "claritin", "psychosis", "fei", "firsthand", "gigi", "cranston", "hayley", "ags", "muted", "turbidity", "mountable", "kiki", "vz", "avondale", "oceanographic", "zzz", "tsg", "epl", "nonzero", "iwork", "scavenger", "touted", "candace", "kava", "kronos", "adjuvant", "tyneside", "travolta", "sari", "preventable", "bumpy", "aleph", "lga", "conroy", "mastermind", "vaccinated", "coburn", "rawk", "acceptability", "stryker", "surcharges", "noticeboard", "chapin", "permutation", "colpo", "ucsc", "mulligan", "fod", "ketchup", "alimony", "tng", "viscous", "skk", "cmm", "unambiguous", "emphysema", "epistemology", "grantham", "avila", "solana", "toolkits", "soloist", "rejuvenation", "chn", "jse", "anaconda", "bsnl", "carfax", "leveraged", "wega", "scanjet", "ibc", "meng", "burley", "efa", "freesex", "plasmids", "steffen", "xz", "woofer", "lada", "hinckley", "millimeter", "snape", "rollercoaster", "tdc", "connery", "newswatch", "roundups", "keylogger", "parka", "scouse", "unists", "timo", "hea", "spock", "ffs", "bmj", "farrar", "decompression", "draco", "mika", "galena", "msft", "inactivation", "metafilter", "mbna", "lymphatic", "ofc", "gian", "berks", "hdv", "wirral", "boxset", "ashrae", "ilford", "allman", "kroon", "gmo", "sdc", "builtin", "lisboa", "coc", "rollback", "westgate", "thd", "bobo", "crockpot", "weaning", "snowshoe", "hijackthis", "backside", "fetchmail", "candlewood", "angelfire", "ucsf", "painkiller", "nutty", "fenway", "restrooms", "myeloma", "scallops", "osteopathic", "vividly", "rmit", "countermeasures", "ofertas", "gwinnett", "dirs", "duvall", "wildflower", "stackable", "greensburg", "barebones", "merino", "stooges", "chatsworth", "jello", "mtime", "barium", "toric", "looting", "kiefer", "agg", "mauro", "shearer", "decca", "hydrophobic", "unsw", "millard", "btn", "terraserver", "returnable", "ohs", "resuscitation", "cancelling", "rns", "nrg", "stratification", 
"oliveira", "cahill", "grumman", "webdav", "adagio", "sunburst", "ayumi", "sev", "zt", "bela", "swt", "startups", "ranting", "udaipur", "tonya", "erupted", "ghostscript", "meltdown", "rainwater", "gellar", "alm", "vy", "cnrs", "redefining", "shar", "vesicles", "piccolo", "scalia", "resizing", "showrooms", "verifiable", "lobo", "nunn", "boyds", "havens", "bacterium", "zb", "sideline", "bushing", "ligament", "penpals", "translocation", "costco", "serialization", "wst", "playgrounds", "universidade", "fong", "hbs", "zips", "ntot", "eigenvalue", "conductance", "albemarle", "mudd", "dvs", "niels", "explodes", "lindy", "coimbatore", "panzer", "audioscrobbler", "keri", "soviets", "tweeter", "poncho", "sids", "faerie", "oooh", "oceana", "ayn", "wakeboarding", "stinger", "yuba", "chipsets", "anastacia", "collapsing", "yaoi", "gwyneth", "kuwaiti", "jalbum", "storageworks", "duplicators", "cubicle", "rana", "winfrey", "avanti", "iop", "blige", "papaya", "auger", "macclesfield", "mongoose", "crossfade", "instrumentals", "iconic", "sulfide", "dawg", "mahler", "maurer", "auschwitz", "gambit", "accom", "stb", "uxbridge", "baan", "baumatic", "slt", "landis", "fredrick", "jogger", "occlusion", "jz", "charlize", "covent", "reinvestment", "ssdasdas", "chatterbox", "neutrons", "fss", "silo", "polystyrene", "amon", "jodhpur", "intelligencer", "dundas", "netmag", "molokai", "pluralism", "kobayashi", "tetanus", "bcd", "neuromuscular", "fkq", "caribe", "iit", "nphase", "multifamily", "timres", "nrcs", "farnham", "coors", "execs", "hauser", "citeseer", "hiker", "manuf", "strategist", "electroclash", "outlays", "ktm", "zloty", "osmosis", "mojave", "renova", "hsp", "soothe", "mariposa", "bir", "advancements", "franck", "bock", "fsm", "leary", "slurry", "ker", "dte", "soulmates", "marissa", "sga", "beretta", "chiropractor", "vibrational", "sandusky", "obsidian", "dressers", "winger", "endeavours", "argonne", "runnin", "bfi", "gaye", "colfax", "logics", "camedia", "ctd", "optimise", "ernesto", "voeg", "adamson", "coeds", "subdirectories", "asain", "guilder", "comparator", "sealer", "sleazy", "onstage", "todas", "waterproofing", "devlin", "riel", "pinky", "lewisham", "mints", "wdm", "avocent", "invertebrate", "brea", "rebellious", "carnitine", "trib", "webex", "pairings", "guesthouses", "yikes", "exorcism", "grilles", "mim", "cultivar", "orson", "teammate", "idn", "hrvatska", "sequencer", "grandparent", "demonic", "wonka", "prezzo", "opto", "collaboratively", "oberlin", "nrl", "gorda", "newburgh", "alcoa", "mums", "facs", "lossless", "mmp", "beasteality", "imbalances", "andean", "superconducting", "spectroscopic", "armpit", "dect", "mew", "worsening", "symp", "igf", "metalworking", "groundhog", "clomid", "ginkgo", "decedent", "dimethyl", "retval", "openurl", "baku", "telescopic", "vespa", "phasing", "lactate", "poughkeepsie", "dodson", "monorail", "bookworm", "enero", "sabbatical", "ced", "skeptic", "backlit", "smr", "kentech", "lamette", "gita", "itm", "ath", "hennepin", "foucault", "onshore", "acls", "pwm", "florals", "millimeters", "krauss", "asca", "wicks", "pathologists", "fanfiction", "pathol", "toxics", "ipcc", "kinesiology", "potions", "tern", "squirts", "delmar", "storybook", "grenades", "rls", "etrex", "contrasted", "opting", "hauled", "taupe", "renta", "grd", "odeo", "jiangsu", "osd", "hookup", "myron", "atb", "ctg", "doreen", "altima", "keepsakes", "seawater", "ecko", "zarqawi", "contenders", "conveyors", "accenture", "iagora", "haier", "crutchfield", "fulfills", "rota", "kelso", "petaluma", "ifrs", 
"servicios", "printmaking", "miata", "julianne", "dotnet", "reconstructive", "metcalf", "vicksburg", "gri", "bookshelves", "supermodels", "glycerol", "wiseman", "sliders", "carhartt", "redford", "itemized", "rsp", "defamatory", "eir", "matheson", "amalfi", "currentversion", "renminbi", "yap", "mangas", "bottlenecks", "pyrex", "huffington", "sculpting", "sedans", "dpt", "hoobastank", "launchers", "finishers", "psychologically", "ssm", "schaeffer", "northside", "interdependence", "microfinance", "droplets", "inducted", "fos", "uninitialized", "conor", "repercussions", "woking", "longmont", "medion", "monika", "hydrological", "runes", "hobbyhuren", "ents", "ortega", "breweries", "landon", "burrell", "forecaster", "quickie", "stephane", "parabolic", "boreal", "bankroll", "bioassay", "martinsville", "ldem", "interventional", "teensex", "tabulation", "joop", "creampies", "trier", "arbitrage", "dogwood", "convergent", "enviar", "hutt", "majoring", "techwr", "glitches", "dugg", "qwerty", "equivalency", "rela", "sedation", "quik", "rosemont", "xk", "harmonics", "devi", "highschool", "orvis", "centimeters", "lavatory", "destructor", "accelerates", "opts", "relocations", "wilco", "tricare", "beckley", "ryde", "januari", "kee", "blacksburg", "anova", "midfielder", "tornadoes", "nand", "ladd", "docklands", "mgs", "tanzanian", "padi", "msl", "clamav", "megastore", "xander", "eon", "winelands", "syllabi", "elif", "lorne", "noida", "visalia", "mykonos", "wcc", "krieger", "safeway", "sheri", "prosite", "wikis", "mozzarella", "glenda", "uta", "dqg", "waterville", "yonkers", "republish", "endoscopic", "dilbert", "vfd", "transen", "konqueror", "feliz", "biscayne", "sexocean", "debconf", "disproportionately", "taskbar", "libero", "synchrotron", "tet", "memorize", "marquez", "williston", "muppets", "volumetric", "umpires", "shuttles", "jumpstart", "motogp", "hyperplasia", "nber", "donahue", "parodies", "prado", "legit", "humax", "scrapped", "ingo", "dillard", "orphanage", "disruptions", "erasure", "preamp", "pde", "mcallister", "ziegler", "loewe", "dowload", "msb", "iptv", "bondi", "freelancer", "felton", "dpp", "umax", "radars", "dmg", "materiel", "megadeth", "cooperstown", "sdh", "staffers", "mawr", "daw", "comptia", "teddies", "upsilon", "sizable", "coenzyme", "enzo", "afterlife", "mather", "ncurses", "harddrive", "cml", "counterpoint", "batesville", "skywalker", "franke", "takashi", "wristband", "jimenez", "esque", "chiller", "barra", "ales", "worthing", "zna", "jonathon", "psr", "sump", "breadcrumb", "sucrose", "amro", "portege", "neogeo", "renewables", "filipina", "sgs", "mbas", "ihop", "cortisol", "banshee", "supersedes", "bullseye", "prezzi", "rbs", "pacino", "cajon", "downloader", "seabrook", "leif", "jrr", "iwc", "taranaki", "chronically", "merkel", "megaman", "setq", "preschoolers", "vcl", "unenforceable", "lto", "busi", "noone", "rotc", "fisheye", "oaxaca", "gerontology", "microsano", "predation", "gaas", "kilimanjaro", "exacerbated", "emr", "infestation", "yarra", "volker", "linearity", "huey", "aerials", "stylist", "porosity", "schofield", "alam", "sprayer", "tirol", "sfu", "gliders", "corby", "wenatchee", "prognostic", "unregulated", "mult", "pittman", "bbl", "hadith", "ots", "kdelibs", "jayhawks", "teesside", "rav", "lobos", "reportable", "dickerson", "carotene", "filesystems", "enrollees", "cena", "sanjay", "compaction", "juicers", "gemm", "methionine", "lala", "toplist", "holyoke", "dewpoint", "rdiff", "osp", "delimiter", "forsaken", "richfield", "hangout", "striptease", "jhi", "amf", 
"sonicwall", "burgeoning", "unicast", "amnesia", "cipro", "cherie", "klip", "libxt", "menswear", "inthevip", "wrenches", "actuate", "capote", "cvd", "flexeril", "molar", "databank", "montevideo", "sunglass", "lhs", "kassel", "followings", "shipley", "accretion", "asha", "bullpen", "mamas", "schreiber", "gnc", "dysplasia", "freeroll", "efl", "igs", "utopian", "kota", "iden", "dil", "wia", "sosa", "negril", "hyped", "epidermal", "autopilot", "garza", "decrypt", "batik", "crain", "subd", "utilising", "dsu", "fermanagh", "idr", "interoperable", "mam", "delano", "sonja", "plex", "compat", "replaceable", "forint", "nudism", "netcom", "formulary", "irvin", "galery", "hounslow", "fosamax", "striping", "excavating", "recoveries", "mrsa", "mainstreaming", "awt", "hola", "hoody", "dci", "geri", "seasonings", "marcelo", "pantech", "fcp", "scaricare", "roxbury", "clamping", "whiplash", "dildoes", "takeoff", "wiggle", "truely", "henna", "cartesian", "gamezone", "yank", "llewellyn", "shag", "asymmetrical", "universitat", "williamstown", "trolleys", "interlocking", "doped", "headband", "internetweek", "outperform", "ncp", "harmonization", "hamid", "differentiating", "hitters", "konrad", "wickets", "restarting", "bcm", "xilinx", "wideband", "tmobile", "rocha", "pbox", "aea", "stevenage", "moorhead", "directorio", "restructured", "aerodynamic", "hopewell", "evaluative", "zuma", "annuaire", "subtracting", "bram", "kuna", "logbook", "xor", "louth", "pict", "truetones", "gabor", "rotates", "ezcontentobjecttreenode", "leanne", "bgcolor", "rescues", "wim", "corsa", "causality", "tiling", "ethnographic", "waffles", "doubly", "fandango", "powermac", "catalysis", "annexes", "lisle", "pushj", "naylor", "wrongdoing", "paducah", "gunter", "iranians", "aat", "commandos", "abcd", "repeatable", "deh", "epiphone", "scf", "weekender", "milner", "schott", "welders", "semifinals", "quantization", "surfacing", "vegetarians", "hagerstown", "polyclonal", "transponder", "gottlieb", "withdrawl", "geneid", "tierney", "glock", "guatemalan", "iguana", "glaring", "cifras", "salman", "choker", "ecologically", "scoreboards", "mohr", "dpa", "spaceship", "digimax", "moremi", "btc", "technologie", "tunica", "powerbuilder", "aorta", "unconfirmed", "dimitri", "degenerative", "delve", "torrey", "celica", "beloit", "nir", "substr", "lowrance", "ballantine", "crimp", "bss", "mousepad", "umbria", "oregano", "rashid", "microtek", "geary", "boaters", "soyo", "visualisation", "brianna", "handlebars", "weightloss", "interconnects", "playtime", "enrollments", "gyllenhaal", "criticality", "geoscience", "mhonarc", "golive", "deville", "meh", "moseley", "spacers", "unido", "deferral", "hersh", "hilliard", "vlsi", "keegan", "feces", "uy", "bute", "activewear", "transcriptions", "metered", "bugfixes", "cami", "interna", "quintessential", "babycenter", "gardena", "cultura", "stockpile", "psychics", "pediatr", "williamsport", "westlaw", "hetero", "meteorite", "extruded", "lakh", "starware", "phage", "laszlo", "hernando", "vogt", "wolfpack", "lags", "eldridge", "wray", "hajj", "edirectory", "longstanding", "knitwear", "apocalyptic", "fatties", "darmstadt", "mco", "ucsb", "fillings", "marti", "aberystwyth", "infineon", "fdd", "inflows", "tmpl", "estuarine", "lita", "nubuck", "socialization", "estock", "mbit", "valign", "caving", "vec", "alkyl", "artichoke", "leasehold", "directgov", "ubiquitin", "fuerteventura", "hairdressing", "dhhs", "fecha", "nio", "wsi", "quigley", "yellowpages", "pretec", "biomechanics", "microcomputer", "discipleship", "hella", 
"womack", "magnifier", "acdbtext", "pitney", "esters", "haan", "ofcom", "ablation", "nutcracker", "dosages", "prn", "zm", "dfs", "multiplexing", "indentation", "hazmat", "eac", "dalhousie", "ahem", "retardant", "shankar", "overheads", "southfield", "iee", "gnustep", "spm", "azkaban", "dermal", "metar", "sizeable", "aftershave", "lahaina", "earners", "tenderloin", "dji", "ipp", "chee", "hamburgers", "oliva", "gaultier", "cios", "margie", "nms", "wandsworth", "caltech", "stapleton", "gsc", "francophone", "sqm", "xoxo", "coord", "mocking", "nri", "serengeti", "raccoon", "shrinkage", "prd", "uris", "hamsters", "codphentermine", "thrashers", "calibrate", "gilmour", "rambo", "cleburne", "serrano", "niacin", "strawberrynet", "wesson", "ormond", "oxycontin", "bibliographical", "wynne", "glyph", "nagios", "marinated", "marko", "sfas", "genotypes", "conde", "alford", "madurai", "evacuees", "urbanization", "kilgore", "unwired", "elseif", "pneumoniae", "skyscraper", "ebags", "gnn", "tooled", "intermec", "charlottetown", "submersible", "condensate", "matchup", "undefeated", "krs", "movin", "kino", "vidio", "photographing", "pocono", "footjobs", "trackers", "kinkade", "unify", "dissident", "sperry", "iframe", "tur", "commu", "xterm", "swapped", "stent", "vermillion", "angiography", "areaconnect", "brockton", "daz", "abcdefghijklmnopqrstuvwxyz", "dunst", "livonia", "specialisation", "nsi", "walgreens", "plasticity", "crux", "nhra", "armband", "leamington", "mosley", "iga", "stemmed", "appleby", "grayscale", "labonte", "lek", "cartoonist", "flotation", "geol", "deterrence", "cardin", "aardvark", "cosmological", "dothan", "isotopic", "hadleionov", "langford", "ssg", "understated", "obit", "unt", "randomised", "amphetamine", "shia", "grout", "reba", "wrx", "rsgi", "bharat", "sls", "slg", "kilometre", "tristar", "gippsland", "pastels", "stallions", "paramedics", "fishbase", "rolla", "curie", "bootable", "skit", "sourcewatch", "decimals", "boe", "catania", "countertops", "paola", "elwood", "hocking", "prerelease", "seqtype", "femoral", "anz", "visceral", "fructose", "edta", "silverstein", "broderick", "zooming", "hamasaki", "keswick", "extinguisher", "subpoenas", "spiele", "rincon", "pll", "donny", "vitale", "fledgling", "boinc", "traversal", "bagder", "erick", "kcal", "midfield", "hypersensitivity", "redshift", "glaser", "sado", "cusco", "imagemagick", "uic", "fernandes", "prosthesis", "jsc", "omron", "alberghi", "electricals", "kelp", "taker", "placeholder", "moulton", "yall", "npdes", "massages", "catalist", "metarating", "tupelo", "syriana", "batt", "dbms", "asb", "videotapes", "backseat", "kauffman", "manipulations", "accomodate", "tioga", "aylesbury", "submenu", "kwacha", "chondroitin", "sandpiper", "vamp", "overarching", "janes", "selectors", "condoleezza", "internationals", "estuaries", "schulze", "osti", "paleontology", "emporio", "stepper", "reykjavik", "waterskiing", "renfrewshire", "superheroes", "marg", "leftovers", "mariano", "bangboat", "guestrooms", "urethane", "stoughton", "paphos", "sprinklers", "accum", "bms", "datsun", "sainsbury", "chefmoz", "helo", "yvette", "procmail", "midsole", "ayuda", "geochemistry", "reflectivity", "moog", "anth", "durand", "linea", "butterworth", "datagrid", "metetra", "rodrigues", "apprenticeships", "oncol", "dop", "asymptomatic", "retails", "offroad", "simpletech", "gandalf", "minot", "evidentiary", "kpa", "whelan", "synthesize", "doan", "localisation", "laparoscopic", "pem", "hotelguide", "bayview", "overridden", "sorensen", "hinds", "managment", "racially", 
"stinky", "riverton", "expertly", "mgc", "langkawi", "ftpd", "colloidal", "guarantor", "imperialist", "suc", "veneers", "reaffirmed", "zambezi", "tibia", "raquel", "wpt", "kiddie", "tulare", "venturi", "sundries", "linebacker", "danzig", "neurol", "beanies", "irreducible", "trixie", "ridgeway", "henckels", "srb", "verifier", "dimensionname", "eurasian", "galbraith", "pesky", "underwire", "salvia", "aep", "radioshack", "sportstar", "alana", "upd", "duma", "osh", "ddbj", "stah", "scripted", "ated", "mutagenesis", "posada", "vocalists", "tiburon", "lpc", "geiger", "cmyk", "everlast", "obits", "jekyll", "sportsbooks", "andaman", "hallam", "spoofing", "rockhampton", "reauthorization", "poolside", "xiamen", "trc", "pita", "chopard", "skeptics", "nast", "motorist", "kwik", "peritoneal", "jaffe", "freebie", "harare", "tunbridge", "spycam", "lowes", "lineto", "ncaab", "publicize", "neohapsis", "sanibel", "bulimia", "newquay", "intros", "ladybug", "analyser", "armando", "conwy", "algorithmic", "rectifier", "banknotes", "aem", "bookshot", "bassoon", "scrapbooks", "hydropower", "clearances", "denominational", "dominguez", "meas", "tamron", "dfid", "vlans", "spreader", "deu", "otolaryngology", "ezines", "vbseo", "snowmobiles", "oca", "phen", "educa", "lagrangian", "dubrovnik", "idt", "eases", "hippocampus", "crim", "repeaters", "longoria", "matsushita", "reimbursements", "kotor", "encodings", "yuen", "eqs", "eca", "actionable", "gangbangsquad", "cornea", "overfull", "southgate", "minibar", "kitchenette", "ols", "liberian", "tuc", "hth", "repairers", "liczniki", "rcc", "numerology", "armitage", "brac", "barware", "corsi", "normalize", "gsp", "bcr", "krt", "buffs", "tamoxifen", "phenotypes", "kinross", "kieran", "informatie", "mccallum", "triplet", "geosciences", "sonics", "timmins", "django", "pllc", "lotta", "upg", "nhtsa", "swissprot", "archaeologists", "voss", "pussys", "moveto", "tentacle", "stx", "iaudio", "prednisone", "salespeople", "motility", "dengue", "gaiman", "incineration", "dumont", "shanks", "bissell", "organza", "centralised", "unbreakable", "supersized", "depictions", "wml", "sexcams", "kaffe", "karim", "aww", "gtc", "pbl", "cael", "separators", "informatique", "resetting", "indepth", "funnies", "cumin", "chicagoland", "keystrokes", "setters", "inertial", "payless", "ona", "pec", "payee", "cinematographer", "preorder", "oig", "teenies", "ppv", "ventilator", "annonces", "camelbak", "klear", "micrograms", "pediatrician", "cymbal", "convective", "haymarket", "nosed", "bre", "shogun", "rescheduled", "bala", "sidestep", "readline", "preemption", "microbiological", "corticosteroids", "pseudoephedrine", "stockholder", "engnet", "quanta", "sturgis", "synapse", "cwd", "innostream", "airplay", "uppers", "sib", "pitman", "bodrum", "leathers", "embossing", "redirects", "fuzz", "roscommon", "meryl", "izmir", "meticulous", "multiplexer", "menorca", "dendritic", "minima", "wstnsand", "naproxen", "operands", "mikael", "conceptually", "crichton", "cct", "nics", "hardwoods", "clarita", "xfs", "capping", "parisian", "humanism", "hiroshi", "hipster", "accel", "annualized", "sandi", "npa", "becca", "basildon", "khoa", "testis", "uclinux", "unusable", "tigger", "approximated", "dhea", "consulates", "wonkette", "versioning", "breakdowns", "dbh", "periodontal", "macmall", "iphoto", "uncredited", "recordi", "lacroix", "rupiah", "bullish", "hippy", "klik", "northerner", "xsd", "mackintosh", "kenney", "fabricators", "mutated", "layne", "moonstone", "scilly", "sheng", "fsp", "yk", "strep", "offical", "hps", 
"tampere", "testo", "synergies", "fundamentalists", "amyloid", "emachines", "understandably", "icarus", "appletalk", "goff", "dialed", "geoxtrack", "bemidji", "harcore", "intermodal", "spx", "catalunya", "baymont", "niall", "mitts", "rik", "nappy", "diario", "khalid", "fuchsia", "chowhound", "muscat", "ffff", "kmart", "handover", "knott", "butterfield", "hialeah", "finney", "salamander", "driveways", "ummm", "ayres", "lukas", "cavan", "aswell", "skippy", "marginalized", "sooners", "cityguide", "maritimes", "permanente", "texaco", "bookmakers", "speci", "hgtv", "contacto", "mbc", "marston", "newsline", "coverages", "bap", "specialities", "loca", "systematics", "renderer", "matsui", "rework", "snowmass", "deq", "rosh", "coffs", "cleansers", "acu", "webby", "footbed", "inicio", "moretrade", "apogee", "allergens", "worsen", "mlc", "applica", "tankers", "whopping", "issey", "rtr", "bes", "cust", "brookes", "anim", "tull", "informatica", "computeractive", "finline", "permissionrole", "quickcam", "shunt", "rodeway", "scrollbar", "breen", "voyuerweb", "mbe", "kenshin", "dpm", "clackamas", "synch", "patten", "leppard", "allis", "estimators", "functionalities", "rmt", "downes", "koffice", "evidences", "mux", "dbx", "fetishes", "isaacs", "outrigger", "enclave", "fibrillation", "licorice", "statically", "ipl", "dixons", "goldmine", "lhasa", "developmentally", "ziggy", "ingles", "senders", "steamy", "atf", "madhya", "marinade", "passwort", "extinguishers", "stratosphere", "tbilisi", "updater", "geico", "fld", "cabos", "companys", "tinputimage", "ggg", "nicaraguan", "icn", "wanganui", "sconces", "insulator", "endometrial", "mohan", "hegemony", "focussing", "gallerie", "bioperl", "eprint", "tennant", "ebp", "tryptophan", "checkin", "gilroy", "extensibility", "aei", "qg", "mcculloch", "thang", "lorem", "seng", "bianco", "salma", "consortia", "asimov", "renato", "bungee", "murdock", "hokkaido", "alternates", "brdrs", "configures", "multilevel", "mvs", "pce", "albertson", "renoir", "getclass", "perthshire", "mucus", "suspenders", "realtek", "morons", "dismantle", "pharos", "obp", "zovirax", "twikiguest", "reimplemented", "eavesdropping", "orgs", "numerator", "gds", "nme", "resurgence", "metastases", "gino", "timings", "mecha", "carburetor", "merges", "lightboxes", "icra", "jeopardize", "ltp", "loews", "fanlisting", "flet", "bds", "hyland", "experian", "screenwriting", "svp", "keyrings", "hca", "hdc", "hydrolase", "koa", "mobilized", "accutane", "zonealarm", "sexkontakte", "canaveral", "flagler", "someplace", "vcard", "antibacterial", "rund", "extremism", "edgy", "fluctuate", "tasked", "nagpur", "funroll", "tema", "flips", "petsmart", "libuclibc", "chaney", "aventis", "macrophage", "palmas", "useable", "ferndale", "saipan", "councilor", "tcr", "myinfo", "jellyfish", "newington", "reissued", "mpv", "noa", "airconditioning", "wiggles", "bho", "synths", "kennesaw", "rubbermaid", "spector", "medica", "ayer", "incumbents", "ashok", "vern", "writable", "usepa", "reflectance", "mobo", "bunn", "chiba", "uint", "tgb", "yj", "coliform", "selena", "olmsted", "broomfield", "darpa", "nonpoint", "realignment", "undermines", "ferreira", "sasl", "defibrillators", "kraus", "certs", "nwa", "jstor", "aarhus", "supercomputer", "bouncer", "phenol", "jigs", "loudoun", "lifetimes", "grundy", "histamine", "byline", "mbox", "mustafa", "bedlam", "ioexception", "abdel", "bothell", "synergistic", "aur", "lippincott", "maplewood", "tillman", "maints", "rhp", "handball", "shandong", "cch", "stylized", "folate", "lenoir", "manitou", 
"cytometry", "goofs", "wokingham", "connors", "musc", "ripon", "nypd", "plexus", "systolic", "hyman", "unreachable", "deepak", "desarrollo", "tian", "jisc", "merc", "covina", "noonan", "ufc", "modernist", "waring", "janie", "fams", "yasser", "weathering", "totalitarian", "putters", "waypoint", "prx", "interrelated", "delray", "lifedrive", "santander", "southbound", "solidworks", "cronin", "averatec", "huren", "patios", "firebox", "synopses", "venta", "sadr", "tuples", "brdrnone", "diarrhoea", "sonatas", "barbecues", "walther", "deadwood", "mancini", "rpmlib", "milpitas", "commonsense", "bsi", "piii", "romford", "emporia", "digidesign", "violators", "phrasebook", "reconfiguration", "sledding", "lakefront", "excision", "traceability", "yangon", "booktitle", "lemony", "recursively", "ney", "kilda", "auctioned", "hennessy", "basset", "antwerpen", "paltrow", "rda", "limiter", "imtoo", "jmp", "cornwell", "dah", "blueberries", "notting", "comprehensively", "amar", "deftones", "apg", "zyxel", "kno", "limelight", "schmid", "alg", "bme", "solis", "cdx", "mju", "hoosiers", "criss", "glynn", "aerotek", "unmet", "toa", "competes", "olathe", "ciw", "compositional", "sez", "trig", "taylormade", "catawba", "mbytes", "ordinal", "tth", "inglewood", "gila", "magnitudes", "downed", "firstname", "metairie", "polluting", "wellcome", "pedicure", "duplexes", "edgewall", "webchanges", "backplane", "daschle", "transceivers", "disrupting", "biodegradable", "spore", "meps", "phpmyadmin", "bloodrayne", "tessa", "unrealized", "hei", "artistas", "roomate", "acetone", "alanine", "elko", "dvdrw", "spt", "ries", "inthe", "blitzkrieg", "nickels", "banbury", "igm", "snf", "optra", "choctaw", "issaquah", "interactively", "fredrik", "aventura", "ewa", "dpic", "mufflers", "quarks", "refactoring", "monrovia", "forman", "marrakech", "optoma", "walkways", "heineken", "shelbyville", "oxidized", "bugfix", "sharif", "bloodstream", "yx", "underpinning", "resistivity", "hollinger", "conformal", "racquets", "sherri", "dbd", "nevermind", "moa", "tenchi", "potters", "detergents", "cheri", "bombardier", "subsp", "cytotoxic", "frag", "eseminars", "colophon", "morin", "ico", "tatum", "unforgiven", "thesauri", "gaffney", "harrell", "toowoomba", "friendfinder", "uts", "bootsnall", "relais", "allocates", "freecom", "yoo", "kabbalah", "dgs", "punks", "chorley", "ivanov", "unannotated", "endian", "dari", "patchy", "haters", "mutex", "worldnow", "giuliani", "hina", "millennia", "pathophysiology", "frith", "pao", "doran", "remixed", "hypoxia", "newyork", "penile", "hemi", "positron", "metallurgical", "ordinating", "caregiving", "molybdenum", "easley", "plo", "psn", "hexagonal", "throated", "contravention", "bacteriol", "healers", "superbike", "biosafety", "binomial", "engels", "staybridge", "mullet", "canfield", "hardball", "orem", "scholl", "renovate", "dvdr", "phenterminebuy", "metformin", "actuary", "addressbook", "xquery", "csl", "purdy", "rattus", "xian", "latches", "ardmore", "cosmetology", "emitter", "wif", "grils", "yom", "ralston", "estados", "begining", "apartamentos", "sassoon", "tna", "hotlog", "duquesne", "oclug", "formatter", "rhinestones", "shootings", "splitters", "gdm", "pizzas", "contig", "whittaker", "trafic", "winders", "walkie", "adorama", "uucp", "postmarked", "devolution", "avion", "innes", "reunification", "izumi", "caenorhabditis", "moderating", "gadsden", "cthulhu", "eurostar", "dooley", "diebold", "unsaturated", "hotsync", "ryerson", "bfd", "nonexistent", "liquidated", "decoders", "validates", "dae", "jackman", 
"biophysical", "mendes", "lasagna", "landers", "belton", "qing", "docu", "tapas", "calla", "curriculums", "supermodel", "rezoning", "schumer", "exclusivity", "motivates", "debuted", "lifeguard", "chrissy", "havasu", "kei", "danforth", "kilmarnock", "bignaturals", "hendersonville", "poweredge", "sequels", "licensor", "pantone", "granby", "laboratoire", "headteacher", "viajes", "etosha", "ndc", "coexistence", "leona", "dpr", "brownfield", "aguilar", "supervises", "orthologs", "pataki", "redistricting", "jil", "amritsar", "lpi", "pram", "acqua", "mekong", "anesthetic", "dsi", "maduras", "pfi", "paperless", "perc", "fansites", "sherbrooke", "egyptienne", "hyn", "anisotropy", "heaton", "rennie", "sno", "redox", "cladding", "seaworld", "hotlist", "trumbull", "retransmission", "luau", "tiscali", "overlaps", "meticulously", "sitka", "ucs", "lsr", "hellboy", "jakub", "hanselman", "rangemaster", "interceptions", "rrc", "dyna", "appt", "nonviolent", "evangelicals", "cunny", "goddamn", "wolfowitz", "epping", "accra", "bimbo", "jamboree", "multicolor", "tritium", "ptfe", "leaching", "sauer", "cricinfo", "isomorphism", "lsat", "estab", "stockbridge", "invariants", "jillian", "islip", "egp", "didier", "capistrano", "yardage", "neve", "enviro", "gte", "bodybuilders", "ranchers", "bremerton", "wbc", "radii", "schwinn", "expander", "regt", "referer", "electrolysis", "signatories", "wetsuit", "flatrate", "vendita", "nazionale", "peroxidase", "folkestone", "angkor", "delcampe", "taylors", "rahul", "mmr", "zp", "vserver", "neurologic", "chd", "opac", "cmv", "macabre", "neurontin", "popeye", "gruber", "excerpted", "spotter", "pyongyang", "hmos", "beltonen", "chamonix", "recycler", "declarative", "semaphore", "dprk", "carmarthenshire", "tristate", "standardize", "recyclable", "knickers", "overloading", "angioplasty", "fanboy", "sharapova", "moen", "irin", "deseret", "eastbay", "bfa", "androgen", "parkes", "kilogram", "pacemaker", "duarte", "evaluators", "tarball", "nears", "kapoor", "pah", "allard", "mog", "tures", "standout", "lll", "holley", "ogs", "ptt", "sfs", "transamerica", "bdrm", "comparability", "buckhead", "industrialization", "cabana", "mbr", "yoshi", "skokie", "catwalk", "homesite", "pecos", "stinson", "blurry", "etrust", "minibus", "coty", "denby", "openbook", "unfunded", "jobsite", "dls", "levinson", "kasey", "disbursed", "cristian", "ballooning", "nats", "antineoplastic", "amplify", "shitting", "coden", "congressmen", "dft", "xsp", "strapless", "qualitatively", "struc", "whitefish", "flourished", "ejection", "puyallup", "bonham", "miu", "cosplay", "gazduire", "dodgy", "parasitology", "thymus", "handlebar", "sanborn", "beale", "lesbianism", "locators", "belive", "mnogosearch", "aoa", "childress", "pppoe", "phytoplankton", "wireline", "handpainted", "suprise", "neath", "casseroles", "generational", "coppola", "burrito", "sandton", "spylog", "biltmore", "coriander", "edtv", "chopra", "streamflow", "montoya", "lesbien", "manipulative", "hypnotize", "liaisons", "backers", "evocative", "mcclelland", "centerfold", "burch", "chesterton", "warlord", "guage", "powerball", "snider", "creuset", "wildland", "oster", "conti", "sichuan", "wrigley", "bollinger", "sensitivities", "offshoring", "uiq", "bayes", "vipix", "amphibian", "substation", "optically", "ceasefire", "haag", "alj", "swartz", "nanoparticles", "affine", "sitios", "woot", "obo", "uname", "employmentnew", "sepa", "asrock", "hijacking", "blurbs", "downsizing", "subcutaneous", "creatinine", "factorization", "netbios", "fleshlight", "reliever", 
"ender", "indenture", "arlen", "trailblazer", "coney", "avenida", "ern", "shocker", "barnstable", "ioctl", "bronte", "refrigerant", "caterham", "bajar", "movei", "barkley", "datacenter", "presidio", "transfection", "fung", "legg", "moyer", "roux", "rectangles", "caseload", "catharines", "pdx", "wget", "collaborator", "cruzer", "eeoc", "tnc", "cnw", "sausalito", "clas", "xenopus", "reflectors", "endorsing", "qingdao", "kiwanis", "onlinephentermine", "replicator", "assertive", "aldershot", "weirdness", "oblast", "townhall", "sunnyside", "datos", "pham", "glycogen", "tain", "selangor", "detainee", "brd", "hoosier", "balearic", "toluene", "jini", "tubal", "longford", "johansen", "photocopies", "haccp", "narconon", "dyno", "blakely", "klonopin", "photonic", "kyiv", "tami", "hijackers", "buell", "informazioni", "mccracken", "ultrasonography", "cale", "alyson", "taupo", "possum", "milligan", "rosacea", "transgendered", "thos", "toxicological", "mackey", "ristorante", "obama", "dvc", "jermaine", "platypus", "breakbeat", "karina", "jang", "thereunder", "kink", "winton", "holla", "multilayer", "strcpy", "xzibit", "mohair", "chore", "agb", "prt", "abm", "kgb", "preemptive", "guzman", "subcontracting", "counterterrorism", "communicators", "embodiments", "sociedad", "taskforce", "gatineau", "pertussis", "concentrator", "astrophysical", "apap", "pairwise", "nagy", "hofstra", "kbs", "filmstrip", "shortcake", "hsm", "chilliwack", "bidorbuy", "tetracycline", "lovett", "motorhead", "salam", "hofmann", "paramilitary", "flipper", "eyeball", "outfitter", "rsl", "minden", "hardwick", "immunological", "wifes", "phenyl", "telefax", "giao", "famously", "hattiesburg", "telematics", "tsai", "maier", "lca", "bossier", "franchisees", "falco", "armin", "ique", "controllable", "surfactant", "telecommuting", "culvert", "prescriptive", "wcag", "hott", "spanner", "mchugh", "firehouse", "currys", "diadora", "laporte", "wgbh", "telekom", "puri", "factsheets", "karts", "orthodontic", "visors", "leste", "lithography", "bonobo", "hamptons", "proofreading", "rmx", "evokes", "jdm", "dehydrated", "whyte", "interop", "initializing", "manfrotto", "waveguide", "pnc", "aussies", "murtha", "reinhard", "permaculture", "suburbia", "kamal", "catwoman", "optimally", "darko", "windstar", "polymorphisms", "sexist", "mdm", "embryology", "styrene", "alumnae", "inducible", "riesling", "triage", "ees", "krugman", "mrt", "mazatlan", "silencer", "foreclosed", "chernobyl", "rigby", "allergen", "crystallography", "frosting", "gallbladder", "photogallery", "nightwear", "sconce", "vgc", "drivetrain", "skelton", "ovaries", "mamob", "phenterminecheap", "daddies", "impressionist", "tourisme", "hpi", "clif", "fairways", "watercolors", "klipsch", "tekken", "lactic", "bydd", "katana", "ameriquest", "boson", "culo", "milled", "mcarthur", "analgesic", "mya", "btec", "geez", "crocheted", "acetylcholine", "modblogs", "pud", "firsts", "ferrets", "enlight", "wop", "twas", "menzies", "agonists", "eisner", "staroffice", "acg", "photometric", "fokus", "ntc", "buzzer", "tok", "trams", "vickie", "tinnitus", "vectra", "benidorm", "gerrard", "marketworks", "libertarians", "downers", "kevlar", "sequestration", "yoshida", "inositol", "praia", "follicle", "itemsshow", "brunner", "indore", "inspectorate", "ultralight", "toutputimage", "saudis", "octal", "debilitating", "twd", "keypress", "notifyall", "hdf", "corrs", "turku", "centrifuge", "curators", "multipoint", "quang", "marla", "mths", "caffe", "projective", "fandom", "cws", "kao", "debacle", "argh", "tts", "plantings", 
"landmines", "kes", "sdd", "khaled", "kimmel", "famc", "tva", "arbitrators", "deakin", "instock", "gilligan", "unh", "unpossible", "waldron", "kihei", "daq", "bronchial", "emg", "nanoscale", "hmong", "brownfields", "emmylou", "antcn", "unilaterally", "hypoglycemia", "sodomy", "bukakke", "bigpond", "famosas", "nsync", "zd", "revaluation", "conditionally", "moira", "tenured", "padd", "amato", "debentures", "rfcs", "acyl", "rehoboth", "lmc", "dht", "drucker", "lmi", "tham", "cigna", "dlr", "nifl", "sealy", "axa", "carrey", "ige", "dde", "foy", "evesham", "mcneill", "manitowoc", "baguette", "haves", "erections", "overpriced", "grantor", "sux", "orbiting", "soares", "gsl", "ihep", "resubmit", "bader", "gymboree", "kyo", "yunnan", "miyake", "rah", "saggy", "subtypes", "moultrie", "vasquez", "iogear", "merch", "uplinked", "cognos", "northbound", "cardigans", "ket", "rasa", "taglines", "usernames", "gpsmap", "ngn", "midweek", "pirelli", "rialto", "tvw", "durations", "bustle", "trawl", "shredding", "reiner", "risers", "taekwondo", "ebxml", "unedited", "inhaler", "granularity", "albatross", "pez", "formalized", "retraining", "naa", "nervosa", "jit", "catv", "certificated", "spicer", "karsten", "surfboard", "scl", "garfunkel", "handguns", "ideograph", "papillon", "dmn", "citywide", "stingray", "bmo", "toscana", "analsex", "larsson", "franchisee", "puente", "epr", "twikiusers", "tustin", "physik", "savute", "slinky", "cubase", "weatherproof", "parkplatz", "roadsidethoughts", "oxy", "pthread", "postmenopausal", "mixtape", "tuxedos", "fujian", "batters", "gogo", "nca", "minivans", "yerevan", "duffle", "scraper", "posner", "bwv", "technet", "sdsu", "decl", "lombardi", "musi", "unger", "gophers", "brando", "ksc", "multifunctional", "noes", "relist", "webjay", "vtr", "haworth", "transfected", "dockers", "swg", "screwdrivers", "tir", "guitarists", "manta", "christa", "sff", "moffat", "surfboards", "deteriorate", "compo", "roos", "eesti", "caulfield", "midpoint", "orland", "malagasy", "shoplocal", "standardisation", "matlock", "nair", "polymorphic", "emd", "phenomenology", "substantiated", "slk", "phong", "bandera", "cred", "lorry", "recaps", "fet", "resolver", "kagan", "chiu", "anthropologist", "opcode", "jugg", "revamp", "herbarium", "grb", "readonly", "arista", "barcelo", "unknowns", "kean", "coq", "cpo", "brosnan", "chamomile", "tgf", "mobilizing", "anya", "allo", "geddes", "wayland", "cerro", "methylation", "ecol", "clanlib", "jayson", "prostatic", "uj", "metcalfe", "oppenheimer", "mcclintock", "android", "primaries", "converges", "lation", "anisotropic", "voorraad", "ucr", "mxn", "ambrosia", "springboard", "rubella", "eisenberg", "bif", "constitutive", "vesa", "signoff", "guggenheim", "sapphic", "killington", "otr", "intec", "xem", "instawares", "kearns", "showcased", "summerfield", "cooperatively", "oshawa", "targa", "triplets", "hec", "billionaire", "leucine", "jobless", "slingshot", "cutout", "disgruntled", "coker", "selinux", "crosslinks", "resurrected", "skyscrapers", "spamalot", "sfp", "noob", "crb", "moviefone", "beecher", "goog", "mdgs", "democratization", "biostatistics", "sakaiproject", "cilantro", "equ", "xilisoft", "zc", "terracotta", "garvey", "harford", "pcie", "dartford", "dicaprio", "rosso", "onlinebuy", "gilliam", "certiorari", "walkin", "contributory", "applescript", "esol", "giggles", "suture", "jacobi", "fark", "autoblog", "glaxosmithkline", "dof", "sextoys", "tice", "accor", "buford", "uspto", "balfour", "calipers", "penalized", "pyruvate", "loggers", "envi", "kissinger", "rmc", 
"whew", "orchestrated", "conformational", "choreographer", "mcsa", "impressionism", "bucknell", "martino", "cranbrook", "taz", "ocp", "subdomain", "precios", "simcoe", "abnormality", "varicose", "newtonian", "genova", "libor", "infomatics", "hyannis", "howland", "federations", "syed", "urination", "bewertung", "broadcom", "cautionary", "escalate", "spotters", "kucinich", "noosa", "sider", "mitral", "dafa", "verdes", "inproceedings", "crestwood", "takingitglobal", "dmz", "antisocial", "baz", "gangsters", "daemons", "foundational", "probs", "huntley", "kanpur", "uah", "elven", "isotropic", "adodb", "enlaces", "edelman", "rubinstein", "flier", "griswold", "ome", "carcinogenic", "micr", "rrna", "goverment", "mercado", "lum", "dekker", "supercharged", "magicyellow", "primavera", "timescale", "fico", "overwritten", "marcinho", "kor", "erb", "keanu", "edina", "perle", "lebron", "terminally", "bundaberg", "lbo", "breyer", "kochi", "pirated", "leavers", "vpl", "pubsulike", "aquifers", "nittany", "dakine", "rescuers", "amsoil", "revitalize", "messageboards", "lakeville", "apotheon", "eukaryota", "permeable", "rsm", "lastname", "pxi", "faxless", "napalm", "annuncio", "usmle", "racetrack", "atenolol", "riveting", "cbbc", "absorbers", "xseries", "biweekly", "parkside", "rez", "hows", "posi", "derailed", "shoebuy", "ashworth", "keira", "meadville", "skynyrd", "threechannel", "fid", "rua", "monologues", "subroutines", "subspecies", "penton", "eoc", "figleaves", "bab", "ketchikan", "immagini", "shafer", "qca", "broiler", "ctn", "lickers", "akbar", "cbl", "skimpy", "fisa", "reflexive", "drool", "godin", "exchangers", "interbase", "sepsis", "appli", "boxdata", "laing", "oscillators", "choline", "doolittle", "trikes", "pdm", "joerg", "removers", "grisham", "diffuser", "indesit", "rouble", "kamasutra", "camila", "belo", "zac", "postnatal", "koizumi", "tallied", "ikezoe", "niggas", "lorain", "tko", "keying", "ballpoint", "kq", "lupin", "eidos", "computerised", "maf", "rsv", "munson", "ftm", "munoz", "hbv", "jeffersonville", "willfully", "orienteering", "eoe", "cavs", "humphries", "puss", "ngs", "podiatry", "truffle", "taka", "beal", "kalahari", "blockage", "hallo", "abo", "recv", "obstet", "bulma", "chicos", "cliche", "sadc", "tolar", "screenname", "chlorinated", "hypothesized", "upbringing", "fmc", "newry", "zonal", "defun", "unsustainable", "maas", "ghostbusters", "interdependent", "rockwood", "dbe", "asda", "civics", "literals", "unanticipated", "seminoles", "plist", "tabulated", "workloads", "chemo", "vhdl", "pretrial", "fermilab", "hotplug", "rotator", "krups", "myosin", "mtx", "carpool", "honky", "matsumoto", "armpits", "clug", "gasolina", "caruso", "fsh", "joysticks", "visualized", "bosworth", "soic", "clitoral", "bers", "carsten", "riverwalk", "convertibles", "literotica", "pgm", "ringetoner", "tpm", "floorplan", "oscilloscope", "getz", "mgd", "dictators", "levees", "annandale", "hillel", "jeffries", "pacheco", "slacker", "miva", "sns", "gca", "xchange", "kraftwerk", "bandana", "pentecostal", "extrapolation", "fennel", "telemark", "spg", "quy", "datasheets", "smit", "flywheel", "futons", "interviewees", "mosfet", "maryville", "oskar", "ital", "quarkxpress", "nondiscrimination", "republika", "icici", "fixings", "leith", "kickboxing", "deming", "deactivated", "caliente", "oligonucleotide", "crtc", "golgi", "channeling", "stopwatch", "maroc", "lemieux", "subscript", "starfleet", "odi", "substandard", "phenterminephentermine", "phoned", "ncl", "gmtime", "convener", "becuase", "dailies", "dansguardian", 
"miramax", "busta", "maury", "cng", "jizzshot", "moya", "nackt", "commercialisation", "cunni", "cardinality", "machado", "insurances", "qn", "tinting", "epidemiologic", "isset", "burnie", "bushings", "radionuclide", "typeface", "changeover", "jian", "termites", "dotnetnuke", "decryption", "etnies", "subsec", "cxx", "grinnell", "alexei", "helly", "protestors", "signings", "parnell", "gretna", "guida", "abl", "farscape", "hdtvs", "sde", "cyborg", "yanks", "hematopoietic", "clot", "imprints", "opensolaris", "inflationary", "elie", "traceroute", "fgm", "cuddle", "workbooks", "fallback", "permutations", "downer", "abelian", "cabela", "transferee", "quantitatively", "sheepdog", "cameraman", "pinochet", "replicating", "tci", "slashes", "streetpilot", "renovating", "paralympic", "dwarves", "cakewalk", "pyro", "phenterminediscount", "tye", "bna", "uwa", "stinks", "trx", "behav", "blackfoot", "kuo", "schaffer", "kemper", "glycemic", "plesk", "slicer", "joshi", "realtytrac", "sandburg", "dnb", "nwi", "reza", "operable", "wargames", "guerrillas", "saito", "tce", "fullsize", "auc", "anzac", "kulkarni", "rabbis", "mendelssohn", "investigational", "photojournalism", "anaal", "christiansen", "centaur", "rubio", "transando", "rapist", "ert", "pratchett", "climatology", "baise", "labtec", "prioritization", "pinhole", "hdpe", "bioengineering", "dirac", "mcu", "alveolar", "westmeath", "lewinsky", "webx", "acco", "soya", "moz", "exorcist", "biofeedback", "atrios", "honduran", "seaview", "douche", "rsh", "soundcard", "resistive", "sylvain", "chubb", "snooper", "atn", "dbase", "katja", "icr", "firepower", "agu", "ges", "cissp", "mangalore", "laois", "ime", "unmodified", "keystroke", "zell", "parkersburg", "yoon", "gillmor", "joyner", "vinnie", "ccf", "grocers", "simulates", "flathead", "castellano", "sigia", "vesting", "misspelled", "prono", "headcount", "panache", "inu", "hallelujah", "joes", "cayuga", "nob", "tpb", "glug", "zodb", "gubernatorial", "goran", "bauhaus", "sarawak", "sparky", "sebastien", "wirelessly", "wpi", "sysop", "factored", "eula", "ohh", "bsb", "polymeric", "salivary", "mfi", "ftaa", "async", "dnd", "kristian", "circadian", "analgesics", "flintshire", "prakash", "productos", "phenotypic", "pelagic", "agronomy", "vss", "aironet", "weightlifting", "yugo", "audiophile", "unidos", "motorcycling", "raine", "testbed", "pediatricians", "fingerprinting", "bunbury", "tasking", "gmd", "emulated", "tweaked", "phonological", "barco", "gomes", "osf", "faridabad", "aprs", "snappy", "opa", "colonic", "jeroen", "qin", "zircon", "svt", "dansko", "caspase", "encinitas", "tuo", "remoting", "ploy", "achat", "freefind", "spellings", "canopus", "dme", "gaulle", "maplin", "dutchess", "wattage", "puke", "distinfo", "leia", "expeditionary", "amortized", "truckee", "albury", "humanistic", "travelogue", "triglycerides", "gstreamer", "leavitt", "shotguns", "discounting", "etoys", "thirties", "swipe", "dionne", "ebscohost", "tns", "geoquote", "upkeep", "truncation", "gdi", "bausch", "pomeroy", "harrods", "downgrade", "roomates", "biliary", "dumpster", "universalist", "acdbarc", "ywca", "oceanview", "fazendo", "shayne", "tomy", "resized", "yorkie", "qx", "matteo", "shanahan", "japonica", "froogle", "rehnquist", "megabyte", "ginsberg", "vivienne", "penticton", "inseam", "csh", "pressurized", "sld", "faves", "edf", "massagers", "ente", "timesheet", "anniston", "sigur", "toughbook", "histological", "clays", "pcx", "suzie", "honeycomb", "denier", "udo", "etcetera", "reopening", "herrmann", "ifr", "quantifying", "qigong", 
"cbn", "kurzweil", "chanukah", "programas", "fumbles", "jobseekers", "nitrite", "catchers", "mouser", "rrs", "knysna", "arti", "andrey", "textarea", "weis", "pesto", "ilm", "ponderosa", "kroatien", "transitioning", "whoops", "catamaran", "preoperative", "cbe", "verilog", "helios", "qz", "wheelbase", "narayan", "voyforums", "csg", "unctad", "monomer", "refueling", "ilife", "biennium", "coho", "pellepennan", "quartile", "anwar", "infobank", "hexagon", "ceu", "geodetic", "anda", "emporis", "ahmadinejad", "lubes", "consensual", "altimeter", "nmi", "psm", "lawler", "sharpener", "stellenbosch", "soundex", "setenv", "mpt", "goldfinger", "asahi", "ascorbic", "himachal", "dichotomy", "communigate", "covalent", "cantrell", "tarpon", "bluffton", "radix", "orthologous", "taichi", "borealis", "nerf", "rosedale", "policyholders", "nst", "racecourse", "extraterrestrial", "kok", "servicemen", "starwood", "asco", "nui", "phylogeny", "jis", "tiesto", "ameri", "plankton", "pkt", "seamus", "sublets", "unthreaded", "microstrategy", "cleanups", "fitchburg", "flowchart", "tacky", "sauk", "supercomputing", "antiwar", "illawarra", "benetton", "menopausal", "workgroups", "relive", "ketchum", "nieuws", "mirago", "reproducibility", "abalone", "ashmore", "ssx", "eachother", "gsx", "juggs", "ded", "geometries", "petzl", "edie", "quirks", "sbe", "bundy", "pina", "crayola", "acceptor", "iri", "precondition", "padova", "indica", "roddick", "teasers", "beveled", "consumerism", "flr", "yeovil", "boneless", "intracranial", "kbd", "tatoo", "gameday", "solute", "tupperware", "ridgefield", "gce", "quadro", "mumps", "trucos", "mopar", "haggis", "electromechanical", "styli", "whipple", "fpm", "arcata", "perego", "guwahati", "loudon", "legolas", "rockaway", "exhibitionist", "woolley", "msps", "toolset", "ferragamo", "bott", "godiva", "nsn", "vfw", "masculinity", "schrader", "bld", "lightfoot", "capitalizing", "rucker", "browsed", "hcg", "freenet", "bundling", "cannondale", "mcat", "blt", "mencken", "commerical", "dagenham", "codename", "nesgc", "profess", "rearrange", "warfarin", "stdin", "rohan", "overheating", "condon", "inflate", "npd", "gunnison", "hhh", "sfmt", "devonport", "copywriter", "bodybuilder", "poss", "psigate", "ecp", "airforce", "fleischer", "atmel", "rasta", "ravel", "jupiterresearch", "flycatcher", "cusack", "jenni", "gbps", "bombshell", "llbean", "arnie", "subdomains", "kale", "pcd", "shemp", "findtech", "huck", "vouyer", "horrendous", "complainants", "addy", "ehs", "fabricating", "mmo", "verdate", "cyberpunk", "enotes", "pecans", "ababa", "whitehorse", "barak", "juke", "schnauzer", "hairdressers", "prioritized", "rainforests", "exo", "rabin", "workday", "eared", "earphone", "passaic", "vme", "hypermedia", "udb", "jinx", "illiteracy", "carcinogens", "offres", "addressee", "thefreedictionary", "informants", "tics", "sublimation", "harnessing", "extenders", "fishman", "hmi", "tsk", "inj", "wvu", "zimmermann", "dupage", "belarusian", "maia", "lynyrd", "messianic", "mexicana", "generalist", "gastronomy", "ugs", "huckleberry", "ridgewood", "pii", "dua", "phan", "lightsaber", "vivanco", "catheters", "azerbaijani", "whitmore", "footy", "joinery", "wasatch", "octagon", "equates", "sorenson", "eames", "tacos", "misspellings", "trivandrum", "kingsville", "magnetics", "rce", "halide", "metabolite", "clo", "genders", "headgear", "gretzky", "harming", "insole", "colvin", "kano", "thurrock", "cardstock", "journaling", "univers", "aragorn", "principled", "namibian", "slacks", "mcsd", "wmp", "fairmount", "physica", 
"subtropical", "sager", "trk", "bowflex", "subcommittees", "jia", "ramesh", "sitepoint", "prawn", "phylum", "mephisto", "prf", "mundial", "waveforms", "algal", "schafer", "riddell", "gimmicks", "reparations", "injectable", "sher", "trondheim", "mhs", "libwww", "phenix", "tlv", "rena", "tcpdump", "quinlan", "ecampus", "kaya", "ethically", "sity", "fkk", "freeradius", "nmh", "puffin", "freeride", "ahern", "shaper", "locksmiths", "lichfield", "cheater", "tora", "hsi", "bootcamp", "torus", "mondeo", "cotta", "oac", "evi", "jre", "vignettes", "aculaser", "waxman", "raping", "oryza", "leashes", "babydoll", "srgb", "practicality", "winer", "thon", "battelle", "inp", "europcar", "pancreatitis", "americus", "immunohistochemistry", "woodlawn", "filigree", "forecasted", "bypassing", "chock", "chocolat", "messier", "gravis", "edson", "nathalie", "calendario", "blenheim", "clarksburg", "trigonometry", "virusscan", "flanges", "bowlers", "tsi", "ipos", "harlingen", "keypads", "sosui", "campanile", "vassar", "regress", "ghosh", "iab", "hao", "ntu", "ivey", "techdirt", "pmt", "minutemen", "pias", "celiac", "hough", "ingested", "hypothyroidism", "boyfriends", "jeong", "equifax", "baroda", "cybernetics", "tissot", "daf", "prefered", "rappers", "discontinuation", "mpe", "elgar", "cumulus", "brltty", "klan", "goku", "offsetting", "airmen", "halliwell", "ionizing", "angebote", "morphy", "bookmaker", "curio", "hookers", "amalgam", "notional", "webactive", "bechtel", "zambian", "reinhardt", "bridgend", "bendix", "dists", "magnetometer", "populist", "mimo", "bsu", "renfrew", "hesperia", "chautauqua", "mnemonic", "interviewers", "garageband", "invariance", "meriden", "aspartate", "aramis", "pleural", "tsu", "mediating", "gabriele", "resonator", "provincetown", "afx", "surpluses", "ertl", "holger", "castlevania", "vaniqa", "finisher", "ead", "quartets", "heber", "muschis", "anthropogenic", "thermos", "macroscopic", "torrington", "gillingham", "geopolitical", "flaherty", "varietal", "assfucked", "engle", "gorillas", "ihc", "shatner", "euc", "juarez", "helicobacter", "epidural", "luisa", "teardrop", "anion", "glosspost", "numeral", "mdx", "orthodontics", "tabby", "cyngor", "onl", "claddagh", "abf", "therm", "myeloid", "pugs", "sprocket", "roh", "unilever", "ctu", "genomebrowser", "sima", "hants", "maclaren", "chairmans", "yim", "workflows", "adn", "ansel", "dragostea", "hrvatski", "ayala", "bfg", "tonawanda", "imovie", "regionals", "kami", "jansport", "fanfic", "tasha", "nikkei", "snm", "lynnwood", "glucophage", "bicentennial", "arl", "radiologic", "kts", "agosto", "mineralogy", "corsicana", "harrier", "sciencedirect", "krugerpark", "oireachtas", "esposito", "adjusters", "olympiad", "fname", "iar", "allende", "ldc", "sited", "surry", "strainer", "paragliding", "whitetail", "pagemaker", "astrid", "tripled", "gwar", "atwater", "overpayment", "faeroe", "wisenut", "nagel", "blatantly", "chicano", "chongqing", "corporates", "applicators", "erasing", "svetlana", "fleer", "bossa", "deuces", "fud", "dalian", "anycom", "gunfire", "mcnair", "subtilis", "hdi", "percutaneous", "cursos", "cols", "urth", "northbrook", "rmk", "mgf", "voli", "leann", "pixmaps", "gigablast", "metronome", "blackman", "fliers", "rdbms", "imprimir", "grouper", "negate", "roessler", "intrastate", "manawatu", "blass", "ainsworth", "denzel", "tfl", "moped", "appointees", "bunkers", "refrigerate", "ligase", "otp", "beleive", "warlords", "hatteras", "symlink", "almeida", "blogcritics", "cochlear", "janelle", "alphabets", "atta", "foldable", "hydroponics", 
"precast", "univer", "purest", "fatboy", "cei", "westerners", "camarillo", "kelty", "volunteerism", "pdq", "openacs", "hor", "newham", "energie", "radiographic", "kinematics", "errol", "otabletest", "isobaric", "hba", "gratuitos", "innd", "eads", "personalise", "tbl", "fso", "patenting", "reciprocating", "rto", "subcellular", "crosbie", "harmonisation", "dunfermline", "janesville", "egroupware", "caritas", "tsm", "egf", "roa", "debhelper", "nsaids", "milt", "burleson", "pba", "ragtime", "adopters", "impor", "philo", "backseatbangers", "rushville", "saitek", "synthesizers", "vulva", "arapahoe", "posey", "minuteman", "zinfandel", "mayoral", "fortis", "medicina", "gallary", "honeys", "pinus", "interlink", "greening", "tesol", "artnet", "crw", "bansko", "brien", "silvery", "guevara", "thinkin", "sedu", "automakers", "igmp", "overtake", "semicolon", "bubbly", "edwardsville", "ques", "homebuyer", "nodal", "mpo", "unbeaten", "rawls", "ocx", "ork", "sheeting", "hallways", "alzheimers", "snooze", "kestrel", "nadh", "americorps", "prawns", "nonpartisan", "naps", "domina", "eldon", "palomar", "riedel", "hoppers", "onscreen", "gdk", "distillers", "uploader", "caltrans", "tyra", "cocksuckers", "mtbe", "hypertensive", "xie", "chinchilla", "bucs", "transformational", "sailboats", "heisman", "grn", "jct", "exemplifies", "arrhythmia", "astrometric", "workwear", "tolstoy", "asperger", "koop", "newydd", "transpose", "lpr", "xray", "ferrer", "microeconomics", "kafka", "telly", "grandstand", "toyo", "slurp", "allocator", "islas", "ila", "westland", "instantiated", "lewisburg", "stylists", "blackwater", "vivi", "hippies", "pul", "larkspur", "kea", "lesben", "motherwell", "ahs", "cappella", "neocon", "getname", "coyle", "rudi", "departamento", "winrar", "mussel", "britax", "diwali", "raines", "dso", "wyse", "geourl", "etheridge", "docomo", "webindex", "accrediting", "stapler", "pheromones", "woodson", "imm", "volcom", "telewest", "lcp", "bisexuals", "ozzie", "kitsap", "oic", "cutest", "hoon", "mpp", "cte", "dymo", "yolo", "quinton", "jorgensen", "printouts", "tempt", "credentialing", "scalloped", "sealey", "galvin", "etudes", "gurney", "bluefly", "schweitzer", "jawa", "geochemical", "allegany", "aldridge", "digitizing", "aki", "organically", "chatboard", "lomb", "uddi", "yng", "roleplay", "pavillion", "barstow", "patna", "rootkit", "spearhead", "leonid", "sunnis", "reticulum", "dulcimer", "unl", "kalman", "npl", "coronal", "rendell", "transparently", "mfs", "freeform", "gianfranco", "tantric", "reif", "woodhouse", "lifter", "seymore", "ogle", "sayin", "cpas", "videographer", "gpe", "stallone", "uams", "pula", "trudeau", "buss", "ouest", "korner", "fatherhood", "debussy", "qsl", "reflexes", "hlth", "wyman", "kingsport", "gauthier", "vadim", "magnetization", "trd", "aitken", "millers", "titted", "clerics", "busses", "trai", "underpin", "ajc", "dumbledore", "vinny", "delicately", "webroot", "yip", "producti", "teksty", "pullout", "dmi", "yellowcard", "sbi", "dmt", "nce", "birdhouse", "bnd", "neko", "chillicothe", "peacekeepers", "schmitz", "rimming", "solent", "propylene", "supercross", "zsh", "multnomah", "foxconn", "fuelled", "biohazard", "horrifying", "parque", "toffee", "fpl", "riemann", "horsesex", "mahatma", "mubarak", "bachmann", "caswell", "chiron", "hailey", "pippin", "nbp", "ramallah", "isoforms", "dictyostelium", "tauranga", "hawkeyes", "maxxum", "eire", "knowit", "topanga", "geller", "parliamentarians", "inadvertent", "utes", "boardman", "denham", "rofl", "homophobia", "winches", "uptodate", 
"centralia", "eschaton", "hoaxes", "hillingdon", "buble", "hairspray", "acdsee", "offerte", "urb", "intellicast", "minn", "frc", "antisense", "pelosi", "shader", "gisborne", "grafts", "hillbilly", "intifada", "carina", "fon", "ehow", "vpi", "brunel", "rtx", "roald", "externalities", "metzger", "balsamic", "classically", "calorimeter", "necked", "idiopathic", "lileks", "tahoma", "ogc", "unidirectional", "westbound", "layla", "galeries", "cabinetry", "suarez", "stipulates", "towertalk", "optimizes", "serializable", "universite", "ald", "ringsurf", "toques", "rayleigh", "dropouts", "fws", "gamecocks", "gazprom", "braden", "amet", "sinusitis", "rusk", "fractals", "depressants", "clec", "tryouts", "rushmore", "shel", "adapts", "farlex", "emac", "phl", "remax", "wizbang", "endnotes", "rodman", "dissidents", "iterate", "conair", "ember", "vsa", "neolithic", "mgx", "acuvue", "vetoed", "uruguayan", "corrigan", "libxml", "etronics", "simian", "atmos", "msk", "iib", "multimode", "teensforcash", "annu", "sunbury", "girardeau", "dbg", "morrisville", "netmeeting", "asso", "estore", "universes", "ganglia", "ghanaian", "resonances", "subjectivity", "microarrays", "easypic", "abbeville", "newsre", "cobble", "flightgear", "spode", "berea", "mckinnon", "bucky", "plunger", "xing", "siggraph", "bookends", "klingon", "moreland", "lowery", "histograms", "moll", "floorplans", "netherland", "frasier", "rossignol", "polyline", "laroche", "cytosol", "disposals", "xforms", "mosul", "motu", "amersham", "chordata", "crafters", "kingsbury", "yoox", "hyphen", "dermalogica", "moreton", "glycoproteins", "aristide", "unsorted", "rambus", "ptf", "scorsese", "patricks", "microwarehouse", "bch", "blyth", "grampian", "livedaily", "nces", "alizee", "detain", "andrzej", "optimus", "alfie", "immunisation", "pfaltzgraff", "eyelets", "swordfish", "legals", "hendry", "homogeneity", "hartland", "recreated", "leaded", "hunan", "supersonics", "amstrad", "vinaigrette", "scd", "mch", "nintendogs", "dvx", "unreadable", "plattsburgh", "balsa", "aya", "brasserie", "gcl", "salton", "paulson", "dvdplayer", "silverton", "enduro", "peepshow", "givens", "bristow", "pecuniary", "vintages", "ozarks", "johor", "zia", "mucosal", "prehistory", "histidine", "mti", "drape", "tectonics", "lorentz", "distributive", "sharps", "seguridad", "ghd", "gilberto", "doomsday", "otters", "gervais", "mews", "scarring", "daydream", "gooding", "snicket", "bicarbonate", "boggs", "wps", "dietitian", "itf", "harriman", "paprika", "haviland", "novato", "dyn", "hornsby", "biden", "disallowed", "zahn", "jordi", "correo", "frida", "chappelle", "resourcing", "methuen", "zoneinfo", "adelphi", "orbison", "geffen", "informatik", "novella", "brie", "galeon", "silos", "lrwxrwxrwx", "shortstop", "cua", "dordrecht", "permissive", "creston", "prec", "nco", "nehru", "bromwich", "disposables", "estrogens", "mulholland", "rui", "haz", "eol", "odometer", "tooltip", "ibb", "mosby", "druids", "aggregators", "herfirstbigcock", "rti", "arvada", "fixme", "rodger", "tively", "gizmondo", "cucina", "ivo", "griddle", "pricelist", "juventus", "conroe", "multipliers", "aparthotel", "kitesurfing", "couplers", "aftershaves", "rehabilitate", "patina", "scansoft", "quadra", "sousa", "phonology", "dunkin", "deat", "plasmodium", "bums", "undersea", "aretha", "lts", "boxster", "staf", "bcg", "overexpression", "vanadium", "wilkerson", "riverboat", "voa", "kohn", "bgl", "jiu", "ipi", "contl", "ottumwa", "gynecologic", "unstoppable", "pedometer", "shortfalls", "ksa", "bookmarking", "ingham", "yoder", "esu", 
"vbs", "barbershop", "drinkware", "idiosyncratic", "googlebot", "floppies", "tashkent", "foxboro", "allstar", "hervey", "fes", "kilowatt", "evga", "nikos", "tance", "varian", "mops", "coughlin", "commutative", "lansdowne", "bcbg", "syrah", "affx", "angiogenesis", "nicosia", "nematode", "kegg", "pkr", "enso", "administratively", "tma", "capa", "ronaldo", "leverages", "cco", "cancerous", "banderas", "gmane", "vq", "gabriela", "secretory", "mmx", "pinehurst", "nro", "reassessment", "ippp", "chillers", "elbert", "sunil", "yuki", "periodicity", "trypsin", "bursary", "dependability", "overdraft", "deirdre", "colonia", "mycoplasma", "lesbains", "adelphia", "scribner", "aro", "activites", "uaw", "frankel", "cacti", "bugaboo", "palmdale", "aeration", "kita", "muscletech", "watersport", "paf", "nxt", "uscg", "yitp", "gibb", "gener", "nak", "unm", "zhong", "chowder", "expatriates", "centerpieces", "freaked", "curbs", "tdp", "gruppensex", "triphosphate", "acronis", "wcw", "prostaglandin", "completo", "darwinports", "abiword", "hippocampal", "atlassian", "technik", "vineland", "commentaires", "ters", "stuttering", "forcefully", "depo", "edinburg", "kwanzaa", "kzsu", "mascots", "harrisonburg", "cadbury", "scoble", "aor", "conundrum", "bullard", "aiff", "comedic", "apical", "synoptic", "miyazaki", "beryllium", "disinfectant", "sentra", "joi", "jokers", "wci", "piglet", "wildcards", "tresor", "sketchbook", "bbd", "halliday", "manolo", "tifton", "repre", "hendrickson", "windhoek", "lomond", "atapi", "hbh", "eccles", "ofa", "dcu", "spatula", "intergenerational", "epub", "cates", "featurette", "gotcha", "kindersley", "drifter", "cvsnt", "ogy", "lagerfeld", "lewin", "youve", "unaids", "larue", "stardom", "assad", "glenview", "brantford", "kelis", "nola", "lxr", "toastmasters", "appr", "recs", "ranchi", "exotics", "articulating", "jiffy", "goodall", "gconf", "verkaufen", "scalextric", "ryobi", "qname", "immerse", "farris", "joinwelcome", "cce", "wittenberg", "capone", "mtp", "busines", "rebounding", "usborne", "hirsute", "prelim", "prepress", "rop", "militias", "ttd", "commodores", "ecnext", "dbf", "goldsboro", "ashburn", "roslyn", "neverland", "coolio", "lindbergh", "freeciv", "indice", "vertebral", "ectopic", "abcs", "lge", "bnl", "coulomb", "minton", "oban", "restatement", "wakeboard", "unscheduled", "dbc", "visser", "clipland", "thermocouple", "masala", "clt", "drw", "rosas", "rdram", "mcclain", "maki", "rosenbaum", "eagan", "slv", "sunburn", "pleistocene", "nips", "sfi", "canisters", "kas", "waddell", "solvency", "lynette", "plainview", "fielded", "blowfish", "zyprexa", "altrincham", "workin", "afton", "topologies", "touts", "pino", "xelibri", "lora", "mendez", "undelete", "samuels", "rajesh", "soros", "unjustified", "nfo", "crf", "digitale", "sitcoms", "analogues", "leukaemia", "ukulele", "paperboard", "fied", "cobain", "trillian", "offaly", "girlie", "ilcs", "friggin", "wq", "davinci", "oxon", "expressionengine", "bains", "rse", "callbacks", "cdv", "hannity", "replicates", "sidewinder", "queueing", "slugger", "humidifiers", "desai", "watermarks", "hingis", "vacanze", "onenote", "montebello", "streetcar", "stoker", "fulcrum", "sadistic", "cassiopeia", "corwin", "qut", "martingale", "saucony", "winslet", "criticizes", "baytown", "synchronizing", "reclassification", "woohoo", "htl", "caithness", "takeaway", "timeouts", "reit", "dietz", "devo", "morgage", "koo", "ducky", "bola", "mdb", "multimodal", "recenter", "hematite", "hensley", "asterix", "hokies", "blumenthal", "multinationals", "aag", "debs", 
"playin", "emeril", "mcalester", "adria", "shipman", "burzi", "incinerator", "muenchen", "convening", "unorthodox", "fibroblast", "gloryholes", "carrick", "immersive", "darmowe", "catagory", "glob", "cisplatin", "rpa", "fertiliser", "nuova", "halstead", "voids", "vig", "reinvent", "pender", "bellied", "oilfield", "afrique", "ream", "mila", "roundtrip", "mpl", "kickin", "hiatt", "droid", "addenda", "restorations", "boll", "knightley", "worksite", "lcg", "typename", "aris", "isv", "doctype", "balinese", "sportster", "dence", "lesbi", "saversoftware", "bursaries", "cuny", "cardiopulmonary", "biologic", "wanadoo", "shiatsu", "homewares", "dpc", "qk", "schizophrenic", "unplug", "albergo", "pressroom", "gingrich", "basra", "greenbrier", "superoxide", "porcine", "oldfield", "wxdxh", "luder", "shim", "manx", "understatement", "geda", "tormented", "immanuel", "whistleblower", "hopi", "idd", "gol", "bayswater", "lyne", "epox", "kennewick", "subtree", "inshore", "ibd", "hepnames", "benn", "kettler", "clots", "reducer", "naturists", "lvd", "flonase", "sympa", "hinsdale", "trav", "spina", "meatballs", "underrepresented", "bpl", "etb", "brane", "tightness", "tracklisting", "horizonte", "rgd", "concatenation", "suffixes", "kilmer", "cloverdale", "barbera", "seascape", "amdt", "linings", "horseradish", "telepharmacy", "itasca", "varbusiness", "paulsen", "cortina", "ides", "hazelnut", "ashfield", "chaco", "reintegration", "pampering", "boland", "airtime", "surrealism", "imi", "eit", "clamshell", "tonk", "luminance", "ixtapa", "gryphon", "ecos", "cair", "rochas", "farnsworth", "synchronisation", "suresh", "minnow", "bloor", "gumbo", "faqforum", "kunal", "jossey", "rci", "upa", "melamine", "wonwinglo", "episodic", "xcel", "jurys", "descendents", "ezmlm", "twikiaccesscontrol", "tonos", "lated", "montero", "divisive", "soci", "guia", "gastonia", "inappropriately", "valentina", "lubricating", "itworld", "deca", "branford", "kody", "accruals", "epitope", "jdj", "crenshaw", "perlman", "medallions", "rokr", "usg", "microtel", "rsx", "graff", "jcsg", "fds", "cooney", "whittle", "gmthttp", "rayburn", "etat", "suppressant", "hecht", "sportsnation", "sso", "ccnp", "reworked", "etl", "catapult", "vries", "procurve", "cbot", "elitist", "convoluted", "iberian", "optoelectronics", "mailscanner", "kazakh", "stimulator", "schoolchildren", "commweb", "thornhill", "tweezers", "lani", "ouvir", "filetype", "bearcats", "fanclub", "boehringer", "brasileira", "webservices", "kinematic", "chemie", "inoue", "unsupervised", "norvegicus", "copycat", "orrin", "snooping", "hashem", "telesyn", "mcb", "imple", "dorms", "elist", "laminates", "ingalls", "checksums", "tandberg", "iirc", "mackinnon", "roddy", "margolis", "erotaste", "pimps", "mcdougall", "smg", "mpx", "fhm", "travelzoo", "thermally", "teleconferencing", "albino", "cargill", "hyd", "visualizing", "mothercare", "sprinter", "isomorphic", "pepperdine", "cvc", "mahon", "conjugation", "macally", "anklets", "impasse", "disinformation", "beavis", "delicatessens", "intensively", "echocardiography", "pav", "amok", "riddick", "sexism", "ordinates", "gallaries", "baldur", "elon", "beasty", "arty", "leukocyte", "chau", "cotter", "peptidase", "fsi", "postmodernism", "osm", "squeaky", "silicate", "alcohols", "zydeco", "testi", "trujillo", "predictably", "weider", "shareholding", "giordano", "cardiomyopathy", "aprilia", "mcnabb", "lenz", "homeencarta", "disconnection", "scada", "spacetime", "trb", "awol", "espa", "bionic", "batista", "bookshops", "feynman", "captioning", "sibelius", 
"obstetric", "marigold", "ostsee", "martel", "hcfa", "ino", "ctm", "whi", "typesetting", "ervin", "chroma", "steinbeck", "pusy", "biblioteca", "neutrophils", "dunbartonshire", "lollipop", "brash", "avl", "opi", "declaratory", "corus", "elph", "naf", "htp", "hydrate", "ubb", "littlefield", "neutrinos", "aso", "bric", "subways", "tui", "leominster", "ncsa", "snipsnap", "negativity", "arcview", "picasa", "tortillas", "awww", "dara", "ragga", "innova", "doorbell", "ebc", "sgl", "unsettling", "snps", "explicito", "phila", "bugger", "persson", "embolism", "iip", "silverplate", "lats", "ovc", "roebuck", "sbp", "lipton", "starling", "coreldraw", "haney", "globemedia", "adrenalin", "murphys", "nicklaus", "yardley", "afghani", "tst", "hrd", "haulers", "energize", "prohibitive", "sydd", "nida", "barcodes", "dlink", "includ", "orgie", "macnn", "danni", "imaged", "sprayers", "lindberg", "filesharing", "calibrations", "atorvastatin", "teague", "vantec", "lattices", "cucamonga", "warne", "derwent", "hospitls", "flintstones", "rotisserie", "orcs", "scallop", "biostar", "computationally", "jobseeker", "siem", "sunbathing", "ronda", "npg", "cerritos", "kaz", "chard", "pershing", "clotting", "zhi", "programm", "singlet", "morningside", "simm", "egr", "hackensack", "taf", "kinshasa", "availablity", "lrd", "lugs", "kiddies", "cpsc", "hebert", "asta", "gato", "cimarron", "crowell", "fanart", "nagin", "gfi", "collapsible", "helsing", "haringey", "phu", "stes", "prophylactic", "rosenfeld", "cityscape", "tradeoff", "sask", "instill", "ypsilanti", "lifes", "imate", "firestorm", "homestay", "inept", "peet", "shiseido", "steves", "sascha", "reconstructing", "okt", "droplet", "dhe", "lakota", "revises", "ipt", "macrae", "parlay", "bdt", "woodville", "xlarge", "proform", "gothamist", "coexist", "advisement", "fulltime", "macosx", "metra", "cyg", "turtleneck", "aquos", "hcs", "tsar", "isbl", "gigabytes", "triangulation", "burleigh", "anarchism", "stabilizers", "gbic", "ciba", "activa", "cgt", "terrance", "smoothies", "orsay", "belling", "bnsf", "opps", "representational", "kagome", "snark", "woodard", "malignancy", "makati", "cbm", "bwi", "farah", "sitewide", "newfound", "collider", "candi", "lgf", "boylston", "swi", "rizzo", "wristwatch", "owensboro", "papas", "subscribes", "lah", "wining", "cies", "ganesh", "castleton", "zippers", "decaf", "emphasises", "cbp", "crx", "shakur", "rso", "euroffice", "roush", "caloric", "plaintext", "ofm", "daniele", "nucleoside", "xsi", "buttercup", "oakes", "searle", "shuppan", "lanyards", "cushman", "admissibility", "courtenay", "aspartame", "sleuth", "trudy", "neem", "magix", "cosh", "aurangabad", "golding", "ethnography", "yamaguchi", "bhs", "bulkhead", "kain", "abta", "herzegowina", "minas", "paradiso", "cityscapes", "oit", "replenishment", "autobytel", "kroger", "dexamethasone", "strunk", "yoghurt", "nationalists", "tfs", "definable", "bruin", "psychoanalytic", "reserva", "nasser", "simp", "zmailer", "birthing", "collinsville", "dimer", "powells", "abebooks", "stemware", "landsat", "peebles", "dewar", "docked", "burp", "radioisotopes", "obstetricians", "vinson", "efx", "naia", "idb", "fahey", "multisync", "worley", "oms", "kerri", "arith", "democratically", "datasource", "mcelroy", "cze", "shopgenie", "udev", "nicol", "camara", "degas", "benassi", "prefabricated", "gastro", "accessor", "meteorites", "notts", "lipoproteins", "attleboro", "parenteral", "biosystems", "cerebrovascular", "fsn", "bahraini", "actuaries", "delicatessen", "rng", "marianna", "creatas", "kidderminster", 
"waukegan", "antifungal", "promulgate", "mvr", "socorro", "maximized", "bde", "dlx", "erythromycin", "dtg", "nady", "leibniz", "flix", "cusp", "homers", "crandall", "holcomb", "beaulieu", "tct", "abington", "pointy", "hamradio", "meso", "monmouthshire", "danvers", "tpl", "baptisms", "backprevious", "carnaval", "recompile", "mainboards", "fclose", "melodias", "cliquez", "doberman", "installshield", "fasb", "estas", "htpc", "stover", "cerruti", "brainerd", "oxycodone", "istituto", "revs", "maha", "compressive", "wombat", "antenne", "patek", "zippy", "neteller", "odeon", "sbir", "backslash", "townhome", "victorville", "amityville", "arpa", "trannys", "goers", "chipper", "gulfstream", "modulate", "xserver", "infosec", "agt", "underwired", "ambiguities", "khai", "norepinephrine", "kundalini", "elkton", "carcassonne", "saygrace", "appending", "marathi", "songbooks", "islamists", "recursos", "newcomb", "stampa", "newscast", "vtp", "stockwell", "nederlandse", "outtakes", "boos", "lavie", "fina", "retinopathy", "deportes", "tremont", "barrio", "buggies", "zacks", "exercisable", "speedup", "holl", "efc", "cibc", "ontological", "thinkstock", "flashbacks", "kennett", "dentures", "eckerd", "xetra", "stg", "reimbursable", "informit", "cdbg", "yeltsin", "nitrates", "aeruginosa", "rpath", "archaeologist", "mitotic", "generalised", "outliers", "sug", "frac", "cowon", "semifinal", "deactivate", "studie", "kazakstan", "sva", "citesummary", "kubota", "chroot", "falciparum", "shifters", "undetected", "mepis", "caries", "microstructure", "ringwood", "pleaser", "compuserve", "disassembly", "miter", "propositional", "javaworld", "ssd", "writeups", "hoskins", "buytop", "frome", "talkie", "loy", "exxonmobil", "emeryville", "gamepad", "metazoa", "kml", "maul", "taoiseach", "siskiyou", "censuses", "offseason", "scienze", "shelved", "etd", "carryover", "fagan", "jada", "wholeheartedly", "polyps", "avast", "northport", "inelastic", "puebla", "idps", "warrenton", "traffickers", "neckline", "aerodynamics", "eto", "satcodx", "leviathan", "dfg", "classico", "harvmac", "wrinkled", "minimising", "bifurcation", "kimi", "npcs", "astrazeneca", "poetics", "jef", "miniseries", "yesterdays", "dcm", "issa", "toxicol", "libdir", "angolan", "waynesboro", "relayed", "fcst", "ulcerative", "bgs", "airlift", "downlink", "endothelium", "suppresses", "weinberger", "appointee", "darcs", "hashes", "nuff", "anza", "borehole", "flt", "htdig", "hain", "nodules", "bowdoin", "tunable", "memcpy", "ucp", "panelist", "opr", "transsexuelle", "mailroom", "nijmegen", "medalist", "ryman", "gmos", "recessive", "putas", "abou", "encrypting", "enola", "rippers", "steyn", "redefinition", "infield", "reformat", "atchison", "yangtze", "zw", "peels", "preterm", "mindfulness", "hwnd", "stances", "synapses", "hashing", "gere", "lrg", "unmounted", "armoires", "archetypes", "behemoth", "stereophonics", "obsessions", "piosenek", "mhp", "thrower", "prana", "trike", "bmps", "distillery", "estudios", "ceredigion", "funnier", "rickard", "disengagement", "gratuita", "gifting", "lpga", "esse", "maglite", "iodide", "bakker", "hariri", "digitization", "fistula", "campaigners", "kel", "acca", "lauri", "rockwall", "kellysearch", "crawfish", "tigi", "symbolizes", "liverishome", "thay", "ecuadorian", "injectors", "natick", "mornington", "booklist", "centrist", "inria", "torbay", "femur", "methotrexate", "landslides", "separatist", "jelinek", "darwen", "aung", "outlooks", "matrimonials", "busybox", "openview", "lifeboat", "hara", "tuskegee", "aly", "ciprofloxacin", "gul", 
"reconfigure", "ahn", "instantiation", "trw", "spambayes", "shelburne", "programma", "lbl", "escalated", "lucasarts", "eastbound", "grits", "apoptotic", "pulldown", "redditch", "trendnet", "iupui", "nsr", "treehouse", "payson", "jaz", "hedrick", "lineman", "streamlines", "reengineering", "cleaver", "prodotti", "inflight", "tracksuit", "polyphonics", "skidmore", "catia", "overuse", "mge", "newsprint", "visakhapatnam", "miko", "hemorrhoids", "haulage", "torrie", "usergroup", "poms", "mostrar", "convolution", "endtime", "maura", "hefce", "abbie", "mfp", "galician", "golem", "conifer", "phenylalanine", "wareham", "nonpublic", "henk", "inversely", "beebe", "dancefloor", "eyelet", "immunologic", "chengdu", "beeswax", "lanham", "crosswalk", "lecken", "kitsch", "scand", "sweeteners", "farnborough", "jalandhar", "publi", "visioneer", "sprints", "reinhold", "emptive", "compa", "hrk", "faked", "manilow", "burnsville", "banyan", "opinionated", "quirk", "hnl", "caterina", "blinks", "fiore", "rationing", "tellers", "jrnl", "waterborne", "astron", "nity", "gree", "tradeoffs", "goldeneye", "occuring", "calientes", "recomend", "functor", "trowbridge", "niu", "mmvi", "obe", "gyro", "technews", "shampoos", "unfiltered", "sabha", "bundesliga", "enix", "communique", "cantina", "cafta", "polyamide", "selectmen", "lncs", "luge", "necromancer", "carcinomas", "subcontinent", "dodds", "seaton", "transcriptase", "balmoral", "specifier", "subsidize", "icl", "galaxie", "ldflags", "hiya", "nappies", "crippling", "xul", "nti", "aspherical", "misheard", "ecw", "sundial", "odom", "flaky", "schlesinger", "kryptonite", "typology", "hydrangea", "preamps", "aesthetically", "vrs", "alvaro", "htg", "heston", "ghia", "sophomores", "binh", "allrefer", "dcf", "scarica", "chorale", "ooc", "fredonia", "tiaras", "sdio", "distr", "dscp", "cogeneration", "flite", "harddisk", "kennedys", "telefono", "saleen", "bosco", "cyclase", "dreamcatcher", "csw", "braddock", "ethnically", "wbt", "morro", "smurf", "yeager", "gelding", "blurring", "deva", "fom", "mastectomy", "cassell", "sarnia", "jaundice", "lastest", "asterisks", "nympho", "jeffers", "hyun", "cooktop", "fddi", "aspergillus", "agric", "kdc", "medics", "mwh", "photosite", "gip", "affirmations", "variational", "socializing", "crankshaft", "isls", "mensaje", "tagline", "airframe", "beater", "preowned", "dietetic", "storedge", "redacted", "rittenhouse", "stereotypical", "klass", "fpa", "treks", "victimization", "parallax", "zante", "splices", "imagenes", "rete", "akita", "nonresidential", "hellman", "durex", "robison", "tof", "lpd", "seri", "freetype", "nexis", "ldv", "collegefuckfest", "aiu", "molloy", "carcinogen", "brs", "catalyzed", "heatwave", "yv", "spindles", "herron", "sita", "watchtower", "fabrizio", "unmanaged", "gtg", "preteens", "heme", "renumbered", "omr", "cowell", "hyip", "crossbow", "speciation", "tfc", "whidbey", "betta", "imt", "emmet", "jewelery", "lumina", "statistician", "symmetries", "observatories", "bupropion", "telligent", "fungicide", "aiptek", "crosstalk", "mello", "deepsand", "litas", "haart", "worx", "coyne", "adenovirus", "hakim", "countywide", "gnucash", "puree", "stott", "sdg", "mandeville", "portugese", "maurizio", "tachycardia", "aja", "eaa", "warrick", "cosine", "veb", "patong", "ballina", "summarise", "accrington", "rnas", "haddon", "xpc", "swath", "azeri", "wta", "ulf", "kleen", "cvm", "meehan", "jenifer", "infiltrate", "mapinfo", "knightsbridge", "renounce", "jesper", "blairsville", "copilot", "koontz", "fma", "northgate", "phobias", "metaframe", 
"nutritionist", "effector", "bumsen", "rcm", "hairstyle", "nesbitt", "diuretics", "cemetary", "iap", "discards", "basie", "discontinuous", "iqbal", "uncorrected", "stillman", "chloro", "bighorn", "heartbreaking", "xxxvogue", "leitrim", "prg", "justifications", "gimmick", "brasilia", "recordin", "abra", "trn", "zg", "acrylics", "recensione", "fouled", "wiretap", "dvrs", "vocs", "moniker", "scholes", "sharpeners", "calida", "nse", "calloway", "tpicd", "prods", "hfc", "ltda", "snk", "waypoints", "nrm", "underscored", "herrick", "starwars", "smbs", "unreported", "phelan", "guarani", "tampon", "easels", "sxga", "webform", "artista", "elkhorn", "ventana", "sublet", "chiltern", "antares", "peaking", "stichting", "forall", "menuitem", "marshmallow", "hawai", "nfa", "cals", "seltzer", "utep", "homeostasis", "swp", "akamai", "goodie", "milkshake", "thrasher", "switchers", "brussel", "hartwell", "aup", "electrolytes", "machu", "unshaved", "gor", "ilya", "maneuvering", "gaby", "softwood", "ajay", "croupier", "hausa", "compacts", "similiar", "elev", "egos", "rhinitis", "dreamhack", "aop", "beastialty", "whedon", "microcontrollers", "dreamhost", "overcrowding", "retractions", "pinging", "catheterization", "holton", "smears", "jmd", "melo", "exons", "mariachi", "igi", "bday", "reseal", "compositing", "oskaloosa", "coopers", "psone", "versione", "storys", "escher", "hotfix", "rmp", "gaynor", "biota", "dossiers", "arpt", "winsor", "hairdryers", "axon", "morrowind", "puter", "chubbyland", "deflation", "pdo", "dreyfus", "worsened", "darlin", "treme", "reconstituted", "aveda", "legge", "kasper", "mugler", "yorks", "ddi", "badlands", "deploys", "pols", "internets", "backstroke", "resultados", "spooner", "musicmoz", "toothbrushes", "bugatti", "abrahams", "comentarios", "brandywine", "callaghan", "diskettes", "resonate", "intellivision", "castelle", "advertises", "fives", "titusville", "plas", "royston", "nace", "digitaladvisor", "adesso", "geekbuddy", "lipoic", "hazelwood", "gravatar", "outfield", "carcinogenesis", "gdr", "phenolic", "incrementally", "pqi", "lenght", "acompanhante", "orm", "terrapins", "daria", "vander", "ccie", "mathml", "legalization", "allendale", "modernize", "orl", "gert", "restarts", "juris", "brookside", "streamer", "rollei", "accumulator", "picchu", "abril", "crocus", "zl", "citizenry", "accountemps", "swenson", "unfpa", "ewido", "centreville", "alisa", "kingsway", "erlangen", "offtopic", "laundromat", "redeemable", "maxillofacial", "slutsfree", "glp", "baumann", "revolutionaries", "chillin", "cardomain", "creamed", "tarp", "schering", "aten", "bikaner", "chimpanzee", "petco", "flurries", "rau", "miki", "meson", "parathyroid", "cmb", "analgesia", "nqa", "theyre", "elp", "altera", "jeddah", "nannies", "pawtucket", "bimonthly", "senna", "wardrobes", "surgically", "nongovernmental", "inge", "rmdir", "miso", "itx", "hydrostatic", "attrib", "cheaters", "hagan", "canlii", "leong", "koehler", "clostridium", "nerdy", "mcnulty", "megastores", "imperatives", "bpd", "archetype", "kkk", "oren", "halsey", "artic", "techworld", "vnd", "shamanism", "numara", "csx", "reiserfs", "roussillon", "cheadle", "crea", "alcorn", "ences", "bowser", "fizz", "rationalize", "karoo", "unearth", "biopsies", "inconclusive", "hookups", "herrin", "thermostats", "canoscan", "moldovan", "jamiroquai", "xerces", "subclause", "classname", "makefiles", "bettie", "sheesh", "birdwatching", "speakeasy", "harpers", "hayashi", "epitopes", "drivel", "blandford", "foci", "toppings", "cantilever", "biloba", "pth", "tweety", 
"initializes", "keck", "fisica", "macromolecular", "eic", "skagit", "kimura", "baca", "pareto", "lymphoid", "apacer", "forklifts", "pvs", "refuges", "jal", "habana", "stateless", "virtua", "cerebellum", "vtk", "breville", "statehood", "dct", "palgrave", "bledsoe", "insanely", "inglese", "aidable", "bubblegum", "aphex", "wroclaw", "rajkot", "taxidermy", "esubscribe", "cartagena", "juergen", "itravel", "pashmina", "gustafson", "jacqui", "salim", "barnum", "anthropologists", "glues", "undercut", "eci", "cstv", "watsonville", "roaster", "redbridge", "hypertrophy", "raza", "duron", "xserve", "wobble", "fergie", "bohr", "boilermakers", "counterstrike", "hinterland", "sufi", "milfcruiser", "afdc", "niggaz", "housewarming", "regenerative", "corre", "liquidators", "clegg", "bagless", "bleachers", "deodorants", "bacteriophage", "sheena", "prez", "brasileiros", "transect", "thumbshots", "soloists", "borges", "sinusoidal", "manpage", "lazer", "babys", "crossovers", "parsers", "lsl", "chuan", "hauler", "cataloguing", "oralsex", "storia", "fotosearch", "usfs", "leappad", "interesdting", "headroom", "fortnightly", "yerba", "kuta", "clearfield", "huggins", "washoe", "srg", "stabilisation", "sayers", "publis", "intangibles", "tameside", "summerville", "uvm", "whalen", "kusadasi", "hcp", "flak", "ual", "cubed", "yuck", "concacaf", "textbox", "erythrocytes", "dinky", "divo", "injunctive", "honed", "coincidentally", "kolb", "kruse", "microm", "portugues", "pil", "tht", "deathmatch", "publica", "mde", "pollination", "ews", "synchro", "etobicoke", "midori", "chutney", "jrs", "naturopathic", "dermatologist", "thumbnailpost", "casein", "chillout", "stefanie", "chewable", "direc", "quintana", "normals", "villeneuve", "scrum", "everyman", "lopes", "eastland", "footballers", "xviewg", "metropole", "swarthmore", "multicenter", "fett", "sagebrush", "convenor", "pco", "proteome", "warheads", "radiologist", "liao", "westview", "optus", "medicinenet", "hitches", "britten", "palettes", "vma", "depauw", "gunman", "agassi", "panoz", "uwb", "movi", "scanlon", "nutri", "mitra", "guilders", "filmpje", "indexer", "ofdm", "ullman", "coachella", "localised", "recom", "downgraded", "ncep", "lalique", "weill", "jeez", "varadero", "chicco", "athabasca", "redd", "azusa", "unbuffered", "phoning", "rtty", "spacey", "fmla", "albatron", "breakpoints", "sperma", "aran", "ciencias", "mortage", "legato", "agarose", "avoca", "reservados", "russellville", "oneonta", "badass", "cfi", "pesca", "carvalho", "nass", "mainpage", "mccord", "kellie", "allstars", "darwinism", "tariq", "workarounds", "omia", "flannery", "rediff", "lecithin", "okmulgee", "lates", "recertification", "phosphorylated", "fusing", "nerc", "avermedia", "abuser", "sevens", "mukherjee", "anatomic", "watercooler", "gatsby", "litho", "mischa", "bangla", "menard", "rattling", "artes", "vacaville", "teo", "enermax", "hypo", "hadron", "gosford", "legalize", "millbrook", "epinephrine", "transom", "liebherr", "mwc", "biel", "vcu", "mils", "oreal", "picayune", "rabanne", "gorbachev", "norelco", "playset", "massacration", "frontman", "garvin", "autologous", "wiretaps", "duggan", "jrc", "chantelle", "liddell", "enraged", "gir", "adrien", "blotter", "jq", "menubar", "gagnon", "sitters", "rdc", "jod", "meteo", "cept", "bih", "programing", "humpback", "fournier", "alquiler", "reprocessing", "chaz", "bartending", "sshd", "opodo", "patiala", "jaques", "glc", "fantastico", "schiffer", "preclinical", "sfn", "conklin", "wheelers", "deductive", "cunard", "pygmy", "jewett", "environnement", 
"biddle", "basu", "tachometer", "bks", "nonproliferation", "cacharel", "elysees", "orchestration", "adipose", "usu", "freeservers", "potting", "uncomplicated", "piaa", "progs", "ues", "tobey", "sife", "wenzel", "debi", "baez", "tana", "gedcom", "uvc", "puccini", "seca", "ligation", "deconstruction", "inductance", "topicparent", "zanaflex", "medicus", "dmitri", "reallocation", "kalispell", "haight", "teleport", "skylights", "rehabilitative", "swab", "latimer", "boombox", "prorated", "bbr", "pansy", "reassignment", "hydrodynamic", "confirmations", "postulated", "unlabeled", "tosca", "brentford", "integrin", "ranlib", "differentiates", "skelaxin", "velo", "multiprocessor", "tabla", "celluloid", "identically", "saddlery", "whiteside", "eurail", "endicott", "dingo", "sessional", "pagination", "webtopiclist", "infopop", "accc", "iie", "burl", "truncate", "hightower", "polygraph", "allianz", "digress", "overseen", "scg", "thotlib", "bluetake", "cowes", "mailorder", "fetuses", "lowndes", "shr", "childbearing", "aaj", "crayfish", "minotaur", "heist", "mayne", "repaint", "asq", "contr", "zool", "spastic", "suprised", "illuminati", "piezoelectric", "rfps", "cutouts", "ilc", "vinton", "enw", "meir", "tanita", "tpr", "subsidised", "arcsec", "wrestlemania", "fhs", "getter", "mimics", "watermarking", "aftercare", "coombs", "wolfson", "sefton", "compu", "bonaventure", "appz", "ecl", "gview", "temperatura", "diastolic", "defaulted", "cesarean", "dialling", "rescinded", "chitika", "tsvn", "discoloration", "chelan", "morel", "iles", "kashmiri", "stacie", "collages", "enabler", "ogo", "mowbray", "schuler", "finlay", "gezondheid", "ylang", "lufkin", "tenge", "acosta", "turbotax", "herbals", "moderates", "piotr", "chairmanship", "covad", "comunidad", "moores", "hurghada", "malformed", "mks", "seatbelt", "dumbbell", "chasers", "hamer", "sherwin", "redissemination", "stine", "mcmullen", "skopje", "gpx", "supplementing", "lowrider", "liaise", "citric", "opentype", "jpmorgan", "nitride", "achievers", "unbonded", "cowen", "subdir", "rehearing", "balmain", "crissy", "nake", "wtp", "scn", "mendota", "makoto", "alloc", "ultradev", "viaggio", "cig", "scipy", "depositary", "redhill", "caveman", "nunez", "starfire", "whitlock", "pelletier", "lanark", "yada", "sandro", "jervis", "placemats", "pathologic", "darden", "bunnyteens", "gordo", "otitis", "ordinators", "bma", "leningrad", "harkin", "eatery", "peony", "economia", "cytosolic", "glycerin", "tailings", "shirtless", "darla", "rayman", "boardhost", "frontera", "crumpler", "hargreaves", "mkportal", "nucleon", "pkc", "dov", "ndt", "hideout", "lrs", "calcite", "fpu", "fts", "spud", "mang", "nology", "luiz", "belden", "lense", "hendrick", "publicati", "unverified", "untapped", "vario", "pmsa", "recensioni", "xq", "tev", "batty", "briscoe", "dwr", "fingernails", "ocarina", "camus", "mackinac", "itis", "saks", "hahahaha", "romenesko", "croc", "ftes", "keyspan", "aoe", "reposted", "cgs", "moduli", "mra", "ery", "payoffs", "tpi", "maywood", "buchan", "roberson", "defrost", "ecr", "coleraine", "arianna", "biomarkers", "consecutively", "bongs", "loox", "idrc", "pretzels", "anmelden", "vdd", "underdeveloped", "mktg", "yancey", "feta", "peres", "assemblyman", "enforcer", "suk", "customarily", "cillin", "jett", "bility", "mingw", "ltv", "sarees", "aaas", "bloopers", "framemaker", "piscataway", "cytoskeleton", "wuhan", "maximising", "hoists", "fichier", "amitriptyline", "sgr", "scrubber", "gratuites", "reentry", "playtex", "communi", "buisness", "freepics", "kbit", "marmaris", 
"logarithm", "granola", "inefficiencies", "monocular", "kankakee", "tandy", "ferrite", "formato", "gaysex", "dbus", "autorun", "nivel", "ayatollah", "undifferentiated", "flowershop", "evp", "vazquez", "reaffirm", "dynix", "pictur", "collette", "oooo", "dian", "doxycycline", "weblogging", "cluttered", "sportsmanship", "relievers", "hwa", "vikram", "booktopia", "lampoon", "airtight", "firming", "mrtg", "shoreham", "annular", "hallmarks", "sparking", "anale", "ikon", "lanl", "gfdl", "commandline", "usfws", "adic", "nns", "pmd", "rfd", "ized", "rsd", "guardianfilms", "gryffindor", "ror", "blogspot", "thao", "obsolescence", "linguists", "blogads", "xinjiang", "recode", "onus", "heinlein", "oks", "kimble", "reservists", "blaupunkt", "statins", "descendancy", "obsoleted", "phim", "betacam", "mlp", "rearrangement", "disulfide", "myer", "bypassed", "onefit", "interp", "neutralizing", "tirana", "occupiers", "kingpin", "bnm", "relaying", "bga", "amilo", "overlord", "daffodil", "ukiah", "devotionals", "figueroa", "imd", "warenkorb", "dfo", "habib", "archivos", "lymphocytic", "kala", "deering", "undetectable", "infact", "vermeil", "silage", "ejaculate", "smithers", "gaeilge", "swr", "goudy", "inkl", "bilge", "texto", "satb", "prolactin", "bejeweled", "bastrop", "sunbelt", "chewy", "paginas", "decimation", "coen", "hypotension", "stateful", "pypy", "busby", "gaither", "tta", "patterning", "rdp", "cheep", "ldr", "denbighshire", "wittgenstein", "preexisting", "coffeemaker", "braveheart", "pbr", "ctt", "ginsburg", "superconductivity", "eurostat", "kyi", "amygdala", "corrie", "lonestar", "dueling", "challengers", "reshape", "photoset", "electrolytic", "hasegawa", "gainers", "calidad", "tinkerbell", "aldara", "poway", "physiologic", "optimality", "riyal", "hwn", "dremel", "cerebellar", "dth", "dancin", "summarises", "choy", "heartwarming", "unwin", "strider", "eastlake", "hyp", "cannonball", "mathcad", "skipton", "patently", "bitmaps", "biopharmaceutical", "analytically", "sll", "aramaic", "bogged", "incremented", "homem", "valorem", "publicist", "acb", "muzik", "tempera", "recyclers", "pillsbury", "seach", "intermediation", "lacing", "aggregating", "soundboard", "teapots", "rif", "neb", "archivo", "smartdisk", "boho", "titration", "tschechien", "sef", "boney", "oxidoreductase", "lino", "lcm", "skimmer", "mccullagh", "gats", "extrinsic", "erlbaum", "sketchy", "gooseneck", "bof", "tiffin", "pacer", "battersea", "noname", "gung", "asv", "sasaki", "outboards", "owings", "xue", "tbi", "interlaken", "kampala", "jcc", "tentec", "kilpatrick", "pixmap", "bitty", "pge", "dtmf", "prosser", "ojai", "stethoscope", "monotonic", "ebookmall", "perot", "medien", "kahuna", "washroom", "jacoby", "neurotransmitter", "intercity", "broadview", "micros", "straus", "flack", "amortisation", "pfu", "tonite", "vonnegut", "distros", "teething", "subsector", "mechanistic", "orbis", "flawlessly", "lidar", "frp", "whatnot", "tripartite", "studebaker", "cartographic", "rwd", "preconditions", "gardenia", "adland", "miembro", "irland", "linwood", "biotic", "kowalski", "marymount", "zathura", "highgate", "fudforum", "takeshi", "taro", "mpd", "crowder", "socialize", "scunthorpe", "deepwater", "clickbank", "ruleset", "viscose", "perso", "novica", "manhunt", "pavers", "elks", "aalborg", "occupier", "lunchbox", "euchre", "proporta", "mitosis", "paychecks", "bellaire", "suitcases", "postel", "mdg", "tutu", "paisa", "wbs", "slidell", "psb", "vocab", "mmhg", "clocking", "sks", "hemorrhagic", "plein", "hitchens", "fone", "crores", "classifiers", 
"novosibirsk", "greenwald", "rtt", "copacabana", "videorecording", "kickstart", "biggie", "neutralization", "pvm", "ksu", "kph", "pdl", "preprocessing", "particulates", "skylark", "llandudno", "squirrelmail", "oviedo", "pauly", "bromsgrove", "starsky", "prion", "simfree", "pennywise", "grier", "apd", "diphosphate", "lbj", "interscan", "pipers", "tronic", "surfside", "tsunamis", "dordogne", "hotlinks", "neely", "jeri", "proteasome", "transl", "goulburn", "vtkusers", "energizing", "butane", "stf", "bluebonnet", "htf", "stmt", "inked", "novatech", "iid", "elektronik", "maturities", "nameserver", "tomlin", "jigsaws", "distorting", "kamikaze", "quaid", "juggernaut", "gordonii", "latrobe", "bboard", "consultancies", "handley", "gramercy", "ccb", "derrida", "mgb", "bioavailability", "ucas", "tdr", "nochex", "lilith", "foreplay", "waas", "mccaffrey", "privatized", "uncovers", "gargoyle", "stockists", "ostream", "lenmar", "mamiya", "mildura", "insn", "bodega", "hardworking", "dockets", "dedham", "ered", "stomping", "kottayam", "carle", "eest", "pondicherry", "mpr", "fiddling", "panamanian", "buyitnow", "bungie", "goya", "superclass", "categoria", "buyback", "uhh", "gigolo", "tmj", "vangelis", "kingwood", "arn", "dorling", "maximization", "wls", "absenteeism", "quantifiable", "pion", "sliver", "leptin", "sxsw", "bummer", "isometric", "retraction", "amboy", "dunning", "grinch", "okeechobee", "shouldnt", "teeniefiles", "gcj", "whatcom", "bbe", "unb", "sws", "hydrocortisone", "cerebrospinal", "susana", "rumba", "bouchard", "yesteryear", "orthotics", "spunk", "superdrive", "jolene", "jalapeno", "propellant", "touchpad", "raisers", "mdma", "confocal", "jochen", "caddo", "dcl", "expatica", "bitstream", "igo", "bartenders", "refilling", "modell", "keighley", "rangefinder", "nostdinc", "oficial", "lanparty", "monza", "sportfishing", "rlc", "exacerbate", "beckwith", "anemone", "equivalently", "duxbury", "zhen", "cordele", "ebel", "ninjas", "milla", "incase", "mva", "zinn", "comercial", "segfault", "wisden", "maingate", "costner", "powerpuff", "gsfc", "lycoming", "regula", "lastminute", "winbook", "talladega", "optiplex", "syrups", "chiles", "estimations", "jaxx", "cercla", "slb", "absolutly", "guesswork", "tradeshows", "javascripts", "irritant", "warcry", "optura", "combinatorics", "graceland", "encino", "disconnects", "castello", "monolith", "mct", "geos", "hls", "intrusions", "glories", "prelims", "kanawha", "yglesias", "squibb", "memset", "edirol", "mandala", "alexey", "homecare", "dugan", "calmodulin", "ameritech", "umar", "timepieces", "nonfarm", "anklet", "wsp", "byrnes", "determinism", "addams", "moeller", "normality", "wiesbaden", "deflect", "taoism", "ikeda", "chakras", "samara", "unsung", "gargoyles", "massaging", "ajmer", "lossy", "mitogen", "hurwitz", "gulliver", "bul", "aerodrome", "darkside", "intensification", "raya", "ruger", "rba", "gennaio", "seaford", "ungarn", "vincenzo", "warszawa", "dillinger", "bandon", "odell", "riddim", "perforation", "cida", "annika", "uart", "tryout", "proxima", "fst", "lladro", "parameterized", "assfucking", "manageability", "crystalspace", "pandas", "choiceshirts", "taa", "servertime", "fmii", "nepean", "tracklist", "indio", "tino", "bernal", "hbr", "homogenous", "policyholder", "distributional", "tidewater", "ngfl", "erlang", "starz", "follicular", "grupos", "oq", "gonorrhea", "blaqboard", "listeria", "afaik", "lawmaker", "datatypes", "arie", "flavorful", "apu", "fyrom", "refunding", "subcontracts", "moissanite", "finchley", "mediates", "polyacrylamide", 
"bizzare", "standish", "conus", "competences", "jtag", "compatability", "millville", "coches", "biathlon", "mico", "moxie", "biff", "paulette", "chania", "suu", "backspace", "aways", "fugue", "dissonance", "medicated", "initio", "bestality", "hypothermia", "carman", "timberline", "defenselink", "sunfire", "mckean", "smithville", "mtf", "rebooting", "storytellers", "lamisil", "morphing", "chua", "sevenoaks", "haplotypes", "fiskars", "speer", "lathes", "refillable", "yearbooks", "engin", "kyushu", "tricycle", "penne", "amphetamines", "systemworks", "keele", "afficher", "trillium", "nena", "bulfinch", "transients", "hil", "concedes", "swot", "howarth", "andante", "farmingdale", "bitching", "overtly", "rateitall", "tubulin", "gmx", "bannister", "omer", "humanoid", "infringements", "stylebox", "tiredness", "branden", "panning", "wasabi", "morecambe", "hawkesbury", "cocksucker", "sak", "kilobytes", "breather", "slu", "adjudicated", "methylene", "wholeness", "gnue", "gynecol", "uas", "nacogdoches", "simcity", "hummingbirds", "garnier", "kath", "cppflags", "educause", "cotswolds", "heifers", "sephora", "joao", "tremblay", "gynaecology", "vertebrata", "blackcomb", "ffxi", "ottomans", "rodin", "ecac", "actu", "nde", "lockable", "dslr", "evaporator", "antihistamines", "uninstaller", "airliner", "bibdate", "unwrapped", "dumbass", "brc", "arrhythmias", "netweaver", "sateen", "rtos", "eip", "moteur", "fotopage", "uhm", "birr", "autosomal", "protec", "purim", "rhododendron", "canadienne", "profes", "pjm", "ddl", "underlay", "granule", "setfont", "cookin", "gillett", "rocklin", "welland", "ageless", "nuernberg", "bleep", "emedia", "regensburg", "gama", "xfree", "sills", "berwyn", "howler", "hardtop", "carded", "lipo", "zandt", "reformatted", "internment", "dominick", "mahmood", "avent", "swaying", "igloo", "ambler", "voyeurism", "bachman", "referential", "hydrating", "adaware", "dewpt", "repressor", "galego", "neilson", "scorecards", "newlines", "arcana", "aau", "transworld", "nmc", "discoideum", "wairarapa", "fogerty", "beit", "heidegger", "backhoe", "leftists", "quinnipiac", "mannequin", "malloy", "enviroment", "mako", "anl", "noyes", "eprom", "trashed", "ryanair", "betsey", "rath", "lobbies", "silvertone", "cupcakes", "artest", "netfilter", "voldemort", "oldenburg", "bazooka", "gerbera", "cient", "psg", "mittal", "camellia", "pronouncements", "fonseca", "rescind", "asps", "asheron", "mance", "viggo", "qar", "hepatocellular", "styrofoam", "malfunctions", "lindner", "linc", "salida", "dunwoody", "dioxins", "shaq", "epmi", "excavator", "adolescente", "redcar", "urac", "oncolink", "cartoonstock", "cwm", "bibb", "gymnast", "inexpensively", "isystem", "evol", "nmda", "hazen", "davide", "forceps", "motherfucker", "ccw", "mainframes", "sapulpa", "costas", "searcy", "labelle", "adjoint", "mclennan", "killa", "lipscomb", "monocytes", "requestor", "cyn", "splint", "digitech", "mrnas", "llamas", "multifaceted", "gamez", "voorhees", "boas", "solvay", "thorsten", "yeo", "terk", "privatevoyeur", "coolmax", "rebooted", "toskana", "unidiff", "radionuclides", "tilburg", "decoys", "pariah", "offerors", "wmi", "darnell", "meaty", "gages", "zapata", "supt", "bartleby", "vermeer", "pinstripe", "hemodialysis", "artis", "tov", "amateursex", "dailey", "egret", "cornhuskers", "fontconfig", "jordans", "guildhall", "hasselblad", "piney", "unbundled", "kusastro", "onclick", "functioned", "toca", "houseware", "kdebase", "ysgol", "griggs", "nicd", "mdp", "umi", "fullmetal", "pappas", "aransas", "tacacs", "movem", "abundances", 
"oulu", "fractionation", "cdb", "blitzer", "ruc", "karte", "cashflow", "retouching", "brattleboro", "eprops", "cya", "ubud", "fmri", "infosys", "displacements", "jerez", "dhc", "ielts", "fellas", "mno", "picturemate", "unicorns", "playroom", "dandruff", "albers", "discworld", "leaved", "existance", "unionists", "bloodlines", "follett", "irn", "ramsar", "woodburn", "efs", "auk", "lockergnome", "oocytes", "armadillo", "bsr", "captiva", "rinehart", "brom", "tlp", "gensat", "filers", "lle", "retrievers", "pacifier", "thurmond", "stroudsburg", "dominik", "vivek", "nla", "inmarsat", "unprofessional", "hydrographic", "mcadams", "wailea", "nforce", "scones", "paediatrics", "nzdt", "ilog", "finkelstein", "candylist", "appalachia", "marist", "musgrave", "vakantie", "varanasi", "yushchenko", "relativism", "jardine", "schuylkill", "ericson", "schweizer", "stravinsky", "keds", "ananda", "nsx", "jud", "tripwire", "aves", "rediscovered", "headstone", "depleting", "junkyard", "perma", "copthorne", "multitasking", "distrib", "byob", "tunstall", "hager", "spearheaded", "nacho", "underlining", "heshe", "jcr", "catalogued", "rawlins", "springville", "differentially", "powwows", "tsui", "inductor", "chalabi", "encephalopathy", "grote", "ebs", "raipur", "custodians", "guardia", "jlo", "khalil", "overstated", "webtv", "insulators", "kass", "weds", "servizi", "quicklink", "qso", "dumbest", "prowler", "loadings", "epos", "sizzle", "desalination", "copolymer", "duplo", "lawnmower", "skf", "nontraditional", "piet", "ghaziabad", "dredged", "vct", "marcasite", "kamp", "scoliosis", "arwen", "artie", "fifths", "austell", "fernie", "carport", "dubbing", "weblist", "maximo", "bax", "searls", "scuk", "uiuc", "crustaceans", "yorkville", "wayback", "gcg", "ural", "calibur", "girona", "haig", "perk", "zander", "samir", "freee", "avia", "developement", "pptp", "beac", "urbanized", "trentino", "marzo", "dfl", "lpa", "jiri", "mccollum", "affymetrix", "bevan", "ichiro", "dtt", "cofe", "loyalist", "verma", "daybed", "rimes", "quimby", "barone", "thomasnet", "koeln", "endocrinol", "evaporative", "gwybodaeth", "preshrunk", "hezbollah", "naga", "mmu", "februar", "finalizing", "printhead", "blanton", "zellweger", "manhole", "eroding", "emap", "searchgals", "typewriters", "tabasco", "cpb", "coffman", "lsm", "rhodesia", "halpern", "purebred", "netapp", "masochism", "millington", "bergamot", "shutout", "willson", "chown", "prosthetics", "proms", "zk", "karol", "underlines", "mosh", "bakelite", "kirkby", "intermountain", "holtz", "prensa", "vegf", "galesburg", "lba", "klondike", "webstat", "reeder", "neoplastic", "applesauce", "fibreglass", "kenji", "gluon", "feisty", "hynes", "clogging", "nonverbal", "etoile", "orangeburg", "ladybird", "concat", "milliken", "byproduct", "specializations", "chaintech", "swa", "porterville", "kbyte", "bizwiz", "congruent", "boehm", "selva", "rainey", "aphis", "rfs", "tarantula", "egovernment", "udf", "snuggle", "shang", "batten", "inop", "lough", "vigrx", "trios", "bvi", "unallocated", "nau", "condiciones", "wss", "modi", "componentartscstamp", "dyk", "maldon", "xantrex", "dlg", "edx", "karzai", "navi", "brockport", "cort", "softgels", "engravers", "wether", "hangin", "handicaps", "associazione", "khu", "nfb", "dohc", "clu", "capps", "vijayawada", "griffon", "biologics", "bluescript", "instantiate", "paperweight", "dilation", "izzy", "bedspread", "knudsen", "jabberwacky", "kiowa", "overtones", "gsr", "faithfull", "quezon", "pragmatism", "rct", "usi", "wiretapping", "fabricate", "exabyte", "pitty", "kcl", 
"pendragon", "opment", "kva", "meeker", "bootlegs", "jimbo", "jarrow", "mullin", "gridsphere", "activesync", "macwarehouse", "vela", "wikiusername", "hessen", "eyelash", "gob", "antifreeze", "beamer", "feedblitz", "harvick", "clicker", "immobilized", "dalmatian", "hemodynamic", "reshaping", "contessa", "elc", "stagecoach", "googling", "maxpreps", "jessup", "faisal", "ruddy", "magazzino", "jippii", "academe", "fjord", "flybase", "alpena", "psl", "junebug", "grissom", "shiki", "knockoff", "kommentar", "westpac", "gosling", "novosti", "mendel", "adtran", "wasserman", "transexuais", "aslan", "hoge", "fouling", "macfarlane", "hideshow", "trailhead", "edg", "bayshore", "preprints", "grs", "duction", "anesthetics", "nalgene", "iaf", "khao", "berhad", "savedrop", "magnifiers", "chitty", "goldwater", "lesbiens", "jumpin", "payables", "victimized", "tabu", "inactivated", "respirators", "ataxia", "mssql", "storylines", "camaraderie", "carpark", "internetworking", "gawk", "planing", "termini", "avaliable", "scho", "buysafe", "hds", "iad", "pleasantville", "fabrications", "wtd", "loh", "jamshedpur", "denture", "gaudi", "bluefield", "telesales", "vpc", "ppr", "jetsons", "protagonists", "fjd", "anoka", "boliviano", "curtiss", "wagoner", "storyboard", "trol", "rajiv", "xfce", "axons", "dmso", "immunotherapy", "namorada", "neva", "zakynthos", "weitz", "quercus", "nhhs", "amara", "microcosm", "raia", "bizarro", "mehmet", "christos", "categorically", "autoresponder", "aad", "adolfo", "welwyn", "nzlug", "vci", "catnip", "whittington", "sorel", "boned", "vittorio", "seta", "tomasz", "annes", "tonka", "nath", "toth", "tomaso", "ascap", "livedoor", "schlampen", "altamonte", "scotweb", "pillowcases", "medlineplus", "ambiente", "masterson", "nlc", "fibonacci", "bridgeton", "wmds", "tyrrell", "junky", "ballasts", "jbuilder", "cnf", "nagano", "hardman", "roadmate", "interleaved", "peirce", "pusher", "egm", "thetford", "rtm", "gnostic", "coreutils", "uninstalling", "heft", "ambivalent", "startpage", "difranco", "mmi", "typist", "estudio", "seiu", "moisturizers", "cardiol", "lamination", "bibi", "mof", "carpe", "scottie", "blackrock", "pons", "fistful", "somethings", "itl", "staffer", "rhiannon", "linspire", "cornucopia", "newsfactor", "countering", "worldpay", "catan", "almaty", "appraise", "runny", "braunfels", "reorg", "icg", "javax", "sema", "albumlist", "heraklion", "stressors", "shg", "collocation", "mccauley", "vesicle", "stuffers", "prego", "ichat", "lubricated", "sinha", "pharmacia", "aggiungi", "shakin", "cyr", "vce", "vigilante", "gauging", "lipase", "constabulary", "biochim", "epcot", "cricketer", "defibrillator", "rcn", "drooling", "stoll", "staines", "tnd", "adversarial", "tbn", "softwa", "pbc", "ptp", "demonstrator", "boingo", "voyeurs", "aoki", "banerjee", "hondo", "hysteresis", "workspaces", "campion", "lugano", "mobilisation", "pruitt", "foals", "aciphex", "sculpt", "iskin", "soledad", "bagpipes", "devaluation", "beastyality", "segway", "mineralization", "grc", "trafficked", "stedman", "gurl", "mcginnis", "dvips", "klee", "garber", "wizardry", "fervent", "headrest", "dermatol", "chaperone", "huygens", "eurythmics", "transboundary", "reclassified", "delusional", "tosh", "pimpin", "husqvarna", "faxpress", "tinkering", "unneeded", "babar", "pago", "hussey", "officeconnect", "mickelson", "leukocytes", "wesnoth", "hydride", "npp", "zondervan", "pele", "opeth", "kottke", "hometwat", "ogm", "mauna", "kilns", "bpi", "kst", "harbin", "assemblers", "karst", "wada", "selfless", "gynecologists", "enewsletters", 
"willi", "bip", "nami", "guestbooks", "sharjah", "aguirre", "krug", "dongs", "drv", "schoolers", "kidnappers", "lemmon", "ilan", "gnutella", "deutsches", "liquidator", "evers", "uniross", "grassley", "stowaway", "brainer", "organiza", "cellog", "channeled", "tastings", "deccan", "aiaa", "neurosciences", "factorial", "librarianship", "texmacs", "vocabularies", "blasters", "livable", "tifa", "nant", "libjava", "ramblers", "counterproductive", "catskill", "environmentalism", "ufs", "gwalior", "ubl", "kilts", "balenciaga", "alamitos", "newsburst", "septum", "animators", "signifi", "neoclassical", "mediaeval", "piezo", "escudo", "pineville", "botanica", "petter", "adenine", "fren", "lysis", "pastas", "helicase", "dredd", "efinancialcareers", "diehl", "kiley", "kwd", "ihousing", "yoruba", "malformations", "embarassed", "alexia", "checkup", "commited", "nanotube", "becta", "trados", "portofino", "lifesaving", "danh", "sctp", "tayside", "rani", "playmobil", "tualatin", "razorbacks", "ionized", "perodua", "trg", "subst", "cpap", "molex", "vitara", "fostex", "zmk", "placental", "parses", "saic", "newsmakers", "dshield", "homocysteine", "juego", "metamorphic", "cld", "otcbb", "moet", "rado", "watchguard", "sugarland", "singularities", "trophic", "ekg", "dacia", "reversi", "insemination", "houma", "quetzal", "shoshone", "linder", "homing", "highbury", "eizo", "podiatrists", "conch", "crossref", "hda", "poppins", "chaim", "cytotoxicity", "xugana", "weevil", "integrations", "clarkston", "ritek", "morgue", "unpatched", "kickers", "referers", "kitt", "servizio", "biosecurity", "leviton", "twl", "etx", "electrification", "peninsular", "juggle", "yeshiva", "sociologist", "wsc", "sartre", "finitely", "spect", "kathie", "ards", "corny", "brazilians", "lundy", "histocompatibility", "woolwich", "irp", "handango", "cosgrove", "sulfuric", "renderings", "msh", "trt", "ldcs", "lect", "kollam", "edgerton", "bulleted", "acupressure", "thotbool", "hiawatha", "nhfb", "ahps", "operon", "ugandan", "paton", "suspends", "categorie", "stratigraphy", "howes", "surfed", "steins", "babu", "andrade", "agarwal", "ncd", "surefire", "cori", "planetside", "snorkelling", "waterworks", "luk", "headlamps", "anaesthetic", "isomerase", "fdisk", "dunstable", "awb", "hendon", "accreditations", "doral", "nta", "macadamia", "takin", "marriot", "bfs", "disqualify", "ttp", "sixt", "beazley", "rashes", "najaf", "hwg", "bukit", "antiaging", "psychol", "dfe", "bedingfield", "equated", "swig", "lightscribe", "unionist", "lytham", "clocked", "duced", "complementing", "keycode", "pennants", "camas", "eamon", "zaurus", "qnx", "srx", "delux", "uli", "grrl", "bookie", "boggling", "skewers", "richman", "photodisc", "oto", "uav", "cnhi", "umberto", "bautista", "zooms", "newsdesk", "roadblocks", "klum", "goh", "goebel", "pou", "homophobic", "diamondback", "foosball", "rept", "spurgeon", "lumberjack", "marv", "epidermis", "mobley", "oktoberfest", "photoshoot", "rhinoplasty", "peptic", "bauman", "tannins", "psychotropic", "tilley", "malaya", "hypothalamus", "shostakovich", "scherer", "tsh", "manipulator", "calabasas", "coromandel", "pliner", "timestamps", "pango", "edexcel", "snc", "nim", "gwaith", "breaststroke", "oroville", "mitsumi", "ichi", "mobius", "deductibles", "nikola", "berrien", "peacemaker", "ilia", "bookmarked", "letterbox", "halal", "agl", "noor", "noll", "filenet", "freeland", "kirsch", "roadhouse", "charted", "microtubule", "cubicles", "blau", "ladysmith", "gatti", "ection", "switchable", "mcminnville", "hcm", "interactives", "altus", 
"phospholipase", "transformative", "samuelson", "completly", "anhydrous", "germplasm", "gradzone", "gdansk", "jenner", "parkin", "unmoderated", "wagers", "beliefnet", "hotbar", "canis", "ravioli", "enrolments", "walling", "marblehead", "dvt", "cameltoes", "ribosome", "carnivals", "srf", "speedman", "instrume", "moffett", "augustana", "topsoil", "latifah", "isomers", "pettit", "lemans", "telescoping", "gamedesire", "koha", "balancer", "picton", "underhill", "dinghies", "chooser", "argentinian", "ahrq", "apparels", "timescales", "cef", "athenian", "mcewan", "sexshop", "zermatt", "mha", "geert", "bugging", "trento", "lyndhurst", "nex", "wdc", "symbiotic", "wds", "dyslexic", "nomic", "tecnica", "mmap", "wishbone", "mcad", "prm", "bashir", "licenced", "larissa", "collab", "squirter", "infecting", "penetrations", "protea", "argento", "polyvinyl", "ganglion", "ruud", "bunt", "solgar", "lipper", "chimpanzees", "jdo", "testcases", "tda", "hamza", "meeks", "athol", "centimeter", "excreted", "paros", "azzaro", "nappa", "sirna", "sexvideos", "nonprescription", "lyd", "firework", "crlf", "localize", "tablatures", "jndi", "vigorish", "dcd", "schulte", "gioco", "chested", "universit", "thrivent", "jie", "hydrothermal", "smalley", "hoke", "ramen", "coleoptera", "intensifying", "copyleft", "llb", "outfitted", "khtml", "chatterjee", "adoptee", "augusto", "resnick", "intersects", "grandmaster", "nusa", "deadball", "cksum", "historiography", "amistad", "bellacor", "trcdsembl", "campagnolo", "downgrades", "sexbilder", "scrapping", "pdoc", "haskins", "bullhead", "rhett", "mimosa", "wildfires", "ellyn", "hryvnia", "halved", "cfml", "vatu", "ecademy", "dolore", "shauna", "multilink", "funchal", "ximian", "bergamo", "quarterfinals", "hobbyist", "reardon", "homozygous", "glyn", "popset", "torsten", "puller", "mathworks", "namm", "dena", "mdksa", "dcom", "danskin", "bexar", "dinning", "pfd", "misfit", "hamden", "hardie", "redfield", "scotus", "quotable", "cranfield", "asides", "beacuse", "musicstrands", "kla", "unternehmen", "teg", "roseland", "pgbuildfarm", "volo", "zirconium", "noelle", "httpwww", "agement", "guan", "tcf", "opencube", "shao", "mears", "rectification", "omc", "duisburg", "pows", "hsphere", "entertai", "keeler", "highpoint", "stratospheric", "newegg", "preeminent", "nonparametric", "mistral", "percocet", "zeroes", "kth", "divisor", "wanderlust", "ugc", "cleat", "decentralisation", "shite", "verna", "immediacy", "trak", "swingin", "eckert", "casco", "olivet", "resi", "bergeron", "felonies", "gasification", "vibrio", "animale", "leda", "artesia", "casebook", "nhc", "gruppo", "fotokasten", "yaw", "searing", "detonation", "gse", "approximating", "hollingsworth", "obasanjo", "pinewood", "tangential", "ridgway", "headhunter", "ero", "sharkey", "clwyd", "bretton", "bustier", "apologizes", "manoj", "muskogee", "pismo", "resortquest", "diskeeper", "lathrop", "pala", "glebe", "xterra", "pml", "seahorse", "geneve", "wpointer", "softener", "breaching", "maelstrom", "prioritizing", "jsa", "annunci", "modelos", "seraphim", "raymarine", "dodgeball", "munity", "assfuck", "alopecia", "singaporean", "nowak", "keyboarding", "beachside", "sparco", "robeson", "navbar", "fsr", "contribs", "lineages", "sumitomo", "dermatologists", "marbled", "probleme", "irv", "blackmore", "bothersome", "draconian", "troup", "approver", "pcgs", "saville", "srinivasan", "poldek", "perfor", "articular", "gwynn", "trackball", "asis", "mansell", "unf", "werewolves", "magazin", "sible", "vla", "autocorrelation", "waltrip", "mombasa", 
"schroder", "alachua", "hks", "duns", "ornl", "cabrio", "guanine", "bridgetown", "rhsa", "luka", "cpf", "roadstar", "creditcard", "frf", "michaela", "willett", "brews", "baskin", "hamel", "zoids", "semantically", "cagliari", "eggert", "valkyrie", "airlie", "salas", "gnomemeeting", "benji", "nent", "cashew", "unproven", "myocardium", "kap", "gini", "prek", "cypher", "paraiso", "nightline", "cursive", "organises", "hydrated", "csk", "schwanz", "martinsburg", "liguria", "hsieh", "forties", "pgc", "sayre", "photosynthetic", "pips", "tongued", "lifetips", "walcott", "cname", "unapproved", "emm", "nematodes", "jaclyn", "kell", "gremlins", "bolero", "togethers", "dicom", "paroxetine", "vivien", "gpr", "bru", "ilt", "lished", "tortola", "mav", "powertrain", "telkom", "immunized", "nuneaton", "fica", "trulia", "ricochet", "kurosawa", "aberrant", "nld", "ukr", "wyandotte", "odpm", "pgk", "dumber", "ruptured", "insoles", "starlet", "earner", "kem", "radiologists", "polydor", "nutraceuticals", "zoomed", "groupie", "brinkmann", "thrombin", "aco", "laminar", "immunoglobulins", "jamnagar", "camber", "vxi", "colliery", "incubators", "procimagem", "sweeties", "landfall", "seanad", "intramurals", "kwok", "borderless", "methyltransferase", "suwannee", "lgs", "cjd", "hyperlinked", "birkenhead", "torrevieja", "purposefully", "gutted", "serveur", "grr", "morrell", "ouachita", "imran", "slat", "freeways", "multithreaded", "newlyweds", "documentum", "ebm", "xiang", "burnin", "reelection", "hales", "rutter", "uunet", "vitreous", "noord", "centrelink", "lempicka", "iru", "countable", "dolomite", "salvaged", "soyuz", "frick", "lwp", "afterglow", "ferent", "maes", "mandi", "secunderabad", "millwork", "sampo", "takedown", "colostrum", "cfnm", "judeo", "wisc", "lata", "sexi", "homies", "tarmac", "customisation", "conservator", "pipettes", "goon", "artefact", "expository", "complementarity", "cosco", "mercosur", "tfm", "benzodiazepines", "mii", "netmask", "stalling", "molnar", "hmso", "huw", "aliso", "decors", "oldman", "nuevos", "acis", "somthing", "zabasearch", "steuben", "minicom", "hausfrau", "goldfields", "rickey", "minichamps", "usagi", "bisexuales", "rothman", "shana", "srivastava", "oemig", "beefy", "senha", "pica", "pucci", "skits", "shenyang", "mussolini", "kootenay", "ethnology", "donohue", "cyc", "childers", "mahjongg", "davao", "tajik", "codemasters", "mydd", "charade", "arnhem", "bobbin", "istudy", "rugrats", "dancewear", "mechanized", "ject", "mayes", "canmore", "reassigned", "nnnn", "crema", "bursa", "cfu", "svm", "riccardo", "realvideo", "lites", "krall", "centrifugation", "welds", "braunschweig", "coptic", "securityfocus", "reorganisation", "conglomerates", "dehumidifiers", "dumper", "hamill", "halston", "iau", "wfc", "spiny", "arezzo", "mbeki", "invisionfree", "dropkick", "elastomer", "wahoo", "anagram", "fogdog", "finnegan", "gof", "newsworthy", "defs", "sensitization", "hyperactive", "sidi", "antenatal", "elektro", "nordsee", "yuna", "pluggable", "hemophilia", "kola", "revitalizing", "seepage", "alitalia", "orale", "wri", "ory", "bcf", "wooten", "nonviolence", "baume", "berkman", "ashdown", "diciembre", "purports", "fcuk", "shillong", "mondial", "brushless", "technicolor", "narragansett", "barenaked", "pandagon", "rehabilitated", "outdoorliving", "expendable", "ponca", "tigard", "soulmate", "kaine", "maxis", "poppers", "allposters", "commercio", "dods", "tsl", "volusia", "iic", "thm", "elibrary", "datebook", "rapists", "ultrasparc", "seabed", "orly", "complicating", "suzi", "texturing", 
"correspondences", "groomsmen", "avo", "latour", "manipur", "arnett", "suzhou", "headboards", "cil", "palomino", "kol", "pomeranian", "diptera", "gericom", "steiff", "cordis", "erythrocyte", "myelin", "fragility", "drucken", "reso", "hov", "tsukuba", "kustom", "invoiced", "hannigan", "hangul", "montauk", "modulators", "irvington", "tsang", "brownian", "mousepads", "saml", "archivists", "herringbone", "bodom", "harrahs", "daiwa", "juanes", "nids", "moorcock", "ccu", "eyeliner", "totalled", "syp", "woken", "aphids", "cutthroat", "coincidental", "lepidoptera", "buda", "tarrytown", "vaseline", "bluewater", "strontium", "burdick", "crustal", "hackman", "shopnbc", "aicpa", "psal", "albicans", "seduces", "epps", "kroll", "unambiguously", "staley", "cutbacks", "hemet", "ariana", "pch", "cgmp", "mcas", "multimeter", "anubis", "htr", "analyte", "peseta", "enh", "glitz", "kewl", "bidi", "winsock", "lvs", "moldings", "peltier", "iod", "ior", "trackmania", "ballets", "doylestown", "spaceflight", "quicklist", "proportionality", "overruns", "yadav", "sordid", "qpf", "mentorship", "lyx", "tained", "oligonucleotides", "bbci", "spidey", "videotaped", "regnow", "jukeboxes", "xpdf", "portishead", "irt", "splunk", "kommentare", "citywire", "crud", "nev", "febs", "adu", "ird", "ribeiro", "abrahamsson", "epidemiol", "coms", "vdo", "outro", "pneumococcal", "tilton", "brookstone", "apic", "avenge", "alleviating", "sportif", "inservice", "punts", "tives", "sora", "tgs", "daugherty", "yarrow", "wakeup", "meatloaf", "mumford", "datafile", "buchen", "zzzz", "objectclass", "polices", "dogging", "cursus", "plasminogen", "kinsella", "lindgren", "asymptotically", "duce", "wonderwall", "crick", "pvd", "enveloped", "mnfrs", "caseiro", "instabilities", "muskoka", "jeni", "thalia", "apac", "reforestation", "paradoxically", "dren", "dubbo", "inductors", "opin", "symlinks", "gamestracker", "secam", "gatorade", "irm", "cava", "rupp", "wacker", "lanta", "cres", "yue", "oligo", "chairpersons", "incesto", "spca", "zapper", "materialized", "accolade", "memorized", "squidoo", "interpretative", "roping", "rauch", "oxymoron", "reciever", "maryann", "pentagram", "viv", "infusions", "slvr", "choppy", "robotech", "spb", "servic", "saya", "univeristy", "bahamian", "gos", "fwy", "nocd", "stipends", "stirlingshire", "caerphilly", "riboflavin", "fiu", "kalb", "ubiquity", "vandal", "romper", "bitumen", "nolo", "shimizu", "postpost", "rummy", "paleo", "unrhyw", "pinscher", "constructively", "sufjan", "christiane", "spliced", "finca", "gpf", "iaa", "iesg", "brecon", "kiran", "trekearth", "repeatability", "gunning", "byblos", "tadpole", "mitsui", "storytime", "berserk", "wellman", "cardiologist", "jammin", "leis", "hirst", "fellatio", "ggc", "terran", "breadcrumbs", "lorena", "remaster", "tpg", "cifrada", "curvy", "envisage", "boneca", "basements", "sharpton", "crucially", "lfn", "imao", "antonin", "soundgarden", "carrara", "bron", "decoupling", "monroeville", "environmentalist", "msha", "eastenders", "adultfriendfinder", "bein", "stef", "fpgas", "mistreatment", "rbl", "qlogic", "shona", "sutcliffe", "previousprevious", "infective", "estrella", "gans", "shards", "vcds", "acadian", "kahului", "phonetics", "comittment", "blix", "biocompare", "whimsy", "frameset", "kot", "nyack", "lolo", "carboxylic", "pkgconfig", "dipartimento", "traceback", "svlug", "microdermabrasion", "waterbody", "jeeps", "tiverton", "wundef", "spay", "gilmer", "ceqa", "bodog", "followups", "internat", "biarritz", "gurps", "bessemer", "iceman", "pegged", "liberator", 
"rediscover", "lovecraft", "wavefront", "bhangra", "zuni", "epm", "meningococcal", "ketone", "glazer", "yashica", "geodesic", "congruence", "tenkaichi", "omani", "tenuous", "reuter", "surfactants", "cohomology", "epicenter", "toke", "dwf", "santas", "kutcher", "christo", "lucio", "phenomenological", "debriefing", "miniskirts", "ansmann", "mfps", "lentil", "kannur", "backer", "albedo", "flsa", "pauli", "mcewen", "danner", "angora", "redstone", "lxwxh", "informacion", "phyto", "libpam", "blo", "cocky", "pitchfork", "stratocaster", "mohegan", "brazzaville", "broussard", "beano", "interconnections", "willa", "toiletry", "sats", "beko", "exchangeable", "colm", "arabe", "stretchy", "starburst", "dzd", "neurologist", "leonards", "kitties", "dottie", "rspb", "fwrite", "homicides", "forde", "ipf", "travelpro", "haemophilus", "ronny", "hubris", "bottomline", "kosova", "neuropsychological", "genitalia", "waiving", "swirls", "dampers", "comhairle", "cheech", "eigenvectors", "extrapolated", "chaining", "defected", "yurasov", "gakkai", "justia", "campylobacter", "northumbria", "seidel", "kenseth", "pmr", "kare", "dumbo", "holocene", "jwin", "superconductors", "yeung", "polygram", "egon", "distillate", "unweighted", "gramm", "safeco", "bentonville", "ishikawa", "vuv", "strachan", "bayard", "escalator", "periwinkle", "breakin", "rsmo", "publishi", "darmowy", "outfile", "choreographed", "obrazki", "accross", "yag", "gravesend", "lovemaking", "boucheron", "farrow", "annulment", "kwai", "tubbs", "bartow", "tonbridge", "lesbico", "panerai", "spate", "belladonna", "lexi", "sobering", "carcinogenicity", "djf", "semis", "pcv", "suppressors", "leachate", "dingle", "mbendi", "celina", "hydroponic", "hoyer", "xia", "kovacs", "recalculate", "maltreatment", "hitchin", "medtronic", "meerut", "whsmith", "fontsize", "relaxes", "kis", "halos", "cracow", "saco", "webcomics", "ife", "sauder", "dioceses", "uct", "postdoc", "biceps", "leela", "hydrant", "hamstring", "darrow", "tinderbox", "sify", "naw", "ganguly", "streetwise", "imprinting", "dandenong", "colecovision", "gnuplot", "nucleation", "werbung", "prb", "blr", "croce", "deviance", "goldfrapp", "tetrahedron", "materialize", "homeworld", "foodborne", "baixar", "stagg", "fondness", "ellicott", "merchandiser", "ler", "djia", "eastleigh", "blacklisted", "freetext", "wxhxd", "multiplicative", "metis", "urethra", "dalrymple", "retroactively", "hartnett", "gcd", "kilos", "multivitamin", "vientiane", "koji", "scran", "bwp", "emoticon", "mercator", "lyricist", "macromolecules", "fungicides", "amines", "karcher", "cssa", "freetown", "beneficially", "tugrik", "monotype", "ishii", "kempinski", "pigmented", "mipsel", "ridership", "athenaeum", "twikiweb", "mpm", "faking", "clsid", "kenobi", "endoplasmic", "motorised", "lomax", "geraldton", "eck", "cssrule", "auerbach", "metlife", "apocalyptica", "masa", "risotto", "follicles", "ashtabula", "sussman", "exmouth", "melua", "cvss", "pana", "stimulators", "gnf", "uvic", "asustek", "dieta", "famvir", "conflicted", "retirements", "sixers", "metab", "gregoire", "burris", "creat", "rajan", "brainwashed", "berenstain", "crittenden", "antoni", "gbs", "associ", "yankovic", "gnvq", "rogaine", "kek", "gridlock", "integrable", "chalkboard", "dopod", "unranked", "karlsson", "anaemia", "natur", "permian", "bartley", "unaffiliated", "slrs", "montreux", "partici", "starbuck", "infractions", "karon", "treviso", "backdrops", "turkmen", "standups", "sowell", "aktuelle", "gleeson", "lss", "globulin", "woah", "nte", "midob", "violator", "boxcar", 
"sagan", "aviso", "pounder", "vieira", "kronor", "tocopherol", "keiko", "newsrx", "lesbe", "pharmacokinetic", "intercepts", "tirelessly", "adsorbed", "ksh", "plunkett", "guenther", "penta", "phospholipid", "reiterates", "wuc", "oversaw", "arraylist", "qy", "outsourcer", "eyeshadow", "pushbutton", "doujinshi", "catagories", "pilar", "paltz", "viaduct", "pugster", "elastomers", "evenflo", "mmk", "wadi", "secularism", "cellspacing", "trekker", "llm", "pakistanis", "glyphs", "neuroblastoma", "loftus", "gigli", "thorp", "seeley", "producten", "glandular", "aligns", "rejuvenate", "grt", "northants", "ifconfig", "sherrill", "wintasks", "xenia", "whangarei", "hra", "expres", "nadir", "recoup", "rnai", "fyr", "franchised", "batchelor", "relocatable", "warhead", "backfill", "fascists", "kedar", "adjacency", "iberostar", "mancha", "gorton", "insta", "jni", "cellpadding", "larnaca", "carmarthen", "endgame", "streamlight", "golan", "thomann", "totten", "curbside", "samhsa", "howrah", "planer", "hermaphrodite", "gavel", "bassinets", "footjoy", "fairtrade", "gah", "prestwick", "paoli", "alben", "laconia", "berkowitz", "inputting", "dimming", "indiatimes", "arcgis", "goof", "landmine", "boracay", "appro", "notifier", "wirth", "valerian", "bucher", "wts", "saad", "weisz", "enrollee", "authenticating", "wheatland", "zildjian", "revisor", "faauto", "profs", "pheonix", "seitz", "administrivia", "foams", "leh", "orbitals", "hammerhead", "dotcom", "xof", "klezmer", "fosgate", "walworth", "niguel", "quickfind", "isakmp", "facia", "stalemate", "multimediacard", "motrin", "glx", "classifies", "ischia", "ankh", "mohali", "incurs", "feist", "ldb", "netzero", "rationalization", "eef", "brokering", "viewport", "isas", "masterbate", "geneseo", "grammer", "garantie", "sanofi", "malignancies", "yaesu", "jpegs", "spitz", "chea", "limassol", "lobbied", "splat", "nostradamus", "gallium", "mobb", "mannered", "dorada", "nalin", "sorbet", "lunenburg", "phc", "tdma", "bodycare", "jobsearch", "sharia", "topiary", "cataloged", "camsex", "avm", "kimber", "extendable", "ager", "pella", "optometrist", "tinh", "bogey", "kana", "pipette", "bln", "coveralls", "teng", "stayz", "isolator", "wicking", "cph", "zany", "umatilla", "austral", "applauds", "taks", "interferometer", "barbican", "ohana", "rebs", "cerf", "criminally", "mkv", "adio", "psychopathology", "lkr", "leyton", "cartoonists", "appellees", "indira", "redraw", "pictbridge", "mahesh", "beng", "ncar", "gord", "nanometer", "faceless", "moyers", "oregonian", "aftershock", "gena", "leggett", "wsdot", "classique", "menon", "spiro", "whiteboards", "strategists", "dnv", "loti", "kaos", "hydrotherapy", "marionette", "islay", "myv", "typeof", "igt", "nitty", "ddb", "quintile", "freightliner", "monkees", "lindley", "dehumidifier", "industrials", "bouncers", "transfered", "mages", "dmb", "roseanne", "chk", "trigraphs", "rer", "bettis", "cyberlink", "browsable", "workhorse", "iterated", "mcfly", "kyd", "pooping", "preferentially", "fraternities", "diuretic", "octubre", "castell", "emerg", "sampras", "gephardt", "zimbabwean", "unexpired", "westmorland", "biscotti", "mavica", "everyones", "shaikh", "nampa", "youngblood", "plana", "refractor", "bouldering", "flemington", "dysphagia", "redesigning", "milken", "xsel", "zooplankton", "gsd", "philatelic", "modularity", "parkview", "keto", "marrone", "wallmounting", "tias", "marengo", "quiche", "epoc", "resales", "maduro", "murrieta", "fairplay", "ddp", "woodinville", "registro", "transcriber", "notarized", "neocons", "franchisor", "diab", 
"vying", "morehouse", "lauper", "bedspreads", "pooch", "morphism", "gripper", "tavistock", "negated", "javabeans", "nashik", "atomki", "musicianship", "viaggi", "bbn", "cady", "adios", "purview", "bosque", "xxxl", "dyfed", "biomaterials", "overpass", "berners", "goaltender", "speedometer", "ultrium", "carteret", "fatwa", "bottomed", "superscript", "rwandan", "proteinase", "coolermaster", "maca", "haircuts", "crewneck", "discriminant", "bayfield", "mishra", "morey", "multiplexers", "pcga", "stade", "carnivore", "codingsequence", "knowledgealert", "egalitarian", "pombe", "yamato", "jenson", "mortgagee", "middlefield", "iiyama", "schell", "midler", "nags", "caplan", "anyplace", "haridwar", "sternberg", "ventilating", "retreating", "shopsafe", "mohave", "brion", "immun", "zapf", "mingus", "prolly", "trichy", "microform", "olsson", "jdc", "dosimetry", "smelter", "rayovac", "takeda", "mbt", "ied", "dynamism", "fileattachment", "rabat", "devs", "mellor", "manmade", "somaliland", "hashtable", "sdb", "conto", "furtado", "statics", "saleh", "puja", "kamera", "eport", "killian", "rucksack", "janette", "powerware", "phenylephrine", "cupcake", "karp", "bodum", "celular", "zamora", "qian", "dws", "psig", "polycystic", "titts", "krzysztof", "parsippany", "raggedy", "eason", "epg", "bsg", "payloads", "alon", "cebit", "wedgewood", "daten", "pbi", "annexe", "cyclen", "customizations", "stunningly", "hugger", "junio", "jtc", "xcd", "prequel", "strathmore", "champloo", "billerica", "talley", "estoppel", "ameritrade", "torr", "cytomegalovirus", "bpel", "domus", "madigan", "supercool", "ysl", "contaminate", "rxlist", "sailormoon", "ubid", "plovdiv", "mcsweeney", "govideo", "bassinet", "taillights", "typhimurium", "dez", "fci", "visionaries", "salesmen", "nicki", "skagen", "hibernation", "ponders", "rrsp", "middleburg", "innkeepers", "mcauliffe", "gardasee", "pcn", "asce", "aromatics", "interplanetary", "landcare", "towneplace", "downloaden", "discontinuing", "bork", "sealers", "weybridge", "wusthof", "interbank", "hullabaloo", "erratum", "contreras", "sandwell", "novgorod", "earbud", "jds", "coastlines", "echolist", "guntur", "lmp", "trunking", "foxtrot", "rosanna", "patchouli", "inequities", "testes", "defaulting", "alpert", "securitization", "nsfw", "borer", "originators", "postid", "phx", "censoring", "hashimoto", "oriole", "chipotle", "slocum", "ipeople", "rdg", "reusing", "saeed", "wetzel", "mensa", "shiner", "chal", "rhesus", "streptomyces", "datagrams", "invalidated", "shenanigans", "mkii", "sandford", "lennart", "pract", "npi", "travelguide", "championed", "biosolids", "billable", "givers", "tmdls", "cockroaches", "testcase", "faraway", "cfengine", "umbc", "underwritten", "biofuels", "cyberhome", "dinh", "zegna", "tarps", "sociologists", "ellesmere", "ostomy", "vso", "sena", "ingest", "gazebos", "sirloin", "cyclophosphamide", "bitdefender", "catz", "bpp", "giancarlo", "kategorie", "arjan", "valery", "kmc", "insp", "recomended", "dataport", "pfaff", "manuale", "rog", "niven", "mahi", "ghs", "atsdr", "rangeland", "commonality", "xid", "midis", "cwc", "regrettably", "navidad", "yahoogroups", "kaw", "ston", "ves", "pulau", "playbook", "digipak", "jetblue", "kavanagh", "exhibitionists", "armidale", "arquette", "copland", "namib", "cne", "cheapflights", "wyvern", "lucene", "muffled", "vincennes", "inlays", "lockets", "whitey", "brin", "wharfedale", "guyanese", "laryngeal", "outfielder", "nonattainment", "softimage", "cellgroupdata", "literatura", "myoplex", "yorba", "bct", "pva", "slapstick", "cottrell", 
"dialers", "subculture", "cmx", "modded", "skids", "roselle", "klub", "marathons", "tgt", "skeet", "toucan", "masterclass", "nnp", "calcio", "oxidizing", "alo", "kennebec", "zj", "intergalactic", "biomolecular", "cii", "powweb", "mcwilliams", "phosphorous", "photocopiers", "obligor", "matcher", "listbox", "voigt", "fdl", "dawley", "scribus", "lessors", "npn", "luminaries", "karats", "bridger", "slm", "hadronic", "fairport", "piecewise", "recharging", "dmm", "unionville", "intermedia", "goetz", "urinal", "joystiq", "grosso", "sobaka", "payphone", "rockfish", "duodenal", "uninstalled", "leiter", "coworker", "escuela", "cyclades", "longterm", "taber", "screenplays", "gpt", "shiites", "ntop", "farcry", "jitsu", "lactobacillus", "uniontown", "cloner", "otaku", "hoyas", "kandahar", "kerrville", "akers", "neuropsychology", "multimap", "allston", "femininity", "trask", "accuweather", "deferment", "wam", "fmp", "portlets", "glsa", "westmont", "waders", "cellulare", "homehome", "frogger", "hass", "rya", "seqres", "hellfire", "havering", "montfort", "chokes", "eharmony", "knowsley", "bordellchat", "cvsweb", "houdini", "umr", "canarias", "babyshambles", "bridgette", "cinque", "drezner", "hsin", "alcan", "stas", "outlier", "naira", "neverending", "masson", "khanna", "systeme", "hillsong", "camshaft", "exotica", "milburn", "bijou", "destdir", "innervation", "gga", "oqo", "cunha", "reefer", "techspot", "hibernia", "alpina", "iarc", "constraining", "nym", "dard", "estefan", "fuser", "lepton", "pergamon", "wiktionary", "razer", "poznan", "netscreen", "manda", "npv", "xmb", "kingstown", "topix", "batsman", "wavelets", "cogs", "bigtitsroundasses", "barnhart", "scofield", "ebrd", "desorption", "bellflower", "watertight", "stevia", "photocopier", "haverford", "talc", "penises", "gwendolyn", "buynow", "nairn", "prolab", "lundberg", "backordered", "coh", "mononuclear", "unocal", "brunson", "greenlee", "emer", "txdot", "prichard", "conferees", "renata", "ternary", "footballer", "sisyphus", "directfb", "foolproof", "chastain", "lakshmi", "dsb", "megane", "cdo", "someones", "rebelde", "morrigan", "mymovies", "tiananmen", "immunosuppressive", "mcveigh", "stylin", "brower", "mpltext", "aibo", "pdd", "depositor", "ofcourse", "ecdl", "redenvelope", "acidophilus", "deci", "defensively", "analytica", "cnd", "hrp", "tnr", "tryon", "forgo", "barca", "pahrump", "foros", "pickabook", "hellraiser", "lithographs", "educates", "ediets", "gopal", "signers", "digext", "netbackup", "dimensionality", "triax", "rnase", "aman", "angell", "bochum", "eyepieces", "earbuds", "americablog", "makeovers", "unprocessed", "pfa", "widctlpar", "clausen", "punbb", "centra", "monson", "infogrames", "azt", "xalan", "hydroxyl", "medpix", "interacted", "gpi", "polishes", "canoga", "numismatic", "avoidable", "brantley", "adenoma", "aah", "prostaglandins", "powercolor", "beaconsfield", "lakhs", "mhd", "lesbisch", "flammability", "truancy", "jharkhand", "channelweb", "givn", "flatiron", "midlife", "guerin", "indianola", "unavailability", "rooter", "wanaka", "lompoc", "widener", "cll", "kmail", "websense", "vmi", "residencies", "cablevision", "pye", "disrupts", "onetime", "kenzie", "gating", "boingboing", "sevier", "eberhard", "chek", "edr", "kharagpur", "fotze", "cvp", "deflated", "infestations", "judgmental", "meiji", "antipsychotic", "uwm", "infn", "slaughterhouse", "stix", "asg", "bagging", "brainwashing", "dmp", "disconnecting", "thera", "mclellan", "rong", "telcos", "wilmer", "sphincter", "orgys", "newsom", "infill", "fairhaven", "etude", 
"stereotyping", "talib", "dreamstime", "rearranging", "geographies", "tipp", "programmatically", "handicapper", "plantar", "ogaming", "xss", "academie", "quarrying", "approachable", "sweetener", "braised", "knut", "tibco", "fseek", "vided", "burk", "spigot", "skilling", "hunterdon", "nailer", "roxette", "hepatocytes", "coupes", "universitet", "mauricio", "lov", "hnd", "roseburg", "berlusconi", "chloroplast", "charing", "kansai", "buzzword", "nepad", "pistachio", "arv", "lanvin", "riverbank", "lilypond", "predominately", "metalware", "saugus", "nmac", "giza", "lancs", "culpepper", "rohm", "pretzel", "warping", "twc", "raitt", "iyer", "connotations", "iiia", "wilber", "yardstick", "neutrophil", "supernatant", "solu", "segmental", "multitudes", "imperium", "radley", "supercharger", "imagen", "thicknesses", "brk", "spew", "vestibular", "klausner", "riba", "witten", "orth", "calaveras", "naep", "deceleration", "bcn", "consignee", "aldehyde", "pronged", "baring", "jacked", "bigalow", "gyd", "centerfolds", "ortofon", "cropland", "wnt", "nazism", "kingswood", "operationally", "trix", "testicle", "rioja", "bhi", "technolo", "lindstrom", "pinter", "minox", "wofford", "guaifenesin", "hup", "bifida", "stratigraphic", "dundalk", "snipers", "kshirsagar", "ridgecrest", "placerville", "gosport", "sjc", "ircd", "rubrics", "kerouac", "ebx", "harken", "foc", "cooperated", "nwo", "cano", "kearny", "shopinfo", "tlb", "etp", "obie", "greaves", "versity", "amoco", "inzest", "msdos", "gabby", "dumbbells", "ncaaf", "ximage", "homotopy", "ironwood", "adiabatic", "pend", "licznik", "cck", "sabian", "saxton", "patties", "hopkinton", "biotherm", "ethno", "videochat", "cantwell", "accelerometer", "filip", "whl", "productio", "milli", "pdi", "bedava", "penobscot", "grav", "llcs", "fmr", "pimsleur", "micky", "setcl", "johnathan", "alisha", "gambier", "enterta", "crosley", "usace", "byrds", "sgm", "darrel", "isola", "laminator", "krazy", "diaryland", "bhubaneshwar", "quadrature", "summerland", "alessandra", "gsn", "dentry", "catskills", "tablecloths", "herder", "gec", "cinematical", "outfall", "unzipped", "plcc", "osb", "interchangeably", "concurs", "wef", "deformations", "farting", "nonspecific", "mek", "ohhh", "atopic", "harker", "culling", "limon", "murata", "zealot", "arca", "jmc", "toot", "rino", "sisley", "iveco", "gooey", "bielefeld", "parrott", "veillard", "lisinopril", "nprm", "tookie", "shanti", "burkett", "wemon", "turmeric", "carnelian", "zea", "geom", "dorman", "hmac", "abstracting", "parietal", "glyphosate", "underpants", "appleseed", "mandating", "prequalification", "macross", "kondo", "muzi", "bidet", "grubb", "redif", "oam", "domenici", "transdermal", "abramson", "recreating", "snot", "ductile", "dimensionless", "carex", "contractually", "kippur", "fibroids", "courtyards", "calderon", "dogster", "flattening", "sterilized", "pkcs", "unformatted", "cvr", "insulate", "afd", "tuolumne", "cobblestone", "showplace", "stockpiles", "mandir", "autore", "ashish", "meijer", "camberley", "babson", "fiennes", "meteorologist", "colonoscopy", "lofi", "tryp", "duromine", "alkaloids", "quesnel", "ake", "initrd", "centrality", "pisses", "campaigned", "twinning", "imag", "taster", "greenlight", "musicbrainz", "sourdough", "warrantless", "mzm", "croat", "arbors", "canwest", "homedics", "anydvd", "jnr", "odm", "dnn", "ashtrays", "punters", "dropper", "sarkar", "szabo", "wack", "ecx", "fette", "axl", "yoy", "spyro", "kendo", "surinam", "suze", "xenophobia", "krypton", "heisenberg", "dvcam", "nary", "ninn", "csis", 
"reconfigurable", "smil", "courchevel", "kittie", "lipman", "doz", "bsl", "chucky", "schlampe", "webdev", "doubleclick", "bushman", "pornofilm", "ood", "conexant", "hydroxylase", "rme", "multipass", "woodwinds", "telefoon", "ricotta", "motorways", "gandhinagar", "nsg", "edelweiss", "frampton", "humidor", "vacationing", "naturalizer", "dinesh", "techassist", "airdrie", "schiphol", "bruner", "tangy", "cfe", "gurnee", "bogdan", "farina", "gant", "cokin", "tricity", "cutaway", "artsy", "severability", "transferor", "cliches", "nosferatu", "indycar", "klimt", "onetouch", "dooney", "oconee", "smartbargains", "prl", "sackville", "camberwell", "hotlines", "hazelton", "nlg", "reaffirms", "anleitung", "webalizer", "libboost", "golds", "pfs", "imei", "corante", "recipesource", "ranching", "seguin", "calderdale", "anzeige", "toothpick", "volser", "westcoast", "forwarders", "aab", "likable", "ashburton", "natrol", "sonstiges", "shoestring", "vsx", "hosa", "brads", "winsite", "whirling", "doghouse", "displaytime", "bda", "ranitidine", "elit", "grebe", "standup", "playgirl", "flexion", "ibex", "geomagnetic", "lowestoft", "blobs", "footers", "reiss", "lewistown", "droppings", "designator", "causative", "brt", "woolrich", "gwasanaethau", "keefe", "tfp", "loveseat", "diethylpropion", "karyn", "handedly", "uncontested", "fov", "doxorubicin", "nerja", "cardiologists", "militarily", "fsus", "inflating", "sputnik", "barometric", "joburg", "assertequals", "gladwell", "regrowth", "lusaka", "lampwork", "adultos", "cybersex", "banca", "doughnut", "martz", "cribbage", "mela", "rondo", "tigr", "personel", "wcpo", "activ", "uiconstraints", "typescript", "inetd", "scuola", "piste", "pppd", "enos", "ondemand", "altamont", "steubenville", "rur", "danielson", "barfly", "vegetarianism", "extractors", "dictaphone", "callsign", "martinis", "envisions", "flexibly", "nakd", "natwest", "wilsons", "ccn", "reposition", "msci", "orginal", "hobbyists", "anat", "fleshbot", "weta", "sindh", "pcf", "glick", "obsoletes", "mammogram", "sani", "webcasting", "soggy", "apha", "ecologist", "ararat", "narrowband", "bph", "webstore", "maus", "reinstalling", "gendered", "relateddiagram", "kingsland", "ssid", "rackets", "litigants", "shimon", "ducted", "ebsq", "crisps", "modelle", "wristwatches", "xenadrine", "linac", "identifications", "dressy", "authenticator", "arash", "cristobal", "stewie", "depositories", "pcre", "setpoint", "rockdale", "evita", "ballmer", "hemphill", "taormina", "plath", "pickers", "boardgamegeek", "serbo", "oci", "noviembre", "mappoint", "surn", "minisd", "madmums", "mosher", "digitallife", "grahame", "forecasters", "linoleum", "shearling", "stockster", "firstcall", "dorint", "wmc", "culverts", "cuticle", "codebase", "rdfs", "lter", "pimples", "hdb", "shorted", "loghi", "spunky", "razz", "komatsu", "bietet", "madisonville", "readies", "jovenes", "deuterium", "totalitarianism", "trigonometric", "selmer", "popcap", "verbosity", "aashto", "pavarotti", "syncing", "vanden", "majeure", "beret", "fallbrook", "audiovideo", "muay", "longshot", "rollaway", "yor", "nonstandard", "tbr", "manoa", "laundries", "whoo", "tefal", "tothe", "crv", "amx", "falign", "goleta", "holst", "ebola", "redbook", "rangel", "consolidates", "disaggregated", "chromatographic", "supersport", "golly", "flumotion", "seagrass", "congratulates", "anais", "grievant", "reinstalled", "entreprises", "clemons", "eurovision", "airplus", "panchkula", "shahid", "phospholipids", "elsinore", "opendocument", "ankeny", "canzoni", "wakeman", "moana", "wobbly", 
"seagulls", "megawatts", "denning", "temas", "illuminator", "marylebone", "symbolically", "erotico", "linx", "randle", "nhu", "unsubstantiated", "centroid", "monogrammed", "gambian", "tailgating", "colville", "vpu", "russische", "sgp", "soccernet", "zing", "downunder", "snips", "allawi", "lockup", "cholinergic", "lhr", "barthelemy", "babymint", "benning", "implantable", "ligo", "haddad", "univariate", "katia", "motorcross", "sangha", "shn", "myfonts", "usuarios", "caml", "resiliency", "barossa", "astrobiology", "disinfectants", "kawai", "uktv", "dreamtime", "berkshires", "inhumane", "trobe", "unlocks", "auctex", "pogues", "panicked", "developerworks", "bullitt", "toed", "smartcard", "kushner", "hardcoresex", "crump", "gunderson", "paramus", "cepr", "lma", "politica", "randomization", "rinsing", "reschedule", "tob", "hostal", "preempt", "resold", "cyclo", "phosphor", "frontenac", "wipeout", "mambots", "unscented", "ipfw", "ergonomically", "roosters", "homologues", "loring", "ionosphere", "belvidere", "trotsky", "airworthiness", "sistemas", "devsource", "retroviral", "llnl", "keyloggers", "amgen", "marci", "willey", "yau", "groucho", "foreshore", "gusset", "dissapointed", "dtds", "mibs", "metalwork", "refering", "punting", "triphasil", "scab", "bhavnagar", "creedence", "musee", "wellstone", "lleol", "gpib", "tidbit", "allyson", "teriyaki", "impoundment", "interrelationships", "gres", "coffeecup", "maru", "joon", "josephus", "ulong", "maputo", "chev", "krispy", "dogtown", "abernathy", "raz", "fermion", "weltweit", "fluor", "bergstrom", "inoperable", "esrc", "asdf", "gollum", "ceus", "macintyre", "srd", "cyclonic", "cft", "unsubscribing", "shawna", "pinyin", "ipac", "ramone", "fethiye", "multipath", "hakusho", "tein", "treeview", "atd", "wonderswan", "eugenics", "dustjacket", "emmanuelle", "dlocaledir", "molotov", "sandpaper", "hbc", "fannin", "interscope", "eba", "melayu", "hardiness", "liss", "phew", "furuno", "moynihan", "johnsons", "heng", "dro", "carbonated", "waives", "wraparound", "jfs", "ejackulation", "reboots", "headliner", "sqr", "bustin", "powernetworker", "vul", "superposition", "supremes", "insite", "fanzine", "laney", "purportedly", "antigenic", "rurouni", "dietetics", "assembles", "veracruz", "hausfrauen", "wsf", "benzo", "vietcong", "chairwoman", "petrochemicals", "pata", "cntr", "nettime", "techies", "bentyxxo", "xango", "radish", "gatto", "checkmate", "gantt", "valli", "tuv", "starlets", "plavix", "roomba", "aficionado", "motivator", "bijan", "riv", "storrs", "tabula", "reigate", "emmons", "sandstorm", "laci", "taoist", "nameplate", "axp", "wcb", "mothering", "billard", "chrysanthemum", "reconstructions", "innodb", "sunspot", "aisha", "fluorine", "healdsburg", "retype", "fishin", "likud", "cyberread", "pme", "rothwell", "kmf", "creationist", "wth", "setlist", "scrollbars", "bocelli", "zuckerman", "vtd", "ampicillin", "arcy", "wasn", "cowbell", "rater", "everson", "angebot", "cezanne", "tamagotchi", "earpiece", "franca", "thymidine", "disa", "gearlog", "tranche", "volum", "prsp", "openvpn", "mcentire", "londra", "kaur", "unconstrained", "datadirect", "souter", "redfern", "tulum", "nyy", "pagesize", "osteopathy", "stavanger", "cated", "autry", "fip", "rooftops", "findpage", "discourages", "benitez", "boater", "shackleton", "weirdo", "congresswoman", "dalek", "tass", "itrip", "myob", "helloween", "reperfusion", "fieldhouse", "manukau", "libname", "eucharistic", "mong", "homeware", "ckt", "winmx", "mobic", "farts", "rourke", "lackawanna", "villiers", "comercio", "huy", 
"brooksville", "falwell", "gwb", "donwload", "wrth", "attrs", "knockoffs", "esm", "bionicle", "hygienist", "nichole", "quidditch", "dartmoor", "rowlett", "stapled", "gardenweb", "butternut", "nummer", "groban", "asw", "arora", "yatsura", "warr", "hainan", "esg", "logoff", "cockroach", "xanadu", "computable", "occup", "playgroup", "tintin", "ethnicities", "webposition", "crafter", "roby", "disassemble", "boltzmann", "caos", "abidjan", "anise", "grainy", "hospitalizations", "notizie", "zoek", "sepultura", "walkabout", "pepperoni", "optimising", "cityreview", "boathouse", "katt", "weissman", "siri", "herkimer", "namecite", "refreshingly", "aph", "ryland", "sculptural", "neurophysiology", "gsk", "hermanus", "mocldy", "ngage", "annexure", "ipchains", "yosef", "tlds", "gozo", "pso", "helton", "outflows", "saas", "asthmatic", "guillemot", "realizations", "linguistically", "jaco", "mckinsey", "dezember", "hylafax", "reconstitution", "amateurwebcam", "lumberton", "interviewee", "intereco", "portola", "hematologic", "sgc", "rebbe", "pinup", "transcendence", "surah", "brendon", "farberware", "statisticians", "swatches", "perioperative", "maoist", "henkel", "lilangeni", "trapeze", "lemmings", "extents", "spams", "omagh", "workcentre", "sunbird", "cellophane", "deland", "blevins", "sacha", "cardholders", "dddd", "accessori", "qo", "araujo", "mylist", "pcu", "kloczek", "enet", "seperated", "clusty", "rolfe", "cuttack", "provantage", "dominio", "hyperbaric", "nannofossil", "logansport", "bulldozer", "blacksonblondes", "subprime", "overpayments", "sharpie", "modutils", "whitehaven", "whaley", "currier", "taproot", "topsite", "delorme", "rayner", "aio", "rossum", "urbanism", "colloquia", "ewr", "capillaries", "mountainside", "menthol", "blackouts", "starkey", "eves", "hpux", "canby", "dragonflies", "montrail", "findfont", "aigner", "urusei", "soundblaster", "beatle", "webzine", "propranolol", "inescapable", "swabs", "absorbance", "lbw", "audiofile", "simba", "mohd", "redgoldfish", "cornbread", "jcaho", "appendixes", "aod", "crestview", "keynotes", "fotolia", "subnets", "cau", "espanola", "busnes", "froggy", "decarboxylase", "elfman", "throughs", "prioritise", "oreck", "schottland", "bagpipe", "terns", "erythematosus", "ftrs", "excitatory", "mcevoy", "fujita", "niagra", "yq", "dribble", "hardwired", "hosta", "grambling", "exten", "seeger", "ringgold", "sondheim", "interconnecting", "inkjets", "ebv", "underpinnings", "lazar", "laxatives", "mythos", "soname", "colloid", "hiked", "defrag", "zanesville", "oxidant", "umbra", "poppin", "trebuchet", "pyrite", "partido", "drunks", "submitters", "branes", "mahdi", "agoura", "manchesteronline", "blunkett", "lapd", "kidder", "hotkey", "tirupur", "parkville", "crediting", "tmo"] | gpl-3.0 |
andrewcmyers/tensorflow | tensorflow/contrib/data/python/kernel_tests/zip_dataset_op_test.py | 39 | 4394 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ZipDatasetTest(test.TestCase):
def testZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64)
]
datasets = tuple([
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
])
zipped = dataset_ops.Dataset.zip(datasets)
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
results = sess.run(get_next)
for component, result_component in zip(
equal_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
variable_length_components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, variable_length_components)})
for i in range(2):
results = sess.run(get_next)
for component, result_component in zip(
variable_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNestedZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64, shape=[4, 20]),
array_ops.placeholder(dtypes.int64, shape=[4, 22]),
array_ops.placeholder(dtypes.float64, shape=[4])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
]
zipped = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([20], get_next[0].shape)
self.assertEqual([22], get_next[1][0].shape)
self.assertEqual([], get_next[1][1].shape)
with self.test_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
result1, (result2, result3) = sess.run(get_next)
self.assertAllEqual(equal_length_components[0][i], result1)
self.assertAllEqual(equal_length_components[1][i], result2)
self.assertAllEqual(equal_length_components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 |
GoogleCloudPlatform/public-datasets-pipelines | datasets/fec/pipelines/other_committee_tx_2018/other_committee_tx_2018_dag.py | 1 | 7056 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="fec.other_committee_tx_2018",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
other_committee_tx_2018_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="other_committee_tx_2018_transform_csv",
startup_timeout_seconds=600,
name="other_committee_tx_2018",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.fec.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://cg-519a459a-0ea3-42c2-b7bc-fa1143481f74.s3-us-gov-west-1.amazonaws.com/bulk-downloads/2018/oth18.zip",
"SOURCE_FILE_ZIP_FILE": "files/zip_file.zip",
"SOURCE_FILE_PATH": "files/",
"SOURCE_FILE": "files/itoth.txt",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/fec/other_committee_tx_2018/data_output.csv",
"PIPELINE_NAME": "other_committee_tx_2018",
"CSV_HEADERS": '["cmte_id","amndt_ind","rpt_tp","transaction_pgi","image_num","transaction_tp","entity_tp","name","city","state", "zip_code","employer","occupation","transaction_dt","transaction_amt","other_id","tran_id" ,"file_num", "memo_cd","memo_text","sub_id"]',
},
resources={
"request_memory": "3G",
"request_cpu": "1",
"request_ephemeral_storage": "5G",
},
)
# Task to load CSV data to a BigQuery table
load_other_committee_tx_2018_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_other_committee_tx_2018_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/fec/other_committee_tx_2018/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="fec.other_committee_tx_2018",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "cmte_id",
"type": "string",
"description": "Filer Identification Number",
"mode": "nullable",
},
{
"name": "amndt_ind",
"type": "string",
"description": "Amendment Indicator",
"mode": "nullable",
},
{
"name": "rpt_tp",
"type": "string",
"description": "Report Type",
"mode": "nullable",
},
{
"name": "transaction_pgi",
"type": "string",
"description": "Primary-General Indicator",
"mode": "nullable",
},
{
"name": "image_num",
"type": "integer",
"description": "Image Number",
"mode": "nullable",
},
{
"name": "transaction_tp",
"type": "string",
"description": "Transaction Type",
"mode": "nullable",
},
{
"name": "entity_tp",
"type": "string",
"description": "Entity Type",
"mode": "nullable",
},
{
"name": "name",
"type": "string",
"description": "Contributor/Lender/Transfer Name",
"mode": "nullable",
},
{
"name": "city",
"type": "string",
"description": "City/Town",
"mode": "nullable",
},
{
"name": "state",
"type": "string",
"description": "State",
"mode": "nullable",
},
{
"name": "zip_code",
"type": "string",
"description": "Zip Code",
"mode": "nullable",
},
{
"name": "employer",
"type": "string",
"description": "Employer",
"mode": "nullable",
},
{
"name": "occupation",
"type": "string",
"description": "Occupation",
"mode": "nullable",
},
{
"name": "transaction_dt",
"type": "date",
"description": "Transaction Date(MMDDYYYY)",
"mode": "nullable",
},
{
"name": "transaction_amt",
"type": "float",
"description": "Transaction Amount",
"mode": "nullable",
},
{
"name": "other_id",
"type": "string",
"description": "Other Identification Number",
"mode": "nullable",
},
{
"name": "tran_id",
"type": "string",
"description": "Transaction ID",
"mode": "nullable",
},
{
"name": "file_num",
"type": "integer",
"description": "File Number / Report ID",
"mode": "nullable",
},
{
"name": "memo_cd",
"type": "string",
"description": "Memo Code",
"mode": "nullable",
},
{
"name": "memo_text",
"type": "string",
"description": "Memo Text",
"mode": "nullable",
},
{
"name": "sub_id",
"type": "integer",
"description": "FEC Record Number",
"mode": "nullable",
},
],
)
other_committee_tx_2018_transform_csv >> load_other_committee_tx_2018_to_bq
| apache-2.0 |
pkruskal/scikit-learn | sklearn/utils/setup.py | 294 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
blackye/luscan-devel | thirdparty_libs/nltk/tokenize/regexp.py | 12 | 8204 | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Trevor Cohn <[email protected]>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
r"""
Regular-Expression Tokenizers
A ``RegexpTokenizer`` splits a string into substrings using a regular expression.
For example, the following tokenizer forms tokens out of alphabetic sequences,
money expressions, and any other non-whitespace sequences:
>>> from nltk.tokenize import RegexpTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> tokenizer = RegexpTokenizer('\w+|\$[\d\.]+|\S+')
>>> tokenizer.tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
A ``RegexpTokenizer`` can use its regexp to match delimiters instead:
>>> tokenizer = RegexpTokenizer('\s+', gaps=True)
>>> tokenizer.tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
Note that empty tokens are not returned when the delimiter appears at
the start or end of the string.
The material between the tokens is discarded. For example,
the following tokenizer selects just the capitalized words:
>>> capword_tokenizer = RegexpTokenizer('[A-Z]\w+')
>>> capword_tokenizer.tokenize(s)
['Good', 'New', 'York', 'Please', 'Thanks']
This module contains several subclasses of ``RegexpTokenizer``
that use pre-defined regular expressions.
>>> from nltk.tokenize import BlanklineTokenizer
>>> # Uses '\s*\n\s*\n\s*':
>>> BlanklineTokenizer().tokenize(s)
['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.',
'Thanks.']
All of the regular expression tokenizers are also available as functions:
>>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize
>>> regexp_tokenize(s, pattern='\w+|\$[\d\.]+|\S+')
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
'.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
>>> blankline_tokenize(s)
['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.']
Caution: The function ``regexp_tokenize()`` takes the text as its
first argument, and the regular expression pattern as its second
argument. This differs from the conventions used by Python's
``re`` functions, where the pattern is always the first argument.
(This is for consistency with the other NLTK tokenizers.)
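For illustration only (not a doctest), the two calling conventions compare as
follows, reusing the string ``s`` defined above::
    regexp_tokenize(s, '\w+|\$[\d\.]+|\S+')   # NLTK: text first, pattern second
    re.findall('\w+|\$[\d\.]+|\S+', s)        # stdlib re: pattern first, text second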
"""
import re
import sre_constants
from nltk.internals import convert_regexp_to_nongrouping
from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import regexp_span_tokenize
class RegexpTokenizer(TokenizerI):
"""
A tokenizer that splits a string using a regular expression, which
matches either the tokens or the separators between tokens.
>>> tokenizer = RegexpTokenizer('\w+|\$[\d\.]+|\S+')
:type pattern: str
:param pattern: The pattern used to build this tokenizer.
(This pattern may safely contain grouping parentheses.)
:type gaps: bool
:param gaps: True if this tokenizer's pattern should be used
to find separators between tokens; False if this
tokenizer's pattern should be used to find the tokens
themselves.
:type discard_empty: bool
:param discard_empty: True if any empty tokens `''`
generated by the tokenizer should be discarded. Empty
tokens can only be generated if `_gaps == True`.
:type flags: int
:param flags: The regexp flags used to compile this
tokenizer's pattern. By default, the following flags are
used: `re.UNICODE | re.MULTILINE | re.DOTALL`.
"""
def __init__(self, pattern, gaps=False, discard_empty=True,
flags=re.UNICODE | re.MULTILINE | re.DOTALL):
# If they gave us a regexp object, extract the pattern.
pattern = getattr(pattern, 'pattern', pattern)
self._pattern = pattern
self._gaps = gaps
self._discard_empty = discard_empty
self._flags = flags
self._regexp = None
# Remove grouping parentheses -- if the regexp contains any
# grouping parentheses, then the behavior of re.findall and
# re.split will change.
nongrouping_pattern = convert_regexp_to_nongrouping(pattern)
try:
self._regexp = re.compile(nongrouping_pattern, flags)
except re.error, e:
raise ValueError('Error in regular expression %r: %s' %
(pattern, e))
def tokenize(self, text):
# If our regexp matches gaps, use re.split:
if self._gaps:
if self._discard_empty:
return [tok for tok in self._regexp.split(text) if tok]
else:
return self._regexp.split(text)
# If our regexp matches tokens, use re.findall:
else:
return self._regexp.findall(text)
def span_tokenize(self, text):
if self._gaps:
for left, right in regexp_span_tokenize(text, self._regexp):
if not (self._discard_empty and left == right):
yield left, right
else:
for m in re.finditer(self._regexp, text):
yield m.span()
def __repr__(self):
return ('%s(pattern=%r, gaps=%r, discard_empty=%r, flags=%r)' %
(self.__class__.__name__, self._pattern, self._gaps,
self._discard_empty, self._flags))
class WhitespaceTokenizer(RegexpTokenizer):
r"""
Tokenize a string on whitespace (space, tab, newline).
In general, users should use the string ``split()`` method instead.
>>> from nltk.tokenize import WhitespaceTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> WhitespaceTokenizer().tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
"""
def __init__(self):
RegexpTokenizer.__init__(self, r'\s+', gaps=True)
class BlanklineTokenizer(RegexpTokenizer):
"""
Tokenize a string, treating any sequence of blank lines as a delimiter.
Blank lines are defined as lines containing no characters, except for
space or tab characters.
"""
def __init__(self):
RegexpTokenizer.__init__(self, r'\s*\n\s*\n\s*', gaps=True)
class WordPunctTokenizer(RegexpTokenizer):
"""
Tokenize a text into a sequence of alphabetic and
non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``.
>>> from nltk.tokenize import WordPunctTokenizer
>>> s = "Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks."
>>> WordPunctTokenizer().tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
'.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
"""
def __init__(self):
RegexpTokenizer.__init__(self, r'\w+|[^\w\s]+')
######################################################################
#{ Tokenization Functions
######################################################################
def regexp_tokenize(text, pattern, gaps=False, discard_empty=True,
flags=re.UNICODE | re.MULTILINE | re.DOTALL):
"""
Return a tokenized copy of *text*. See :class:`.RegexpTokenizer`
for descriptions of the arguments.
"""
tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags)
return tokenizer.tokenize(text)
blankline_tokenize = BlanklineTokenizer().tokenize
wordpunct_tokenize = WordPunctTokenizer().tokenize
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| gpl-2.0 |
crigroup/criros | src/criros/raveutils.py | 1 | 17736 | #! /usr/bin/env python
import copy
import criros
import itertools
import numpy as np
import scipy.spatial
import sklearn.cluster
import openravepy as orpy
# Transformations
import tf.transformations as tr
# Logger
from criros.utils import TextColors
logger = TextColors()
# Supported IK types
iktype5D = orpy.IkParameterization.Type.TranslationDirection5D
iktype6D = orpy.IkParameterization.Type.Transform6D
SUPPORTED_IK_TYPES = [iktype5D, iktype6D]
class Hole(object):
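  """
  Lightweight container for a hole detected in a mesh: an XYZ position, a
  unit direction vector and a positive depth.
  """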
def __init__(self, position, direction, depth):
self.position = np.array(position)
self.direction = tr.unit_vector(direction)
self.depth = abs(depth)
def __repr__(self):
printoptions = np.get_printoptions()
np.set_printoptions(precision=4, suppress=True)
text = '<Hole(pos: {0} dir: {1} depth: {2})>'.format(self.position, self.direction, self.depth)
np.set_printoptions(**printoptions)
return text
def __str__(self):
return self.__repr__()
def get_ray(self):
return orpy.Ray(self.position, self.direction)
def transform(self, T):
ray = self.get_ray()
Thole = criros.conversions.from_ray(ray)
Tnew = np.dot(T, Thole)
newray = criros.conversions.to_ray(Tnew)
self.position = newray.pos()
self.direction = newray.dir()
def compute_bounding_box_corners(body, Tbody=None, scale=1.0):
"""
Computes the bounding box corners (8 corners) for the given body.
If C{Tbody} is given (not None), the corners are transformed.
  The C{scale} parameter is a factor used to scale the extents of the
  bounding box.
@type body: orpy.KinBody
@param body: The OpenRAVE body
@type Tbody: np.array
@param Tbody: homogeneous transformation to transform the corners. If None,
the corners are given using the current position of the body in OpenRAVE.
  @type scale: float
@param scale: the scale factor to modify the extents of the bounding box.
@rtype: list
  @return: list containing the 8 box corners. Each corner is an XYZ point of type C{np.array}.
"""
  # Create a dummy body and use OpenRAVE to get the corners
env = body.GetEnv()
dummy = orpy.RaveCreateKinBody(env, '')
dummy.Clone(body, 0)
if Tbody is not None:
with env:
dummy.SetTransform(Tbody)
aabb = dummy.ComputeAABB()
corners = []
for k in itertools.product([-1,1],[-1,1],[-1,1]):
corners.append(aabb.pos() + np.array(k)*aabb.extents()*scale)
return corners
def enable_body(body, enable):
"""
Enables all the links of a body.
@type body: orpy.KinBody
@param body: The OpenRAVE body
@type enable: bool
@param enable: If true, will enable all the links.
"""
env = body.GetEnv()
with env:
for link in body.GetLinks():
link.Enable(enable)
def environment_from_dict(config, env=None, logger=TextColors()):
"""
  Loads and configures an OpenRAVE environment from a configuration dictionary.
  This approach allows us to encapsulate additional information that would be tedious
  to include if we only used the OpenRAVE XML specification.
@type config: dict
@param config: The configuration dictionary
@rtype: orpy.Environment
@return: The OpenRAVE environment loaded
"""
if not isinstance(config, dict):
logger.logwarn('config is not a dict')
return None
# Check required fields are in the config dict
required_fields = ['world']
if not criros.utils.has_keys(config, required_fields):
logger.logwarn( 'config dict does not have the required fields: {0}'.format(required_fields) )
return None
if env is None:
env = orpy.Environment()
if not env.Load(config['world']):
env = None
return None
# OPTIONAL parameters
# Viewer parameters
if config.has_key('viewer'):
viewer_name = config['viewer']['name']
if viewer_name == 'default':
env.SetDefaultViewer()
else:
env.SetViewer(viewer_name)
# The camera where we look the viewer from
if config['viewer'].has_key('camera'):
transform_dict = config['viewer']['camera']
camera_fields = ['rotation','translation']
if not criros.utils.has_keys(transform_dict, camera_fields):
logger.logwarn('camera dict does not have the required fields: {0}'.format(camera_fields))
elif env.GetViewer() is not None:
Tcam = criros.conversions.from_dict(transform_dict)
env.GetViewer().SetCamera(Tcam)
# Return configured environment
return env
def destroy_env(env):
"""
  Helper function that properly destroys an OpenRAVE environment.
@note: Useful when working with C{IPython} + QtCoin viewer.
@type env: orpy.Environment
@param env: The OpenRAVE environment
"""
env.Reset()
env.Destroy()
def find_body_holes(body, radius, absolute=True):
import trimesh
mesh_holes = dict()
body_holes = dict()
Tbody = body.GetTransform()
for link in body.GetLinks():
link_holes = []
Tlink = link.GetTransform()
for geometry in link.GetGeometries():
if geometry.GetType() == orpy.GeometryType.Trimesh:
filename = geometry.GetRenderFilename()
scale = geometry.GetRenderScale()
pose = geometry.GetTransformPose()
if filename not in mesh_holes:
mesh = trimesh.load(filename)
mesh_holes[filename] = find_mesh_holes(mesh.vertices, mesh.faces, radius, scale)
for h in mesh_holes[filename]:
hole = copy.deepcopy(h)
Tmesh = np.dot(Tlink, orpy.matrixFromPose(pose))
if absolute:
hole.transform(np.dot(Tbody,Tmesh))
else:
hole.transform(Tmesh)
link_holes.append(hole)
if len(link_holes) > 0:
body_holes[str(link.GetName())] = link_holes
return body_holes
def find_mesh_holes(vert, faces, radius, scale=1., fitplane_eps=1e-8,
fitplane_attempts=10):
vertices = np.array(vert)*scale
# Circles have lots of vertices. Use clustering to locate them
eps = radius + 1e-3
db = sklearn.cluster.DBSCAN(eps=eps, min_samples=10).fit(vertices)
unique_labels = set(db.labels_)
circles_info = []
for k in unique_labels:
if k == -1: # Unknown cluster
continue
points = vertices[db.labels_==k]
for _ in range(fitplane_attempts):
seed = np.zeros(4)
seed[:3] = tr.unit_vector(tr.random_vector(3))
res = criros.spalg.fit_plane_optimize(points, seed=seed)
equation=res[0]
fit_error = res[2]
if fit_error < fitplane_eps:
break
data = dict()
data['center'] = np.mean(points, axis=0)
data['plane'] = criros.spalg.Plane(equation=res[0])
circles_info.append(data)
if fit_error > fitplane_eps:
# Report circles that weren't fitted properly
print 'Circle planefit error above threshold: {0}'.format(fit_error)
  # One hole is composed of two circles, pair them
holes = []
num_circles = len(circles_info)
found = set()
for i in range(num_circles):
if i in found:
continue # Skip already paired circles
for j in range(1, num_circles):
if (j in found) or (i == j):
continue # Skip already paired circles
plane_i = circles_info[i]['plane']
plane_j = circles_info[j]['plane']
ni = plane_i.normal
nj = plane_j.normal
parallel = np.isclose(abs(np.dot(ni,nj)), 1.)
center_i = circles_info[i]['center']
center_j = circles_info[j]['center']
pi = center_i
pj = plane_i.project(center_j)
if parallel and np.allclose(pi, pj):
found.add(i)
found.add(j)
position = center_j
diff = center_i - center_j
direction = tr.unit_vector(diff)
depth = np.linalg.norm(diff)
holes.append(Hole(position, direction, depth))
return holes
def generate_convex_decomposition_model(robot, padding):
cdmodel = orpy.databases.convexdecomposition.ConvexDecompositionModel(robot, padding=padding)
cdmodel.generate(padding=padding, minTriangleConvexHullThresh=12000, skinWidth=0, decompositionDepth=8, maxHullVertices=256, concavityThresholdPercent=10, mergeThresholdPercent=30, volumeSplitThresholdPercent=15)
cdmodel.save()
if cdmodel.load():
return cdmodel
else:
return None
def get_arm_length_estimate(robot):
"""
  The best estimate of the arm length is obtained by summing the distances between the anchors of consecutive joints along the kinematic chain
"""
manip = robot.GetActiveManipulator()
armjoints = [j for j in robot.GetDependencyOrderedJoints() if j.GetJointIndex() in manip.GetArmIndices()]
baseanchor = armjoints[0].GetAnchor()
eetrans = manip.GetEndEffectorTransform()[0:3,3]
armlength = 0
for j in armjoints[::-1]:
armlength += np.sqrt(np.sum((eetrans-j.GetAnchor())**2))
eetrans = j.GetAnchor()
return armlength
def get_enabled_bodies(env):
"""
Returns a C{set} with the names of the bodies enabled in the given environment
@type env: orpy.Environment
@param env: The OpenRAVE environment
@rtype: set
@return: The names of the enabled bodies
"""
enabled_bodies = []
with env:
for body in env.GetBodies():
if body.IsEnabled():
enabled_bodies.append(body.GetName())
return set(enabled_bodies)
def get_robot_iktypes(robot):
"""
  Returns a dict with the manipulator:[iktypes] pairs of available IK solvers.
  @type robot: orpy.Robot
  @param robot: The OpenRAVE robot
  @rtype: dict
  @return: The dict with the manipname:[iktypes] pairs.
"""
robot_iktypes = dict()
for manip in robot.GetManipulators():
iktypes = []
for iktype in SUPPORTED_IK_TYPES:
ikmodel = orpy.databases.inversekinematics.InverseKinematicsModel(iktype=iktype, manip=manip)
if ikmodel.load():
iktypes.append(iktype)
if iktypes:
robot_iktypes[manip.GetName()] = list(iktypes)
return robot_iktypes
def move_origin_to_body(refbody):
"""
  Moves everything in the OpenRAVE scene so that the C{refbody} ends up at the origin.
@type refbody: orpy.KinBody
@param refbody: The body that will be at the origin
"""
env = refbody.GetEnv()
Toffset = criros.spalg.transform_inv( refbody.GetTransform() )
grabbed_names = [body.GetName() for robot in env.GetRobots() for body in robot.GetGrabbed()]
with env:
for body in env.GetBodies():
      # Don't move grabbed bodies. They will move once we move the robot grabbing them.
if body.GetName() in grabbed_names:
continue
Tbody = body.GetTransform()
body.SetTransform( np.dot(Toffset, Tbody) )
def move_out_of_collision(env, body, max_displacement=0.005):
"""
Moves an OpenRAVE body out of collision in the opposite direction to the penetration direction.
@type env: orpy.Environment
@param env: The OpenRAVE environment.
@type body: orpy.KinBody
@param body: The OpenRAVE body.
@type max_displacement: float
@param max_displacement: The maximum displacement we can apply to the body.
"""
if not env.CheckCollision(body):
# Not in collision
return True
# Need to use pqp collision checker
previous_cc = env.GetCollisionChecker()
checker = orpy.RaveCreateCollisionChecker(env, 'pqp')
checker.SetCollisionOptions(orpy.CollisionOptions.Distance|orpy.CollisionOptions.Contacts)
env.SetCollisionChecker(checker)
# Collision report
report = orpy.CollisionReport()
env.CheckCollision(body, report)
# Restore previous collision checker
env.SetCollisionChecker(previous_cc)
# Get the direction we should push the object
positions = []
normals = []
occurrences = []
for c in report.contacts:
positions.append(c.pos)
if len(normals) == 0:
normals.append(c.norm)
occurrences.append(1)
continue
found = False
for i,normal in enumerate(normals):
if np.allclose(c.norm, normal):
occurrences[i] += 1
found = True
break
if not found:
normals.append(c.norm)
occurrences.append(1)
push_direction = tr.unit_vector(normals[np.argmax(occurrences)])
# Get the distance we should push the object
Tbody = body.GetTransform()
Tnew = np.array(Tbody)
push_distance = 0
while env.CheckCollision(body):
push_distance += 0.001
Tnew[:3,3] = Tbody[:3,3] + push_distance*push_direction
body.SetTransform(Tnew)
if push_distance > max_displacement:
print 'push_distance: {0}'.format(push_distance)
body.SetTransform(Tbody)
return False
return True
def random_joint_positions(robot):
"""
Generates random joint positions within joint limits for the given robot.
@type robot: orpy.Robot
@param robot: The OpenRAVE robot
@rtype: np.array
  @return: The random joint positions for the active DOFs
"""
# Get the limits of the active DOFs
lower, upper = robot.GetActiveDOFLimits()
positions = lower + np.random.rand(len(lower))*(upper-lower)
return positions
def remove_bodies(env, remove=None, keep=None):
"""
Removes the specified bodies from the OpenRAVE environment.
You can specify the bodies to be removed or kept.
@type env: orpy.Environment
@param env: The OpenRAVE environment
@type remove: list
@param remove: list of objects to remove
@type keep: list
@param keep: list of objects to keep
"""
# Check that one of the lists is None
if (remove is None) and (type(keep) is list):
case = 1
elif (keep is None) and (type(remove) is list):
case = 2
else:
return
for body in env.GetBodies():
remove_body = False
name = body.GetName()
if case == 1:
remove_body = name not in keep
if case == 2:
remove_body = name in remove
if remove_body:
with env:
env.Remove(body)
def remove_body_padding(body):
"""
Restores the collision meshes of the body. The original collision
  meshes are stored as C{UserData} by the C{set_body_padding} function.
@type body: orpy.KinBody
@param body: The OpenRAVE body
@rtype: bool
@return: True if succeeded, False otherwise
"""
if not body.IsRobot():
raise Exception('Not implemented yet for bodies')
robot = body
original_collision_meshes = robot.GetUserData()
if original_collision_meshes is None:
logger.logerr('Robot user data is empty: {0}'.format(robot.GetName()))
return False
for name,meshes in original_collision_meshes.items():
link = robot.GetLink(name)
for geom,mesh in itertools.izip(link.GetGeometries(), meshes):
if mesh is not None:
geom.SetCollisionMesh(mesh)
return True
def set_body_padding(body, padding, generate=False, links=[]):
"""
Sets the padding for the specified links. If C{links} is empty,
the padding will be done for ALL the links.
@type body: orpy.KinBody
@param body: The OpenRAVE body
@type padding: float
@param padding: The padding value.
@type generate: bool
@param generate: If set, the ConvexDecompositionModel will be
generated if it doesn't exist already.
@type links: list
@param links: The list of links to be padded. If it is empty,
the padding will be done for ALL the links.
@rtype: bool
@return: True if succeeded, False otherwise
"""
if not body.IsRobot():
raise Exception('Not implemented yet for bodies')
robot = body
cdmodel = orpy.databases.convexdecomposition.ConvexDecompositionModel(robot, padding=padding)
if not cdmodel.load():
if generate:
      cdmodel = generate_convex_decomposition_model(robot, padding)
      if cdmodel is None:
logger.logerr('Failed to generate ConvexDecompositionModel: {0}'.format(robot.GetName()))
return False
else:
logger.logerr('ConvexDecompositionModel database for robot {0} with padding {1:.3f} not found'.format(robot.GetName(), padding))
return False
if len(links) == 0:
# Do it for all the links
links = [l.GetName() for l in robot.GetLinks()]
original_collision_meshes = robot.GetUserData()
if original_collision_meshes is None:
original_collision_meshes = dict()
env = robot.GetEnv()
with env:
for link, linkcd in itertools.izip(robot.GetLinks(), cdmodel.linkgeometry):
if link.GetName() not in links:
continue
make_a_copy = link.GetName() not in original_collision_meshes
if make_a_copy:
original_collision_meshes[link.GetName()] = [None] * len(link.GetGeometries())
for ig,hulls in linkcd:
geom = link.GetGeometries()[ig]
if geom.IsModifiable():
if make_a_copy:
# Keep a copy of the original collision meshes
original_collision_meshes[link.GetName()][ig] = geom.GetCollisionMesh()
# Set the padded mesh
geom.SetCollisionMesh(cdmodel.GenerateTrimeshFromHulls(hulls))
robot.SetUserData(original_collision_meshes)
return True
def set_body_transparency(body, transparency=0.0, links=None):
"""
Sets the transparency value of a body recursively.
@type body: orpy.KinBody
@param body: The OpenRAVE body
@type transparency: float
@param transparency: The transparency value. If it's out of range [0.0, 1.0], it'll be clipped.
@type links: list
@param links: Links to be changed. By default all the links are changed
"""
transparency = np.clip(transparency, 0.0, 1.0)
env = body.GetEnv()
with env:
for link in body.GetLinks():
if type(links) == list:
if link.GetName() not in links:
continue
for geom in link.GetGeometries():
geom.SetTransparency(transparency)
def trimesh_from_point_cloud(cloud):
"""
Converts a PCL point cloud into a OpenRAVE trimesh
@type cloud: pcl.Cloud
@param cloud: The PCL cloud
@rtype: orpy.Trimesh
@return: The OpenRAVE trimesh
"""
points = np.asarray(cloud)
hull = scipy.spatial.ConvexHull(points)
hull = scipy.spatial.ConvexHull(points[hull.vertices])
criros.spalg.counterclockwise_hull(hull)
return orpy.TriMesh(hull.points, hull.simplices)
| bsd-3-clause |
davidam/python-examples | scikit/plot_nested_cross_validation_iris.py | 19 | 4413 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {:6f} with std. dev. of {:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| gpl-3.0 |
sigopt/sigopt_sklearn | sigopt_sklearn/search.py | 1 | 20866 | from __future__ import absolute_import, print_function
import math
import os
from multiprocessing import TimeoutError
import sys
import time
import warnings
import collections
import sigopt
import numpy
from joblib import Parallel, delayed
from joblib.func_inspect import getfullargspec
try:
# For scikit-learn >= 0.18
from sklearn.model_selection import check_cv as base_check_cv
def our_check_cv(cv, X, y, classifier):
ret = base_check_cv(cv, y, classifier)
return ret.n_splits, list(ret.split(X, y=y))
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._validation import _fit_and_score
except ImportError:
# For scikit-learn < 0.18
from sklearn.grid_search import BaseSearchCV
from sklearn.cross_validation import check_cv as base_check_cv, _fit_and_score
def our_check_cv(cv, X, y, classifier):
ret = base_check_cv(cv, X, y, classifier)
return len(ret), list(iter(ret))
from sklearn.metrics.scorer import check_scoring
from sklearn.utils.validation import _num_samples, indexable
from sklearn.base import is_classifier, clone
HANDLES_UNICODE = sys.version_info[0] >= 3
class SigOptSearchCV(BaseSearchCV):
"""SigOpt powered search on hyper parameters.
SigOptSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is chosen from the specified
domains. The number of parameter settings that are tried is
given by n_iter.
Parameters
----------
estimator : estimator object.
    An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_domains : dict
    Dictionary with parameter names (string) as keys and domains as lists
    of parameter ranges to try. Domains are either lists of categorical
    (string) values or 2-element (min, max) tuples/lists for integer
    or float parameters.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades off runtime
vs quality of the solution.
n_sug : int, default=1
Number of suggestions to retrieve from SigOpt for evaluation in parallel
client_token : string, optional
SigOpt API client token, find yours here:
https://sigopt.com/tokens. This field is required except when the
``sigopt_connection`` argument is present or when the
``SIGOPT_API_TOKEN`` environment variable is set. We recommend using
this instead of ``sigopt_connection``.
sigopt_connection : sigopt.interface.Connection, optional
SigOpt API Connection object. If present, this object will be used to
connect to SigOpt in lieu of the client token. We recommend using the
``client_token`` option instead of this one.
opt_timeout : float, optional
Max time for entire optimization process
cv_timeout : float, optional
Max time each CV fold objective evaluation can take
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or a scorer callable
object / function with signature ``scorer(estimator, X, y)``. If
``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an explosion of
memory consumption when more jobs get dispatched than CPUs can process.
This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across the
folds, and the loss minimized is the total loss per sample, and not the
mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
    either binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
    Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset. If "False", it is
    impossible to make predictions using this SigOptSearchCV instance
after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting. If
set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
  parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
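  Examples
  --------
  A minimal usage sketch; the estimator, parameter domains, token placeholder
  and training data below are illustrative, not values required by this
  class::
    from sklearn.svm import SVC
    from sigopt_sklearn.search import SigOptSearchCV
    svc_params = {'C': (0.5, 100.0), 'kernel': ['rbf', 'linear']}
    clf = SigOptSearchCV(SVC(), svc_params, n_iter=20,
                         client_token='<YOUR_SIGOPT_API_TOKEN>')
    clf.fit(X_train, y_train)  # X_train / y_train are your own training data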
"""
def __init__(self, estimator, param_domains, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, n_sug=1, pre_dispatch='2*n_jobs',
error_score='raise', cv_timeout=None, opt_timeout=None,
client_token=None, sigopt_connection=None, experiment=None):
self.param_domains = param_domains
self.n_iter = n_iter
self.n_sug = n_sug
self.cv_timeout = cv_timeout
self.opt_timeout = opt_timeout
self.verbose = verbose
# Stores the mappings between categorical strings to Python values. The keys correspond to parameter names and
# values correspond to the string-to-value mappings themselves.
self.categorical_mappings_ = {}
self.scorer_ = None
self.our_best_params_ = None
self.our_best_score_ = None
self.our_best_estimator_ = None
self.experiment = experiment
# Set up sigopt_connection
found_token = client_token or os.environ.get('SIGOPT_API_TOKEN')
if (not found_token) and (not sigopt_connection):
raise ValueError(
'Please set the `SIGOPT_API_TOKEN` environment variable, pass the ``client_token`` parameter, or pass '
'the ``sigopt_connection`` parameter. You can find your client token here: '
'https://sigopt.com/tokens.')
else:
self.sigopt_connection = (sigopt_connection if sigopt_connection
else sigopt.Connection(client_token=found_token))
super(SigOptSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def _transform_param_domains(self, param_domains):
def _transform_param(param_name, param_bounds):
"""Transform a parameter name and its bounds into a form that can be sent to the API layer."""
def _check_bounds():
"""Check that min/max bounds are well formed."""
if len(param_bounds) != 2:
raise Exception('Parameter bounds must be specified with two numbers! Not sure what to do with {}.'
.format(param_bounds))
if not isinstance(param_bounds, tuple):
warnings.warn('Parameter bounds should be specified as a tuple in the form (min, max).')
# Check that param bounds is either iterable (range/categoricals) or a dict (categoricals)
if not isinstance(param_bounds, (collections.Iterable, dict)):
raise Exception('Parameter bounds must be iterable or dicts! The range {} isn\'t friendly!'
.format(param_bounds))
param_dict = {'name': param_name}
if isinstance(param_bounds, dict):
# This is a categorical with mappings between strings and values
param_dict['type'] = 'categorical'
param_dict['categorical_values'] = [{'name': k} for k in param_bounds.keys()]
# Add this mapping to our set of categorical string mappings
self.categorical_mappings_[param_name] = param_bounds
elif all(isinstance(x, str) for x in param_bounds):
# This is a categorical with a list of strings naming each category
param_dict['type'] = 'categorical'
param_dict['categorical_values'] = [{'name': k} for k in param_bounds]
elif all(isinstance(x, int) for x in param_bounds):
# This is an integer parameter
_check_bounds()
param_dict['type'] = 'int'
param_dict['bounds'] = {'min': param_bounds[0], 'max': param_bounds[1]}
elif any(isinstance(x, float) for x in param_bounds):
# This is a continuous parameter. Note that we use `any` since the user may pass some combination of
# float and integer parameters, e.g. (0, 0.1).
_check_bounds()
param_dict['type'] = 'double'
param_dict['bounds'] = {'min': param_bounds[0], 'max': param_bounds[1]}
else:
# Not sure what the user gave us here
raise Exception('Bad parameter range {}.'.format(param_bounds))
return param_dict
# generate sigopt experiment parameters
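    # e.g. (illustrative) {'C': (0.5, 100.0), 'kernel': ['rbf', 'linear']} maps to
    # [{'name': 'C', 'type': 'double', 'bounds': {'min': 0.5, 'max': 100.0}},
    #  {'name': 'kernel', 'type': 'categorical',
    #   'categorical_values': [{'name': 'rbf'}, {'name': 'linear'}]}]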
return [_transform_param(name, bounds) for (name, bounds) in param_domains.items()]
def _create_sigopt_exp(self, conn):
est_name = self.estimator.__class__.__name__
exp_name = est_name + ' (sklearn)'
if len(exp_name) > 50:
exp_name = est_name
if self.verbose > 0:
print('Creating SigOpt experiment: ', exp_name)
# create sigopt experiment
experiment = conn.experiments().create(
name=exp_name,
parameters=self._transform_param_domains(self.param_domains),
observation_budget=self.n_iter,
)
if self.verbose > 0:
      exp_url = 'https://sigopt.com/experiment/{0}'.format(experiment.id)
print('Experiment progress available at :', exp_url)
return experiment
# NOTE(patrick): SVM can't handle unicode, so we need to convert those to string.
def _convert_unicode(self, data):
if HANDLES_UNICODE:
return data
# pylint: disable=undefined-variable
if isinstance(data, basestring):
return str(data)
# pylint: enable=undefined-variable
if isinstance(data, collections.Mapping):
return dict(map(self._convert_unicode, data.items()))
if isinstance(data, collections.Iterable):
return type(data)(map(self._convert_unicode, data))
return data
def _convert_log_params(self, param_dict):
# searches through names for params and converts params with __log__ names
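    # e.g. (illustrative) an assignment {'C__log__': 2.0} becomes {'C': math.exp(2.0)}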
log_converted_dict = {}
for pname in param_dict:
pval = param_dict[pname]
if '__log__' in pname:
pval = math.exp(pval)
pname = pname.replace('__log__', '')
log_converted_dict[pname] = pval
return log_converted_dict
def _convert_nonstring_categoricals(self, param_dict):
"""Apply the self.categorical_mappings_ mappings where necessary."""
return {name: (self.categorical_mappings_[name][val] if name in self.categorical_mappings_ else val)
for (name, val) in param_dict.items()}
def _convert_sigopt_api_to_sklearn_assignments(self, param_dict):
return self._convert_nonstring_categoricals(self._convert_log_params(self._convert_unicode(param_dict)))
# pylint: disable=unused-argument
def _run_search(self, evaluate_candidates):
# NOTE(patrick): scikit-learn 0.20.0 checks for the existence of this method, since
# the default implementation of `_fit` calls it. However, to maintain compatibility
# with older versions, we completely override _fit, so this method is unused. But
# we make sure it exists, so that the class can be instantiated
raise NotImplementedError('_run_search not used in this implementation')
# pylint: enable=unused-argument
def _fit(self, X, y, groups=None, parameter_iterable=None, **fit_params):
if groups is not None:
raise NotImplementedError('The groups argument is not supported.')
if parameter_iterable is not None:
raise NotImplementedError('The parameter_iterable argument is not supported.')
if self.fit_params is not None:
fit_params = self.fit_params
# Actual fitting, performing the search over parameters.
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
n_folds, cv_iter = our_check_cv(cv, X, y, classifier=is_classifier(estimator))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
# setup SigOpt experiment and run optimization
if not self.experiment:
self.experiment = self._create_sigopt_exp(self.sigopt_connection)
# start tracking time to optimize estimator
opt_start_time = time.time()
for jk in range(0, self.n_iter, self.n_sug):
# check for opt timeout, ensuring at least 1 observation
# TODO : handling failure observations
if (
self.opt_timeout is not None and
time.time() - opt_start_time > self.opt_timeout and
jk >= 1
):
# break out of loop and refit model with best params so far
break
suggestions = []
parameter_configs = []
for _ in range(self.n_sug):
suggestion = self.sigopt_connection.experiments(self.experiment.id).suggestions().create()
parameters = self._convert_sigopt_api_to_sklearn_assignments(suggestion.assignments.to_json())
suggestions.append(suggestion)
parameter_configs.append(parameters)
if self.verbose > 0:
print('Evaluating params : ', parameter_configs)
# do CV folds in parallel using joblib
# returns scores on test set
obs_timed_out = False
try:
par_kwargs = {'n_jobs': self.n_jobs, 'verbose': self.verbose,
'pre_dispatch': pre_dispatch}
# add timeout kwarg if version of joblib supports it
if 'timeout' in getfullargspec(Parallel.__init__).args:
par_kwargs['timeout'] = self.cv_timeout
out = Parallel(
**par_kwargs
)(
delayed(_fit_and_score)(clone(base_estimator), X, y,
self.scorer_, train, test,
self.verbose, parameters,
fit_params,
return_parameters=True,
error_score=self.error_score)
for parameters in parameter_configs
for train, test in cv_iter)
except TimeoutError:
obs_timed_out = True
if not obs_timed_out:
# grab scores from results
for sidx, suggestion in enumerate(suggestions):
out_idx = sidx * n_folds
scores = [o[0] for o in out[out_idx:out_idx+n_folds]]
self.sigopt_connection.experiments(self.experiment.id).observations().create(
suggestion=suggestion.id,
value=numpy.mean(scores),
value_stddev=numpy.std(scores)
)
else:
        # observation timed out, so report a failure
self.sigopt_connection.experiments(self.experiment.id).observations().create(
suggestion=suggestion.id,
failed=True)
# return best SigOpt assignments so far
best_assignments = self.sigopt_connection.experiments(self.experiment.id).best_assignments().fetch().data
if not best_assignments:
raise RuntimeError(
'No valid observations found. '
'Make sure opt_timeout and cv_timeout provide sufficient time for observations to be reported.')
self.our_best_params_ = self._convert_sigopt_api_to_sklearn_assignments(
best_assignments[0].assignments.to_json())
self.our_best_score_ = best_assignments[0].value
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(**self.best_params_)
if y is not None:
best_estimator.fit(X, y, **fit_params)
else:
best_estimator.fit(X, **fit_params)
self.our_best_estimator_ = best_estimator
return self
@property
def best_params_(self):
return self.our_best_params_
@property
def best_score_(self):
return self.our_best_score_
@property
def best_estimator_(self):
return self.our_best_estimator_
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit on the estimator with parameters chosen sequentially by SigOpt.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y=y, groups=groups, **fit_params)
| mit |
dimkal/mne-python | tutorials/plot_introduction.py | 9 | 13464 | """
.. _intro_tutorial:
Basic MEG and EEG data processing
=================================
MNE-Python reimplements most of MNE-C's (the original MNE command line utils)
functionality and offers transparent scripting.
On top of that it extends MNE-C's functionality considerably (customize
events, compute contrasts, group statistics, time-frequency analysis,
EEG-sensor space analyses, etc.). It uses the same files as standard MNE
unix commands: no need to convert your files to a new system or database.
What you can do with MNE Python
-------------------------------
  - **Raw data visualization** to visualize recordings; you can also use
    *mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
- **Epoching**: Define epochs, baseline correction, handle conditions etc.
- **Averaging** to get Evoked data
  - **Compute SSP projectors** to remove ECG and EOG artifacts
- **Compute ICA** to remove artifacts or select latent sources.
- **Boundary Element Modeling**: single and three-layer BEM model
creation and solution computation.
- **Forward modeling**: BEM computation and mesh creation
(see :ref:`ch_forward`)
- **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
- **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map,
Time-Frequency MxNE)
- **Connectivity estimation** in sensor and source space
- **Visualization of sensor and source space data**
- **Time-frequency** analysis with Morlet wavelets (induced power,
intertrial coherence, phase lock value) also in the source space
- **Spectrum estimation** using multi-taper method
- **Mixed Source Models** combining cortical and subcortical structures
- **Dipole Fitting**
  - **Decoding** multivariate pattern analysis of M/EEG topographies
- **Compute contrasts** between conditions, between sensors, across
subjects etc.
- **Non-parametric statistics** in time, space and frequency
(including cluster-level)
- **Scripting** (batch and parallel computing)
What you're not supposed to do with MNE Python
----------------------------------------------
- **Brain and head surface segmentation** for use with BEM models -- use Freesurfer.
.. note:: The package is based on the FIF file format from Neuromag. It can read and
convert CTF, BTI/4D, KIT and various EEG formats to FIF.
Installation of the required materials
---------------------------------------
See :ref:`getting_started` with Python.
.. note:: The expected location for the MNE-sample data is
my-path-to/mne-python/examples. If you downloaded data and an example asks
you whether to download it again, make sure
the data reside in the examples directory and you run the script from its
current directory.
From IPython e.g. say::
cd examples/preprocessing
%run plot_find_ecg_artifacts.py
From raw data to evoked data
----------------------------
.. _ipython: http://ipython.scipy.org/
Now, launch `ipython`_ (Advanced Python shell) using the Qt backend, which is
best supported across systems::
    $ ipython --pylab=qt
First, load the mne package:
"""
import mne
##############################################################################
# If you'd like to turn information status messages off:
mne.set_log_level('WARNING')
##############################################################################
# But it's generally a good idea to leave them on:
mne.set_log_level('INFO')
##############################################################################
# You can set the default level by setting the environment variable
# "MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
mne.set_config('MNE_LOGGING_LEVEL','WARNING')
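##############################################################################
# A stored preference can be read back with mne.get_config, e.g.:
print(mne.get_config('MNE_LOGGING_LEVEL'))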
##############################################################################
# Note that the location of the mne-python preferences file (for easier manual
# editing) can be found using:
mne.get_config_path()
##############################################################################
# By default logging messages print to the console, but look at
# mne.set_log_file() to save output to a file.
#
# Access raw data
# ^^^^^^^^^^^^^^^
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
print(raw_fname)
##############################################################################
# .. note:: The MNE sample dataset should be downloaded automatically but be
# patient (approx. 2GB)
#
# Read data from file:
raw = mne.io.Raw(raw_fname)
print(raw)
print(raw.info)
##############################################################################
# Look at the channels in raw:
print(raw.ch_names)
##############################################################################
# Read and plot a segment of raw data
start, stop = raw.time_as_index([100, 115]) # 100 s to 115 s data segment
data, times = raw[:, start:stop]
print(data.shape)
print(times.shape)
data, times = raw[2:20:3, start:stop] # access underlying data
raw.plot()
##############################################################################
# Save a segment of 150s of raw data (MEG only):
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
exclude='bads')
raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
overwrite=True)
##############################################################################
# Define and read epochs
# ^^^^^^^^^^^^^^^^^^^^^^
#
# First extract events:
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])
##############################################################################
# Note that, by default, we use stim_channel='STI 014'. If you have a different
# system (e.g., a newer system that uses channel 'STI101' by default), you can
# use the following to set the default stim channel to use for finding events:
mne.set_config('MNE_STIM_CHANNEL', 'STI101')
##############################################################################
# Events are stored as a 2D numpy array where the first column is the sample
# index and the last one is the event id. It is therefore easy to manipulate.
#
# Define epochs parameters:
event_id = dict(aud_l=1, aud_r=2) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
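##############################################################################
# Since events is just a NumPy array, selecting e.g. only the left auditory
# events (trigger value 1, i.e. ``aud_l`` above) is a one-liner:
events_aud_l = events[events[:, 2] == 1]
print(events_aud_l[:5])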
##############################################################################
# Exclude some channels (original bads + 2 more):
raw.info['bads'] += ['MEG 2443', 'EEG 053']
##############################################################################
# The variable raw.info['bads'] is just a python list.
#
# Pick the good channels, excluding raw.info['bads']:
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False,
exclude='bads')
##############################################################################
# Alternatively one can restrict to magnetometers or gradiometers with:
mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
##############################################################################
# Define the baseline period:
baseline = (None, 0) # means from the first instant to t = 0
##############################################################################
# Define peak-to-peak rejection parameters for gradiometers, magnetometers and EOG:
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
##############################################################################
# Read epochs:
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=False, reject=reject)
print(epochs)
##############################################################################
# Get single epochs for one condition:
epochs_data = epochs['aud_l'].get_data()
print(epochs_data.shape)
##############################################################################
# epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time
# instants).
#
# Scipy supports read and write of matlab files. You can save your single
# trials with:
from scipy import io
io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
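##############################################################################
# and load them back from the .mat file with:
epochs_data_mat = io.loadmat('epochs_data.mat')['epochs_data']
print(epochs_data_mat.shape)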
##############################################################################
# or if you want to keep all the information about the data you can save your
# epochs in a fif file:
epochs.save('sample-epo.fif')
##############################################################################
# and read them later with:
saved_epochs = mne.read_epochs('sample-epo.fif')
##############################################################################
# Compute evoked responses for auditory responses by averaging and plot it:
evoked = epochs['aud_l'].average()
print(evoked)
evoked.plot()
##############################################################################
# .. topic:: Exercise
#
# 1. Extract the max value of each epoch
max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
##############################################################################
# It is also possible to read evoked data stored in a fif file:
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked1 = mne.read_evokeds(
evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Or another one stored in the same file:
evoked2 = mne.read_evokeds(
evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Compute a contrast:
contrast = evoked1 - evoked2
print(contrast)
##############################################################################
# Time-Frequency: Induced power and inter trial coherence
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Define parameters:
import numpy as np
n_cycles = 2 # number of cycles in Morlet wavelet
freqs = np.arange(7, 30, 3) # frequencies of interest
##############################################################################
# Compute induced power and phase-locking values and plot gradiometers:
from mne.time_frequency import tfr_morlet
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
return_itc=True, decim=3, n_jobs=1)
# power.plot()
##############################################################################
# Inverse modeling: MNE and dSPM on evoked and raw data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Import the required functions:
from mne.minimum_norm import apply_inverse, read_inverse_operator
##############################################################################
# Read the inverse operator:
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inverse_operator = read_inverse_operator(fname_inv)
##############################################################################
# Define the inverse parameters:
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"
##############################################################################
# Compute the inverse solution:
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
##############################################################################
# Save the source time courses to disk:
stc.save('mne_dSPM_inverse')
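##############################################################################
# The saved source estimate can be read back with, e.g.:
stc_read = mne.read_source_estimate('mne_dSPM_inverse')
print(stc_read)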
##############################################################################
# Now, let's compute dSPM on a raw file within a label:
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
label = mne.read_label(fname_label)
##############################################################################
# Compute inverse solution during the first 15s:
from mne.minimum_norm import apply_inverse_raw
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop)
##############################################################################
# Save result in stc files:
stc.save('mne_dSPM_raw_inverse_Aud')
##############################################################################
# What else can you do?
# ^^^^^^^^^^^^^^^^^^^^^
#
# - detect heart beat QRS component
# - detect eye blinks and EOG artifacts
# - compute SSP projections to remove ECG or EOG artifacts
# - compute Independent Component Analysis (ICA) to remove artifacts or
# select latent sources
# - estimate noise covariance matrix from Raw and Epochs
# - visualize cross-trial response dynamics using epochs images
# - compute forward solutions
# - estimate power in the source space
# - estimate connectivity in sensor and source space
# - morph stc from one brain to another for group studies
# - compute mass univariate statistics based on custom contrasts
# - visualize source estimates
# - export raw, epochs, and evoked data to other python data analysis
# libraries e.g. pandas
# - and many more things ...
#
# Want to know more ?
# ^^^^^^^^^^^^^^^^^^^
#
# Browse :ref:`examples-index` gallery.
print("Done!")
| bsd-3-clause |
pkruskal/scikit-learn | examples/exercises/plot_iris_exercise.py | 320 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
dimkal/mne-python | mne/io/fiff/tests/test_raw.py | 3 | 43394 | from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_concat():
"""Test RawFIF concatenation"""
# we trim the file to save lots of memory and some time
tempdir = _TempDir()
raw = read_raw_fif(test_fif_fname)
raw.crop(0, 2., copy=False)
test_name = op.join(tempdir, 'test_raw.fif')
raw.save(test_name)
# now run the standard test
_test_concat(read_raw_fif, test_name)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, False)
raw.preload_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, False)
raw_2.preload_data()
assert_equal(hash(raw), hash(raw_2))
# do NOT use assert_equal here, failing output is terrible
assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 1, False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
raw_read.anonymize()
assert_true(raw_read.info.get('subject_info') is None)
out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
raw_read.save(out_fname_anon, overwrite=True)
raw_read = Raw(out_fname_anon)
assert_true(raw_read.info.get('subject_info') is None)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10, False)
raw.preload_data()
raw.preload_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
# going in reverse order so the last fname is the first file (need later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = Raw(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
# with all data preloaded, result should be preloaded
raw_combo = Raw(fif_fname, preload=True)
raw_combo.append(Raw(fif_fname, preload=True))
assert_true(raw_combo.preload is True)
assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=False)])
assert_true(raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload=True)
assert_true(raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=True)],
preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=False)],
preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert_equal(len(raw), raw.n_times)
assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
"""Test writing and reading of split raw files
"""
tempdir = _TempDir()
raw_1 = Raw(fif_fname, preload=True)
split_fname = op.join(tempdir, 'split_raw.fif')
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = Raw(split_fname)
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
# test the case where the silly user specifies the split files
fnames = [split_fname]
fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_2 = Raw(fnames)
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
def test_load_bad_channels():
"""Test reading/writing of bad channels
"""
tempdir = _TempDir()
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = Raw(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = Raw(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'))
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Reset it
raw.info['bads'] = []
# Test bad case
assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.load_bad_channels(bad_file_wrong, force=True)
n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
assert_equal(n_found, 1) # there could be other irrelevant errors
# write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
"""Test IO for raw data (Neuromag + CTF + gz)
"""
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
with Raw(fif_fname) as r:
assert_true('Raw' in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with Raw(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
# Let's construct a simple test for IO first
raw = Raw(fif_fname).crop(0, 3.5, False)
raw.preload_data()
# put in some data that we know the values of
data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = op.join(tempdir, 'test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = Raw(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
# now let's do some real I/O
fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
for fname_in, fname_out in zip(fnames_in, fnames_out):
fname_out = op.join(tempdir, fname_out)
raw = Raw(fname_in)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5])
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_true(times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_equal(len(raw.info['dig']), 146)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert_true(raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert_equal(raw_.info[trans]['from'], from_id)
assert_equal(raw_.info[trans]['to'], to_id)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
raw.save(raw_badname)
Raw(raw_badname)
assert_true(len(w) > 0) # len(w) should be 2 but Travis sometimes has more
@testing.requires_testing_data
def test_io_complex():
"""Test IO with complex data types
"""
tempdir = _TempDir()
dtypes = [np.complex64, np.complex128]
raw = Raw(fif_fname, preload=True)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
# this should throw an error because it's complex
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
        # warning gets thrown on every instance b/c simplefilter('always')
assert_equal(len(w), 1)
raw2 = Raw(op.join(tempdir, 'raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw
"""
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
@testing.requires_testing_data
def test_proj():
"""Test SSP proj operations
"""
tempdir = _TempDir()
for proj in [True, False]:
raw = Raw(fif_fname, preload=False, proj=proj)
assert_true(all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
assert_raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert_equal(len(raw.info['projs']), n_proj - 1)
raw.add_proj(projs, remove_existing=False)
assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
raw.add_proj(projs, remove_existing=True)
assert_equal(len(raw.info['projs']), n_proj)
# test apply_proj() with and without preload
for preload in [True, False]:
raw = Raw(fif_fname, preload=preload, proj=False)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = Raw(fif_fname, preload=preload, proj=False)
# write the file with proj. activated, make sure proj has been applied
raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = Raw(fif_fname, preload=preload, proj=True)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [raw.info['projs'][-1]]
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, proj=True, preload=False)
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
"""Test preloading and modifying data
"""
tempdir = _TempDir()
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = np.random.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError as err:
if not preload:
continue
else:
raise err
tmp_fname = op.join(tempdir, 'raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface
"""
raw = Raw(fif_fname).crop(0, 7, False)
raw.preload_data()
sig_dec = 11
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_lp = raw.copy()
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw.copy()
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw.copy()
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw.copy()
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
raw_lp_iir = raw.copy()
raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
raw_hp_iir = raw.copy()
raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
raw_bp_iir = raw.copy()
raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
sig_dec)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# do a very simple check on line filtering
raw_bs = raw.copy()
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy()
raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
raw_notch = raw.copy()
raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files
"""
# split a concatenated file to test a difficult case
raw = Raw([fif_fname, fif_fname], preload=False)
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.crop(tmin, tmax, True)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
    # going in reverse order so the last fname is the first file (need it later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy()
raws[ri].crop(tmin, tmax, False)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.crop(0, None, True)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
@testing.requires_testing_data
def test_resample():
"""Test resample (with I/O and multiple files)
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 3, False)
raw.preload_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2)
assert_equal(raw_resamp.n_times, len(raw_resamp.times))
raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
assert_equal(raw.n_times, raw_resamp.n_times / 2)
assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1)
assert_equal(raw_resamp.info['sfreq'], sfreq)
assert_equal(raw._data.shape, raw_resamp._data.shape)
assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw._data[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw._data[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10.)
raw3.resample(10.)
raw4.resample(10.)
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert_equal(raw1.first_samp, raw3.first_samp)
assert_equal(raw1.last_samp, raw3.last_samp)
assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
# test resampling of stim channel
# basic decimation
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(8.)._data,
[[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation of multiple stim channels
raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
assert_allclose(raw.resample(8.)._data,
[[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation that could potentially drop events if the decimation is
# done naively
stim = [0, 0, 0, 1, 1, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(4.)._data,
[[0, 1, 1, 0]])
# two events are merged in this case (warning)
stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(8.)
assert_true(len(w) == 1)
# events are dropped in this case (warning)
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(4.)
assert_true(len(w) == 1)
# test resampling events: this should no longer give a warning
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
events = find_events(raw)
raw, events = raw.resample(4., events=events)
assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
# test copy flag
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
raw_resampled = raw.resample(4., copy=True)
assert_true(raw_resampled is not raw)
raw_resampled = raw.resample(4., copy=False)
assert_true(raw_resampled is raw)
# resample should still work even when no stim channel is present
raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
raw.resample(10)
assert_true(len(raw) == 10)
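# The stim-channel issues exercised above can be seen outside of MNE as well:
# naive decimation (keeping every k-th sample) can silently drop a brief
# event, which is why the warnings above exist and why passing events to
# ``resample`` is preferred. Minimal standalone sketch (plain NumPy, not an
# MNE API; the index remapping is only a rough stand-in for MNE's logic):
def _naive_decimation_sketch():
    stim = np.array([0, 1, 0, 0, 0, 0, 0, 0])  # one single-sample event
    naive = stim[::2]                           # keep every 2nd sample
    assert not naive.any()                      # the event silently vanished
    # Extracting event sample indices first and rescaling them survives:
    remapped = np.zeros(len(stim) // 2, int)
    remapped[np.nonzero(stim)[0] // 2] = stim[stim != 0]
    assert remapped.tolist() == [1, 0, 0, 0]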
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert
"""
raw = Raw(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_filt = raw.copy()
raw_filt.filter(10, 20)
raw_filt_2 = raw_filt.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw.apply_hilbert(picks)
raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
# Test custom n_fft
raw_filt.apply_hilbert(picks)
raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
atol=1e-13, rtol=1e-2)
assert_raises(ValueError, raw3.apply_hilbert, picks,
n_fft=raw3.n_times - 100)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
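# (The comparison above works because ``apply_hilbert`` without
# ``envelope=True`` stores the complex analytic signal, whose magnitude is the
# envelope that ``envelope=True`` computes directly, up to the numerical edge
# effects that motivate the tolerances used here.)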
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy
"""
raw = Raw(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
raw = Raw(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter"""
raw = Raw(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
@testing.requires_testing_data
def test_raw_index_as_time():
""" Test index as time conversion"""
raw = Raw(fif_fname, preload=True)
t0 = raw.index_as_time([0], True)[0]
t1 = raw.index_as_time([100], False)[0]
t2 = raw.index_as_time([100], True)[0]
assert_equal(t2 - t1, t0)
# ensure we can go back and forth
t3 = raw.index_as_time(raw.time_as_index([0], True), True)
assert_array_almost_equal(t3, [0.0], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
i0 = raw.time_as_index(raw.index_as_time([0], True), True)
assert_equal(i0[0], 0)
i1 = raw.time_as_index(raw.index_as_time([100], True), True)
assert_equal(i1[0], 100)
# Have to add small amount of time because we truncate via int casting
i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
assert_equal(i1[0], 100)
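# Note on the epsilon used above: ``index_as_time`` returns index / sfreq, and
# floating-point round-off can leave that value a hair below the true sample
# time; because ``time_as_index`` truncates via int casting, the round trip
# could then land on 99 instead of 100. The small positive offset keeps the
# truncation on the right side of the sample boundary.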
def test_add_channels():
"""Test raw splitting / re-appending channel types
"""
raw = Raw(test_fif_fname).crop(0, 1).preload_data()
raw_nopre = Raw(test_fif_fname, preload=False)
raw_eeg_meg = raw.pick_types(meg=True, eeg=True, copy=True)
raw_eeg = raw.pick_types(meg=False, eeg=True, copy=True)
raw_meg = raw.pick_types(meg=True, eeg=False, copy=True)
raw_stim = raw.pick_types(meg=False, eeg=False, stim=True, copy=True)
raw_new = raw_meg.add_channels([raw_eeg, raw_stim], copy=True)
assert_true(all(ch in raw_new.ch_names
for ch in raw_stim.ch_names + raw_meg.ch_names))
raw_new = raw_meg.add_channels([raw_eeg], copy=True)
    assert_true(all(ch in raw_new.ch_names for ch in raw.ch_names))
assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
assert_array_equal(raw_new[:, :][1], raw[:, :][1])
assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
# Now test errors
raw_badsf = raw_eeg.copy()
raw_badsf.info['sfreq'] = 3.1415927
raw_eeg = raw_eeg.crop(.5)
assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_raw_time_as_index():
""" Test time as index conversion"""
raw = Raw(fif_fname, preload=True)
first_samp = raw.time_as_index([0], True)[0]
assert_equal(raw.first_samp, -first_samp)
@testing.requires_testing_data
def test_save():
""" Test saving raw"""
tempdir = _TempDir()
raw = Raw(fif_fname, preload=False)
# can't write over file being read
assert_raises(ValueError, raw.save, fif_fname)
raw = Raw(fif_fname, preload=True)
# can't overwrite file without overwrite=True
assert_raises(IOError, raw.save, fif_fname)
# test abspath support
new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
raw.save(op.join(tempdir, new_fname), overwrite=True)
new_raw = Raw(op.join(tempdir, new_fname), preload=False)
assert_raises(ValueError, new_raw.save, new_fname)
# make sure we can overwrite the file we loaded when preload=True
new_raw = Raw(op.join(tempdir, new_fname), preload=True)
new_raw.save(op.join(tempdir, new_fname), overwrite=True)
os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
""" Test with statement """
for preload in [True, False]:
with Raw(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw():
"""Test Raw compensation
"""
tempdir = _TempDir()
raw1 = Raw(ctf_comp_fname, compensation=None)
assert_true(raw1.comp is None)
data1, times1 = raw1[:, :]
raw2 = Raw(ctf_comp_fname, compensation=3)
data2, times2 = raw2[:, :]
assert_true(raw2.comp is None) # unchanged (data come with grade 3)
assert_array_equal(times1, times2)
assert_array_equal(data1, data2)
raw3 = Raw(ctf_comp_fname, compensation=1)
data3, times3 = raw3[:, :]
assert_true(raw3.comp is not None)
assert_array_equal(times1, times3)
# make sure it's different with a different compensation:
assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
# Try IO with compensation
temp_file = op.join(tempdir, 'raw.fif')
raw1.save(temp_file, overwrite=True)
raw4 = Raw(temp_file)
data4, times4 = raw4[:, :]
assert_array_equal(times1, times4)
assert_array_equal(data1, data4)
# Now save the file that has modified compensation
    # and make sure we get the same data as input, i.e. the compensation
    # is undone
raw3.save(temp_file, overwrite=True)
raw5 = Raw(temp_file)
data5, times5 = raw5[:, :]
assert_array_equal(times1, times5)
assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
@requires_mne
def test_compensation_raw_mne():
"""Test Raw compensation by comparing with MNE
"""
tempdir = _TempDir()
def compensate_mne(fname, grad):
tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return Raw(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw = Raw(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.drop_channels(drop_ch)
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
# preload is True
raw = Raw(fif_fname, preload=True)
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.pick_channels(ch_names, copy=True) # copy is True
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.pick_channels(ch_names, copy=False) # copy is False
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
assert_raises(ValueError, raw.pick_channels, ch_names[0])
raw = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, raw.pick_channels, ch_names)
assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels
"""
raw1 = Raw(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
run_tests_if_main()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 113 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
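# The cluster-counting idiom used throughout this file relies on DBSCAN's
# label convention: noise samples are labelled -1 and real clusters are
# labelled 0, 1, 2, ...  Spelled out as a helper (illustrative only, the
# tests above and below simply inline it):
def _n_clusters_ignoring_noise(labels):
    return len(set(labels)) - (1 if -1 in labels else 0)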
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
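# The equivalence asserted above (an integer sample_weight behaves like
# repeating each point that many times) can also be seen on a tiny example.
# Illustrative sketch only, not exercised by the test suite; parameter values
# are chosen to mirror the eps=1.5 / min_samples=6 cases above:
def _weighted_vs_repeated_sketch():
    pts = np.array([[0.], [1.]])
    w = np.array([6, 1])
    core_w, labels_w = dbscan(pts, eps=1.5, min_samples=6, sample_weight=w)
    core_r, labels_r = dbscan(np.repeat(pts, w, axis=0), eps=1.5,
                              min_samples=6)
    # Both original points come out as core samples under weighting, every
    # repeated copy is core in the expanded formulation, and the single
    # cluster label agrees between the two.
    return (core_w, labels_w), (core_r, labels_r)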
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
arthur-gouveia/DAT210x | Module3/just-playing-module3.py | 1 | 1894 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 4 13:18:57 2016
Module 3 on DAT210x course scripts
More info on matplotlib histogram:
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
@author: Arthur Gouveia
"""
import pandas as pd
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from pandas.tools.plotting import parallel_coordinates
from sklearn.datasets import load_iris
MENU = '''
1: Single Histogram
2: Multiple Histogram
3: Scatter plot
4: 3D Scatter plot
5: Parallel Plot
Enter your choice:
'''
def histplot(data, **kwargs):
data.plot.hist(**kwargs)
def scatter2D(data, x, y, **kwargs):
data.plot.scatter(x=x, y=y, **kwargs)
def scatter3D(data, **kwargs):
fig = mpl.pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('Final Grade')
ax.set_ylabel('First Grade')
ax.set_zlabel('Daily Alcohol')
ax.scatter(data[0], data[1],
data[2], **kwargs)
mpl.pyplot.show()
def menu():
return input(MENU)
if __name__ == '__main__':
mpl.style.use('ggplot')
mpl.cm.cmapname = 'gray'
student_dataset = pd.read_csv("Datasets/students.data", index_col=0)
data = load_iris()
iris = pd.DataFrame(data.data, columns=data.feature_names)
iris['target_names'] = [data.target_names[i] for i in data.target]
choice = menu()
if choice == '1':
histplot(student_dataset.G3, alpha=0.5, normed=True)
elif choice == '2':
histplot(student_dataset[['G3', 'G2', 'G1']], alpha=0.5)
elif choice == '3':
scatter2D(student_dataset[['G1', 'G3']], x='G1', y='G3')
elif choice == '4':
scatter3D([student_dataset.G1, student_dataset.G3,
student_dataset['Dalc']], c='r', marker='o')
elif choice == '5':
parallel_coordinates(iris, 'target_names')
else:
print('Invalid option. Try again')
| mit |
yonglehou/scikit-learn | sklearn/decomposition/base.py | 310 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
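    # Note (illustration, not part of the scikit-learn API): with
    # ``components_`` of shape (n_components, n_features) the expression above
    # is equivalent to
    #     components_.T @ np.diag(exp_var_diff) @ components_
    #         + noise_variance_ * np.eye(n_features)
    # the in-place update of ``cov.flat[::len(cov) + 1]`` simply adds the
    # noise term to the diagonal without materialising an identity matrix.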
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
tclose/python-neo | neo/test/coretest/test_analogsignalarray.py | 3 | 37391 | # -*- coding: utf-8 -*-
"""
Tests of the neo.core.analogsignalarray.AnalogSignalArrayArray class
"""
import os
import pickle
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.analogsignalarray import AnalogSignalArray
from neo.core import AnalogSignal, Segment, RecordingChannelGroup
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = dict([(str(x), TEST_ANNOTATIONS[x]) for x in
range(len(TEST_ANNOTATIONS))])
def test__get_fake_values(self):
self.annotations['seed'] = 0
signal = get_fake_value('signal', pq.Quantity, seed=0, dim=2)
sampling_rate = get_fake_value('sampling_rate', pq.Quantity,
seed=1, dim=0)
t_start = get_fake_value('t_start', pq.Quantity, seed=2, dim=0)
channel_index = get_fake_value('channel_index', np.ndarray, seed=3,
dim=1, dtype='i')
name = get_fake_value('name', str, seed=4, obj=AnalogSignalArray)
description = get_fake_value('description', str, seed=5,
obj='AnalogSignalArray')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(AnalogSignalArray, annotate=False, seed=0)
res12 = get_fake_values('AnalogSignalArray', annotate=False, seed=0)
res21 = get_fake_values(AnalogSignalArray, annotate=True, seed=0)
res22 = get_fake_values('AnalogSignalArray', annotate=True, seed=0)
assert_arrays_equal(res11.pop('signal'), signal)
assert_arrays_equal(res12.pop('signal'), signal)
assert_arrays_equal(res21.pop('signal'), signal)
assert_arrays_equal(res22.pop('signal'), signal)
assert_arrays_equal(res11.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res12.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res21.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res22.pop('sampling_rate'), sampling_rate)
assert_arrays_equal(res11.pop('t_start'), t_start)
assert_arrays_equal(res12.pop('t_start'), t_start)
assert_arrays_equal(res21.pop('t_start'), t_start)
assert_arrays_equal(res22.pop('t_start'), t_start)
assert_arrays_equal(res11.pop('channel_index'), channel_index)
assert_arrays_equal(res12.pop('channel_index'), channel_index)
assert_arrays_equal(res21.pop('channel_index'), channel_index)
assert_arrays_equal(res22.pop('channel_index'), channel_index)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = 'AnalogSignalArray'
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, AnalogSignalArray))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = AnalogSignalArray
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, AnalogSignalArray))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
class TestAnalogSignalArrayConstructor(unittest.TestCase):
def test__create_from_list(self):
data = [(i, i, i) for i in range(10)] # 3 signals each with 10 samples
rate = 1000*pq.Hz
signal = AnalogSignalArray(data, sampling_rate=rate, units="mV")
assert_neo_object_is_compliant(signal)
self.assertEqual(signal.shape, (10, 3))
self.assertEqual(signal.t_start, 0*pq.ms)
self.assertEqual(signal.t_stop, len(data)/rate)
self.assertEqual(signal[9, 0], 9000*pq.uV)
def test__create_from_numpy_array(self):
data = np.arange(20.0).reshape((10, 2))
rate = 1*pq.kHz
signal = AnalogSignalArray(data, sampling_rate=rate, units="uV")
assert_neo_object_is_compliant(signal)
self.assertEqual(signal.t_start, 0*pq.ms)
self.assertEqual(signal.t_stop, data.shape[0]/rate)
self.assertEqual(signal[9, 0], 0.018*pq.mV)
self.assertEqual(signal[9, 1], 19*pq.uV)
def test__create_from_quantities_array(self):
data = np.arange(20.0).reshape((10, 2)) * pq.mV
rate = 5000*pq.Hz
signal = AnalogSignalArray(data, sampling_rate=rate)
assert_neo_object_is_compliant(signal)
self.assertEqual(signal.t_start, 0*pq.ms)
self.assertEqual(signal.t_stop, data.shape[0]/rate)
self.assertEqual(signal[9, 0], 18000*pq.uV)
def test__create_from_quantities_with_inconsistent_units_ValueError(self):
data = np.arange(20.0).reshape((10, 2)) * pq.mV
self.assertRaises(ValueError, AnalogSignalArray, data,
sampling_rate=1*pq.kHz, units="nA")
def test__create_with_copy_true_should_return_copy(self):
data = np.arange(20.0).reshape((10, 2)) * pq.mV
rate = 5000*pq.Hz
signal = AnalogSignalArray(data, copy=True, sampling_rate=rate)
assert_neo_object_is_compliant(signal)
data[3, 0] = 0.099*pq.V
self.assertNotEqual(signal[3, 0], 99*pq.mV)
def test__create_with_copy_false_should_return_view(self):
data = np.arange(20.0).reshape((10, 2)) * pq.mV
rate = 5000*pq.Hz
signal = AnalogSignalArray(data, copy=False, sampling_rate=rate)
assert_neo_object_is_compliant(signal)
data[3, 0] = 99*pq.mV
self.assertEqual(signal[3, 0], 99000*pq.uV)
# signal must not be 1D - should raise Exception if 1D
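# A test for that case might look like the following sketch (kept as a
# comment: the exact exception type raised for 1D input is an assumption
# here, not something asserted elsewhere in this file):
#     def test__create_from_1d_array_should_raise(self):
#         data = np.arange(10.0) * pq.mV
#         self.assertRaises(Exception, AnalogSignalArray, data,
#                           sampling_rate=1 * pq.kHz)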
class TestAnalogSignalArrayProperties(unittest.TestCase):
def setUp(self):
self.t_start = [0.0*pq.ms, 100*pq.ms, -200*pq.ms]
self.rates = [1*pq.kHz, 420*pq.Hz, 999*pq.Hz]
self.data = [np.arange(10.0).reshape((5, 2))*pq.nA,
np.arange(-100.0, 100.0, 10.0).reshape((4, 5))*pq.mV,
np.random.uniform(size=(100, 4))*pq.uV]
self.signals = [AnalogSignalArray(D, sampling_rate=r, t_start=t)
for r, D, t in zip(self.rates,
self.data,
self.t_start)]
def test__compliant(self):
for signal in self.signals:
assert_neo_object_is_compliant(signal)
def test__t_stop(self):
for i, signal in enumerate(self.signals):
targ = self.t_start[i] + self.data[i].shape[0]/self.rates[i]
self.assertEqual(signal.t_stop, targ)
def test__duration(self):
for signal in self.signals:
self.assertAlmostEqual(signal.duration,
signal.t_stop - signal.t_start,
delta=1e-15)
def test__sampling_period(self):
for signal, rate in zip(self.signals, self.rates):
self.assertEqual(signal.sampling_period, 1/rate)
def test__times(self):
for i, signal in enumerate(self.signals):
targ = np.arange(self.data[i].shape[0])
targ = targ/self.rates[i] + self.t_start[i]
assert_arrays_almost_equal(signal.times, targ, 1e-12*pq.ms)
def test__children(self):
signal = self.signals[0]
segment = Segment(name='seg1')
segment.analogsignalarrays = [signal]
segment.create_many_to_one_relationship()
rcg = RecordingChannelGroup(name='rcg1')
rcg.analogsignalarrays = [signal]
rcg.create_many_to_one_relationship()
self.assertEqual(signal._single_parent_objects,
('Segment', 'RecordingChannelGroup'))
self.assertEqual(signal._multi_parent_objects, ())
self.assertEqual(signal._single_parent_containers,
('segment', 'recordingchannelgroup'))
self.assertEqual(signal._multi_parent_containers, ())
self.assertEqual(signal._parent_objects,
('Segment', 'RecordingChannelGroup'))
self.assertEqual(signal._parent_containers,
('segment', 'recordingchannelgroup'))
self.assertEqual(len(signal.parents), 2)
self.assertEqual(signal.parents[0].name, 'seg1')
self.assertEqual(signal.parents[1].name, 'rcg1')
assert_neo_object_is_compliant(signal)
def test__repr(self):
for i, signal in enumerate(self.signals):
prepr = repr(signal)
targ = '<AnalogSignalArray(%s, [%s, %s], sampling rate: %s)>' % \
(repr(self.data[i]),
self.t_start[i],
self.t_start[i] + len(self.data[i])/self.rates[i],
self.rates[i])
self.assertEqual(prepr, targ)
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
for signal in self.signals:
prepr = pretty(signal)
targ = (('AnalogSignalArray in %s with %sx%s %s values\n' %
(signal.units, signal.shape[0], signal.shape[1],
signal.dtype)) +
('channel index: %s\n' % signal.channel_index) +
('sampling rate: %s\n' % signal.sampling_rate) +
('time: %s to %s' % (signal.t_start, signal.t_stop)))
self.assertEqual(prepr, targ)
class TestAnalogSignalArrayArrayMethods(unittest.TestCase):
def setUp(self):
self.data1 = np.arange(55.0).reshape((11, 5))
self.data1quant = self.data1 * pq.nA
self.signal1 = AnalogSignalArray(self.data1quant,
sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt',
arg1='test')
self.data2 = np.array([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]).T
self.data2quant = self.data2 * pq.mV
self.signal2 = AnalogSignalArray(self.data2quant,
sampling_rate=1.0*pq.Hz,
name='spam', description='eggs',
file_origin='testfile.txt',
arg1='test')
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
self.assertEqual(self.signal1.name, 'spam')
self.assertEqual(self.signal1.description, 'eggs')
self.assertEqual(self.signal1.file_origin, 'testfile.txt')
self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
assert_neo_object_is_compliant(self.signal2)
self.assertEqual(self.signal2.name, 'spam')
self.assertEqual(self.signal2.description, 'eggs')
self.assertEqual(self.signal2.file_origin, 'testfile.txt')
self.assertEqual(self.signal2.annotations, {'arg1': 'test'})
def test__index_dim1_should_return_analogsignal(self):
result = self.signal1[:, 0]
self.assertIsInstance(result, AnalogSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, None)
self.assertEqual(result.description, None)
self.assertEqual(result.file_origin, None)
self.assertEqual(result.annotations, {})
self.assertEqual(result.t_stop, self.signal1.t_stop)
self.assertEqual(result.t_start, self.signal1.t_start)
self.assertEqual(result.sampling_rate,
self.signal1.sampling_rate)
assert_arrays_equal(result, self.data1[:, 0])
def test__index_dim1_and_slice_dim0_should_return_analogsignal(self):
result = self.signal1[2:7, 0]
self.assertIsInstance(result, AnalogSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, None)
self.assertEqual(result.description, None)
self.assertEqual(result.file_origin, None)
self.assertEqual(result.annotations, {})
self.assertEqual(result.t_start,
self.signal1.t_start+2*self.signal1.sampling_period)
self.assertEqual(result.t_stop,
self.signal1.t_start+7*self.signal1.sampling_period)
self.assertEqual(result.sampling_rate,
self.signal1.sampling_rate)
assert_arrays_equal(result, self.data1[2:7, 0])
def test__index_dim0_should_return_quantity_array(self):
# i.e. values from all signals for a single point in time
result = self.signal1[3, :]
self.assertIsInstance(result, pq.Quantity)
self.assertFalse(hasattr(result, 'name'))
self.assertFalse(hasattr(result, 'description'))
self.assertFalse(hasattr(result, 'file_origin'))
self.assertFalse(hasattr(result, 'annotations'))
self.assertEqual(result.shape, (5,))
self.assertFalse(hasattr(result, "t_start"))
self.assertEqual(result.units, pq.nA)
assert_arrays_equal(result, self.data1[3, :])
def test__index_dim0_and_slice_dim1_should_return_quantity_array(self):
# i.e. values from a subset of signals for a single point in time
result = self.signal1[3, 2:5]
self.assertIsInstance(result, pq.Quantity)
self.assertFalse(hasattr(result, 'name'))
self.assertFalse(hasattr(result, 'description'))
self.assertFalse(hasattr(result, 'file_origin'))
self.assertFalse(hasattr(result, 'annotations'))
self.assertEqual(result.shape, (3,))
self.assertFalse(hasattr(result, "t_start"))
self.assertEqual(result.units, pq.nA)
assert_arrays_equal(result, self.data1[3, 2:5])
def test__index_as_string_IndexError(self):
self.assertRaises(IndexError, self.signal1.__getitem__, 5.)
def test__slice_both_dimensions_should_return_analogsignalarray(self):
result = self.signal1[0:3, 0:3]
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targ = AnalogSignalArray([[0, 1, 2], [5, 6, 7], [10, 11, 12]],
dtype=float, units="nA",
sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(targ)
self.assertEqual(result.t_stop, targ.t_stop)
self.assertEqual(result.t_start, targ.t_start)
self.assertEqual(result.sampling_rate, targ.sampling_rate)
self.assertEqual(result.shape, targ.shape)
assert_same_sub_schema(result, targ)
assert_arrays_equal(result, self.data1[0:3, 0:3])
def test__slice_only_first_dimension_should_return_analogsignalarray(self):
result = self.signal1[2:7]
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.shape, (5, 5))
self.assertEqual(result.t_start,
self.signal1.t_start+2*self.signal1.sampling_period)
self.assertEqual(result.t_stop,
self.signal1.t_start+7*self.signal1.sampling_period)
self.assertEqual(result.sampling_rate, self.signal1.sampling_rate)
assert_arrays_equal(result, self.data1[2:7])
def test__getitem_should_return_single_quantity(self):
# quantities drops the units in this case
self.assertEqual(self.signal1[9, 3], 48000*pq.pA)
self.assertEqual(self.signal1[9][3], self.signal1[9, 3])
self.assertTrue(hasattr(self.signal1[9, 3], 'units'))
self.assertRaises(IndexError, self.signal1.__getitem__, (99, 73))
def test_comparison_operators(self):
assert_arrays_equal(self.signal1[0:3, 0:3] >= 5*pq.nA,
np.array([[False, False, False],
[True, True, True],
[True, True, True]]))
assert_arrays_equal(self.signal1[0:3, 0:3] >= 5*pq.pA,
np.array([[False, True, True],
[True, True, True],
[True, True, True]]))
def test__comparison_with_inconsistent_units_should_raise_Exception(self):
self.assertRaises(ValueError, self.signal1.__gt__, 5*pq.mV)
def test__simple_statistics(self):
self.assertEqual(self.signal1.max(), 54000*pq.pA)
self.assertEqual(self.signal1.min(), 0*pq.nA)
self.assertEqual(self.signal1.mean(), 27*pq.nA)
self.assertEqual(self.signal1.std(), self.signal1.magnitude.std()*pq.nA)
self.assertEqual(self.signal1.var(), self.signal1.magnitude.var()*pq.nA**2)
def test__rescale_same(self):
result = self.signal1.copy()
result = result.rescale(pq.nA)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.units, 1*pq.nA)
assert_arrays_equal(result, self.data1)
assert_same_sub_schema(result, self.signal1)
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.pA)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.units, 1*pq.pA)
assert_arrays_almost_equal(np.array(result), self.data1*1000., 1e-10)
def test__time_slice(self):
t_start = 2 * pq.s
t_stop = 4 * pq.s
result = self.signal2.time_slice(t_start, t_stop)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targ = AnalogSignalArray(np.array([[2., 3.], [2., 3.]]).T,
sampling_rate=1.0*pq.Hz, units='mV',
t_start=t_start,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(result)
self.assertEqual(result.t_stop, t_stop)
self.assertEqual(result.t_start, t_start)
self.assertEqual(result.sampling_rate, targ.sampling_rate)
assert_arrays_equal(result, targ)
assert_same_sub_schema(result, targ)
def test__time_slice__out_of_bounds_ValueError(self):
t_start_good = 2 * pq.s
t_stop_good = 4 * pq.s
t_start_bad = -2 * pq.s
t_stop_bad = 40 * pq.s
self.assertRaises(ValueError, self.signal2.time_slice,
t_start_good, t_stop_bad)
self.assertRaises(ValueError, self.signal2.time_slice,
t_start_bad, t_stop_good)
self.assertRaises(ValueError, self.signal2.time_slice,
t_start_bad, t_stop_bad)
def test__time_equal(self):
t_start = 0 * pq.s
t_stop = 6 * pq.s
result = self.signal2.time_slice(t_start, t_stop)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.t_stop, t_stop)
self.assertEqual(result.t_start, t_start)
assert_arrays_equal(result, self.signal2)
assert_same_sub_schema(result, self.signal2)
def test__time_slice__offset(self):
self.signal2.t_start = 10.0 * pq.s
assert_neo_object_is_compliant(self.signal2)
t_start = 12 * pq.s
t_stop = 14 * pq.s
result = self.signal2.time_slice(t_start, t_stop)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targ = AnalogSignalArray(np.array([[2., 3.], [2., 3.]]).T,
t_start=12.0*pq.ms,
sampling_rate=1.0*pq.Hz, units='mV',
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(result)
self.assertEqual(self.signal2.t_start, 10.0 * pq.s)
self.assertEqual(result.t_stop, t_stop)
self.assertEqual(result.t_start, t_start)
self.assertEqual(result.sampling_rate, targ.sampling_rate)
assert_arrays_equal(result, targ)
assert_same_sub_schema(result, targ)
def test__time_slice__different_units(self):
self.signal2.t_start = 10.0 * pq.ms
assert_neo_object_is_compliant(self.signal2)
t_start = 2 * pq.s + 10.0 * pq.ms
t_stop = 4 * pq.s + 10.0 * pq.ms
result = self.signal2.time_slice(t_start, t_stop)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targ = AnalogSignalArray(np.array([[2., 3.], [2., 3.]]).T,
t_start=t_start.rescale(pq.ms),
sampling_rate=1.0*pq.Hz, units='mV',
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(result)
assert_neo_object_is_compliant(self.signal2)
self.assertEqual(self.signal2.t_start, 10.0 * pq.ms)
self.assertAlmostEqual(result.t_stop, t_stop, delta=1e-12*pq.ms)
self.assertAlmostEqual(result.t_start, t_start, delta=1e-12*pq.ms)
assert_arrays_almost_equal(result.times, targ.times, 1e-12*pq.ms)
self.assertEqual(result.sampling_rate, targ.sampling_rate)
assert_arrays_equal(result, targ)
assert_same_sub_schema(result, targ)
def test__time_slice__no_explicit_time(self):
self.signal2.t_start = 10.0 * pq.ms
assert_neo_object_is_compliant(self.signal2)
t1 = 2 * pq.s + 10.0 * pq.ms
t2 = 4 * pq.s + 10.0 * pq.ms
        for t_start, t_stop in [(t1, None), (None, None), (None, t2)]:
            t_start_targ = t1 if t_start is not None else self.signal2.t_start
            t_stop_targ = t2 if t_stop is not None else self.signal2.t_stop
result = self.signal2.time_slice(t_start, t_stop)
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targ_ind = np.where((self.signal2.times >= t_start_targ) &
(self.signal2.times < t_stop_targ))
targ_array = self.signal2.magnitude[targ_ind]
targ = AnalogSignalArray(targ_array,
t_start=t_start_targ.rescale(pq.ms),
sampling_rate=1.0*pq.Hz, units='mV',
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(result)
assert_neo_object_is_compliant(self.signal2)
self.assertEqual(self.signal2.t_start, 10.0 * pq.ms)
self.assertAlmostEqual(result.t_stop, t_stop_targ, delta=1e-12*pq.ms)
self.assertAlmostEqual(result.t_start, t_start_targ, delta=1e-12*pq.ms)
assert_arrays_almost_equal(result.times, targ.times, 1e-12*pq.ms)
self.assertEqual(result.sampling_rate, targ.sampling_rate)
assert_arrays_equal(result, targ)
assert_same_sub_schema(result, targ)
class TestAnalogSignalArrayEquality(unittest.TestCase):
def test__signals_with_different_data_complement_should_be_not_equal(self):
signal1 = AnalogSignalArray(np.arange(55.0).reshape((11, 5)),
units="mV", sampling_rate=1*pq.kHz)
signal2 = AnalogSignalArray(np.arange(55.0).reshape((11, 5)),
units="mV", sampling_rate=2*pq.kHz)
self.assertNotEqual(signal1, signal2)
assert_neo_object_is_compliant(signal1)
assert_neo_object_is_compliant(signal2)
class TestAnalogSignalArrayCombination(unittest.TestCase):
def setUp(self):
self.data1 = np.arange(55.0).reshape((11, 5))
self.data1quant = self.data1 * pq.mV
self.signal1 = AnalogSignalArray(self.data1quant,
sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt',
arg1='test')
self.data2 = np.arange(100.0, 155.0).reshape((11, 5))
self.data2quant = self.data2 * pq.mV
self.signal2 = AnalogSignalArray(self.data2quant,
sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt',
arg1='test')
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
self.assertEqual(self.signal1.name, 'spam')
self.assertEqual(self.signal1.description, 'eggs')
self.assertEqual(self.signal1.file_origin, 'testfile.txt')
self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
assert_neo_object_is_compliant(self.signal2)
self.assertEqual(self.signal2.name, 'spam')
self.assertEqual(self.signal2.description, 'eggs')
self.assertEqual(self.signal2.file_origin, 'testfile.txt')
self.assertEqual(self.signal2.annotations, {'arg1': 'test'})
def test__add_const_quantity_should_preserve_data_complement(self):
result = self.signal1 + 0.065*pq.V
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
# time zero, signal index 4
assert_arrays_equal(result, self.data1 + 65)
self.assertEqual(self.signal1[0, 4], 4*pq.mV)
self.assertEqual(result[0, 4], 69000*pq.uV)
self.assertEqual(self.signal1.t_start, result.t_start)
self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
def test__add_two_consistent_signals_should_preserve_data_complement(self):
result = self.signal1 + self.signal2
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
targdata = np.arange(100.0, 210.0, 2.0).reshape((11, 5))
targ = AnalogSignalArray(targdata, units="mV",
sampling_rate=1*pq.kHz,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(targ)
assert_arrays_equal(result, targdata)
assert_same_sub_schema(result, targ)
def test__add_signals_with_inconsistent_data_complement_ValueError(self):
self.signal2.sampling_rate = 0.5*pq.kHz
assert_neo_object_is_compliant(self.signal2)
self.assertRaises(ValueError, self.signal1.__add__, self.signal2)
def test__subtract_const_should_preserve_data_complement(self):
result = self.signal1 - 65*pq.mV
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(np.array(self.signal1[1, 4]), 9)
self.assertEqual(np.array(result[1, 4]), -56)
assert_arrays_equal(result, self.data1 - 65)
self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
def test__subtract_from_const_should_return_signal(self):
result = 10*pq.mV - self.signal1
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(np.array(self.signal1[1, 4]), 9)
self.assertEqual(np.array(result[1, 4]), 1)
assert_arrays_equal(result, 10 - self.data1)
self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
def test__mult_by_const_float_should_preserve_data_complement(self):
result = self.signal1*2
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(np.array(self.signal1[1, 4]), 9)
self.assertEqual(np.array(result[1, 4]), 18)
assert_arrays_equal(result, self.data1*2)
self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
def test__divide_by_const_should_preserve_data_complement(self):
result = self.signal1/0.5
self.assertIsInstance(result, AnalogSignalArray)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(np.array(self.signal1[1, 4]), 9)
self.assertEqual(np.array(result[1, 4]), 18)
assert_arrays_equal(result, self.data1/0.5)
self.assertEqual(self.signal1.sampling_rate, result.sampling_rate)
def test__merge(self):
self.signal1.description = None
self.signal1.file_origin = None
assert_neo_object_is_compliant(self.signal1)
data3 = np.arange(1000.0, 1066.0).reshape((11, 6)) * pq.uV
data3scale = data3.rescale(self.data1quant.units)
signal2 = AnalogSignalArray(self.data1quant,
sampling_rate=1*pq.kHz,
channel_index=np.arange(5),
name='signal2',
description='test signal',
file_origin='testfile.txt')
signal3 = AnalogSignalArray(data3,
units="uV", sampling_rate=1*pq.kHz,
channel_index=np.arange(5, 11),
name='signal3',
description='test signal',
file_origin='testfile.txt')
signal4 = AnalogSignalArray(data3,
units="uV", sampling_rate=1*pq.kHz,
name='signal4',
description='test signal',
file_origin='testfile.txt')
merged13 = self.signal1.merge(signal3)
merged23 = signal2.merge(signal3)
merged24 = signal2.merge(signal4)
mergeddata13 = np.array(merged13)
mergeddata23 = np.array(merged23)
mergeddata24 = np.array(merged24)
targdata13 = np.hstack([self.data1quant, data3scale])
targdata23 = np.hstack([self.data1quant, data3scale])
targdata24 = np.hstack([self.data1quant, data3scale])
assert_neo_object_is_compliant(signal2)
assert_neo_object_is_compliant(signal3)
assert_neo_object_is_compliant(merged13)
assert_neo_object_is_compliant(merged23)
assert_neo_object_is_compliant(merged24)
self.assertEqual(merged13[0, 4], 4*pq.mV)
self.assertEqual(merged23[0, 4], 4*pq.mV)
self.assertEqual(merged13[0, 5], 1*pq.mV)
self.assertEqual(merged23[0, 5], 1*pq.mV)
self.assertEqual(merged13[10, 10], 1.065*pq.mV)
self.assertEqual(merged23[10, 10], 1.065*pq.mV)
self.assertEqual(merged13.t_stop, self.signal1.t_stop)
self.assertEqual(merged23.t_stop, self.signal1.t_stop)
self.assertEqual(merged13.name, 'merge(spam, signal3)')
self.assertEqual(merged23.name, 'merge(signal2, signal3)')
self.assertEqual(merged13.description, 'merge(None, test signal)')
self.assertEqual(merged23.description, 'test signal')
self.assertEqual(merged13.file_origin, 'merge(None, testfile.txt)')
self.assertEqual(merged23.file_origin, 'testfile.txt')
assert_arrays_equal(mergeddata13, targdata13)
assert_arrays_equal(mergeddata23, targdata23)
assert_arrays_equal(mergeddata24, targdata24)
assert_arrays_equal(merged13.channel_indexes, np.arange(5, 11))
assert_arrays_equal(merged23.channel_indexes, np.arange(11))
assert_arrays_equal(merged24.channel_indexes, np.arange(5))
class TestAnalogSignalArrayFunctions(unittest.TestCase):
def test__pickle(self):
signal1 = AnalogSignalArray(np.arange(55.0).reshape((11, 5)),
units="mV", sampling_rate=1*pq.kHz,
channel_index=np.arange(5))
fobj = open('./pickle', 'wb')
pickle.dump(signal1, fobj)
fobj.close()
fobj = open('./pickle', 'rb')
try:
signal2 = pickle.load(fobj)
except ValueError:
signal2 = None
assert_arrays_equal(signal1, signal2)
assert_neo_object_is_compliant(signal1)
assert_neo_object_is_compliant(signal2)
self.assertEqual(list(signal1.channel_indexes), [0, 1, 2, 3, 4])
self.assertEqual(list(signal1.channel_indexes),
list(signal2.channel_indexes))
fobj.close()
os.remove('./pickle')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
ageron/tensorflow | tensorflow/tools/docs/generate_lib.py | 16 | 23300 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
search_hints=True,
site_api_path=''):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_api_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
# Make output_dir.
if not os.path.isabs(output_dir):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below:
# - module name(string):classes and functions the module contains(list)
module_children = {}
# - symbol name(string):pathname (string)
symbol_to_file = {}
# Collect redirects for an api _redirects.yaml file.
redirects = []
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
parser.is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if tf_inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if tf_inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
# This function returns raw bytes in PY2 or unicode in PY3.
if search_hints:
content = [page_info.get_metadata_html()]
else:
content = ['']
content.append(pretty_docs.build_md_page(page_info))
text = '\n'.join(content)
if six.PY3:
text = text.encode('utf-8')
with open(path, 'wb') as f:
f.write(text)
except OSError:
raise OSError(
'Cannot write documentation for %s to %s' % (full_name, directory))
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
for dup in duplicates:
from_path = os.path.join(site_api_path, dup.replace('.', '/'))
to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
redirects.append((
os.path.join('/', from_path),
os.path.join('/', to_path)))
if redirects:
redirects = sorted(redirects)
template = ('- from: {}\n'
' to: {}\n')
redirects = [template.format(f, t) for f, t in redirects]
api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
with open(api_redirects_path, 'w') as redirect_file:
redirect_file.write('redirects:\n')
redirect_file.write(''.join(redirects))
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
indent_num = module.count('.')
# Don't list `tf.submodule` inside `tf`
indent_num = max(indent_num, 1)
indent = ' '*indent_num
if indent_num > 1:
        # tf.contrib.bayesflow.entropy will be under
        #   tf.contrib->bayesflow->entropy
title = module.split('.')[-1]
else:
title = module
header = [
'- title: ' + title,
' section:',
' - title: Overview',
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[module])]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[full_name])]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(
parser.generate_global_index(root_title, parser_config.index,
parser_config.reference_resolver))
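# A rough sketch of the `_toc.yaml` layout produced above (the module and symbol
# names are hypothetical, and `site_api_path` is assumed empty):
#
#   # Automatically generated file; please do not edit
#   toc:
#     - title: tf.foo
#       section:
#       - title: Overview
#         path: /api_docs/python/tf/foo
#       - title: bar
#         path: /api_docs/python/tf/foo/bar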
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
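# A minimal sketch of how the private/do-not-descend maps get merged (the dict
# contents here are hypothetical, not taken from real call sites):
#
#   add_from = {'tf.foo': ['bar']}
#   add_to = {'tf.foo': ['baz'], 'tf.qux': ['quux']}
#   add_dict_to_dict(add_from, add_to)
#   # add_to is now {'tf.foo': ['baz', 'bar'], 'tf.qux': ['quux']}
#
# Lists for existing keys are extended in place; new keys are inserted by reference.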
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.compat': ['v1', 'v2'],
'tf.contrib.estimator': ['python'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(markdaoust): Use docs_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
"""A `docs_controls` aware API-crawler."""
def _is_private(self, path, name, obj):
if doc_controls.should_skip(obj):
return True
return super(DocControlsAwareCrawler, self)._is_private(path, name, obj)
def extract(py_modules,
private_map,
do_not_descend_map,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = visitor_cls(py_modules[0][0])
api_visitor = DocControlsAwareCrawler(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
api_visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
if not os.path.isabs(src_dir):
raise ValueError("'src_dir' must be an absolute path.\n"
" src_dir='%s'" % src_dir)
if not os.path.exists(src_dir):
raise ValueError("'src_dir' path must exist.\n"
" src_dir='%s'" % src_dir)
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'):
continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
if title_parser.title is None:
msg = ('`{}` has no markdown title (# title)'.format(
os.path.join(dirpath, base_name)))
raise ValueError(msg)
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
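# A rough sketch of the resulting index, assuming a hypothetical source tree with
# 'programmers_guide/variables.md' whose first heading is '# Variables':
#
#   doc_index['variables']                   -> _DocInfo('programmers_guide/variables.md', 'Variables')
#   doc_index['programmers_guide/variables'] -> the same _DocInfo object
#
# i.e. each doc is keyed both by its bare file name and by its
# parent-directory-qualified name.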
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
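# A hedged example of the rendered link (the guide name, titles, and tag below are
# hypothetical): a _GuideRef('math_ops', 'Math', 'Arithmetic', 'arithmetic') gives
#   make_md_link('../') == '[Math > Arithmetic](../api_guides/python/math_ops#arithmetic)'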
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index the file and section of each `symbol` reference."""
for match in parser.AUTO_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit id tag.
"section" here refers to blocks delimited by second level headings.
"""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
def update_id_tags_inplace(src_dir):
"""Set explicit ids on all second-level headings to ensure back-links work.
Args:
src_dir: The directory of md-files to convert (inplace).
"""
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
for base_name in filenames:
if not base_name.endswith('.md'):
continue
full_path = os.path.join(src_dir, dirpath, base_name)
# Tag updater loads the file, makes the replacements, and returns the
# modified file contents
content = tag_updater.process(full_path)
with open(full_path, 'w') as f:
f.write(content)
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir,
output_dir,
reference_resolver,
file_pattern='*.md',
api_docs_relpath='api_docs'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
written to `output_dir`.
`{"__init__.py","OWNERS","README.txt"}` are skipped.
Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
Also, files in the `api_guides/python` directory get explicit ids set on all
heading-2s to ensure back-links work.
Args:
src_dir: The directory to convert files from.
output_dir: The root directory to write the resulting files to.
reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching `file_pattern`,
using fnmatch. Non-matching files are copied unchanged.
api_docs_relpath: Relative-path string to the api_docs, from the src_dir.
"""
# Iterate through all the source files and process them.
for dirpath, _, filenames in os.walk(src_dir):
depth = os.path.relpath(src_dir, start=dirpath)
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.join(depth, api_docs_relpath, 'python')
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
continue
full_in_path = os.path.join(dirpath, base_name)
# Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
# Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
if full_in_path != full_out_path:
shutil.copyfile(full_in_path, full_out_path)
continue
with open(full_in_path, 'rb') as f:
content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(content.encode('utf-8'))
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._private_map = _get_default_private_map()
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
self.argument_parser.add_argument(
'--no_search_hints',
dest='search_hints',
action='store_false',
default=True)
self.argument_parser.add_argument(
'--site_api_path',
type=str, default='',
        help='The path from the site-root to the api_docs '
        'directory for this project')
self.argument_parser.add_argument(
'--api_cache_out_path',
type=str,
default=None,
help='Path to store a json-serialized api-index, so links can be '
'inserted into docs without rebuilding the api_docs')
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.')
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=tempfile.mkdtemp(),
required=False,
help='Optional directory of source docs to add api_docs links to')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_private_map(self, d):
add_dict_to_dict(d, self._private_map)
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_private_map(self, d):
self._private_map = d
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._private_map,
self._do_not_descend_map)
def build(self, flags):
"""Build all the docs.
This produces two outputs
python api docs:
* generated from modules set with `set_py_modules`.
* written to '{FLAGS.output_dir}/api_docs/python/'
non-api docs:
* Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
* '@{}' references in '.md' files are replaced with links.
* '.md' files under 'api_guides/python' have explicit ids set for their
second level headings.
Args:
flags:
* src_dir: Where to fetch the non-api-docs.
* base_dir: Base of the docs directory (Used to build correct
relative links).
* output_dir: Where to write the resulting docs.
Returns:
The number of errors encountered while processing.
"""
# Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
if getattr(flags, 'api_cache_out_path', None):
reference_resolver.to_json_file(flags.api_cache_out_path)
# Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
# Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title,
search_hints=getattr(flags, 'search_hints', True),
site_api_path=getattr(flags, 'site_api_path', ''))
# Replace all the @{} references in files under `FLAGS.src_dir`
replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
# Fix the tags in the guide dir.
guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
if os.path.exists(guide_dir):
update_id_tags_inplace(guide_dir)
# Report all errors found by the reference resolver, and return the error
# code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
| apache-2.0 |
mxjl620/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 35 | 11709 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with fewer samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (just check that
    # fit runs without failing)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test that the explained variance ratios (normalized eigenvalues) sum to 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
    # Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
ageron/tensorflow | tensorflow/contrib/tpu/python/tpu/keras_support.py | 1 | 87984 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""*Experimental* support for running Keras models on the TPU.
To use, wrap your model with the `keras_support.tpu_model` function.
Example usage:
```
image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))(image)
flattened = tf.keras.layers.Flatten()(c1)
logits = tf.keras.layers.Dense(10, activation='softmax')(flattened)
model = tf.keras.Model(inputs=[image], outputs=[logits])
resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
strategy = keras_support.TPUDistributionStrategy(resolver)
model = keras_support.tpu_model(model, strategy=strategy)
# Only TF optimizers are currently supported.
model.compile(optimizer=tf.train.AdamOptimizer(), ...)
# `images` and `labels` should be Numpy arrays. Support for tensor input
# (e.g. datasets) is planned.
model.fit(images, labels)
```
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import sys
import time
import numpy as np
import six
from tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver as tpu_cluster_resolver_lib
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import keras_tpu_variables
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python import tf2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers as keras_optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# TODO(b/114775106): temporary shim to optionally initialize the TPU
# This increases the odds our session is initialized, but shouldn't be needed.
_TEST_REWRITE_OP = None
def _maybe_initialize_tpu(session):
"""Initialize the TPU if it has not already been initialized."""
global _TEST_REWRITE_OP
try:
    # Try to use cached version to avoid another round of graph optimization.
test_rewrite_op = _TEST_REWRITE_OP
if (test_rewrite_op is None or
test_rewrite_op[0].graph != ops.get_default_graph()):
def test_op():
return constant_op.constant(1) + constant_op.constant(1)
test_rewrite_op = tpu.rewrite(test_op)
_TEST_REWRITE_OP = test_rewrite_op
session.run(test_rewrite_op)
except errors.FailedPreconditionError as _:
session.run(tpu.initialize_system())
@contextlib.contextmanager
def _tpu_session_context():
"""Initialize the TPU and cleans cache entries for bad sessions."""
try:
_maybe_initialize_tpu(K.get_session())
yield
except (errors.FailedPreconditionError, errors.AbortedError) as e:
K.clear_session()
raise Exception("""
An error occurred connecting or initializing your TPU.
The session has been reset. Re-run keras_to_tpu_model to create a new session.
""" + str(e))
def setup_tpu_session(cluster_resolver):
"""Construct or return a `tf.Session` connected to the given cluster."""
master = cluster_resolver.master()
# Use the existing session if we're already connected to this TPU
# N.B K.get_session() is a non-trivial operation, and may fail if the remote
# session has been reset.
try:
default_session = K.get_session()
if (default_session._target == master and
getattr(default_session, '_tpu_initialized', None)):
return
except errors.AbortedError as _:
# We lost the remote session and need to re-initialize.
logging.warning('Lost remote session: creating a new session.')
cluster_spec = cluster_resolver.cluster_spec()
config = config_pb2.ConfigProto(isolate_session_state=True)
if cluster_spec:
config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
tpu_session = tf_session.Session(target=master, config=config)
tpu_session.run(tpu.initialize_system())
tpu_session._tpu_initialized = True
# N.B. We have to call `K.set_session()` AND set our session as the
# TF default. `K.get_session()` surprisingly does not return the value
# supplied by K.set_session otherwise.
K.set_session(tpu_session)
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_spec = tpu_cluster_resolver.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master, cluster_def=cluster_def, query_topology=False))
return tpu_system_metadata
class TPUDistributionStrategy(object):
"""The strategy to run Keras model on TPU."""
def __init__(self, tpu_cluster_resolver=None, using_single_core=False):
"""Construct a TPUDistributionStrategy.
Args:
tpu_cluster_resolver: Any instance of `TPUClusterResolver`. If None, will
create one with '' as master address.
      using_single_core: Bool. This is a debugging option, which might be
        removed in the future once the model replication functionality is
        mature enough. If `False` (default behavior), the system automatically
        finds the best configuration, in terms of number of TPU cores, for the
        model replication, typically using all available TPU cores. If set to
        `True`, model replication is forced onto a single core, i.e., no
        replication.
Exception: No TPU Found on the given worker.
"""
if tf2.enabled():
raise RuntimeError(
'Keras support is now deprecated in support of TPU Strategy. '
'Please follow the distribution strategy guide on tensorflow.org '
'to migrate to the 2.0 supported version.')
else:
logging.warning(
'Keras support is now deprecated in support of TPU Strategy. '
'Please follow the distribution strategy guide on tensorflow.org '
'to migrate to the 2.0 supported version.')
if tpu_cluster_resolver is None:
tpu_cluster_resolver = tpu_cluster_resolver_lib.TPUClusterResolver('')
metadata = get_tpu_system_metadata(tpu_cluster_resolver)
self._tpu_metadata = metadata
self._tpu_cluster_resolver = tpu_cluster_resolver
self._num_cores = 1 if using_single_core else metadata.num_cores
# Walk device list to identify TPU worker for enqueue/dequeue operations.
worker_re = re.compile('/job:([^/]+)')
for device in metadata.devices:
if 'TPU:0' in device.name:
self._worker_name = worker_re.search(device.name).group(1)
return
raise Exception('No TPU found on given worker.')
def _make_assignment_for_model(self, cpu_model):
"""Makes a `TPUAssignment` for the passed in `cpu_model`."""
num_cores = self._num_cores
if num_cores > 1 and cpu_model.stateful:
logging.warning(
'Model replication does not currently support stateful models. '
'Degrading to a single core.')
num_cores = 1
return TPUAssignment(worker_name=self._worker_name, num_cores=num_cores)
class TPUAssignment(object):
"""This is object holding TPU resources assignment for the concrete model.
`TPUDistributionStrategy` is responsible to create the instance of
`TPUAssignment`, so, it can dynamically adjust the `num_cores` to use based on
model and input batch sizes.
"""
def __init__(self, worker_name, num_cores):
self._worker_name = worker_name
self._num_cores = num_cores
@property
def worker_name(self):
return self._worker_name
@property
def num_towers(self):
# TODO(xiejw): Support automatically assign num_cores based on inputs.
return self._num_cores
class TPUEmbedding(embeddings.Embedding):
"""TPU compatible embedding layer.
The default Keras layer is not TPU compatible. This layer is a drop-in
replacement: it has the same behavior and will work on CPU and GPU devices.
"""
def build(self, input_shape):
if input_shape[0] is None:
raise ValueError(
'TPUEmbeddings must have a fixed input_length or input shape.')
return super(TPUEmbedding, self).build(input_shape)
def call(self, inputs):
if K.dtype(inputs) != 'int32':
inputs = math_ops.cast(inputs, 'int32')
inputs = array_ops.one_hot(inputs, self.input_dim)
return math_ops.tensordot(inputs, self.embeddings, 1)
def _cross_replica_concat(tensor, core_id, num_cores, name):
"""Concatenate `tensor` across cores.
Args:
    tensor: The tensor to be concatenated. Must be bfloat16, float32 or int32.
core_id: Tensor indicating the current TPU core.
num_cores: Python int. The total number of TPU cores in the system.
name: The string name to print for debugging.
Returns:
The same concatenated Tensor on each core.
"""
input_dtype = tensor.dtype
if input_dtype not in [dtypes.bfloat16, dtypes.float32, dtypes.int32]:
    raise TypeError('For model replication, only bfloat16, float32 and int32 '
                    'are supported for model outputs and targets. Got {} for '
'{}.'.format(input_dtype, name))
batch_size = tensor.shape[0]
mask = math_ops.to_float(
math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id))
mask = array_ops.reshape(mask, [num_cores] + [1] * tensor.shape.ndims)
result = mask * math_ops.to_float(tensor)
local_tensor_with_holes = array_ops.reshape(result,
[-1] + result.shape.as_list()[2:])
concat_tensor = tpu_ops.cross_replica_sum(local_tensor_with_holes)
concat_tensor.set_shape((num_cores * batch_size,) + tuple(tensor.shape[1:]))
  if concat_tensor.dtype != input_dtype:
concat_tensor = math_ops.cast(concat_tensor, input_dtype)
return concat_tensor
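# A rough illustration of the concat trick above (the shapes are hypothetical): with
# num_cores=2 and a per-core tensor of shape [B, D], each core builds a one-hot mask
# over core slots, zeroes out every other core's slot, reshapes the masked result to
# [2 * B, D] ("with holes"), and tpu_ops.cross_replica_sum then fills the holes so
# every core ends up holding the full [2 * B, D] concatenation.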
class KerasCrossShardOptimizer(keras_optimizers.Optimizer):
"""An optimizer that averages gradients across TPU shards."""
def __init__(self, opt, name='KerasCrossShardOptimizer'):
"""Construct a new cross-shard optimizer.
Args:
opt: An existing `Optimizer` to encapsulate.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "KerasCrossShardOptimizer".
Raises:
ValueError: If reduction is not a valid cross-shard reduction.
"""
super(KerasCrossShardOptimizer, self).__init__()
self._name = name
self._opt = opt
logging.info('KerasCrossShard: %s %s', self._opt, self._opt.weights)
def get_updates(self, loss, params):
self._opt.get_gradients = self.get_gradients
return self._opt.get_updates(loss, params)
def get_gradients(self, loss, params):
num_shards = tpu_function.get_tpu_context().number_of_shards
grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)
return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]
def get_weights(self):
return self._opt.get_weights()
def get_config(self):
return self._opt.get_config()
# Defer remaining operations to the underlying optimizer
def __getattr__(self, key):
return getattr(self._opt, key)
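# A hedged usage sketch (the concrete optimizer is illustrative): `_replicated_optimizer`
# below wraps plain Keras optimizers this way, e.g.
#   KerasCrossShardOptimizer(keras_optimizers.SGD())
# so that get_gradients() yields cross_replica_sum(grad) / num_shards for each
# parameter before the wrapped optimizer's update rule runs.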
class TPUModelOp(
collections.namedtuple('TPUModelOp', [
'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'
])):
pass
def _valid_name(tensor_name):
"""Return a valid tensor name (strips '/', ':', etc)."""
return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
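# For example (an illustrative tensor name, not taken from a real graph):
#   _valid_name('dense_1/kernel:0') == 'dense_1kernel0'
# which is safe to embed in the infeed op names built below.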
def _replicated_optimizer(opt):
"""Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
# Always wrap `opt` with CrossShardOptimizer, even if we are running on a
# single core. This ensures Keras properly tracks and initializes optimizer
# variables.
if isinstance(opt, keras_optimizers.TFOptimizer):
return tpu_optimizer.CrossShardOptimizer(opt.optimizer)
else:
return KerasCrossShardOptimizer(opt)
def _clone_optimizer(optimizer, config=None, worker_name=None):
"""Returns a cloned optimizer with the provided optimizer.config or config."""
if not isinstance(optimizer, keras_optimizers.Optimizer):
# In the first call to tpu_model(model), Keras may not have wrapped the TF
# optimizer in the TFOptimizer helper, e.g., the given model isn't compiled
# or optimizer isn't set, and later generated tpu_model compiles with a TF
# optimizer.
return optimizer
if isinstance(optimizer, keras_optimizers.TFOptimizer):
return keras_optimizers.TFOptimizer(optimizer.optimizer)
if config is None:
config = optimizer.get_config()
logging.info('Cloning %s %s', optimizer.__class__.__name__, config)
with ops.device(
'%s/device:CPU:0' % ('/job:%s' % worker_name if worker_name else '')):
# Explicitly put optimizer parameter variables on TPU worker.
return optimizer.__class__.from_config(config)
class TPURewriteContext(object):
"""Prepare the environment for a Keras model during `tpu.rewrite`.
This overrides the default placeholder behaviour to instead refer to a preset
input mapping. Placeholders are unsupported in TPU compiled code, and must
be replaced with explicit inputs or values from the infeed queue.
Instead of explicitly threading inputs all the way through the Keras codebase,
we override the behavior of the placeholder while compiling and inject the
Tensors from the infeed in place of the placeholder.
Similarly, as we compile a new sub-graph for each unique shape and execution
mode, we need to override the behavior of an embedded `name_scope` call in
the base Keras layer code. This allows us to re-use the same weights across
many compiles and share a single session/graph.
"""
def __init__(self, input_map):
self._input_map = input_map
self._default_placeholder = None
self._default_name_scope = None
def __enter__(self):
def _placeholder(dtype, shape=None, name=None): # pylint: disable=unused-argument
logging.info('Remapping placeholder for %s', name)
if name in self._input_map:
return self._input_map[name]
else:
logging.info('Default: %s', name)
return self._default_placeholder(dtype, shape, name)
def _name_scope(name, default_name=None, values=None):
caller_frame = sys._getframe().f_back
caller_obj = caller_frame.f_locals.get('self')
if (caller_obj is not None and
isinstance(caller_obj, base_layer.Layer) and name is not None):
return variable_scope.variable_scope(
name, default_name, values, reuse=variable_scope.AUTO_REUSE)
return self._default_name_scope(name, default_name, values)
self._default_placeholder = array_ops.placeholder
self._default_name_scope = ops.name_scope
self._default_make_variable = base_layer_utils.make_variable
self._default_random_normal = random_ops.random_normal
self._default_qr = gen_linalg_ops.qr
array_ops.placeholder = _placeholder
# Replace random_ops.random_normal with a dummy function because
# `random_normal` isn't yet implemented on the TPU. Because these
# initialized values are overwritten by the CPU values, this is okay.
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
del mean
del stddev
del seed
return array_ops.zeros(shape, dtype=dtype, name=name)
random_ops.random_normal = random_normal
# Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
# TODO(saeta): Remove qr override once we confirm the qr implementation is
# ok.
# pylint: disable=redefined-builtin
def qr(input, full_matrices=False, name=None):
"""Dummy implementation of qr decomposition."""
del full_matrices # TODO(saeta): Properly handle the full matrix case.
input_shape = input.shape
if len(input_shape) < 2:
raise ValueError('Invalid shape passed to qr: %s' % input_shape)
p = min(input_shape[-1], input_shape[-2])
if len(input_shape) == 2:
q = array_ops.zeros((p, p), name=name)
r = array_ops.zeros(input_shape, name=name)
return (r, q)
elif len(input_shape) == 3:
n = input_shape[0]
q = array_ops.zeros((n, p, p), name=name)
r = array_ops.zeros(input_shape, name=name)
return (r, q)
else:
raise ValueError('Invalid shape passed to qr: %s' % input_shape)
gen_linalg_ops.qr = qr
ops.name_scope = _name_scope
base_layer_utils.make_variable = variable_scope.get_variable
logging.info('Overriding default placeholder.')
return
def __exit__(self, exc_type, exc_val, exc_tb):
array_ops.placeholder = self._default_placeholder
ops.name_scope = self._default_name_scope
base_layer_utils.make_variable = self._default_make_variable
random_ops.random_normal = self._default_random_normal
gen_linalg_ops.qr = self._default_qr
class SizedInfeed(
collections.namedtuple('SizedInfeed',
['sharded_infeed_tensors', 'infeed_ops'])):
"""Represents an instantiation of the infeed ops for a concrete input shape.
sharded_infeed_tensors: A data structure of Tensors used to represent the
placeholder tensors that must be fed when using feed_dicts.
infeed_ops: the set of ops that will be run to drive infeed for a single step.
"""
pass
class TPUInfeedInstance(object):
"""TPUInfeedInstance represents the logic to manage feeding in a single step.
See the comments on the `TPUInfeedManager` for a description for how infeed
is managed.
"""
@abc.abstractmethod
def make_input_specs(self, input_tensors):
"""Constructs the infeed_specs for the given Infeed instance.
Args:
input_tensors: The inputs to the model.
Returns:
      A list of input specs (e.g. `TensorSpec`s) describing the tensors that
      must be fed for this instance.
"""
pass
def make_feed_dict(self, tpu_model_op):
"""Constructs a feed_dict for this instance, given the tpu_model_op.
Args:
tpu_model_op: A `TPUModelOp` representing the TPU Model for this
instance's input spec.
Returns:
A dictionary to use as the feed_dict of a `session.run` call.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class TPUInfeedManager(object):
"""TPUInfeedManager manages the data infeeding of data to a TPU computation.
Because there are multiple data sources (e.g. in-memory NumPy arrays,
`tf.data.Dataset`s), we abstract the different logic behind a single
interface: the `TPUInfeedManager`.
(1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
`TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
new one if required).
(2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
which returns a `TPUInfeedInstance`.
(3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of
the model based on the returned `input_specs` from `TPUInfeedInstance`.
(4) [Optional.] If the model has not already been instantiated for the given
input spec, the `TPUFunction` compiles the model for the input spec (using the
`TPUInfeedManager`).
(5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
compiled model instance corresponding to its shape.
"""
@abc.abstractmethod
def make_infeed_instance(self, inputs):
"""Given a single step's input, construct a `TPUInfeedInstance`.
Args:
inputs: The inputs to a given step.
Returns:
A subclass of `TPUInfeedInstance`.
"""
pass
@abc.abstractmethod
def build_infeed_from_input_specs(self, input_specs, execution_mode):
"""For a given input specification (size, type), construct the infeed ops.
This is called only once for a given input specification and builds the
graph ops. It does not have a pointer to the actual infeed data.
Args:
input_specs: TODO(saeta): Document me!
execution_mode: TODO(saeta): Document me!
Returns:
A `SizedInfeed` instance.
"""
pass
class TPUNumpyInfeedManager(TPUInfeedManager):
"""TPU Infeed manager for Numpy inputs."""
class NumpyInfeedInstance(TPUInfeedInstance):
"""Infeed instance for Numpy inputs."""
def __init__(self, sharded_inputs):
self._sharded_inputs = sharded_inputs
def make_input_specs(self, input_tensors):
# Compute an input specification (used to generate infeed enqueue and
# dequeue operations). We use the shape from our input array and the
# dtype from our model. A user may pass in a float64 for a float32
# input: for model compatibility we still must generate a float32 infeed.
input_specs = []
# We use the shape and dtype from the first shard to compute the input
# metadata (`input_specs`); all replicas have the same type and shape.
for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
input_specs.append(
tensor_spec.TensorSpec(ary.shape, tensor.dtype,
_valid_name(tensor.name)))
return input_specs
def make_feed_dict(self, tpu_model_op):
infeed_dict = {}
for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
self._sharded_inputs):
for tensor, value in zip(infeed_tensors, inputs):
infeed_dict[tensor] = value
return infeed_dict
def __init__(self, tpu_assignment):
self._tpu_assignment = tpu_assignment
def _split_tensors(self, inputs):
"""Split input data across shards.
Each input is sliced along the batch axis.
Args:
inputs: List of Numpy arrays to run on the TPU.
Returns:
List of lists containing the input to feed to each TPU shard.
"""
if self._tpu_assignment.num_towers == 1:
return [inputs]
batch_size = inputs[0].shape[0]
assert batch_size % self._tpu_assignment.num_towers == 0, (
'batch_size must be divisible by the number of TPU cores in use (%s '
'vs %s)' % (batch_size, self._tpu_assignment.num_towers))
shard_size = batch_size // self._tpu_assignment.num_towers
input_list = []
for index in range(self._tpu_assignment.num_towers):
shard_inputs = [
x[index * shard_size:(index + 1) * shard_size] for x in inputs
]
input_list.append(shard_inputs)
return input_list
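  # An illustrative split (hypothetical shapes): with num_towers=2 and a single
  # input of shape (8, 32), _split_tensors returns
  #   [[inputs[0][0:4]], [inputs[0][4:8]]]
  # i.e. one list of per-shard batch slices per TPU core.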
def make_infeed_instance(self, inputs):
sharded_inputs = self._split_tensors(inputs)
return self.NumpyInfeedInstance(sharded_inputs)
def build_infeed_from_input_specs(self, input_specs, execution_mode):
infeed_op = []
shard_infeed_tensors = []
for shard_id in range(self._tpu_assignment.num_towers):
with ops.device(
'/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
infeed_tensors = []
with ops.device('/device:TPU:%d' % shard_id):
for spec in input_specs:
# Construct placeholders for each of the inputs.
infeed_tensors.append(
array_ops.placeholder(
dtype=spec.dtype,
shape=spec.shape,
name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
shard_infeed_tensors.append(infeed_tensors)
infeed_op.append(
tpu_ops.infeed_enqueue_tuple(
infeed_tensors, [spec.shape for spec in input_specs],
name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
device_ordinal=shard_id))
return SizedInfeed(
infeed_ops=infeed_op, sharded_infeed_tensors=shard_infeed_tensors)
class TPUDatasetInfeedManager(TPUInfeedManager):
"""Manages infeed for a `tf.data.Dataset` into a TPU computation.
"""
class DatasetInfeedInstance(TPUInfeedInstance):
"""An instance of the TPU infeed."""
def __init__(self, input_specs):
self._input_specs = input_specs
def make_input_specs(self, input_tensors):
# TODO(saeta): Do error checking here!
return self._input_specs
def make_feed_dict(self, tpu_model_op):
# TODO(saeta): Verify tpu_model_op is as expected!
return {}
# pylint: disable=redefined-outer-name
def __init__(self, dataset, tpu_assignment, mode):
"""Constructs a TPUDatasetInfeedManager.
Args:
dataset: A `tf.data.Dataset` to infeed.
tpu_assignment: The `TPUAssignment` used to configure the
Keras TPU model.
mode: ModeKeys enum.
"""
self._verify_dataset_shape(dataset)
self._dataset = dataset
self._tpu_assignment = tpu_assignment
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
dummy_x_shape = dataset_output_shapes[0].as_list()
dummy_x_shape[0] *= tpu_assignment.num_towers
dummy_y_shape = dataset_output_shapes[1].as_list()
dummy_y_shape[0] *= tpu_assignment.num_towers
self._iterator = dataset_ops.make_initializable_iterator(dataset)
K.get_session().run(self._iterator.initializer)
self._get_next_ops = []
ctrl_deps = []
for i in range(tpu_assignment.num_towers):
      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic ordering
# TODO(saeta): Ensure correct placement!
get_next_op = self._iterator.get_next()
self._get_next_ops.append(get_next_op)
ctrl_deps.extend(get_next_op)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
dataset_output_types = dataset_ops.get_legacy_output_types(dataset)
self._dummy_x = np.zeros(
dummy_x_shape, dtype=dataset_output_types[0].as_numpy_dtype)
self._dummy_y = np.zeros(
dummy_y_shape, dtype=dataset_output_types[1].as_numpy_dtype)
input_specs = []
iterator_output_shapes = dataset_ops.get_legacy_output_shapes(
self._iterator)
iterator_output_types = dataset_ops.get_legacy_output_types(self._iterator)
if isinstance(iterator_output_shapes, tuple):
assert isinstance(iterator_output_types, tuple)
assert len(iterator_output_shapes) == len(iterator_output_types)
for i in range(len(iterator_output_shapes)):
spec = tensor_spec.TensorSpec(iterator_output_shapes[i],
iterator_output_types[i])
input_specs.append(spec)
elif isinstance(iterator_output_shapes, tensor_shape.TensorShape):
spec = tensor_spec.TensorSpec(iterator_output_shapes,
iterator_output_types)
input_specs.append(spec)
# Pre-process the inputs and get_next_ops before caching.
input_specs, self._get_next_ops = (
_inject_tpu_inputs_for_dataset(
tpu_assignment, mode, input_specs, self._get_next_ops))
self._infeed_instance = self.DatasetInfeedInstance(input_specs)
def _verify_dataset_shape(self, dataset):
"""Verifies a dataset is of an appropriate shape for TPUs."""
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
dataset_output_classes = dataset_ops.get_legacy_output_classes(dataset)
if not isinstance(dataset, dataset_ops.DatasetV2):
raise ValueError('The function passed as the `x` parameter did not '
'return a `tf.data.Dataset`.')
if not isinstance(dataset_output_classes, tuple):
raise ValueError('The dataset must return a tuple of tf.Tensors, '
'instead it returns: %s' % dataset_output_classes)
if len(dataset_output_classes) != 2:
raise ValueError('The dataset must return a 2-element tuple, got '
'%s output classes instead.' % (dataset_output_classes,))
for i, cls in enumerate(dataset_output_classes):
if cls != ops.Tensor:
raise ValueError('The dataset returned a non-Tensor type (%s) at '
'index %d.' % (cls, i))
for i, shape in enumerate(dataset_output_shapes):
if not shape:
raise ValueError('The dataset returns a scalar tensor in '
'tuple index %d. Did you forget to batch? '
'(Output shapes: %s).' % (i, dataset_output_shapes))
for j, dim in enumerate(shape):
if dim.value is None:
if j == 0:
hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
'drop_remainder=True)`?')
else:
hint = ''
raise ValueError(
'The Keras-TPU integration for `tf.data` '
'currently requires static shapes. The provided '
'dataset only has a partially defined shape. '
'(Dimension %d of output tensor %d is not statically known '
'for output shapes: %s.%s)' % (j, i, dataset_output_shapes, hint))
@property
def dummy_x(self):
return self._dummy_x
@property
def dummy_y(self):
return self._dummy_y
def make_infeed_instance(self, inputs):
# TODO(saeta): Verify inputs is as expected.
return self._infeed_instance
def build_infeed_from_input_specs(self, input_specs, execution_mode):
shard_infeed_tensors = self._get_next_ops
assert len(shard_infeed_tensors) == self._tpu_assignment.num_towers
infeed_ops = []
for shard_id in range(self._tpu_assignment.num_towers):
with ops.device(
'/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
infeed_ops.append(
tpu_ops.infeed_enqueue_tuple(
shard_infeed_tensors[shard_id],
[spec.shape for spec in input_specs],
name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
device_ordinal=shard_id))
return SizedInfeed(
infeed_ops=infeed_ops, sharded_infeed_tensors=shard_infeed_tensors)
def _inject_tpu_inputs_for_dataset(tpu_assignment, mode,
input_specs, get_next_ops):
"""Append core information to the set of dataset inputs."""
# This is used during compilation to identify the current TPU core and enable
# concatenation operations across cores.
if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
return input_specs, get_next_ops
# Dataset inputs operate on per core basis.
per_core_batch_size = input_specs[0].shape.as_list()[0]
# Insert, at head, the tensor for core_id.
assert len(get_next_ops) == tpu_assignment.num_towers
for i in range(tpu_assignment.num_towers):
core_id_constant = constant_op.constant(
np.array([i] * per_core_batch_size).astype('int32'),
dtype=dtypes.int32,
        name='core_id_constant')
get_next_ops[i] = [core_id_constant] + list(get_next_ops[i])
# Insert the input spec at head also.
input_specs = [tensor_spec.TensorSpec([per_core_batch_size], dtypes.int32)
] + input_specs
return input_specs, get_next_ops
def _inject_tpu_inputs_for_infeed(tpu_assignment, mode,
core_id_place_holder, input_tensors, inputs):
"""Append core information to the set of inputs."""
# This is used during compilation to identify the current TPU core and enable
# concatenation operations across cores.
if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
return input_tensors, inputs
# Puts a place holder in input spec.
input_tensors = [core_id_place_holder] + input_tensors
# Now fill the core id. For `num_cores` = 2, `batch_size` = 8, we fill the
# core id inputs as [0, 0, 0, 0, 1, 1, 1, 1], so each core sees its core id
# (duplicated).
num_cores = tpu_assignment.num_towers
per_core_batch_size = inputs[0].shape[0] // num_cores
core_ids = np.arange(num_cores).repeat(per_core_batch_size)
inputs = [core_ids] + inputs
return input_tensors, inputs
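# Hedged illustration (not part of the original module): the per-sample core-id
# fill pattern described in the comment above, computed with plain NumPy. The
# `num_cores` and `batch_size` values are placeholder assumptions, and this
# helper is never called by the library code.
def _example_core_id_fill_pattern(num_cores=2, batch_size=8):
  """Returns core ids such as [0, 0, 0, 0, 1, 1, 1, 1] for the example values."""
  per_core_batch_size = batch_size // num_cores
  return np.arange(num_cores).repeat(per_core_batch_size)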
def _read_tpu_coreid_from_infeed(mode, infeed_tensors):
"""Popping out the core ids from infeed."""
if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:
return None, infeed_tensors
if len(infeed_tensors) <= 1:
raise RuntimeError(
'The infeed tensors on TPU core has only {} tensors. '
'This is not expected. Please report a bug.\nTensors: {}'.format(
len(infeed_tensors), infeed_tensors))
core_id = infeed_tensors[0][0] # Pop out the scalar version.
rest = infeed_tensors[1:]
return core_id, rest
class TPUFunction(object):
"""K.function compatible interface for invoking a TPU compiled function.
  Recompilation is triggered on demand for each new set of input shapes; the
  results are cached for future execution. We expect most computations will
be dominated by a standard batch-size, followed by a straggler batch for
the end of training or evaluation.
All `inputs` and `outputs` will be loaded via the infeed and outfeed queues
instead of being injected as `feed_dict` items or fetches.
"""
def __init__(self, model, execution_mode, tpu_assignment):
self.model = model
self.execution_mode = execution_mode
self._tpu_assignment = tpu_assignment
self._compilation_cache = {}
self._cloned_model = None
self._cloned_optimizer = None
# Create a placeholder for the TPU core ID. Cache the placeholder to avoid
# modifying the graph for every batch.
self._core_id_place_holder = array_ops.placeholder(
dtype=dtypes.int32, shape=[1], name='core_id')
def _specialize_model(self, input_specs, infeed_manager):
"""Specialize `self.model` (a Keras model) for the given input shapes."""
# Re-create our input and output layers inside our subgraph. They will be
# attached to the true computation when we clone our model in `tpu_fn`.
K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)
# functools.partial and callable objects are not supported by tpu.rewrite
def _model_fn():
"""Compute fit/eval/predict for the TPU."""
is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT
# During train/eval, we infeed our features as well as labels.
if is_training or is_test:
infeed_layers = self.model._input_layers + self.model._output_layers
else:
infeed_layers = self.model._input_layers
# Generate our infeed operation to read features & labels.
infeed_tensors = tpu_ops.infeed_dequeue_tuple(
dtypes=[spec.dtype for spec in input_specs],
shapes=[spec.shape for spec in input_specs],
name='infeed-%s' % self.execution_mode)
core_id, infeed_tensors = (
_read_tpu_coreid_from_infeed(
mode=self.execution_mode, infeed_tensors=infeed_tensors))
assert len(infeed_tensors) == len(infeed_layers), (
'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
infeed_tensors))
tpu_targets = []
tpu_input_map = {}
# Sort infeed outputs into inputs and labels for calling our Keras model.
for tensor, layer in zip(infeed_tensors, infeed_layers):
if layer in self.model._input_layers:
tpu_input_map[layer.name] = tensor
if layer in self.model._output_layers:
tpu_targets.append(tensor)
# Clone our CPU model, running within the TPU device context.
#
# We use the id of the original model as a key to avoid weight collisions
# (if a user re-runs the same model multiple times, in e.g. Colab).
with TPURewriteContext(tpu_input_map):
with variable_scope.variable_scope('tpu_%s' % id(self.model)):
with keras_tpu_variables.replicated_scope(
self._tpu_assignment.num_towers):
if not self._cloned_optimizer:
self._cloned_optimizer = _clone_optimizer(
self.model.cpu_optimizer,
worker_name=self._tpu_assignment.worker_name)
self._cloned_model = models.clone_model(self.model)
# When running on more than one core, concatenate outputs at the end
            # of processing. In the backprop stage, the gradients are
            # calculated from the local inputs only, because the gradient of
            # the cross-replica-concat is zero for any outputs other than
            # those from the local core, so the loss calculation is identical.
num_towers = self.model._tpu_assignment.num_towers
if num_towers > 1 and (is_training or is_test):
new_outputs = [
_cross_replica_concat(
o, core_id, num_towers,
name='model output ({})'.format(o.name))
for o in self._cloned_model.outputs
]
# Recast all low precision outputs back to float32 since we only
# casted the inputs to bfloat16 and not targets. This is done so
# that we can preserve precision when calculating the loss value.
if new_outputs and new_outputs[0].dtype == dtypes.bfloat16:
new_outputs = [
math_ops.cast(o, dtypes.float32) for o in new_outputs]
self._cloned_model.outputs = new_outputs
tpu_targets = [
_cross_replica_concat(
tensor,
core_id,
num_towers,
name='model target ({})'.format(tensor.name))
for tensor in tpu_targets
]
if is_training or is_test:
with variable_scope.variable_scope(
'metrics', reuse=variable_scope.AUTO_REUSE):
self._cloned_model.compile(
optimizer=_replicated_optimizer(self._cloned_optimizer),
loss=self.model.loss,
loss_weights=self.model.loss_weights,
metrics=metrics_module.clone_metrics(
self.model._compile_metrics),
weighted_metrics=metrics_module.clone_metrics(
self.model._compile_weighted_metrics),
target_tensors=tpu_targets,
)
# Compute our outfeed depending on the execution mode
if is_training:
if not isinstance(self._cloned_optimizer, keras_optimizers.TFOptimizer):
# For Keras optimizer, we try to place the variable weights on the TPU
# device. Keras creates optimizer variables (e.g. momentum values for
# the Momentum optimizer) when _make_train_function is invoked.
with keras_tpu_variables.replicated_variable_for_optimizer(
self._tpu_assignment.num_towers):
self._cloned_model._make_fit_function()
else:
self._cloned_model._make_fit_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model._fit_function.outputs
]
return [
self._cloned_model._fit_function.updates_op,
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model._fit_function.outputs,
name='outfeed-enqueue-train')
]
elif is_test:
self._cloned_model._make_eval_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model._eval_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model._eval_function.outputs,
name='outfeed-enqueue-test')
]
elif is_predict:
self._cloned_model._make_predict_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model.predict_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model.predict_function.outputs,
name='outfeed-enqueue-predict',
)
]
else:
assert False, 'Unexpected execution mode: %s' % self.execution_mode
# Capture outfeed metadata computed during the rewrite.
self._outfeed_spec = None
    # Generate our TPU operations using `tpu.split_compile_and_replicate`.
    # `compile_op` can be used to test that the TPU model compiles before
    # execution. `execute_op` replicates `_model_fn` `num_replicas` times, with
    # each shard running on a different logical core.
compile_op, execute_op = tpu.split_compile_and_replicate(
_model_fn, inputs=[[] for _ in range(self._tpu_assignment.num_towers)])
# Generate CPU side operations to enqueue features/labels and dequeue
# outputs from the model call.
sized_infeed = infeed_manager.build_infeed_from_input_specs(
input_specs, self.execution_mode)
# Build output ops.
outfeed_op = []
for shard_id in range(self._tpu_assignment.num_towers):
with ops.device(
'/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
outfeed_op.extend(
tpu_ops.outfeed_dequeue_tuple(
dtypes=[spec.dtype for spec in self._outfeed_spec],
shapes=[spec.shape for spec in self._outfeed_spec],
name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),
device_ordinal=shard_id))
return TPUModelOp(
compile_op,
execute_op,
infeed_tensors=sized_infeed.sharded_infeed_tensors,
infeed_op=sized_infeed.infeed_ops,
outfeed_op=outfeed_op)
def _test_model_compiles(self, tpu_model_ops):
"""Verifies that the given TPUModelOp can be compiled via XLA."""
logging.info('Started compiling')
start_time = time.time()
result = K.get_session().run(tpu_model_ops.compile_op)
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
raise RuntimeError('Compilation failed: {}'.format(
proto.status_error_message))
end_time = time.time()
logging.info('Finished compiling. Time elapsed: %s secs',
end_time - start_time)
def _lookup_infeed_manager(self, inputs):
"""Return an existing manager, or construct a new InfeedManager for inputs.
_lookup_infeed_manager will return an existing InfeedManager if one has been
previously assigned for this model and input. If not, it will construct a
new TPUNumpyInfeedManager.
Args:
inputs: A NumPy input to the model.
Returns:
A `TPUInfeedManager` object to manage infeeds for this input.
"""
if inputs is None:
return None
for x, mgr in self.model._numpy_to_infeed_manager_list:
if inputs[0] is x:
return mgr
return TPUNumpyInfeedManager(self.model._tpu_assignment)
def _tpu_model_ops_for_input_specs(self, input_specs, infeed_manager):
"""Looks up the corresponding `TPUModelOp` for a given `input_specs`.
It instantiates a new copy of the model for each unique input shape.
Args:
input_specs: The specification of the inputs to train on.
infeed_manager: The infeed manager responsible for feeding in data.
Returns:
A `TPUModelOp` instance that can be used to execute a step of the model.
"""
if input_specs is None or infeed_manager is None:
# Note: this condition is possible during the prologue or epilogue of the
# pipelined loop.
return None
# XLA requires every operation in the graph has a fixed shape. To
# handle varying batch sizes we recompile a new sub-graph for each
# unique input shape.
shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])
if shape_key not in self._compilation_cache:
logging.info(
'New input shapes; (re-)compiling: mode=%s '
'(# of cores %d), %s', self.execution_mode,
self._tpu_assignment.num_towers, input_specs)
new_tpu_model_ops = self._specialize_model(input_specs,
infeed_manager)
self._compilation_cache[shape_key] = new_tpu_model_ops
self._test_model_compiles(new_tpu_model_ops)
return self._compilation_cache[shape_key]
def _construct_input_tensors_and_inputs(self, inputs):
"""Returns input tensors and numpy array inputs corresponding to `inputs`.
Args:
inputs: NumPy inputs.
Returns:
A tuple of `input_tensors`, and `inputs`.
"""
if inputs is None:
# Note: this condition is possible during the prologue or epilogue of the
# pipelined loop.
return None, None
if isinstance(inputs[-1], int):
# Remove the learning_phase flag at the end. We currently hard code the
# learning_phase in TPUFunction.
inputs = inputs[:-1]
if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
self.execution_mode == model_fn_lib.ModeKeys.EVAL):
# Strip sample weight from inputs.
input_tensors = self.model._feed_inputs + self.model._feed_targets
else:
input_tensors = self.model._feed_inputs
inputs = inputs[:len(input_tensors)]
input_tensors, inputs = (
_inject_tpu_inputs_for_infeed(
self._tpu_assignment, self.execution_mode,
self._core_id_place_holder, input_tensors, inputs))
return input_tensors, inputs
def _process_outputs(self, outfeed_outputs):
"""Processes the outputs of a model function execution.
Args:
outfeed_outputs: The sharded outputs of the TPU computation.
Returns:
The aggregated outputs of the TPU computation to be used in the rest of
the model execution.
"""
# TODO(xiejw): Decide how to reduce outputs, or discard all but first.
if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:
outputs = [[] for _ in range(len(self._outfeed_spec))]
outputs_per_replica = len(self._outfeed_spec)
for i in range(self._tpu_assignment.num_towers):
output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *
outputs_per_replica]
for j in range(outputs_per_replica):
outputs[j].append(output_group[j])
return [np.concatenate(group) for group in outputs]
else:
return outfeed_outputs[:len(outfeed_outputs) //
self._tpu_assignment.num_towers]
def __call__(self, inputs):
"""__call__ executes the function on the computational hardware.
It handles executing infeed, and preprocessing in addition to executing the
model on the TPU hardware.
Note: `__call__` has a sibling method `pipeline_run` which performs the same
operations, but with software pipelining.
Args:
inputs: The inputs to use to train.
Returns:
The output of the computation for the given mode it is executed in.
Raises:
RuntimeError: If there is an inappropriate use of the function.
"""
assert isinstance(inputs, list)
infeed_manager = self._lookup_infeed_manager(inputs)
input_tensors, inputs = self._construct_input_tensors_and_inputs(inputs)
infeed_instance = infeed_manager.make_infeed_instance(inputs)
del inputs # To avoid accident usage.
input_specs = infeed_instance.make_input_specs(input_tensors)
tpu_model_ops = self._tpu_model_ops_for_input_specs(input_specs,
infeed_manager)
infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)
# Initialize our TPU weights on the first compile.
self.model._initialize_weights(self._cloned_model)
_, _, outfeed_outputs = K.get_session().run([
tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
tpu_model_ops.outfeed_op
], infeed_dict)
return self._process_outputs(outfeed_outputs)
def pipeline_run(self, cur_step_inputs, next_step_inputs):
"""pipeline_run executes the function on the computational hardware.
pipeline_run performs the same computation as __call__, however it runs the
infeed in a software pipelined fashion compared to the on-device execution.
Note: it is the responsibility of the caller to call `pipeline_run` in the
following sequence:
- Once with `cur_step_inputs=None` and `next_step_inputs=list(...)`
- `n` times with `cur_step_inputs` and `next_step_inputs` as `list`s
- Once with `cur_step_inputs=list(...)` and `next_step_inputs=None`
Additionally, it is the responsibility of the caller to pass
`next_step_inputs` as `cur_step_inputs` on the next invocation of
`pipeline_run`.
Args:
cur_step_inputs: The current step's inputs.
next_step_inputs: The next step's inputs.
Returns:
The output of the computation for the given mode it is executed in.
Raises:
RuntimeError: If there is an inappropriate use of the function.
"""
# Software pipelined case.
next_step_infeed_manager = self._lookup_infeed_manager(next_step_inputs)
cur_step_infeed_manager = self._lookup_infeed_manager(cur_step_inputs)
if (next_step_infeed_manager is not None and
cur_step_infeed_manager is not None):
assert type(next_step_infeed_manager) is type(cur_step_infeed_manager)
next_input_tensors, next_step_inputs = (
self._construct_input_tensors_and_inputs(next_step_inputs))
cur_input_tensors, cur_step_inputs = (
self._construct_input_tensors_and_inputs(cur_step_inputs))
cur_infeed_instance = None
if cur_step_infeed_manager:
cur_infeed_instance = cur_step_infeed_manager.make_infeed_instance(
cur_step_inputs)
next_infeed_instance = None
if next_step_infeed_manager:
next_infeed_instance = next_step_infeed_manager.make_infeed_instance(
next_step_inputs)
del cur_step_inputs # Avoid accidental re-use.
del next_step_inputs # Avoid accidental re-use.
cur_tpu_model_ops = None
next_tpu_model_ops = None
infeed_dict = None
if cur_infeed_instance and cur_input_tensors and cur_step_infeed_manager:
cur_input_specs = cur_infeed_instance.make_input_specs(cur_input_tensors)
cur_tpu_model_ops = self._tpu_model_ops_for_input_specs(
cur_input_specs, cur_step_infeed_manager)
if (next_infeed_instance and next_input_tensors and
next_step_infeed_manager):
next_input_specs = next_infeed_instance.make_input_specs(
next_input_tensors)
next_tpu_model_ops = self._tpu_model_ops_for_input_specs(
next_input_specs, next_step_infeed_manager)
infeed_dict = next_infeed_instance.make_feed_dict(next_tpu_model_ops)
# Initialize our TPU weights on the first compile.
self.model._initialize_weights(self._cloned_model)
if next_tpu_model_ops and cur_tpu_model_ops:
_, _, outfeed_outputs = K.get_session().run([
next_tpu_model_ops.infeed_op, cur_tpu_model_ops.execute_op,
cur_tpu_model_ops.outfeed_op
], infeed_dict)
return self._process_outputs(outfeed_outputs)
if cur_tpu_model_ops:
_, outfeed_outputs = K.get_session().run(
[cur_tpu_model_ops.execute_op, cur_tpu_model_ops.outfeed_op])
return self._process_outputs(outfeed_outputs)
if next_tpu_model_ops:
K.get_session().run(next_tpu_model_ops.infeed_op, infeed_dict)
return None
raise RuntimeError('Internal error: both current & next tpu_model_ops '
'were None')
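# Hedged sketch (not part of the original module): the caller protocol that
# `TPUFunction.pipeline_run` documents above -- one priming call, a run of
# steady-state calls, and one draining call. `fit_function` and `batches` are
# assumed placeholders supplied by the caller; this helper is illustrative
# only and is never invoked by the library code.
def _example_pipeline_run_sequence(fit_function, batches):
  """Runs a software-pipelined loop over `batches`, returning the last outputs."""
  outs = fit_function.pipeline_run(cur_step_inputs=None,
                                   next_step_inputs=batches[0])
  assert outs is None  # The priming call only fills the infeed.
  for cur_inputs, next_inputs in zip(batches[:-1], batches[1:]):
    outs = fit_function.pipeline_run(cur_step_inputs=cur_inputs,
                                     next_step_inputs=next_inputs)
  # Drain: execute the final batch with nothing queued behind it.
  return fit_function.pipeline_run(cur_step_inputs=batches[-1],
                                   next_step_inputs=None)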
class KerasTPUModel(models.Model):
"""TPU compatible Keras model wrapper."""
def __init__(self, cpu_model, strategy):
super(models.Model, self).__init__( # pylint: disable=bad-super-call
inputs=cpu_model.inputs,
outputs=cpu_model.outputs,
name=cpu_model.name,
)
if tf2.enabled():
raise RuntimeError(
'Keras support is now deprecated in support of TPU Strategy. '
'Please follow the distribution strategy guide on tensorflow.org '
'to migrate to the 2.0 supported version.')
else:
logging.warning(
'Keras support is now deprecated in support of TPU Strategy. '
'Please follow the distribution strategy guide on tensorflow.org '
'to migrate to the 2.0 supported version.')
# Create a mapping from numpy arrays to infeed managers.
# Note: uses a list of tuples instead of a map because numpy arrays are
# not hashable.
self._numpy_to_infeed_manager_list = []
# Add distribution specific arguments since we don't call the Model init.
self._distribution_strategy = None
self._compile_distribution = None
self.predict_function = None
self.test_function = None
self.train_function = None
self._fit_function = None
self._eval_function = None
self._stateful_metric_functions = []
cluster_resolver = strategy._tpu_cluster_resolver
self._tpu_name_or_address = cluster_resolver.get_master()
self._cpu_model = cpu_model
self._tpu_assignment = strategy._make_assignment_for_model(cpu_model)
self._tpu_model = None
self._tpu_weights_initialized = False
# If the input CPU model has already been compiled, compile our TPU model
# immediately.
if self._cpu_model.optimizer:
self.compile(
self._cpu_model.optimizer,
self._cpu_model.loss,
self._cpu_model._compile_metrics,
self._cpu_model.loss_weights,
self._cpu_model.sample_weight_mode,
self._cpu_model._compile_weighted_metrics,
self._cpu_model.target_tensors,
)
# This flag must be disabled upon model mutation, such as changing the model
# layers or recompiling the model to use a different optimizer. New function
# definitions are generated whenever this flag is disabled, ensuring that
# internal graph functions are always using the current model structure.
#
# Requires declaration here because this constructor skips the
# Model constructor.
self._built_graph_functions = False
def get_config(self):
return {
'cpu_model': self._cpu_model,
'tpu_name_or_address': self._tpu_name_or_address,
'tpu_assignment': self._tpu_assignment,
}
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
if sample_weight_mode:
raise ValueError('sample_weight_mode not supported for TPU execution.')
if weighted_metrics:
raise ValueError('weighted_metrics not supported for TPU execution.')
if target_tensors:
raise ValueError('target_tensors is not supported for TPU execution.')
self._cpu_model.compile(
_clone_optimizer(optimizer), loss,
metrics_module.clone_metrics(metrics), loss_weights, sample_weight_mode,
metrics_module.clone_metrics(weighted_metrics), target_tensors,
**kwargs)
super(KerasTPUModel, self).compile(optimizer, loss, metrics, loss_weights,
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
if context.executing_eagerly():
raise EnvironmentError('KerasTPUModel currently does not support eager '
'mode.')
with _tpu_session_context():
assert not self._numpy_to_infeed_manager_list # Ensure empty.
infeed_managers = [] # Managers to clean up at the end of the fit call.
if isinstance(x, dataset_ops.DatasetV2):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(x):
with ops.device(
'/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):
dataset = x()
if steps_per_epoch is None:
raise ValueError('When using tf.data as input to a model, you '
'should specify the steps_per_epoch argument.')
if y is not None:
raise ValueError('When using tf.data as input to a model, y must '
'be None')
infeed_manager = TPUDatasetInfeedManager(
dataset, self._tpu_assignment, model_fn_lib.ModeKeys.TRAIN)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
x = infeed_manager.dummy_x
y = infeed_manager.dummy_y
infeed_managers.append((x, infeed_manager))
if isinstance(validation_data, dataset_ops.DatasetV2):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(validation_data):
dataset = validation_data()
if validation_steps is None:
raise ValueError('When using tf.data as validation for a model, you '
'should specify the validation_steps argument.')
infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
model_fn_lib.ModeKeys.EVAL)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
val_x = infeed_manager.dummy_x
val_y = infeed_manager.dummy_y
infeed_managers.append((val_x, infeed_manager))
validation_data = (val_x, val_y)
self._numpy_to_infeed_manager_list = infeed_managers
try:
pipeline = kwargs.get('_pipeline', True)
if '_pipeline' in kwargs:
kwargs.pop('_pipeline')
if not pipeline:
logging.info('Running non-pipelined training loop (`_pipeline=%s`).',
pipeline)
return super(KerasTPUModel, self).fit(
x, y, batch_size, epochs, verbose, callbacks, validation_split,
validation_data, shuffle, class_weight, sample_weight,
initial_epoch, steps_per_epoch, validation_steps, **kwargs)
return self._pipeline_fit(x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle,
class_weight, sample_weight, initial_epoch,
steps_per_epoch, validation_steps, **kwargs)
finally:
self._numpy_to_infeed_manager_list = []
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
original_numpy_to_infeed_manager_list = []
if self._numpy_to_infeed_manager_list:
      # The evaluate call may be executed as a callback during training. In
      # this case, _numpy_to_infeed_manager_list is not empty, so save it for
      # recovery at the end of the evaluate call.
original_numpy_to_infeed_manager_list = self._numpy_to_infeed_manager_list
self._numpy_to_infeed_manager_list = []
with _tpu_session_context():
# Managers to clean up at the end of the evaluate call.
infeed_managers = []
if isinstance(x, dataset_ops.DatasetV2):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(x):
dataset = x()
if steps is None:
raise ValueError('When using tf.data as input to a model, you '
'should specify the steps argument.')
if y is not None:
raise ValueError('When using tf.data as input to a model, y must be '
'None')
infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,
model_fn_lib.ModeKeys.EVAL)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
x = infeed_manager.dummy_x
y = infeed_manager.dummy_y
infeed_managers.append((x, infeed_manager))
self._numpy_to_infeed_manager_list = infeed_managers
try:
return super(KerasTPUModel, self).evaluate(x, y, batch_size, verbose,
sample_weight, steps)
finally:
self._numpy_to_infeed_manager_list = (
original_numpy_to_infeed_manager_list)
def _pipeline_fit(self, x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle, class_weight,
sample_weight, initial_epoch, steps_per_epoch,
validation_steps, **kwargs):
# Similar to super.fit(...), but modified to support software pipelining.
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning('The `nb_epoch` argument in `fit` has been renamed '
'`epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split)
# Prepare validation data
val_x, val_y, val_sample_weights = self._prepare_validation_data(
validation_data, validation_split, validation_steps, x, y,
sample_weights, batch_size)
return self._pipeline_fit_loop(
x,
y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def _pipeline_fit_loop(self,
inputs,
targets,
sample_weights,
batch_size,
epochs,
verbose,
callbacks,
val_inputs,
val_targets,
val_sample_weights,
shuffle,
initial_epoch,
steps_per_epoch,
validation_steps):
self._make_train_function()
sample_weights = sample_weights or []
val_sample_weights = val_sample_weights or []
if not isinstance(K.learning_phase(), int):
ins = inputs + targets + sample_weights + [1]
else:
ins = inputs + targets + sample_weights
do_validation = False
if val_inputs:
do_validation = True
if (steps_per_epoch is None and verbose and inputs and
hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(inputs[0].shape[0], val_inputs[0].shape[0]))
if validation_steps:
do_validation = True
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` when doing step-wise '
'training, i.e. `steps_per_epoch` must be set.')
num_training_samples = training_utils.check_num_samples(
ins, batch_size, steps_per_epoch, 'steps_per_epoch')
count_mode = 'steps' if steps_per_epoch else 'samples'
callbacks = cbks.configure_callbacks(
callbacks,
self,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=num_training_samples,
verbose=verbose,
count_mode=count_mode)
if num_training_samples is not None:
index_array = np.arange(num_training_samples)
# To prevent a slowdown, we find beforehand the arrays that need conversion.
feed = self._feed_inputs + self._feed_targets + self._feed_sample_weights
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
# Reset stateful metrics
for m in self.metrics:
m.reset_states()
# Update callbacks
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
# Step-wise fit loop.
self._pipeline_fit_loop_step_wise(
ins=ins,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
do_validation=do_validation,
val_inputs=val_inputs,
val_targets=val_targets,
val_sample_weights=val_sample_weights,
validation_steps=validation_steps,
epoch_logs=epoch_logs)
else:
# Sample-wise fit loop.
self._pipeline_fit_loop_sample_wise(
ins=ins,
callbacks=callbacks,
index_array=index_array,
shuffle=shuffle,
batch_size=batch_size,
num_training_samples=num_training_samples,
indices_for_conversion_to_dense=indices_for_conversion_to_dense,
do_validation=do_validation,
val_inputs=val_inputs,
val_targets=val_targets,
val_sample_weights=val_sample_weights,
validation_steps=validation_steps,
epoch_logs=epoch_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
return self.history
def _pipeline_fit_loop_sample_wise(self,
ins,
callbacks,
index_array,
shuffle,
batch_size,
num_training_samples,
indices_for_conversion_to_dense,
do_validation,
val_inputs,
val_targets,
val_sample_weights,
validation_steps,
epoch_logs):
f = self.train_function
if shuffle == 'batch':
index_array = training_utils.batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(num_training_samples, batch_size)
ins_last_batch = None
last_batch_logs = None
batch_index = 0
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. If using HDF5 '
'input data, pass shuffle="batch".')
# Pipeline batch logs
next_batch_logs = {}
next_batch_logs['batch'] = batch_index
next_batch_logs['size'] = len(batch_ids)
if batch_index > 0:
# Callbacks operate one step behind in software pipeline.
callbacks.on_batch_begin(batch_index - 1, last_batch_logs)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
outs = f.pipeline_run(
cur_step_inputs=ins_last_batch, next_step_inputs=ins_batch)
ins_last_batch = ins_batch
if batch_index == 0:
assert outs is None
else:
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(self.metrics_names, outs):
last_batch_logs[l] = o # pylint: disable=unsupported-assignment-operation
callbacks.on_batch_end(batch_index - 1, last_batch_logs)
if callbacks.model.stop_training:
return
last_batch_logs = next_batch_logs
# Final batch
callbacks.on_batch_begin(batch_index, last_batch_logs)
outs = f.pipeline_run(cur_step_inputs=ins_last_batch, next_step_inputs=None)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(self.metrics_names, outs):
last_batch_logs[l] = o
callbacks.on_batch_end(batch_index, last_batch_logs)
if callbacks.model.stop_training:
return
if do_validation:
val_outs = training_arrays.test_loop(
self,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
batch_size=batch_size,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(self.metrics_names, val_outs):
epoch_logs['val_' + l] = o
def _pipeline_fit_loop_step_wise(self,
ins,
callbacks,
steps_per_epoch,
epochs,
do_validation,
val_inputs,
val_targets,
val_sample_weights,
validation_steps,
epoch_logs):
f = self.train_function
# Loop prologue
try:
outs = f.pipeline_run(cur_step_inputs=None, next_step_inputs=ins)
assert outs is None # Function shouldn't return anything!
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data on the first step '
'of the epoch, preventing further training. Check to '
'make sure your paths are correct and you have '
'permissions to read the files. Skipping validation')
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
if step_index < steps_per_epoch - 1:
next_step_inputs = ins
else:
next_step_inputs = None
outs = f.pipeline_run(
cur_step_inputs=ins, next_step_inputs=next_step_inputs)
except errors.OutOfRangeError:
        logging.warning('Your dataset iterator ran out of data; '
                        'interrupting training. Make sure that your '
                        'dataset can generate at least `steps_per_epoch * '
                        'epochs` batches (in this case, %d batches). You '
                        'may need to use the repeat() function when '
                        'building your dataset.' % (steps_per_epoch * epochs))
break
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(self.metrics_names, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if do_validation:
val_outs = training_arrays.test_loop(
self,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(self.metrics_names, val_outs):
epoch_logs['val_' + l] = o
def _prepare_validation_data(self, validation_data, validation_split,
validation_steps, x, y, sample_weights,
batch_size):
"""Prepares the validation dataset.
Args:
validation_data: The validation data (if provided)
validation_split: The validation split (if provided)
validation_steps: The validation steps (if provided)
x: The main training data x (if provided)
y: The main training data y (if provided)
sample_weights: The sample weights (if provided)
batch_size: The training batch size (if provided)
Returns:
A 3-tuple of (val_x, val_y, val_sample_weights).
Raises:
ValueError: If the provided arguments are not compatible with
`KerasTPUModel`.
"""
# Note: this is similar to a section of $tf/python/keras/engine/training.py
    # It differs in that tf.data objects are not allowed to be passed directly.
# Additionally, it handles validating shapes & types appropriately for use
# in TPUs.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.DatasetV2)):
raise ValueError('KerasTPUModel cannot handle a Dataset or Iterator '
'for validation_data. Please instead pass a function '
'that returns a `tf.data.Dataset`.')
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('When passing a `validation_data` argument, it must '
'contain either 2 items (x_val, y_val), or 3 items '
'(x_val, y_val, val_sample_weights). However we '
'received `validation_data=%s`' % validation_data)
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, you '
'cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (
slice_arrays(sample_weights, 0, split_at),
slice_arrays(sample_weights, split_at)
)
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
return val_x, val_y, val_sample_weights
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
with _tpu_session_context():
return super(KerasTPUModel, self).predict(
x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
@property
def optimizer(self):
if self._tpu_model:
return self._tpu_model.optimizer
return self._cpu_model.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self._optimizer = optimizer
@property
def metrics(self):
if self._tpu_model:
return self._tpu_model.metrics
return self._stateful_metric_functions
@metrics.setter
def metrics(self, metrics):
self._stateful_metric_functions = metrics
def _make_train_function(self):
if not self.train_function:
self.train_function = TPUFunction(
self,
model_fn_lib.ModeKeys.TRAIN,
tpu_assignment=self._tpu_assignment)
return self.train_function
def _make_test_function(self):
if not self.test_function:
self.test_function = TPUFunction(
self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
return self.test_function
def _make_fit_function(self):
if not self._fit_function:
self._fit_function = TPUFunction(
self,
model_fn_lib.ModeKeys.TRAIN,
tpu_assignment=self._tpu_assignment)
return self._fit_function
def _make_eval_function(self):
if not self._eval_function:
self._eval_function = TPUFunction(
self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)
return self._eval_function
def _make_predict_function(self):
if not self.predict_function:
self.predict_function = TPUFunction(
self,
model_fn_lib.ModeKeys.PREDICT,
tpu_assignment=self._tpu_assignment)
return self.predict_function
def _initialize_weights(self, cloned_model):
"""Initialize TPU weights.
This is called on the first compile of the TPU model (first call to
fit/predict/evaluate).
Args:
cloned_model: `keras.Model`, TPU model to initialize.
"""
if self._tpu_weights_initialized:
return
self._tpu_model = cloned_model
self._tpu_weights_initialized = True
weights = self._cpu_model.get_weights()
if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):
cpu_optimizer_config = {}
else:
cpu_optimizer_config = self.cpu_optimizer.get_config()
logging.info('Setting weights on TPU model.')
cloned_model.set_weights(weights)
if self._tpu_model.optimizer is None:
# tpu_model may not be compiled, e.g., loading weights and then predict.
return
for k, v in six.iteritems(cpu_optimizer_config):
if k == 'name':
continue
opt_var = getattr(self._tpu_model.optimizer, k)
if isinstance(opt_var, variables.Variable):
logging.info('CPU -> TPU %s: %s {%s}', k, v, K.get_value(opt_var))
K.get_session().run(opt_var.assign(v))
else:
logging.warning('Cannot update non-variable config: %s', k)
@property
def cpu_optimizer(self):
return self._cpu_model.optimizer
def sync_to_cpu(self):
"""Copy weights from the CPU, returning a synchronized CPU model."""
if not self._tpu_weights_initialized:
return self._cpu_model
logging.info('Copying TPU weights to the CPU')
tpu_weights = self._tpu_model.get_weights()
# TFOptimizers have no configurable options
if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):
tpu_optimizer_config = {}
else:
tpu_optimizer_config = self._tpu_model.optimizer.get_config()
self._cpu_model.set_weights(tpu_weights)
for k, v in six.iteritems(tpu_optimizer_config):
logging.info('TPU -> CPU %s: %s', k, v)
if k == 'name':
continue
opt_var = getattr(self.cpu_optimizer, k)
if isinstance(opt_var, variables.Variable):
K.get_session().run(opt_var.assign(v))
else:
logging.warning('Cannot update non-variable config: %s', k)
return self._cpu_model
def get_weights(self):
return self.sync_to_cpu().get_weights()
def save_weights(self, *args, **kw):
return self.sync_to_cpu().save_weights(*args, **kw)
def save(self, *args, **kw):
return self.sync_to_cpu().save(*args, **kw)
def set_weights(self, weights):
# We may not have a TPU model available if we haven't run fit/predict, so
# we can't directly set the TPU weights here.
# Instead, reset CPU model weights and force TPU re-initialization at the
# next call.
self._cpu_model.set_weights(weights)
self._tpu_weights_initialized = False
def load_weights(self, filepath, by_name=False):
self._cpu_model.load_weights(filepath, by_name)
self._tpu_weights_initialized = False
# pylint: disable=bad-continuation
def _validate_shapes(model):
"""Validate that all layers in `model` have constant shape."""
for layer in model.layers:
if isinstance(layer.input_shape, tuple):
input_shapes = [layer.input_shape]
else:
input_shapes = layer.input_shape
if isinstance(layer.output_shape, tuple):
output_shapes = [layer.output_shape]
else:
output_shapes = layer.output_shape
for shape in input_shapes + output_shapes:
for dim in shape[1:]:
if dim is None:
raise ValueError(
"""
Layer %(layer)s has a variable shape in a non-batch dimension. TPU models must
have constant shapes for all operations.
You may have to specify `input_length` for RNN/TimeDistributed layers.
Layer: %(layer)s
Input shape: %(input_shape)s
Output shape: %(output_shape)s
""" % {
'layer': layer,
'input_shape': layer.input_shape,
'output_shape': layer.output_shape
})
# pylint: enable=bad-continuation
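# Hedged sketch (not part of the original module): how `_validate_shapes`
# rejects a layer with a variable non-batch dimension. The local `layers`
# import and the LSTM example are illustrative assumptions; this helper is
# never called by the library code.
def _example_validate_shapes_rejection():
  """Shows that a variable-length sequence input fails TPU shape validation."""
  from tensorflow.python.keras import layers  # Example-only import.
  inputs = layers.Input(shape=(None, 32))  # `None` in a non-batch dimension.
  outputs = layers.LSTM(8)(inputs)
  model = models.Model(inputs=inputs, outputs=outputs)
  try:
    _validate_shapes(model)
  except ValueError:
    return True  # Expected: TPU models require static shapes.
  return False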
@deprecated(
'2019-02-20', 'Switch to tf.contrib.distribute.TPUStrategy. '
'https://www.tensorflow.org/api_docs/python/tf/contrib/distribute/DistributionStrategy'
)
def tpu_model(model, strategy=None):
"""Copy `model` along with weights to the TPU.
Returns a TPU model.
Usage:
```
a = Input(shape=(32,))
b = Dense(32)(a)
model = Model(inputs=a, outputs=b)
# If `num_cores_per_host` is greater than one, batch parallelism will be used
# to run on multiple TPU cores.
strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
model = keras_support.tpu_model(model, strategy)
model.compile(
optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
...)
```
Args:
model: A `tf.keras.Model` instance.
strategy: `TPUDistributionStrategy`. The strategy to use for replicating
model across multiple TPU cores.
Returns:
A new `KerasTPUModel` instance.
"""
_validate_shapes(model)
# TODO(xiejw): Validate TPU model. TPUModel only?
# TODO(xiejw): Validate replicas. Full or 1. Shall we allow subset?
# TODO(xiejw): Adds reduction option.
if strategy is None:
strategy = TPUDistributionStrategy()
else:
if not isinstance(strategy, TPUDistributionStrategy):
raise TypeError(
'`strategy` must have type `tf.contrib.tpu.TPUDistributionStrategy`. '
'Got: {}'.format(type(strategy)))
# If the model has already been initialized, grab the optimizer configuration
# and model weights before entering the TPU session.
if model.optimizer:
if (isinstance(model.optimizer, keras_optimizers.Optimizer) and not
isinstance(model.optimizer, keras_optimizers.TFOptimizer)):
optimizer_config = model.optimizer.get_config()
else:
optimizer_config = None
model_weights = model.get_weights()
else:
model_weights = None
setup_tpu_session(strategy._tpu_cluster_resolver)
# Force initialization of the CPU model in the TPU session.
cpu_model = models.clone_model(model)
if model.optimizer:
cpu_model.compile(
_clone_optimizer(model.optimizer, optimizer_config),
model.loss,
metrics_module.clone_metrics(model._compile_metrics),
model.loss_weights,
model.sample_weight_mode,
metrics_module.clone_metrics(model._compile_weighted_metrics),
)
if model_weights:
cpu_model.set_weights(model_weights)
cpu_model.reset_states()
return KerasTPUModel(cpu_model=cpu_model, strategy=strategy)
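# Hedged end-to-end sketch (not part of the original module): `KerasTPUModel.fit`
# expects `x` to be a zero-argument callable returning a batched `tf.data.Dataset`
# with fully static shapes, plus an explicit `steps_per_epoch`. The shapes, batch
# size, and step counts below are illustrative assumptions, and the `tensorflow`
# import is example-only.
def _example_fit_with_dataset_fn(tpu_keras_model):
  """Illustrative only: wrap dataset construction in a callable for fit()."""
  import tensorflow as tf  # Example-only import.
  def make_dataset():
    images = np.random.rand(1024, 32).astype(np.float32)
    labels = np.random.rand(1024, 1).astype(np.float32)
    return (tf.data.Dataset.from_tensor_slices((images, labels))
            .repeat()
            .batch(128, drop_remainder=True))  # Static shapes are required.
  return tpu_keras_model.fit(make_dataset, steps_per_epoch=8, epochs=2)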
| apache-2.0 |
zizouvb/deeplearning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
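# Hedged sketch (not part of the original helper module): example callables with
# the signatures `preprocess_and_save_data` expects for `normalize` and
# `one_hot_encode`. The [0, 1] scaling and the class count of 10 are
# illustrative assumptions.
def _example_normalize(images):
    """Scale pixel values from [0, 255] to [0, 1]."""
    return images / 255.0
def _example_one_hot_encode(labels, n_classes=10):
    """Return a (len(labels), n_classes) one-hot encoded array."""
    one_hot = np.zeros((len(labels), n_classes))
    one_hot[np.arange(len(labels)), np.asarray(labels)] = 1.0
    return one_hot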
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
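# Hedged usage sketch (not part of the original helper module): consuming the
# preprocessed training batches, e.g. inside a training loop. The batch id,
# batch size, and the `train_step(features, labels)` callable are illustrative
# assumptions.
def _example_training_loop(train_step, batch_id=1, batch_size=128):
    """Illustrative only: feed each (features, labels) batch to `train_step`."""
    for features, labels in load_preprocess_training_batch(batch_id, batch_size):
        train_step(features, labels)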
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/boosted_trees/examples/mnist.py | 61 | 5840 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
a MNIST dataset. We are using layer by layer boosting with diagonal hessian
strategy for multiclass handling, and cross entropy loss.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/mnist.py \
--output_dir="/tmp/mnist" --depth=4 --learning_rate=0.3 --batch_size=60000 \
--examples_per_layer=60000 --eval_batch_size=10000 --num_eval_steps=1 \
--num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(dataset_split,
batch_size,
capacity=10000,
min_after_dequeue=3000):
"""Input function over MNIST data."""
def _input_fn():
"""Prepare features and labels."""
images_batch, labels_batch = tf.train.shuffle_batch(
tensors=[dataset_split.images,
dataset_split.labels.astype(np.int32)],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=4)
features_map = {"images": images_batch}
return features_map, labels_batch
return _input_fn
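# Hedged usage sketch (not part of the original example): calling the returned
# input_fn directly to materialize the feature and label tensors for one data
# split. The split and batch size are illustrative assumptions; the Experiment
# built below wires this up automatically.
def _example_build_input_tensors(dataset_split):
  """Illustrative only: returns the image batch tensor and the label tensor."""
  input_fn = get_input_fn(dataset_split, batch_size=256)
  features, labels = input_fn()
  return features['images'], labels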
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
num_classes = 10
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.num_classes = num_classes
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
learner_config.constraints.max_tree_depth = FLAGS.depth
growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.growing_mode = growing_mode
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
# Create a TF Boosted trees estimator that can take in custom loss.
estimator = GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=num_classes,
examples_per_layer=FLAGS.examples_per_layer,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
data = tf.contrib.learn.datasets.mnist.load_mnist()
train_input_fn = get_input_fn(data.train, FLAGS.batch_size)
eval_input_fn = get_input_fn(data.validation, FLAGS.eval_batch_size)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None)
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--eval_batch_size",
type=int,
default=1000,
help="Size of the batch for eval.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--examples_per_layer",
type=int,
default=1000,
help="Number of examples to accumulate stats for per layer.")
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
pansapiens/mytardis | tardis/tardis_portal/south_migrations/0020_remove_old_datafile_fields.py | 3 | 21156 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Dataset_File.stay_remote'
db.delete_column('tardis_portal_dataset_file', 'stay_remote')
# Deleting field 'Dataset_File.protocol'
db.delete_column('tardis_portal_dataset_file', 'protocol')
# Deleting field 'Dataset_File.verified'
db.delete_column('tardis_portal_dataset_file', 'verified')
# Deleting field 'Dataset_File.url'
db.delete_column('tardis_portal_dataset_file', 'url')
def backwards(self, orm):
# Adding field 'Dataset_File.stay_remote'
db.add_column('tardis_portal_dataset_file', 'stay_remote',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Dataset_File.protocol'
db.add_column('tardis_portal_dataset_file', 'protocol',
self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
keep_default=False)
# Adding field 'Dataset_File.verified'
db.add_column('tardis_portal_dataset_file', 'verified',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Dataset_File.url'
raise RuntimeError("Cannot reverse this migration. 'Dataset_File.url' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tardis_portal.author_experiment': {
'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'})
},
'tardis_portal.datafileparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datafileparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
'dataset_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.dataset': {
'Meta': {'object_name': 'Dataset'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.dataset_file': {
'Meta': {'object_name': 'Dataset_File'},
'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sha512sum': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'})
},
'tardis_portal.datasetparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datasetparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.experiment': {
'Meta': {'object_name': 'Experiment'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution_name': ('django.db.models.fields.CharField', [], {'default': "'The University of Queensland'", 'max_length': '400'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentacl': {
'Meta': {'ordering': "['experiment__id']", 'object_name': 'ExperimentACL'},
'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'tardis_portal.experimentparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.freetextsearchfield': {
'Meta': {'object_name': 'FreeTextSearchField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
},
'tardis_portal.groupadmin': {
'Meta': {'object_name': 'GroupAdmin'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'tardis_portal.license': {
'Meta': {'object_name': 'License'},
'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
'internal_description': ('django.db.models.fields.TextField', [], {}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
'tardis_portal.location': {
'Meta': {'object_name': 'Location'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'transfer_provider': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'})
},
'tardis_portal.parametername': {
'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
},
'tardis_portal.providerparameter': {
'Meta': {'unique_together': "(('location', 'name'),)", 'object_name': 'ProviderParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
'tardis_portal.replica': {
'Meta': {'unique_together': "(('datafile', 'location'),)", 'object_name': 'Replica'},
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'stay_remote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.schema': {
'Meta': {'object_name': 'Schema'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'tardis_portal.token': {
'Meta': {'object_name': 'Token'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 7, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'tardis_portal.userauthentication': {
'Meta': {'object_name': 'UserAuthentication'},
'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'tardis_portal.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['tardis_portal']
| bsd-3-clause |
openaid-IATI/OIPA | OIPA/OIPA/settings.py | 1 | 13127 | # Django settings for OIPA project.
import os
import sys
from ast import literal_eval
from os import environ as env
from celery.schedules import crontab
# from tzlocal import get_localzone
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DEBUG = literal_eval(env.get('OIPA_DEBUG', 'True'))
FTS_ENABLED = literal_eval(env.get('OIPA_FTS_ENABLED', 'True'))
LOGIN_REDIRECT_URL = '/admin/'
LOGOUT_URL = '/logout'
# LOGOUT_REDIRECT_URL = '/admin/logout'
DATA_UPLOAD_MAX_NUMBER_FIELDS = 3000
SECRET_KEY = env.get('OIPA_SECRET_KEY', 'PXwlMOpfNJTgIdQeH5zk39jKfUMZPOUK')
DATABASES = {
'default': {
'ENGINE': env.get(
'OIPA_DB_ENGINE', 'django.contrib.gis.db.backends.postgis'
),
'HOST': os.getenv('OIPA_DB_HOST', 'localhost'),
'PORT': os.getenv('OIPA_DB_PORT', 5432),
'NAME': os.getenv('OIPA_DB_NAME', 'oipa'),
'USER': os.getenv('OIPA_DB_USER', 'oipa'),
'PASSWORD': os.getenv('OIPA_DB_PASSWORD', 'oipa'),
'CONN_MAX_AGE': int(os.getenv('OIPA_DB_CONN_MAX_AGE', 500))
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (os.path.join(BASE_DIR, 'templates'),),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
# 'loaders': [
# ('django.template.loaders.cached.Loader', [
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# ]),
# ],
},
},
]
def rel(*x):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
sys.path.insert(0, rel('..', 'lib'))
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.get('OIPA_ALLOWED_HOSTS', '*').split()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
# Celery is needed UTC
# TIME_ZONE = get_localzone().zone
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
APPEND_SLASH = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# URL for static files
STATIC_URL = '/static/'
STATIC_ROOT = os.environ.get(
'OIPA_STATIC_ROOT',
os.path.join(
os.path.dirname(BASE_DIR),
'public/static'))
MEDIA_URL = '/media/'
MEDIA_ROOT = os.environ.get(
'OIPA_MEDIA_ROOT',
os.path.join(
os.path.dirname(BASE_DIR),
'public/media'))
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'OIPA.wsgi.application'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'api.middleware.FileExportMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django_otp.middleware.OTPMiddleware',
]
ROOT_URLCONF = 'OIPA.urls'
INSTALLED_APPS = [
# 'two-factor
'django_otp',
'django_otp.plugins.otp_static',
'django_otp.plugins.otp_totp',
# 'two_factor',
# 'otp_yubikey',
# 'django_rq',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'grappelli',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.gis',
'corsheaders',
'common',
'iati.apps.IatiConfig',
'iati_organisation.apps.IatiOrganisationConfig',
'iati_synchroniser.apps.IatiSynchroniserConfig',
'geodata.apps.GeodataConfig',
'currency_convert.apps.CurrencyConvertConfig',
'traceability.apps.TraceabilityConfig',
'api',
'task_queue',
'djsupervisor',
'rest_framework',
'rest_framework_csv',
'django_extensions',
'iati_vocabulary.apps.IatiVocabularyConfig',
'iati_codelists.apps.IatiCodelistsConfig',
'test_without_migrations',
'rest_framework.authtoken',
'iati.permissions',
'rest_auth',
'rest_auth.registration',
'django_filters',
'markdownify',
'solr',
'django_celery_beat',
'django_celery_results'
]
RQ_SHOW_ADMIN_LINK = True
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'api.pagination.CustomPagination',
'PAGE_SIZE': 10,
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'api.renderers.PaginatedCSVRenderer',
'api.renderers.XlsRenderer',
'api.renderers.IATIXMLRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
RQ_REDIS_URL = env.get('OIPA_RQ_REDIS_URL', 'redis://localhost:6379/0')
RQ_QUEUES = {
'default': {
'URL': RQ_REDIS_URL,
'DEFAULT_TIMEOUT': 10800, # 3 hours
},
'parser': {
'URL': RQ_REDIS_URL,
'DEFAULT_TIMEOUT': 5400,
},
'export': {
'URL': RQ_REDIS_URL,
'DEFAULT_TIMEOUT': 5400,
},
'document_collector': {
'URL': RQ_REDIS_URL,
'DEFAULT_TIMEOUT': 5400,
},
'solr': {
'URL': RQ_REDIS_URL,
'DEFAULT_TIMEOUT': 10800,
}
}
# TWO_FACTOR_FORCE_OTP_ADMIN = True
# LOGIN_URL = 'two_factor:login'
# LOGIN_REDIRECT_URL = '/admin' # Redirect admin dashboard
GRAPPELLI_ADMIN_TITLE = 'OIPA admin'
ADMINFILES_UPLOAD_TO = 'csv_files'
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/.*$'
CORS_ALLOW_METHODS = ('GET',)
IATI_PARSER_DISABLED = False
CONVERT_CURRENCIES = True
ROOT_ORGANISATIONS = []
ERROR_LOGS_ENABLED = literal_eval(env.get('OIPA_ERROR_LOGS_ENABLED', 'True'))
DEFAULT_LANG = 'en'
# django-all-auth
ACCOUNT_EMAIL_VERIFICATION = 'none'
# django-rest-auth
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'api.permissions.serializers.UserSerializer',
}
REST_AUTH_REGISTER_SERIALIZERS = {
'REGISTER_SERIALIZER': 'api.permissions.serializers.RegistrationSerializer'
}
# EXPORT_COMMENT = 'Published with tools developed by Zimmerman & Zimmerman'
FIXTURE_DIRS = (
os.path.join(BASE_DIR, '../fixtures/'),
)
CKAN_URL = env.get('OIPA_CKAN_URL', 'https://iati-staging.ckan.io')
API_CACHE_SECONDS = int(env.get('OIPA_API_CACHE_SECONDS', 0))
CACHES = {
'default': {
'BACKEND': env.get(
'OIPA_CACHES_DEFAULT_BACKEND', 'redis_cache.RedisCache'
),
'LOCATION': env.get('OIPA_CACHES_DEFAULT_LOCATION', 'localhost:6379'),
},
'api': {
'BACKEND': env.get(
'OIPA_CACHES_DEFAULT_BACKEND', 'redis_cache.RedisCache'
),
'LOCATION': env.get('OIPA_CACHES_DEFAULT_LOCATION', 'localhost:6379'),
}
}
OIPA_LOG_LEVEL = env.get('OIPA_LOG_LEVEL', 'ERROR')
# These settings are overridden in the development_settings and
# production_settings modules:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
# Useful for local development:
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
# All other errors:
'': {
'handlers': ['console'],
'level': OIPA_LOG_LEVEL,
'propagate': False,
},
# IATI Parser related errors:
'iati.parser': {
'handlers': ['console'],
'level': OIPA_LOG_LEVEL,
'propagate': False,
},
# Django-related errors:
'django': {
'handlers': ['console'],
'level': OIPA_LOG_LEVEL,
'propagate': False,
},
},
}
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_USE_CACHE': 'api',
# reset cache every x seconds:
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 1 * 60 * 60 * 24 * 7, # 1 week
}
# DATA_PLUGINS is a dict holding data that is not related to the IATI data.
# For example, for the M49 Regions import, add a code block like the
# following to local_settings.py:
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DATA_PLUGINS = {
'codelist': {
'm49_region_file': '{base_dir}/plugins/data/{filename}'.format(
base_dir=BASE_DIR, filename='regions.json')
}
}
# DATA_PLUGINS = {}
# A setting indicating whether to save XML datasets (files) to the local
# machine or not:
DOWNLOAD_DATASETS = False
# CELERY CONFIG
CELERY_ACKS_LATE = True
CELERY_WORKER_PREFETCH_MULTIPLIER = 1 # limiting the number of reserved tasks.
CELERY_TIMEZONE = 'UTC'
CELERY_ENABLE_UTC = True
CELERY_TASK_ROUTES = {'task_queue.tasks.revoke_all_tasks': {'queue':
'revoke_queue'},
'task_queue.tasks.continuous_parse_all_existing_sources_task': {'queue': 'revoke_queue'}} # NOQA: E501
CELERY_BROKER_URL = 'amqp://localhost'
CELERY_RESULT_BACKEND = 'django-db' # 'rpc://localhost'
# 'db+postgresql://oipa:oipa@localhost/oipa'
CELERY_ALWAYS_EAGER = True
CELERY_BROKER_POOL_LIMIT = None
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IMPORTS = 'iati.PostmanJsonImport.tasks'
CELERY_BEAT_SCHEDULE = {
'getting_postman-api': {
'task': 'iati.PostmanJsonImport.tasks.get_postman_api',
'schedule': crontab(minute=0, hour=0),
},
'Update the exchange rates': {
'task': 'task_queue.tasks.update_exchange_rates',
'schedule': crontab(minute=0, hour=0),
},
}
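# Both periodic tasks above are scheduled with crontab(minute=0, hour=0),
# i.e. once a day at midnight (UTC, per CELERY_TIMEZONE above).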
SOLR = {
'indexing': False,
'url': 'http://localhost:8983/solr',
'cores': {
'activity': 'activity',
'activity-sector': 'activity-sector',
'budget': 'budget',
'codelist': {
'country': 'codelist-country',
'region': 'codelist-region'
},
'dataset': 'dataset',
'datasetnote': 'datasetnote',
'organisation': 'organisation',
'publisher': 'publisher',
'result': 'result',
'transaction': 'transaction',
'transaction-sector': 'transaction-sector'
}
}
VALIDATION = {
'host': 'http://iativalidator.iatistandard.org/',
'api': {
'root': 'api',
'version': '/v1',
'urls': {
'post_file': '/iati-testfiles/file/source',
'start_validation': '/iati-testdatasets/{validation_id}',
'get_json_file': '/iati-files/file/json/{json_file}',
'get_json_file_ad_hoc': '/iati-testfiles/file/json/{json_file}',
},
'max_loop_process': 50,
'sleep_second_process': 5,
'valid_status': 'success',
'retry': {
'max_retries': 5,
}
}
}
# To be overwritten by local_settings.py in any case for security purposes.
POSTMAN_API_KEY = 'OverwriteFromLocalSettings'
try:
from .local_settings import * # noqa: F401, F403
except ImportError:
pass
| agpl-3.0 |
dvorka/endurance-training-log | src/import_concept2_to_etl_csv.py | 1 | 5751 | #!/usr/bin/env python
#
# Endurance Training Log
#
# Copyright (C) 2020 Martin Dvorak <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import datatable as dt
from src.import_strava_torben_to_etl_csv import EtlDataset
C2_YEARS = [2009, 2010, 2011, 2012, 2013, 2015, 2016, 2017, 2019, 2020]
FILE_SRC_CSV = (
"/home/dvorka/p/endurance-training-log/github/endurance-training-log/datasets/"
"concept2.com/concept2-season-"
)
FILE_DST_CSV_FILE = (
"/home/dvorka/p/endurance-training-log/github/endurance-training-log/test/datasets/"
"concept2-training-log-import-"
)
# TODO fix rank
# TODO quote description to avoid problems
class Concept2Dataset:
"""concept2.com dataset exported from training log web.
Columns description:
"ID",
[source]
concept2:<ID>
"Date",
[year, month, day, when]
"Description",
[description]
"Work Time (Formatted)",
"Work Time (Seconds)",
[time]
int(1194.3)
"Rest Time (Formatted)",
"Rest Time (Seconds)",
"Work Distance",
[distance]
"Rest Distance",
"Stroke Rate/Cadence",
[description]
if not None then "@24"
"Stroke Count",
"Pace",
[description]
if not None then "1:59"/500m
"Avg Watts",
"Cal/Hour",
"Total Cal",
[kcal]
"Avg Heart Rate",
"Drag Factor",
[description]
if not None then DF"122"
"Age",
"Weight",
"Type",
"Ranked",
[intensity]
if not None then "rank"
"Comments"
[description]
if not None then ("...")
"""
URL_CONCEPT2_ACTIVITY = "https://log.concept2.com/profile/737678/log/"
def __init__(self, dataset_path: str):
self.dataset_path = dataset_path
self.frame: dt.Frame = dt.fread(dataset_path)
def __str__(self) -> str:
result: str = f"Dataset path: {self.dataset_path}\n"
result = f"{result}Columns:\n"
for name in self.frame.names:
result = f"{result} {name}\n"
return result
def to_etl_dataset(self) -> dt.Frame:
etl_frame: dt.Frame = EtlDataset.get_empty_frame()
for row in range(self.frame.shape[0]):
print(f"{row}: {self.frame[row,'ID']}")
new_row: dict = EtlDataset.get_empty_frame_dict()
# "2020-03-04 11:34:00"
date_time_obj = datetime.datetime.strptime(
self.frame[row, "Date"], "%Y-%m-%d %H:%M:%S"
)
new_row["year"] = [date_time_obj.year]
new_row["month"] = [date_time_obj.month]
new_row["day"] = [date_time_obj.day]
new_row["when"] = [
f"{date_time_obj.hour:02}:{date_time_obj.minute:02}"
f":{date_time_obj.second:02}"
]
new_row["activity"] = ["rowing"]
description: str = ""
cadence = self.frame[row, "Stroke Rate/Cadence"]
description = f"{description} @{cadence}" if cadence else f"{description}"
pace = self.frame[row, "Pace"]
description = f"{description} {pace}/500m" if pace else f"{description}"
drag = self.frame[row, "Drag Factor"]
description = f"{description} DF{drag}" if drag else f"{description}"
comment = self.frame[row, "Comments"]
description = f"{description} ({comment})" if comment else f"{description}"
new_row["description"] = [description]
new_row["distance_meters"] = [int(self.frame[row, "Work Distance"])]
new_row["time_seconds"] = [int(self.frame[row, "Work Time (Seconds)"])]
new_row["total_distance_meters"] = new_row["distance_meters"]
new_row["total_time_seconds"] = new_row["time_seconds"]
speed: float = float(new_row["distance_meters"][0]) / float(
new_row["time_seconds"][0]
) * 3.6
new_row["avg_speed"] = [speed]
new_row["max_speed"] = new_row["avg_speed"]
new_row["elevation_gain"] = [0]
avg_watts = self.frame[row, "Avg Watts"]
new_row["avg_watts"] = [int(avg_watts) if avg_watts else 0]
new_row["kcal"] = [self.frame[row, "Total Cal"]]
new_row["commute"] = [False]
intensity = self.frame[row, "Ranked"]
new_row["intensity"] = ["rank" if intensity else "fartlek"]
new_row["gear"] = ["my_concept2_e"]
new_row["url"] = [
f"{Concept2Dataset.URL_CONCEPT2_ACTIVITY}{self.frame[row, 'ID']}"
]
new_row["source"] = [f"concept2:{self.frame[row, 'ID']}"]
etl_frame.rbind(dt.Frame(new_row))
print(f"Imported frame:\n{etl_frame}")
return etl_frame
#
# main
#
if __name__ == "__main__":
for year in C2_YEARS:
concept2_dataset = Concept2Dataset(
f"{FILE_SRC_CSV}{year}.csv"
)
print(concept2_dataset)
etl_frame = concept2_dataset.to_etl_dataset()
etl_frame.to_csv(
f"{FILE_DST_CSV_FILE}{year}.csv"
)
| apache-2.0 |
yonglehou/scikit-learn | sklearn/__check_build/__init__.py | 342 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/ODDSTKernel_example_calculate_matrix.py | 1 | 1663 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from skgraph.kernel.ODDSTGraphKernel import ODDSTGraphKernel
from skgraph.datasets import load_graph_datasets
if __name__=='__main__':
    if len(sys.argv)<2:
sys.exit("python ODDKernel_example.py filename")
max_radius=3
la=1
#hashs=int(sys.argv[3])
njobs=1
name=str(sys.argv[1])
g_it=load_graph_datasets.load_graphs_bursi()
ODDkernel=ODDSTGraphKernel(r=max_radius,l=la)
GM=ODDkernel.computeKernelMatrixTrain([g_it.graphs[i] for i in range(21)]) #Parallel ,njobs
GMsvm=[]
for i in range(len(GM)):
GMsvm.append([])
GMsvm[i]=[i+1]
GMsvm[i].extend(GM[i])
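    # Each Gram matrix row is prefixed with its 1-based example index; the
    # companion cross-validation script drops this first column again via
    # gram = km[:, 1:] before using the kernel values.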
from sklearn import datasets
print "Saving Gram matrix"
#datasets.dump_svmlight_file(GMsvm,g_it.target, name+".svmlight")
datasets.dump_svmlight_file(GMsvm,[g_it.target[i] for i in range(21)], name+".svmlight")
#print GM | gpl-3.0 |
nickgentoo/scikit-learn-graph | scripts/cross_validation_ICML16_norm_10fold.py | 1 | 5398 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '',''))
import numpy as np
#from skgraph import datasets
from sklearn import svm
#from skgraph.ioskgraph import *
from math import sqrt
from copy import copy
import sys
#"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')"
if len(sys.argv)<4:
sys.exit("python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile")
c=float(sys.argv[2])
##TODO read from libsvm format
from sklearn.datasets import load_svmlight_file
#TODO faster method to load the matrices (also for dumping)
#from svmlight_loader import load_svmlight_file # line 22 is not needed
km, target_array = load_svmlight_file(sys.argv[1])
#print type(target_array)
#print target_array
#Check whether the target array contains +1 and -1! If it has 0s, replace the 0s with -1s
if not -1 in target_array:
print "WARNING: no -1 in target array! Changing 0s to -1s"
target_array = np.array([-1 if x == 0 else x for x in target_array])
#print km
#drop the index column
##############kmgood=km[:,1:].todense()
gram=km[:,1:].todense()
kmgood=copy(gram)
#NORMALIZATION
for i in xrange(len(target_array)):
for j in xrange(0,len(target_array)):
#print i,j,kmgood[i,j],kmgood[i,i],kmgood[j,j]
if kmgood[i,i]*kmgood[j,j]==0:
print "WARNING: avoided divizion by zero"
gram[i,j]=0
else:
gram[i,j]=kmgood[i,j]/sqrt(kmgood[i,i]*kmgood[j,j])
#-----------------------------------
print "matrix normalization completed"
#from sklearn.metrics import make_scorer
# (16) in the paper
def my_custom_loss_func(ground_truth, predictions):
total_loss=0.0
for gt,p in zip(ground_truth, predictions):
#print gt, p
diff = (1.0 - (gt * p)) / 2.0
if diff<0:
diff=0.0
if diff > 1.0:
diff=1.0
total_loss+=diff
return total_loss / len(predictions)
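# Worked example for the loss above: with ground truth gt = +1 and margin
# prediction p = 0.2 the per-pair term is (1 - 1*0.2)/2 = 0.4; terms are
# clipped to [0, 1] before averaging, so the reported loss always lies in
# [0, 1] (0 corresponds to confidently correct predictions).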
from sklearn import cross_validation
import time
start = time.time()
for rs in range(42,43):
#for rs in range(42,53):
f=open(str(sys.argv[3]+".seed"+str(rs)+".c"+str(c)),'w')
kf = cross_validation.StratifiedKFold(target_array, n_folds=10, shuffle=True,random_state=rs)
#print kf
#remove column zero because
#first entry of each line is the index
#gram=km[:,1:].todense()
f.write("Total examples "+str(len(gram))+"\n")
f.write("|W| \t train_loss \t test_loss\n")
#print gram
# normalization
from math import sqrt
#for i in range(len(gram)):
# for j in range(len(gram)):
# gram[i,j]=gram[i,j]/sqrt(gram[i,i]+gram[j,j])
sc=[]
for train_index, test_index in kf:
#print("TRAIN:", train_index, "TEST:", test_index)
        #generated train and test lists, including indices of the examples in training/test
        #for the specific fold. Indices start from 0 now
clf = svm.SVC(C=c, kernel='precomputed')
train_gram = [] #[[] for x in xrange(0,len(train))]
test_gram = []# [[] for x in xrange(0,len(test))]
#compute training and test sub-matrices
index=-1
for row in gram:
index+=1
if index in train_index:
train_gram.append([gram[index,i] for i in train_index])
else:
test_gram.append([gram[index,i] for i in train_index])
#print gram
X_train, X_test, y_train, y_test = np.array(train_gram), np.array(test_gram), target_array[train_index], target_array[test_index]
clf.fit(X_train, y_train)
#print |W|^2= alpha Q alpha, where Q_ij= y_i y_j K(x_i,x_j)
alpha = clf.dual_coef_
yw=target_array[clf.support_]
Kw=gram[clf.support_,:][:,clf.support_]
#print yw.shape, Kw.shape, gram.shape
yw.shape=(yw.shape[0],1)
YM=np.ones(yw.shape[0])*yw.T
Q= np.multiply(np.multiply(YM,Kw),YM.T)
#print Q.shape
#print alpha.shape
#alpha.shape=(alpha.shape[1],1)
W2=alpha*Q*alpha.T
print "|W|" , sqrt(W2),
f.write(str(sqrt(W2))+"\t")
#loss = make_scorer(my_custom_loss_func, greater_is_better=False)
from sklearn.metrics import accuracy_score
#predictions on training set
y_train_predicted=clf.decision_function(X_train)
#print type( my_custom_loss_func(y_train, y_train_predicted))
print " training loss ",(str( my_custom_loss_func(y_train, y_train_predicted))),
f.write(str(my_custom_loss_func(y_train, y_train_predicted))+"\t")
# predict on test examples
y_test_predicted=clf.decision_function(X_test)
#print y_test.shape, y_test_predicted.shape
print " test loss ",(str( my_custom_loss_func(y_test, y_test_predicted)))
y_test_predicted_binary=clf.predict(X_test)
#print y_test
#print y_test_predicted_binary
#print "Accuracy: ", accuracy_score(y_test, y_test_predicted_binary)
#y_test_sign=map(np.sign, y_test_predicted)
#print "Accuracy_decision: ", accuracy_score(y_test, y_test_sign)
sc.append(my_custom_loss_func(y_test, y_test_predicted))
f.write(str( my_custom_loss_func(y_test, y_test_predicted))+"\n")
f.close()
end = time.time()
print "Total time:", end-start
scores=np.array(sc)
print "Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() / 2)
| gpl-3.0 |
rui-castro/Sick-Beard | lib/guessit/transfo/split_path_components.py | 18 | 1697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit import fileutils
from os.path import splitext
class SplitPathComponents(Transformer):
def __init__(self):
Transformer.__init__(self, 255)
def process(self, mtree, options=None):
"""first split our path into dirs + basename + ext
:return: the filename split into [ dir*, basename, ext ]
"""
if not options.get('name_only'):
components = fileutils.split_path(mtree.value)
basename = components.pop(-1)
components += list(splitext(basename))
components[-1] = components[-1][1:] # remove the '.' from the extension
mtree.split_on_components(components)
else:
mtree.split_on_components([mtree.value, ''])
| gpl-3.0 |
rishizsinha/project-beta | code/scenes_pred.py | 4 | 7072 |
""" The following script will analyze the scenes data. Specifically, it will:
* Try to find patterns between neural responses and scenes
* Use SVM and KNN to link these together
* Predict scenes based on BOLD activity
"""
#Import Standard Libraries
from __future__ import print_function, division
import numpy as np
import pandas as pd
import nibabel as nib
import matplotlib.pyplot as plt
import itertools
from pylab import *
#Local Modules
import utils.data_loading as dl
import utils.save_files as sv
import utils.scenes as sn
#Clustering Libraries
from sklearn import preprocessing as pp
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
#Load in filtered data and normalize
masked_path = "../data/filtered_data.npy"
combined_runs = pp.normalize(np.transpose(np.load("../data/filtered_data.npy")))
#Too many predictors (55k) - filter to around 1500 predictors
xvar = np.var(combined_runs, axis=0)
varmask = np.where(xvar > .0000000015)[0]
combined_runs = combined_runs.T[varmask] #1584 voxels
#Load in scenes data
scenes_path = '../data/scene_times_nums.csv'
scenes = pd.read_csv(scenes_path, header = None)
scenes = scenes.values #Now just a numpy array
TR = 2
NUM_VOLUMES = combined_runs.shape[-1] #3543
ONSET_TIMES = scenes[:,0]
ONSET_TIMES_NORMED = ONSET_TIMES - 17 #First recorded scene occurs at t = 17 sec
DURATION = scenes[:,1]
LABELS = scenes[:,3]
SCAN_TIMES = np.arange(start=0, stop=2*NUM_VOLUMES, step=2)
#Creates a list that tells us scene id at given scan time
factor_grid = []
for scan_time in SCAN_TIMES:
index_list = np.where(ONSET_TIMES_NORMED < scan_time)[0]
if scan_time == 0:
label_index = 0
else:
label_index = index_list[-1]
factor_id = LABELS[label_index]
factor_grid.append(factor_id)
factor_grid = np.array(factor_grid) #Convert to np array for future analysis
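#factor_grid[i] is therefore the id of the scene on screen during scan i
#(scans are TR = 2 seconds apart): the most recent onset before that scan
#time, with scan 0 mapped to the first listed scene.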
#Grouped Factors Ids
GUMP_SCENES_IDS = [38, 40, 41, 42] #factor ids of Gump scenes
MILITARY_IDS = [52, 62, 77, 78, 80, 81, 82, 83]
SCHOOL = [22,43, 67, 61, 69]
SAVANNA = [66]
POLITICAL = [86, 85, 2, 87, 84]
OUTSIDE = [27, 73, 58, 53, 59]
CHURCH = [20]
DEATH = [16, 48]
############ SVM and KNN Analysis #################################
#Comparison between Military and Gump Scenes
#Set up training and testing samples and data
all_ids_1 = GUMP_SCENES_IDS + MILITARY_IDS
sample1, missing_facts1 = sn.gen_sample_by_factors(all_ids_1, factor_grid, True, prop=.9)
train_samp1 = sn.get_training_samples(sample1)
test_samp1 = sn.get_tst_samples(sample1)
train1_labs, train1_times = sn.make_label_by_time(train_samp1)
test1_labs, test1_times = sn.make_label_by_time(test_samp1)
on_off1_train = sn.on_off_course(GUMP_SCENES_IDS, train1_labs)
on_off1_test = sn.on_off_course(GUMP_SCENES_IDS, test1_labs)
subarr1_train = combined_runs[:,train1_times].T #rows correspond to images, columns to voxels
subarr1_test = combined_runs[:,test1_times].T #data we feed into our classifier
clf = svm.SVC(C=100, kernel='linear') #Parameters obtained through cross-validation
clf.fit(subarr1_train, on_off1_train)
pred_svm1 = clf.predict(subarr1_test)
accuracy_score(on_off1_test, pred_svm1) #52%
knn = KNeighborsClassifier()
knn.fit(subarr1_train, on_off1_train)
pred_knn1 = knn.predict(subarr1_test)
accuracy_score(on_off1_test, pred_knn1) #69%
#Compare more scenes
all_ids_2 = GUMP_SCENES_IDS + SCHOOL + MILITARY_IDS + SAVANNA + POLITICAL + OUTSIDE + DEATH + CHURCH
sample2, missing_facts2 = sn.gen_sample_by_factors(all_ids_2, factor_grid, True, prop=.9)
train_samp2 = sn.get_training_samples(sample2)
test_samp2 = sn.get_tst_samples(sample2)
train2_labs, train2_times = sn.make_label_by_time(train_samp2)
test2_labs, test2_times = sn.make_label_by_time(test_samp2)
#Set up ids for each category
labels2_train = []
for val in train2_labs:
if val in GUMP_SCENES_IDS:
labels2_train.append(0)
elif val in SCHOOL:
labels2_train.append(1)
elif val in MILITARY_IDS:
labels2_train.append(2)
elif val in SAVANNA:
labels2_train.append(3)
elif val in POLITICAL:
labels2_train.append(4)
elif val in OUTSIDE:
labels2_train.append(5)
elif val in DEATH:
labels2_train.append(6)
else:
labels2_train.append(7)
labels2_train = np.array(labels2_train)
labels2_test = []
for val in test2_labs:
if val in GUMP_SCENES_IDS:
labels2_test.append(0)
elif val in SCHOOL:
labels2_test.append(1)
elif val in MILITARY_IDS:
labels2_test.append(2)
elif val in SAVANNA:
labels2_test.append(3)
elif val in POLITICAL:
labels2_test.append(4)
elif val in OUTSIDE:
labels2_test.append(5)
elif val in DEATH:
labels2_test.append(6)
else:
labels2_test.append(7)
labels2_test = np.array(labels2_test)
subarr2_train = combined_runs[:,train2_times].T
subarr2_test = combined_runs[:,test2_times].T
clf = svm.SVC(C=100, kernel='linear') #Parameters obtained through cross-validation
clf.fit(subarr2_train, labels2_train)
pred_svm2 = clf.predict(subarr2_test)
accuracy_score(labels2_test, pred_svm2) #27.7%
knn = KNeighborsClassifier()
knn.fit(subarr2_train, labels2_train)
pred_knn2 = knn.predict(subarr2_test)
accuracy_score(labels2_test, pred_knn2) #34%
#KNN looks better - let's see how it performs by category
#Check performance over the 8 categories
gump_indcs = np.where(labels2_test == 0)[0]
school_inds = np.where(labels2_test == 1)[0]
milit_incs = np.where(labels2_test == 2)[0]
savan_indcs = np.where(labels2_test == 3)[0]
political_indcs = np.where(labels2_test == 4)[0]
outside_indcs = np.where(labels2_test == 5)[0]
death_indcs = np.where(labels2_test == 6)[0]
church_inds = np.where(labels2_test == 7)[0]
by_cat = [gump_indcs, school_inds, milit_incs, savan_indcs, political_indcs,
outside_indcs, death_indcs, church_inds]
perform_by_cat = []
actual_count = []
pred_count = []
for scene_ind in by_cat:
    acc = accuracy_score(labels2_test[scene_ind], pred_knn2[scene_ind])
    weight = scene_ind.shape[0]
perform_by_cat.append(acc)
actual_count.append(weight)
#Plot this
actual_count = np.array(actual_count)
relative_weights = actual_count / sum(actual_count)
#create labels for pie chart
categories = ['gump', 'school', 'military', 'savanna', 'political',
'outside', 'death', 'church']
categories_per = []
for index, name in enumerate(categories):
name2 = name + ': ' + '' + str(round(perform_by_cat[index], 3) * 100) + '%'
categories_per.append(name2)
fig = plt.figure()
ax = fig.gca()
ax.pie(relative_weights, labels=categories_per,autopct='%1.1f%%')
plt.title('Category Weight and Performance by Category')
plt.savefig('../figure/scenes_pie_chart.png')
plt.close()
| bsd-3-clause |
ResidentMario/geoplot | examples/plot_obesity.py | 1 | 1380 | """
Cartogram of US states by obesity rate
======================================
This ``cartogram`` example showcases regional trends for obesity in the United States. Rugged
mountain states are the healthiest; the deep South, the unhealthiest.
This example was inspired by the `"Non-Contiguous Cartogram" <https://bl.ocks.org/mbostock/4055908>`_
example in the D3.JS example gallery.
"""
import pandas as pd
import geopandas as gpd
import geoplot as gplt
import geoplot.crs as gcrs
import matplotlib.pyplot as plt
import mapclassify as mc
# load the data
obesity_by_state = pd.read_csv(gplt.datasets.get_path('obesity_by_state'), sep='\t')
contiguous_usa = gpd.read_file(gplt.datasets.get_path('contiguous_usa'))
contiguous_usa['Obesity Rate'] = contiguous_usa['state'].map(
lambda state: obesity_by_state.query("State == @state").iloc[0]['Percent']
)
scheme = mc.Quantiles(contiguous_usa['Obesity Rate'], k=5)
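# In a non-contiguous cartogram each polygon is rescaled in place by a data
# value: scale='Obesity Rate' shrinks the lowest-rate states to 75% of their
# true footprint (limits=(0.75, 1)), while hue/cmap colour the same variable
# using the quantile scheme computed above.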
ax = gplt.cartogram(
contiguous_usa,
scale='Obesity Rate', limits=(0.75, 1),
projection=gcrs.AlbersEqualArea(central_longitude=-98, central_latitude=39.5),
hue='Obesity Rate', cmap='Reds', scheme=scheme,
linewidth=0.5,
legend=True, legend_kwargs={'loc': 'lower right'}, legend_var='hue',
figsize=(12, 7)
)
gplt.polyplot(contiguous_usa, facecolor='lightgray', edgecolor='None', ax=ax)
plt.title("Adult Obesity Rate by State, 2013")
| mit |
longubu/datumio | examples/keras/__init__.py | 1 | 1674 | # COPYRIGHT
# ---------
# All contributions by Long Van Ho:
# Copyright (c) 2015 Long Van Ho
# All rights reserved.
#
# All contributions by Sander Dieleman:
# Copyright (c) 2015 Sander Dieleman
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2015, the respective contributors.
# All rights reserved.
#
# LICENSE
# ---------
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
"""Imports mnist tutorial libraries used by tutorial examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.datasets import cifar10
from keras.datasets import mnist
| mit |
matthias-k/pysaliency | pysaliency/external_datasets/figrim.py | 1 | 7300 | from __future__ import absolute_import, print_function, division
import zipfile
import os
import glob
import numpy as np
from scipy.io import loadmat
from natsort import natsorted
from boltons.fileutils import mkdir_p
from ..datasets import FixationTrains
from ..utils import (
TemporaryDirectory,
download_and_check,
atomic_directory_setup,
)
from .utils import create_stimuli, _load
def _load_FIGRIM_data(filename, stimuli_indices, stimulus_type):
data = loadmat(filename)['allImages'].flatten()
xs = []
ys = []
ts = []
ns = []
train_subjects = []
which_times = []
which_time_names = ['enc', 'rec', 'rec2']
stimulus_types = []
responses = []
for stimulus_data in data:
n = stimuli_indices[stimulus_data['filename'][0]]
# category = stimulus_data['category'][0] # TODO: use
for subject, subject_data in enumerate(stimulus_data['userdata'].flatten()):
if not subject_data['trial']:
# No data for this subject and this stimulus
continue
for which_time in which_time_names:
fixations = subject_data['fixations'][0, 0][which_time]
if not len(fixations):
continue
# if len(fixations) and which_time != 'enc':
# print("Problem:", n, subject_name, which_time)
subject_response = subject_data['SDT'][0][which_time_names.index(which_time)]
xs.append(fixations[:, 0])
ys.append(fixations[:, 1])
ts.append(np.arange(len(xs[-1])))
ns.append(n)
train_subjects.append(subject)
which_times.append(which_time_names.index(which_time))
stimulus_types.append(stimulus_type)
responses.append(subject_response)
return xs, ys, ts, ns, train_subjects, which_times, stimulus_types, responses
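# A minimal usage sketch of the loader defined below (kept as a comment so that
# importing this module has no side effects; the cache directory name is only an
# illustrative choice, not something this file defines):
#
#     stimuli, fixations = get_FIGRIM(location='pysaliency_datasets')
#     print(len(stimuli), len(fixations.x))
#
# The scanpath-level annotations documented in get_FIGRIM's docstring (which_time,
# stimulus_type, response) are attached to the returned fixation object.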
def get_FIGRIM(location=None):
"""
Loads or downloads and caches the FIGRIM dataset. The dataset
    consists of >2700 scenes of size 1000x1000px
    and the fixations of subjects recorded while they performed a repetition
    recognition task with 3 seconds presentation time.
    Subject responses etc. are included.
@type location: string, defaults to `None`
@param location: If and where to cache the dataset. The dataset
        will be stored in the subdirectory `FIGRIM` of
location and read from there, if already present.
@return: Stimuli, FixationTrains
.. note::
This dataset comes with additional annotations:
- stimulus_type: 0=filler, 1=target
- which_time: 0=encoding, 1=first recognition, 2=second recognition
- response: 1=hit, 2=false alarm, 3=miss, 4=correct rejection
.. seealso::
Bylinskii, Zoya and Isola, Phillip and Bainbridge, Constance and Torralba, Antonio and Oliva, Aude. Intrinsic and Extrinsic Effects on Image Memorability [Vision research 2015]
http://figrim.mit.edu/index_eyetracking.html
"""
if location:
location = os.path.join(location, 'FIGRIM')
if os.path.exists(location):
stimuli = _load(os.path.join(location, 'stimuli.hdf5'))
fixations = _load(os.path.join(location, 'fixations.hdf5'))
return stimuli, fixations
os.makedirs(location)
with atomic_directory_setup(location):
with TemporaryDirectory(cleanup=True) as temp_dir:
download_and_check('http://figrim.mit.edu/Fillers.zip',
os.path.join(temp_dir, 'Fillers.zip'),
'dc0bc9561b5bc90e158ec32074dd1060')
download_and_check('http://figrim.mit.edu/Targets.zip',
os.path.join(temp_dir, 'Targets.zip'),
'2ad3a42ebc377efe4b39064405568201')
download_and_check('https://github.com/cvzoya/figrim/blob/master/targetData/allImages_release.mat?raw=True',
os.path.join(temp_dir, 'allImages_release.mat'),
'c72843b05e95ab27594c1d11c849c897')
download_and_check('https://github.com/cvzoya/figrim/blob/master/fillerData/allImages_fillers.mat?raw=True',
os.path.join(temp_dir, 'allImages_fillers.mat'),
'ce4f8b4961005d62f7a21191a64cab5e')
# Stimuli
mkdir_p(os.path.join(temp_dir, 'stimuli'))
print('Creating stimuli')
f = zipfile.ZipFile(os.path.join(temp_dir, 'Fillers.zip'))
f.extractall(os.path.join(temp_dir, 'stimuli'))
f = zipfile.ZipFile(os.path.join(temp_dir, 'Targets.zip'))
f.extractall(os.path.join(temp_dir, 'stimuli'))
stimuli_src_location = os.path.join(temp_dir, 'stimuli')
stimuli_target_location = os.path.join(location, 'Stimuli') if location else None
images = glob.glob(os.path.join(stimuli_src_location, '**', '**', '*.jpg'))
images = [os.path.relpath(img, start=stimuli_src_location) for img in images]
stimuli_filenames = natsorted(images)
stimuli = create_stimuli(stimuli_src_location, stimuli_filenames, stimuli_target_location)
stimuli_basenames = [os.path.basename(filename) for filename in stimuli_filenames]
stimulus_indices = {s: stimuli_basenames.index(s) for s in stimuli_basenames}
# FixationTrains
print('Creating fixations')
print('Fillers...')
(xs_filler,
ys_filler,
ts_filler,
ns_filler,
train_subjects_filler,
which_times_filler,
stimulus_types_filler,
responses_filler) = _load_FIGRIM_data(os.path.join(temp_dir, 'allImages_fillers.mat'), stimulus_indices, stimulus_type=0)
print("Targets...")
(xs_target,
ys_target,
ts_target,
ns_target,
train_subjects_target,
which_times_target,
stimulus_types_target,
             responses_target) = _load_FIGRIM_data(os.path.join(temp_dir, 'allImages_release.mat'), stimulus_indices, stimulus_type=1)
print("Finalizing...")
xs = xs_filler + xs_target
ys = ys_filler + ys_target
ts = ts_filler + ts_target
ns = ns_filler + ns_target
train_subjects = train_subjects_filler + train_subjects_target
which_times = which_times_filler + which_times_target
stimulus_types = stimulus_types_filler + stimulus_types_target
responses = responses_filler + responses_target
fixations = FixationTrains.from_fixation_trains(
xs, ys, ts, ns, train_subjects,
scanpath_attributes={
'which_time': which_times,
'stimulus_type': stimulus_types,
'response': responses
})
if location:
stimuli.to_hdf5(os.path.join(location, 'stimuli.hdf5'))
fixations.to_hdf5(os.path.join(location, 'fixations.hdf5'))
return stimuli, fixations | mit |
yonglehou/scikit-learn | examples/decomposition/plot_sparse_coding.py | 246 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
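# For example, with the values used below, ricker_matrix(width=100, resolution=1024,
# n_components=341) returns a (341, 1024) array whose rows are unit-norm wavelets
# centred on a regular grid over the signal support.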
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer division: used as an array dimension below
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/Online_PassiveAggressive_countmeansketch_unbiased_median.py | 1 | 10081 | # -*- coding: utf-8 -*-
"""
python -m scripts/Online_PassiveAggressive_countmeansketch LMdata 3 1 a ODDST 0.01
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier as PAC
from skgraph.datasets import load_graph_datasets
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.utils import compute_class_weight
from scipy.sparse import csr_matrix
from countminsketch_unbiased_Numpy_median import CountMinSketch
from itertools import izip
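# Note: CountMinSketch is imported from a module local to this repository and is not
# shown in this file.  As a rough sketch of the assumed interface (an assumption, not
# the actual implementation): every feature index is hashed into d rows of a width-m
# counter table, add(col, value) updates the d counters for that index, and dot(other)
# estimates the inner product between two sketched vectors, e.g.
#
#     W = CountMinSketch(m, d); W.add(42, 0.5)   # sketched weight vector
#     x = CountMinSketch(m, d); x.add(42, 1.0)   # sketched example
#     score = W.dot(x)                           # approximates <w, x>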
if __name__=='__main__':
    if len(sys.argv)<9:  # script name + 8 required arguments (see usage below)
sys.exit("python ODDKernel_example.py dataset r l filename kernel C m d")
dataset=sys.argv[1]
max_radius=int(sys.argv[2])
la=float(sys.argv[3])
#hashs=int(sys.argv[3])
njobs=1
name=str(sys.argv[4])
kernel=sys.argv[5]
C=float(sys.argv[6])
m=int(sys.argv[7])
d=int(sys.argv[8])
#lr=float(sys.argv[7])
#FIXED PARAMETERS
normalization=False
#working with Chemical
g_it=load_graph_datasets.dispatch(dataset)
f=open(name,'w')
#At this point, one_hot_encoding contains the encoding for each symbol in the alphabet
if kernel=="WL":
print "Lambda ignored"
print "Using WL fast subtree kernel"
Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
elif kernel=="ODDST":
print "Using ST kernel"
Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
elif kernel=="NSPDK":
print "Using NSPDK kernel, lambda parameter interpreted as d"
Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
else:
print "Unrecognized kernel"
#TODO the C parameter should probably be optimized
#print zip(_letters, _one_hot)
#exit()
features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
print "examples, features", features.shape
errors=0
tp=0
fp=0
tn=0
fn=0
predictions=[0]*50
correct=[0]*50
#print ESN
#netDataSet=[]
#netTargetSet=[]
#netKeyList=[]
BERtotal=[]
bintargets=[1,-1]
#print features
#print list_for_deep.keys()
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
WCMS=CountMinSketch(m,d)
for i in xrange(features.shape[0]):
exCMS=CountMinSketch(m,d)
ex=features[i][0]
#W=csr_matrix(ex)
rows,cols = ex.nonzero()
dot=0.0
module=0.0
for row,col in izip(rows,cols):
#((row,col), ex[row,col])
value=ex[row,col]
module+=value**2
#print col, ex[row,col]
#dot+=WCMS[col]*ex[row,col]
exCMS.add(col,value)
#print dot
        # TODO: add bias
dot=WCMS.dot(exCMS)
#print "dot:", dot, "dotCMS:",dot1
if (np.sign(dot) != g_it.target[i] ):
#print "error on example",i, "predicted:", dot, "correct:", g_it.target[i]
errors+=1
if g_it.target[i]==1:
fn+=1
else:
fp+=1
else:
#print "correct classification", g_it.target[i]
if g_it.target[i]==1:
tp+=1
else:
tn+=1
if(g_it.target[i]==1):
coef=(part_minus+1.0)/(part_plus+part_minus+1.0)
part_plus+=1
else:
coef=(part_plus+1.0)/(part_plus+part_minus+1.0)
part_minus+=1
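        # Class-balanced PA-I step: the usual passive-aggressive step size
        # min(C, hinge_loss / ||x||^2) is scaled by `coef`, which is larger for the
        # minority class, so rare-class mistakes trigger proportionally bigger updates.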
tao = min (C, max (0.0,( (1.0 - g_it.target[i]*dot )*coef) / module ) );
if (tao > 0.0):
exCMS*=(tao*g_it.target[i])
WCMS+=(exCMS)
# for row,col in zip(rows,cols):
# ((row,col), ex[row,col])
# #print col, ex[row,col]
# WCMS.add(col,g_it.target[i]*tao*ex[row,col])
#print "Correct prediction example",i, "pred", score, "target",g_it.target[i]
if i%50==0 and i!=0:
#output performance statistics every 50 examples
if (tn+fp) > 0:
pos_part= float(fp) / (tn+fp)
else:
pos_part=0
if (tp+fn) > 0:
neg_part=float(fn) / (tp+fn)
else:
neg_part=0
BER = 0.5 * ( pos_part + neg_part)
print "1-BER Window esempio ",i, (1.0 - BER)
f.write("1-BER Window esempio "+str(i)+" "+str(1.0 - BER)+"\n")
#print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
BERtotal.append(1.0 - BER)
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
print "BER AVG", str(np.average(BERtotal)),"std", np.std(BERtotal)
f.write("BER AVG "+ str(np.average(BERtotal))+" std "+str(np.std(BERtotal))+"\n")
f.close()
#print "N_features", ex.shape
#generate explicit W from CountMeanSketch
#print W
#raw_input("W (output)")
#==============================================================================
#
# tao = /*(double)labels->get_label(idx_a) **/ min (C, max (0.0,(1.0 - (((double)labels->get_label(idx_a))*(classe_mod) )) * c_plus ) / modulo_test);
#
# #W=W_old #dump line
#
#
# #set the weights of PA to the predicted values
# PassiveAggressive.coef_=W
# pred=PassiveAggressive.predict(ex)
#
# score=PassiveAggressive.decision_function(ex)
#
# bintargets.append(g_it.target[i])
# if pred!=g_it.target[i]:
# errors+=1
# print "Error",errors," on example",i, "pred", score, "target",g_it.target[i]
# if g_it.target[i]==1:
# fn+=1
# else:
# fp+=1
#
# else:
# if g_it.target[i]==1:
# tp+=1
# else:
# tn+=1
# #print "Correct prediction example",i, "pred", score, "target",g_it.target[i]
#
# else:
# #first example is always an error!
# pred=0
# score=0
# errors+=1
# print "Error",errors," on example",i
# if g_it.target[i]==1:
# fn+=1
# else:
# fp+=1
# #print i
# if i%50==0 and i!=0:
# #output performance statistics every 50 examples
# if (tn+fp) > 0:
# pos_part= float(fp) / (tn+fp)
# else:
# pos_part=0
# if (tp+fn) > 0:
# neg_part=float(fn) / (tp+fn)
# else:
# neg_part=0
# BER = 0.5 * ( pos_part + neg_part)
# print "1-BER Window esempio ",i, (1.0 - BER)
# print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
# BERtotal.append(1.0 - BER)
# tp = 0
# fp = 0
# fn = 0
# tn = 0
# bintargets=[1,-1]
# #print features[0][i]
# #print features[0][i].shape
# #f=features[0][i,:]
# #print f.shape
# #print f.shape
# #print g_it.target[i]
# #third parameter is compulsory just for the first call
# print "prediction", pred, score
# #print "intecept",PassiveAggressive.intercept_
# #raw_input()
# if abs(score)<1.0 or pred!=g_it.target[i]:
#
# ClassWeight=compute_class_weight('auto',np.asarray([1,-1]),bintargets)
# #print "class weights", {1:ClassWeight[0],-1:ClassWeight[1]}
# PassiveAggressive.class_weight={1:ClassWeight[0],-1:ClassWeight[1]}
#
# PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# #PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# W_old=PassiveAggressive.coef_
#
#
# #ESN target---#
# netTargetSet=[]
# for key,rowDict in list_for_deep[i].iteritems():
#
#
# target=np.asarray( [np.asarray([W_old[0,key]])]*len(rowDict))
#
#
# netTargetSet.append(target)
#
#
#
#
# #------------ESN TargetSetset--------------------#
# # ESN Training
#
# #for ftDataset,ftTargetSet in zip(netDataSet,netTargetSet):
# #print "Input"
# #print netDataSet
# #raw_input("Output")
# #print netTargetSet
# #raw_input("Target")
# model.OnlineTrain(netDataSet,netTargetSet,lr)
# #raw_input("TR")
# #calcolo statistiche
#
# print "BER AVG", sum(BERtotal) / float(len(BERtotal))
# print>>f,"BER AVG "+str(sum(BERtotal) / float(len(BERtotal)))
# f.close()
#==============================================================================
| gpl-3.0 |
hammerlab/immuno_research | Jan30_exclude_hla_a2.py | 1 | 5343 | import numpy as np
import sklearn
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.linear_model
from epitopes import iedb, amino_acid, features, reduced_alphabet
import eval_dataset
"""
Do results from a restricted HLA sample (only A2) generalize to all the other HLA types?
"""
A2 = 'A2$|A\*02'
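# Matches IEDB HLA annotations that either end in "A2" or contain the allele
# prefix "A*02" (the backslash escapes the literal '*' in the regex).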
print
print "---"
print "Human MHC1 (keep)"
X_human_mhc1, Y_human_mhc1 = iedb.load_tcell_ngrams(
noisy_labels = 'keep',
human = True,
mhc_class = 1)
eval_dataset.eval_cv(X_human_mhc1, Y_human_mhc1)
print
print "---"
print "Human MHC1 (drop)"
X_human_mhc1_filter, Y_human_mhc1_filter = iedb.load_tcell_ngrams(
noisy_labels = 'drop',
human = True,
mhc_class = 1)
eval_dataset.eval_cv(X_human_mhc1_filter, Y_human_mhc1_filter)
print
print "---"
print "Human MHC1 noisy = positive"
X_human_mhc1_positive, Y_human_mhc1_positive = iedb.load_tcell_ngrams(
noisy_labels = 'positive',
human = True,
mhc_class = 1)
eval_dataset.eval_cv(X_human_mhc1_positive, Y_human_mhc1_positive)
print
print "---"
print "Human MHC1 noisy = negative"
X_human_mhc1_negative, Y_human_mhc1_negative = iedb.load_tcell_ngrams(
noisy_labels = 'negative',
human = True,
mhc_class = 1)
eval_dataset.eval_cv(X_human_mhc1_negative, Y_human_mhc1_negative)
print
print "---"
print "No HLA-A2"
X_no_hla_a2, Y_no_hla_a2 = iedb.load_tcell_ngrams(
noisy_labels = 'keep',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
eval_dataset.eval_cv(X_no_hla_a2, Y_no_hla_a2)
print
print "---"
print "No HLA-A2 filtered"
X_no_hla_a2_filter, Y_no_hla_a2_filter = iedb.load_tcell_ngrams(
noisy_labels = 'drop',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
eval_dataset.eval_cv(X_no_hla_a2_filter, Y_no_hla_a2_filter)
print
print "---"
print "No HLA-A2 noisy = positive"
X_no_hla_a2_positive, Y_no_hla_a2_positive = iedb.load_tcell_ngrams(
noisy_labels = 'positive',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
eval_dataset.eval_cv(X_no_hla_a2_positive, Y_no_hla_a2_positive)
print
print "---"
print "No HLA-A2 noisy = negative"
X_no_hla_a2_negative, Y_no_hla_a2_negative = iedb.load_tcell_ngrams(
noisy_labels = 'negative',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
eval_dataset.eval_cv(X_no_hla_a2_negative, Y_no_hla_a2_negative)
print
print "---"
print "Cross-accuracy for HLA-A2 data"
X_hla_a2, Y_hla_a2 = iedb.load_tcell_ngrams(
noisy_labels = 'keep',
human = True,
mhc_class = 1,
hla_type = A2)
eval_dataset.eval_split(X_no_hla_a2, Y_no_hla_a2, X_hla_a2, Y_hla_a2)
print
print "---"
print "Cross-accuracy for HLA-A2 data filtered"
X_hla_a2_filtered, Y_hla_a2_filtered = iedb.load_tcell_ngrams(
noisy_labels = 'drop',
human = True,
mhc_class = 1,
hla_type = A2)
eval_dataset.eval_split(X_no_hla_a2_filter, Y_no_hla_a2_filter, X_hla_a2_filtered, Y_hla_a2_filtered)
print
print "---"
print "Cross-accuracy for HLA-A2 data noisy = positive"
X_hla_a2_positive, Y_hla_a2_positive = iedb.load_tcell_ngrams(
noisy_labels = 'positive',
human = True,
mhc_class = 1,
hla_type = A2)
eval_dataset.eval_split(X_no_hla_a2_positive, Y_no_hla_a2_positive, X_hla_a2_positive, Y_hla_a2_positive)
print
print "---"
print "Cross-accuracy for HLA-A2 data filtered (assay_group = cytotoxity)"
X_no_hla_a2_cytotoxicity, Y_no_hla_a2_cytotoxicity = iedb.load_tcell_ngrams(
noisy_labels = 'drop',
assay_group = 'cytotoxicity',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
X_hla_a2_cytotoxicity, Y_hla_a2_cytotoxicity = iedb.load_tcell_ngrams(
noisy_labels = 'drop',
assay_group = 'cytotoxicity',
human = True,
mhc_class = 1,
hla_type = A2)
eval_dataset.eval_split(X_no_hla_a2_cytotoxicity, Y_no_hla_a2_cytotoxicity, X_hla_a2_cytotoxicity, Y_hla_a2_cytotoxicity)
print
print "---"
print "Cross-accuracy for HLA-A2 data (noisy = positive, assay_group = cytotoxity)"
X_no_hla_a2_positive_cytotoxicity, Y_no_hla_a2_positive_cytotoxicity = iedb.load_tcell_ngrams(
noisy_labels = 'positive',
assay_group = 'cytotoxicity',
human = True,
mhc_class = 1,
exclude_hla_type = A2)
X_hla_a2_positive_cytotoxicity, Y_hla_a2_positive_cytotoxicity = iedb.load_tcell_ngrams(
noisy_labels = 'positive',
assay_group = 'cytotoxicity',
human = True,
mhc_class = 1,
hla_type = A2)
eval_dataset.eval_split(X_no_hla_a2_positive_cytotoxicity, Y_no_hla_a2_positive_cytotoxicity, X_hla_a2_positive_cytotoxicity, Y_hla_a2_positive_cytotoxicity) | gpl-2.0 |
tclose/python-neo | neo/test/iotest/common_io_test.py | 7 | 22757 | # -*- coding: utf-8 -*-
'''
Common tests for IOs:
* check presence of all necessary attr
* check types
* write/read consistency
See BaseTestIO.
The public URL is in url_for_tests.
The private url for writing is
ssh://gate.g-node.org/groups/neo/io_test_files/
'''
# needed for python 3 compatibility
from __future__ import absolute_import
__test__ = False
url_for_tests = "https://portal.g-node.org/neo/"
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.core import Block, Segment
from neo.test.tools import (assert_same_sub_schema,
assert_neo_object_is_compliant,
assert_sub_schema_is_lazy_loaded,
assert_lazy_sub_schema_can_be_loaded,
assert_children_empty)
from neo.test.iotest.tools import (can_use_network, cleanup_test_file,
close_object_safe, create_generic_io_object,
create_generic_reader,
create_generic_writer,
create_local_temp_dir, download_test_file,
iter_generic_io_objects,
iter_generic_readers, iter_read_objects,
make_all_directories, read_generic,
write_generic)
from neo.test.generate_datasets import generate_from_supported_objects
class BaseTestIO(object):
'''
    This class makes common tests for all IOs.
    Several strategies:
      * for IO able to read and write: test_write_then_read
      * for IO able to read and write with hash conservation (optional):
        test_read_then_write
      * for all IOs: test_assert_readed_neo_object_is_compliant
2 cases:
* files are at G-node and downloaded:
download_test_files_if_not_present
* files are generated by MyIO.write()
'''
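    # A concrete IO test case elsewhere in the test suite typically looks roughly
    # like the sketch below (class and file names here are illustrative only):
    #
    #     class TestExampleIO(BaseTestIO, unittest.TestCase):
    #         ioclass = ExampleIO
    #         files_to_test = ['example_file_1.dat']
    #         files_to_download = files_to_test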
#~ __test__ = False
# all IO test need to modify this:
ioclass = None # the IOclass to be tested
files_to_test = [] # list of files to test compliances
files_to_download = [] # when files are at G-Node
# when reading then writing produces files with identical hashes
hash_conserved_when_write_read = False
# when writing then reading creates an identical neo object
read_and_write_is_bijective = True
# allow environment to tell avoid using network
use_network = can_use_network()
local_test_dir = None
def setUp(self):
'''
Set up the test fixture. This is run for every test
'''
self.higher = self.ioclass.supported_objects[0]
self.shortname = self.ioclass.__name__.lower().strip('io')
# these objects can both be written and read
self.io_readandwrite = list(set(self.ioclass.readable_objects) &
set(self.ioclass.writeable_objects))
# these objects can be either written or read
self.io_readorwrite = list(set(self.ioclass.readable_objects) |
set(self.ioclass.writeable_objects))
self.create_local_dir_if_not_exists()
self.download_test_files_if_not_present()
self.files_generated = []
self.generate_files_for_io_able_to_write()
self.files_to_test.extend(self.files_generated)
self.cascade_modes = [True]
if hasattr(self.ioclass, 'load_lazy_cascade'):
self.cascade_modes.append('lazy')
def create_local_dir_if_not_exists(self):
'''
Create a local directory to store testing files and return it.
The directory path is also written to self.local_test_dir
'''
self.local_test_dir = create_local_temp_dir(self.shortname)
return self.local_test_dir
def download_test_files_if_not_present(self):
'''
Download %s file at G-node for testing
url_for_tests is global at beginning of this file.
''' % self.ioclass.__name__
if not self.use_network:
raise unittest.SkipTest("Requires download of data from the web")
url = url_for_tests+self.shortname
try:
make_all_directories(self.files_to_download, self.local_test_dir)
download_test_file(self.files_to_download,
self.local_test_dir, url)
except IOError as exc:
raise unittest.SkipTest(exc)
download_test_files_if_not_present.__test__ = False
def cleanup_file(self, path):
'''
Remove test files or directories safely.
'''
cleanup_test_file(self.ioclass, path, directory=self.local_test_dir)
def able_to_write_or_read(self, writeread=False, readwrite=False):
'''
Return True if generalized writing or reading is possible.
If writeread=True, return True if writing then reading is
possible and produces identical neo objects.
If readwrite=True, return True if reading then writing is possible
and produces files with identical hashes.
'''
# Find the highest object that is supported by the IO
# Test only if it is a Block or Segment, and if it can both read
# and write this object.
if self.higher not in self.io_readandwrite:
return False
if self.higher not in [Block, Segment]:
return False
        # when an io needs external knowledge for writing or reading, such as
        # sampling_rate (RawBinaryIO...), the test is too complex to design
        # generically.
if (self.higher in self.ioclass.read_params and
len(self.ioclass.read_params[self.higher]) != 0):
return False
# handle cases where the test should write then read
if writeread and not self.read_and_write_is_bijective:
return False
# handle cases where the test should read then write
if readwrite and not self.hash_conserved_when_write_read:
return False
return True
def get_filename_path(self, filename):
'''
Get the path to a filename in the current temporary file directory
'''
return os.path.join(self.local_test_dir, filename)
def generic_io_object(self, filename=None, return_path=False, clean=False):
'''
Create an io object in a generic way that can work with both
file-based and directory-based io objects.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the io object. return ioobj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
'''
return create_generic_io_object(ioclass=self.ioclass,
filename=filename,
directory=self.local_test_dir,
return_path=return_path,
clean=clean)
def create_file_reader(self, filename=None, return_path=False,
clean=False, target=None, readall=False):
'''
Create a function that can read from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the reader function. return reader, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
res = create_generic_reader(ioobj, target=target, readall=readall)
if return_path:
return res, path
return res
def create_file_writer(self, filename=None, return_path=False,
clean=False, target=None):
'''
Create a function that can write from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the writer function. return writer, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'write' method.
If target is the Block or Segment class, use write_block or
write_segment, respectively.
If target is a string, use 'write_'+target.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
res = create_generic_writer(ioobj, target=target)
if return_path:
return res, path
return res
def read_file(self, filename=None, return_path=False, clean=False,
target=None, readall=False, cascade=True, lazy=False):
'''
Read from the specified filename.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the object. return obj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
The cascade and lazy parameters are passed to the reader. Defaults
are True and False, respectively.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
obj = read_generic(ioobj, target=target, cascade=cascade, lazy=lazy,
readall=readall, return_reader=False)
if return_path:
return obj, path
return obj
def write_file(self, obj=None, filename=None, return_path=False,
clean=False, target=None):
'''
Write the target object to a file using the given neo io object ioobj.
If filename is None, create a filename (default).
If return_path is True, return the full path of the file along with
the object. return obj, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
obj is the object to write. If obj is None, an object is created
automatically for the io class.
'''
ioobj, path = self.generic_io_object(filename=filename,
return_path=True, clean=clean)
obj = write_generic(ioobj, target=target, return_reader=False)
if return_path:
return obj, path
return obj
def iter_io_objects(self, return_path=False, clean=False):
'''
Return an iterable over the io objects created from files_to_test
If return_path is True, yield the full path of the file along with
the io object. yield ioobj, path Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
'''
return iter_generic_io_objects(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
return_path=return_path,
clean=clean)
def iter_readers(self, target=None, readall=False,
return_path=False, return_ioobj=False, clean=False):
'''
Return an iterable over readers created from files_to_test.
If return_path is True, return the full path of the file along with
the reader object. return reader, path.
If return_ioobj is True, return the io object as well as the reader.
return reader, ioobj. Default is False.
If both return_path and return_ioobj is True,
return reader, path, ioobj. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If readall is True, use the read_all_ method instead of the
read_ method. Default is False.
'''
return iter_generic_readers(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
return_path=return_path,
return_ioobj=return_ioobj,
target=target,
clean=clean,
readall=readall)
def iter_objects(self, target=None, return_path=False, return_ioobj=False,
return_reader=False, clean=False, readall=False,
cascade=True, lazy=False):
'''
Iterate over objects read from the list of filenames in files_to_test.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or
read_segment, respectively.
If target is a string, use 'read_'+target.
If return_path is True, yield the full path of the file along with
the object. yield obj, path.
If return_ioobj is True, yield the io object as well as the object.
yield obj, ioobj. Default is False.
If return_reader is True, yield the io reader function as well as the
object. yield obj, reader. Default is False.
If some combination of return_path, return_ioobj, and return_reader
is True, they are yielded in the order: obj, path, ioobj, reader.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
The cascade and lazy parameters are passed to the reader. Defaults
are True and False, respectively.
If readall is True, use the read_all_ method instead of the read_
method. Default is False.
'''
return iter_read_objects(ioclass=self.ioclass,
filenames=self.files_to_test,
directory=self.local_test_dir,
target=target,
return_path=return_path,
return_ioobj=return_ioobj,
return_reader=return_reader,
clean=clean, readall=readall,
cascade=cascade, lazy=lazy)
def generate_files_for_io_able_to_write(self):
'''
Write files for use in testing.
'''
self.files_generated = []
if not self.able_to_write_or_read():
return
generate_from_supported_objects(self.ioclass.supported_objects)
ioobj, path = self.generic_io_object(return_path=True, clean=True)
if ioobj is None:
return
self.files_generated.append(path)
write_generic(ioobj, target=self.higher)
close_object_safe(ioobj)
def test_write_then_read(self):
'''
Test for IO that are able to write and read - here %s:
1 - Generate a full schema with supported objects.
2 - Write to a file
3 - Read from the file
        4 - Check the hierarchy
5 - Check data
Work only for IO for Block and Segment for the highest object
(main cases).
''' % self.ioclass.__name__
if not self.able_to_write_or_read(writeread=True):
return
for cascade in self.cascade_modes:
ioobj1 = self.generic_io_object(clean=True)
if ioobj1 is None:
return
ob1 = write_generic(ioobj1, target=self.higher)
close_object_safe(ioobj1)
ioobj2 = self.generic_io_object()
# Read the highest supported object from the file
obj_reader = create_generic_reader(ioobj2, target=False)
ob2 = obj_reader(cascade=cascade)[0]
if self.higher == Segment:
ob2 = ob2.segments[0]
# some formats (e.g. elphy) do not support double floating
# point spiketrains
try:
assert_same_sub_schema(ob1, ob2, True, 1e-8)
assert_neo_object_is_compliant(ob1)
assert_neo_object_is_compliant(ob2)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('with cascade=%s ' % cascade,)
raise
close_object_safe(ioobj2)
def test_read_then_write(self):
'''
Test for IO that are able to read and write, here %s:
1 - Read a file
        2 - Write the object set to another file
        3 - Compare the hashes of the 2 files
NOTE: TODO: Not implemented yet
''' % self.ioclass.__name__
if not self.able_to_write_or_read(readwrite=True):
return
#assert_file_contents_equal(a, b)
def test_assert_readed_neo_object_is_compliant(self):
'''
Reading %s files in `files_to_test` produces compliant objects.
Compliance test: neo.test.tools.assert_neo_object_is_compliant for
all cascade and lazy modes
''' % self.ioclass.__name__
        # This is for files present at G-Node or generated
for cascade in self.cascade_modes:
for lazy in [True, False]:
for obj, path in self.iter_objects(cascade=cascade, lazy=lazy,
return_path=True):
try:
# Check compliance of the block
assert_neo_object_is_compliant(obj)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from %s with cascade=%s and lazy=%s' %
(os.path.basename(path), cascade, lazy),)
raise
def test_readed_with_cascade_is_compliant(self):
'''
Reading %s files in `files_to_test` with `cascade` is compliant.
A reader with cascade = False should return empty children.
''' % self.ioclass.__name__
        # This is for files present at G-Node or generated
for obj, path in self.iter_objects(cascade=False, lazy=False,
return_path=True):
try:
# Check compliance of the block or segment
assert_neo_object_is_compliant(obj)
assert_children_empty(obj, self.ioclass)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from %s ' % os.path.basename(path),)
raise
def test_readed_with_lazy_is_compliant(self):
'''
Reading %s files in `files_to_test` with `lazy` is compliant.
Test the reader with lazy = True. All objects derived from ndarray
or Quantity should have a size of 0. Also, AnalogSignal,
AnalogSignalArray, SpikeTrain, Epocharray, and EventArray should
contain the lazy_shape attribute.
''' % self.ioclass.__name__
        # This is for files present at G-Node or generated
for cascade in self.cascade_modes:
for obj, path in self.iter_objects(cascade=cascade, lazy=True,
return_path=True):
try:
assert_sub_schema_is_lazy_loaded(obj)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from %s with cascade=%s ' %
(os.path.basename(path), cascade),)
raise
def test_load_lazy_objects(self):
'''
Reading %s files in `files_to_test` with `lazy` works.
Test the reader with lazy = True. All objects derived from ndarray
or Quantity should have a size of 0. Also, AnalogSignal,
AnalogSignalArray, SpikeTrain, Epocharray, and EventArray should
contain the lazy_shape attribute.
''' % self.ioclass.__name__
if not hasattr(self.ioclass, 'load_lazy_object'):
return
        # This is for files present at G-Node or generated
for cascade in self.cascade_modes:
for obj, path, ioobj in self.iter_objects(cascade=cascade,
lazy=True,
return_ioobj=True,
return_path=True):
try:
assert_lazy_sub_schema_can_be_loaded(obj, ioobj)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from %s with cascade=%s ' %
(os.path.basename(path), cascade),)
raise
| bsd-3-clause |
ageron/tensorflow | tensorflow/contrib/eager/python/tfe.py | 1 | 5748 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager execution prototype.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
To use, at program startup, call `tf.enable_eager_execution()`.
@@metrics
@@list_devices
@@num_gpus
@@py_func
@@defun
@@function
@@make_template
@@implicit_gradients
@@implicit_value_and_gradients
@@gradients_function
@@value_and_gradients_function
@@GradientTape
@@run
@@enable_eager_execution
@@enable_remote_eager_execution
@@custom_gradient
@@add_execution_callback
@@clear_execution_callbacks
@@errstate
@@ExecutionCallback
@@inf_callback
@@inf_nan_callback
@@nan_callback
@@seterr
@@Iterator
@@Saver
@@restore_variables_on_create
@@Variable
@@get_optimizer_variables
@@EagerVariableStore
@@Network
@@Sequential
@@save_network_checkpoint
@@restore_network_checkpoint
@@Checkpoint
@@Checkpointable
@@executing_eagerly
@@in_eager_mode
@@set_execution_mode
@@execution_mode
@@async_wait
@@async_clear_error
@@set_server_def
@@run_test_in_graph_and_eager_modes
@@run_all_tests_in_graph_and_eager_modes
@@TensorSpec
@@connect_to_remote_host
@@DEVICE_PLACEMENT_EXPLICIT
@@DEVICE_PLACEMENT_WARN
@@DEVICE_PLACEMENT_SILENT
@@SYNC
@@ASYNC
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-bad-import-order,g-import-not-at-top,unused-import
#
from tensorflow.contrib.eager.python import metrics
from tensorflow.contrib.eager.python.datasets import Iterator
from tensorflow.contrib.eager.python.network import Network
from tensorflow.contrib.eager.python.network import Sequential
from tensorflow.contrib.eager.python.network import save_network_checkpoint
from tensorflow.contrib.eager.python.network import restore_network_checkpoint
from tensorflow.contrib.eager.python.saver import get_optimizer_variables
from tensorflow.contrib.eager.python.saver import restore_variables_on_create
from tensorflow.contrib.eager.python.saver import Saver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import function as _function_lib
from tensorflow.python.eager.context import DEVICE_PLACEMENT_EXPLICIT
from tensorflow.python.eager.context import DEVICE_PLACEMENT_WARN
from tensorflow.python.eager.context import DEVICE_PLACEMENT_SILENT
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.context import list_devices
from tensorflow.python.eager.context import set_execution_mode
from tensorflow.python.eager.context import execution_mode
from tensorflow.python.eager.context import async_wait
from tensorflow.python.eager.context import async_clear_error
from tensorflow.python.eager.context import SYNC
from tensorflow.python.eager.context import ASYNC
from tensorflow.python.eager.context import num_gpus
from tensorflow.python.eager.context import set_server_def
from tensorflow.python.eager.def_function import function
from tensorflow.python.eager.execution_callbacks import add_execution_callback
from tensorflow.python.eager.execution_callbacks import clear_execution_callbacks
from tensorflow.python.eager.execution_callbacks import errstate
from tensorflow.python.eager.execution_callbacks import ExecutionCallback
from tensorflow.python.eager.execution_callbacks import inf_callback
from tensorflow.python.eager.execution_callbacks import inf_nan_callback
from tensorflow.python.eager.execution_callbacks import nan_callback
from tensorflow.python.eager.execution_callbacks import seterr
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.framework.ops import enable_eager_execution
from tensorflow.python.framework.ops import enable_eager_execution_internal as enable_remote_eager_execution
from tensorflow.python.framework.ops import eager_run as run
from tensorflow.python.framework.test_util import run_in_graph_and_eager_modes as run_test_in_graph_and_eager_modes
from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes as run_all_tests_in_graph_and_eager_modes
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
from tensorflow.python.ops.variable_scope import EagerVariableStore
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import template
from tensorflow.python.training.tracking.tracking import AutoTrackable as Checkpointable
from tensorflow.python.training.tracking.util import Checkpoint
from tensorflow.python.util.all_util import remove_undocumented
py_func = script_ops.eager_py_func
defun = _function_lib.defun
make_template = template.make_template_internal
implicit_gradients = backprop.implicit_grad
implicit_value_and_gradients = backprop.implicit_val_and_grad
gradients_function = backprop.gradients_function
value_and_gradients_function = backprop.val_and_grad_function
GradientTape = backprop.GradientTape # pylint: disable=invalid-name
in_eager_mode = executing_eagerly
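# Usage sketch (kept as a comment so importing this module stays side-effect free):
#
#     import tensorflow as tf
#     import tensorflow.contrib.eager as tfe
#     tf.enable_eager_execution()
#     grad_fn = tfe.gradients_function(lambda x: x * x)
#     print(grad_fn(3.))  # gradient of x**2 at x=3 -> 6.0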
remove_undocumented(__name__)
| apache-2.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/slim/python/slim/nets/resnet_utils.py | 66 | 10979 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
padding='SAME')
net = subsample(net, factor=stride)
whereas
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = array_ops.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope)
@add_arg_scope
def stack_blocks_dense(net,
blocks,
output_stride=None,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
for i, unit in enumerate(block.args):
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
net = utils.collect_named_outputs(outputs_collections, sc.name, net)
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
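# For example, requesting an output_stride smaller than the product of the block
# strides means that, once the running stride reaches the requested value, the
# remaining units are applied with stride 1 and an increasing atrous `rate` instead
# of subsampling, which is the usual configuration for dense prediction.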
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': ops.GraphKeys.UPDATE_OPS,
}
with arg_scope(
[layers_lib.conv2d],
weights_regularizer=regularizers.l2_regularizer(weight_decay),
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([layers.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
| apache-2.0 |
ageron/tensorflow | tensorflow/contrib/slim/python/slim/nets/resnet_utils.py | 66 | 10979 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
padding='SAME')
net = subsample(net, factor=stride)
whereas
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = array_ops.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope)
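# A minimal sketch (plain Python, no TensorFlow required) of the explicit
# padding computed above when stride > 1. For a 3x3 kernel with atrous
# rate=2 the effective kernel size is 5, so two zero pixels are added on
# each side before the 'VALID' convolution. The helper name is hypothetical.
def _example_conv2d_same_padding(kernel_size=3, rate=2):
  kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
  pad_total = kernel_size_effective - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  return kernel_size_effective, pad_beg, pad_end
# Under these assumptions the call above returns (5, 2, 2).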
@add_arg_scope
def stack_blocks_dense(net,
blocks,
output_stride=None,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
for i, unit in enumerate(block.args):
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
net = utils.collect_named_outputs(outputs_collections, sc.name, net)
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
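# A minimal sketch (plain Python, no TensorFlow required) of the stride and
# atrous-rate bookkeeping performed above. Once the accumulated stride
# reaches the requested output_stride, later units run with stride 1 and
# their nominal stride is absorbed into the rate. The helper name and the
# example strides are hypothetical, for illustration only.
def _example_output_stride_bookkeeping(unit_strides, output_stride):
  current_stride, rate, schedule = 1, 1, []
  for stride in unit_strides:
    if output_stride is not None and current_stride == output_stride:
      schedule.append(('atrous', 1, rate))
      rate *= stride
    else:
      schedule.append(('regular', stride, 1))
      current_stride *= stride
  return schedule
# For unit_strides=[2, 2, 2] and output_stride=4 this yields
# [('regular', 2, 1), ('regular', 2, 1), ('atrous', 1, 1)]; afterwards the
# rate is 2, so subsequent layers would stay at the requested density.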
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': ops.GraphKeys.UPDATE_OPS,
}
with arg_scope(
[layers_lib.conv2d],
weights_regularizer=regularizers.l2_regularizer(weight_decay),
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([layers.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
| apache-2.0 |
yonglehou/scikit-learn | sklearn/tests/test_metaestimators.py | 225 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
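# A minimal sketch (plain Python, independent of scikit-learn internals) of
# the behaviour these tests exercise: a wrapper exposes methods such as
# `predict` only when the wrapped estimator provides them, so `hasattr` on
# the wrapper mirrors the sub-estimator. The class below is hypothetical and
# only illustrates the delegation pattern, not scikit-learn's implementation.
class _ExampleDelegator(object):
    def __init__(self, estimator):
        self.estimator = estimator
    def __getattr__(self, name):
        if name in ('predict', 'transform', 'decision_function'):
            return getattr(self.estimator, name)
        raise AttributeError(name)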
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
thesandlord/gcloud-python | gcloud/datastore/connection.py | 5 | 16975 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connections to gcloud datastore API servers."""
import os
from gcloud import connection
from gcloud.environment_vars import GCD_HOST
from gcloud.exceptions import make_exception
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
SCOPE = ('https://www.googleapis.com/auth/datastore',
'https://www.googleapis.com/auth/userinfo.email')
"""The scopes required for authenticating as a Cloud Datastore consumer."""
class Connection(connection.Connection):
"""A connection to the Google Cloud Datastore via the Protobuf API.
This class should understand only the basic types (and protobufs)
    in method arguments, but it should be capable of returning advanced types.
:type credentials: :class:`oauth2client.client.OAuth2Credentials`
:param credentials: The OAuth2 Credentials to use for this connection.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests.
:type api_base_url: string
:param api_base_url: The base of the API call URL. Defaults to the value
from :mod:`gcloud.connection`.
"""
API_VERSION = 'v1beta2'
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = ('{api_base}/datastore/{api_version}'
'/datasets/{dataset_id}/{method}')
"""A template for the URL of a particular API call."""
def __init__(self, credentials=None, http=None, api_base_url=None):
credentials = self._create_scoped_credentials(credentials, SCOPE)
super(Connection, self).__init__(credentials=credentials, http=http)
if api_base_url is None:
api_base_url = os.getenv(GCD_HOST,
connection.API_BASE_URL)
self.api_base_url = api_base_url
def _request(self, dataset_id, method, data):
"""Make a request over the Http transport to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset of which to make the request.
:type method: string
:param method: The API call method name (ie, ``runQuery``,
``lookup``, etc)
:type data: string
:param data: The data to send with the API call.
Typically this is a serialized Protobuf string.
:rtype: string
:returns: The string response content from the API call.
:raises: :class:`gcloud.exceptions.GCloudError` if the response
code is not 200 OK.
"""
headers = {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(data)),
'User-Agent': self.USER_AGENT,
}
headers, content = self.http.request(
uri=self.build_api_url(dataset_id=dataset_id, method=method),
method='POST', headers=headers, body=data)
status = headers['status']
if status != '200':
raise make_exception(headers, content, use_json=False)
return content
def _rpc(self, dataset_id, method, request_pb, response_pb_cls):
"""Make a protobuf RPC request.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
:param method: The name of the method to invoke.
:type request_pb: :class:`google.protobuf.message.Message` instance
:param request_pb: the protobuf instance representing the request.
        :type response_pb_cls: A :class:`google.protobuf.message.Message`
subclass.
:param response_pb_cls: The class used to unmarshall the response
protobuf.
"""
response = self._request(dataset_id=dataset_id, method=method,
data=request_pb.SerializeToString())
return response_pb_cls.FromString(response)
def build_api_url(self, dataset_id, method, base_url=None,
api_version=None):
"""Construct the URL for a particular API call.
This method is used internally to come up with the URL to use when
making RPCs to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
:param method: The API method to call (ie, runQuery, lookup, ...).
:type base_url: string
:param base_url: The base URL where the API lives.
You shouldn't have to provide this.
:type api_version: string
:param api_version: The version of the API to connect to.
You shouldn't have to provide this.
"""
return self.API_URL_TEMPLATE.format(
api_base=(base_url or self.api_base_url),
api_version=(api_version or self.API_VERSION),
dataset_id=dataset_id, method=method)
def lookup(self, dataset_id, key_pbs,
eventual=False, transaction_id=None):
"""Lookup keys from a dataset in the Cloud Datastore.
Maps the ``DatastoreService.Lookup`` protobuf RPC.
This method deals only with protobufs
(:class:`gcloud.datastore._datastore_v1_pb2.Key` and
:class:`gcloud.datastore._datastore_v1_pb2.Entity`) and is used
under the hood in :func:`gcloud.datastore.get`:
>>> from gcloud import datastore
>>> key = datastore.Key('MyKind', 1234, dataset_id='dataset-id')
>>> datastore.get(key)
[<Entity object>]
Using the ``connection`` class directly:
>>> connection.lookup('dataset-id', [key.to_protobuf()])
[<Entity protobuf>]
:type dataset_id: string
:param dataset_id: The ID of the dataset to look up the keys.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys to retrieve from the datastore.
:type eventual: boolean
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: string
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
:rtype: tuple
:returns: A triple of (``results``, ``missing``, ``deferred``) where
both ``results`` and ``missing`` are lists of
:class:`gcloud.datastore._datastore_v1_pb2.Entity` and
``deferred`` is a list of
:class:`gcloud.datastore._datastore_v1_pb2.Key`.
"""
lookup_request = datastore_pb.LookupRequest()
_set_read_options(lookup_request, eventual, transaction_id)
_add_keys_to_request(lookup_request.key, key_pbs)
lookup_response = self._rpc(dataset_id, 'lookup', lookup_request,
datastore_pb.LookupResponse)
results = [result.entity for result in lookup_response.found]
missing = [result.entity for result in lookup_response.missing]
return results, missing, list(lookup_response.deferred)
def run_query(self, dataset_id, query_pb, namespace=None,
eventual=False, transaction_id=None):
"""Run a query on the Cloud Datastore.
Maps the ``DatastoreService.RunQuery`` protobuf RPC.
Given a Query protobuf, sends a ``runQuery`` request to the
Cloud Datastore API and returns a list of entity protobufs
matching the query.
You typically wouldn't use this method directly, in favor of the
:meth:`gcloud.datastore.query.Query.fetch` method.
Under the hood, the :class:`gcloud.datastore.query.Query` class
uses this method to fetch data:
>>> from gcloud import datastore
>>> query = datastore.Query(kind='MyKind')
>>> query.add_filter('property', '=', 'val')
Using the query's ``fetch_page`` method...
>>> entities, cursor, more_results = query.fetch_page()
>>> entities
[<list of Entity unmarshalled from protobuf>]
>>> cursor
<string containing cursor where fetch stopped>
>>> more_results
<boolean of more results>
Under the hood this is doing...
>>> connection.run_query('dataset-id', query.to_protobuf())
[<list of Entity Protobufs>], cursor, more_results, skipped_results
:type dataset_id: string
:param dataset_id: The ID of the dataset over which to run the query.
:type query_pb: :class:`gcloud.datastore._datastore_v1_pb2.Query`
:param query_pb: The Protobuf representing the query to run.
:type namespace: string
:param namespace: The namespace over which to run the query.
:type eventual: boolean
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: string
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
"""
request = datastore_pb.RunQueryRequest()
_set_read_options(request, eventual, transaction_id)
if namespace:
request.partition_id.namespace = namespace
request.query.CopyFrom(query_pb)
response = self._rpc(dataset_id, 'runQuery', request,
datastore_pb.RunQueryResponse)
return (
[e.entity for e in response.batch.entity_result],
response.batch.end_cursor, # Assume response always has cursor.
response.batch.more_results,
response.batch.skipped_results,
)
def begin_transaction(self, dataset_id, serializable=False):
"""Begin a transaction.
Maps the ``DatastoreService.BeginTransaction`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID dataset to which the transaction applies.
:type serializable: boolean
:param serializable: Boolean indicating if the isolation level of the
transaction should be SERIALIZABLE (True) or
SNAPSHOT (False).
:rtype: :class:`._datastore_v1_pb2.BeginTransactionResponse`
        :returns: the result protobuf for the begin transaction request.
"""
request = datastore_pb.BeginTransactionRequest()
if serializable:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SERIALIZABLE)
else:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SNAPSHOT)
response = self._rpc(dataset_id, 'beginTransaction', request,
datastore_pb.BeginTransactionResponse)
return response.transaction
def commit(self, dataset_id, mutation_pb, transaction_id):
"""Commit dataset mutations in context of current transation (if any).
Maps the ``DatastoreService.Commit`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID dataset to which the transaction applies.
:type mutation_pb: :class:`datastore_pb.Mutation`.
:param mutation_pb: The protobuf for the mutations being saved.
:type transaction_id: string or None
:param transaction_id: The transaction ID returned from
:meth:`begin_transaction`. Non-transactional
batches must pass ``None``.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.MutationResult`.
        :returns: the result protobuf for the mutation.
"""
request = datastore_pb.CommitRequest()
if transaction_id:
request.mode = datastore_pb.CommitRequest.TRANSACTIONAL
request.transaction = transaction_id
else:
request.mode = datastore_pb.CommitRequest.NON_TRANSACTIONAL
request.mutation.CopyFrom(mutation_pb)
response = self._rpc(dataset_id, 'commit', request,
datastore_pb.CommitResponse)
return response.mutation_result
def rollback(self, dataset_id, transaction_id):
"""Rollback the connection's existing transaction.
Maps the ``DatastoreService.Rollback`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID of the dataset to which the transaction
belongs.
:type transaction_id: string
:param transaction_id: The transaction ID returned from
:meth:`begin_transaction`.
"""
request = datastore_pb.RollbackRequest()
request.transaction = transaction_id
# Nothing to do with this response, so just execute the method.
self._rpc(dataset_id, 'rollback', request,
datastore_pb.RollbackResponse)
def allocate_ids(self, dataset_id, key_pbs):
"""Obtain backend-generated IDs for a set of keys.
Maps the ``DatastoreService.AllocateIds`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID of the dataset to which the transaction
belongs.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys for which the backend should allocate IDs.
:rtype: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:returns: An equal number of keys, with IDs filled in by the backend.
"""
request = datastore_pb.AllocateIdsRequest()
_add_keys_to_request(request.key, key_pbs)
        # The response contains the allocated keys, which are returned below.
response = self._rpc(dataset_id, 'allocateIds', request,
datastore_pb.AllocateIdsResponse)
return list(response.key)
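# A minimal sketch (plain Python, no network access) of the URL produced by
# ``Connection.build_api_url`` above. The dataset ID is a placeholder and the
# helper name is hypothetical, for illustration only.
def _example_build_api_url():
    template = ('{api_base}/datastore/{api_version}'
                '/datasets/{dataset_id}/{method}')
    return template.format(api_base='https://www.googleapis.com',
                           api_version='v1beta2',
                           dataset_id='my-dataset-id', method='lookup')
# Under these assumptions the helper returns
# 'https://www.googleapis.com/datastore/v1beta2/datasets/my-dataset-id/lookup'.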
def _set_read_options(request, eventual, transaction_id):
"""Validate rules for read options, and assign to the request.
Helper method for ``lookup()`` and ``run_query``.
:raises: :class:`ValueError` if ``eventual`` is ``True`` and the
``transaction_id`` is not ``None``.
"""
if eventual and (transaction_id is not None):
raise ValueError('eventual must be False when in a transaction')
opts = request.read_options
if eventual:
opts.read_consistency = datastore_pb.ReadOptions.EVENTUAL
elif transaction_id:
opts.transaction = transaction_id
def _prepare_key_for_request(key_pb): # pragma: NO COVER copied from helpers
"""Add protobuf keys to a request object.
.. note::
This is copied from `helpers` to avoid a cycle:
_implicit_environ -> connection -> helpers -> key -> _implicit_environ
:type key_pb: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pb: A key to be added to a request.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:returns: A key which will be added to a request. It will be the
original if nothing needs to be changed.
"""
if key_pb.partition_id.HasField('dataset_id'):
new_key_pb = datastore_pb.Key()
new_key_pb.CopyFrom(key_pb)
new_key_pb.partition_id.ClearField('dataset_id')
key_pb = new_key_pb
return key_pb
def _add_keys_to_request(request_field_pb, key_pbs):
"""Add protobuf keys to a request object.
:type request_field_pb: `RepeatedCompositeFieldContainer`
:param request_field_pb: A repeated proto field that contains keys.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys to add to a request.
"""
for key_pb in key_pbs:
key_pb = _prepare_key_for_request(key_pb)
request_field_pb.add().CopyFrom(key_pb)
| apache-2.0 |
davidam/python-examples | matplotlib/stackplot_demo.py | 1 | 2255 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
"""
==============
Stackplot Demo
==============
How to create stackplots with Matplotlib.
Stackplots are generated by plotting different datasets vertically on
top of one another rather than overlapping with one another. Below we
show some examples to accomplish this with Matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y1 = [1, 1, 2, 3, 5]
y2 = [0, 4, 2, 6, 8]
y3 = [1, 3, 5, 7, 9]
y = np.vstack([y1, y2, y3])
labels = ["Fibonacci ", "Evens", "Odds"]
fig, ax = plt.subplots()
ax.stackplot(x, y1, y2, y3, labels=labels)
ax.legend(loc=2)
plt.show()
fig, ax = plt.subplots()
ax.stackplot(x, y)
plt.show()
###############################################################################
# Here we show an example of making a streamgraph using stackplot
def layers(n, m):
"""
Return *n* random Gaussian mixtures, each of length *m*.
"""
def bump(a):
x = 1 / (.1 + np.random.random())
y = 2 * np.random.random() - .5
z = 10 / (.1 + np.random.random())
for i in range(m):
w = (i / m - y) * z
a[i] += x * np.exp(-w * w)
a = np.zeros((m, n))
for i in range(n):
for j in range(5):
bump(a[:, i])
return a
d = layers(3, 100)
fig, ax = plt.subplots()
ax.stackplot(range(100), d.T, baseline='wiggle')
plt.show()
| gpl-3.0 |
ageron/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 39 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (
tf.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
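# A rough, hedged analogue (standard library only) of the hash-bucket split
# used by ``in_training_set`` above: hashing each line to a bucket keeps the
# train/test assignment deterministic across runs without shuffling the file.
# The helper below is hypothetical and does not reproduce
# tf.string_to_hash_bucket_fast bit-for-bit.
def _example_in_training_set(line, train_fraction=0.7, num_buckets=1000000):
  import hashlib
  digest = hashlib.md5(line.encode("utf-8")).hexdigest()
  bucket_id = int(digest, 16) % num_buckets
  return bucket_id < int(train_fraction * num_buckets)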
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
    `(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
  # Seed numpy's random number generator.
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
pkruskal/scikit-learn | sklearn/neural_network/rbm.py | 205 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
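# A minimal sketch (NumPy only, relying on the module-level ``np`` import) of
# the free energy computed by ``BernoulliRBM._free_energy`` above,
# F(v) = -v.b_v - sum_j log(1 + exp((W v)_j + b_h_j)), for a tiny RBM with
# hand-picked, purely illustrative weights. The helper name and the numbers
# are hypothetical and not part of scikit-learn.
def _example_free_energy():
    W = np.array([[0.5, -0.2, 0.1],
                  [0.3, 0.4, -0.1]])  # shape (n_components, n_features)
    intercept_hidden = np.array([0.0, 0.1])
    intercept_visible = np.array([0.2, -0.3, 0.0])
    v = np.array([1.0, 0.0, 1.0])
    return (- v.dot(intercept_visible)
            - np.logaddexp(0, v.dot(W.T) + intercept_hidden).sum())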
| bsd-3-clause |
tclose/python-neo | neo/test/coretest/test_block.py | 7 | 34814 | # -*- coding: utf-8 -*-
"""
Tests of the neo.core.block.Block class
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.block import Block
from neo.core.container import filterdata
from neo.core import SpikeTrain, Unit
from neo.test.tools import (assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, clone_object,
get_annotations, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = dict([(str(x), TEST_ANNOTATIONS[x]) for x in
range(len(TEST_ANNOTATIONS))])
def test__get_fake_values(self):
self.annotations['seed'] = 0
file_datetime = get_fake_value('file_datetime', datetime, seed=0)
rec_datetime = get_fake_value('rec_datetime', datetime, seed=1)
index = get_fake_value('index', int, seed=2)
name = get_fake_value('name', str, seed=3, obj=Block)
description = get_fake_value('description', str, seed=4, obj='Block')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'file_datetime': file_datetime,
'rec_datetime': rec_datetime,
'index': index,
'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(Block, annotate=False, seed=0)
res12 = get_fake_values('Block', annotate=False, seed=0)
res21 = get_fake_values(Block, annotate=True, seed=0)
res22 = get_fake_values('Block', annotate=True, seed=0)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = 'Block'
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
for child in res.children_recur:
del child.annotations['i']
del child.annotations['j']
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 1)
seg = res.segments[0]
self.assertEqual(seg.annotations, self.annotations)
self.assertEqual(len(res.recordingchannelgroups), 1)
rcg = res.recordingchannelgroups[0]
self.assertEqual(rcg.annotations, self.annotations)
self.assertEqual(len(seg.analogsignalarrays), 1)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.irregularlysampledsignals), 1)
self.assertEqual(len(seg.spiketrains), 1)
self.assertEqual(len(seg.spikes), 1)
self.assertEqual(len(seg.events), 1)
self.assertEqual(len(seg.epochs), 1)
self.assertEqual(len(seg.eventarrays), 1)
self.assertEqual(len(seg.epocharrays), 1)
self.assertEqual(seg.analogsignalarrays[0].annotations,
self.annotations)
self.assertEqual(seg.analogsignals[0].annotations,
self.annotations)
self.assertEqual(seg.irregularlysampledsignals[0].annotations,
self.annotations)
self.assertEqual(seg.spiketrains[0].annotations,
self.annotations)
self.assertEqual(seg.spikes[0].annotations,
self.annotations)
self.assertEqual(seg.events[0].annotations,
self.annotations)
self.assertEqual(seg.epochs[0].annotations,
self.annotations)
self.assertEqual(seg.eventarrays[0].annotations,
self.annotations)
self.assertEqual(seg.epocharrays[0].annotations,
self.annotations)
self.assertEqual(len(rcg.recordingchannels), 1)
rchan = rcg.recordingchannels[0]
self.assertEqual(rchan.annotations, self.annotations)
self.assertEqual(len(rcg.units), 1)
unit = rcg.units[0]
self.assertEqual(unit.annotations, self.annotations)
self.assertEqual(len(rcg.analogsignalarrays), 1)
self.assertEqual(rcg.analogsignalarrays[0].annotations,
self.annotations)
self.assertEqual(len(rchan.analogsignals), 1)
self.assertEqual(len(rchan.irregularlysampledsignals), 1)
self.assertEqual(rchan.analogsignals[0].annotations,
self.annotations)
self.assertEqual(rchan.irregularlysampledsignals[0].annotations,
self.annotations)
self.assertEqual(len(unit.spiketrains), 1)
self.assertEqual(len(unit.spikes), 1)
self.assertEqual(unit.spiketrains[0].annotations,
self.annotations)
self.assertEqual(unit.spikes[0].annotations,
self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = Block
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 0)
self.assertEqual(len(res.recordingchannelgroups), 0)
class TestBlock(unittest.TestCase):
def setUp(self):
self.nchildren = 2
self.seed1 = 0
self.seed2 = 10000
self.blk1 = fake_neo(Block, seed=self.seed1, n=self.nchildren)
self.blk2 = fake_neo(Block, seed=self.seed2, n=self.nchildren)
self.targobj = self.blk1
self.segs1 = self.blk1.segments
self.segs2 = self.blk2.segments
self.rcgs1 = self.blk1.recordingchannelgroups
self.rcgs2 = self.blk2.recordingchannelgroups
self.units1 = [[unit for unit in rcg.units] for rcg in self.rcgs1]
self.units2 = [[unit for unit in rcg.units] for rcg in self.rcgs2]
self.rchans1 = [[rchan for rchan in rcg.recordingchannels]
for rcg in self.rcgs1]
self.rchans2 = [[rchan for rchan in rcg.recordingchannels]
for rcg in self.rcgs2]
self.units1 = sum(self.units1, [])
self.units2 = sum(self.units2, [])
self.rchans1 = sum(self.rchans1, [])
self.rchans2 = sum(self.rchans2, [])
self.sigarrs1 = [[sigarr for sigarr in rcg.analogsignalarrays]
for rcg in self.rcgs1]
self.sigarrs2 = [[sigarr for sigarr in rcg.analogsignalarrays]
for rcg in self.rcgs2]
self.spikes1 = [[spike for spike in unit.spikes]
for unit in self.units1]
self.spikes2 = [[spike for spike in unit.spikes]
for unit in self.units2]
self.trains1 = [[train for train in unit.spiketrains]
for unit in self.units1]
self.trains2 = [[train for train in unit.spiketrains]
for unit in self.units2]
self.sigs1 = [[sig for sig in rchan.analogsignals]
for rchan in self.rchans1]
self.sigs2 = [[sig for sig in rchan.analogsignals]
for rchan in self.rchans2]
self.irsigs1 = [[irsig for irsig in rchan.irregularlysampledsignals]
for rchan in self.rchans1]
self.irsigs2 = [[irsig for irsig in rchan.irregularlysampledsignals]
for rchan in self.rchans2]
self.epcs1 = [[epc for epc in seg.epochs]
for seg in self.segs1]
self.epcs2 = [[epc for epc in seg.epochs]
for seg in self.segs2]
self.epcas1 = [[epca for epca in seg.epocharrays]
for seg in self.segs1]
self.epcas2 = [[epca for epca in seg.epocharrays]
for seg in self.segs2]
self.evts1 = [[evt for evt in seg.events]
for seg in self.segs1]
self.evts2 = [[evt for evt in seg.events]
for seg in self.segs2]
self.evtas1 = [[evta for evta in seg.eventarrays]
for seg in self.segs1]
self.evtas2 = [[evta for evta in seg.eventarrays]
for seg in self.segs2]
self.sigarrs1 = sum(self.sigarrs1, [])
self.sigarrs2 = sum(self.sigarrs2, [])
self.spikes1 = sum(self.spikes1, [])
self.spikes2 = sum(self.spikes2, [])
self.trains1 = sum(self.trains1, [])
self.trains2 = sum(self.trains2, [])
self.sigs1 = sum(self.sigs1, [])
self.sigs2 = sum(self.sigs2, [])
self.irsigs1 = sum(self.irsigs1, [])
self.irsigs2 = sum(self.irsigs2, [])
self.epcs1 = sum(self.epcs1, [])
self.epcs2 = sum(self.epcs2, [])
self.epcas1 = sum(self.epcas1, [])
self.epcas2 = sum(self.epcas2, [])
self.evts1 = sum(self.evts1, [])
self.evts2 = sum(self.evts2, [])
self.evtas1 = sum(self.evtas1, [])
self.evtas2 = sum(self.evtas2, [])
def test_block_init(self):
blk = Block(name='a block')
assert_neo_object_is_compliant(blk)
self.assertEqual(blk.name, 'a block')
self.assertEqual(blk.file_origin, None)
def check_creation(self, blk):
assert_neo_object_is_compliant(blk)
seed = blk.annotations['seed']
targ0 = get_fake_value('file_datetime', datetime, seed=seed+0)
self.assertEqual(blk.file_datetime, targ0)
targ1 = get_fake_value('rec_datetime', datetime, seed=seed+1)
self.assertEqual(blk.rec_datetime, targ1)
targ2 = get_fake_value('index', int, seed=seed+2, obj=Block)
self.assertEqual(blk.index, targ2)
targ3 = get_fake_value('name', str, seed=seed+3, obj=Block)
self.assertEqual(blk.name, targ3)
targ4 = get_fake_value('description', str, seed=seed+4, obj=Block)
self.assertEqual(blk.description, targ4)
targ5 = get_fake_value('file_origin', str)
self.assertEqual(blk.file_origin, targ5)
targ6 = get_annotations()
targ6['seed'] = seed
self.assertEqual(blk.annotations, targ6)
self.assertTrue(hasattr(blk, 'recordingchannelgroups'))
self.assertTrue(hasattr(blk, 'segments'))
self.assertEqual(len(blk.recordingchannelgroups), self.nchildren)
self.assertEqual(len(blk.segments), self.nchildren)
def test__creation(self):
self.check_creation(self.blk1)
self.check_creation(self.blk2)
def test__merge(self):
blk1a = fake_neo(Block,
seed=self.seed1, n=self.nchildren)
assert_same_sub_schema(self.blk1, blk1a)
blk1a.annotate(seed=self.seed2)
blk1a.segments.append(self.segs2[0])
blk1a.merge(self.blk2)
segs1a = clone_object(self.blk1).segments
rcgs1a = clone_object(self.rcgs1)
assert_same_sub_schema(rcgs1a + self.rcgs2,
blk1a.recordingchannelgroups)
assert_same_sub_schema(segs1a + self.segs2,
blk1a.segments)
def test__children(self):
segs1a = clone_object(self.blk1).segments
rcgs1a = clone_object(self.rcgs1)
self.assertEqual(self.blk1._container_child_objects,
('Segment', 'RecordingChannelGroup'))
self.assertEqual(self.blk1._data_child_objects, ())
self.assertEqual(self.blk1._single_parent_objects, ())
self.assertEqual(self.blk1._multi_child_objects, ())
self.assertEqual(self.blk1._multi_parent_objects, ())
self.assertEqual(self.blk1._child_properties,
('Unit', 'RecordingChannel'))
self.assertEqual(self.blk1._single_child_objects,
('Segment', 'RecordingChannelGroup'))
self.assertEqual(self.blk1._container_child_containers,
('segments', 'recordingchannelgroups'))
self.assertEqual(self.blk1._data_child_containers, ())
self.assertEqual(self.blk1._single_child_containers,
('segments', 'recordingchannelgroups'))
self.assertEqual(self.blk1._single_parent_containers, ())
self.assertEqual(self.blk1._multi_child_containers, ())
self.assertEqual(self.blk1._multi_parent_containers, ())
self.assertEqual(self.blk1._child_objects,
('Segment', 'RecordingChannelGroup'))
self.assertEqual(self.blk1._child_containers,
('segments', 'recordingchannelgroups'))
self.assertEqual(self.blk1._parent_objects, ())
self.assertEqual(self.blk1._parent_containers, ())
self.assertEqual(len(self.blk1._single_children), 2*self.nchildren)
self.assertEqual(len(self.blk1._multi_children), 0)
self.assertEqual(len(self.blk1.data_children), 0)
self.assertEqual(len(self.blk1.data_children_recur),
4*self.nchildren**3 + 5*self.nchildren**2)
self.assertEqual(len(self.blk1.container_children), 2*self.nchildren)
self.assertEqual(len(self.blk1.container_children_recur),
2*self.nchildren + 2*self.nchildren**2)
self.assertEqual(len(self.blk1.children), 2*self.nchildren)
self.assertEqual(len(self.blk1.children_recur),
2*self.nchildren +
2*self.nchildren**2 +
4*self.nchildren**3 + 5*self.nchildren**2)
self.assertEqual(self.blk1._multi_children, ())
assert_same_sub_schema(list(self.blk1._single_children),
self.segs1 + self.rcgs1)
assert_same_sub_schema(list(self.blk1.container_children),
self.segs1 + self.rcgs1)
assert_same_sub_schema(list(self.blk1.container_children_recur),
self.segs1 + self.rcgs1 +
self.units1[:2] + self.rchans1[:2] +
self.units1[2:] + self.rchans1[2:])
assert_same_sub_schema(list(self.blk1.data_children_recur),
self.sigs1[::2] + self.sigarrs1[::2] +
self.epcs1[:2] + self.epcas1[:2] +
self.evts1[:2] + self.evtas1[:2] +
self.irsigs1[::2] + self.spikes1[::2] +
self.trains1[::2] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
self.epcs1[2:] + self.epcas1[2:] +
self.evts1[2:] + self.evtas1[2:] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2],
exclude=['channel_index'])
assert_same_sub_schema(list(self.blk1.children),
segs1a + rcgs1a)
assert_same_sub_schema(list(self.blk1.children_recur),
self.sigs1[::2] + self.sigarrs1[::2] +
self.epcs1[:2] + self.epcas1[:2] +
self.evts1[:2] + self.evtas1[:2] +
self.irsigs1[::2] + self.spikes1[::2] +
self.trains1[::2] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
self.epcs1[2:] + self.epcas1[2:] +
self.evts1[2:] + self.evtas1[2:] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2] +
self.segs1 + self.rcgs1 +
self.units1[:2] + self.rchans1[:2] +
self.units1[2:] + self.rchans1[2:],
exclude=['channel_index'])
def test__size(self):
targ = {'segments': self.nchildren,
'recordingchannelgroups': self.nchildren}
self.assertEqual(self.targobj.size, targ)
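    # Filtering with no criteria (or only empty criteria) should return nothing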
def test__filter_none(self):
targ = []
res1 = self.targobj.filter()
res2 = self.targobj.filter({})
res3 = self.targobj.filter([])
res4 = self.targobj.filter([{}])
res5 = self.targobj.filter([{}, {}])
res6 = self.targobj.filter([{}, {}])
res7 = self.targobj.filter(targdict={})
res8 = self.targobj.filter(targdict=[])
res9 = self.targobj.filter(targdict=[{}])
res10 = self.targobj.filter(targdict=[{}, {}])
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
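    # Filtering on a single annotation value should return all matching data objects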
def test__filter_annotation_single(self):
targ = ([self.epcs1[1], self.epcas1[1],
self.evts1[1], self.evtas1[1]] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
[self.epcs1[3], self.epcas1[3],
self.evts1[3], self.evtas1[3]] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2])
res0 = self.targobj.filter(j=1)
res1 = self.targobj.filter({'j': 1})
res2 = self.targobj.filter(targdict={'j': 1})
res3 = self.targobj.filter([{'j': 1}])
res4 = self.targobj.filter(targdict=[{'j': 1}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_nores(self):
targ = []
res0 = self.targobj.filter(j=5)
res1 = self.targobj.filter({'j': 5})
res2 = self.targobj.filter(targdict={'j': 5})
res3 = self.targobj.filter([{'j': 5}])
res4 = self.targobj.filter(targdict=[{'j': 5}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_attribute_single(self):
targ = [self.spikes1[0]]
name = self.spikes1[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_attribute_single_nores(self):
targ = []
name = self.spikes2[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi(self):
targ = ([self.epcs1[1], self.epcas1[1],
self.evts1[1], self.evtas1[1]] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
[self.epcs1[3], self.epcas1[3],
self.evts1[3], self.evtas1[3]] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2] +
[self.spikes1[0]])
name = self.spikes1[0].name
res0 = self.targobj.filter(name=name, j=1)
res1 = self.targobj.filter({'name': name, 'j': 1})
res2 = self.targobj.filter(targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_nores(self):
targ = []
name0 = self.sigarrs2[0].name
res0 = self.targobj.filter([{'j': 5}, {}])
res1 = self.targobj.filter({}, j=0)
res2 = self.targobj.filter([{}], i=0)
res3 = self.targobj.filter({'name': name0}, j=1)
res4 = self.targobj.filter(targdict={'name': name0}, j=1)
res5 = self.targobj.filter(name=name0, targdict={'j': 1})
res6 = self.targobj.filter(name=name0, j=5)
res7 = self.targobj.filter({'name': name0, 'j': 5})
res8 = self.targobj.filter(targdict={'name': name0, 'j': 5})
res9 = self.targobj.filter({'name': name0}, j=5)
res10 = self.targobj.filter(targdict={'name': name0}, j=5)
res11 = self.targobj.filter(name=name0, targdict={'j': 5})
res12 = self.targobj.filter({'name': name0}, j=5)
res13 = self.targobj.filter(targdict={'name': name0}, j=5)
res14 = self.targobj.filter(name=name0, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filter_multi_partres_annotation_attribute(self):
targ = [self.spikes1[0]]
name = self.spikes1[0].name
res0 = self.targobj.filter(name=name, j=90)
res1 = self.targobj.filter({'name': name, 'j': 90})
res2 = self.targobj.filter(targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_partres_annotation_annotation(self):
targ = self.sigs1[::2] + self.spikes1[::2]
res0 = self.targobj.filter([{'j': 0}, {'i': 0}])
res1 = self.targobj.filter({'j': 0}, i=0)
res2 = self.targobj.filter([{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_single_annotation_obj_single(self):
targ = self.trains1[1::2]
res0 = self.targobj.filter(j=1, objects='SpikeTrain')
res1 = self.targobj.filter(j=1, objects=SpikeTrain)
res2 = self.targobj.filter(j=1, objects=['SpikeTrain'])
res3 = self.targobj.filter(j=1, objects=[SpikeTrain])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
def test__filter_single_annotation_obj_multi(self):
targ = self.spikes1[1::2] + self.trains1[1::2]
res0 = self.targobj.filter(j=1, objects=['Spike', SpikeTrain])
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_norecur(self):
targ = []
res0 = self.targobj.filter(j=1, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata(self):
targ = []
res0 = self.targobj.filter(j=1, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container(self):
targ = ([self.epcs1[1], self.epcas1[1],
self.evts1[1], self.evtas1[1]] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
[self.epcs1[3], self.epcas1[3],
self.evts1[3], self.evtas1[3]] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2] +
[self.segs1[1], self.rcgs1[1],
self.units1[1], self.rchans1[1],
self.units1[3], self.rchans1[3]])
res0 = self.targobj.filter(j=1, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_data(self):
targ = [self.spikes1[0]]
res0 = self.targobj.filter(name=self.spikes1[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_container(self):
targ = [self.rchans1[0]]
res0 = self.targobj.filter(name=self.rchans1[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container_norecur(self):
targ = [self.segs1[1], self.rcgs1[1]]
res0 = self.targobj.filter(j=1, container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.spikes1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container(self):
targ = [self.segs1[1], self.rcgs1[1],
self.units1[1], self.rchans1[1],
self.units1[3], self.rchans1[3]]
res0 = self.targobj.filter(j=1,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container(self):
targ = [self.rchans1[0]]
res0 = self.targobj.filter(name=self.rchans1[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_nores(self):
targ = []
res0 = self.targobj.filter(name=self.spikes1[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container_norecur(self):
targ = [self.segs1[1], self.rcgs1[1]]
res0 = self.targobj.filter(j=1,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.spikes1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
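    # The filterdata function should apply combined annotation and attribute criteria to the recursive children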
def test__filterdata_multi(self):
data = self.targobj.children_recur
targ = ([self.epcs1[1], self.epcas1[1],
self.evts1[1], self.evtas1[1]] +
self.sigs1[1::2] + self.sigarrs1[1::2] +
[self.epcs1[3], self.epcas1[3],
self.evts1[3], self.evtas1[3]] +
self.irsigs1[1::2] +
self.spikes1[1::2] + self.trains1[1::2] +
[self.segs1[1], self.rcgs1[1],
self.units1[1], self.rchans1[1],
self.units1[3], self.rchans1[3],
self.spikes1[0]])
name = self.spikes1[0].name
res0 = filterdata(data, name=name, j=1)
res1 = filterdata(data, {'name': name, 'j': 1})
res2 = filterdata(data, targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_nores(self):
data = self.targobj.children_recur
targ = []
name1 = self.sigarrs1[0].name
name2 = self.sigarrs2[0].name
res0 = filterdata(data, [{'j': 0}, {}])
res1 = filterdata(data, {}, i=0)
res2 = filterdata(data, [{}], i=0)
res3 = filterdata(data, name=name1, targdict={'j': 1})
res4 = filterdata(data, {'name': name1}, j=1)
res5 = filterdata(data, targdict={'name': name1}, j=1)
res6 = filterdata(data, name=name2, j=5)
res7 = filterdata(data, {'name': name2, 'j': 5})
res8 = filterdata(data, targdict={'name': name2, 'j': 5})
res9 = filterdata(data, {'name': name2}, j=5)
res10 = filterdata(data, targdict={'name': name2}, j=5)
res11 = filterdata(data, name=name2, targdict={'j': 5})
res12 = filterdata(data, {'name': name1}, j=5)
res13 = filterdata(data, targdict={'name': name1}, j=5)
res14 = filterdata(data, name=name1, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filterdata_multi_partres_annotation_attribute(self):
data = self.targobj.children_recur
targ = [self.spikes1[0]]
name = self.spikes1[0].name
res0 = filterdata(data, name=name, j=90)
res1 = filterdata(data, {'name': name, 'j': 90})
res2 = filterdata(data, targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_partres_annotation_annotation(self):
data = self.targobj.children_recur
targ = (self.sigs1[::2] + self.spikes1[::2] +
self.segs1[:1] + self.units1[::2])
res0 = filterdata(data, [{'j': 0}, {'i': 0}])
res1 = filterdata(data, {'j': 0}, i=0)
res2 = filterdata(data, [{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
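    # The pretty representation should list child counts, attributes, annotations, and the contained segments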
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
res = pretty(self.blk1)
ann = get_annotations()
ann['seed'] = self.seed1
ann = pretty(ann).replace('\n ', '\n ')
seg0 = pretty(self.segs1[0])
seg1 = pretty(self.segs1[1])
seg0 = seg0.replace('\n', '\n ')
seg1 = seg1.replace('\n', '\n ')
targ = ("Block with " +
("%s segments, %s recordingchannelgroups\n" %
(len(self.segs1), len(self.rcgs1))) +
("name: '%s'\ndescription: '%s'\n" % (self.blk1.name,
self.blk1.description)) +
("annotations: %s\n" % ann) +
("file_origin: '%s'\n" % self.blk1.file_origin) +
("file_datetime: %s\n" % repr(self.blk1.file_datetime)) +
("rec_datetime: %s\n" % repr(self.blk1.rec_datetime)) +
("index: %s\n" % self.blk1.index) +
("# segments (N=%s)\n" % len(self.segs1)) +
('%s: %s\n' % (0, seg0)) +
('%s: %s' % (1, seg1)))
self.assertEqual(res, targ)
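    # list_units and list_children_by_class should return the Unit grandchildren of the Block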
def test_block_list_units(self):
assert_same_sub_schema(self.units1, self.blk1.list_units)
assert_same_sub_schema(self.units2, self.blk2.list_units)
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class(Unit))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class(Unit))
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class('Unit'))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('Unit'))
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class('units'))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('units'))
def test_block_list_recordingchannels(self):
assert_same_sub_schema(self.rchans1, self.blk1.list_recordingchannels)
assert_same_sub_schema(self.rchans2, self.blk2.list_recordingchannels)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
GoogleCloudPlatform/public-datasets-pipelines | datasets/nhtsa_traffic_fatalities/pipelines/nhtsa_traffic_fatalities/nhtsa_traffic_fatalities_dag.py | 1 | 319461 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.operators import kubernetes_engine
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2022-03-01",
}
with DAG(
dag_id="nhtsa_traffic_fatalities.nhtsa_traffic_fatalities",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
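    # Provision the GKE cluster that will run the CSV transform pods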
create_cluster = kubernetes_engine.GKECreateClusterOperator(
task_id="create_cluster",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
body={
"name": "nhtsa-traffic-fatalities",
"initial_node_count": 2,
"network": "{{ var.value.vpc_network }}",
"node_config": {
"machine_type": "e2-standard-16",
"oauth_scopes": [
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/cloud-platform",
],
},
},
)
    # Run CSV transform within kubernetes pod for the accident pipeline (2015 data)
accident_2015_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="accident_2015_transform_csv",
startup_timeout_seconds=600,
name="accident_2015",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "accident.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2015.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_vehicle_forms_submitted_all",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_parked_working_vehicles",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "county",\n "city",\n "day_of_crash",\n "day_name",\n "month_of_crash",\n "month_of_crash_name",\n "year_of_crash",\n "day_of_week",\n "day_of_week_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "national_highway_system",\n "national_highway_system_name",\n "route_signing",\n "route_signing_name",\n "trafficway_identifier",\n "trafficway_identifier_2",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "ownership",\n "ownership_name",\n "milepoint",\n "milepoint_name",\n "latitude",\n "latitude_name",\n "longitude",\n "longitude_name",\n "special_jurisdiction",\n "special_jurisdiction_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "relation_to_junction_within_interchange_area",\n "relation_to_junction_within_interchange_area_name",\n "relation_to_junction_specific_location",\n "relation_to_junction_specific_location_name",\n "type_of_intersection",\n "type_of_intersection_name",\n "work_zone",\n "work_zone_name",\n "relation_to_trafficway",\n "relation_to_trafficway_name",\n "light_condition",\n "light_condition_name",\n "atmospheric_conditions_1",\n "atmospheric_conditions_1_name",\n "atmospheric_conditions_2",\n "atmospheric_conditions_2_name",\n "atmospheric_conditions",\n "atmospheric_conditions_name",\n "school_bus_related",\n "school_bus_related_name",\n "rail_grade_crossing_identifier",\n "rail_grade_crossing_identifier_name",\n "hour_of_notification",\n "hour_of_notification_name",\n "minute_of_notification",\n "minute_of_notification_name",\n "hour_of_arrival_at_scene",\n "hour_of_arrival_at_scene_name",\n "minute_of_arrival_at_scene",\n "minute_of_arrival_at_scene_name",\n "hour_of_ems_arrival_at_hospital",\n "hour_of_ems_arrival_at_hospital_name",\n "minute_of_ems_arrival_at_hospital",\n "minute_of_ems_arrival_at_hospital_name",\n "related_factors_crash_level_1",\n "related_factors_crash_level_1_name",\n "related_factors_crash_level_2",\n "related_factors_crash_level_2_name",\n "related_factors_crash_level_3",\n "related_factors_crash_level_3_name",\n "number_of_fatalities",\n "number_of_drunk_drivers"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_vehicle_forms_submitted_all": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_parked_working_vehicles": "str",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles": "str",\n "number_of_forms_submitted_for_persons_in_motor_vehicles": "str",\n "number_of_persons_in_motor_vehicles_in_transport_mvit": "str",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit": "str",\n "county": "str",\n "city": "str",\n "day_of_crash": "str",\n "day_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "year_of_crash": "str",\n "day_of_week": "str",\n "day_of_week_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "national_highway_system": "str",\n "national_highway_system_name": "str",\n "route_signing": "str",\n "route_signing_name": "str",\n "trafficway_identifier": "str",\n "trafficway_identifier_2": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "ownership": "str",\n "ownership_name": "str",\n "milepoint": "str",\n "milepoint_name": "str",\n "latitude": "str",\n "latitude_name": "str",\n "longitude": "str",\n "longitude_name": "str",\n "special_jurisdiction": "str",\n "special_jurisdiction_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "relation_to_junction_within_interchange_area": "str",\n "relation_to_junction_within_interchange_area_name": "str",\n "relation_to_junction_specific_location": "str",\n "relation_to_junction_specific_location_name": "str",\n "type_of_intersection": "str",\n "type_of_intersection_name": "str",\n "work_zone": "str",\n "work_zone_name": "str",\n "relation_to_trafficway": "str",\n "relation_to_trafficway_name": "str",\n "light_condition": "str",\n "light_condition_name": "str",\n "atmospheric_conditions_1": "str",\n "atmospheric_conditions_1_name": "str",\n "atmospheric_conditions_2": "str",\n "atmospheric_conditions_2_name": "str",\n "atmospheric_conditions": "str",\n "atmospheric_conditions_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "rail_grade_crossing_identifier": "str",\n "rail_grade_crossing_identifier_name": "str",\n "hour_of_notification": "str",\n "hour_of_notification_name": "str",\n "minute_of_notification": "str",\n "minute_of_notification_name": "str",\n "hour_of_arrival_at_scene": "str",\n "hour_of_arrival_at_scene_name": "str",\n "minute_of_arrival_at_scene": "str",\n "minute_of_arrival_at_scene_name": "str",\n "hour_of_ems_arrival_at_hospital": "str",\n "hour_of_ems_arrival_at_hospital_name": "str",\n "minute_of_ems_arrival_at_hospital": "str",\n "minute_of_ems_arrival_at_hospital_name": "str",\n "related_factors_crash_level_1": "str",\n "related_factors_crash_level_1_name": "str",\n "related_factors_crash_level_2": "str",\n "related_factors_crash_level_2_name": "str",\n "related_factors_crash_level_3": "str",\n "related_factors_crash_level_3_name": "str",\n "number_of_fatalities": "str",\n "number_of_drunk_drivers": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_TOTAL": "number_of_vehicle_forms_submitted_all",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PVH_INVL": "number_of_parked_working_vehicles",\n "PEDS": "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "PERSONS": "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "PERMVIT": "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "PERNOTMVIT": "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "COUNTY": "county",\n "CITY": "city",\n "DAY": "day_of_crash",\n "DAYNAME": "day_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "YEAR": "year_of_crash",\n "DAY_WEEK": "day_of_week",\n "DAY_WEEKNAME": "day_of_week_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "NHS": "national_highway_system",\n "NHSNAME": "national_highway_system_name",\n "ROUTE": "route_signing",\n "ROUTENAME": "route_signing_name",\n "TWAY_ID": "trafficway_identifier",\n "TWAY_ID2": "trafficway_identifier_2",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "RD_OWNER": "ownership",\n "RD_OWNERNAME": "ownership_name",\n "MILEPT": "milepoint",\n "MILEPTNAME": "milepoint_name",\n "LATITUDE": "latitude",\n "LATITUDENAME": "latitude_name",\n "LONGITUD": "longitude",\n "LONGITUDNAME": "longitude_name",\n "SP_JUR": "special_jurisdiction",\n "SP_JURNAME": "special_jurisdiction_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "RELJCT1": "relation_to_junction_within_interchange_area",\n "RELJCT1NAME": "relation_to_junction_within_interchange_area_name",\n "RELJCT2": "relation_to_junction_specific_location",\n "RELJCT2NAME": "relation_to_junction_specific_location_name",\n "TYP_INT" : "type_of_intersection",\n "TYP_INTNAME": "type_of_intersection_name",\n "WRK_ZONE": "work_zone",\n "WRK_ZONENAME": "work_zone_name",\n "REL_ROAD": "relation_to_trafficway",\n "REL_ROADNAME": "relation_to_trafficway_name",\n "LGT_COND": "light_condition",\n "LGT_CONDNAME": "light_condition_name",\n "WEATHER1": "atmospheric_conditions_1",\n "WEATHER1NAME": "atmospheric_conditions_1_name",\n "WEATHER2": "atmospheric_conditions_2",\n "WEATHER2NAME": "atmospheric_conditions_2_name",\n "WEATHER": "atmospheric_conditions",\n "WEATHERNAME": "atmospheric_conditions_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "RAIL": "rail_grade_crossing_identifier",\n "RAILNAME": "rail_grade_crossing_identifier_name",\n "NOT_HOUR": "hour_of_notification",\n "NOT_HOURNAME": "hour_of_notification_name",\n "NOT_MIN": "minute_of_notification",\n "NOT_MINNAME": "minute_of_notification_name",\n "ARR_HOUR": "hour_of_arrival_at_scene",\n "ARR_HOURNAME": "hour_of_arrival_at_scene_name",\n "ARR_MIN": "minute_of_arrival_at_scene",\n "ARR_MINNAME": "minute_of_arrival_at_scene_name",\n "HOSP_HR": "hour_of_ems_arrival_at_hospital",\n "HOSP_HRNAME": "hour_of_ems_arrival_at_hospital_name",\n "HOSP_MN": "minute_of_ems_arrival_at_hospital",\n "HOSP_MNNAME": "minute_of_ems_arrival_at_hospital_name",\n "CF1": "related_factors_crash_level_1",\n "CF1NAME": "related_factors_crash_level_1_name",\n "CF2": "related_factors_crash_level_2",\n "CF2NAME": 
"related_factors_crash_level_2_name",\n "CF3": "related_factors_crash_level_3",\n "CF3NAME": "related_factors_crash_level_3_name",\n "FATALS": "number_of_fatalities",\n "DRUNK_DR": "number_of_drunk_drivers"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the accident pipeline (2016-2019 data)
accident_2016_2019_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="accident_2016_2019_transform_csv",
startup_timeout_seconds=600,
name="accident_2016_2019",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "accident.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2016_2019.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_vehicle_forms_submitted_all",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_parked_working_vehicles",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "county",\n "county_name",\n "city",\n "city_name",\n "day_of_crash",\n "day_name",\n "month_of_crash",\n "month_of_crash_name",\n "year_of_crash",\n "day_of_week",\n "day_of_week_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "national_highway_system",\n "national_highway_system_name",\n "route_signing",\n "route_signing_name",\n "trafficway_identifier",\n "trafficway_identifier_2",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "ownership",\n "ownership_name",\n "milepoint",\n "milepoint_name",\n "latitude",\n "latitude_name",\n "longitude",\n "longitude_name",\n "special_jurisdiction",\n "special_jurisdiction_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "relation_to_junction_within_interchange_area",\n "relation_to_junction_within_interchange_area_name",\n "relation_to_junction_specific_location",\n "relation_to_junction_specific_location_name",\n "type_of_intersection",\n "type_of_intersection_name",\n "work_zone",\n "work_zone_name",\n "relation_to_trafficway",\n "relation_to_trafficway_name",\n "light_condition",\n "light_condition_name",\n "atmospheric_conditions_1",\n "atmospheric_conditions_1_name",\n "atmospheric_conditions_2",\n "atmospheric_conditions_2_name",\n "atmospheric_conditions",\n "atmospheric_conditions_name",\n "school_bus_related",\n "school_bus_related_name",\n "rail_grade_crossing_identifier",\n "rail_grade_crossing_identifier_name",\n "hour_of_notification",\n "hour_of_notification_name",\n "minute_of_notification",\n "minute_of_notification_name",\n "hour_of_arrival_at_scene",\n "hour_of_arrival_at_scene_name",\n "minute_of_arrival_at_scene",\n "minute_of_arrival_at_scene_name",\n "hour_of_ems_arrival_at_hospital",\n "hour_of_ems_arrival_at_hospital_name",\n "minute_of_ems_arrival_at_hospital",\n "minute_of_ems_arrival_at_hospital_name",\n "related_factors_crash_level_1",\n "related_factors_crash_level_1_name",\n "related_factors_crash_level_2",\n "related_factors_crash_level_2_name",\n "related_factors_crash_level_3",\n "related_factors_crash_level_3_name",\n "number_of_fatalities",\n "number_of_drunk_drivers"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_vehicle_forms_submitted_all": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_parked_working_vehicles": "str",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles": "str",\n "number_of_forms_submitted_for_persons_in_motor_vehicles": "str",\n "number_of_persons_in_motor_vehicles_in_transport_mvit": "str",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit": "str",\n "county": "str",\n "county_name": "str",\n "city": "str",\n "city_name": "str",\n "day_of_crash": "str",\n "day_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "year_of_crash": "str",\n "day_of_week": "str",\n "day_of_week_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "national_highway_system": "str",\n "national_highway_system_name": "str",\n "route_signing": "str",\n "route_signing_name": "str",\n "trafficway_identifier": "str",\n "trafficway_identifier_2": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "ownership": "str",\n "ownership_name": "str",\n "milepoint": "str",\n "milepoint_name": "str",\n "latitude": "str",\n "latitude_name": "str",\n "longitude": "str",\n "longitude_name": "str",\n "special_jurisdiction": "str",\n "special_jurisdiction_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "relation_to_junction_within_interchange_area": "str",\n "relation_to_junction_within_interchange_area_name": "str",\n "relation_to_junction_specific_location": "str",\n "relation_to_junction_specific_location_name": "str",\n "type_of_intersection": "str",\n "type_of_intersection_name": "str",\n "work_zone": "str",\n "work_zone_name": "str",\n "relation_to_trafficway": "str",\n "relation_to_trafficway_name": "str",\n "light_condition": "str",\n "light_condition_name": "str",\n "atmospheric_conditions_1": "str",\n "atmospheric_conditions_1_name": "str",\n "atmospheric_conditions_2": "str",\n "atmospheric_conditions_2_name": "str",\n "atmospheric_conditions": "str",\n "atmospheric_conditions_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "rail_grade_crossing_identifier": "str",\n "rail_grade_crossing_identifier_name": "str",\n "hour_of_notification": "str",\n "hour_of_notification_name": "str",\n "minute_of_notification": "str",\n "minute_of_notification_name": "str",\n "hour_of_arrival_at_scene": "str",\n "hour_of_arrival_at_scene_name": "str",\n "minute_of_arrival_at_scene": "str",\n "minute_of_arrival_at_scene_name": "str",\n "hour_of_ems_arrival_at_hospital": "str",\n "hour_of_ems_arrival_at_hospital_name": "str",\n "minute_of_ems_arrival_at_hospital": "str",\n "minute_of_ems_arrival_at_hospital_name": "str",\n "related_factors_crash_level_1": "str",\n "related_factors_crash_level_1_name": "str",\n "related_factors_crash_level_2": "str",\n "related_factors_crash_level_2_name": "str",\n "related_factors_crash_level_3": "str",\n "related_factors_crash_level_3_name": "str",\n "number_of_fatalities": "str",\n "number_of_drunk_drivers": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_TOTAL": "number_of_vehicle_forms_submitted_all",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PVH_INVL": "number_of_parked_working_vehicles",\n "PEDS": "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "PERSONS": "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "PERMVIT": "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "PERNOTMVIT": "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "COUNTY": "county",\n "COUNTYNAME": "county_name",\n "CITY": "city",\n "CITYNAME": "city_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "YEAR": "year_of_crash",\n "DAY_WEEK": "day_of_week",\n "DAY_WEEKNAME": "day_of_week_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "NHS": "national_highway_system",\n "NHSNAME": "national_highway_system_name",\n "ROUTE": "route_signing",\n "ROUTENAME": "route_signing_name",\n "TWAY_ID": "trafficway_identifier",\n "TWAY_ID2": "trafficway_identifier_2",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "RD_OWNER": "ownership",\n "RD_OWNERNAME": "ownership_name",\n "MILEPT": "milepoint",\n "MILEPTNAME": "milepoint_name",\n "LATITUDE": "latitude",\n "LATITUDENAME": "latitude_name",\n "LONGITUD": "longitude",\n "LONGITUDNAME": "longitude_name",\n "SP_JUR": "special_jurisdiction",\n "SP_JURNAME": "special_jurisdiction_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "RELJCT1": "relation_to_junction_within_interchange_area",\n "RELJCT1NAME": "relation_to_junction_within_interchange_area_name",\n "RELJCT2": "relation_to_junction_specific_location",\n "RELJCT2NAME": "relation_to_junction_specific_location_name",\n "TYP_INT" : "type_of_intersection",\n "TYP_INTNAME": "type_of_intersection_name",\n "WRK_ZONE": "work_zone",\n "WRK_ZONENAME": "work_zone_name",\n "REL_ROAD": "relation_to_trafficway",\n "REL_ROADNAME": "relation_to_trafficway_name",\n "LGT_COND": "light_condition",\n "LGT_CONDNAME": "light_condition_name",\n "WEATHER1": "atmospheric_conditions_1",\n "WEATHER1NAME": "atmospheric_conditions_1_name",\n "WEATHER2": "atmospheric_conditions_2",\n "WEATHER2NAME": "atmospheric_conditions_2_name",\n "WEATHER": "atmospheric_conditions",\n "WEATHERNAME": "atmospheric_conditions_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "RAIL": "rail_grade_crossing_identifier",\n "RAILNAME": "rail_grade_crossing_identifier_name",\n "NOT_HOUR": "hour_of_notification",\n "NOT_HOURNAME": "hour_of_notification_name",\n "NOT_MIN": "minute_of_notification",\n "NOT_MINNAME": "minute_of_notification_name",\n "ARR_HOUR": "hour_of_arrival_at_scene",\n "ARR_HOURNAME": "hour_of_arrival_at_scene_name",\n "ARR_MIN": "minute_of_arrival_at_scene",\n "ARR_MINNAME": "minute_of_arrival_at_scene_name",\n "HOSP_HR": "hour_of_ems_arrival_at_hospital",\n "HOSP_HRNAME": "hour_of_ems_arrival_at_hospital_name",\n "HOSP_MN": "minute_of_ems_arrival_at_hospital",\n "HOSP_MNNAME": "minute_of_ems_arrival_at_hospital_name",\n "CF1": "related_factors_crash_level_1",\n "CF1NAME": "related_factors_crash_level_1_name",\n 
"CF2": "related_factors_crash_level_2",\n "CF2NAME": "related_factors_crash_level_2_name",\n "CF3": "related_factors_crash_level_3",\n "CF3NAME": "related_factors_crash_level_3_name",\n "FATALS": "number_of_fatalities",\n "DRUNK_DR": "number_of_drunk_drivers"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the accident pipeline (2020 data)
accident_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="accident_2020_transform_csv",
startup_timeout_seconds=600,
name="accident_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "accident.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.accident_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_vehicle_forms_submitted_all",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_parked_working_vehicles",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "county",\n "county_name",\n "city",\n "city_name",\n "day_of_crash",\n "day_name",\n "month_of_crash",\n "month_of_crash_name",\n "year_of_crash",\n "day_of_week",\n "day_of_week_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "national_highway_system",\n "national_highway_system_name",\n "route_signing",\n "route_signing_name",\n "trafficway_identifier",\n "trafficway_identifier_2",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "ownership",\n "ownership_name",\n "milepoint",\n "milepoint_name",\n "latitude",\n "latitude_name",\n "longitude",\n "longitude_name",\n "special_jurisdiction",\n "special_jurisdiction_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "relation_to_junction_within_interchange_area",\n "relation_to_junction_within_interchange_area_name",\n "relation_to_junction_specific_location",\n "relation_to_junction_specific_location_name",\n "type_of_intersection",\n "type_of_intersection_name",\n "work_zone",\n "work_zone_name",\n "relation_to_trafficway",\n "relation_to_trafficway_name",\n "light_condition",\n "light_condition_name",\n "atmospheric_conditions_1",\n "atmospheric_conditions_1_name",\n "atmospheric_conditions_2",\n "atmospheric_conditions_2_name",\n "rail_grade_crossing_identifier",\n "rail_grade_crossing_identifier_name",\n "hour_of_notification",\n "hour_of_notification_name",\n "minute_of_notification",\n "minute_of_notification_name",\n "hour_of_arrival_at_scene",\n "hour_of_arrival_at_scene_name",\n "minute_of_arrival_at_scene",\n "minute_of_arrival_at_scene_name",\n "hour_of_ems_arrival_at_hospital",\n "hour_of_ems_arrival_at_hospital_name",\n "minute_of_ems_arrival_at_hospital",\n "minute_of_ems_arrival_at_hospital_name",\n "number_of_fatalities",\n "number_of_drunk_drivers"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_vehicle_forms_submitted_all": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_parked_working_vehicles": "str",\n "number_of_forms_submitted_for_persons_not_in_motor_vehicles": "str",\n "number_of_forms_submitted_for_persons_in_motor_vehicles": "str",\n "number_of_persons_in_motor_vehicles_in_transport_mvit": "str",\n "number_of_persons_not_in_motor_vehicles_in_transport_mvit": "str",\n "county": "str",\n "county_name": "str",\n "city": "str",\n "city_name": "str",\n "day_of_crash": "str",\n "day_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "year_of_crash": "str",\n "day_of_week": "str",\n "day_of_week_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "national_highway_system": "str",\n "national_highway_system_name": "str",\n "route_signing": "str",\n "route_signing_name": "str",\n "trafficway_identifier": "str",\n "trafficway_identifier_2": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "ownership": "str",\n "ownership_name": "str",\n "milepoint": "str",\n "milepoint_name": "str",\n "latitude": "str",\n "latitude_name": "str",\n "longitude": "str",\n "longitude_name": "str",\n "special_jurisdiction": "str",\n "special_jurisdiction_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "relation_to_junction_within_interchange_area": "str",\n "relation_to_junction_within_interchange_area_name": "str",\n "relation_to_junction_specific_location": "str",\n "relation_to_junction_specific_location_name": "str",\n "type_of_intersection": "str",\n "type_of_intersection_name": "str",\n "work_zone": "str",\n "work_zone_name": "str",\n "relation_to_trafficway": "str",\n "relation_to_trafficway_name": "str",\n "light_condition": "str",\n "light_condition_name": "str",\n "atmospheric_conditions_1": "str",\n "atmospheric_conditions_1_name": "str",\n "atmospheric_conditions_2": "str",\n "atmospheric_conditions_2_name": "str",\n "rail_grade_crossing_identifier": "str",\n "rail_grade_crossing_identifier_name": "str",\n "hour_of_notification": "str",\n "hour_of_notification_name": "str",\n "minute_of_notification": "str",\n "minute_of_notification_name": "str",\n "hour_of_arrival_at_scene": "str",\n "hour_of_arrival_at_scene_name": "str",\n "minute_of_arrival_at_scene": "str",\n "minute_of_arrival_at_scene_name": "str",\n "hour_of_ems_arrival_at_hospital": "str",\n "hour_of_ems_arrival_at_hospital_name": "str",\n "minute_of_ems_arrival_at_hospital": "str",\n "minute_of_ems_arrival_at_hospital_name": "str",\n "number_of_fatalities": "str",\n "number_of_drunk_drivers": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_TOTAL": "number_of_vehicle_forms_submitted_all",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PVH_INVL": "number_of_parked_working_vehicles",\n "PEDS": "number_of_forms_submitted_for_persons_not_in_motor_vehicles",\n "PERSONS": "number_of_forms_submitted_for_persons_in_motor_vehicles",\n "PERMVIT": "number_of_persons_in_motor_vehicles_in_transport_mvit",\n "PERNOTMVIT": "number_of_persons_not_in_motor_vehicles_in_transport_mvit",\n "COUNTY": "county",\n "COUNTYNAME": "county_name",\n "CITY": "city",\n "CITYNAME": "city_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "YEAR": "year_of_crash",\n "DAY_WEEK": "day_of_week",\n "DAY_WEEKNAME": "day_of_week_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "NHS": "national_highway_system",\n "NHSNAME": "national_highway_system_name",\n "ROUTE": "route_signing",\n "ROUTENAME": "route_signing_name",\n "TWAY_ID": "trafficway_identifier",\n "TWAY_ID2": "trafficway_identifier_2",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "RD_OWNER": "ownership",\n "RD_OWNERNAME": "ownership_name",\n "MILEPT": "milepoint",\n "MILEPTNAME": "milepoint_name",\n "LATITUDE": "latitude",\n "LATITUDENAME": "latitude_name",\n "LONGITUD": "longitude",\n "LONGITUDNAME": "longitude_name",\n "SP_JUR": "special_jurisdiction",\n "SP_JURNAME": "special_jurisdiction_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "RELJCT1": "relation_to_junction_within_interchange_area",\n "RELJCT1NAME": "relation_to_junction_within_interchange_area_name",\n "RELJCT2": "relation_to_junction_specific_location",\n "RELJCT2NAME": "relation_to_junction_specific_location_name",\n "TYP_INT" : "type_of_intersection",\n "TYP_INTNAME": "type_of_intersection_name",\n "WRK_ZONE": "work_zone",\n "WRK_ZONENAME": "work_zone_name",\n "REL_ROAD": "relation_to_trafficway",\n "REL_ROADNAME": "relation_to_trafficway_name",\n "LGT_COND": "light_condition",\n "LGT_CONDNAME": "light_condition_name",\n "WEATHER1": "atmospheric_conditions_1",\n "WEATHER1NAME": "atmospheric_conditions_1_name",\n "WEATHER2": "atmospheric_conditions_2",\n "WEATHER2NAME": "atmospheric_conditions_2_name",\n "RAIL": "rail_grade_crossing_identifier",\n "RAILNAME": "rail_grade_crossing_identifier_name",\n "NOT_HOUR": "hour_of_notification",\n "NOT_HOURNAME": "hour_of_notification_name",\n "NOT_MIN": "minute_of_notification",\n "NOT_MINNAME": "minute_of_notification_name",\n "ARR_HOUR": "hour_of_arrival_at_scene",\n "ARR_HOURNAME": "hour_of_arrival_at_scene_name",\n "ARR_MIN": "minute_of_arrival_at_scene",\n "ARR_MINNAME": "minute_of_arrival_at_scene_name",\n "HOSP_HR": "hour_of_ems_arrival_at_hospital",\n "HOSP_HRNAME": "hour_of_ems_arrival_at_hospital_name",\n "HOSP_MN": "minute_of_ems_arrival_at_hospital",\n "HOSP_MNNAME": "minute_of_ems_arrival_at_hospital_name",\n "FATALS": "number_of_fatalities",\n "DRUNK_DR": "number_of_drunk_drivers"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for cevent pipeline
cevent_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="cevent_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="cevent_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "cevent.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.cevent_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "event_number",\n "vehicle_number_this_vehicle",\n "area_of_impact_this_vehicle",\n "area_of_impact_this_vehicle_name",\n "sequence_of_events",\n "sequence_of_events_name",\n "vehicle_number_other_vehicle",\n "vehicle_number_other_vehicle_name",\n "area_of_impact_other_vehicle",\n "area_of_impact_other_vehicle_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "event_number": "str",\n "vehicle_number_this_vehicle": "str",\n "area_of_impact_this_vehicle": "str",\n "area_of_impact_this_vehicle_name": "str",\n "sequence_of_events": "str",\n "sequence_of_events_name": "str",\n "vehicle_number_other_vehicle": "str",\n "vehicle_number_other_vehicle_name": "str",\n "area_of_impact_other_vehicle": "str",\n "area_of_impact_other_vehicle_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "EVENTNUM": "event_number",\n "VNUMBER1": "vehicle_number_this_vehicle",\n "AOI1": "area_of_impact_this_vehicle",\n "AOI1NAME": "area_of_impact_this_vehicle_name",\n "SOE": "sequence_of_events",\n "SOENAME": "sequence_of_events_name",\n "VNUMBER2": "vehicle_number_other_vehicle",\n "VNUMBER2NAME": "vehicle_number_other_vehicle_name",\n "AOI2": "area_of_impact_other_vehicle",\n "AOI2NAME": "area_of_impact_other_vehicle_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for damage pipeline
damage_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="damage_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="damage_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "damage.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.damage_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "damaged_areas",\n "damaged_areas_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "damaged_areas": "str",\n "damaged_areas_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MDAREAS": "damaged_areas",\n "MDAREASNAME": "area_of_impact_this_vehicle"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for distract pipeline
distract_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="distract_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="distract_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "distract.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.distract_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "driver_distracted_by",\n "driver_distracted_by_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "driver_distracted_by": "str",\n "driver_distracted_by_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MDRDSTRD": "driver_distracted_by",\n "MDRDSTRDNAME": "driver_distracted_by_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for drimpair pipeline
drimpair_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="drimpair_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="drimpair_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "drimpair.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.drimpair_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "condition_impairment_at_time_of_crash_driver",\n "condition_impairment_at_time_of_crash_driver_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "condition_impairment_at_time_of_crash_driver": "str",\n "condition_impairment_at_time_of_crash_driver_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "DRIMPAIR": "condition_impairment_at_time_of_crash_driver",\n "DRIMPAIRNAME": "condition_impairment_at_time_of_crash_driver_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for factor pipeline
factor_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="factor_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="factor_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "factor.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.factor_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "contributing_circumstances_motor_vehicle",\n "contributing_circumstances_motor_vehicle_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "contributing_circumstances_motor_vehicle": "str",\n "contributing_circumstances_motor_vehicle_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MFACTOR": "contributing_circumstances_motor_vehicle",\n "MFACTORNAME": "contributing_circumstances_motor_vehicle_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for maneuver pipeline
maneuver_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="maneuver_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="maneuver_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "maneuver.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.maneuver_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "driver_maneuvered_to_avoid",\n "driver_maneuvered_to_avoid_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "driver_maneuvered_to_avoid": "str",\n "driver_maneuvered_to_avoid_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MDRMANAV": "driver_maneuvered_to_avoid",\n "MDRMANAVNAME": "driver_maneuvered_to_avoid_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
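    # The non-motorist tables that follow (nmcrash, nmimpair, nmprior) are
    # person-level records, so their headers and rename mappings carry
    # person_number (PER_NO) in addition to vehicle_number.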
# Run CSV transform within kubernetes pod for nmcrash pipeline
nmcrash_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="nmcrash_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="nmcrash_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "nmcrash.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmcrash_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "non_motorist_contributing_circumstances",\n "non_motorist_contributing_circumstances_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "non_motorist_contributing_circumstances": "str",\n "non_motorist_contributing_circumstances_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "MTM_CRSH": "non_motorist_contributing_circumstances",\n "MTM_CRSHNAME": "non_motorist_contributing_circumstances_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for nmimpair pipeline
nmimpair_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="nmimpair_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="nmimpair_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "nmimpair.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmimpair_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "condition_impairment_at_time_of_crash_non_motorist",\n "condition_impairment_at_time_of_crash_non_motorist_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "condition_impairment_at_time_of_crash_non_motorist": "str",\n "condition_impairment_at_time_of_crash_non_motorist_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "NMIMPAIR": "condition_impairment_at_time_of_crash_non_motorist",\n "NMIMPAIRNAME": "condition_impairment_at_time_of_crash_non_motorist_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for nmprior pipeline
nmprior_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="nmprior_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="nmprior_2015_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "nmprior.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.nmprior_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "non_motorist_action_circumstances",\n "non_motorist_action_circumstances_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "non_motorist_action_circumstances": "str",\n "non_motorist_action_circumstances_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "NMIMPAIR": "non_motorist_action_circumstances",\n "NMIMPAIRNAME": "non_motorist_action_circumstances_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for parkwork_2015 pipeline
parkwork_2015_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="parkwork_2015_transform_csv",
startup_timeout_seconds=600,
name="parkwork_2015",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "parkwork.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2015.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number",\n "motor_carrier_identification_number_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "underride_override",\n "underride_override_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level1",\n "related_factors_vehicle_level1_name",\n "related_factors_vehicle_level2",\n "related_factors_vehicle_level2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "fatalities_in_vehicle"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number": "str",\n "motor_carrier_identification_number_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level1": "str",\n "related_factors_vehicle_level1_name": "str",\n "related_factors_vehicle_level2": "str",\n "related_factors_vehicle_level2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "fatalities_in_vehicle": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PVE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PNUMOCCS": "number_of_occupants",\n "PNUMOCCSNAME": "number_of_occupants_name",\n "PDAY": "day_of_crash",\n "PDAYNAME": "day_of_crash_name",\n "PMONTH": "month_of_crash",\n "PMONTHNAME": "month_of_crash_name",\n "PHOUR": "hour_of_crash",\n "PHOURNAME": "hour_of_crash_name",\n "PMINUTE": "minute_of_crash",\n "PMINUTENAME": "minute_of_crash_name",\n "PHARM_EV": "first_harmful_event",\n "PHARM_EVNAME": "first_harmful_event_name",\n "PMAN_COLL": "manner_of_collision",\n "PMAN_COLLNAME": "manner_of_collision_name",\n "PTYPE": "unit_type",\n "PTYPENAME": "unit_type_name",\n "PHIT_RUN": "hit_and_run",\n "PHIT_RUNNAME": "hit_and_run_name",\n "PREG_STAT": "registration_state",\n "PREG_STATNAME": "registration_state_name",\n "POWNER": "registered_vehicle_owner",\n "POWNERNAME": "registered_vehicle_owner_name",\n "PMAKE": "vehicle_make",\n "PMAKENAME": "vehicle_make_name",\n "PMODEL": "vehicle_model",\n "PMAK_MOD": "make_model_combined",\n "PBODYTYP": "body_type",\n "PBODYTYPNAME": "body_type_name",\n "PMODYEAR": "vehicle_model_year",\n "PMODYEARNAME": "vehicle_model_year_name",\n "PVIN": "vehicle_identification_number_vin",\n "PVINNAME": "vehicle_identification_number_vin_name",\n "PVIN_1": "vin_character_1",\n "PVIN_2": "vin_character_2",\n "PVIN_3": "vin_character_3",\n "PVIN_4": "vin_character_4",\n "PVIN_5": "vin_character_5",\n "PVIN_6": "vin_character_6",\n "PVIN_7": "vin_character_7",\n "PVIN_8": "vin_character_8",\n "PVIN_9": "vin_character_9",\n "PVIN_10": "vin_character_10",\n "PVIN_11": "vin_character_11",\n "PVIN_12": "vin_character_12",\n "PTRAILER": "vehicle_trailing",\n "PTRAILERNAME": "vehicle_trailing_name",\n "PMCARR_I1": "mcid_issuing_authority",\n "PMCARR_I1NAME": "mcid_issuing_authority_name",\n "PMCARR_I2": "mcid_identification_number",\n "PMCARR_I2NAME": "mcid_identification_number_name",\n "PMCARR_ID": "motor_carrier_identification_number",\n "PMCARR_IDNAME": "motor_carrier_identification_number_name",\n "PGVWR": "gross_vehicle_weight_rating",\n "PGVWRNAME": "gross_vehicle_weight_rating_name",\n "PV_CONFIG": "vehicle_configuration",\n "PV_CONFIGNAME": "vehicle_configuration_name",\n "PCARGTYP": "cargo_body_type",\n "PCARGTYPNAME": "cargo_body_type_name",\n "PHAZ_INV": "hazardous_material_involvement",\n "PHAZ_INVNAME": "hazardous_material_involvement_name",\n "PHAZPLAC": "hazardous_material_placard",\n "PHAZPLACNAME": "hazardous_material_placard_name",\n "PHAZ_ID": "hazardous_material_identification_number",\n "PHAZ_IDNAME": "hazardous_material_identification_number_name",\n "PHAZ_CNO": "hazardous_material_class_number",\n "PHAZ_CNONAME": "hazardous_material_class_number_name",\n "PHAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "PHAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "PBUS_USE": "bus_use",\n "PBUS_USENAME": "bus_use_name",\n "PSP_USE": "special_use",\n "PSP_USENAME": "special_use_name",\n "PEM_USE": "emergency_motor_vehicle_use",\n "PEM_USENAME": "emergency_motor_vehicle_use_name",\n "PUNDERIDE": "underride_override",\n "PUNDERIDENAME": "underride_override_name",\n "PIMPACT1": "initial_contact_point",\n "PIMPACT1NAME": "initial_contact_point_name",\n "PVEH_SEV": "extent_of_damage",\n "PVEH_SEVNAME": "extent_of_damage_name",\n "PTOWED": "vehicle_removal",\n "PTOWEDNAME": "vehicle_removal_name",\n 
"PM_HARM": "most_harmful_event",\n "PM_HARMNAME": "most_harmful_event_name",\n "PVEH_SC1": "related_factors_vehicle_level1",\n "PVEH_SC1NAME": "related_factors_vehicle_level1_name",\n "PVEH_SC2": "related_factors_vehicle_level2",\n "PVEH_SC2NAME": "related_factors_vehicle_level2_name",\n "PFIRE": "fire_occurrence",\n "PFIRENAME": "fire_occurrence_name",\n "PDEATHS": "fatalities_in_vehicle"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
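    # From 2016 onward the parkwork file also carries trailer VIN columns
    # (PTRLR1VIN through PTRLR3VIN and their *NAME variants), so each year
    # range gets its own task with a matching header list and rename mapping.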
    # Run CSV transform within kubernetes pod for parkwork_2016_2017 pipeline
parkwork_2016_2017_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="parkwork_2016_2017_transform_csv",
startup_timeout_seconds=600,
name="parkwork_2016_2017",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "parkwork.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2016_2017.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number",\n "motor_carrier_identification_number_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "underride_override",\n "underride_override_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level1",\n "related_factors_vehicle_level1_name",\n "related_factors_vehicle_level2",\n "related_factors_vehicle_level2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "fatalities_in_vehicle",\n "ptrlr1vin",\n "ptrlr1vinname",\n "ptrlr2vin",\n "ptrlr2vinname",\n "ptrlr3vin",\n "ptrlr3vinname"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number": "str",\n "motor_carrier_identification_number_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level1": "str",\n "related_factors_vehicle_level1_name": "str",\n "related_factors_vehicle_level2": "str",\n "related_factors_vehicle_level2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "fatalities_in_vehicle": "str",\n "ptrlr1vin": "str",\n "ptrlr1vinname": "str",\n "ptrlr2vin": "str",\n "ptrlr2vinname": "str",\n "ptrlr3vin": "str",\n "ptrlr3vinname": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PVE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PNUMOCCS": "number_of_occupants",\n "PNUMOCCSNAME": "number_of_occupants_name",\n "PDAY": "day_of_crash",\n "PDAYNAME": "day_of_crash_name",\n "PMONTH": "month_of_crash",\n "PMONTHNAME": "month_of_crash_name",\n "PHOUR": "hour_of_crash",\n "PHOURNAME": "hour_of_crash_name",\n "PMINUTE": "minute_of_crash",\n "PMINUTENAME": "minute_of_crash_name",\n "PHARM_EV": "first_harmful_event",\n "PHARM_EVNAME": "first_harmful_event_name",\n "PMAN_COLL": "manner_of_collision",\n "PMAN_COLLNAME": "manner_of_collision_name",\n "PTYPE": "unit_type",\n "PTYPENAME": "unit_type_name",\n "PHIT_RUN": "hit_and_run",\n "PHIT_RUNNAME": "hit_and_run_name",\n "PREG_STAT": "registration_state",\n "PREG_STATNAME": "registration_state_name",\n "POWNER": "registered_vehicle_owner",\n "POWNERNAME": "registered_vehicle_owner_name",\n "PMAKE": "vehicle_make",\n "PMAKENAME": "vehicle_make_name",\n "PMODEL": "vehicle_model",\n "PMAK_MOD": "make_model_combined",\n "PBODYTYP": "body_type",\n "PBODYTYPNAME": "body_type_name",\n "PMODYEAR": "vehicle_model_year",\n "PMODYEARNAME": "vehicle_model_year_name",\n "PVIN": "vehicle_identification_number_vin",\n "PVINNAME": "vehicle_identification_number_vin_name",\n "PVIN_1": "vin_character_1",\n "PVIN_2": "vin_character_2",\n "PVIN_3": "vin_character_3",\n "PVIN_4": "vin_character_4",\n "PVIN_5": "vin_character_5",\n "PVIN_6": "vin_character_6",\n "PVIN_7": "vin_character_7",\n "PVIN_8": "vin_character_8",\n "PVIN_9": "vin_character_9",\n "PVIN_10": "vin_character_10",\n "PVIN_11": "vin_character_11",\n "PVIN_12": "vin_character_12",\n "PTRAILER": "vehicle_trailing",\n "PTRAILERNAME": "vehicle_trailing_name",\n "PMCARR_I1": "mcid_issuing_authority",\n "PMCARR_I1NAME": "mcid_issuing_authority_name",\n "PMCARR_I2": "mcid_identification_number",\n "PMCARR_I2NAME": "mcid_identification_number_name",\n "PMCARR_ID": "motor_carrier_identification_number",\n "PMCARR_IDNAME": "motor_carrier_identification_number_name",\n "PGVWR": "gross_vehicle_weight_rating",\n "PGVWRNAME": "gross_vehicle_weight_rating_name",\n "PV_CONFIG": "vehicle_configuration",\n "PV_CONFIGNAME": "vehicle_configuration_name",\n "PCARGTYP": "cargo_body_type",\n "PCARGTYPNAME": "cargo_body_type_name",\n "PHAZ_INV": "hazardous_material_involvement",\n "PHAZ_INVNAME": "hazardous_material_involvement_name",\n "PHAZPLAC": "hazardous_material_placard",\n "PHAZPLACNAME": "hazardous_material_placard_name",\n "PHAZ_ID": "hazardous_material_identification_number",\n "PHAZ_IDNAME": "hazardous_material_identification_number_name",\n "PHAZ_CNO": "hazardous_material_class_number",\n "PHAZ_CNONAME": "hazardous_material_class_number_name",\n "PHAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "PHAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "PBUS_USE": "bus_use",\n "PBUS_USENAME": "bus_use_name",\n "PSP_USE": "special_use",\n "PSP_USENAME": "special_use_name",\n "PEM_USE": "emergency_motor_vehicle_use",\n "PEM_USENAME": "emergency_motor_vehicle_use_name",\n "PUNDERIDE": "underride_override",\n "PUNDERIDENAME": "underride_override_name",\n "PIMPACT1": "initial_contact_point",\n "PIMPACT1NAME": "initial_contact_point_name",\n "PVEH_SEV": "extent_of_damage",\n "PVEH_SEVNAME": "extent_of_damage_name",\n "PTOWED": "vehicle_removal",\n "PTOWEDNAME": "vehicle_removal_name",\n 
"PM_HARM": "most_harmful_event",\n "PM_HARMNAME": "most_harmful_event_name",\n "PVEH_SC1": "related_factors_vehicle_level1",\n "PVEH_SC1NAME": "related_factors_vehicle_level1_name",\n "PVEH_SC2": "related_factors_vehicle_level2",\n "PVEH_SC2NAME": "related_factors_vehicle_level2_name",\n "PFIRE": "fire_occurrence",\n "PFIRENAME": "fire_occurrence_name",\n "PDEATHS": "fatalities_in_vehicle",\n "PTRLR1VIN": "ptrlr1vin",\n "PTRLR1VINNAME": "ptrlr1vinname",\n "PTRLR2VIN": "ptrlr2vin",\n "PTRLR2VINNAME": "ptrlr2vinname",\n "PTRLR3VIN": "ptrlr3vin",\n "PTRLR3VINNAME": "ptrlr3vinname"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
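    # The 2018 parkwork file additionally includes make_model_combined_name
    # (PMAK_MODNAME), which the other year ranges do not carry.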
    # Run CSV transform within kubernetes pod for parkwork_2018 pipeline
parkwork_2018_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="parkwork_2018_transform_csv",
startup_timeout_seconds=600,
name="parkwork_2018",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "parkwork.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2018.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number",\n "motor_carrier_identification_number_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "underride_override",\n "underride_override_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level1",\n "related_factors_vehicle_level1_name",\n "related_factors_vehicle_level2",\n "related_factors_vehicle_level2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "fatalities_in_vehicle",\n "ptrlr1vin",\n "ptrlr1vinname",\n "ptrlr2vin",\n "ptrlr2vinname",\n "ptrlr3vin",\n "ptrlr3vinname"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number": "str",\n "motor_carrier_identification_number_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level1": "str",\n "related_factors_vehicle_level1_name": "str",\n "related_factors_vehicle_level2": "str",\n "related_factors_vehicle_level2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "fatalities_in_vehicle": "str",\n "ptrlr1vin": "str",\n "ptrlr1vinname": "str",\n "ptrlr2vin": "str",\n "ptrlr2vinname": "str",\n "ptrlr3vin": "str",\n 
"ptrlr3vinname": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PVE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PNUMOCCS": "number_of_occupants",\n "PNUMOCCSNAME": "number_of_occupants_name",\n "PDAY": "day_of_crash",\n "PDAYNAME": "day_of_crash_name",\n "PMONTH": "month_of_crash",\n "PMONTHNAME": "month_of_crash_name",\n "PHOUR": "hour_of_crash",\n "PHOURNAME": "hour_of_crash_name",\n "PMINUTE": "minute_of_crash",\n "PMINUTENAME": "minute_of_crash_name",\n "PHARM_EV": "first_harmful_event",\n "PHARM_EVNAME": "first_harmful_event_name",\n "PMAN_COLL": "manner_of_collision",\n "PMAN_COLLNAME": "manner_of_collision_name",\n "PTYPE": "unit_type",\n "PTYPENAME": "unit_type_name",\n "PHIT_RUN": "hit_and_run",\n "PHIT_RUNNAME": "hit_and_run_name",\n "PREG_STAT": "registration_state",\n "PREG_STATNAME": "registration_state_name",\n "POWNER": "registered_vehicle_owner",\n "POWNERNAME": "registered_vehicle_owner_name",\n "PMAKE": "vehicle_make",\n "PMAKENAME": "vehicle_make_name",\n "PMODEL": "vehicle_model",\n "PMAK_MOD": "make_model_combined",\n "PMAK_MODNAME": "make_model_combined_name",\n "PBODYTYP": "body_type",\n "PBODYTYPNAME": "body_type_name",\n "PMODYEAR": "vehicle_model_year",\n "PMODYEARNAME": "vehicle_model_year_name",\n "PVIN": "vehicle_identification_number_vin",\n "PVINNAME": "vehicle_identification_number_vin_name",\n "PVIN_1": "vin_character_1",\n "PVIN_2": "vin_character_2",\n "PVIN_3": "vin_character_3",\n "PVIN_4": "vin_character_4",\n "PVIN_5": "vin_character_5",\n "PVIN_6": "vin_character_6",\n "PVIN_7": "vin_character_7",\n "PVIN_8": "vin_character_8",\n "PVIN_9": "vin_character_9",\n "PVIN_10": "vin_character_10",\n "PVIN_11": "vin_character_11",\n "PVIN_12": "vin_character_12",\n "PTRAILER": "vehicle_trailing",\n "PTRAILERNAME": "vehicle_trailing_name",\n "PMCARR_I1": "mcid_issuing_authority",\n "PMCARR_I1NAME": "mcid_issuing_authority_name",\n "PMCARR_I2": "mcid_identification_number",\n "PMCARR_I2NAME": "mcid_identification_number_name",\n "PMCARR_ID": "motor_carrier_identification_number",\n "PMCARR_IDNAME": "motor_carrier_identification_number_name",\n "PGVWR": "gross_vehicle_weight_rating",\n "PGVWRNAME": "gross_vehicle_weight_rating_name",\n "PV_CONFIG": "vehicle_configuration",\n "PV_CONFIGNAME": "vehicle_configuration_name",\n "PCARGTYP": "cargo_body_type",\n "PCARGTYPNAME": "cargo_body_type_name",\n "PHAZ_INV": "hazardous_material_involvement",\n "PHAZ_INVNAME": "hazardous_material_involvement_name",\n "PHAZPLAC": "hazardous_material_placard",\n "PHAZPLACNAME": "hazardous_material_placard_name",\n "PHAZ_ID": "hazardous_material_identification_number",\n "PHAZ_IDNAME": "hazardous_material_identification_number_name",\n "PHAZ_CNO": "hazardous_material_class_number",\n "PHAZ_CNONAME": "hazardous_material_class_number_name",\n "PHAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "PHAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "PBUS_USE": "bus_use",\n "PBUS_USENAME": "bus_use_name",\n "PSP_USE": "special_use",\n "PSP_USENAME": "special_use_name",\n "PEM_USE": "emergency_motor_vehicle_use",\n "PEM_USENAME": "emergency_motor_vehicle_use_name",\n "PUNDERIDE": "underride_override",\n "PUNDERIDENAME": "underride_override_name",\n "PIMPACT1": "initial_contact_point",\n "PIMPACT1NAME": "initial_contact_point_name",\n "PVEH_SEV": "extent_of_damage",\n "PVEH_SEVNAME": "extent_of_damage_name",\n "PTOWED": "vehicle_removal",\n 
"PTOWEDNAME": "vehicle_removal_name",\n "PM_HARM": "most_harmful_event",\n "PM_HARMNAME": "most_harmful_event_name",\n "PVEH_SC1": "related_factors_vehicle_level1",\n "PVEH_SC1NAME": "related_factors_vehicle_level1_name",\n "PVEH_SC2": "related_factors_vehicle_level2",\n "PVEH_SC2NAME": "related_factors_vehicle_level2_name",\n "PFIRE": "fire_occurrence",\n "PFIRENAME": "fire_occurrence_name",\n "PDEATHS": "fatalities_in_vehicle",\n "PTRLR1VIN": "ptrlr1vin",\n "PTRLR1VINNAME": "ptrlr1vinname",\n "PTRLR2VIN": "ptrlr2vin",\n "PTRLR2VINNAME": "ptrlr2vinname",\n "PTRLR3VIN": "ptrlr3vin",\n "PTRLR3VINNAME": "ptrlr3vinname"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
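    # The 2019 parkwork file reverts to the 2016-2017 column layout: trailer
    # VIN columns are present, make_model_combined_name is not.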
    # Run CSV transform within kubernetes pod for parkwork_2019 pipeline
parkwork_2019_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="parkwork_2019_transform_csv",
startup_timeout_seconds=600,
name="parkwork_2019",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "parkwork.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2019.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number",\n "motor_carrier_identification_number_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "underride_override",\n "underride_override_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level1",\n "related_factors_vehicle_level1_name",\n "related_factors_vehicle_level2",\n "related_factors_vehicle_level2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "fatalities_in_vehicle",\n "ptrlr1vin",\n "ptrlr1vinname",\n "ptrlr2vin",\n "ptrlr2vinname",\n "ptrlr3vin",\n "ptrlr3vinname"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number": "str",\n "motor_carrier_identification_number_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level1": "str",\n "related_factors_vehicle_level1_name": "str",\n "related_factors_vehicle_level2": "str",\n "related_factors_vehicle_level2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "fatalities_in_vehicle": "str",\n "ptrlr1vin": "str",\n "ptrlr1vinname": "str",\n "ptrlr2vin": "str",\n "ptrlr2vinname": "str",\n "ptrlr3vin": "str",\n "ptrlr3vinname": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PVE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PNUMOCCS": "number_of_occupants",\n "PNUMOCCSNAME": "number_of_occupants_name",\n "PDAY": "day_of_crash",\n "PDAYNAME": "day_of_crash_name",\n "PMONTH": "month_of_crash",\n "PMONTHNAME": "month_of_crash_name",\n "PHOUR": "hour_of_crash",\n "PHOURNAME": "hour_of_crash_name",\n "PMINUTE": "minute_of_crash",\n "PMINUTENAME": "minute_of_crash_name",\n "PHARM_EV": "first_harmful_event",\n "PHARM_EVNAME": "first_harmful_event_name",\n "PMAN_COLL": "manner_of_collision",\n "PMAN_COLLNAME": "manner_of_collision_name",\n "PTYPE": "unit_type",\n "PTYPENAME": "unit_type_name",\n "PHIT_RUN": "hit_and_run",\n "PHIT_RUNNAME": "hit_and_run_name",\n "PREG_STAT": "registration_state",\n "PREG_STATNAME": "registration_state_name",\n "POWNER": "registered_vehicle_owner",\n "POWNERNAME": "registered_vehicle_owner_name",\n "PMAKE": "vehicle_make",\n "PMAKENAME": "vehicle_make_name",\n "PMODEL": "vehicle_model",\n "PMAK_MOD": "make_model_combined",\n "PBODYTYP": "body_type",\n "PBODYTYPNAME": "body_type_name",\n "PMODYEAR": "vehicle_model_year",\n "PMODYEARNAME": "vehicle_model_year_name",\n "PVIN": "vehicle_identification_number_vin",\n "PVINNAME": "vehicle_identification_number_vin_name",\n "PVIN_1": "vin_character_1",\n "PVIN_2": "vin_character_2",\n "PVIN_3": "vin_character_3",\n "PVIN_4": "vin_character_4",\n "PVIN_5": "vin_character_5",\n "PVIN_6": "vin_character_6",\n "PVIN_7": "vin_character_7",\n "PVIN_8": "vin_character_8",\n "PVIN_9": "vin_character_9",\n "PVIN_10": "vin_character_10",\n "PVIN_11": "vin_character_11",\n "PVIN_12": "vin_character_12",\n "PTRAILER": "vehicle_trailing",\n "PTRAILERNAME": "vehicle_trailing_name",\n "PMCARR_I1": "mcid_issuing_authority",\n "PMCARR_I1NAME": "mcid_issuing_authority_name",\n "PMCARR_I2": "mcid_identification_number",\n "PMCARR_I2NAME": "mcid_identification_number_name",\n "PMCARR_ID": "motor_carrier_identification_number",\n "PMCARR_IDNAME": "motor_carrier_identification_number_name",\n "PGVWR": "gross_vehicle_weight_rating",\n "PGVWRNAME": "gross_vehicle_weight_rating_name",\n "PV_CONFIG": "vehicle_configuration",\n "PV_CONFIGNAME": "vehicle_configuration_name",\n "PCARGTYP": "cargo_body_type",\n "PCARGTYPNAME": "cargo_body_type_name",\n "PHAZ_INV": "hazardous_material_involvement",\n "PHAZ_INVNAME": "hazardous_material_involvement_name",\n "PHAZPLAC": "hazardous_material_placard",\n "PHAZPLACNAME": "hazardous_material_placard_name",\n "PHAZ_ID": "hazardous_material_identification_number",\n "PHAZ_IDNAME": "hazardous_material_identification_number_name",\n "PHAZ_CNO": "hazardous_material_class_number",\n "PHAZ_CNONAME": "hazardous_material_class_number_name",\n "PHAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "PHAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "PBUS_USE": "bus_use",\n "PBUS_USENAME": "bus_use_name",\n "PSP_USE": "special_use",\n "PSP_USENAME": "special_use_name",\n "PEM_USE": "emergency_motor_vehicle_use",\n "PEM_USENAME": "emergency_motor_vehicle_use_name",\n "PUNDERIDE": "underride_override",\n "PUNDERIDENAME": "underride_override_name",\n "PIMPACT1": "initial_contact_point",\n "PIMPACT1NAME": "initial_contact_point_name",\n "PVEH_SEV": "extent_of_damage",\n "PVEH_SEVNAME": "extent_of_damage_name",\n "PTOWED": "vehicle_removal",\n "PTOWEDNAME": "vehicle_removal_name",\n 
"PM_HARM": "most_harmful_event",\n "PM_HARMNAME": "most_harmful_event_name",\n "PVEH_SC1": "related_factors_vehicle_level1",\n "PVEH_SC1NAME": "related_factors_vehicle_level1_name",\n "PVEH_SC2": "related_factors_vehicle_level2",\n "PVEH_SC2NAME": "related_factors_vehicle_level2_name",\n "PFIRE": "fire_occurrence",\n "PFIRENAME": "fire_occurrence_name",\n "PDEATHS": "fatalities_in_vehicle",\n "PTRLR1VIN": "ptrlr1vin",\n "PTRLR1VINNAME": "ptrlr1vinname",\n "PTRLR2VIN": "ptrlr2vin",\n "PTRLR2VINNAME": "ptrlr2vinname",\n "PTRLR3VIN": "ptrlr3vin",\n "PTRLR3VINNAME": "ptrlr3vinname"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the parkwork_2020 pipeline
parkwork_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="parkwork_2020_transform_csv",
startup_timeout_seconds=600,
name="parkwork_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "parkwork.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.parkwork_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number",\n "motor_carrier_identification_number_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "underride_override",\n "underride_override_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "fatalities_in_vehicle",\n "ptrlr1vin",\n "ptrlr1vinname",\n "ptrlr2vin",\n "ptrlr2vinname",\n "ptrlr3vin",\n "ptrlr3vinname",\n "pvpicmake",\n "pvpicmakename",\n "pvpicmodel",\n "pvpicmodelname",\n "pvpicbodyclass",\n "pvpicbodyclassname",\n "picfinalbody",\n "picfinalbodyname",\n "pgvwr_from",\n "pgvwr_fromname",\n "pgvwr_to",\n "pgvwr_toname",\n "ptrlr1gvwr",\n "ptrlr1gvwrname",\n "ptrlr2gvwr",\n "ptrlr2gvwrname",\n "ptrlr3gvwr",\n "ptrlr3gvwrname"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number": "str",\n "motor_carrier_identification_number_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "fatalities_in_vehicle": "str",\n "ptrlr1vin": "str",\n "ptrlr1vinname": "str",\n "ptrlr2vin": "str",\n "ptrlr2vinname": "str",\n "ptrlr3vin": "str",\n "ptrlr3vinname": "str",\n "pvpicmake": "str",\n "pvpicmakename": "str",\n "pvpicmodel": "str",\n "pvpicmodelname": "str",\n "pvpicbodyclass": "str",\n "pvpicbodyclassname": "str",\n "picfinalbody": "str",\n "picfinalbodyname": "str",\n "pgvwr_from": "str",\n "pgvwr_fromname": "str",\n "pgvwr_to": "str",\n 
"pgvwr_toname": "str",\n "ptrlr1gvwr": "str",\n "ptrlr1gvwrname": "str",\n "ptrlr2gvwr": "str",\n "ptrlr2gvwrname": "str",\n "ptrlr3gvwr": "str",\n "ptrlr3gvwrname": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PVE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "PNUMOCCS": "number_of_occupants",\n "PNUMOCCSNAME": "number_of_occupants_name",\n "PDAY": "day_of_crash",\n "PDAYNAME": "day_of_crash_name",\n "PMONTH": "month_of_crash",\n "PMONTHNAME": "month_of_crash_name",\n "PHOUR": "hour_of_crash",\n "PHOURNAME": "hour_of_crash_name",\n "PMINUTE": "minute_of_crash",\n "PMINUTENAME": "minute_of_crash_name",\n "PHARM_EV": "first_harmful_event",\n "PHARM_EVNAME": "first_harmful_event_name",\n "PMAN_COLL": "manner_of_collision",\n "PMAN_COLLNAME": "manner_of_collision_name",\n "PTYPE": "unit_type",\n "PTYPENAME": "unit_type_name",\n "PHIT_RUN": "hit_and_run",\n "PHIT_RUNNAME": "hit_and_run_name",\n "PREG_STAT": "registration_state",\n "PREG_STATNAME": "registration_state_name",\n "POWNER": "registered_vehicle_owner",\n "POWNERNAME": "registered_vehicle_owner_name",\n "PMAKE": "vehicle_make",\n "PMAKENAME": "vehicle_make_name",\n "PMODEL": "vehicle_model",\n "PMAK_MOD": "make_model_combined",\n "PBODYTYP": "body_type",\n "PBODYTYPNAME": "body_type_name",\n "PMODYEAR": "vehicle_model_year",\n "PMODYEARNAME": "vehicle_model_year_name",\n "PVIN": "vehicle_identification_number_vin",\n "PVINNAME": "vehicle_identification_number_vin_name",\n "PVIN_1": "vin_character_1",\n "PVIN_2": "vin_character_2",\n "PVIN_3": "vin_character_3",\n "PVIN_4": "vin_character_4",\n "PVIN_5": "vin_character_5",\n "PVIN_6": "vin_character_6",\n "PVIN_7": "vin_character_7",\n "PVIN_8": "vin_character_8",\n "PVIN_9": "vin_character_9",\n "PVIN_10": "vin_character_10",\n "PVIN_11": "vin_character_11",\n "PVIN_12": "vin_character_12",\n "PTRAILER": "vehicle_trailing",\n "PTRAILERNAME": "vehicle_trailing_name",\n "PMCARR_I1": "mcid_issuing_authority",\n "PMCARR_I1NAME": "mcid_issuing_authority_name",\n "PMCARR_I2": "mcid_identification_number",\n "PMCARR_I2NAME": "mcid_identification_number_name",\n "PMCARR_ID": "motor_carrier_identification_number",\n "PMCARR_IDNAME": "motor_carrier_identification_number_name",\n "PV_CONFIG": "vehicle_configuration",\n "PV_CONFIGNAME": "vehicle_configuration_name",\n "PCARGTYP": "cargo_body_type",\n "PCARGTYPNAME": "cargo_body_type_name",\n "PHAZ_INV": "hazardous_material_involvement",\n "PHAZ_INVNAME": "hazardous_material_involvement_name",\n "PHAZPLAC": "hazardous_material_placard",\n "PHAZPLACNAME": "hazardous_material_placard_name",\n "PHAZ_ID": "hazardous_material_identification_number",\n "PHAZ_IDNAME": "hazardous_material_identification_number_name",\n "PHAZ_CNO": "hazardous_material_class_number",\n "PHAZ_CNONAME": "hazardous_material_class_number_name",\n "PHAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "PHAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "PBUS_USE": "bus_use",\n "PBUS_USENAME": "bus_use_name",\n "PSP_USE": "special_use",\n "PSP_USENAME": "special_use_name",\n "PEM_USE": "emergency_motor_vehicle_use",\n "PEM_USENAME": "emergency_motor_vehicle_use_name",\n "PUNDERIDE": "underride_override",\n "PUNDERIDENAME": "underride_override_name",\n "PIMPACT1": "initial_contact_point",\n "PIMPACT1NAME": "initial_contact_point_name",\n "PVEH_SEV": "extent_of_damage",\n "PVEH_SEVNAME": "extent_of_damage_name",\n "PTOWED": "vehicle_removal",\n "PTOWEDNAME": "vehicle_removal_name",\n "PM_HARM": "most_harmful_event",\n "PM_HARMNAME": "most_harmful_event_name",\n "PFIRE": 
"fire_occurrence",\n "PFIRENAME": "fire_occurrence_name",\n "PDEATHS": "fatalities_in_vehicle",\n "PTRLR1VIN": "ptrlr1vin",\n "PTRLR1VINNAME": "ptrlr1vinname",\n "PTRLR2VIN": "ptrlr2vin",\n "PTRLR2VINNAME": "ptrlr2vinname",\n "PTRLR3VIN": "ptrlr3vin",\n "PTRLR3VINNAME": "ptrlr3vinname",\n "PVPICMAKE": "pvpicmake",\n "PVPICMAKENAME": "pvpicmakename",\n "PVPICMODEL": "pvpicmodel",\n "PVPICMODELNAME": "pvpicmodelname",\n "PVPICBODYCLASS": "pvpicbodyclass",\n "PVPICBODYCLASSNAME": "pvpicbodyclassname",\n "PICFINALBODY": "picfinalbody",\n "PICFINALBODYNAME": "picfinalbodyname",\n "PGVWR_FROM": "pgvwr_from",\n "PGVWR_FROMNAME": "pgvwr_fromname",\n "PGVWR_TO": "pgvwr_to",\n "PGVWR_TONAME": "pgvwr_toname",\n "PTRLR1GVWR": "ptrlr1gvwr",\n "PTRLR1GVWRNAME": "ptrlr1gvwrname",\n "PTRLR2GVWR": "ptrlr2gvwr",\n "PTRLR2GVWRNAME": "ptrlr2gvwrname",\n "PTRLR3GVWR": "ptrlr3gvwr",\n "PTRLR3GVWRNAME": "ptrlr3gvwrname"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the pbtype_2015_2020 pipeline
pbtype_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="pbtype_transform_csv",
startup_timeout_seconds=600,
name="pbtype",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "pbtype.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.pbtype_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "person_type",\n "person_type_name",\n "age",\n "age_name",\n "sex",\n "sex_name",\n "marked_crosswalk_present",\n "marked_crosswalk_present_name",\n "sidewalk_present",\n "sidewalk_present_name",\n "school_zone",\n "school_zone_name",\n "crash_type_pedestrian",\n "crash_type_pedestrian_name",\n "crash_type_bicycle",\n "crash_type_bicycle_name",\n "crash_location_pedestrian",\n "crash_location_pedestrian_name",\n "crash_location_bicycle",\n "crash_location_bicycle_name",\n "pedestrian_position",\n "pedestrian_position_name",\n "bicyclist_position",\n "bicyclist_position_name",\n "pedestrian_initial_direction_of_travel",\n "pedestrian_initial_direction_of_travel_name",\n "bicyclist_initial_direction_of_travel",\n "bicyclist_initial_direction_of_travel_name",\n "motorist_initial_direction_of_travel",\n "motorist_initial_direction_of_travel_name",\n "motorist_maneuver",\n "motorist_maneuver_name",\n "intersection_leg",\n "intersection_leg_name",\n "pedestrian_scenario",\n "pedestrian_scenario_name",\n "crash_group_pedestrian",\n "crash_group_pedestrian_name",\n "crash_group_bicycle",\n "crash_group_bicycle_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "person_type": "str",\n "person_type_name": "str",\n "age": "str",\n "age_name": "str",\n "sex": "str",\n "sex_name": "str",\n "marked_crosswalk_present": "str",\n "marked_crosswalk_present_name": "str",\n "sidewalk_present": "str",\n "sidewalk_present_name": "str",\n "school_zone": "str",\n "school_zone_name": "str",\n "crash_type_pedestrian": "str",\n "crash_type_pedestrian_name": "str",\n "crash_type_bicycle": "str",\n "crash_type_bicycle_name": "str",\n "crash_location_pedestrian": "str",\n "crash_location_pedestrian_name": "str",\n "crash_location_bicycle": "str",\n "crash_location_bicycle_name": "str",\n "pedestrian_position": "str",\n "pedestrian_position_name": "str",\n "bicyclist_position": "str",\n "bicyclist_position_name": "str",\n "pedestrian_initial_direction_of_travel": "str",\n "pedestrian_initial_direction_of_travel_name": "str",\n "bicyclist_initial_direction_of_travel": "str",\n "bicyclist_initial_direction_of_travel_name": "str",\n "motorist_initial_direction_of_travel": "str",\n "motorist_initial_direction_of_travel_name": "str",\n "motorist_maneuver": "str",\n "motorist_maneuver_name": "str",\n "intersection_leg": "str",\n "intersection_leg_name": "str",\n "pedestrian_scenario": "str",\n "pedestrian_scenario_name": "str",\n "crash_group_pedestrian": "str",\n "crash_group_pedestrian_name": "str",\n "crash_group_bicycle": "str",\n "crash_group_bicycle_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "PBPTYPE": "person_type",\n "PBPTYPENAME": "person_type_name",\n "PBAGE": "age",\n "PBAGENAME": "age_name",\n "PBSEX": "sex",\n "PBSEXNAME": "sex_name",\n "PBCWALK": "marked_crosswalk_present",\n "PBCWALKNAME": "marked_crosswalk_present_name",\n "PBSWALK": "sidewalk_present",\n "PBSWALKNAME": "sidewalk_present_name",\n "PBSZONE": "school_zone",\n "PBSZONENAME": "school_zone_name",\n "PEDCTYPE": "crash_type_pedestrian",\n "PEDCTYPENAME": "crash_type_pedestrian_name",\n "BIKECTYPE": "crash_type_bicycle",\n "BIKECTYPENAME": "crash_type_bicycle_name",\n "PEDLOC": "crash_location_pedestrian",\n "PEDLOCNAME": "crash_location_pedestrian_name",\n "BIKELOC": "crash_location_bicycle",\n "BIKELOCNAME": "crash_location_bicycle_name",\n "PEDPOS": "pedestrian_position",\n "PEDPOSNAME": "pedestrian_position_name",\n "BIKEPOS": "bicyclist_position",\n "BIKEPOSNAME": "bicyclist_position_name",\n "PEDDIR": "pedestrian_initial_direction_of_travel",\n "PEDDIRNAME": "pedestrian_initial_direction_of_travel_name",\n "BIKEDIR": "bicyclist_initial_direction_of_travel",\n "BIKEDIRNAME": "bicyclist_initial_direction_of_travel_name",\n "MOTDIR": "motorist_initial_direction_of_travel",\n "MOTDIRNAME": "motorist_initial_direction_of_travel_name",\n "MOTMAN": "motorist_maneuver",\n "MOTMANNAME": "motorist_maneuver_name",\n "PEDLEG": "intersection_leg",\n "PEDLEGNAME": "intersection_leg_name",\n "PEDSNR": "pedestrian_scenario",\n "PEDSNRNAME": "pedestrian_scenario_name",\n "PEDCGP": "crash_group_pedestrian",\n "PEDCGPNAME": "crash_group_pedestrian_name",\n "BIKECGP": "crash_group_bicycle",\n "BIKECGPNAME": "crash_group_bicycle_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the person_2015_2017 pipeline
person_2015_2017_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="person_2015_2017_transform_csv",
startup_timeout_seconds=600,
name="person",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "person.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2015_2017.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "vehicle_number",\n "person_number",\n "number_of_motor_vehicle_striking_non_motorist",\n "county",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "school_bus_related",\n "school_bus_related_name",\n "vehicle_make",\n "vehicle_make_name",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "model_year",\n "model_year_name",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "rollover",\n "rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "age",\n "age_name",\n "sex",\n "sex_name",\n "person_type",\n "person_type_name",\n "injury_severity",\n "injury_severity_name",\n "seating_position",\n "seating_position_name",\n "restraint_system_helmet_use",\n "restraint_system_helmet_use_name",\n "indication_of_misuse_of_restraint_system_helmet",\n "indication_of_misuse_of_restraint_system_helmet_name",\n "air_bag_deployed",\n "air_bag_deployed_name",\n "ejection",\n "ejection_name",\n "ejection_path",\n "ejection_path_name",\n "extrication",\n "extrication_name",\n "police_reported_alcohol_involvement",\n "police_reported_alcohol_involvement_name",\n "method_of_alcohol_determination_by_police",\n "method_of_alcohol_determination_by_police_name",\n "alcohol_test_status",\n "alcohol_test_status_name",\n "alcohol_test_type",\n "alcohol_test_type_name",\n "alcohol_result",\n "alcohol_result_name",\n "police_reported_drug_involvement",\n "police_reported_drug_involvement_name",\n "method_of_drug_determination_by_police",\n "method_of_drug_determination_by_police_name",\n "drug_test_status",\n "drug_test_status_name",\n "drug_test_type1",\n "drug_test_type1_name",\n "drug_test_type2",\n "drug_test_type2_name",\n "drug_test_type3",\n "drug_test_type3_name",\n "drug_result1",\n "drug_result1_name",\n "drug_result2",\n "drug_result2_name",\n "drug_result3",\n "drug_result3_name",\n "hospital",\n "hospital_name",\n "died_at_scene_en_route",\n "died_at_scene_en_route_name",\n "day_of_death",\n "day_of_death_name",\n "month_of_death",\n "month_of_death_name",\n "year_of_death",\n "year_of_death_name",\n "hour_of_death",\n "hour_of_death_name",\n "minute_of_death",\n "minute_of_death_name",\n "death_time",\n "death_time_name",\n "lag_hours",\n "lag_hours_name",\n "lag_minutes",\n "lag_minutes_name",\n "related_factors_person_level1",\n "related_factors_person_level1_name",\n "related_factors_person_level2",\n "related_factors_person_level2_name",\n "related_factors_person_level3",\n "related_factors_person_level3_name",\n "fatal_injury_at_work",\n "fatal_injury_at_work_name",\n "hispanic_origin",\n "hispanic_origin_name",\n "race",\n "race_name",\n "non_motorist_location_at_time_of_crash",\n "non_motorist_location_at_time_of_crash_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "number_of_motor_vehicle_striking_non_motorist": "str",\n "county": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "model_year": "str",\n "model_year_name": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "age": "str",\n "age_name": "str",\n "sex": "str",\n "sex_name": "str",\n "person_type": "str",\n "person_type_name": "str",\n "injury_severity": "str",\n "injury_severity_name": "str",\n "seating_position": "str",\n "seating_position_name": "str",\n "restraint_system_helmet_use": "str",\n "restraint_system_helmet_use_name": "str",\n "indication_of_misuse_of_restraint_system_helmet": "str",\n "indication_of_misuse_of_restraint_system_helmet_name": "str",\n "air_bag_deployed": "str",\n "air_bag_deployed_name": "str",\n "ejection": "str",\n "ejection_name": "str",\n "ejection_path": "str",\n "ejection_path_name": "str",\n "extrication": "str",\n "extrication_name": "str",\n "police_reported_alcohol_involvement": "str",\n "police_reported_alcohol_involvement_name": "str",\n "method_of_alcohol_determination_by_police": "str",\n "method_of_alcohol_determination_by_police_name": "str",\n "alcohol_test_status": "str",\n "alcohol_test_status_name": "str",\n "alcohol_test_type": "str",\n "alcohol_test_type_name": "str",\n "alcohol_result": "str",\n "alcohol_result_name": "str",\n "police_reported_drug_involvement": "str",\n "police_reported_drug_involvement_name": "str",\n "method_of_drug_determination_by_police": "str",\n "method_of_drug_determination_by_police_name": "str",\n "drug_test_status": "str",\n "drug_test_status_name": "str",\n "drug_test_type1": "str",\n "drug_test_type1_name": "str",\n "drug_test_type2": "str",\n "drug_test_type2_name": "str",\n "drug_test_type3": "str",\n "drug_test_type3_name": "str",\n "drug_result1": "str",\n "drug_result1_name": "str",\n "drug_result2": "str",\n "drug_result2_name": "str",\n "drug_result3": "str",\n "drug_result3_name": "str",\n "hospital": "str",\n "hospital_name": "str",\n "died_at_scene_en_route": "str",\n "died_at_scene_en_route_name": "str",\n "day_of_death": "str",\n "day_of_death_name": "str",\n "month_of_death": "str",\n "month_of_death_name": "str",\n "year_of_death": "str",\n "year_of_death_name": "str",\n "hour_of_death": "str",\n "hour_of_death_name": "str",\n "minute_of_death": "str",\n "minute_of_death_name": "str",\n "death_time": "str",\n 
"death_time_name": "str",\n "lag_hours": "str",\n "lag_hours_name": "str",\n "lag_minutes": "str",\n "lag_minutes_name": "str",\n "related_factors_person_level1": "str",\n "related_factors_person_level1_name": "str",\n "related_factors_person_level2": "str",\n "related_factors_person_level2_name": "str",\n "related_factors_person_level3": "str",\n "related_factors_person_level3_name": "str",\n "fatal_injury_at_work": "str",\n "fatal_injury_at_work_name": "str",\n "hispanic_origin": "str",\n "hispanic_origin_name": "str",\n "race": "str",\n "race_name": "str",\n "non_motorist_location_at_time_of_crash": "str",\n "non_motorist_location_at_time_of_crash_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "STR_VEH": "number_of_motor_vehicle_striking_non_motorist",\n "COUNTY": "county",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MAK_MOD": "make_model_combined",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "model_year",\n "MOD_YEARNAME": "model_year_name",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "AGE": "age",\n "AGENAME": "age_name",\n "SEX": "sex",\n "SEXNAME": "sex_name",\n "PER_TYP": "person_type",\n "PER_TYPNAME": "person_type_name",\n "INJ_SEV": "injury_severity",\n "INJ_SEVNAME": "injury_severity_name",\n "SEAT_POS": "seating_position",\n "SEAT_POSNAME": "seating_position_name",\n "REST_USE": "restraint_system_helmet_use",\n "REST_USENAME": "restraint_system_helmet_use_name",\n "REST_MIS": "indication_of_misuse_of_restraint_system_helmet",\n "REST_MISNAME": "indication_of_misuse_of_restraint_system_helmet_name",\n "AIR_BAG": "air_bag_deployed",\n "AIR_BAGNAME": "air_bag_deployed_name",\n "EJECTION": "ejection",\n "EJECTIONNAME": "ejection_name",\n "EJ_PATH": "ejection_path",\n "EJ_PATHNAME": "ejection_path_name",\n "EXTRICAT": "extrication",\n "EXTRICATNAME": "extrication_name",\n "DRINKING": "police_reported_alcohol_involvement",\n "DRINKINGNAME": "police_reported_alcohol_involvement_name",\n "ALC_DET": "method_of_alcohol_determination_by_police",\n "ALC_DETNAME": "method_of_alcohol_determination_by_police_name",\n "ALC_STATUS": "alcohol_test_status",\n "ALC_STATUSNAME": "alcohol_test_status_name",\n "ATST_TYP": "alcohol_test_type",\n "ATST_TYPNAME": "alcohol_test_type_name",\n "ALC_RES": "alcohol_result",\n "ALC_RESNAME": "alcohol_result_name",\n "DRUGS": "police_reported_drug_involvement",\n "DRUGSNAME": "police_reported_drug_involvement_name",\n "DRUG_DET": "method_of_drug_determination_by_police",\n "DRUG_DETNAME": "method_of_drug_determination_by_police_name",\n "DSTATUS": "drug_test_status",\n "DSTATUSNAME": "drug_test_status_name",\n "DRUGTST1": "drug_test_type1",\n "DRUGTST1NAME": "drug_test_type1_name",\n "DRUGTST2": "drug_test_type2",\n "DRUGTST2NAME": "drug_test_type2_name",\n "DRUGTST3": "drug_test_type3",\n "DRUGTST3NAME": "drug_test_type3_name",\n "DRUGRES1": "drug_result1",\n "DRUGRES1NAME": "drug_result1_name",\n "DRUGRES2": 
"drug_result2",\n "DRUGRES2NAME": "drug_result2_name",\n "DRUGRES3": "drug_result3",\n "DRUGRES3NAME": "drug_result3_name",\n "HOSPITAL": "hospital",\n "HOSPITALNAME": "hospital_name",\n "DOA": "died_at_scene_en_route",\n "DOANAME": "died_at_scene_en_route_name",\n "DEATH_DA": "day_of_death",\n "DEATH_DANAME": "day_of_death_name",\n "DEATH_MO": "month_of_death",\n "DEATH_MONAME": "month_of_death_name",\n "DEATH_YR": "year_of_death",\n "DEATH_YRNAME": "year_of_death_name",\n "DEATH_HR": "hour_of_death",\n "DEATH_HRNAME": "hour_of_death_name",\n "DEATH_MN": "minute_of_death",\n "DEATH_MNNAME": "minute_of_death_name",\n "DEATH_TM": "death_time",\n "DEATH_TMNAME": "death_time_name",\n "LAG_HRS": "lag_hours",\n "LAG_HRSNAME": "lag_hours_name",\n "LAG_MINS": "lag_minutes",\n "LAG_MINSNAME": "lag_minutes_name",\n "P_SF1": "related_factors_person_level1",\n "P_SF1NAME": "related_factors_person_level1_name",\n "P_SF2": "related_factors_person_level2",\n "P_SF2NAME": "related_factors_person_level2_name",\n "P_SF3": "related_factors_person_level3",\n "P_SF3NAME": "related_factors_person_level3_name",\n "WORK_INJ": "fatal_injury_at_work",\n "WORK_INJNAME": "fatal_injury_at_work_name",\n "HISPANIC": "hispanic_origin",\n "HISPANICNAME": "hispanic_origin_name",\n "RACE": "race",\n "RACENAME": "race_name",\n "LOCATION": "non_motorist_location_at_time_of_crash",\n "LOCATIONNAME": "non_motorist_location_at_time_of_crash_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the person_2018 pipeline
person_2018_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="person_2018_transform_csv",
startup_timeout_seconds=600,
name="person",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.person_2018.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.person_2018.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.person_2018.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "person.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.person_2018.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2018.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2018.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2018.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2018.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.person_2018.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2018.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2018.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "vehicle_number",\n "person_number",\n "number_of_motor_vehicle_striking_non_motorist",\n "county",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "school_bus_related",\n "school_bus_related_name",\n "vehicle_make",\n "vehicle_make_name",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "model_year",\n "model_year_name",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "rollover",\n "rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "age",\n "age_name",\n "sex",\n "sex_name",\n "person_type",\n "person_type_name",\n "injury_severity",\n "injury_severity_name",\n "seating_position",\n "seating_position_name",\n "restraint_system_helmet_use",\n "restraint_system_helmet_use_name",\n "indication_of_misuse_of_restraint_system_helmet",\n "indication_of_misuse_of_restraint_system_helmet_name",\n "air_bag_deployed",\n "air_bag_deployed_name",\n "ejection",\n "ejection_name",\n "ejection_path",\n "ejection_path_name",\n "extrication",\n "extrication_name",\n "police_reported_alcohol_involvement",\n "police_reported_alcohol_involvement_name",\n "method_of_alcohol_determination_by_police",\n "method_of_alcohol_determination_by_police_name",\n "alcohol_test_status",\n "alcohol_test_status_name",\n "alcohol_test_type",\n "alcohol_test_type_name",\n "alcohol_result",\n "alcohol_result_name",\n "police_reported_drug_involvement",\n "police_reported_drug_involvement_name",\n "method_of_drug_determination_by_police",\n "method_of_drug_determination_by_police_name",\n "drug_test_status",\n "drug_test_status_name",\n "hospital",\n "hospital_name",\n "died_at_scene_en_route",\n "died_at_scene_en_route_name",\n "day_of_death",\n "day_of_death_name",\n "month_of_death",\n "month_of_death_name",\n "year_of_death",\n "year_of_death_name",\n "hour_of_death",\n "hour_of_death_name",\n "minute_of_death",\n "minute_of_death_name",\n "death_time",\n "death_time_name",\n "lag_hours",\n "lag_hours_name",\n "lag_minutes",\n "lag_minutes_name",\n "related_factors_person_level1",\n "related_factors_person_level1_name",\n "related_factors_person_level2",\n "related_factors_person_level2_name",\n "related_factors_person_level3",\n "related_factors_person_level3_name",\n "fatal_injury_at_work",\n "fatal_injury_at_work_name",\n "hispanic_origin",\n "hispanic_origin_name",\n "race",\n "race_name",\n "non_motorist_location_at_time_of_crash",\n "non_motorist_location_at_time_of_crash_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "number_of_motor_vehicle_striking_non_motorist": "str",\n "county": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "model_year": "str",\n "model_year_name": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "age": "str",\n "age_name": "str",\n "sex": "str",\n "sex_name": "str",\n "person_type": "str",\n "person_type_name": "str",\n "injury_severity": "str",\n "injury_severity_name": "str",\n "seating_position": "str",\n "seating_position_name": "str",\n "restraint_system_helmet_use": "str",\n "restraint_system_helmet_use_name": "str",\n "indication_of_misuse_of_restraint_system_helmet": "str",\n "indication_of_misuse_of_restraint_system_helmet_name": "str",\n "air_bag_deployed": "str",\n "air_bag_deployed_name": "str",\n "ejection": "str",\n "ejection_name": "str",\n "ejection_path": "str",\n "ejection_path_name": "str",\n "extrication": "str",\n "extrication_name": "str",\n "police_reported_alcohol_involvement": "str",\n "police_reported_alcohol_involvement_name": "str",\n "method_of_alcohol_determination_by_police": "str",\n "method_of_alcohol_determination_by_police_name": "str",\n "alcohol_test_status": "str",\n "alcohol_test_status_name": "str",\n "alcohol_test_type": "str",\n "alcohol_test_type_name": "str",\n "alcohol_result": "str",\n "alcohol_result_name": "str",\n "police_reported_drug_involvement": "str",\n "police_reported_drug_involvement_name": "str",\n "method_of_drug_determination_by_police": "str",\n "method_of_drug_determination_by_police_name": "str",\n "drug_test_status": "str",\n "drug_test_status_name": "str",\n "hospital": "str",\n "hospital_name": "str",\n "died_at_scene_en_route": "str",\n "died_at_scene_en_route_name": "str",\n "day_of_death": "str",\n "day_of_death_name": "str",\n "month_of_death": "str",\n "month_of_death_name": "str",\n "year_of_death": "str",\n "year_of_death_name": "str",\n "hour_of_death": "str",\n "hour_of_death_name": "str",\n "minute_of_death": "str",\n "minute_of_death_name": "str",\n "death_time": "str",\n "death_time_name": "str",\n "lag_hours": "str",\n "lag_hours_name": "str",\n "lag_minutes": "str",\n "lag_minutes_name": "str",\n "related_factors_person_level1": "str",\n "related_factors_person_level1_name": "str",\n "related_factors_person_level2": "str",\n "related_factors_person_level2_name": "str",\n 
"related_factors_person_level3": "str",\n "related_factors_person_level3_name": "str",\n "fatal_injury_at_work": "str",\n "fatal_injury_at_work_name": "str",\n "hispanic_origin": "str",\n "hispanic_origin_name": "str",\n "race": "str",\n "race_name": "str",\n "non_motorist_location_at_time_of_crash": "str",\n "non_motorist_location_at_time_of_crash_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "STR_VEH": "number_of_motor_vehicle_striking_non_motorist",\n "COUNTY": "county",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MAK_MOD": "make_model_combined",\n "MAK_MODNAME": "make_model_combined_name",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "model_year",\n "MOD_YEARNAME": "model_year_name",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "AGE": "age",\n "AGENAME": "age_name",\n "SEX": "sex",\n "SEXNAME": "sex_name",\n "PER_TYP": "person_type",\n "PER_TYPNAME": "person_type_name",\n "INJ_SEV": "injury_severity",\n "INJ_SEVNAME": "injury_severity_name",\n "SEAT_POS": "seating_position",\n "SEAT_POSNAME": "seating_position_name",\n "REST_USE": "restraint_system_helmet_use",\n "REST_USENAME": "restraint_system_helmet_use_name",\n "REST_MIS": "indication_of_misuse_of_restraint_system_helmet",\n "REST_MISNAME": "indication_of_misuse_of_restraint_system_helmet_name",\n "AIR_BAG": "air_bag_deployed",\n "AIR_BAGNAME": "air_bag_deployed_name",\n "EJECTION": "ejection",\n "EJECTIONNAME": "ejection_name",\n "EJ_PATH": "ejection_path",\n "EJ_PATHNAME": "ejection_path_name",\n "EXTRICAT": "extrication",\n "EXTRICATNAME": "extrication_name",\n "DRINKING": "police_reported_alcohol_involvement",\n "DRINKINGNAME": "police_reported_alcohol_involvement_name",\n "ALC_DET": "method_of_alcohol_determination_by_police",\n "ALC_DETNAME": "method_of_alcohol_determination_by_police_name",\n "ALC_STATUS": "alcohol_test_status",\n "ALC_STATUSNAME": "alcohol_test_status_name",\n "ATST_TYP": "alcohol_test_type",\n "ATST_TYPNAME": "alcohol_test_type_name",\n "ALC_RES": "alcohol_result",\n "ALC_RESNAME": "alcohol_result_name",\n "DRUGS": "police_reported_drug_involvement",\n "DRUGSNAME": "police_reported_drug_involvement_name",\n "DRUG_DET": "method_of_drug_determination_by_police",\n "DRUG_DETNAME": "method_of_drug_determination_by_police_name",\n "DSTATUS": "drug_test_status",\n "DSTATUSNAME": "drug_test_status_name",\n "HOSPITAL": "hospital",\n "HOSPITALNAME": "hospital_name",\n "DOA": "died_at_scene_en_route",\n "DOANAME": "died_at_scene_en_route_name",\n "DEATH_DA": "day_of_death",\n "DEATH_DANAME": "day_of_death_name",\n "DEATH_MO": "month_of_death",\n "DEATH_MONAME": 
"month_of_death_name",\n "DEATH_YR": "year_of_death",\n "DEATH_YRNAME": "year_of_death_name",\n "DEATH_HR": "hour_of_death",\n "DEATH_HRNAME": "hour_of_death_name",\n "DEATH_MN": "minute_of_death",\n "DEATH_MNNAME": "minute_of_death_name",\n "DEATH_TM": "death_time",\n "DEATH_TMNAME": "death_time_name",\n "LAG_HRS": "lag_hours",\n "LAG_HRSNAME": "lag_hours_name",\n "LAG_MINS": "lag_minutes",\n "LAG_MINSNAME": "lag_minutes_name",\n "P_SF1": "related_factors_person_level1",\n "P_SF1NAME": "related_factors_person_level1_name",\n "P_SF2": "related_factors_person_level2",\n "P_SF2NAME": "related_factors_person_level2_name",\n "P_SF3": "related_factors_person_level3",\n "P_SF3NAME": "related_factors_person_level3_name",\n "WORK_INJ": "fatal_injury_at_work",\n "WORK_INJNAME": "fatal_injury_at_work_name",\n "HISPANIC": "hispanic_origin",\n "HISPANICNAME": "hispanic_origin_name",\n "RACE": "race",\n "RACENAME": "race_name",\n "LOCATION": "non_motorist_location_at_time_of_crash",\n "LOCATIONNAME": "non_motorist_location_at_time_of_crash_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the person_2019 pipeline
person_2019_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="person_2019_transform_csv",
startup_timeout_seconds=600,
name="person",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.person_2019.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.person_2019.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.person_2019.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "person.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.person_2019.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2019.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2019.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2019.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2019.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.person_2019.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2019.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2019.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "vehicle_number",\n "person_number",\n "number_of_motor_vehicle_striking_non_motorist",\n "county",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "school_bus_related",\n "school_bus_related_name",\n "vehicle_make",\n "vehicle_make_name",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "model_year",\n "model_year_name",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "rollover",\n "rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "age",\n "age_name",\n "sex",\n "sex_name",\n "person_type",\n "person_type_name",\n "injury_severity",\n "injury_severity_name",\n "seating_position",\n "seating_position_name",\n "restraint_system_helmet_use",\n "restraint_system_helmet_use_name",\n "indication_of_misuse_of_restraint_system_helmet",\n "indication_of_misuse_of_restraint_system_helmet_name",\n "air_bag_deployed",\n "air_bag_deployed_name",\n "ejection",\n "ejection_name",\n "ejection_path",\n "ejection_path_name",\n "extrication",\n "extrication_name",\n "police_reported_alcohol_involvement",\n "police_reported_alcohol_involvement_name",\n "method_of_alcohol_determination_by_police",\n "method_of_alcohol_determination_by_police_name",\n "alcohol_test_status",\n "alcohol_test_status_name",\n "alcohol_test_type",\n "alcohol_test_type_name",\n "alcohol_result",\n "alcohol_result_name",\n "police_reported_drug_involvement",\n "police_reported_drug_involvement_name",\n "method_of_drug_determination_by_police",\n "method_of_drug_determination_by_police_name",\n "drug_test_status",\n "drug_test_status_name",\n "hospital",\n "hospital_name",\n "died_at_scene_en_route",\n "died_at_scene_en_route_name",\n "day_of_death",\n "day_of_death_name",\n "month_of_death",\n "month_of_death_name",\n "year_of_death",\n "year_of_death_name",\n "hour_of_death",\n "hour_of_death_name",\n "minute_of_death",\n "minute_of_death_name",\n "death_time",\n "death_time_name",\n "lag_hours",\n "lag_hours_name",\n "lag_minutes",\n "lag_minutes_name",\n "related_factors_person_level1",\n "related_factors_person_level1_name",\n "related_factors_person_level2",\n "related_factors_person_level2_name",\n "related_factors_person_level3",\n "related_factors_person_level3_name",\n "fatal_injury_at_work",\n "fatal_injury_at_work_name",\n "hispanic_origin",\n "hispanic_origin_name",\n "non_motorist_location_at_time_of_crash",\n "non_motorist_location_at_time_of_crash_name",\n "helm_use",\n "helm_usename",\n "helm_mis",\n "helm_misname"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "number_of_motor_vehicle_striking_non_motorist": "str",\n "county": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "model_year": "str",\n "model_year_name": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "age": "str",\n "age_name": "str",\n "sex": "str",\n "sex_name": "str",\n "person_type": "str",\n "person_type_name": "str",\n "injury_severity": "str",\n "injury_severity_name": "str",\n "seating_position": "str",\n "seating_position_name": "str",\n "restraint_system_helmet_use": "str",\n "restraint_system_helmet_use_name": "str",\n "indication_of_misuse_of_restraint_system_helmet": "str",\n "indication_of_misuse_of_restraint_system_helmet_name": "str",\n "air_bag_deployed": "str",\n "air_bag_deployed_name": "str",\n "ejection": "str",\n "ejection_name": "str",\n "ejection_path": "str",\n "ejection_path_name": "str",\n "extrication": "str",\n "extrication_name": "str",\n "police_reported_alcohol_involvement": "str",\n "police_reported_alcohol_involvement_name": "str",\n "method_of_alcohol_determination_by_police": "str",\n "method_of_alcohol_determination_by_police_name": "str",\n "alcohol_test_status": "str",\n "alcohol_test_status_name": "str",\n "alcohol_test_type": "str",\n "alcohol_test_type_name": "str",\n "alcohol_result": "str",\n "alcohol_result_name": "str",\n "police_reported_drug_involvement": "str",\n "police_reported_drug_involvement_name": "str",\n "method_of_drug_determination_by_police": "str",\n "method_of_drug_determination_by_police_name": "str",\n "drug_test_status": "str",\n "drug_test_status_name": "str",\n "hospital": "str",\n "hospital_name": "str",\n "died_at_scene_en_route": "str",\n "died_at_scene_en_route_name": "str",\n "day_of_death": "str",\n "day_of_death_name": "str",\n "month_of_death": "str",\n "month_of_death_name": "str",\n "year_of_death": "str",\n "year_of_death_name": "str",\n "hour_of_death": "str",\n "hour_of_death_name": "str",\n "minute_of_death": "str",\n "minute_of_death_name": "str",\n "death_time": "str",\n "death_time_name": "str",\n "lag_hours": "str",\n "lag_hours_name": "str",\n "lag_minutes": "str",\n "lag_minutes_name": "str",\n "related_factors_person_level1": "str",\n "related_factors_person_level1_name": "str",\n "related_factors_person_level2": "str",\n "related_factors_person_level2_name": "str",\n "related_factors_person_level3": "str",\n 
"related_factors_person_level3_name": "str",\n "fatal_injury_at_work": "str",\n "fatal_injury_at_work_name": "str",\n "hispanic_origin": "str",\n "hispanic_origin_name": "str",\n "non_motorist_location_at_time_of_crash": "str",\n "non_motorist_location_at_time_of_crash_name": "str",\n "helm_use": "str",\n "helm_usename": "str",\n "helm_mis": "str",\n "helm_misname": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "STR_VEH": "number_of_motor_vehicle_striking_non_motorist",\n "COUNTY": "county",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MAK_MOD": "make_model_combined",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "model_year",\n "MOD_YEARNAME": "model_year_name",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "AGE": "age",\n "AGENAME": "age_name",\n "SEX": "sex",\n "SEXNAME": "sex_name",\n "PER_TYP": "person_type",\n "PER_TYPNAME": "person_type_name",\n "INJ_SEV": "injury_severity",\n "INJ_SEVNAME": "injury_severity_name",\n "SEAT_POS": "seating_position",\n "SEAT_POSNAME": "seating_position_name",\n "REST_USE": "restraint_system_helmet_use",\n "REST_USENAME": "restraint_system_helmet_use_name",\n "REST_MIS": "indication_of_misuse_of_restraint_system_helmet",\n "REST_MISNAME": "indication_of_misuse_of_restraint_system_helmet_name",\n "AIR_BAG": "air_bag_deployed",\n "AIR_BAGNAME": "air_bag_deployed_name",\n "EJECTION": "ejection",\n "EJECTIONNAME": "ejection_name",\n "EJ_PATH": "ejection_path",\n "EJ_PATHNAME": "ejection_path_name",\n "EXTRICAT": "extrication",\n "EXTRICATNAME": "extrication_name",\n "DRINKING": "police_reported_alcohol_involvement",\n "DRINKINGNAME": "police_reported_alcohol_involvement_name",\n "ALC_DET": "method_of_alcohol_determination_by_police",\n "ALC_DETNAME": "method_of_alcohol_determination_by_police_name",\n "ALC_STATUS": "alcohol_test_status",\n "ALC_STATUSNAME": "alcohol_test_status_name",\n "ATST_TYP": "alcohol_test_type",\n "ATST_TYPNAME": "alcohol_test_type_name",\n "ALC_RES": "alcohol_result",\n "ALC_RESNAME": "alcohol_result_name",\n "DRUGS": "police_reported_drug_involvement",\n "DRUGSNAME": "police_reported_drug_involvement_name",\n "DRUG_DET": "method_of_drug_determination_by_police",\n "DRUG_DETNAME": "method_of_drug_determination_by_police_name",\n "DSTATUS": "drug_test_status",\n "DSTATUSNAME": "drug_test_status_name",\n "HOSPITAL": "hospital",\n "HOSPITALNAME": "hospital_name",\n "DOA": "died_at_scene_en_route",\n "DOANAME": "died_at_scene_en_route_name",\n "DEATH_DA": "day_of_death",\n "DEATH_DANAME": "day_of_death_name",\n "DEATH_MO": "month_of_death",\n "DEATH_MONAME": "month_of_death_name",\n "DEATH_YR": "year_of_death",\n 
"DEATH_YRNAME": "year_of_death_name",\n "DEATH_HR": "hour_of_death",\n "DEATH_HRNAME": "hour_of_death_name",\n "DEATH_MN": "minute_of_death",\n "DEATH_MNNAME": "minute_of_death_name",\n "DEATH_TM": "death_time",\n "DEATH_TMNAME": "death_time_name",\n "LAG_HRS": "lag_hours",\n "LAG_HRSNAME": "lag_hours_name",\n "LAG_MINS": "lag_minutes",\n "LAG_MINSNAME": "lag_minutes_name",\n "P_SF1": "related_factors_person_level1",\n "P_SF1NAME": "related_factors_person_level1_name",\n "P_SF2": "related_factors_person_level2",\n "P_SF2NAME": "related_factors_person_level2_name",\n "P_SF3": "related_factors_person_level3",\n "P_SF3NAME": "related_factors_person_level3_name",\n "WORK_INJ": "fatal_injury_at_work",\n "WORK_INJNAME": "fatal_injury_at_work_name",\n "HISPANIC": "hispanic_origin",\n "HISPANICNAME": "hispanic_origin_name",\n "LOCATION": "non_motorist_location_at_time_of_crash",\n "LOCATIONNAME": "non_motorist_location_at_time_of_crash_name",\n "HELM_USE": "helm_use",\n "HELM_USENAME": "helm_usename",\n "HELM_MIS": "helm_mis",\n "HELM_MISNAME": "helm_misname"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
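    # Note: the INPUT_CSV_HEADERS, INPUT_DTYPES and RENAME_MAPPINGS_LIST env vars above
    # (and in the tasks below) are JSON-encoded strings: the target column order, a
    # per-column dtype map, and the source-to-target column rename map. How they are
    # consumed is up to the run_csv_transform_kub image; a typical reader would be
    # something like json.loads(os.environ["RENAME_MAPPINGS_LIST"]), but that is an
    # assumption about the container, not something defined in this DAG.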
    # Run CSV transform within kubernetes pod for the person 2020 pipeline
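    # Note: the 2020 person file layout drops the related_factors_person_level1-3
    # columns and adds the vpic_make, vpic_model, vpic_body_class and icfinal_body
    # columns, so this task carries its own header, dtype and rename settings.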
person_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="person_2020_transform_csv",
startup_timeout_seconds=600,
name="person",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.person_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.person_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.person_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "person.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.person_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.person_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.person_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.person_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.person_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "vehicle_number",\n "person_number",\n "number_of_motor_vehicle_striking_non_motorist",\n "county",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "land_use",\n "land_use_name",\n "functional_system",\n "functional_system_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "school_bus_related",\n "school_bus_related_name",\n "vehicle_make",\n "vehicle_make_name",\n "make_model_combined",\n "body_type",\n "body_type_name",\n "model_year",\n "model_year_name",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "rollover",\n "rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "age",\n "age_name",\n "sex",\n "sex_name",\n "person_type",\n "person_type_name",\n "injury_severity",\n "injury_severity_name",\n "seating_position",\n "seating_position_name",\n "restraint_system_helmet_use",\n "restraint_system_helmet_use_name",\n "indication_of_misuse_of_restraint_system_helmet",\n "indication_of_misuse_of_restraint_system_helmet_name",\n "air_bag_deployed",\n "air_bag_deployed_name",\n "ejection",\n "ejection_name",\n "ejection_path",\n "ejection_path_name",\n "extrication",\n "extrication_name",\n "police_reported_alcohol_involvement",\n "police_reported_alcohol_involvement_name",\n "method_of_alcohol_determination_by_police",\n "method_of_alcohol_determination_by_police_name",\n "alcohol_test_status",\n "alcohol_test_status_name",\n "alcohol_test_type",\n "alcohol_test_type_name",\n "alcohol_result",\n "alcohol_result_name",\n "police_reported_drug_involvement",\n "police_reported_drug_involvement_name",\n "method_of_drug_determination_by_police",\n "method_of_drug_determination_by_police_name",\n "drug_test_status",\n "drug_test_status_name",\n "hospital",\n "hospital_name",\n "died_at_scene_en_route",\n "died_at_scene_en_route_name",\n "day_of_death",\n "day_of_death_name",\n "month_of_death",\n "month_of_death_name",\n "year_of_death",\n "year_of_death_name",\n "hour_of_death",\n "hour_of_death_name",\n "minute_of_death",\n "minute_of_death_name",\n "death_time",\n "death_time_name",\n "lag_hours",\n "lag_hours_name",\n "lag_minutes",\n "lag_minutes_name",\n "fatal_injury_at_work",\n "fatal_injury_at_work_name",\n "hispanic_origin",\n "hispanic_origin_name",\n "non_motorist_location_at_time_of_crash",\n "non_motorist_location_at_time_of_crash_name",\n "helm_use",\n "helm_usename",\n "helm_mis",\n "helm_misname",\n "vpic_make",\n "vpic_make_name",\n "vpic_model",\n "vpic_model_name",\n "vpic_body_class",\n "vpic_body_class_name",\n "icfinal_body",\n "icfinalbody_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "number_of_motor_vehicle_striking_non_motorist": "str",\n "county": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "land_use": "str",\n "land_use_name": "str",\n "functional_system": "str",\n "functional_system_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "school_bus_related": "str",\n "school_bus_related_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "make_model_combined": "str",\n "body_type": "str",\n "body_type_name": "str",\n "model_year": "str",\n "model_year_name": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "age": "str",\n "age_name": "str",\n "sex": "str",\n "sex_name": "str",\n "person_type": "str",\n "person_type_name": "str",\n "injury_severity": "str",\n "injury_severity_name": "str",\n "seating_position": "str",\n "seating_position_name": "str",\n "restraint_system_helmet_use": "str",\n "restraint_system_helmet_use_name": "str",\n "indication_of_misuse_of_restraint_system_helmet": "str",\n "indication_of_misuse_of_restraint_system_helmet_name": "str",\n "air_bag_deployed": "str",\n "air_bag_deployed_name": "str",\n "ejection": "str",\n "ejection_name": "str",\n "ejection_path": "str",\n "ejection_path_name": "str",\n "extrication": "str",\n "extrication_name": "str",\n "police_reported_alcohol_involvement": "str",\n "police_reported_alcohol_involvement_name": "str",\n "method_of_alcohol_determination_by_police": "str",\n "method_of_alcohol_determination_by_police_name": "str",\n "alcohol_test_status": "str",\n "alcohol_test_status_name": "str",\n "alcohol_test_type": "str",\n "alcohol_test_type_name": "str",\n "alcohol_result": "str",\n "alcohol_result_name": "str",\n "police_reported_drug_involvement": "str",\n "police_reported_drug_involvement_name": "str",\n "method_of_drug_determination_by_police": "str",\n "method_of_drug_determination_by_police_name": "str",\n "drug_test_status": "str",\n "drug_test_status_name": "str",\n "hospital": "str",\n "hospital_name": "str",\n "died_at_scene_en_route": "str",\n "died_at_scene_en_route_name": "str",\n "day_of_death": "str",\n "day_of_death_name": "str",\n "month_of_death": "str",\n "month_of_death_name": "str",\n "year_of_death": "str",\n "year_of_death_name": "str",\n "hour_of_death": "str",\n "hour_of_death_name": "str",\n "minute_of_death": "str",\n "minute_of_death_name": "str",\n "death_time": "str",\n "death_time_name": "str",\n "lag_hours": "str",\n "lag_hours_name": "str",\n "lag_minutes": "str",\n "lag_minutes_name": "str",\n "fatal_injury_at_work": "str",\n "fatal_injury_at_work_name": "str",\n "hispanic_origin": "str",\n "hispanic_origin_name": "str",\n "non_motorist_location_at_time_of_crash": "str",\n "non_motorist_location_at_time_of_crash_name": 
"str",\n "helm_use": "str",\n "helm_usename": "str",\n "helm_mis": "str",\n "helm_misname": "str",\n "vpic_make": "str",\n "vpic_make_name": "str",\n "vpic_model": "str",\n "vpic_model_name": "str",\n "vpic_body_class": "str",\n "vpic_body_class_name": "str",\n "icfinal_body": "str",\n "icfinalbody_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "STR_VEH": "number_of_motor_vehicle_striking_non_motorist",\n "COUNTY": "county",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "RUR_URB": "land_use",\n "RUR_URBNAME": "land_use_name",\n "FUNC_SYS": "functional_system",\n "FUNC_SYSNAME": "functional_system_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "SCH_BUS": "school_bus_related",\n "SCH_BUSNAME": "school_bus_related_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MAK_MOD": "make_model_combined",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "model_year",\n "MOD_YEARNAME": "model_year_name",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "AGE": "age",\n "AGENAME": "age_name",\n "SEX": "sex",\n "SEXNAME": "sex_name",\n "PER_TYP": "person_type",\n "PER_TYPNAME": "person_type_name",\n "INJ_SEV": "injury_severity",\n "INJ_SEVNAME": "injury_severity_name",\n "SEAT_POS": "seating_position",\n "SEAT_POSNAME": "seating_position_name",\n "REST_USE": "restraint_system_helmet_use",\n "REST_USENAME": "restraint_system_helmet_use_name",\n "REST_MIS": "indication_of_misuse_of_restraint_system_helmet",\n "REST_MISNAME": "indication_of_misuse_of_restraint_system_helmet_name",\n "AIR_BAG": "air_bag_deployed",\n "AIR_BAGNAME": "air_bag_deployed_name",\n "EJECTION": "ejection",\n "EJECTIONNAME": "ejection_name",\n "EJ_PATH": "ejection_path",\n "EJ_PATHNAME": "ejection_path_name",\n "EXTRICAT": "extrication",\n "EXTRICATNAME": "extrication_name",\n "DRINKING": "police_reported_alcohol_involvement",\n "DRINKINGNAME": "police_reported_alcohol_involvement_name",\n "ALC_DET": "method_of_alcohol_determination_by_police",\n "ALC_DETNAME": "method_of_alcohol_determination_by_police_name",\n "ALC_STATUS": "alcohol_test_status",\n "ALC_STATUSNAME": "alcohol_test_status_name",\n "ATST_TYP": "alcohol_test_type",\n "ATST_TYPNAME": "alcohol_test_type_name",\n "ALC_RES": "alcohol_result",\n "ALC_RESNAME": "alcohol_result_name",\n "DRUGS": "police_reported_drug_involvement",\n "DRUGSNAME": "police_reported_drug_involvement_name",\n "DRUG_DET": "method_of_drug_determination_by_police",\n "DRUG_DETNAME": "method_of_drug_determination_by_police_name",\n "DSTATUS": "drug_test_status",\n "DSTATUSNAME": "drug_test_status_name",\n "HOSPITAL": "hospital",\n "HOSPITALNAME": "hospital_name",\n "DOA": "died_at_scene_en_route",\n "DOANAME": "died_at_scene_en_route_name",\n "DEATH_DA": "day_of_death",\n "DEATH_DANAME": "day_of_death_name",\n "DEATH_MO": "month_of_death",\n "DEATH_MONAME": "month_of_death_name",\n "DEATH_YR": "year_of_death",\n 
"DEATH_YRNAME": "year_of_death_name",\n "DEATH_HR": "hour_of_death",\n "DEATH_HRNAME": "hour_of_death_name",\n "DEATH_MN": "minute_of_death",\n "DEATH_MNNAME": "minute_of_death_name",\n "DEATH_TM": "death_time",\n "DEATH_TMNAME": "death_time_name",\n "LAG_HRS": "lag_hours",\n "LAG_HRSNAME": "lag_hours_name",\n "LAG_MINS": "lag_minutes",\n "LAG_MINSNAME": "lag_minutes_name",\n "WORK_INJ": "fatal_injury_at_work",\n "WORK_INJNAME": "fatal_injury_at_work_name",\n "HISPANIC": "hispanic_origin",\n "HISPANICNAME": "hispanic_origin_name",\n "LOCATION": "non_motorist_location_at_time_of_crash",\n "LOCATIONNAME": "non_motorist_location_at_time_of_crash_name",\n "HELM_USE": "helm_use",\n "HELM_USENAME": "helm_usename",\n "HELM_MIS": "helm_mis",\n "HELM_MISNAME": "helm_misname",\n "VPICMAKE": "vpic_make",\n "VPICMAKENAME": "vpic_make_name",\n "VPICMODEL": "vpic_model",\n "VPICMODELNAME": "vpic_model_name",\n "VPICBODYCLASS": "vpic_body_class",\n "VPICBODYCLASSNAME": "vpic_body_class_name",\n "ICFINALBODY": "icfinal_body",\n "ICFINALBODYNAME": "icfinalbody_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the safetyeq 2015-2016 pipeline
safetyeq_2015_2016_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="safetyeq_2015_2016_transform_csv",
startup_timeout_seconds=600,
name="safetyeq",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "safetyeq.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2015_2016.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "non_motorist_safety_equipment_use",\n "non_motorist_safety_equipment_use_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "non_motorist_safety_equipment_use": "str",\n "non_motorist_safety_equipment_use_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "MSAFEQMT": "non_motorist_safety_equipment_use",\n "MSAFEQMTNAME": "non_motorist_safety_equipment_use_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the safetyeq 2017-2020 pipeline
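    # Note: from 2017 onwards the safetyeq file replaces the single
    # non_motorist_safety_equipment_use (MSAFEQMT) field used for 2015-2016 with
    # per-equipment fields (nm_helmet, nm_propad, nm_othpro, nm_refclo, nm_light,
    # nm_othpre), hence the different headers, dtypes and rename mappings below.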
safetyeq_2017_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="safetyeq_2017_2020_transform_csv",
startup_timeout_seconds=600,
name="safetyeq",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "safetyeq.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.safetyeq_2017_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "person_number",\n "nm_helmet",\n "nm_helmet_name",\n "nm_propad",\n "nm_propad_name",\n "nm_othpro",\n "nm_othpro_name",\n "nm_refclo",\n "nm_refclo_name",\n "nm_light",\n "nm_light_name",\n "nm_othpre",\n "nm_othpre_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "person_number": "str",\n "nm_helmet": "str",\n "nm_helmet_name": "str",\n "nm_propad": "str",\n "nm_propad_name": "str",\n "nm_othpro": "str",\n "nm_othpro_name": "str",\n "nm_refclo": "str",\n "nm_refclo_name": "str",\n "nm_light": "str",\n "nm_light_name": "str",\n "nm_othpre": "str",\n "nm_othpre_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "PER_NO": "person_number",\n "NMHELMET": "nm_helmet",\n "NMHELMETNAME": "nm_helmet_name",\n "NMPROPAD": "nm_propad",\n "NMPROPADNAME": "nm_propad_name",\n "NMOTHPRO": "nm_othpro",\n "NMOTHPRONAME": "nm_othpro_name",\n "NMREFCLO": "nm_refclo",\n "NMREFCLONAME": "nm_refclo_name",\n "NMLIGHT": "nm_light",\n "NMLIGHTNAME": "nm_light_name",\n "NMOTHPRE": "nm_othpre",\n "NMOTHPRENAME": "nm_othpre_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the vehicle 2015 pipeline
vehicle_2015_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vehicle_2015_transform_csv",
startup_timeout_seconds=600,
name="vehicle",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vehicle.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2015.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "jackknife",\n "jackknife_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number_mcid",\n "motor_carrier_identification_number_mcid_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "travel_speed",\n "travel_speed_name",\n "underride_override",\n "underride_override_name",\n "rollover",\n "rollover_name",\n "location_of_rollover",\n "location_of_rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level_1",\n "related_factors_vehicle_level_1_name",\n "related_factors_vehicle_level_2",\n "related_factors_vehicle_level_2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "driver_presence",\n "driver_presence_name",\n "drivers_license_state",\n "drivers_license_state_name",\n "drivers_zip_code",\n "drivers_zip_code_name",\n "non_cdl_license_status",\n "non_cdl_license_status_name",\n "non_cdl_license_type",\n "non_cdl_license_type_name",\n "commercial_motor_vehicle_license_status",\n "commercial_motor_vehicle_license_status_name",\n "compliance_with_cdl_endorsements",\n "compliance_with_cdl_endorsements_name",\n "license_compliance_with_class_of_vehicle",\n "license_compliance_with_class_of_vehicle_name",\n "compliance_with_license_restrictions",\n "compliance_with_license_restrictions_name",\n 
"driver_height",\n "driver_height_name",\n "driver_weight",\n "driver_weight_name",\n "previous_recorded_crashes",\n "previous_recorded_crashes_name",\n "previous_recorded_suspensions_and_revocations",\n "previous_recorded_suspensions_and_revocations_name",\n "previous_dwi_convictions",\n "previous_dwi_convictions_name",\n "previous_speeding_convictions",\n "previous_speeding_convictions_name",\n "previous_other_moving_violation_convictions",\n "previous_other_moving_violation_convictions_name",\n "month_of_first_crash_suspension_or_conviction",\n "month_of_first_crash_suspension_or_conviction_name",\n "year_of_first_crash_suspension_or_conviction",\n "year_of_first_crash_suspension_or_conviction_name",\n "month_of_last_crash_suspension_or_conviction",\n "month_of_last_crash_suspension_or_conviction_name",\n "year_of_last_crash_suspension_or_conviction",\n "year_of_last_crash_suspension_or_conviction_name",\n "speeding_related",\n "speeding_related_name",\n "related_factors_driver_level_1",\n "related_factors_driver_level_1_name",\n "related_factors_driver_level_2",\n "related_factors_driver_level_2_name",\n "related_factors_driver_level_3",\n "related_factors_driver_level_3_name",\n "related_factors_driver_level_4",\n "related_factors_driver_level_4_name",\n "trafficway_description",\n "trafficway_description_name",\n "total_lanes_in_roadway",\n "total_lanes_in_roadway_name",\n "speed_limit",\n "speed_limit_name",\n "roadway_alignment",\n "roadway_alignment_name",\n "roadway_grade",\n "roadway_grade_name",\n "roadway_surface_type",\n "roadway_surface_type_name",\n "roadway_surface_condition",\n "roadway_surface_condition_name",\n "traffic_control_device",\n "traffic_control_device_name",\n "traffic_control_device_functioning",\n "traffic_control_device_functioning_name",\n "pre_event_movement_prior_to_recognition_of_critical_event",\n "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "critical_event_precrash",\n "critical_event_precrash_name",\n "attempted_avoidance_maneuver",\n "attempted_avoidance_maneuver_name",\n "pre_impact_stability",\n "pre_impact_stability_name",\n "pre_impact_location",\n "pre_impact_location_name",\n "crash_type",\n "crash_type_name",\n "fatalities_in_vehicle",\n "driver_drinking",\n "driver_drinking_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "jackknife": "str",\n "jackknife_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number_mcid": "str",\n "motor_carrier_identification_number_mcid_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "travel_speed": "str",\n "travel_speed_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "location_of_rollover": "str",\n "location_of_rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level_1": "str",\n "related_factors_vehicle_level_1_name": "str",\n "related_factors_vehicle_level_2": "str",\n 
"related_factors_vehicle_level_2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "driver_presence": "str",\n "driver_presence_name": "str",\n "drivers_license_state": "str",\n "drivers_license_state_name": "str",\n "drivers_zip_code": "str",\n "drivers_zip_code_name": "str",\n "non_cdl_license_status": "str",\n "non_cdl_license_status_name": "str",\n "non_cdl_license_type": "str",\n "non_cdl_license_type_name": "str",\n "commercial_motor_vehicle_license_status": "str",\n "commercial_motor_vehicle_license_status_name": "str",\n "compliance_with_cdl_endorsements": "str",\n "compliance_with_cdl_endorsements_name": "str",\n "license_compliance_with_class_of_vehicle": "str",\n "license_compliance_with_class_of_vehicle_name": "str",\n "compliance_with_license_restrictions": "str",\n "compliance_with_license_restrictions_name": "str",\n "driver_height": "str",\n "driver_height_name": "str",\n "driver_weight": "str",\n "driver_weight_name": "str",\n "previous_recorded_crashes": "str",\n "previous_recorded_crashes_name": "str",\n "previous_recorded_suspensions_and_revocations": "str",\n "previous_recorded_suspensions_and_revocations_name": "str",\n "previous_dwi_convictions": "str",\n "previous_dwi_convictions_name": "str",\n "previous_speeding_convictions": "str",\n "previous_speeding_convictions_name": "str",\n "previous_other_moving_violation_convictions": "str",\n "previous_other_moving_violation_convictions_name": "str",\n "month_of_first_crash_suspension_or_conviction": "str",\n "month_of_first_crash_suspension_or_conviction_name": "str",\n "year_of_first_crash_suspension_or_conviction": "str",\n "year_of_first_crash_suspension_or_conviction_name": "str",\n "month_of_last_crash_suspension_or_conviction": "str",\n "month_of_last_crash_suspension_or_conviction_name": "str",\n "year_of_last_crash_suspension_or_conviction": "str",\n "year_of_last_crash_suspension_or_conviction_name": "str",\n "speeding_related": "str",\n "speeding_related_name": "str",\n "related_factors_driver_level_1": "str",\n "related_factors_driver_level_1_name": "str",\n "related_factors_driver_level_2": "str",\n "related_factors_driver_level_2_name": "str",\n "related_factors_driver_level_3": "str",\n "related_factors_driver_level_3_name": "str",\n "related_factors_driver_level_4": "str",\n "related_factors_driver_level_4_name": "str",\n "trafficway_description": "str",\n "trafficway_description_name": "str",\n "total_lanes_in_roadway": "str",\n "total_lanes_in_roadway_name": "str",\n "speed_limit": "str",\n "speed_limit_name": "str",\n "roadway_alignment": "str",\n "roadway_alignment_name": "str",\n "roadway_grade": "str",\n "roadway_grade_name": "str",\n "roadway_surface_type": "str",\n "roadway_surface_type_name": "str",\n "roadway_surface_condition": "str",\n "roadway_surface_condition_name": "str",\n "traffic_control_device": "str",\n "traffic_control_device_name": "str",\n "traffic_control_device_functioning": "str",\n "traffic_control_device_functioning_name": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event_name": "str",\n "critical_event_precrash": "str",\n "critical_event_precrash_name": "str",\n "attempted_avoidance_maneuver": "str",\n "attempted_avoidance_maneuver_name": "str",\n "pre_impact_stability": "str",\n "pre_impact_stability_name": "str",\n "pre_impact_location": "str",\n "pre_impact_location_name": "str",\n "crash_type": "str",\n "crash_type_name": "str",\n "fatalities_in_vehicle": 
"str",\n "driver_drinking": "str",\n "driver_drinking_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "NUMOCCS": "number_of_occupants",\n "NUMOCCSNAME": "number_of_occupants_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "UNITTYPE": "unit_type",\n "UNITTYPENAME": "unit_type_name",\n "HIT_RUN": "hit_and_run",\n "HIT_RUNNAME": "hit_and_run_name",\n "REG_STAT": "registration_state",\n "REG_STATNAME": "registration_state_name",\n "OWNER": "registered_vehicle_owner",\n "OWNERNAME": "registered_vehicle_owner_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MODEL": "vehicle_model",\n "MAK_MOD": "make_model_combined",\n "MAK_MODNAME": "make_model_combined_name",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "vehicle_model_year",\n "MOD_YEARNAME": "vehicle_model_year_name",\n "VIN": "vehicle_identification_number_vin",\n "VINNAME": "vehicle_identification_number_vin_name",\n "VIN_1": "vin_character_1",\n "VIN_2": "vin_character_2",\n "VIN_3": "vin_character_3",\n "VIN_4": "vin_character_4",\n "VIN_5": "vin_character_5",\n "VIN_6": "vin_character_6",\n "VIN_7": "vin_character_7",\n "VIN_8": "vin_character_8",\n "VIN_9": "vin_character_9",\n "VIN_10": "vin_character_10",\n "VIN_11": "vin_character_11",\n "VIN_12": "vin_character_12",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "J_KNIFE": "jackknife",\n "J_KNIFENAME": "jackknife_name",\n "MCARR_I1": "mcid_issuing_authority",\n "MCARR_I1NAME": "mcid_issuing_authority_name",\n "MCARR_I2": "mcid_identification_number",\n "MCARR_I2NAME": "mcid_identification_number_name",\n "MCARR_ID": "motor_carrier_identification_number_mcid",\n "MCARR_IDNAME": "motor_carrier_identification_number_mcid_name",\n "GVWR": "gross_vehicle_weight_rating",\n "GVWRNAME": "gross_vehicle_weight_rating_name",\n "V_CONFIG": "vehicle_configuration",\n "V_CONFIGNAME": "vehicle_configuration_name",\n "CARGO_BT": "cargo_body_type",\n "CARGO_BTNAME": "cargo_body_type_name",\n "HAZ_INV": "hazardous_material_involvement",\n "HAZ_INVNAME": "hazardous_material_involvement_name",\n "HAZ_PLAC": "hazardous_material_placard",\n "HAZ_PLACNAME": "hazardous_material_placard_name",\n "HAZ_ID": "hazardous_material_identification_number",\n "HAZ_IDNAME": "hazardous_material_identification_number_name",\n "HAZ_CNO": "hazardous_material_class_number",\n "HAZ_CNONAME": "hazardous_material_class_number_name",\n "HAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "HAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "BUS_USE": "bus_use",\n "BUS_USENAME": "bus_use_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "TRAV_SP": "travel_speed",\n "TRAV_SPNAME": "travel_speed_name",\n "UNDERIDE": "underride_override",\n "UNDERIDENAME": "underride_override_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "ROLINLOC": "location_of_rollover",\n 
"ROLINLOCNAME": "location_of_rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "DEFORMED": "extent_of_damage",\n "DEFORMEDNAME": "extent_of_damage_name",\n "TOWED": "vehicle_removal",\n "TOWEDNAME": "vehicle_removal_name",\n "M_HARM": "most_harmful_event",\n "M_HARMNAME": "most_harmful_event_name",\n "VEH_SC1": "related_factors_vehicle_level_1",\n "VEH_SC1NAME": "related_factors_vehicle_level_1_name",\n "VEH_SC2": "related_factors_vehicle_level_2",\n "VEH_SC2NAME": "related_factors_vehicle_level_2_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "DR_PRES": "driver_presence",\n "DR_PRESNAME": "driver_presence_name",\n "L_STATE": "drivers_license_state",\n "L_STATENAME": "drivers_license_state_name",\n "DR_ZIP": "drivers_zip_code",\n "DR_ZIPNAME": "drivers_zip_code_name",\n "L_STATUS": "non_cdl_license_status",\n "L_STATUSNAME": "non_cdl_license_status_name",\n "L_TYPE": "non_cdl_license_type",\n "L_TYPENAME": "non_cdl_license_type_name",\n "CDL_STAT": "commercial_motor_vehicle_license_status",\n "CDL_STATNAME": "commercial_motor_vehicle_license_status_name",\n "L_ENDORS": "compliance_with_cdl_endorsements",\n "L_ENDORSNAME": "compliance_with_cdl_endorsements_name",\n "L_COMPL": "license_compliance_with_class_of_vehicle",\n "L_COMPLNAME": "license_compliance_with_class_of_vehicle_name",\n "L_RESTRI": "compliance_with_license_restrictions",\n "L_RESTRINAME": "compliance_with_license_restrictions_name",\n "DR_HGT": "driver_height",\n "DR_HGTNAME": "driver_height_name",\n "DR_WGT": "driver_weight",\n "DR_WGTNAME": "driver_weight_name",\n "PREV_ACC": "previous_recorded_crashes",\n "PREV_ACCNAME": "previous_recorded_crashes_name",\n "PREV_SUS": "previous_recorded_suspensions_and_revocations",\n "PREV_SUSNAME": "previous_recorded_suspensions_and_revocations_name",\n "PREV_DWI": "previous_dwi_convictions",\n "PREV_DWINAME": "previous_dwi_convictions_name",\n "PREV_SPD": "previous_speeding_convictions",\n "PREV_SPDNAME": "previous_speeding_convictions_name",\n "PREV_OTH": "previous_other_moving_violation_convictions",\n "PREV_OTHNAME": "previous_other_moving_violation_convictions_name",\n "FIRST_MO": "month_of_first_crash_suspension_or_conviction",\n "FIRST_MONAME": "month_of_first_crash_suspension_or_conviction_name",\n "FIRST_YR": "year_of_first_crash_suspension_or_conviction",\n "FIRST_YRNAME": "year_of_first_crash_suspension_or_conviction_name",\n "LAST_MO": "month_of_last_crash_suspension_or_conviction",\n "LAST_MONAME": "month_of_last_crash_suspension_or_conviction_name",\n "LAST_YR": "year_of_last_crash_suspension_or_conviction",\n "LAST_YRNAME": "year_of_last_crash_suspension_or_conviction_name",\n "SPEEDREL": "speeding_related",\n "SPEEDRELNAME": "speeding_related_name",\n "DR_SF1": "related_factors_driver_level_1",\n "DR_SF1NAME": "related_factors_driver_level_1_name",\n "DR_SF2": "related_factors_driver_level_2",\n "DR_SF2NAME": "related_factors_driver_level_2_name",\n "DR_SF3": "related_factors_driver_level_3",\n "DR_SF3NAME": "related_factors_driver_level_3_name",\n "DR_SF4": "related_factors_driver_level_4",\n "DR_SF4NAME": "related_factors_driver_level_4_name",\n "VTRAFWAY": "trafficway_description",\n "VTRAFWAYNAME": "trafficway_description_name",\n "VNUM_LAN": "total_lanes_in_roadway",\n "VNUM_LANNAME": "total_lanes_in_roadway_name",\n "VSPD_LIM": "speed_limit",\n "VSPD_LIMNAME": "speed_limit_name",\n "VALIGN": "roadway_alignment",\n "VALIGNNAME": "roadway_alignment_name",\n "VPROFILE": 
"roadway_grade",\n "VPROFILENAME": "roadway_grade_name",\n "VPAVETYP": "roadway_surface_type",\n "VPAVETYPNAME": "roadway_surface_type_name",\n "VSURCOND": "roadway_surface_condition",\n "VSURCONDNAME": "roadway_surface_condition_name",\n "VTRAFCON": "traffic_control_device",\n "VTRAFCONNAME": "traffic_control_device_name",\n "VTCONT_F": "traffic_control_device_functioning",\n "VTCONT_FNAME": "traffic_control_device_functioning_name",\n "P_CRASH1": "pre_event_movement_prior_to_recognition_of_critical_event",\n "P_CRASH1NAME": "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "P_CRASH2": "critical_event_precrash",\n "P_CRASH2NAME": "critical_event_precrash_name",\n "P_CRASH3": "attempted_avoidance_maneuver",\n "P_CRASH3NAME": "attempted_avoidance_maneuver_name",\n "PCRASH4": "pre_impact_stability",\n "PCRASH4NAME": "pre_impact_stability_name",\n "PCRASH5": "pre_impact_location",\n "PCRASH5NAME": "pre_impact_location_name",\n "ACC_TYPE": "crash_type",\n "ACC_TYPENAME": "crash_type_name",\n "DEATHS": "fatalities_in_vehicle",\n "DR_DRINK": "driver_drinking",\n "DR_DRINKNAME": "driver_drinking_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the vehicle 2016-2017 pipeline
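    # Note: the 2016-2017 vehicle file layout extends the 2015 layout with the
    # trailer_vehicle_identification_number_1/2/3 fields and their *_name
    # counterparts, hence the longer INPUT_CSV_HEADERS and INPUT_DTYPES below.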
vehicle_2016_2017_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vehicle_2016_2017_transform_csv",
startup_timeout_seconds=600,
name="vehicle",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vehicle.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2016_2017.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "jackknife",\n "jackknife_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number_mcid",\n "motor_carrier_identification_number_mcid_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "travel_speed",\n "travel_speed_name",\n "underride_override",\n "underride_override_name",\n "rollover",\n "rollover_name",\n "location_of_rollover",\n "location_of_rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level_1",\n "related_factors_vehicle_level_1_name",\n "related_factors_vehicle_level_2",\n "related_factors_vehicle_level_2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "driver_presence",\n "driver_presence_name",\n "drivers_license_state",\n "drivers_license_state_name",\n "drivers_zip_code",\n "drivers_zip_code_name",\n "non_cdl_license_status",\n "non_cdl_license_status_name",\n "non_cdl_license_type",\n "non_cdl_license_type_name",\n "commercial_motor_vehicle_license_status",\n "commercial_motor_vehicle_license_status_name",\n "compliance_with_cdl_endorsements",\n "compliance_with_cdl_endorsements_name",\n "license_compliance_with_class_of_vehicle",\n "license_compliance_with_class_of_vehicle_name",\n "compliance_with_license_restrictions",\n "compliance_with_license_restrictions_name",\n 
"driver_height",\n "driver_height_name",\n "driver_weight",\n "driver_weight_name",\n "previous_recorded_crashes",\n "previous_recorded_crashes_name",\n "previous_recorded_suspensions_and_revocations",\n "previous_recorded_suspensions_and_revocations_name",\n "previous_dwi_convictions",\n "previous_dwi_convictions_name",\n "previous_speeding_convictions",\n "previous_speeding_convictions_name",\n "previous_other_moving_violation_convictions",\n "previous_other_moving_violation_convictions_name",\n "month_of_first_crash_suspension_or_conviction",\n "month_of_first_crash_suspension_or_conviction_name",\n "year_of_first_crash_suspension_or_conviction",\n "year_of_first_crash_suspension_or_conviction_name",\n "month_of_last_crash_suspension_or_conviction",\n "month_of_last_crash_suspension_or_conviction_name",\n "year_of_last_crash_suspension_or_conviction",\n "year_of_last_crash_suspension_or_conviction_name",\n "speeding_related",\n "speeding_related_name",\n "related_factors_driver_level_1",\n "related_factors_driver_level_1_name",\n "related_factors_driver_level_2",\n "related_factors_driver_level_2_name",\n "related_factors_driver_level_3",\n "related_factors_driver_level_3_name",\n "related_factors_driver_level_4",\n "related_factors_driver_level_4_name",\n "trafficway_description",\n "trafficway_description_name",\n "total_lanes_in_roadway",\n "total_lanes_in_roadway_name",\n "speed_limit",\n "speed_limit_name",\n "roadway_alignment",\n "roadway_alignment_name",\n "roadway_grade",\n "roadway_grade_name",\n "roadway_surface_type",\n "roadway_surface_type_name",\n "roadway_surface_condition",\n "roadway_surface_condition_name",\n "traffic_control_device",\n "traffic_control_device_name",\n "traffic_control_device_functioning",\n "traffic_control_device_functioning_name",\n "pre_event_movement_prior_to_recognition_of_critical_event",\n "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "critical_event_precrash",\n "critical_event_precrash_name",\n "attempted_avoidance_maneuver",\n "attempted_avoidance_maneuver_name",\n "pre_impact_stability",\n "pre_impact_stability_name",\n "pre_impact_location",\n "pre_impact_location_name",\n "crash_type",\n "crash_type_name",\n "fatalities_in_vehicle",\n "driver_drinking",\n "driver_drinking_name",\n "trailer_vehicle_identification_number_1",\n "trailer_vehicle_identification_number_1_name",\n "trailer_vehicle_identification_number_2",\n "trailer_vehicle_identification_number_2_name",\n "trailer_vehicle_identification_number_3",\n "trailer_vehicle_identification_number_3_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "jackknife": "str",\n "jackknife_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number_mcid": "str",\n "motor_carrier_identification_number_mcid_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "travel_speed": "str",\n "travel_speed_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "location_of_rollover": "str",\n "location_of_rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level_1": "str",\n "related_factors_vehicle_level_1_name": "str",\n "related_factors_vehicle_level_2": "str",\n 
"related_factors_vehicle_level_2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "driver_presence": "str",\n "driver_presence_name": "str",\n "drivers_license_state": "str",\n "drivers_license_state_name": "str",\n "drivers_zip_code": "str",\n "drivers_zip_code_name": "str",\n "non_cdl_license_status": "str",\n "non_cdl_license_status_name": "str",\n "non_cdl_license_type": "str",\n "non_cdl_license_type_name": "str",\n "commercial_motor_vehicle_license_status": "str",\n "commercial_motor_vehicle_license_status_name": "str",\n "compliance_with_cdl_endorsements": "str",\n "compliance_with_cdl_endorsements_name": "str",\n "license_compliance_with_class_of_vehicle": "str",\n "license_compliance_with_class_of_vehicle_name": "str",\n "compliance_with_license_restrictions": "str",\n "compliance_with_license_restrictions_name": "str",\n "driver_height": "str",\n "driver_height_name": "str",\n "driver_weight": "str",\n "driver_weight_name": "str",\n "previous_recorded_crashes": "str",\n "previous_recorded_crashes_name": "str",\n "previous_recorded_suspensions_and_revocations": "str",\n "previous_recorded_suspensions_and_revocations_name": "str",\n "previous_dwi_convictions": "str",\n "previous_dwi_convictions_name": "str",\n "previous_speeding_convictions": "str",\n "previous_speeding_convictions_name": "str",\n "previous_other_moving_violation_convictions": "str",\n "previous_other_moving_violation_convictions_name": "str",\n "month_of_first_crash_suspension_or_conviction": "str",\n "month_of_first_crash_suspension_or_conviction_name": "str",\n "year_of_first_crash_suspension_or_conviction": "str",\n "year_of_first_crash_suspension_or_conviction_name": "str",\n "month_of_last_crash_suspension_or_conviction": "str",\n "month_of_last_crash_suspension_or_conviction_name": "str",\n "year_of_last_crash_suspension_or_conviction": "str",\n "year_of_last_crash_suspension_or_conviction_name": "str",\n "speeding_related": "str",\n "speeding_related_name": "str",\n "related_factors_driver_level_1": "str",\n "related_factors_driver_level_1_name": "str",\n "related_factors_driver_level_2": "str",\n "related_factors_driver_level_2_name": "str",\n "related_factors_driver_level_3": "str",\n "related_factors_driver_level_3_name": "str",\n "related_factors_driver_level_4": "str",\n "related_factors_driver_level_4_name": "str",\n "trafficway_description": "str",\n "trafficway_description_name": "str",\n "total_lanes_in_roadway": "str",\n "total_lanes_in_roadway_name": "str",\n "speed_limit": "str",\n "speed_limit_name": "str",\n "roadway_alignment": "str",\n "roadway_alignment_name": "str",\n "roadway_grade": "str",\n "roadway_grade_name": "str",\n "roadway_surface_type": "str",\n "roadway_surface_type_name": "str",\n "roadway_surface_condition": "str",\n "roadway_surface_condition_name": "str",\n "traffic_control_device": "str",\n "traffic_control_device_name": "str",\n "traffic_control_device_functioning": "str",\n "traffic_control_device_functioning_name": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event_name": "str",\n "critical_event_precrash": "str",\n "critical_event_precrash_name": "str",\n "attempted_avoidance_maneuver": "str",\n "attempted_avoidance_maneuver_name": "str",\n "pre_impact_stability": "str",\n "pre_impact_stability_name": "str",\n "pre_impact_location": "str",\n "pre_impact_location_name": "str",\n "crash_type": "str",\n "crash_type_name": "str",\n "fatalities_in_vehicle": 
"str",\n "driver_drinking": "str",\n "driver_drinking_name": "str",\n "trailer_vehicle_identification_number_1": "str",\n "trailer_vehicle_identification_number_1_name": "str",\n "trailer_vehicle_identification_number_2": "str",\n "trailer_vehicle_identification_number_2_name": "str",\n "trailer_vehicle_identification_number_3": "str",\n "trailer_vehicle_identification_number_3_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "NUMOCCS": "number_of_occupants",\n "NUMOCCSNAME": "number_of_occupants_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "UNITTYPE": "unit_type",\n "UNITTYPENAME": "unit_type_name",\n "HIT_RUN": "hit_and_run",\n "HIT_RUNNAME": "hit_and_run_name",\n "REG_STAT": "registration_state",\n "REG_STATNAME": "registration_state_name",\n "OWNER": "registered_vehicle_owner",\n "OWNERNAME": "registered_vehicle_owner_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MODEL": "vehicle_model",\n "MAK_MOD": "make_model_combined",\n "MAK_MODNAME": "make_model_combined_name",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "vehicle_model_year",\n "MOD_YEARNAME": "vehicle_model_year_name",\n "VIN": "vehicle_identification_number_vin",\n "VINNAME": "vehicle_identification_number_vin_name",\n "VIN_1": "vin_character_1",\n "VIN_2": "vin_character_2",\n "VIN_3": "vin_character_3",\n "VIN_4": "vin_character_4",\n "VIN_5": "vin_character_5",\n "VIN_6": "vin_character_6",\n "VIN_7": "vin_character_7",\n "VIN_8": "vin_character_8",\n "VIN_9": "vin_character_9",\n "VIN_10": "vin_character_10",\n "VIN_11": "vin_character_11",\n "VIN_12": "vin_character_12",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "J_KNIFE": "jackknife",\n "J_KNIFENAME": "jackknife_name",\n "MCARR_I1": "mcid_issuing_authority",\n "MCARR_I1NAME": "mcid_issuing_authority_name",\n "MCARR_I2": "mcid_identification_number",\n "MCARR_I2NAME": "mcid_identification_number_name",\n "MCARR_ID": "motor_carrier_identification_number_mcid",\n "MCARR_IDNAME": "motor_carrier_identification_number_mcid_name",\n "GVWR": "gross_vehicle_weight_rating",\n "GVWRNAME": "gross_vehicle_weight_rating_name",\n "V_CONFIG": "vehicle_configuration",\n "V_CONFIGNAME": "vehicle_configuration_name",\n "CARGO_BT": "cargo_body_type",\n "CARGO_BTNAME": "cargo_body_type_name",\n "HAZ_INV": "hazardous_material_involvement",\n "HAZ_INVNAME": "hazardous_material_involvement_name",\n "HAZ_PLAC": "hazardous_material_placard",\n "HAZ_PLACNAME": "hazardous_material_placard_name",\n "HAZ_ID": "hazardous_material_identification_number",\n "HAZ_IDNAME": "hazardous_material_identification_number_name",\n "HAZ_CNO": "hazardous_material_class_number",\n "HAZ_CNONAME": "hazardous_material_class_number_name",\n "HAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "HAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "BUS_USE": "bus_use",\n "BUS_USENAME": "bus_use_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "TRAV_SP": "travel_speed",\n "TRAV_SPNAME": "travel_speed_name",\n "UNDERIDE": "underride_override",\n "UNDERIDENAME": "underride_override_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "ROLINLOC": "location_of_rollover",\n 
"ROLINLOCNAME": "location_of_rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "DEFORMED": "extent_of_damage",\n "DEFORMEDNAME": "extent_of_damage_name",\n "TOWED": "vehicle_removal",\n "TOWEDNAME": "vehicle_removal_name",\n "M_HARM": "most_harmful_event",\n "M_HARMNAME": "most_harmful_event_name",\n "VEH_SC1": "related_factors_vehicle_level_1",\n "VEH_SC1NAME": "related_factors_vehicle_level_1_name",\n "VEH_SC2": "related_factors_vehicle_level_2",\n "VEH_SC2NAME": "related_factors_vehicle_level_2_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "DR_PRES": "driver_presence",\n "DR_PRESNAME": "driver_presence_name",\n "L_STATE": "drivers_license_state",\n "L_STATENAME": "drivers_license_state_name",\n "DR_ZIP": "drivers_zip_code",\n "DR_ZIPNAME": "drivers_zip_code_name",\n "L_STATUS": "non_cdl_license_status",\n "L_STATUSNAME": "non_cdl_license_status_name",\n "L_TYPE": "non_cdl_license_type",\n "L_TYPENAME": "non_cdl_license_type_name",\n "CDL_STAT": "commercial_motor_vehicle_license_status",\n "CDL_STATNAME": "commercial_motor_vehicle_license_status_name",\n "L_ENDORS": "compliance_with_cdl_endorsements",\n "L_ENDORSNAME": "compliance_with_cdl_endorsements_name",\n "L_COMPL": "license_compliance_with_class_of_vehicle",\n "L_COMPLNAME": "license_compliance_with_class_of_vehicle_name",\n "L_RESTRI": "compliance_with_license_restrictions",\n "L_RESTRINAME": "compliance_with_license_restrictions_name",\n "DR_HGT": "driver_height",\n "DR_HGTNAME": "driver_height_name",\n "DR_WGT": "driver_weight",\n "DR_WGTNAME": "driver_weight_name",\n "PREV_ACC": "previous_recorded_crashes",\n "PREV_ACCNAME": "previous_recorded_crashes_name",\n "PREV_SUS": "previous_recorded_suspensions_and_revocations",\n "PREV_SUSNAME": "previous_recorded_suspensions_and_revocations_name",\n "PREV_DWI": "previous_dwi_convictions",\n "PREV_DWINAME": "previous_dwi_convictions_name",\n "PREV_SPD": "previous_speeding_convictions",\n "PREV_SPDNAME": "previous_speeding_convictions_name",\n "PREV_OTH": "previous_other_moving_violation_convictions",\n "PREV_OTHNAME": "previous_other_moving_violation_convictions_name",\n "FIRST_MO": "month_of_first_crash_suspension_or_conviction",\n "FIRST_MONAME": "month_of_first_crash_suspension_or_conviction_name",\n "FIRST_YR": "year_of_first_crash_suspension_or_conviction",\n "FIRST_YRNAME": "year_of_first_crash_suspension_or_conviction_name",\n "LAST_MO": "month_of_last_crash_suspension_or_conviction",\n "LAST_MONAME": "month_of_last_crash_suspension_or_conviction_name",\n "LAST_YR": "year_of_last_crash_suspension_or_conviction",\n "LAST_YRNAME": "year_of_last_crash_suspension_or_conviction_name",\n "SPEEDREL": "speeding_related",\n "SPEEDRELNAME": "speeding_related_name",\n "DR_SF1": "related_factors_driver_level_1",\n "DR_SF1NAME": "related_factors_driver_level_1_name",\n "DR_SF2": "related_factors_driver_level_2",\n "DR_SF2NAME": "related_factors_driver_level_2_name",\n "DR_SF3": "related_factors_driver_level_3",\n "DR_SF3NAME": "related_factors_driver_level_3_name",\n "DR_SF4": "related_factors_driver_level_4",\n "DR_SF4NAME": "related_factors_driver_level_4_name",\n "VTRAFWAY": "trafficway_description",\n "VTRAFWAYNAME": "trafficway_description_name",\n "VNUM_LAN": "total_lanes_in_roadway",\n "VNUM_LANNAME": "total_lanes_in_roadway_name",\n "VSPD_LIM": "speed_limit",\n "VSPD_LIMNAME": "speed_limit_name",\n "VALIGN": "roadway_alignment",\n "VALIGNNAME": "roadway_alignment_name",\n "VPROFILE": 
"roadway_grade",\n "VPROFILENAME": "roadway_grade_name",\n "VPAVETYP": "roadway_surface_type",\n "VPAVETYPNAME": "roadway_surface_type_name",\n "VSURCOND": "roadway_surface_condition",\n "VSURCONDNAME": "roadway_surface_condition_name",\n "VTRAFCON": "traffic_control_device",\n "VTRAFCONNAME": "traffic_control_device_name",\n "VTCONT_F": "traffic_control_device_functioning",\n "VTCONT_FNAME": "traffic_control_device_functioning_name",\n "P_CRASH1": "pre_event_movement_prior_to_recognition_of_critical_event",\n "P_CRASH1NAME": "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "P_CRASH2": "critical_event_precrash",\n "P_CRASH2NAME": "critical_event_precrash_name",\n "P_CRASH3": "attempted_avoidance_maneuver",\n "P_CRASH3NAME": "attempted_avoidance_maneuver_name",\n "PCRASH4": "pre_impact_stability",\n "PCRASH4NAME": "pre_impact_stability_name",\n "PCRASH5": "pre_impact_location",\n "PCRASH5NAME": "pre_impact_location_name",\n "ACC_TYPE": "crash_type",\n "ACC_TYPENAME": "crash_type_name",\n "DEATHS": "fatalities_in_vehicle",\n "DR_DRINK": "driver_drinking",\n "DR_DRINKNAME": "driver_drinking_name",\n "TRLR1VIN": "trailer_vehicle_identification_number_1",\n "TRLR1VINNAME": "trailer_vehicle_identification_number_1_name",\n "TRLR2VIN": "trailer_vehicle_identification_number_2",\n "TRLR2VINNAME": "trailer_vehicle_identification_number_2_name",\n "TRLR3VIN": "trailer_vehicle_identification_number_3",\n "TRLR3VINNAME": "trailer_vehicle_identification_number_3_name"\n}',
},
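        # Kubernetes resource requests for the transform pod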
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the vehicle 2018-2019 pipeline
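    # Unlike earlier years, the 2018-2019 vehicle file splits PREV_SUS into PREV_SUS1/2/3
    # (previous_recorded_suspensions_and_revocations1-3) in the headers, dtypes and
    # rename mappings below.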
vehicle_2018_2019_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vehicle_2018_2019_transform_csv",
startup_timeout_seconds=600,
name="vehicle",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vehicle.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2018_2019.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "jackknife",\n "jackknife_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number_mcid",\n "motor_carrier_identification_number_mcid_name",\n "gross_vehicle_weight_rating",\n "gross_vehicle_weight_rating_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "travel_speed",\n "travel_speed_name",\n "underride_override",\n "underride_override_name",\n "rollover",\n "rollover_name",\n "location_of_rollover",\n "location_of_rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "related_factors_vehicle_level_1",\n "related_factors_vehicle_level_1_name",\n "related_factors_vehicle_level_2",\n "related_factors_vehicle_level_2_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "driver_presence",\n "driver_presence_name",\n "drivers_license_state",\n "drivers_license_state_name",\n "drivers_zip_code",\n "drivers_zip_code_name",\n "non_cdl_license_status",\n "non_cdl_license_status_name",\n "non_cdl_license_type",\n "non_cdl_license_type_name",\n "commercial_motor_vehicle_license_status",\n "commercial_motor_vehicle_license_status_name",\n "compliance_with_cdl_endorsements",\n "compliance_with_cdl_endorsements_name",\n "license_compliance_with_class_of_vehicle",\n "license_compliance_with_class_of_vehicle_name",\n "compliance_with_license_restrictions",\n "compliance_with_license_restrictions_name",\n 
"driver_height",\n "driver_height_name",\n "driver_weight",\n "driver_weight_name",\n "previous_recorded_crashes",\n "previous_recorded_crashes_name",\n "previous_recorded_suspensions_and_revocations1",\n "previous_recorded_suspensions_and_revocations1_name",\n "previous_recorded_suspensions_and_revocations2",\n "previous_recorded_suspensions_and_revocations2_name",\n "previous_recorded_suspensions_and_revocations3",\n "previous_recorded_suspensions_and_revocations3_name",\n "previous_dwi_convictions",\n "previous_dwi_convictions_name",\n "previous_speeding_convictions",\n "previous_speeding_convictions_name",\n "previous_other_moving_violation_convictions",\n "previous_other_moving_violation_convictions_name",\n "month_of_first_crash_suspension_or_conviction",\n "month_of_first_crash_suspension_or_conviction_name",\n "year_of_first_crash_suspension_or_conviction",\n "year_of_first_crash_suspension_or_conviction_name",\n "month_of_last_crash_suspension_or_conviction",\n "month_of_last_crash_suspension_or_conviction_name",\n "year_of_last_crash_suspension_or_conviction",\n "year_of_last_crash_suspension_or_conviction_name",\n "speeding_related",\n "speeding_related_name",\n "related_factors_driver_level_1",\n "related_factors_driver_level_1_name",\n "related_factors_driver_level_2",\n "related_factors_driver_level_2_name",\n "related_factors_driver_level_3",\n "related_factors_driver_level_3_name",\n "related_factors_driver_level_4",\n "related_factors_driver_level_4_name",\n "trafficway_description",\n "trafficway_description_name",\n "total_lanes_in_roadway",\n "total_lanes_in_roadway_name",\n "speed_limit",\n "speed_limit_name",\n "roadway_alignment",\n "roadway_alignment_name",\n "roadway_grade",\n "roadway_grade_name",\n "roadway_surface_type",\n "roadway_surface_type_name",\n "roadway_surface_condition",\n "roadway_surface_condition_name",\n "traffic_control_device",\n "traffic_control_device_name",\n "traffic_control_device_functioning",\n "traffic_control_device_functioning_name",\n "pre_event_movement_prior_to_recognition_of_critical_event",\n "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "critical_event_precrash",\n "critical_event_precrash_name",\n "attempted_avoidance_maneuver",\n "attempted_avoidance_maneuver_name",\n "pre_impact_stability",\n "pre_impact_stability_name",\n "pre_impact_location",\n "pre_impact_location_name",\n "crash_type",\n "crash_type_name",\n "fatalities_in_vehicle",\n "driver_drinking",\n "driver_drinking_name",\n "trailer_vehicle_identification_number_1",\n "trailer_vehicle_identification_number_1_name",\n "trailer_vehicle_identification_number_2",\n "trailer_vehicle_identification_number_2_name",\n "trailer_vehicle_identification_number_3",\n "trailer_vehicle_identification_number_3_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "jackknife": "str",\n "jackknife_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number_mcid": "str",\n "motor_carrier_identification_number_mcid_name": "str",\n "gross_vehicle_weight_rating": "str",\n "gross_vehicle_weight_rating_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "travel_speed": "str",\n "travel_speed_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "location_of_rollover": "str",\n "location_of_rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "related_factors_vehicle_level_1": "str",\n "related_factors_vehicle_level_1_name": "str",\n "related_factors_vehicle_level_2": "str",\n 
"related_factors_vehicle_level_2_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "driver_presence": "str",\n "driver_presence_name": "str",\n "drivers_license_state": "str",\n "drivers_license_state_name": "str",\n "drivers_zip_code": "str",\n "drivers_zip_code_name": "str",\n "non_cdl_license_status": "str",\n "non_cdl_license_status_name": "str",\n "non_cdl_license_type": "str",\n "non_cdl_license_type_name": "str",\n "commercial_motor_vehicle_license_status": "str",\n "commercial_motor_vehicle_license_status_name": "str",\n "compliance_with_cdl_endorsements": "str",\n "compliance_with_cdl_endorsements_name": "str",\n "license_compliance_with_class_of_vehicle": "str",\n "license_compliance_with_class_of_vehicle_name": "str",\n "compliance_with_license_restrictions": "str",\n "compliance_with_license_restrictions_name": "str",\n "driver_height": "str",\n "driver_height_name": "str",\n "driver_weight": "str",\n "driver_weight_name": "str",\n "previous_recorded_crashes": "str",\n "previous_recorded_crashes_name": "str",\n "previous_recorded_suspensions_and_revocations1": "str",\n "previous_recorded_suspensions_and_revocations1_name": "str",\n "previous_recorded_suspensions_and_revocations2": "str",\n "previous_recorded_suspensions_and_revocations2_name": "str",\n "previous_recorded_suspensions_and_revocations3": "str",\n "previous_recorded_suspensions_and_revocations3_name": "str",\n "previous_dwi_convictions": "str",\n "previous_dwi_convictions_name": "str",\n "previous_speeding_convictions": "str",\n "previous_speeding_convictions_name": "str",\n "previous_other_moving_violation_convictions": "str",\n "previous_other_moving_violation_convictions_name": "str",\n "month_of_first_crash_suspension_or_conviction": "str",\n "month_of_first_crash_suspension_or_conviction_name": "str",\n "year_of_first_crash_suspension_or_conviction": "str",\n "year_of_first_crash_suspension_or_conviction_name": "str",\n "month_of_last_crash_suspension_or_conviction": "str",\n "month_of_last_crash_suspension_or_conviction_name": "str",\n "year_of_last_crash_suspension_or_conviction": "str",\n "year_of_last_crash_suspension_or_conviction_name": "str",\n "speeding_related": "str",\n "speeding_related_name": "str",\n "related_factors_driver_level_1": "str",\n "related_factors_driver_level_1_name": "str",\n "related_factors_driver_level_2": "str",\n "related_factors_driver_level_2_name": "str",\n "related_factors_driver_level_3": "str",\n "related_factors_driver_level_3_name": "str",\n "related_factors_driver_level_4": "str",\n "related_factors_driver_level_4_name": "str",\n "trafficway_description": "str",\n "trafficway_description_name": "str",\n "total_lanes_in_roadway": "str",\n "total_lanes_in_roadway_name": "str",\n "speed_limit": "str",\n "speed_limit_name": "str",\n "roadway_alignment": "str",\n "roadway_alignment_name": "str",\n "roadway_grade": "str",\n "roadway_grade_name": "str",\n "roadway_surface_type": "str",\n "roadway_surface_type_name": "str",\n "roadway_surface_condition": "str",\n "roadway_surface_condition_name": "str",\n "traffic_control_device": "str",\n "traffic_control_device_name": "str",\n "traffic_control_device_functioning": "str",\n "traffic_control_device_functioning_name": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event_name": "str",\n "critical_event_precrash": "str",\n "critical_event_precrash_name": "str",\n "attempted_avoidance_maneuver": "str",\n 
"attempted_avoidance_maneuver_name": "str",\n "pre_impact_stability": "str",\n "pre_impact_stability_name": "str",\n "pre_impact_location": "str",\n "pre_impact_location_name": "str",\n "crash_type": "str",\n "crash_type_name": "str",\n "fatalities_in_vehicle": "str",\n "driver_drinking": "str",\n "driver_drinking_name": "str",\n "trailer_vehicle_identification_number_1": "str",\n "trailer_vehicle_identification_number_1_name": "str",\n "trailer_vehicle_identification_number_2": "str",\n "trailer_vehicle_identification_number_2_name": "str",\n "trailer_vehicle_identification_number_3": "str",\n "trailer_vehicle_identification_number_3_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "NUMOCCS": "number_of_occupants",\n "NUMOCCSNAME": "number_of_occupants_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "UNITTYPE": "unit_type",\n "UNITTYPENAME": "unit_type_name",\n "HIT_RUN": "hit_and_run",\n "HIT_RUNNAME": "hit_and_run_name",\n "REG_STAT": "registration_state",\n "REG_STATNAME": "registration_state_name",\n "OWNER": "registered_vehicle_owner",\n "OWNERNAME": "registered_vehicle_owner_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MODEL": "vehicle_model",\n "MAK_MOD": "make_model_combined",\n "MAK_MODNAME": "make_model_combined_name",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "vehicle_model_year",\n "MOD_YEARNAME": "vehicle_model_year_name",\n "VIN": "vehicle_identification_number_vin",\n "VINNAME": "vehicle_identification_number_vin_name",\n "VIN_1": "vin_character_1",\n "VIN_2": "vin_character_2",\n "VIN_3": "vin_character_3",\n "VIN_4": "vin_character_4",\n "VIN_5": "vin_character_5",\n "VIN_6": "vin_character_6",\n "VIN_7": "vin_character_7",\n "VIN_8": "vin_character_8",\n "VIN_9": "vin_character_9",\n "VIN_10": "vin_character_10",\n "VIN_11": "vin_character_11",\n "VIN_12": "vin_character_12",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "J_KNIFE": "jackknife",\n "J_KNIFENAME": "jackknife_name",\n "MCARR_I1": "mcid_issuing_authority",\n "MCARR_I1NAME": "mcid_issuing_authority_name",\n "MCARR_I2": "mcid_identification_number",\n "MCARR_I2NAME": "mcid_identification_number_name",\n "MCARR_ID": "motor_carrier_identification_number_mcid",\n "MCARR_IDNAME": "motor_carrier_identification_number_mcid_name",\n "GVWR": "gross_vehicle_weight_rating",\n "GVWRNAME": "gross_vehicle_weight_rating_name",\n "V_CONFIG": "vehicle_configuration",\n "V_CONFIGNAME": "vehicle_configuration_name",\n "CARGO_BT": "cargo_body_type",\n "CARGO_BTNAME": "cargo_body_type_name",\n "HAZ_INV": "hazardous_material_involvement",\n "HAZ_INVNAME": "hazardous_material_involvement_name",\n "HAZ_PLAC": "hazardous_material_placard",\n "HAZ_PLACNAME": "hazardous_material_placard_name",\n "HAZ_ID": "hazardous_material_identification_number",\n "HAZ_IDNAME": "hazardous_material_identification_number_name",\n "HAZ_CNO": "hazardous_material_class_number",\n "HAZ_CNONAME": "hazardous_material_class_number_name",\n "HAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "HAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "BUS_USE": "bus_use",\n "BUS_USENAME": "bus_use_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "TRAV_SP": "travel_speed",\n "TRAV_SPNAME": "travel_speed_name",\n "UNDERIDE": "underride_override",\n "UNDERIDENAME": "underride_override_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "ROLINLOC": "location_of_rollover",\n 
"ROLINLOCNAME": "location_of_rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": "initial_contact_point_name",\n "DEFORMED": "extent_of_damage",\n "DEFORMEDNAME": "extent_of_damage_name",\n "TOWED": "vehicle_removal",\n "TOWEDNAME": "vehicle_removal_name",\n "M_HARM": "most_harmful_event",\n "M_HARMNAME": "most_harmful_event_name",\n "VEH_SC1": "related_factors_vehicle_level_1",\n "VEH_SC1NAME": "related_factors_vehicle_level_1_name",\n "VEH_SC2": "related_factors_vehicle_level_2",\n "VEH_SC2NAME": "related_factors_vehicle_level_2_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "DR_PRES": "driver_presence",\n "DR_PRESNAME": "driver_presence_name",\n "L_STATE": "drivers_license_state",\n "L_STATENAME": "drivers_license_state_name",\n "DR_ZIP": "drivers_zip_code",\n "DR_ZIPNAME": "drivers_zip_code_name",\n "L_STATUS": "non_cdl_license_status",\n "L_STATUSNAME": "non_cdl_license_status_name",\n "L_TYPE": "non_cdl_license_type",\n "L_TYPENAME": "non_cdl_license_type_name",\n "CDL_STAT": "commercial_motor_vehicle_license_status",\n "CDL_STATNAME": "commercial_motor_vehicle_license_status_name",\n "L_ENDORS": "compliance_with_cdl_endorsements",\n "L_ENDORSNAME": "compliance_with_cdl_endorsements_name",\n "L_COMPL": "license_compliance_with_class_of_vehicle",\n "L_COMPLNAME": "license_compliance_with_class_of_vehicle_name",\n "L_RESTRI": "compliance_with_license_restrictions",\n "L_RESTRINAME": "compliance_with_license_restrictions_name",\n "DR_HGT": "driver_height",\n "DR_HGTNAME": "driver_height_name",\n "DR_WGT": "driver_weight",\n "DR_WGTNAME": "driver_weight_name",\n "PREV_ACC": "previous_recorded_crashes",\n "PREV_ACCNAME": "previous_recorded_crashes_name",\n "PREV_SUS1": "previous_recorded_suspensions_and_revocations1",\n "PREV_SUS1NAME": "previous_recorded_suspensions_and_revocations1_name",\n "PREV_SUS2": "previous_recorded_suspensions_and_revocations2",\n "PREV_SUS2NAME": "previous_recorded_suspensions_and_revocations2_name",\n "PREV_SUS3": "previous_recorded_suspensions_and_revocations3",\n "PREV_SUS3NAME": "previous_recorded_suspensions_and_revocations3_name",\n "PREV_DWI": "previous_dwi_convictions",\n "PREV_DWINAME": "previous_dwi_convictions_name",\n "PREV_SPD": "previous_speeding_convictions",\n "PREV_SPDNAME": "previous_speeding_convictions_name",\n "PREV_OTH": "previous_other_moving_violation_convictions",\n "PREV_OTHNAME": "previous_other_moving_violation_convictions_name",\n "FIRST_MO": "month_of_first_crash_suspension_or_conviction",\n "FIRST_MONAME": "month_of_first_crash_suspension_or_conviction_name",\n "FIRST_YR": "year_of_first_crash_suspension_or_conviction",\n "FIRST_YRNAME": "year_of_first_crash_suspension_or_conviction_name",\n "LAST_MO": "month_of_last_crash_suspension_or_conviction",\n "LAST_MONAME": "month_of_last_crash_suspension_or_conviction_name",\n "LAST_YR": "year_of_last_crash_suspension_or_conviction",\n "LAST_YRNAME": "year_of_last_crash_suspension_or_conviction_name",\n "SPEEDREL": "speeding_related",\n "SPEEDRELNAME": "speeding_related_name",\n "DR_SF1": "related_factors_driver_level_1",\n "DR_SF1NAME": "related_factors_driver_level_1_name",\n "DR_SF2": "related_factors_driver_level_2",\n "DR_SF2NAME": "related_factors_driver_level_2_name",\n "DR_SF3": "related_factors_driver_level_3",\n "DR_SF3NAME": "related_factors_driver_level_3_name",\n "DR_SF4": "related_factors_driver_level_4",\n "DR_SF4NAME": "related_factors_driver_level_4_name",\n "VTRAFWAY": "trafficway_description",\n "VTRAFWAYNAME": 
"trafficway_description_name",\n "VNUM_LAN": "total_lanes_in_roadway",\n "VNUM_LANNAME": "total_lanes_in_roadway_name",\n "VSPD_LIM": "speed_limit",\n "VSPD_LIMNAME": "speed_limit_name",\n "VALIGN": "roadway_alignment",\n "VALIGNNAME": "roadway_alignment_name",\n "VPROFILE": "roadway_grade",\n "VPROFILENAME": "roadway_grade_name",\n "VPAVETYP": "roadway_surface_type",\n "VPAVETYPNAME": "roadway_surface_type_name",\n "VSURCOND": "roadway_surface_condition",\n "VSURCONDNAME": "roadway_surface_condition_name",\n "VTRAFCON": "traffic_control_device",\n "VTRAFCONNAME": "traffic_control_device_name",\n "VTCONT_F": "traffic_control_device_functioning",\n "VTCONT_FNAME": "traffic_control_device_functioning_name",\n "P_CRASH1": "pre_event_movement_prior_to_recognition_of_critical_event",\n "P_CRASH1NAME": "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "P_CRASH2": "critical_event_precrash",\n "P_CRASH2NAME": "critical_event_precrash_name",\n "P_CRASH3": "attempted_avoidance_maneuver",\n "P_CRASH3NAME": "attempted_avoidance_maneuver_name",\n "PCRASH4": "pre_impact_stability",\n "PCRASH4NAME": "pre_impact_stability_name",\n "PCRASH5": "pre_impact_location",\n "PCRASH5NAME": "pre_impact_location_name",\n "ACC_TYPE": "crash_type",\n "ACC_TYPENAME": "crash_type_name",\n "DEATHS": "fatalities_in_vehicle",\n "DR_DRINK": "driver_drinking",\n "DR_DRINKNAME": "driver_drinking_name",\n "TRLR1VIN": "trailer_vehicle_identification_number_1",\n "TRLR1VINNAME": "trailer_vehicle_identification_number_1_name",\n "TRLR2VIN": "trailer_vehicle_identification_number_2",\n "TRLR2VINNAME": "trailer_vehicle_identification_number_2_name",\n "TRLR3VIN": "trailer_vehicle_identification_number_3",\n "TRLR3VINNAME": "trailer_vehicle_identification_number_3_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
    # Run CSV transform within kubernetes pod for the vehicle 2020 pipeline
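    # The 2020 vehicle file adds the vPIC vehicle descriptors (vpic_make, vpic_model,
    # vpic_body_class, final_stage_body_class) and GVWR from/to range fields, and drops
    # the single gross_vehicle_weight_rating and the vehicle/driver related-factor columns,
    # so its headers, dtypes and rename mappings differ from the earlier-year pipelines.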
vehicle_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vehicle_2020_transform_csv",
startup_timeout_seconds=600,
name="vehicle",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vehicle.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vehicle_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "number_of_motor_vehicles_in_transport_mvit",\n "number_of_occupants",\n "number_of_occupants_name",\n "day_of_crash",\n "day_of_crash_name",\n "month_of_crash",\n "month_of_crash_name",\n "hour_of_crash",\n "hour_of_crash_name",\n "minute_of_crash",\n "minute_of_crash_name",\n "first_harmful_event",\n "first_harmful_event_name",\n "manner_of_collision",\n "manner_of_collision_name",\n "unit_type",\n "unit_type_name",\n "hit_and_run",\n "hit_and_run_name",\n "registration_state",\n "registration_state_name",\n "registered_vehicle_owner",\n "registered_vehicle_owner_name",\n "vehicle_make",\n "vehicle_make_name",\n "vehicle_model",\n "make_model_combined",\n "make_model_combined_name",\n "body_type",\n "body_type_name",\n "vehicle_model_year",\n "vehicle_model_year_name",\n "vehicle_identification_number_vin",\n "vehicle_identification_number_vin_name",\n "vin_character_1",\n "vin_character_2",\n "vin_character_3",\n "vin_character_4",\n "vin_character_5",\n "vin_character_6",\n "vin_character_7",\n "vin_character_8",\n "vin_character_9",\n "vin_character_10",\n "vin_character_11",\n "vin_character_12",\n "vehicle_trailing",\n "vehicle_trailing_name",\n "jackknife",\n "jackknife_name",\n "mcid_issuing_authority",\n "mcid_issuing_authority_name",\n "mcid_identification_number",\n "mcid_identification_number_name",\n "motor_carrier_identification_number_mcid",\n "motor_carrier_identification_number_mcid_name",\n "vehicle_configuration",\n "vehicle_configuration_name",\n "cargo_body_type",\n "cargo_body_type_name",\n "hazardous_material_involvement",\n "hazardous_material_involvement_name",\n "hazardous_material_placard",\n "hazardous_material_placard_name",\n "hazardous_material_identification_number",\n "hazardous_material_identification_number_name",\n "hazardous_material_class_number",\n "hazardous_material_class_number_name",\n "release_of_hazardous_material_from_the_cargo_compartment",\n "release_of_hazardous_material_from_the_cargo_compartment_name",\n "bus_use",\n "bus_use_name",\n "special_use",\n "special_use_name",\n "emergency_motor_vehicle_use",\n "emergency_motor_vehicle_use_name",\n "travel_speed",\n "travel_speed_name",\n "underride_override",\n "underride_override_name",\n "rollover",\n "rollover_name",\n "location_of_rollover",\n "location_of_rollover_name",\n "initial_contact_point",\n "initial_contact_point_name",\n "extent_of_damage",\n "extent_of_damage_name",\n "vehicle_removal",\n "vehicle_removal_name",\n "most_harmful_event",\n "most_harmful_event_name",\n "fire_occurrence",\n "fire_occurrence_name",\n "driver_presence",\n "driver_presence_name",\n "drivers_license_state",\n "drivers_license_state_name",\n "drivers_zip_code",\n "drivers_zip_code_name",\n "non_cdl_license_status",\n "non_cdl_license_status_name",\n "non_cdl_license_type",\n "non_cdl_license_type_name",\n "commercial_motor_vehicle_license_status",\n "commercial_motor_vehicle_license_status_name",\n "compliance_with_cdl_endorsements",\n "compliance_with_cdl_endorsements_name",\n "license_compliance_with_class_of_vehicle",\n "license_compliance_with_class_of_vehicle_name",\n "compliance_with_license_restrictions",\n "compliance_with_license_restrictions_name",\n "driver_height",\n "driver_height_name",\n "driver_weight",\n "driver_weight_name",\n "previous_recorded_crashes",\n "previous_recorded_crashes_name",\n "previous_recorded_suspensions_and_revocations1",\n 
"previous_recorded_suspensions_and_revocations1_name",\n "previous_recorded_suspensions_and_revocations2",\n "previous_recorded_suspensions_and_revocations2_name",\n "previous_recorded_suspensions_and_revocations3",\n "previous_recorded_suspensions_and_revocations3_name",\n "previous_dwi_convictions",\n "previous_dwi_convictions_name",\n "previous_speeding_convictions",\n "previous_speeding_convictions_name",\n "previous_other_moving_violation_convictions",\n "previous_other_moving_violation_convictions_name",\n "month_of_first_crash_suspension_or_conviction",\n "month_of_first_crash_suspension_or_conviction_name",\n "year_of_first_crash_suspension_or_conviction",\n "year_of_first_crash_suspension_or_conviction_name",\n "month_of_last_crash_suspension_or_conviction",\n "month_of_last_crash_suspension_or_conviction_name",\n "year_of_last_crash_suspension_or_conviction",\n "year_of_last_crash_suspension_or_conviction_name",\n "speeding_related",\n "speeding_related_name",\n "trafficway_description",\n "trafficway_description_name",\n "total_lanes_in_roadway",\n "total_lanes_in_roadway_name",\n "speed_limit",\n "speed_limit_name",\n "roadway_alignment",\n "roadway_alignment_name",\n "roadway_grade",\n "roadway_grade_name",\n "roadway_surface_type",\n "roadway_surface_type_name",\n "roadway_surface_condition",\n "roadway_surface_condition_name",\n "traffic_control_device",\n "traffic_control_device_name",\n "traffic_control_device_functioning",\n "traffic_control_device_functioning_name",\n "pre_event_movement_prior_to_recognition_of_critical_event",\n "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "critical_event_precrash",\n "critical_event_precrash_name",\n "attempted_avoidance_maneuver",\n "attempted_avoidance_maneuver_name",\n "pre_impact_stability",\n "pre_impact_stability_name",\n "pre_impact_location",\n "pre_impact_location_name",\n "crash_type",\n "crash_type_name",\n "fatalities_in_vehicle",\n "driver_drinking",\n "driver_drinking_name",\n "trailer_vehicle_identification_number_1",\n "trailer_vehicle_identification_number_1_name",\n "trailer_vehicle_identification_number_2",\n "trailer_vehicle_identification_number_2_name",\n "trailer_vehicle_identification_number_3",\n "trailer_vehicle_identification_number_3_name",\n "vpic_make",\n "vpic_make_name",\n "vpic_model",\n "vpic_model_name",\n "vpic_body_class",\n "vpic_body_class_name",\n "final_stage_body_class",\n "final_stage_body_class_name",\n "power_unit_gross_vehicle_weight_rating_from",\n "power_unit_gross_vehicle_weight_rating_from_name",\n "power_unit_gross_vehicle_weight_rating_to",\n "power_unit_gross_vehicle_weight_rating_to_name",\n "trailer_gross_vehicle_weight_rating_1",\n "trailer_gross_vehicle_weight_rating_1_name",\n "trailer_gross_vehicle_weight_rating_2",\n "trailer_gross_vehicle_weight_rating_2_name",\n "trailer_gross_vehicle_weight_rating_3",\n "trailer_gross_vehicle_weight_rating_3_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "number_of_motor_vehicles_in_transport_mvit": "str",\n "number_of_occupants": "str",\n "number_of_occupants_name": "str",\n "day_of_crash": "str",\n "day_of_crash_name": "str",\n "month_of_crash": "str",\n "month_of_crash_name": "str",\n "hour_of_crash": "str",\n "hour_of_crash_name": "str",\n "minute_of_crash": "str",\n "minute_of_crash_name": "str",\n "first_harmful_event": "str",\n "first_harmful_event_name": "str",\n "manner_of_collision": "str",\n "manner_of_collision_name": "str",\n "unit_type": "str",\n "unit_type_name": "str",\n "hit_and_run": "str",\n "hit_and_run_name": "str",\n "registration_state": "str",\n "registration_state_name": "str",\n "registered_vehicle_owner": "str",\n "registered_vehicle_owner_name": "str",\n "vehicle_make": "str",\n "vehicle_make_name": "str",\n "vehicle_model": "str",\n "make_model_combined": "str",\n "make_model_combined_name": "str",\n "body_type": "str",\n "body_type_name": "str",\n "vehicle_model_year": "str",\n "vehicle_model_year_name": "str",\n "vehicle_identification_number_vin": "str",\n "vehicle_identification_number_vin_name": "str",\n "vin_character_1": "str",\n "vin_character_2": "str",\n "vin_character_3": "str",\n "vin_character_4": "str",\n "vin_character_5": "str",\n "vin_character_6": "str",\n "vin_character_7": "str",\n "vin_character_8": "str",\n "vin_character_9": "str",\n "vin_character_10": "str",\n "vin_character_11": "str",\n "vin_character_12": "str",\n "vehicle_trailing": "str",\n "vehicle_trailing_name": "str",\n "jackknife": "str",\n "jackknife_name": "str",\n "mcid_issuing_authority": "str",\n "mcid_issuing_authority_name": "str",\n "mcid_identification_number": "str",\n "mcid_identification_number_name": "str",\n "motor_carrier_identification_number_mcid": "str",\n "motor_carrier_identification_number_mcid_name": "str",\n "vehicle_configuration": "str",\n "vehicle_configuration_name": "str",\n "cargo_body_type": "str",\n "cargo_body_type_name": "str",\n "hazardous_material_involvement": "str",\n "hazardous_material_involvement_name": "str",\n "hazardous_material_placard": "str",\n "hazardous_material_placard_name": "str",\n "hazardous_material_identification_number": "str",\n "hazardous_material_identification_number_name": "str",\n "hazardous_material_class_number": "str",\n "hazardous_material_class_number_name": "str",\n "release_of_hazardous_material_from_the_cargo_compartment": "str",\n "release_of_hazardous_material_from_the_cargo_compartment_name": "str",\n "bus_use": "str",\n "bus_use_name": "str",\n "special_use": "str",\n "special_use_name": "str",\n "emergency_motor_vehicle_use": "str",\n "emergency_motor_vehicle_use_name": "str",\n "travel_speed": "str",\n "travel_speed_name": "str",\n "underride_override": "str",\n "underride_override_name": "str",\n "rollover": "str",\n "rollover_name": "str",\n "location_of_rollover": "str",\n "location_of_rollover_name": "str",\n "initial_contact_point": "str",\n "initial_contact_point_name": "str",\n "extent_of_damage": "str",\n "extent_of_damage_name": "str",\n "vehicle_removal": "str",\n "vehicle_removal_name": "str",\n "most_harmful_event": "str",\n "most_harmful_event_name": "str",\n "fire_occurrence": "str",\n "fire_occurrence_name": "str",\n "driver_presence": "str",\n "driver_presence_name": "str",\n "drivers_license_state": "str",\n "drivers_license_state_name": "str",\n "drivers_zip_code": "str",\n "drivers_zip_code_name": "str",\n 
"non_cdl_license_status": "str",\n "non_cdl_license_status_name": "str",\n "non_cdl_license_type": "str",\n "non_cdl_license_type_name": "str",\n "commercial_motor_vehicle_license_status": "str",\n "commercial_motor_vehicle_license_status_name": "str",\n "compliance_with_cdl_endorsements": "str",\n "compliance_with_cdl_endorsements_name": "str",\n "license_compliance_with_class_of_vehicle": "str",\n "license_compliance_with_class_of_vehicle_name": "str",\n "compliance_with_license_restrictions": "str",\n "compliance_with_license_restrictions_name": "str",\n "driver_height": "str",\n "driver_height_name": "str",\n "driver_weight": "str",\n "driver_weight_name": "str",\n "previous_recorded_crashes": "str",\n "previous_recorded_crashes_name": "str",\n "previous_recorded_suspensions_and_revocations1": "str",\n "previous_recorded_suspensions_and_revocations1_name": "str",\n "previous_recorded_suspensions_and_revocations2": "str",\n "previous_recorded_suspensions_and_revocations2_name": "str",\n "previous_recorded_suspensions_and_revocations3": "str",\n "previous_recorded_suspensions_and_revocations3_name": "str",\n "previous_dwi_convictions": "str",\n "previous_dwi_convictions_name": "str",\n "previous_speeding_convictions": "str",\n "previous_speeding_convictions_name": "str",\n "previous_other_moving_violation_convictions": "str",\n "previous_other_moving_violation_convictions_name": "str",\n "month_of_first_crash_suspension_or_conviction": "str",\n "month_of_first_crash_suspension_or_conviction_name": "str",\n "year_of_first_crash_suspension_or_conviction": "str",\n "year_of_first_crash_suspension_or_conviction_name": "str",\n "month_of_last_crash_suspension_or_conviction": "str",\n "month_of_last_crash_suspension_or_conviction_name": "str",\n "year_of_last_crash_suspension_or_conviction": "str",\n "year_of_last_crash_suspension_or_conviction_name": "str",\n "speeding_related": "str",\n "speeding_related_name": "str",\n "trafficway_description": "str",\n "trafficway_description_name": "str",\n "total_lanes_in_roadway": "str",\n "total_lanes_in_roadway_name": "str",\n "speed_limit": "str",\n "speed_limit_name": "str",\n "roadway_alignment": "str",\n "roadway_alignment_name": "str",\n "roadway_grade": "str",\n "roadway_grade_name": "str",\n "roadway_surface_type": "str",\n "roadway_surface_type_name": "str",\n "roadway_surface_condition": "str",\n "roadway_surface_condition_name": "str",\n "traffic_control_device": "str",\n "traffic_control_device_name": "str",\n "traffic_control_device_functioning": "str",\n "traffic_control_device_functioning_name": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event": "str",\n "pre_event_movement_prior_to_recognition_of_critical_event_name": "str",\n "critical_event_precrash": "str",\n "critical_event_precrash_name": "str",\n "attempted_avoidance_maneuver": "str",\n "attempted_avoidance_maneuver_name": "str",\n "pre_impact_stability": "str",\n "pre_impact_stability_name": "str",\n "pre_impact_location": "str",\n "pre_impact_location_name": "str",\n "crash_type": "str",\n "crash_type_name": "str",\n "fatalities_in_vehicle": "str",\n "driver_drinking": "str",\n "driver_drinking_name": "str",\n "trailer_vehicle_identification_number_1": "str",\n "trailer_vehicle_identification_number_1_name": "str",\n "trailer_vehicle_identification_number_2": "str",\n "trailer_vehicle_identification_number_2_name": "str",\n "trailer_vehicle_identification_number_3": "str",\n "trailer_vehicle_identification_number_3_name": "str",\n "vpic_make": "str",\n 
"vpic_make_name": "str",\n "vpic_model": "str",\n "vpic_model_name": "str",\n "vpic_body_class": "str",\n "vpic_body_class_name": "str",\n "final_stage_body_class": "str",\n "final_stage_body_class_name": "str",\n "power_unit_gross_vehicle_weight_rating_from": "str",\n "power_unit_gross_vehicle_weight_rating_from_name": "str",\n "power_unit_gross_vehicle_weight_rating_to": "str",\n "power_unit_gross_vehicle_weight_rating_to_name": "str",\n "trailer_gross_vehicle_weight_rating_1": "str",\n "trailer_gross_vehicle_weight_rating_1_name": "str",\n "trailer_gross_vehicle_weight_rating_2": "str",\n "trailer_gross_vehicle_weight_rating_2_name": "str",\n "trailer_gross_vehicle_weight_rating_3": "str",\n "trailer_gross_vehicle_weight_rating_3_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "VE_FORMS": "number_of_motor_vehicles_in_transport_mvit",\n "NUMOCCS": "number_of_occupants",\n "NUMOCCSNAME": "number_of_occupants_name",\n "DAY": "day_of_crash",\n "DAYNAME": "day_of_crash_name",\n "MONTH": "month_of_crash",\n "MONTHNAME": "month_of_crash_name",\n "HOUR": "hour_of_crash",\n "HOURNAME": "hour_of_crash_name",\n "MINUTE": "minute_of_crash",\n "MINUTENAME": "minute_of_crash_name",\n "HARM_EV": "first_harmful_event",\n "HARM_EVNAME": "first_harmful_event_name",\n "MAN_COLL": "manner_of_collision",\n "MAN_COLLNAME": "manner_of_collision_name",\n "UNITTYPE": "unit_type",\n "UNITTYPENAME": "unit_type_name",\n "HIT_RUN": "hit_and_run",\n "HIT_RUNNAME": "hit_and_run_name",\n "REG_STAT": "registration_state",\n "REG_STATNAME": "registration_state_name",\n "OWNER": "registered_vehicle_owner",\n "OWNERNAME": "registered_vehicle_owner_name",\n "MAKE": "vehicle_make",\n "MAKENAME": "vehicle_make_name",\n "MODEL": "vehicle_model",\n "MAK_MOD": "make_model_combined",\n "MAK_MODNAME": "make_model_combined_name",\n "BODY_TYP": "body_type",\n "BODY_TYPNAME": "body_type_name",\n "MOD_YEAR": "vehicle_model_year",\n "MOD_YEARNAME": "vehicle_model_year_name",\n "VIN": "vehicle_identification_number_vin",\n "VINNAME": "vehicle_identification_number_vin_name",\n "VIN_1": "vin_character_1",\n "VIN_2": "vin_character_2",\n "VIN_3": "vin_character_3",\n "VIN_4": "vin_character_4",\n "VIN_5": "vin_character_5",\n "VIN_6": "vin_character_6",\n "VIN_7": "vin_character_7",\n "VIN_8": "vin_character_8",\n "VIN_9": "vin_character_9",\n "VIN_10": "vin_character_10",\n "VIN_11": "vin_character_11",\n "VIN_12": "vin_character_12",\n "TOW_VEH": "vehicle_trailing",\n "TOW_VEHNAME": "vehicle_trailing_name",\n "J_KNIFE": "jackknife",\n "J_KNIFENAME": "jackknife_name",\n "MCARR_I1": "mcid_issuing_authority",\n "MCARR_I1NAME": "mcid_issuing_authority_name",\n "MCARR_I2": "mcid_identification_number",\n "MCARR_I2NAME": "mcid_identification_number_name",\n "MCARR_ID": "motor_carrier_identification_number_mcid",\n "MCARR_IDNAME": "motor_carrier_identification_number_mcid_name",\n "V_CONFIG": "vehicle_configuration",\n "V_CONFIGNAME": "vehicle_configuration_name",\n "CARGO_BT": "cargo_body_type",\n "CARGO_BTNAME": "cargo_body_type_name",\n "HAZ_INV": "hazardous_material_involvement",\n "HAZ_INVNAME": "hazardous_material_involvement_name",\n "HAZ_PLAC": "hazardous_material_placard",\n "HAZ_PLACNAME": "hazardous_material_placard_name",\n "HAZ_ID": "hazardous_material_identification_number",\n "HAZ_IDNAME": "hazardous_material_identification_number_name",\n "HAZ_CNO": "hazardous_material_class_number",\n "HAZ_CNONAME": "hazardous_material_class_number_name",\n "HAZ_REL": "release_of_hazardous_material_from_the_cargo_compartment",\n "HAZ_RELNAME": "release_of_hazardous_material_from_the_cargo_compartment_name",\n "BUS_USE": "bus_use",\n "BUS_USENAME": "bus_use_name",\n "SPEC_USE": "special_use",\n "SPEC_USENAME": "special_use_name",\n "EMER_USE": "emergency_motor_vehicle_use",\n "EMER_USENAME": "emergency_motor_vehicle_use_name",\n "TRAV_SP": "travel_speed",\n "TRAV_SPNAME": "travel_speed_name",\n "UNDERIDE": "underride_override",\n "UNDERIDENAME": "underride_override_name",\n "ROLLOVER": "rollover",\n "ROLLOVERNAME": "rollover_name",\n "ROLINLOC": "location_of_rollover",\n "ROLINLOCNAME": "location_of_rollover_name",\n "IMPACT1": "initial_contact_point",\n "IMPACT1NAME": 
"initial_contact_point_name",\n "DEFORMED": "extent_of_damage",\n "DEFORMEDNAME": "extent_of_damage_name",\n "TOWED": "vehicle_removal",\n "TOWEDNAME": "vehicle_removal_name",\n "M_HARM": "most_harmful_event",\n "M_HARMNAME": "most_harmful_event_name",\n "FIRE_EXP": "fire_occurrence",\n "FIRE_EXPNAME": "fire_occurrence_name",\n "DR_PRES": "driver_presence",\n "DR_PRESNAME": "driver_presence_name",\n "L_STATE": "drivers_license_state",\n "L_STATENAME": "drivers_license_state_name",\n "DR_ZIP": "drivers_zip_code",\n "DR_ZIPNAME": "drivers_zip_code_name",\n "L_STATUS": "non_cdl_license_status",\n "L_STATUSNAME": "non_cdl_license_status_name",\n "L_TYPE": "non_cdl_license_type",\n "L_TYPENAME": "non_cdl_license_type_name",\n "CDL_STAT": "commercial_motor_vehicle_license_status",\n "CDL_STATNAME": "commercial_motor_vehicle_license_status_name",\n "L_ENDORS": "compliance_with_cdl_endorsements",\n "L_ENDORSNAME": "compliance_with_cdl_endorsements_name",\n "L_COMPL": "license_compliance_with_class_of_vehicle",\n "L_COMPLNAME": "license_compliance_with_class_of_vehicle_name",\n "L_RESTRI": "compliance_with_license_restrictions",\n "L_RESTRINAME": "compliance_with_license_restrictions_name",\n "DR_HGT": "driver_height",\n "DR_HGTNAME": "driver_height_name",\n "DR_WGT": "driver_weight",\n "DR_WGTNAME": "driver_weight_name",\n "PREV_ACC": "previous_recorded_crashes",\n "PREV_ACCNAME": "previous_recorded_crashes_name",\n "PREV_SUS1": "previous_recorded_suspensions_and_revocations1",\n "PREV_SUS1NAME": "previous_recorded_suspensions_and_revocations1_name",\n "PREV_SUS2": "previous_recorded_suspensions_and_revocations2",\n "PREV_SUS2NAME": "previous_recorded_suspensions_and_revocations2_name",\n "PREV_SUS3": "previous_recorded_suspensions_and_revocations3",\n "PREV_SUS3NAME": "previous_recorded_suspensions_and_revocations3_name",\n "PREV_DWI": "previous_dwi_convictions",\n "PREV_DWINAME": "previous_dwi_convictions_name",\n "PREV_SPD": "previous_speeding_convictions",\n "PREV_SPDNAME": "previous_speeding_convictions_name",\n "PREV_OTH": "previous_other_moving_violation_convictions",\n "PREV_OTHNAME": "previous_other_moving_violation_convictions_name",\n "FIRST_MO": "month_of_first_crash_suspension_or_conviction",\n "FIRST_MONAME": "month_of_first_crash_suspension_or_conviction_name",\n "FIRST_YR": "year_of_first_crash_suspension_or_conviction",\n "FIRST_YRNAME": "year_of_first_crash_suspension_or_conviction_name",\n "LAST_MO": "month_of_last_crash_suspension_or_conviction",\n "LAST_MONAME": "month_of_last_crash_suspension_or_conviction_name",\n "LAST_YR": "year_of_last_crash_suspension_or_conviction",\n "LAST_YRNAME": "year_of_last_crash_suspension_or_conviction_name",\n "SPEEDREL": "speeding_related",\n "SPEEDRELNAME": "speeding_related_name",\n "VTRAFWAY": "trafficway_description",\n "VTRAFWAYNAME": "trafficway_description_name",\n "VNUM_LAN": "total_lanes_in_roadway",\n "VNUM_LANNAME": "total_lanes_in_roadway_name",\n "VSPD_LIM": "speed_limit",\n "VSPD_LIMNAME": "speed_limit_name",\n "VALIGN": "roadway_alignment",\n "VALIGNNAME": "roadway_alignment_name",\n "VPROFILE": "roadway_grade",\n "VPROFILENAME": "roadway_grade_name",\n "VPAVETYP": "roadway_surface_type",\n "VPAVETYPNAME": "roadway_surface_type_name",\n "VSURCOND": "roadway_surface_condition",\n "VSURCONDNAME": "roadway_surface_condition_name",\n "VTRAFCON": "traffic_control_device",\n "VTRAFCONNAME": "traffic_control_device_name",\n "VTCONT_F": "traffic_control_device_functioning",\n "VTCONT_FNAME": "traffic_control_device_functioning_name",\n 
"P_CRASH1": "pre_event_movement_prior_to_recognition_of_critical_event",\n "P_CRASH1NAME": "pre_event_movement_prior_to_recognition_of_critical_event_name",\n "P_CRASH2": "critical_event_precrash",\n "P_CRASH2NAME": "critical_event_precrash_name",\n "P_CRASH3": "attempted_avoidance_maneuver",\n "P_CRASH3NAME": "attempted_avoidance_maneuver_name",\n "PCRASH4": "pre_impact_stability",\n "PCRASH4NAME": "pre_impact_stability_name",\n "PCRASH5": "pre_impact_location",\n "PCRASH5NAME": "pre_impact_location_name",\n "ACC_TYPE": "crash_type",\n "ACC_TYPENAME": "crash_type_name",\n "DEATHS": "fatalities_in_vehicle",\n "DR_DRINK": "driver_drinking",\n "DR_DRINKNAME": "driver_drinking_name",\n "TRLR1VIN": "trailer_vehicle_identification_number_1",\n "TRLR1VINNAME": "trailer_vehicle_identification_number_1_name",\n "TRLR2VIN": "trailer_vehicle_identification_number_2",\n "TRLR2VINNAME": "trailer_vehicle_identification_number_2_name",\n "TRLR3VIN": "trailer_vehicle_identification_number_3",\n "TRLR3VINNAME": "trailer_vehicle_identification_number_3_name",\n "VPICMAKE": "vpic_make",\n "VPICMAKENAME": "vpic_make_name",\n "VPICMODEL": "vpic_model",\n "VPICMODELNAME": "vpic_model_name",\n "VPICBODYCLASS": "vpic_body_class",\n "VPICBODYCLASSNAME": "vpic_body_class_name",\n "ICFINALBODY": "final_stage_body_class",\n "ICFINALBODYNAME": "final_stage_body_class_name",\n "GVWR_FROM": "power_unit_gross_vehicle_weight_rating_from",\n "GVWR_FROMNAME": "power_unit_gross_vehicle_weight_rating_from_name",\n "GVWR_TO": "power_unit_gross_vehicle_weight_rating_to",\n "GVWR_TONAME": "power_unit_gross_vehicle_weight_rating_to_name",\n "TRLR1GVWR": "trailer_gross_vehicle_weight_rating_1",\n "TRLR1GVWRNAME": "trailer_gross_vehicle_weight_rating_1_name",\n "TRLR2GVWR": "trailer_gross_vehicle_weight_rating_2",\n "TRLR2GVWRNAME": "trailer_gross_vehicle_weight_rating_2_name",\n "TRLR3GVWR": "trailer_gross_vehicle_weight_rating_3",\n "TRLR3GVWRNAME": "trailer_gross_vehicle_weight_rating_3_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for vevent pipelines
vevent_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vevent_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="vevent",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vevent.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vevent_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "event_number",\n "vehicle_number",\n "vehicle_event_number",\n "vehicle_number_this_vehicle",\n "area_of_impact_this_vehicle",\n "area_of_impact_this_vehicle_name",\n "sequence_of_events",\n "sequence_of_events_name",\n "vehicle_number_other_vehicle",\n "vehicle_number_other_vehicle_name",\n "area_of_impact_other_vehicle",\n "area_of_impact_other_vehicle_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "event_number": "str",\n "vehicle_number": "str",\n "vehicle_event_number": "str",\n "vehicle_number_this_vehicle": "str",\n "area_of_impact_this_vehicle": "str",\n "area_of_impact_this_vehicle_name": "str",\n "sequence_of_events": "str",\n "sequence_of_events_name": "str",\n "vehicle_number_other_vehicle": "str",\n "vehicle_number_other_vehicle_name": "str",\n "area_of_impact_other_vehicle": "str",\n "area_of_impact_other_vehicle_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "EVENTNUM": "event_number",\n "VEH_NO": "vehicle_number",\n "VEVENTNUM": "vehicle_event_number",\n "VNUMBER1": "vehicle_number_this_vehicle",\n "AOI1": "area_of_impact_this_vehicle",\n "AOI1NAME": "area_of_impact_this_vehicle_name",\n "SOE": "sequence_of_events",\n "SOENAME": "sequence_of_events_name",\n "VNUMBER2": "vehicle_number_other_vehicle",\n "VNUMBER2NAME": "vehicle_number_other_vehicle_name",\n "AOI2": "area_of_impact_other_vehicle",\n "AOI2NAME": "area_of_impact_other_vehicle_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for vindecode pipelines
vindecode_2015_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vindecode_2015_transform_csv",
startup_timeout_seconds=600,
name="vindecode",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vindecode.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vindecode_2015.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "vehicle_make",\n "marketing_year",\n "vehicle_type_code",\n "vehicle_type",\n "make_name",\n "model_code",\n "vehicle_trim",\n "vehicle_trim_1",\n "vehicle_trim_2",\n "vehicle_trim_3",\n "vehicle_trim_4",\n "body_style_code",\n "body_style",\n "num_of_doors",\n "number_of_wheels",\n "num_of_wheels_by_power_train",\n "vehicle_manufacturer_code",\n "vehicle_manufacturer_name",\n "displacement_cid",\n "displacement_cc",\n "cylinder_count_code",\n "cycle_count",\n "fuel_code",\n "fuel",\n "type_of_fuel_code",\n "type_of_fuel",\n "carburetion_types_code",\n "carburetion_types",\n "num_of_barrels",\n "gross_vehicle_weights_range_code",\n "gross_vehicle_weights_range",\n "distance_between_axles_for_base_model",\n "distance_between_axles_for_particular_series",\n "front_tire",\n "front_tire_pressure",\n "front_tire_size_code",\n "front_tire_size",\n "rear_tire",\n "rear_tire_pressure",\n "rear_tire_size_code",\n "rear_tire_size",\n "tonnage_rating",\n "shipping_weight",\n "base_price",\n "drive_type_1",\n "drive_type_2",\n "country_sold_code",\n "country_sold",\n "brakes_abs_code",\n "brakes_abs_description",\n "security_type_code",\n "security_type",\n "daytime_running_lights_1",\n "daytime_running_lights_2",\n "restraint_type_code",\n "restraint_type",\n "cab_configuration_code",\n "cab_configuration",\n "axle_type_front_axle_code",\n "axle_type_front_axle",\n "axle_type_rear_axle_code",\n "axle_type_rear_axle",\n "brake_type_code",\n "brake_type",\n "engine_manufacture_code",\n "engine_manufacture",\n "engine_model",\n "duty_type_code",\n "duty_type",\n "bed_length_code",\n "bed_length",\n "standard_segmentation_code",\n "standard_segmentation",\n "plant_code",\n "plant_country",\n "plant_city",\n "plant_country_code",\n "plant_state_code",\n "plant_state",\n "origin_code",\n "origin",\n "displacement_liters",\n "block_type",\n "head_configuration_1",\n "head_configuration_2",\n "valves_per_cylinder",\n "valves_total",\n "engine_code",\n "is_incomplete",\n "battery_type_code",\n "battery_type",\n "total_battery_power",\n "battery_voltage",\n "supercharge_flag",\n "supercharge_flag_description",\n "turbocharger_flag",\n "turbocharger_flag_description",\n "variable_valve_timing_flag",\n "motorcycles_body_style_code",\n "motorcycles_body_style"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "vehicle_make": "str",\n "marketing_year": "str",\n "vehicle_type_code": "str",\n "vehicle_type": "str",\n "make_name": "str",\n "model_code": "str",\n "vehicle_trim": "str",\n "vehicle_trim_1": "str",\n "vehicle_trim_2": "str",\n "vehicle_trim_3": "str",\n "vehicle_trim_4": "str",\n "body_style_code": "str",\n "body_style": "str",\n "num_of_doors": "str",\n "number_of_wheels": "str",\n "num_of_wheels_by_power_train": "str",\n "vehicle_manufacturer_code": "str",\n "vehicle_manufacturer_name": "str",\n "displacement_cid": "str",\n "displacement_cc": "str",\n "cylinder_count_code": "str",\n "cycle_count": "str",\n "fuel_code": "str",\n "fuel": "str",\n "type_of_fuel_code": "str",\n "type_of_fuel": "str",\n "carburetion_types_code": "str",\n "carburetion_types": "str",\n "num_of_barrels": "str",\n "gross_vehicle_weights_range_code": "str",\n "gross_vehicle_weights_range": "str",\n "distance_between_axles_for_base_model": "str",\n "distance_between_axles_for_particular_series": "str",\n "front_tire": "str",\n "front_tire_pressure": "str",\n "front_tire_size_code": "str",\n "front_tire_size": "str",\n "rear_tire": "str",\n "rear_tire_pressure": "str",\n "rear_tire_size_code": "str",\n "rear_tire_size": "str",\n "tonnage_rating": "str",\n "shipping_weight": "str",\n "base_price": "str",\n "drive_type_1": "str",\n "drive_type_2": "str",\n "country_sold_code": "str",\n "country_sold": "str",\n "brakes_abs_code": "str",\n "brakes_abs_description": "str",\n "security_type_code": "str",\n "security_type": "str",\n "daytime_running_lights_1": "str",\n "daytime_running_lights_2": "str",\n "restraint_type_code": "str",\n "restraint_type": "str",\n "cab_configuration_code": "str",\n "cab_configuration": "str",\n "axle_type_front_axle_code": "str",\n "axle_type_front_axle": "str",\n "axle_type_rear_axle_code": "str",\n "axle_type_rear_axle": "str",\n "brake_type_code": "str",\n "brake_type": "str",\n "engine_manufacture_code": "str",\n "engine_manufacture": "str",\n "engine_model": "str",\n "duty_type_code": "str",\n "duty_type": "str",\n "bed_length_code": "str",\n "bed_length": "str",\n "standard_segmentation_code": "str",\n "standard_segmentation": "str",\n "plant_code": "str",\n "plant_country": "str",\n "plant_city": "str",\n "plant_country_code": "str",\n "plant_state_code": "str",\n "plant_state": "str",\n "origin_code": "str",\n "origin": "str",\n "displacement_liters": "str",\n "block_type": "str",\n "head_configuration_1": "str",\n "head_configuration_2": "str",\n "valves_per_cylinder": "str",\n "valves_total": "str",\n "engine_code": "str",\n "is_incomplete": "str",\n "battery_type_code": "str",\n "battery_type": "str",\n "total_battery_power": "str",\n "battery_voltage": "str",\n "supercharge_flag": "str",\n "supercharge_flag_description": "str",\n "turbocharger_flag": "str",\n "turbocharger_flag_description": "str",\n "variable_valve_timing_flag": "str",\n "motorcycles_body_style_code": "str",\n "motorcycles_body_style": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "NCICMAKE": "vehicle_make",\n "VINYEAR": "marketing_year",\n "VEHTYPE": "vehicle_type_code",\n "VEHTYPE_T": "vehicle_type",\n "VINMAKE_T": "make_name",\n "VINMODEL_T": "model_code",\n "VINTRIM_T": "vehicle_trim",\n "VINTRIM1_T": "vehicle_trim_1",\n "VINTRIM2_T": "vehicle_trim_2",\n "VINTRIM3_T": "vehicle_trim_3",\n "VINTRIM4_T": "vehicle_trim_4",\n "BODYSTYL": "body_style_code",\n "BODYSTYL_T": "body_style",\n "DOORS": "num_of_doors",\n "WHEELS": "number_of_wheels",\n "DRIVWHLS": "num_of_wheels_by_power_train",\n "MFG": "vehicle_manufacturer_code",\n "MFG_T": "vehicle_manufacturer_name",\n "DISPLCI": "displacement_cid",\n "DISPLCC": "displacement_cc",\n "CYLNDRS": "cylinder_count_code",\n "CYCLES": "cycle_count",\n "FUEL": "fuel_code",\n "FUEL_T": "fuel",\n "FUELINJ": "type_of_fuel_code",\n "FUELINJ_T": "type_of_fuel",\n "CARBTYPE": "carburetion_types_code",\n "CARBTYPE_T": "carburetion_types",\n "CARBBRLS": "num_of_barrels",\n "GVWRANGE": "gross_vehicle_weights_range_code",\n "GVWRANGE_T": "gross_vehicle_weights_range",\n "WHLBSH": "distance_between_axles_for_base_model",\n "WHLBLG": "distance_between_axles_for_particular_series",\n "TIREDESC_F": "front_tire",\n "PSI_F": "front_tire_pressure",\n "TIRESZ_F": "front_tire_size_code",\n "TIRESZ_F_T": "front_tire_size",\n "TIREDESC_R": "rear_tire",\n "PSI_R": "rear_tire_pressure",\n "REARSIZE": "rear_tire_size_code",\n "REARSIZE_T": "rear_tire_size",\n "TONRATING": "tonnage_rating",\n "SHIPWEIGHT": "shipping_weight",\n "MSRP": "base_price",\n "DRIVETYP": "drive_type_1",\n "DRIVETYP_T": "drive_type_2",\n "SALECTRY": "country_sold_code",\n "SALECTRY_T": "country_sold",\n "ABS": "brakes_abs_code",\n "ABS_T": "brakes_abs_description",\n "SECURITY": "security_type_code",\n "SECURITY_T": "security_type",\n "DRL": "daytime_running_lights_1",\n "DRL_T": "daytime_running_lights_2",\n "RSTRNT": "restraint_type_code",\n "RSTRNT_T": "restraint_type",\n "TKCAB": "cab_configuration_code",\n "TKCAB_T": "cab_configuration",\n "TKAXLEF": "axle_type_front_axle_code",\n "TKAXLEF_T": "axle_type_front_axle",\n "TKAXLER": "axle_type_rear_axle_code",\n "TKAXLER_T": "axle_type_rear_axle",\n "TKBRAK": "brake_type_code",\n "TKBRAK_T": "brake_type",\n "ENGMFG": "engine_manufacture_code",\n "ENGMFG_T": "engine_manufacture",\n "ENGMODEL": "engine_model",\n "TKDUTY": "duty_type_code",\n "TKDUTY_T": "duty_type",\n "TKBEDL": "bed_length_code",\n "TKBEDL_T": "bed_length",\n "SEGMNT": "standard_segmentation_code",\n "SEGMNT_T": "standard_segmentation",\n "PLANT": "plant_code",\n "PLNTCTRY_T": "plant_country",\n "PLNTCITY": "plant_city",\n "PLNTCTRY": "plant_country_code",\n "PLNTSTAT": "plant_state_code",\n "PLNTSTAT_T": "plant_state",\n "ORIGIN": "origin_code",\n "ORIGIN_T": "origin",\n "DISPCLMT": "displacement_liters",\n "BLOCKTYPE": "block_type",\n "ENGHEAD": "head_configuration_1",\n "ENGHEAD_T": "head_configuration_2",\n "VLVCLNDR": "valves_per_cylinder",\n "VLVTOTAL": "valves_total",\n "ENGVINCD": "engine_code",\n "INCOMPLT": "is_incomplete",\n "BATTYP": "battery_type_code",\n "BATTYP_T": "battery_type",\n "BATKWRTG": "total_battery_power",\n "BATVOLT": "battery_voltage",\n "SUPCHRGR": "supercharge_flag",\n "SUPCHRGR_T": "supercharge_flag_description",\n "TURBO": "turbocharger_flag",\n "TURBO_T": "turbocharger_flag_description",\n "ENGVVT": "variable_valve_timing_flag",\n "MCYUSAGE": "motorcycles_body_style_code",\n 
"MCYUSAGE_T": "motorcycles_body_style"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for violatn pipelines
violatn_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="violatn_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="violatn",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "violatn.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.violatn_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "violations_charged",\n "violations_charged_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "violations_charged": "str",\n "violations_charged_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MVIOLATN": "violations_charged",\n "MVIOLATNNAME": "violations_charged_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for vision pipelines
vision_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vision_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="vision",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vision.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vision_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "drivers_vision_obscured_by",\n "drivers_vision_obscured_by_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "drivers_vision_obscured_by": "str",\n "drivers_vision_obscured_by_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "MVISOBSC": "drivers_vision_obscured_by",\n "MVISOBSCNAME": "drivers_vision_obscured_by_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
# Run CSV transform within kubernetes pod for vsoe pipelines
vsoe_2015_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="vsoe_2015_2020_transform_csv",
startup_timeout_seconds=600,
name="vsoe",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.nhtsa_traffic_fatalities.container_registry.run_csv_transform_kub }}",
env_vars={
"PIPELINE_NAME": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.pipeline_name }}",
"SOURCE_URL": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.source_url }}",
"CHUNKSIZE": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.chunksize }}",
"SOURCE_ZIPFILE_EXTRACTED": "vsoe.csv",
"SOURCE_FILE": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.source_file }}",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.dataset_id }}",
"TABLE_ID": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.destination_table }}",
"START_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.start_year }}",
"END_YEAR": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.end_year }}",
"DROP_DEST_TABLE": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.drop_dest_table }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.target_gcs_path }}",
"SCHEMA_PATH": "{{ var.json.nhtsa_traffic_fatalities.vsoe_2015_2020.schema_path }}",
"INPUT_CSV_HEADERS": '[\n "state_number",\n "state_name",\n "consecutive_number",\n "vehicle_number",\n "vehicle_event_number",\n "sequence_of_events",\n "sequence_of_events_name",\n "area_of_Impact_associated_with_the_event",\n "area_of_Impact_associated_with_the_event_name"\n]',
"INPUT_DTYPES": '{\n "state_number": "str",\n "state_name": "str",\n "consecutive_number": "str",\n "vehicle_number": "str",\n "vehicle_event_number": "str",\n "sequence_of_events": "str",\n "sequence_of_events_name": "str",\n "area_of_Impact_associated_with_the_event": "str",\n "area_of_Impact_associated_with_the_event_name": "str"\n}',
"RENAME_MAPPINGS_LIST": '{\n "STATE": "state_number",\n "STATENAME": "state_name",\n "ST_CASE": "consecutive_number",\n "VEH_NO": "vehicle_number",\n "VEVENTNUM": "vehicle_event_number",\n "SOE": "sequence_of_events",\n "SOENAME": "sequence_of_events_name",\n "AOI": "area_of_Impact_associated_with_the_event",\n "AOINAME": "area_of_Impact_associated_with_the_event_name"\n}',
},
resources={
"request_ephemeral_storage": "4G",
"request_cpu": "1",
"request_memory": "4G",
},
)
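    # Tear down the GKE cluster once all transform tasks have completed.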
delete_cluster = kubernetes_engine.GKEDeleteClusterOperator(
task_id="delete_cluster",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
name="nhtsa-traffic-fatalities",
)
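
    # Task ordering: create the cluster, fan out to every transform task in
    # parallel, then delete the cluster.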
(
create_cluster
>> [
accident_2015_transform_csv,
accident_2016_2019_transform_csv,
accident_2020_transform_csv,
cevent_2015_2020_transform_csv,
damage_2015_2020_transform_csv,
distract_2015_2020_transform_csv,
drimpair_2015_2020_transform_csv,
factor_2015_2020_transform_csv,
maneuver_2015_2020_transform_csv,
nmcrash_2015_2020_transform_csv,
nmimpair_2015_2020_transform_csv,
nmprior_2015_2020_transform_csv,
parkwork_2015_transform_csv,
parkwork_2016_2017_transform_csv,
parkwork_2018_transform_csv,
parkwork_2019_transform_csv,
parkwork_2020_transform_csv,
pbtype_transform_csv,
person_2015_2017_transform_csv,
person_2018_transform_csv,
person_2019_transform_csv,
person_2020_transform_csv,
safetyeq_2015_2016_transform_csv,
safetyeq_2017_2020_transform_csv,
vehicle_2015_transform_csv,
vehicle_2016_2017_transform_csv,
vehicle_2018_2019_transform_csv,
vehicle_2020_transform_csv,
vevent_2015_2020_transform_csv,
vindecode_2015_transform_csv,
violatn_2015_2020_transform_csv,
vision_2015_2020_transform_csv,
vsoe_2015_2020_transform_csv,
]
>> delete_cluster
)
| apache-2.0 |
uber/pyro | pyro/distributions/improper_uniform.py | 1 | 2448 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

import torch
from torch.distributions import constraints

from .torch_distribution import TorchDistribution
from .util import broadcast_shape


class ImproperUniform(TorchDistribution):
    """
    Improper distribution with zero :meth:`log_prob` and undefined
    :meth:`sample`.

    This is useful for transforming a model from generative dag form to factor
    graph form for use in HMC. For example the following are equal in
    distribution::

        # Version 1. a generative dag
        x = pyro.sample("x", Normal(0, 1))
        y = pyro.sample("y", Normal(x, 1))
        z = pyro.sample("z", Normal(y, 1))

        # Version 2. a factor graph
        xyz = pyro.sample("xyz", ImproperUniform(constraints.real, (), (3,)))
        x, y, z = xyz.unbind(-1)
        pyro.sample("x", Normal(0, 1), obs=x)
        pyro.sample("y", Normal(x, 1), obs=y)
        pyro.sample("z", Normal(y, 1), obs=z)

    Note this distribution errors when :meth:`sample` is called. To create a
    similar distribution that instead samples from a specified distribution
    consider using ``.mask(False)`` as in::

        xyz = dist.Normal(0, 1).expand([3]).to_event(1).mask(False)

    :param support: The support of the distribution.
    :type support: ~torch.distributions.constraints.Constraint
    :param torch.Size batch_shape: The batch shape.
    :param torch.Size event_shape: The event shape.
    """

arg_constraints = {}
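
    # There are no distribution parameters to constrain; the support and the
    # batch/event shapes are supplied directly by the caller.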
def __init__(self, support, batch_shape, event_shape):
assert isinstance(support, constraints.Constraint)
self._support = support
super().__init__(batch_shape, event_shape)
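
    # The support varies per instance, so it is exposed as a dependent
    # property rather than a fixed class-level constraint.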
@constraints.dependent_property
def support(self):
return self._support
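
    # expand() keeps the same support and event shape while broadcasting to a
    # new batch shape.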
def expand(self, batch_shape, _instance=None):
batch_shape = torch.Size(batch_shape)
new = self._get_checked_instance(ImproperUniform, _instance)
new._support = self._support
super(ImproperUniform, new).__init__(batch_shape, self.event_shape)
return new
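
    # The density is improper: log_prob is identically zero, broadcast over
    # the batch shape implied by ``value``.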
def log_prob(self, value):
batch_shape = value.shape[: value.dim() - self.event_dim]
batch_shape = broadcast_shape(batch_shape, self.batch_shape)
return torch.zeros(()).expand(batch_shape)
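
    # Sampling is deliberately unsupported; see the class docstring for the
    # ``.mask(False)`` alternative when samples are needed.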
def sample(self, sample_shape=torch.Size()):
raise NotImplementedError("ImproperUniform does not support sampling")
| apache-2.0 |