repo_name | path | copies | size | content | license
---|---|---|---|---|---|
offbye/paparazzi | sw/tools/calibration/calibrate_gyro.py | 87 | 4686 | #! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from numpy import linspace, polyval  # these aliases were removed from modern SciPy; numpy is the canonical home
from scipy import stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
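# Worked example (illustrative only, using the "lisa 3" p-axis numbers
# above): with a=-4511.16 and b=31948.34, the regression output below
# would print roughly
#   <define name="GYRO_X_NEUTRAL" value="31948"/>               (neutral = b)
#   <define name="GYRO_X_SENS" value="-0.90797" integer="16"/>  (2**12 / a)
#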
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
# overlay fitted values
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
stevenwudi/Kernelized_Correlation_Filter | CNN_training.py | 1 | 3640 | import numpy as np
from keras.optimizers import SGD
from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \
cnn_cifar_small_batchnormalisation
from models.DataLoader import DataLoader
from scripts.progress_bar import printProgress
from time import time, localtime
# this is a predefined dataloader
loader = DataLoader(batch_size=32)
# construct the model here (pre-defined model)
model = cnn_cifar_small_batchnormalisation(loader.image_shape)
print(model.name)
nb_epoch = 200
early_stopping = True
early_stopping_count = 0
early_stopping_wait = 3
train_loss = []
valid_loss = []
learning_rate = [0.0001, 0.001, 0.01]
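# NB: this list is consumed as a stack: SGD starts at the last entry
# (0.01) and each early-stopping trigger pops it, stepping the rate down
# toward 0.0001 before training finally breaks out of the loop.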
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
# load validation data from the h5py file (heavy lifting here)
x_valid, y_valid = loader.get_valid()
best_valid = np.inf
for e in range(nb_epoch):
print("epoch %d" % e)
loss_list = []
time_list = []
time_start = time()
for i in range(loader.n_iter_train):
time_start_batch = time()
X_batch, Y_batch = loader.next_train_batch()
loss_list.append(model.train_on_batch(X_batch, Y_batch))
# calculate some time information
time_list.append(time() - time_start_batch)
eta = (loader.n_iter_train - i) * np.array(time_list).mean()
printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.'%(np.array(loss_list).mean(), eta), barLength=50)
printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50)
train_loss.append(np.asarray(loss_list).mean())
print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start))
valid_loss.append(model.evaluate(x_valid, y_valid))
print('valid loss is %f' % valid_loss[-1])
if best_valid > valid_loss[-1]:
early_stopping_count = 0
print('saving best valid result...')
best_valid = valid_loss[-1]
model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5')
else:
# no improvement: wait a few epochs, then decay the learning rate or stop
early_stopping_count += 1
if early_stopping_count > early_stopping_wait:
early_stopping_count = 0
if len(learning_rate) > 1:
learning_rate.pop()
print('decreasing the learning rate to: %f'%learning_rate[-1])
model.optimizer.lr.set_value(learning_rate[-1])
else:
break
lt = localtime()
lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \
+str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\
+str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2)
np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss)
np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss)
model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5')
print("done")
#### we show some visualisation here
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt')
valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt')
plt.plot(train_loss, 'b')
plt.plot(valid_loss, 'r')
blue_label = mpatches.Patch(color='blue', label='train_loss')
red_label = mpatches.Patch(color='red', label='valid_loss')
plt.legend(handles=[blue_label, red_label])
| gpl-3.0 |
galfaroi/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
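# Usage sketch (illustrative, not part of the original module):
# bar = ProgressBar(100)
# for i in range(100):
#     bar.animate(i)   # redraws the bar in place via the '\r' carriage return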
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
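# Usage sketch (assumes a writable data directory; network access is
# needed on the first call only, since the downloaded xls is cached):
# holdings = getSpyHoldings('/tmp')
# holdings.head()  # columns: name, symbol, weight, sector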
| bsd-3-clause |
dhhagan/PAM | Python/PAM.py | 1 | 5037 | #PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
FILE.close()  # parentheses added: bare FILE.close referenced the method without calling it
return cols, indexToName, lineNum, shortLines
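# Usage sketch (hypothetical log file whose second line holds the headings):
# cols, indexToName, lineNum, shortLines = analyzeFile('run_01.txt', ',')
# cols[<heading>] then holds that column's values as strings.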
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
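# e.g. sorted(['run_10.csv', 'run_2.csv'], key=numericalSort) returns
# ['run_2.csv', 'run_10.csv'], since the digit chunks compare as integers.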
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
fig = plt.figure('Gas Concentration Readings for East St.Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
FILE.close()  # parentheses added: bare FILE.close referenced the method without calling it
return goodFiles, irregFiles
| mit |
shadowk29/cusumtools | legacy/minimal_psd.py | 1 | 12009 | ## COPYRIGHT
## Copyright (C) 2015 Kyle Briggs (kbrig035<at>uottawa.ca)
##
## This file is part of cusumtools.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import tkinter.filedialog
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import scipy.io as sio
from scipy.signal import bessel, filtfilt, welch
from scikits.samplerate import resample
import pylab as pl
import glob
import os
import time
import pandas as pd
from pandasql import sqldf
import re
def make_format(current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
coords = [ax_coord, (x, y)]
return ('Left: {:<40} Right: {:<}'
.format(*['({:.3f}, {:.3f})'.format(x, y) for x,y in coords]))
return format_coord
class App(tk.Frame):
def __init__(self, parent,file_path):
tk.Frame.__init__(self, parent)
parent.deiconify()
self.events_flag = False
self.baseline_flag = False
self.file_path = file_path
##### Trace plotting widgets #####
self.trace_frame = tk.LabelFrame(parent,text='Current Trace')
self.trace_fig = Figure(figsize=(7,5), dpi=100)
self.trace_canvas = FigureCanvasTkAgg(self.trace_fig, master=self.trace_frame)
self.trace_toolbar_frame = tk.Frame(self.trace_frame)
self.trace_toolbar = NavigationToolbar2TkAgg(self.trace_canvas, self.trace_toolbar_frame)
self.trace_toolbar.update()
self.trace_frame.grid(row=0,column=0,columnspan=6,sticky=tk.N+tk.S)
self.trace_toolbar_frame.grid(row=1,column=0,columnspan=6)
self.trace_canvas.get_tk_widget().grid(row=0,column=0,columnspan=6)
##### PSD plotting widgets #####
self.psd_frame = tk.LabelFrame(parent,text='Power Spectrum')
self.psd_fig = Figure(figsize=(7,5), dpi=100)
self.psd_canvas = FigureCanvasTkAgg(self.psd_fig, master=self.psd_frame)
self.psd_toolbar_frame = tk.Frame(self.psd_frame)
self.psd_toolbar = NavigationToolbar2TkAgg(self.psd_canvas, self.psd_toolbar_frame)
self.psd_toolbar.update()
self.psd_frame.grid(row=0,column=6,columnspan=6,sticky=tk.N+tk.S)
self.psd_toolbar_frame.grid(row=1,column=6,columnspan=6)
self.psd_canvas.get_tk_widget().grid(row=0,column=6,columnspan=6)
##### Control widgets #####
self.control_frame = tk.LabelFrame(parent, text='Controls')
self.control_frame.grid(row=2,column=0,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.start_entry = tk.Entry(self.control_frame)
self.start_entry.insert(0,'0')
self.start_label = tk.Label(self.control_frame, text='Start Time (s)')
self.start_label.grid(row=0,column=0,sticky=tk.E+tk.W)
self.start_entry.grid(row=0,column=1,sticky=tk.E+tk.W)
self.end_entry = tk.Entry(self.control_frame)
self.end_entry.insert(0,'10')
self.end_label = tk.Label(self.control_frame, text='End Time (s)')
self.end_label.grid(row=0,column=2,sticky=tk.E+tk.W)
self.end_entry.grid(row=0,column=3,sticky=tk.E+tk.W)
self.cutoff_entry = tk.Entry(self.control_frame)
self.cutoff_entry.insert(0,'')
self.cutoff_label = tk.Label(self.control_frame, text='Cutoff (Hz)')
self.cutoff_label.grid(row=1,column=0,sticky=tk.E+tk.W)
self.cutoff_entry.grid(row=1,column=1,sticky=tk.E+tk.W)
self.order_entry = tk.Entry(self.control_frame)
self.order_entry.insert(0,'')
self.order_label = tk.Label(self.control_frame, text='Filter Order')
self.order_label.grid(row=1,column=2,sticky=tk.E+tk.W)
self.order_entry.grid(row=1,column=3,sticky=tk.E+tk.W)
self.samplerate_entry = tk.Entry(self.control_frame)
self.samplerate_entry.insert(0,'250000')
self.samplerate_label = tk.Label(self.control_frame, text='Sampling Frequency (Hz)')
self.samplerate_label.grid(row=1,column=4,sticky=tk.E+tk.W)
self.samplerate_entry.grid(row=1,column=5,sticky=tk.E+tk.W)
self.savegain_entry = tk.Entry(self.control_frame)
self.savegain_entry.insert(0,'1')
self.savegain_label = tk.Label(self.control_frame, text='Save Gain')  # fixed copy-paste: this labels the gain entry, not the sampling frequency
self.savegain_label.grid(row=0,column=4,sticky=tk.E+tk.W)
self.savegain_entry.grid(row=0,column=5,sticky=tk.E+tk.W)
self.plot_trace = tk.Button(self.control_frame, text='Update Trace', command=self.update_trace)
self.plot_trace.grid(row=2,column=0,columnspan=2,sticky=tk.E+tk.W)
self.normalize = tk.IntVar()
self.normalize.set(0)
self.normalize_check = tk.Checkbutton(self.control_frame, text='Normalize', variable = self.normalize)
self.normalize_check.grid(row=2,column=2,sticky=tk.E+tk.W)
self.plot_psd = tk.Button(self.control_frame, text='Update PSD', command=self.update_psd)
self.plot_psd.grid(row=2,column=3,sticky=tk.E+tk.W)
##### Feedback Widgets #####
self.feedback_frame = tk.LabelFrame(parent, text='Status')
self.feedback_frame.grid(row=2,column=6,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.export_psd = tk.Button(self.feedback_frame, text='Export PSD',command=self.export_psd)
self.export_psd.grid(row=1,column=0,columnspan=6,sticky=tk.E+tk.W)
self.export_trace = tk.Button(self.feedback_frame, text='Export Trace',command=self.export_trace)
self.export_trace.grid(row=2,column=0,columnspan=6,sticky=tk.E+tk.W)
self.load_memmap()
self.initialize_samplerate()
def export_psd(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\PSDs for Sam')
np.savetxt(data_path,np.c_[self.f, self.Pxx, self.rms],delimiter=',')
except AttributeError:
self.wildcard.set('Plot the PSD first')
def export_trace(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\Analysis\Pores\NPN\PSDs')
np.savetxt(data_path,self.plot_data,delimiter=',')
except AttributeError:
self.wildcard.set('Plot the trace first')
def load_mapped_data(self):
self.total_samples = len(self.map)
self.samplerate = int(self.samplerate_entry.get())
if self.start_entry.get()!='':
self.start_time = float(self.start_entry.get())
start_index = int((float(self.start_entry.get())*self.samplerate))
else:
self.start_time = 0
start_index = 0
if self.end_entry.get()!='':
self.end_time = float(self.end_entry.get())
end_index = int((float(self.end_entry.get())*self.samplerate))
if end_index > self.total_samples:
end_index = self.total_samples
self.data = self.map[start_index:end_index]
self.data = float(self.savegain_entry.get()) * self.data
def load_memmap(self):
columntypes = np.dtype([('current', '>i2'), ('voltage', '>i2')])
self.map = np.memmap(self.file_path, dtype=columntypes, mode='r')['current']
def integrate_noise(self, f, Pxx):
df = f[1]-f[0]
return np.sqrt(np.cumsum(Pxx * df))
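# i.e. RMS(f) = sqrt( integral from 0 to f of Pxx df' ), approximated as
# a cumulative sum over the uniformly spaced Welch frequency bins.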
def filter_data(self):
cutoff = float(self.cutoff_entry.get())
order = int(self.order_entry.get())
Wn = 2.0 * cutoff/float(self.samplerate)
b, a = bessel(order,Wn,'low')
padding = 1000
padded = np.pad(self.data, pad_width=padding, mode='median')
self.filtered_data = filtfilt(b, a, padded, padtype=None)[padding:-padding]
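# Wn is the cutoff normalized to the Nyquist rate (fs/2), as
# scipy.signal.bessel expects; the median padding suppresses filtfilt
# edge transients and is stripped again by the [padding:-padding] slice.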
def initialize_samplerate(self):
self.samplerate = float(self.samplerate_entry.get())
##### Plot Updating functions #####
def update_trace(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
self.trace_fig.clf()
a = self.trace_fig.add_subplot(111)
time = np.linspace(1.0/self.samplerate,len(self.plot_data)/float(self.samplerate),len(self.plot_data))+self.start_time
a.set_xlabel(r'Time ($\mu s$)')
a.set_ylabel('Current (pA)')
self.trace_fig.subplots_adjust(bottom=0.14,left=0.21)
a.plot(time*1e6,self.plot_data,'.',markersize=1)
self.trace_canvas.show()
def update_psd(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
maxf = 2*float(self.cutoff_entry.get())
else:
maxf = 2*float(self.samplerate_entry.get())
length = np.minimum(2**18,len(self.filtered_data))
end_index = int(np.floor(len(self.filtered_data)/length)*length)
current = np.average(self.filtered_data[:end_index])
f, Pxx = welch(self.filtered_data, plot_samplerate,nperseg=length)
self.rms = self.integrate_noise(f, Pxx)
if self.normalize.get():
Pxx /= current**2
Pxx *= maxf/2.0
self.rms /= np.absolute(current)
self.f = f
self.Pxx = Pxx
minf = 1
BW_index = np.searchsorted(f, maxf/2)
logPxx = np.log10(Pxx[1:BW_index])
minP = 10**np.floor(np.amin(logPxx))
maxP = 10**np.ceil(np.amax(logPxx))
self.psd_fig.clf()
a = self.psd_fig.add_subplot(111)
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(r'Spectral Power ($\mathrm{pA}^2/\mathrm{Hz}$)')
a.set_xlim(minf, maxf)
a.set_ylim(minP, maxP)
self.psd_fig.subplots_adjust(bottom=0.14,left=0.21)
a.loglog(f[1:],Pxx[1:],'b-')
for tick in a.get_yticklabels():
tick.set_color('b')
a2 = a.twinx()
a2.semilogx(f, self.rms, 'r-')
a2.set_ylabel('RMS Noise (pA)')
a2.set_xlim(minf, maxf)
for tick in a2.get_yticklabels():
tick.set_color('r')
a2.format_coord = make_format(a2, a)
self.psd_canvas.show()
def main():
root=tk.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename(initialdir='C:/Data/')
App(root,file_path).grid(row=0,column=0)
root.mainloop()
if __name__=="__main__":
main()
| gpl-3.0 |
ual/urbansim | urbansim/utils/tests/test_misc.py | 5 | 3159 | import os
import shutil
import numpy as np
import pandas as pd
import pytest
from .. import misc
class _FakeTable(object):
def __init__(self, name, columns):
self.name = name
self.columns = columns
@pytest.fixture
def fta():
return _FakeTable('a', ['aa', 'ab', 'ac'])
@pytest.fixture
def ftb():
return _FakeTable('b', ['bx', 'by', 'bz'])
@pytest.fixture
def clean_fake_data_home(request):
def fin():
if os.path.isdir('fake_data_home'):
shutil.rmtree('fake_data_home')
request.addfinalizer(fin)
def test_column_map_raises(fta, ftb):
with pytest.raises(RuntimeError):
misc.column_map([fta, ftb], ['aa', 'by', 'bz', 'cw'])
def test_column_map_none(fta, ftb):
assert misc.column_map([fta, ftb], None) == {'a': None, 'b': None}
def test_column_map(fta, ftb):
assert misc.column_map([fta, ftb], ['aa', 'by', 'bz']) == \
{'a': ['aa'], 'b': ['by', 'bz']}
assert misc.column_map([fta, ftb], ['by', 'bz']) == \
{'a': [], 'b': ['by', 'bz']}
def test_dirs(clean_fake_data_home):
misc._mkifnotexists("fake_data_home")
os.environ["DATA_HOME"] = "fake_data_home"
misc.get_run_number()
misc.get_run_number()
misc.data_dir()
misc.configs_dir()
misc.models_dir()
misc.charts_dir()
misc.maps_dir()
misc.simulations_dir()
misc.reports_dir()
misc.runs_dir()
misc.config("test")
@pytest.fixture
def range_df():
df = pd.DataFrame({'to_zone_id': [2, 3, 4],
'from_zone_id': [1, 1, 1],
'distance': [.1, .2, .9]})
df = df.set_index(['from_zone_id', 'to_zone_id'])
return df
@pytest.fixture
def range_series():
return pd.Series([10, 150, 75, 275], index=[1, 2, 3, 4])
def test_compute_range(range_df, range_series):
assert misc.compute_range(range_df, range_series, "distance", .5).loc[1] == 225
def test_reindex():
s = pd.Series([.5, 1.0, 1.5], index=[2, 1, 3])
s2 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
assert list(misc.reindex(s, s2).values) == [1.0, .5, 1.5]
def test_naics():
assert misc.naicsname(54) == "Professional"
def test_signif():
assert misc.signif(4.0) == '***'
assert misc.signif(3.0) == '**'
assert misc.signif(2.0) == '*'
assert misc.signif(1.5) == '.'
assert misc.signif(1.0) == ''
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_cost': [1000000, 2000000, 3000000],
'parcel_size': [10000, 20000, 30000],
'max_far': [2.0, 3.0, 4.0],
'names': ['a', 'b', 'c'],
'max_height': [40, 60, 80]},
index=['a', 'b', 'c'])
def test_misc_dffunctions(simple_dev_inputs):
misc.df64bitto32bit(simple_dev_inputs)
misc.pandasdfsummarytojson(simple_dev_inputs[['land_cost', 'parcel_size']])
misc.numpymat2df(np.array([[1, 2], [3, 4]]))
def test_column_list(fta, ftb):
assert misc.column_list([fta, ftb], ['aa', 'by', 'bz', 'c']) == \
['aa', 'by', 'bz']
| bsd-3-clause |
tienjunhsu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division, print_function
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print('Pick event')
print('you pressed', event.button, event.xdata, event.ydata)
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
dsimic/taxsims | ss.py | 1 | 1112 | import pandas as pd
import numpy as np
def ss_calc(
contrib_yearly, inv_gwth_rt, num_years, safe_withdrw_rate, start_age=28
):
"""
inv_gwth_rt is inflation adjusted.
contrib_yearly is in first-year dollars.
"""
tot_years = max(0, 62 - start_age - num_years) + num_years
df = pd.DataFrame({
'contrib_yearly': [contrib_yearly] * num_years + [0.] *
max(0, (62 - num_years - start_age)),
'inv_value': [0] * tot_years,
}, index=range(tot_years))
for year in range(0, tot_years):
print(year)
multiplier = np.array([
(1. + inv_gwth_rt) ** (year - y_) for y_ in range(year + 1)])
print(multiplier)
df['inv_value'][year] = np.sum(
np.array(df['contrib_yearly'][0: year + 1]) * multiplier)
df['monthly_inv_income'] = safe_withdrw_rate * df['inv_value'] / 12.
df['monthly_inv_income_w_spouse'] = df['monthly_inv_income'] * 1.5
return df
if __name__ == "__main__":
df = ss_calc(15e3, .03, 10, .03)
ss_benefit_monthly = 939.00
ss_benefit_w_spouse_monthly = 1.5 * ss_benefit_monthly
| gpl-2.0 |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_good_192matrix.py | 1 | 6357 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part computes the first matrix
#==============================================================================
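# (layout of each 192-vector: 8 sliding windows of t_step=1920 samples,
# hopped by t_entre_step=1440, times 24 normalized spectral bands each)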
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFTs of the later windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFTs of the later windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFTs of the later windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFTs of the later windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFTs of the later windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit |
mringel/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n // 10)  # integer trim count; n/10 would be a float slice index on Python 3
return np.mean(sorted(t)[trim:n-trim])
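# e.g. with n=25 observations, trim=2, so the two smallest and two
# largest values are dropped before averaging.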
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
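# Note: this is a residual bootstrap. The fitted mean curve is held
# fixed and resampled residuals are added back, so the spread of the
# refitted models reflects sampling error only (see add_resid in
# GeneratePredictions for the extra prediction-error component).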
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
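# The imputed days keep the EWMA trend but get resampled residuals added
# back, so they show realistic day-to-day noise instead of lying exactly
# on the smoothed curve.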
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
yl565/statsmodels | statsmodels/stats/contingency_tables.py | 4 | 43623 | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of contingency tables.
Also contains functions for conducting Mcnemar's test and Cochran's q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from __future__ import division
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels import iolib
from statsmodels.tools.sm_exceptions import SingularMatrixWarning
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if table.shape[0] != table.shape[1]:
ix = list(set(table.index) | set(table.columns))
table = table.reindex(ix, axis=0)
table = table.reindex(ix, axis=1)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch object containing statsmodels results>"
class Table(object):
"""
Analyses that can be performed on a two-way contingency table.
Parameters
----------
table : array-like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array-like
The original table is cached as `table_orig`.
marginal_probabilities : tuple of two ndarrays
The estimated row and column marginal distributions.
independence_probabilities : ndarray
Estimated cell probabilities under row/column independence.
fittedvalues : ndarray
Fitted values under independence.
resid_pearson : ndarray
The Pearson residuals under row/column independence.
standardized_resids : ndarray
Residuals for the independent row/column model with approximate
unit variance.
chi2_contribs : ndarray
The contribution of each cell to the chi^2 statistic.
local_logodds_ratios : ndarray
The local log odds ratios are calculated for each 2x2 subtable
formed from adjacent rows and columns.
local_oddsratios : ndarray
The local odds ratios are calculated from each 2x2 subtable
formed from adjacent rows and columns.
cumulative_log_oddsratios : ndarray
The cumulative log odds ratio at a given pair of thresholds is
calculated by reducing the table to a 2x2 table based on
dichotomizing the rows and columns at the given thresholds.
The table of cumulative log odds ratios presents all possible
cumulative log odds ratios that can be formed from a given
table.
cumulative_oddsratios : ndarray
The cumulative odds ratios are calculated by reducing the
table to a 2x2 table based on cutting the rows and columns at
a given point. The table of cumulative odds ratios presents
all possible cumulative odds ratios that can be formed from a
given table.
See also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
----------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table = self.table + 0.5
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
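# Usage sketch (hypothetical DataFrame with two categorical columns):
# tab = Table.from_data(df[["exposure", "outcome"]])
# crosstabs the first column against the second.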
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array-like
An array of numeric row scores
col_scores : array-like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
raise ValueError("The length of `row_scores` must match the first dimension of `table`.")
if len(col_scores) != self.table.shape[1]:
raise ValueError("The length of `col_scores` must match the second dimension of `table`.")
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
@cache_readonly
def marginal_probabilities(self):
# docstring for cached attributes in init above
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
# docstring for cached attributes in init above
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
# docstring for cached attributes in init above
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
# docstring for cached attributes in init above
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.cumulative_log_oddsratios)
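# A minimal usage sketch for Table (added for illustration; the counts are
# made up): run the nominal association test and inspect the residuals.
def _example_table():
    counts = np.asarray([[35, 21, 8],
                         [20, 32, 14],
                         [11, 17, 30]])
    tab = Table(counts, shift_zeros=False)
    rslt = tab.test_nominal_association()
    # rslt.statistic, rslt.df and rslt.pvalue summarize the chi^2 test;
    # large |resid_pearson| values point to the cells driving association.
    return rslt.pvalue, tab.resid_pearson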
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array-like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table. Otherwise the table should be provided in
a square form, with the (implicit) row and column categories
appearing in the same order.
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
.. math::
    p_{i, j} = p_{j, i} \quad \text{for all } i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
pvalue : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
it in `mcnemar.test` if the table is not 2 by 2. However, a more
direct generalization of the McNemar test to larger tables is
provided by the homogeneity test (SquareTable.homogeneity).
The p-value is based on the chi-square distribution, which is a
good approximation only when the sample size is not very small.
For 2x2 contingency tables the exact distribution can be obtained
with `mcnemar`.
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
Returns a bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
import warnings
warnings.warn("Unable to invert covariance matrix",
SingularMatrixWarning)
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : string
Used to format numeric values in the table.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
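# A minimal usage sketch for SquareTable (added for illustration; the
# counts are made up): Bowker's symmetry test and the Stuart-Maxwell
# homogeneity test.
def _example_square_table():
    counts = np.asarray([[50, 10, 5],
                         [12, 40, 9],
                         [6, 11, 45]])
    st = SquareTable(counts)
    sy = st.symmetry()       # Bowker's test of p_{ij} = p_{ji}
    hm = st.homogeneity()    # Stuart-Maxwell test of equal margins
    return sy.pvalue, hm.pvalue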
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array-like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Attributes
----------
log_oddsratio : float
The log odds ratio of the table.
log_oddsratio_se : float
The asymptotic standard error of the estimated log odds ratio.
oddsratio : float
The odds ratio of the table.
riskratio : float
The ratio between the risk in the first row and the risk in
the second row. Column 0 is interpreted as containing the
number of occurrences of the event of interest.
log_riskratio : float
The estimated log risk ratio for the table.
log_riskratio_se : float
The standard error of the estimated log risk ratio for the
table.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
def __init__(self, table, shift_zeros=True):
if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
raise ValueError("Table2x2 takes a 2x2 table as input.")
super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
# docstring for cached attributes in init above
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
# docstring for cached attributes in init above
return self.table[0, 0] * self.table[1, 1] / (self.table[0, 1] * self.table[1, 0])
@cache_readonly
def log_oddsratio_se(self):
# docstring for cached attributes in init above
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Parameters
----------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Parameters
----------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
# docstring for cached attributes in init above
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
# docstring for cached attributes in init above
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
# docstring for cached attributes in init above
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Parameters
----------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Parameters
----------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : string
Used to format the numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio", "Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se, lcb2, ucb2,
self.oddsratio_pvalue()]],
[fmt(x) for x in [self.riskratio, "", lcb3, ucb3, self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se, lcb4, ucb4,
self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
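# A minimal usage sketch for Table2x2 (added for illustration; the counts
# are made up). Rows are the two exposure groups; column 0 holds events
# and column 1 holds non-events.
def _example_table2x2():
    t22 = Table2x2(np.asarray([[25, 75], [10, 90]]))
    or_lcb, or_ucb = t22.oddsratio_confint(alpha=0.05)
    rr_lcb, rr_ucb = t22.riskratio_confint(alpha=0.05)
    return (t22.oddsratio, or_lcb, or_ucb), (t22.riskratio, rr_lcb, rr_ucb)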
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
shift_zeros : boolean
If True, 0.5 is added to every cell of each table that contains
a zero cell.
Attributes
----------
logodds_pooled : float
An estimate of the pooled log odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all the tables.
logodds_pooled_se : float
The estimated standard error of the pooled log odds ratio,
following Robins, Breslow and Greenland (Biometrics
42:311-323).
oddsratio_pooled : float
An estimate of the pooled odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all tables.
risk_pooled : float
An estimate of the pooled risk ratio. This is an estimate of
a risk ratio that is common to all the tables.
Notes
-----
These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = resettable_cache()
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` containing the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` containing the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
The column index or name of `data` containing the variable
defining the strata.
data : array-like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
data1 = pd.DataFrame(index=np.arange(data.shape[0]), columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
tables.append(tab)
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] - self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
@cache_readonly
def oddsratio_pooled(self):
# doc for cached attributes in init above
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
# doc for cached attributes in init above
return np.log(self.oddsratio_pooled)
@cache_readonly
def risk_pooled(self):
# doc for cached attributes in init above
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def logodds_pooled_se(self):
# doc for cached attributes in init above
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) * self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
pvalue : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = 1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) + 1 / (self._dma + e11)
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : string
Used for formatting numeric values in the summary.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.risk_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
["%d" % sum(ss), '', '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
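# A minimal usage sketch for StratifiedTable (added for illustration; the
# two strata are made up): pooled odds ratio plus the Mantel-Haenszel and
# Breslow-Day tests.
def _example_stratified_table():
    strata = [np.asarray([[10, 20], [15, 25]]),
              np.asarray([[12, 18], [9, 31]])]
    st = StratifiedTable(strata)
    mh = st.test_null_odds()    # tests that the common odds ratio is 1
    bd = st.test_equal_odds()   # tests that the odds ratio is constant
    return st.oddsratio_pooled, mh.pvalue, bd.pvalue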
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array-like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
contains the min(n1, n2), where n1, n2 are cases that are zero
in one sample but one in the other sample.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
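# A minimal usage sketch for mcnemar (added for illustration; the paired
# counts are made up). Only the discordant cells (here 8 and 3) contribute
# to the test statistic.
def _example_mcnemar():
    paired = [[60, 8], [3, 29]]
    exact = mcnemar(paired, exact=True)     # exact binomial reference
    approx = mcnemar(paired, exact=False)   # chi^2 approximation
    return exact.pvalue, approx.pvalue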
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows, N, should be large for the
chisquare distribution to be a good approximation.
The null hypothesis of the test is that all treatments have the
same effect.
References
----------
http://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss  # just a sanity check on the computation
# From the SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
#q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df
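# A minimal usage sketch for cochrans_q (added for illustration; the
# binary responses are made up): rows are subjects (blocks), columns are
# treatments, entries are 0/1 outcomes.
def _example_cochrans_q():
    x = np.array([[1, 1, 0],
                  [1, 0, 0],
                  [1, 1, 1],
                  [0, 1, 0],
                  [1, 1, 0],
                  [1, 0, 1]])
    rslt = cochrans_q(x)
    return rslt.statistic, rslt.df, rslt.pvalue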
| bsd-3-clause |
JamiiTech/mplh5canvas | examples/multi_plot.py | 4 | 1357 | #!/usr/bin/python
"""Testbed for the animation functionality of the backend, with multiple figures.
It basically produces a long series of frames that get animated on the client
browser side, this time with two figures.
"""
import matplotlib
matplotlib.use('module://mplh5canvas.backend_h5canvas')
from pylab import *
import time
def refresh_data(ax):
t = arange(0.0 + count, 2.0 + count, 0.01)
s = sin(2*pi*t)
ax.lines[0].set_xdata(t)
ax.lines[0].set_ydata(s)
ax.set_xlim(t[0],t[-1])
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
plot(t, s, linewidth=1.0)
xlabel('time (s)')
ylabel('voltage (mV)')
title('First Post')
f = gcf()
ax = f.gca()
count = 0
f2 = figure()
ax2 = f2.gca()
ax2.set_xlabel('IMDB rating')
ax2.set_ylabel('South African Connections')
ax2.set_title('Luds chart...')
ax2.plot(arange(0.0, 5 + count, 0.01), arange(0.0, 5 + count, 0.01))
show(block=False, layout=2)
# show the figure manager but don't block script execution so the animation works
# layout=2 overrides the default layout manager, which only shows a single plot in the browser window
while True:
refresh_data(ax)
d = arange(0.0, 5 + count, 0.01)
ax2.lines[0].set_xdata(d)
ax2.lines[0].set_ydata(d)
ax2.set_xlim(d[0],d[-1])
ax2.set_ylim(d[0],d[-1])
f.canvas.draw()
f2.canvas.draw()
count += 0.01
time.sleep(1)
| bsd-3-clause |
aavanian/bokeh | bokeh/sampledata/tests/test_world_cities.py | 2 | 1963 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.world_cities as bsw
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.world_cities", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.world_cities as bsw
assert isinstance(bsw.data, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
xiawei0000/Kinectforactiondetect | ChalearnLAPSample.py | 1 | 41779 | # coding=gbk
#-------------------------------------------------------------------------------
# Name: Chalearn LAP sample
# Purpose: Provide easy access to Chalearn LAP challenge data samples
#
# Author: Xavier Baro
#
# Created: 21/01/2014
# Copyright: (c) Xavier Baro 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import zipfile
import shutil
import warnings
import cv2
import numpy
import csv
from PIL import Image, ImageDraw
from scipy.misc import imresize
class Skeleton(object):
""" Class that represents the skeleton information """
"""¹Ç¼ÜÀ࣬ÊäÈë¹Ç¼ÜÊý¾Ý£¬½¨Á¢Àà"""
#define a class to encode skeleton data
def __init__(self,data):
""" Constructor. Reads skeleton information from given raw data """
# Create an object from raw data
self.joins=dict();
pos=0
self.joins['HipCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Spine']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Head']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
def getAllData(self):
""" Return a dictionary with all the information for each skeleton node """
return self.joins
def getWorldCoordinates(self):
""" Get World coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][0]
return skel
def getJoinOrientations(self):
""" Get orientations of all skeleton nodes """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][1]
return skel
def getPixelCoordinates(self):
""" Get Pixel coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][2]
return skel
def toImage(self,width,height,bgColor):
""" Create an image for the skeleton information """
SkeletonConnectionMap = (['HipCenter','Spine'],['Spine','ShoulderCenter'],['ShoulderCenter','Head'],['ShoulderCenter','ShoulderLeft'], \
['ShoulderLeft','ElbowLeft'],['ElbowLeft','WristLeft'],['WristLeft','HandLeft'],['ShoulderCenter','ShoulderRight'], \
['ShoulderRight','ElbowRight'],['ElbowRight','WristRight'],['WristRight','HandRight'],['HipCenter','HipRight'], \
['HipRight','KneeRight'],['KneeRight','AnkleRight'],['AnkleRight','FootRight'],['HipCenter','HipLeft'], \
['HipLeft','KneeLeft'],['KneeLeft','AnkleLeft'],['AnkleLeft','FootLeft'])
im = Image.new('RGB', (width, height), bgColor)
draw = ImageDraw.Draw(im)
for link in SkeletonConnectionMap:
p=self.getPixelCoordinates()[link[1]]
p.extend(self.getPixelCoordinates()[link[0]])
draw.line(p, fill=(255,0,0), width=5)
for node in self.getPixelCoordinates().keys():
p=self.getPixelCoordinates()[node]
r=5
draw.ellipse((p[0]-r,p[1]-r,p[0]+r,p[1]+r),fill=(0,0,255))
del draw
image = numpy.array(im)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
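# A minimal usage sketch for Skeleton (added for illustration): the
# constructor expects one row of a *_skeleton.csv file, i.e. 9 values per
# joint (3 world coordinates, 4 orientation values, 2 pixel coordinates)
# for each of the 20 joints.
def _example_skeleton(csv_row):
    skel = Skeleton(csv_row)
    world = skel.getWorldCoordinates()   # joint name -> [x, y, z]
    pixels = skel.getPixelCoordinates()  # joint name -> [px, py]
    image = skel.toImage(640, 480, (255, 255, 255))
    return world['Head'], pixels['Head'], image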
## Gesture data class: given a sample path, builds the gesture data object
class GestureSample(object):
""" Class that allows to access all the information for a certain gesture database sample """
#define class to access gesture data samples
# Initialization: read the sample file
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=GestureSample('Sample0001.zip')
"""
# Check the given file
if not os.path.exists(fileName): #or not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Determine whether the sample is a zip file or a directory
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Open video access for Depth information
depthVideoPath=self.samplePath + os.path.sep + self.seqID + '_depth.mp4'
if not os.path.exists(depthVideoPath):
raise Exception("Invalid sample file. Depth data is not available")
self.depth = cv2.VideoCapture(depthVideoPath)
while not self.depth.isOpened():
self.depth = cv2.VideoCapture(depthVideoPath)
cv2.waitKey(500)
# Open video access for User segmentation information
userVideoPath=self.samplePath + os.path.sep + self.seqID + '_user.mp4'
if not os.path.exists(userVideoPath):
raise Exception("Invalid sample file. User segmentation data is not available")
self.user = cv2.VideoCapture(userVideoPath)
while not self.user.isOpened():
self.user = cv2.VideoCapture(userVideoPath)
cv2.waitKey(500)
# Read skeleton data
skeletonPath=self.samplePath + os.path.sep + self.seqID + '_skeleton.csv'
if not os.path.exists(skeletonPath):
raise Exception("Invalid sample file. Skeleton data is not available")
self.skeletons=[]
with open(skeletonPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.skeletons.append(Skeleton(row))
del filereader
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
self.data['fps']=int(row[1])
self.data['maxDepth']=int(row[2])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
if not os.path.exists(labelsPath):
#warnings.warn("Labels are not available", Warning)
self.labels=[]
else:
self.labels=[]
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
# Destructor
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
del self.rgb;
del self.depth;
del self.user;
shutil.rmtree(self.samplePath)
# Read a single frame from the video and return it
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
# The functions below all operate on specific frames of the data members
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
# Return the depth map, stored as 16-bit integers
def getDepth(self, frameNum):
""" Get the depth image for the given frame """
#get Depth frame
depthData=self.getFrame(self.depth,frameNum)
# Convert to grayscale
depthGray=cv2.cvtColor(depthData,cv2.cv.CV_RGB2GRAY)
# Convert to float point
depth=depthGray.astype(numpy.float32)
# Convert to depth values
depth=depth/255.0*float(self.data['maxDepth'])
depth=depth.round()
depth=depth.astype(numpy.uint16)
return depth
def getUser(self, frameNum):
""" Get user segmentation image for the given frame """
#get user segmentation frame
return self.getFrame(self.user,frameNum)
def getSkeleton(self, frameNum):
""" Get the skeleton information for a given frame. It returns a Skeleton object """
#get user skeleton for a given frame
# Check frame number
# Get total number of frames
numFrames = len(self.skeletons)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
return self.skeletons[frameNum-1]
def getSkeletonImage(self, frameNum):
""" Create an image with the skeleton image for a given frame """
return self.getSkeleton(frameNum).toImage(640,480,(255,255,255))
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
# Pack all modalities of a single frame into one large matrix
def getComposedFrame(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
skel=self.getSkeletonImage(frameNum)
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize1=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
compSize2=(max(user.shape[0],skel.shape[0]),user.shape[1]+skel.shape[1])
comp = numpy.zeros((compSize1[0]+ compSize2[0],max(compSize1[1],compSize2[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]=depth
comp[compSize1[0]:compSize1[0]+user.shape[0],:user.shape[1],:]=user
comp[compSize1[0]:compSize1[0]+skel.shape[0],user.shape[1]:user.shape[1]+skel.shape[1],:]=skel
return comp
def getComposedFrameOverlapUser(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getComposedFrame_480(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
rgb = rgb[topCut:-topCut,botCut:-botCut,:]
rgb = imresize(rgb, ratio, interp='bilinear')
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getDepth3DCNN(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
return mask * depth
def getDepthOverlapUser(self, frameNum, x_centre, y_centre, pixel_value, extractedFrameSize=224, upshift = 0):
""" Get a composition of all the modalities for a given frame """
halfFrameSize = extractedFrameSize/2
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
ratio = pixel_value/ 3000
# Build depth image
# get sample modalities
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
mask = imresize(mask, ratio, interp='nearest')
depth = imresize(depth, ratio, interp='bilinear')
depth_temp = depth * mask
depth_extracted = depth_temp[x_centre-halfFrameSize-upshift:x_centre+halfFrameSize-upshift, y_centre-halfFrameSize: y_centre+halfFrameSize]
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
depth_extracted = depth_extracted.round()
depth_extracted = depth_extracted.astype(numpy.uint8)
depth_extracted = cv2.applyColorMap(depth_extracted,cv2.COLORMAP_JET)
# Build final image
compSize=(depth.shape[0],depth.shape[1])
comp = numpy.zeros((compSize[0] + extractedFrameSize,compSize[1]+compSize[1],3), numpy.uint8)
# Create composition
comp[:depth.shape[0],:depth.shape[1],:]=depth
mask_new = numpy.tile(mask, (3,1,1))
mask_new = mask_new.transpose((1,2,0))
comp[:depth.shape[0],depth.shape[1]:depth.shape[1]+depth.shape[1],:]= mask_new * depth
comp[compSize[0]:,:extractedFrameSize,:]= depth_extracted
return comp
def getDepthCentroid(self, startFrame, endFrame):
""" Get a composition of all the modalities for a given frame """
x_centre = []
y_centre = []
pixel_value = []
for frameNum in range(startFrame, endFrame):
user=self.getUser(frameNum)
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
#depth = depth*255.0/float(self.data['maxDepth'])
mask = numpy.mean(user, axis=2) > 150
width, height = mask.shape
XX, YY, count, pixel_sum = 0, 0, 0, 0
for x in range(width):
for y in range(height):
if mask[x, y]:
XX += x
YY += y
count += 1
pixel_sum += depth[x, y]
if count>0:
x_centre.append(XX/count)
y_centre.append(YY/count)
pixel_value.append(pixel_sum/count)
return [numpy.mean(x_centre), numpy.mean(y_centre), numpy.mean(pixel_value)]
def getGestures(self):
""" Get the list of gesture for this sample. Each row is a gesture, with the format (gestureID,startFrame,endFrame) """
return self.labels
def getGestureName(self,gestureID):
""" Get the gesture label from a given gesture ID """
names=('vattene','vieniqui','perfetto','furbo','cheduepalle','chevuoi','daccordo','seipazzo', \
'combinato','freganiente','ok','cosatifarei','basta','prendere','noncenepiu','fame','tantotempo', \
'buonissimo','messidaccordo','sonostufo')
# Check the given file
if gestureID<1 or gestureID>20:
raise Exception("Invalid gesture ID <" + str(gestureID) + ">. Valid IDs are values between 1 and 20")
return names[gestureID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def play_video(self):
"""
play the video, Wudi adds this
"""
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while (self.rgb.isOpened()):
ret, frame = self.rgb.read()
cv2.imshow('frame',frame)
if cv2.waitKey(5) & 0xFF == ord('q'):
break
self.rgb.release()
cv2.destroyAllWindows()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
# Get the list of gestures from the ground truth and frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
# Get the list of gestures from prediction and frame activation
for row in self.getGestures():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
# Get the list of gestures without repetitions for ground truth and predicton
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
# Find false positives
falsePos=numpy.setdiff1d(predGestures, gtGestures)
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
def get_shift_scale(self, template, ref_depth, start_frame=10, end_frame=20, debug_show=False):
"""
Wudi added this method for extracting and normalizing depth w.r.t. Sample0003
"""
from skimage.feature import match_template
Feature_all = numpy.zeros(shape=(480, 640, end_frame-start_frame), dtype=numpy.uint16 )
count = 0
for frame_num in range(start_frame,end_frame):
depth_original = self.getDepth(frame_num)
mask = numpy.mean(self.getUser(frame_num), axis=2) > 150
Feature_all[:, :, count] = depth_original * mask
count += 1
depth_image = Feature_all.mean(axis = 2)
depth_image_normalized = depth_image * 1.0 / float(self.data['maxDepth'])
depth_image_normalized /= depth_image_normalized.max()
result = match_template(depth_image_normalized, template, pad_input=True)
# Locate the best template match and derive the shift and scaling
x, y = numpy.unravel_index(numpy.argmax(result), result.shape)
shift = [depth_image.shape[0]/2-x, depth_image.shape[1]/2-y]
subsize = 25 # we use 25 by 25 region as a measurement for median of distance
minX = max(x - subsize,0)
minY = max(y - subsize,0)
maxX = min(x + subsize,depth_image.shape[0])
maxY = min(y + subsize,depth_image.shape[1])
subregion = depth_image[minX:maxX, minY:maxY]
distance = numpy.median(subregion[subregion>0])
scaling = distance*1.0 / ref_depth
from matplotlib import pyplot as plt
print "[x, y, shift, distance, scaling]"
print str([x, y, shift, distance, scaling])
if debug_show:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(8, 4))
ax1.imshow(template)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(depth_image_normalized)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = template.shape
rect = plt.Rectangle((y-hcoin/2, x-wcoin/2), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
import cv2
from scipy.misc import imresize
rows,cols = depth_image_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_image_normalized, M, (cols, rows))
resize_image = imresize(affine_image, scaling)
resize_image_median = cv2.medianBlur(resize_image,5)
ax3.imshow(resize_image_median)
ax3.set_axis_off()
ax3.set_title('image_transformed')
# highlight matched region
hcoin, wcoin = resize_image_median.shape
rect = plt.Rectangle((wcoin/2-160, hcoin/2-160), 320, 320, edgecolor='r', facecolor='none')
ax3.add_patch(rect)
ax4.imshow(result)
ax4.set_axis_off()
ax4.set_title('`match_template`\nresult')
# highlight matched region
ax4.autoscale(False)
ax4.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
return [shift, scaling]
def get_shift_scale_depth(self, shift, scale, framenumber, IM_SZ, show_flag=False):
"""
Wudi added this method to extract segmented depth frame,
by a shift and scale
"""
depth_original = self.getDepth(framenumber)
mask = numpy.mean(self.getUser(framenumber), axis=2) > 150
resize_final_out = numpy.zeros((IM_SZ,IM_SZ))
if mask.sum() < 1000: # Kinect detect nothing
print "skip "+ str(framenumber)
flag = False
else:
flag = True
depth_user = depth_original * mask
depth_user_normalized = depth_user * 1.0 / float(self.data['maxDepth'])
depth_user_normalized = depth_user_normalized *255 /depth_user_normalized.max()
rows,cols = depth_user_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_user_normalized, M,(cols, rows))
resize_image = imresize(affine_image, scale)
resize_image_median = cv2.medianBlur(resize_image,5)
rows, cols = resize_image_median.shape
image_crop = resize_image_median[rows/2-160:rows/2+160, cols/2-160:cols/2+160]
resize_final_out = imresize(image_crop, (IM_SZ,IM_SZ))
if show_flag: # show the segmented images here
cv2.imshow('image',image_crop)
cv2.waitKey(10)
return [resize_final_out, flag]
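# A minimal usage sketch for GestureSample (added for illustration; it
# assumes a local sample archive such as 'Sample0001.zip'): iterate over
# the labelled gestures and fetch the RGB and depth frames where each
# gesture starts.
def _example_gesture_sample(path='Sample0001.zip'):
    sample = GestureSample(path)
    for gestureID, startFrame, endFrame in sample.getGestures():
        name = sample.getGestureName(gestureID)
        rgb = sample.getRGB(startFrame)      # BGR image as a numpy array
        depth = sample.getDepth(startFrame)  # uint16 depth values
        print name, startFrame, endFrame, rgb.shape, depth.max()
    return sample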
# Action data class
class ActionSample(object):
""" Class that allows to access all the information for a certain action database sample """
#define class to access actions data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=ActionSample('Sec01.zip')
"""
# Check the given file
        if not os.path.exists(fileName) or not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
        self.samplePath=self.dataPath + os.path.sep + self.seqID
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
self.labels=[]
if not os.path.exists(labelsPath):
warnings.warn("Labels are not available", Warning)
else:
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
        del self.rgb
shutil.rmtree(self.samplePath)
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
def getActions(self):
""" Get the list of gesture for this sample. Each row is an action, with the format (actionID,startFrame,endFrame) """
return self.labels
def getActionsName(self,actionID):
""" Get the action label from a given action ID """
names=('wave','point','clap','crouch','jump','walk','run','shake hands', \
'hug','kiss','fight')
# Check the given file
if actionID<1 or actionID>11:
raise Exception("Invalid action ID <" + str(actionID) + ">. Valid IDs are values between 1 and 11")
return names[actionID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
        # Get the list of gestures from the prediction and frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
        # Get the list of gestures from the ground truth and frame activation
for row in self.getActions():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
# Get the list of gestures without repetitions for ground truth and predicton
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
        # Find false positives (predicted gestures not present in the ground truth)
        falsePos = numpy.setdiff1d(predGestures, gtGestures)
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
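# Hedged usage sketch for ActionSample (file and folder names are assumptions):
#   sample = ActionSample('Sec01.zip')
#   frame = sample.getRGB(1)
#   sample.exportPredictions(predictions, './predictions')
#   score = sample.evaluate('./predictions/Sec01_prediction.csv')
# where `predictions` is a list of (actionID, startFrame, endFrame) rows.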
# Pose data class
class PoseSample(object):
""" Class that allows to access all the information for a certain pose database sample """
#define class to access gesture data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=PoseSample('Seq01.zip')
"""
# Check the given file
        if not os.path.exists(fileName) or not os.path.isfile(fileName):
raise Exception("Sequence path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
        self.samplePath=self.dataPath + os.path.sep + self.seqID
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath):
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Set path for rgb images
rgbPath=self.samplePath + os.path.sep + 'imagesjpg'+ os.path.sep
if not os.path.exists(rgbPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgbpath = rgbPath
# Set path for gt images
gtPath=self.samplePath + os.path.sep + 'maskspng'+ os.path.sep
if not os.path.exists(gtPath):
self.gtpath= "empty"
else:
self.gtpath = gtPath
frames=os.listdir(self.rgbpath)
self.numberFrames=len(frames)
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
shutil.rmtree(self.samplePath)
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
        if frameNum>self.numberFrames:
            raise Exception("Frame number has to be at most: " + str(self.numberFrames))
framepath=self.rgbpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'.jpg'
if not os.path.isfile(framepath):
raise Exception("RGB file does not exist: " + framepath)
return cv2.imread(framepath)
def getNumFrames(self):
return self.numberFrames
def getLimb(self, frameNum, actorID,limbID):
""" Get the BW limb image for a certain frame and a certain limbID """
if self.gtpath == "empty":
raise Exception("Limb labels are not available for this sequence. This sequence belong to the validation set.")
else:
limbpath=self.gtpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'_'+str(actorID)+'_'+str(limbID)+'.png'
            if frameNum>self.numberFrames:
                raise Exception("Frame number has to be at most: " + str(self.numberFrames))
            if actorID<1 or actorID>2:
                raise Exception("Invalid actor ID <" + str(actorID) + ">. Valid actor IDs are values between 1 and 2")
            if limbID<1 or limbID>14:
                raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid limb IDs are values between 1 and 14")
return cv2.imread(limbpath,cv2.CV_LOAD_IMAGE_GRAYSCALE)
def getLimbsName(self,limbID):
""" Get the limb label from a given limb ID """
names=('head','torso','lhand','rhand','lforearm','rforearm','larm','rarm', \
'lfoot','rfoot','lleg','rleg','lthigh','rthigh')
# Check the given file
if limbID<1 or limbID>14:
raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid IDs are values between 1 and 14")
return names[limbID-1]
def overlap_images(self, gtimage, predimage):
""" this function computes the hit measure of overlap between two binary images im1 and im2 """
[ret, im1] = cv2.threshold(gtimage, 127, 255, cv2.THRESH_BINARY)
[ret, im2] = cv2.threshold(predimage, 127, 255, cv2.THRESH_BINARY)
intersec = cv2.bitwise_and(im1, im2)
intersec_val = float(numpy.sum(intersec))
union = cv2.bitwise_or(im1, im2)
union_val = float(numpy.sum(union))
if union_val == 0:
return 0
else:
if float(intersec_val / union_val)>0.5:
return 1
else:
return 0
def exportPredictions(self, prediction,frame,actor,limb,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
prediction_filename = predPath+os.path.sep+ self.seqID[3:5] +'_'+ '%04d' %frame +'_'+str(actor)+'_'+str(limb)+'_prediction.png'
cv2.imwrite(prediction_filename,prediction)
def evaluate(self, predpath):
""" Evaluate this sample agains the ground truth file """
# Get the list of videos from ground truth
gt_list = os.listdir(self.gtpath)
# For each sample on the GT, search the given prediction
score = 0.0
nevals = 0
for gtlimbimage in gt_list:
# Avoid double check, use only labels file
if not gtlimbimage.lower().endswith(".png"):
continue
# Build paths for prediction and ground truth files
aux = gtlimbimage.split('.')
parts = aux[0].split('_')
seqID = parts[0]
gtlimbimagepath = os.path.join(self.gtpath,gtlimbimage)
predlimbimagepath= os.path.join(predpath) + os.path.sep + seqID+'_'+parts[1]+'_'+parts[2]+'_'+parts[3]+"_prediction.png"
#check predfile exists
if not os.path.exists(predlimbimagepath) or not os.path.isfile(predlimbimagepath):
raise Exception("Invalid video limb prediction file. Not all limb predictions are available")
#Load images
gtimage=cv2.imread(gtlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
predimage=cv2.imread(predlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
if cv2.cv.CountNonZero(cv2.cv.fromarray(gtimage)) >= 1:
score += self.overlap_images(gtimage, predimage)
nevals += 1
#release videos and return mean overlap
return score/nevals
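# Hedged usage sketch for PoseSample (paths are assumptions):
#   sample = PoseSample('Seq01.zip')
#   head_mask = sample.getLimb(1, 1, 1)          # actor 1, limb 1 ('head'), frame 1
#   sample.exportPredictions(pred_mask, 1, 1, 1, './predictions')
#   score = sample.evaluate('./predictions')     # fraction of limbs with IoU > 0.5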
| mit |
heli522/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (sum over 2x2 pixel blocks)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l, levels=[0.5],
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
burjorjee/evolve-parities | evolveparities.py | 1 | 5098 | from contextlib import closing
from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title
from numpy import sort, logical_xor, transpose, logical_not
from numpy import cumsum, zeros
from numpy.random import rand, shuffle
from numpy import mod, floor
import time
import cloud
from durus.file_storage import FileStorage
from durus.connection import Connection
def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen):
f = figure(1)
n = len(bitFreqs)
hold(False)
plot(range(n), bitFreqs,'b.', markersize=10)
hold(True)
plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10)
axis([0, n-1, 0, 1])
title("Generation = %s" % (gen,))
ylabel('Frequency of the Bit 1')
xlabel('Locus')
f.canvas.draw()
f.show()
def showExperimentTimeStamps():
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
return conn.get_root().keys()
def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None):
""" neap = "noisy effective attribute parity"
"""
pop = rand(m,n)<0.5
bitFreqHist= zeros((n,gens+1))
for t in range(gens+1):
print "Generation %s" % t
bitFreqs = pop.astype('float').sum(axis=0)/m
bitFreqHist[:,t] = transpose(bitFreqs)
if bitFreqVisualizer:
bitFreqVisualizer(bitFreqs,t)
fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) +
(rand(m) < probMisclassification).astype('byte'),2)
totalFitness = sum (fitnessVals)
cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness
parentIndices = zeros(2*m, dtype='int16')
markers = sort(rand(2*m))
ctr = 0
for idx in xrange(2*m):
while markers[idx]>cumNormFitnessVals[ctr]:
ctr += 1
parentIndices[idx] = ctr
shuffle(parentIndices)
crossoverMasks = rand(m, n) < 0.5
newPop = zeros((m, n), dtype='bool')
newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks]
newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)]
mutationMasks = rand(m, n)<probMutation
pop = logical_xor(newPop,mutationMasks)
return bitFreqHist[0, :], bitFreqHist[-1, :]
def f(gens):
k = 7
n= k + 1
effectiveAttrIndices = range(k)
probMutation = 0.004
probMisclassification = 0.20
popSize = 1500
jid = cloud.call(neap_uga, **dict(m=popSize,
n=n,
gens=gens,
probMutation=probMutation,
effectiveAttrIndices=effectiveAttrIndices,
probMisclassification=probMisclassification))
print "Kicked off trial %s" % jid
return jid
def cloud_result(jid):
result = cloud.result(jid)
print "Retrieved results for trial %s" % jid
return result
def run_trials():
numTrials = 3000
gens = 1000
from multiprocessing.pool import ThreadPool as Pool
pool = Pool(50)
jids = pool.map(f,[gens]*numTrials)
print "Done spawning trials. Retrieving results..."
results = pool.map(cloud_result, jids)
firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
print "Done retrieving results. Press Enter to serialize..."
raw_input()
for i, result in enumerate(results):
firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
conn.commit()
pool.close()
pool.join()
def render_results(timestamp=None):
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
db = conn.get_root()
if not timestamp:
timestamp = sorted(db.keys())[-1]
firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
print "Done deserializing results. Plotting..."
x = [(2, 'First', firstLocusFreqsHists, "effective"),
(3, 'Last', lastLocusFreqsHists, "non-effective")]
for i, pos, freqsHists, filename in x :
freqsHists = freqsHists[:,:801]
f = figure(i)
hold(False)
plot(transpose(freqsHists), color='grey')
hold(True)
maxGens = freqsHists.shape[1]-1
plot([0, maxGens], [.05,.05], 'k--')
plot([0, maxGens], [.95,.95], 'k--')
axis([0, maxGens, 0, 1])
xlabel('Generation')
ylabel('1-Frequency of the '+pos+' Locus')
f.canvas.draw()
f.show()
savefig(filename+'.png', format='png', dpi=200)
if __name__ == "__main__":
cloud.start_simulator()
run_trials()
render_results()
print "Done plotting results. Press Enter to end..."
raw_input()
| gpl-3.0 |
matthiasrichter/AliceO2 | Analysis/Scripts/update_ccdb.py | 3 | 6042 | #!/usr/bin/env python3
# Copyright 2019-2020 CERN and copyright holders of ALICE O2.
# See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
# All rights not expressly granted are reserved.
#
# This software is distributed under the terms of the GNU General Public
# License v3 (GPL Version 3), copied verbatim in the file "COPYING".
#
# In applying this license CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""
Script to update the CCDB with timestamp non-overlapping objects.
If an object is found in the range specified, the object is split into two.
If the requested range was overlapping three objects are uploaded on CCDB:
1) latest object with requested timestamp validity
2) old object with validity [old_lower_validity-requested_lower_bound]
3) old object with validity [requested_upper_bound, old_upper_validity]
Author: Nicolo' Jacazio on 2020-06-22
TODO add support for 3 files update
"""
import subprocess
from datetime import datetime
import matplotlib.pyplot as plt
import argparse
def convert_timestamp(ts):
"""
Converts the timestamp in milliseconds in human readable format
"""
return datetime.utcfromtimestamp(ts/1000).strftime('%Y-%m-%d %H:%M:%S')
def get_ccdb_obj(path, timestamp, dest="/tmp/", verbose=0):
"""
Gets the ccdb object from 'path' and 'timestamp' and downloads it into 'dest'
"""
if verbose:
print("Getting obj", path, "with timestamp",
timestamp, convert_timestamp(timestamp))
cmd = f"o2-ccdb-downloadccdbfile --path {path} --dest {dest} --timestamp {timestamp}"
subprocess.run(cmd.split())
def get_ccdb_obj_validity(path, dest="/tmp/", verbose=0):
"""
Gets the timestamp validity for an object downloaded from CCDB.
Returns a list with the initial and end timestamps.
"""
cmd = f"o2-ccdb-inspectccdbfile {dest}{path}/snapshot.root"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
output = output.decode("utf-8").split("\n")
error = error.decode("utf-8").split("\n") if error is not None else error
if verbose:
print("out:")
print(*output, "\n")
print("err:")
print(error)
result = list(filter(lambda x: x.startswith('Valid-'), output))
ValidFrom = result[0].split()
ValidUntil = result[1].split()
return [int(ValidFrom[-1]), int(ValidUntil[-1])]
def upload_ccdb_obj(path, timestamp_from, timestamp_until, dest="/tmp/", meta=""):
"""
Uploads a new object to CCDB in the 'path' using the validity timestamp specified
"""
print("Uploading obj", path, "with timestamp", [timestamp_from, timestamp_until],
convert_timestamp(timestamp_from), convert_timestamp(timestamp_until))
key = path.split("/")[-1]
cmd = f"o2-ccdb-upload -f {dest}{path}/snapshot.root "
cmd += f"--key {key} --path {path} "
cmd += f"--starttimestamp {timestamp_from} --endtimestamp {timestamp_until} --meta \"{meta}\""
subprocess.run(cmd.split())
def main(path, timestamp_from, timestamp_until, verbose=0, show=False):
"""
Used to upload a new object to CCDB in 'path' valid from 'timestamp_from' to 'timestamp_until'
Gets the object from CCDB specified in 'path' and for 'timestamp_from-1'
Gets the object from CCDB specified in 'path' and for 'timestamp_until+1'
If required plots the situation before and after the update
"""
get_ccdb_obj(path, timestamp_from-1)
val_before = get_ccdb_obj_validity(path, verbose=verbose)
get_ccdb_obj(path, timestamp_until+1)
val_after = get_ccdb_obj_validity(path, verbose=verbose)
overlap_before = val_before[1] > timestamp_from
overlap_after = val_after[0] < timestamp_until
if verbose:
        if overlap_before:
            print("Previous object overlaps")
        if overlap_after:
            print("Next object overlaps")
trimmed_before = val_before if not overlap_before else [
val_before[0], timestamp_from - 1]
trimmed_after = val_after if not overlap_after else [
timestamp_until+1, val_after[1]]
if show:
        fig, ax = plt.subplots()
def bef_af(v, y):
return [v[0] - 1] + v + [v[1] + 1], [0, y, y, 0]
if True:
ax.plot(*bef_af(val_before, 0.95), label='before')
ax.plot(*bef_af(val_after, 1.05), label='after')
if False:
ax.plot(*bef_af(trimmed_before, 0.9), label='trimmed before')
ax.plot(*bef_af(trimmed_after, 1.1), label='trimmed after')
ax.plot(*bef_af([timestamp_from, timestamp_until], 1), label='object')
xlim = 10000000
plt.xlim([timestamp_from-xlim, timestamp_until+xlim])
plt.ylim(0, 2)
plt.xlabel('Timestamp')
plt.ylabel('Validity')
plt.legend()
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Uploads timestamp non overlapping objects to CCDB."
"Basic example: `./update_ccdb.py qc/TOF/TOFTaskCompressed/hDiagnostic 1588956517161 1588986517161 --show --verbose`")
parser.add_argument('path', metavar='path_to_object', type=str,
help='Path of the object in the CCDB repository')
parser.add_argument('timestamp_from', metavar='from_timestamp', type=int,
help='Timestamp of start for the new object to use')
parser.add_argument('timestamp_until', metavar='until_timestamp', type=int,
help='Timestamp of stop for the new object to use')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--show', '-s', action='count', default=0)
args = parser.parse_args()
main(path=args.path,
timestamp_from=args.timestamp_from,
timestamp_until=args.timestamp_until,
verbose=args.verbose,
show=args.show)
| gpl-3.0 |
annahs/atmos_research | WHI_long_term_2min_data_to_db.py | 1 | 8596 | import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
start = datetime(2009,7,15,4) #2009 - 20090628 2010 - 20100610 2012 - 20100405
end = datetime(2009,8,17) #2009 - 20090816 2010 - 20100726 2012 - 20100601
timestep = 6.#1./30 #hours
sample_min = 117 #117 for all 2009-2012
sample_max = 123 #123 for all 2009-2012
yag_min = 3.8 #3.8 for all 2009-2012
yag_max = 6 #6 for all 2009-2012
BC_VED_min = 70
BC_VED_max = 220
min_scat_pkht = 20
mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
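#sphere mass in fg: VED in nm -> cm (factor 1e-7), volume = (pi/6)*d^3, rBC density 1.8 g/cm^3, g -> fg (factor 1e15)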
lag_threshold_2009 = 0.1
lag_threshold_2010 = 0.25
lag_threshold_2012 = 1.5
print 'mass limits', mass_min, mass_max
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def check_spike_times(particle_start_time,particle_end_time):
cursor.execute('''SELECT count(*)
FROM whi_spike_times_2009to2012
WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s)
OR (spike_start_UTC <= %s AND spike_end_UTC > %s)
''',
(particle_start_time,particle_start_time,particle_end_time,particle_end_time))
spike_count = cursor.fetchall()[0][0]
return spike_count
def get_hysplit_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_hysplit_hourly_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
hy_id_list = cursor.fetchall()
if hy_id_list == []:
hy_id = None
else:
hy_id = hy_id_list[0][0]
return hy_id
def get_met_info(particle_start_time):
cursor.execute('''SELECT id,pressure_Pa,room_temp_C
FROM whi_sampling_conditions
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
met_list = cursor.fetchall()
if met_list == []:
met_list = [[np.nan,np.nan,np.nan]]
return met_list[0]
def get_gc_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_gc_hourly_bc_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
gc_id_list = cursor.fetchall()
if gc_id_list == []:
gc_id = None
else:
gc_id = gc_id_list[0][0]
return gc_id
def get_sample_factor(UNIX_start):
date_time = datetime.utcfromtimestamp(UNIX_start)
sample_factors_2012 = [
[datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9), 3.0],
[datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25), 1.0],
[datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13), 3.0],
[datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0],
]
if date_time.year in [2009,2010]:
sample_factor = 1.0
if date_time.year == 2012:
for date_range in sample_factors_2012:
start_date = date_range[0]
end_date = date_range[1]
range_sample_factor = date_range[2]
if start_date<= date_time < end_date:
sample_factor = range_sample_factor
return sample_factor
def lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos):
	#compute the lag first, then classify it; `start` (the current interval) supplies the year
	long_lags = 0
	short_lags = 0
	lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2 #us
	if (-10 < lag_time < 10):
		if start.year == 2009 and lag_time > lag_threshold_2009:
			long_lags = 1
		elif start.year == 2010 and lag_time > lag_threshold_2010:
			long_lags = 1
		elif start.year == 2012 and lag_time > lag_threshold_2012:
			long_lags = 1
		else:
			short_lags = 1
	else:
		lag_time = np.nan
	return [lag_time,long_lags,short_lags]
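#lag time (incandescence peak minus scattering peak, in us) is used as a coating proxy:
#thickly coated rBC particles incandesce later relative to their scattering peak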
#query to add 1h mass conc data
add_data = ('''INSERT INTO whi_sp2_2min_data
(UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass_conc,rBC_mass_conc_err,volume_air_sampled,sampling_duration,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id)
VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass_conc)s,%(rBC_mass_conc_err)s,%(volume_air_sampled)s,%(sampling_duration)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)'''
)
#
multiple_records = []
i=1
while start <= end:
long_lags = 0
short_lags = 0
if (4 <= start.hour < 16):
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = UNIX_start + timestep*3600.0
print start, UNIX_start+60
print datetime.utcfromtimestamp(UNIX_end)
#filter on hk data here
cursor.execute('''(SELECT
mn.UNIX_UTC_ts_int_start,
mn.UNIX_UTC_ts_int_end,
mn.rBC_mass_fg_BBHG,
mn.rBC_mass_fg_BBHG_err,
mn.BB_incand_pk_pos,
mn.BB_scat_pk_pos,
mn.BB_scat_pkht,
hk.sample_flow,
mn.BB_incand_HG
FROM whi_sp2_particle_data mn
FORCE INDEX (hourly_binning)
JOIN whi_hk_data hk on mn.HK_id = hk.id
WHERE
mn.UNIX_UTC_ts_int_start >= %s
AND mn.UNIX_UTC_ts_int_end < %s
AND hk.sample_flow >= %s
AND hk.sample_flow < %s
AND hk.yag_power >= %s
AND hk.yag_power < %s)''',
(UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max))
ind_data = cursor.fetchall()
data={
'rBC_mass_fg':[],
'rBC_mass_fg_err':[],
'lag_time':[]
}
		total_sample_vol = 0
		sample_factor = get_sample_factor(UNIX_start) #default so a record can be written even if no particle passes the mass filter
for row in ind_data:
ind_start_time = float(row[0])
ind_end_time = float(row[1])
bbhg_mass_corr11 = float(row[2])
bbhg_mass_corr_err = float(row[3])
BB_incand_pk_pos = float(row[4])
BB_scat_pk_pos = float(row[5])
BB_scat_pk_ht = float(row[6])
sample_flow = float(row[7]) #in vccm
incand_pkht = float(row[8])
#filter spike times here
if check_spike_times(ind_start_time,ind_end_time):
print 'spike'
continue
#skip the long interval
if (ind_end_time - ind_start_time) > 540:
print 'long interval'
continue
#skip if no sample flow
if sample_flow == None:
print 'no flow'
continue
#get sampling conditions id and met conditions
met_data = get_met_info(UNIX_start)
met_id = met_data[0]
pressure = met_data[1]
temperature = met_data[2]+273.15
correction_factor_for_STP = (273*pressure)/(101325*temperature)
sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60)*correction_factor_for_STP #/60 b/c sccm and time in secs
total_sample_vol = total_sample_vol + sample_vol
bbhg_mass_corr = 0.01244+0.0172*incand_pkht
if (mass_min <= bbhg_mass_corr < mass_max):
#get sample factor
sample_factor = get_sample_factor(UNIX_start)
data['rBC_mass_fg'].append(bbhg_mass_corr*sample_factor)
data['rBC_mass_fg_err'].append(bbhg_mass_corr_err)
#only calc lag time if there is a scattering signal
if BB_scat_pk_ht > min_scat_pkht:
lags = lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos)
data['lag_time'].append(lags[0])
long_lags += lags[1]
short_lags += lags[2]
tot_rBC_mass_fg = sum(data['rBC_mass_fg'])
tot_rBC_mass_uncer = sum(data['rBC_mass_fg_err'])
rBC_number = len(data['rBC_mass_fg'])
mean_lag = float(np.mean(data['lag_time']))
if np.isnan(mean_lag):
mean_lag = None
#get hysplit_id
hysplit_id = None #get_hysplit_id(UNIX_start)
#get GC id
gc_id = None #get_gc_id(UNIX_start)
if total_sample_vol != 0:
mass_conc = (tot_rBC_mass_fg/total_sample_vol)
mass_conc_uncer = (tot_rBC_mass_uncer/total_sample_vol)
#add to db
single_record = {
'UNIX_UTC_start_time' :UNIX_start,
'UNIX_UTC_end_time' :UNIX_end,
'number_particles' :rBC_number,
'rBC_mass_conc' :mass_conc,
'rBC_mass_conc_err' :mass_conc_uncer,
'volume_air_sampled' :total_sample_vol,
'sampling_duration' :(total_sample_vol/2),
'mean_lag_time' :mean_lag,
'number_long_lag' :long_lags,
'number_short_lag' :short_lags,
'sample_factor' :sample_factor,
'hysplit_hourly_id' :hysplit_id,
'whi_sampling_cond_id' :met_id,
'gc_hourly_id' :gc_id,
}
multiple_records.append((single_record))
#bulk insert to db table
if i%1 == 0:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
#increment count
i+= 1
start += timedelta(hours = timestep)
#bulk insert of remaining records to db
if multiple_records != []:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
cnx.close()
| mit |
inviwo/inviwo | data/scripts/matplotlib_create_transferfunction.py | 2 | 1270 | # Inviwo Python script
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import inviwopy
from inviwopy.glm import vec2,vec3,vec4
#http://matplotlib.org/examples/color/colormaps_reference.html
#Perceptually Uniform Sequential : #['viridis', 'inferno', 'plasma', 'magma']
#Sequential : #['Blues', 'BuGn', 'BuPu','GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu','Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
#Diverging : #['afmhot', 'autumn', 'bone', 'cool','copper', 'gist_heat', 'gray', 'hot','pink', 'spring', 'summer', 'winter']
#Qualitative : #['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic']
#Miscellaneous : #['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3']
#Sequential : #['gist_earth', 'terrain', 'ocean', 'gist_stern','brg', 'CMRmap', 'cubehelix','gnuplot', 'gnuplot2', 'gist_ncar', 'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
tf = inviwopy.app.network.VolumeRaycaster.transferFunction
tf.clear()
cmapName = "viridis"
cmap=plt.get_cmap(cmapName)
N = 128
for i in range(0,N,1):
x = i / (N-1)
a = 1.0
color = cmap(x)
tf.add(x, vec4(color[0],color[1],color[2], a))
| bsd-2-clause |
xyguo/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linetypes='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
acimmarusti/isl_exercises | chap3/chap3ex8.py | 1 | 1315 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
#from sklearn.linear_model import LinearRegression
#import scipy, scipy.stats
#from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import variance_inflation_factor, summary_table
filename = '../Auto.csv'
data = pd.read_csv(filename, na_values='?').dropna()
#Quantitative and qualitative predictors#
print(data.dtypes)
#Simple linear regression#
slinreg = smf.ols('mpg ~ horsepower', data=data).fit()
print(slinreg.summary())
st, fitdat, ss2 = summary_table(slinreg, alpha=0.05)
fittedvalues = fitdat[:,2]
predict_mean_se = fitdat[:,3]
predict_mean_ci_low, predict_mean_ci_upp = fitdat[:,4:6].T
predict_ci_low, predict_ci_upp = fitdat[:,6:8].T
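# predict_mean_ci_* bound the mean response (confidence interval for the fit);
# predict_ci_* bound individual new observations (the wider prediction interval)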
x = data['horsepower']
y = data['mpg']
#Residuals#
resd1 = y - fittedvalues
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y, 'o')
ax1.plot(x, fittedvalues, 'g-')
ax1.plot(x, predict_ci_low, 'r--')
ax1.plot(x, predict_ci_upp, 'r--')
ax1.plot(x, predict_mean_ci_low, 'b--')
ax1.plot(x, predict_mean_ci_upp, 'b--')
ax2.plot(resd1, fittedvalues, 'o')
plt.show()
| gpl-3.0 |
naturali/tensorflow | tensorflow/examples/skflow/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# You can define you configurations by providing a RunConfig object to
# estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |