text_prompt (stringlengths 168 – 30.3k) | code_prompt (stringlengths 67 – 124k) |
---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id="ref0"></a>
Step2: Use the following function to calculate accuracy
Step3: <a id="ref1"></a>
Step4: Define a function to train the model
Step5: <a id="ref2"></a>
Step6: Dataset object
Step7: <a id="ref3"></a>
Step8: <a id="ref3"></a>
Step9: <a id="ref4"></a>
| <ASSISTANT_TASK:>
Python Code:
!conda install -y torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions_2class(model,data_set):
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00','#00AAFF'])
X=data_set.x.numpy()
y=data_set.y.numpy()
h = .02
x_min, x_max = X[:, 0].min()-0.1 , X[:, 0].max()+0.1
y_min, y_max = X[:, 1].min()-0.1 , X[:, 1].max() +0.1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))
XX=torch.Tensor(np.c_[xx.ravel(), yy.ravel()])
yhat=np.logical_not((model(XX)[:,0]>0.5).numpy()).reshape(xx.shape)
plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
plt.plot(X[y[:,0]==0,0],X[y[:,0]==0,1],'o',label='y=0')
plt.plot(X[y[:,0]==1,0],X[y[:,0]==1,1],'ro',label='y=1')
plt.title("decision region")
plt.legend()
def accuracy(model,data_set):
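# Fraction of samples whose thresholded network output (> 0.5) matches the label.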
return np.mean(data_set.y.view(-1).numpy()==(model(data_set.x)[:,0]>0.5).numpy())
class Net(nn.Module):
def __init__(self,D_in,H,D_out):
super(Net,self).__init__()
self.linear1=nn.Linear(D_in,H)
self.linear2=nn.Linear(H,D_out)
def forward(self,x):
x=torch.sigmoid(self.linear1(x))
x=torch.sigmoid(self.linear2(x))
return x
def train(data_set,model,criterion, train_loader, optimizer, epochs=5):
LOSS=[]
ACC=[]
for epoch in range(epochs):
for x,y in train_loader:
optimizer.zero_grad()
yhat=model(x)
loss=criterion(yhat,y)
loss.backward()
optimizer.step()
ACC.append(accuracy(model,data_set))
LOSS.append(loss.item())
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(LOSS,color=color)
ax1.set_xlabel('epoch',color=color)
ax1.set_ylabel('total loss',color=color)
ax1.tick_params(axis='y', color=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color) # we already handled the x-label with ax1
ax2.plot( ACC, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
return LOSS
from torch.utils.data import Dataset, DataLoader
class XOR_Data(Dataset):
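# Synthetic XOR dataset: equal numbers of samples at the four corners (0,0), (0,1), (1,0),
# (1,1) with small Gaussian noise added to x; the label is the XOR of the two coordinates.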
def __init__(self,N_s=100):
self.x=torch.zeros((N_s,2))
self.y=torch.zeros((N_s,1))
for i in range(N_s//4):
self.x[i,:]=torch.Tensor([0.0,0.0])
self.y[i,0]=torch.Tensor([0.0])
self.x[i+N_s//4,:]=torch.Tensor([0.0,1.0])
self.y[i+N_s//4,0]=torch.Tensor([1.0])
self.x[i+N_s//2,:]=torch.Tensor([1.0,0.0])
self.y[i+N_s//2,0]=torch.Tensor([1.0])
self.x[i+3*N_s//4,:]=torch.Tensor([1.0,1.0])
self.y[i+3*N_s//4,0]=torch.Tensor([0.0])
self.x=self.x+0.01*torch.randn((N_s,2))
self.len=N_s
#self.y=self.y.type(torch.LongTensor)
def __getitem__(self,index):
return self.x[index],self.y[index]
def __len__(self):
return self.len
def plot_stuff(self):
plt.plot(self.x[self.y[:,0]==0,0].numpy(),self.x[self.y[:,0]==0,1].numpy(),'o',label="y=0")
plt.plot(self.x[self.y[:,0]==1,0].numpy(),self.x[self.y[:,0]==1,1].numpy(),'ro',label="y=1")
plt.legend()
data_set=XOR_Data()
data_set.plot_stuff()
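# The original notebook presumably instantiated the network here before training;
# the hidden-layer width (4) is an assumption -- any small value works for XOR.
model = Net(2, 4, 1)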
learning_rate=0.001
criterion=nn.BCELoss()
optimizer=torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader=DataLoader(dataset=data_set,batch_size=1)
LOSS12=train(data_set,model,criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model,data_set)
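# Re-create the model (assumed) so the next learning-rate experiment starts from fresh weights.
model = Net(2, 4, 1)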
learning_rate=0.1
criterion=nn.BCELoss()
optimizer=torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader=DataLoader(dataset=data_set,batch_size=1)
LOSS12=train(data_set,model,criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model,data_set)
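# Re-create the model again (assumed) for the repeated experiment below.
model = Net(2, 4, 1)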
learning_rate=0.1
criterion=nn.BCELoss()
optimizer=torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader=DataLoader(dataset=data_set,batch_size=1)
LOSS12=train(data_set,model,criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model,data_set)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: original output
Step2: Recreate validation set and sample
Step3: Examine the driver_imgs_list.csv
| <ASSISTANT_TASK:>
Python Code:
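# Assumes earlier notebook cells (not included in this snippet) set DATA_HOME_DIR and
# categories (the class folders c0..c9) and imported glob, os, numpy as np, pandas as pd,
# shutil's copyfile and IPython's display/HTML helpers.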
#Create directories
%cd $DATA_HOME_DIR
# did this once
%mkdir valid
%mkdir results
%mkdir -p sample/train
%mkdir -p sample/test
%mkdir -p sample/valid
%mkdir -p sample/results
%mkdir -p test/unknown
# Create subdirectories
for c in categories:
%mkdir -p valid/{c}
%mkdir -p sample/train/{c}
%mkdir -p sample/valid/{c}
%cd $DATA_HOME_DIR/train
# how many images we talking about?
for c in categories:
g = glob(c+"/*.jpg")
print c, len(g)
# going to take 20% or ~460 in each category for validation
validation_ratio = 0.2
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
num_valid = int(validation_ratio*len(g))
print num_valid
for i in range(num_valid):
#print shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i]
os.rename(shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i])
# now create the sample train subset of 20 per category
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
for i in range(20):
#print shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i]
copyfile(shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i])
%cd $DATA_HOME_DIR/valid
# now create the sample valid subset of 5 per category
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
for i in range(5):
#print shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i]
copyfile(shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i])
!ls {DATA_HOME_DIR}/train/* |wc -l
!ls {DATA_HOME_DIR}/valid/* |wc -l
!ls {DATA_HOME_DIR}/test/* |wc -l
!ls {DATA_HOME_DIR}/sample/train/* |wc -l
!ls {DATA_HOME_DIR}/sample/valid/* |wc -l
# Create single 'unknown' class for test set
%cd $DATA_HOME_DIR/test
%mv *.jpg unknown
# and sample test, too
%cd $DATA_HOME_DIR/sample/test
%mkdir unknown
# sample the test set
%cd $DATA_HOME_DIR/test/unknown
g = glob("*.jpg")
shuf = np.random.permutation(g)
for i in range(100):
#print shuf[i], DATA_HOME_DIR+'/sample/test/unknown/'+shuf[i]
copyfile(shuf[i], DATA_HOME_DIR+'/sample/test/unknown/'+shuf[i])
# So, move validation data back.
%cd $DATA_HOME_DIR
for c in categories:
g = glob("valid/"+c+"/*.jpg")
for i in range(len(g)):
#print g[i], g[i].replace('valid','train')
os.rename(g[i], g[i].replace('valid','train'))
%cd $DATA_HOME_DIR
!ls sample/
#results test train valid
!rm -rf sample/results/*
!rm sample/test/unknown/*
!rm sample/train/c*/*jpg
!rm sample/valid/c*/*jpg
%cd $DATA_HOME_DIR
driver_imgs = pd.read_csv('driver_imgs_list.csv')
driver_imgs.head()
subjects = driver_imgs.subject.unique()
len(subjects), int(0.2*len(subjects))
subjects
# let's look at some subject examples
#images = []
def img_div(img,txt,idx):
W=220
H=200
N=4
float = idx%N < (N-1) # turn off on last one
fs = ""
if float:
fs = 'style="float: left;"'
s = "<div %s>"%(fs)
s += "<img width=%dpx height=%dpx src='%s'/>%s"%(W,H,img,txt)
s += "</div>"
return s
def show_subjects(subj):
html = ""
for j,s in enumerate(subj):
i = driver_imgs[driver_imgs.subject == s].index[0]
classname = driver_imgs.iloc[i].classname
img = driver_imgs.iloc[i].img
html += img_div("/files/kaggle/statefarm/data/train/"+classname+"/"+img, s, j)
display(HTML(html))
show_subjects(subjects)
# we should probably make sure validation & testing both have male/female representation
# males
males = ['p002', 'p012', 'p014', 'p015', 'p016', 'p021', 'p024', 'p026', 'p035', 'p039', 'p047', 'p051', 'p056', 'p075']
# females
females = ['p022', 'p041', 'p042', 'p045', 'p049', 'p050', 'p052', 'p061', 'p064', 'p066', 'p072', 'p081']
show_subjects(males)
show_subjects(females)
len(males), len(females), len(subjects)
0.2*len(males), 0.2*len(females)
# okay 3 males & 2 females in our validation set
# choosing p045, p049 females
set(males).intersection(set(females))
np.random.permutation(males)[:3]
# gave ['p035', 'p056', 'p075']
# okay this is the set I came up with. BUT, Jeremy says he only used 3
validation_subjects = ['p021', 'p056', 'p075'] + ['p045', 'p049']
# let's try 3, then.
validation_subjects = ['p021', 'p056'] + ['p045']
show_subjects(validation_subjects)
validation_df = driver_imgs[driver_imgs.subject.isin(validation_subjects)]
# move our validation images from train to valid
for i,x in validation_df.iterrows():
#if i < 10:
# print x.classname, x.img
fr = DATA_HOME_DIR+'/train/' + x.classname + '/' + x.img
to = DATA_HOME_DIR+'/valid/' + x.classname + '/' + x.img
#print fr, to
os.rename(fr,to)
%cd $DATA_HOME_DIR/valid
# how many images we talking about?
for c in categories:
g = glob(c+"/*.jpg")
print c, len(g)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot histogram of average ratings by revision
Step2: Selected harassing and aggressive revisions by quartile
Step3: Inter-Annotator Agreement
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('display.max_colwidth', 1000)
# Download data from google drive (Respect Eng / Wiki Collab): wikipdia data/v0_annotated
dat = pd.read_csv('../data/experiment-0.csv')
# Remove test questions
dat = dat[dat['_golden'] == False]
dat['num_aggressive_na'] = np.isnan(dat['how_aggressive_or_friendly'])
dat['num_attack_na'] = np.isnan(dat['is_harassment_or_attack'])
def create_column_of_counts_from_nums(df, col):
return df.apply(lambda x: int(col) == x)
aggressive_columns = ['-2', '-1', '0', '1', '2']
for col in aggressive_columns:
dat[col] = create_column_of_counts_from_nums(dat['how_aggressive_or_friendly'], col)
dat['not_attack_1'] = 1 - dat['is_harassment_or_attack']
dat['not_attack_0'] = dat['is_harassment_or_attack']
# Group the data
agg_dict = dict.fromkeys(aggressive_columns, 'sum')
agg_dict.update({'clean_diff': 'first', 'num_aggressive_na': 'sum', 'num_attack_na': 'sum',
'_id':'count', 'not_attack_0':'sum', 'not_attack_1': 'sum'})
grouped_dat = dat.groupby(['rev_id'], as_index=False).agg(agg_dict)
# Get rid of data which the majority thinks is not in English or not readable
grouped_dat = grouped_dat[(grouped_dat['num_aggressive_na'] <= grouped_dat['_id']/2) & (grouped_dat['num_attack_na'] <= grouped_dat['_id']/2)]
def compute_aggression_score(x, cols):
n = 0
s = 0
for col in cols:
s = s + x[col]*int(col)
n = n + x[col]
return s/n
grouped_dat['aggression_score'] = grouped_dat[aggressive_columns].dot([-2,-1,0,1,2])/grouped_dat['_id']
grouped_dat['attack_score'] = grouped_dat['not_attack_0']/grouped_dat['_id']
def hist_comments(df, bins, plot_by, title):
plt.figure()
sliced_array = df[[plot_by]]
weights = np.ones_like(sliced_array)/len(sliced_array)
sliced_array.plot.hist(bins = bins, legend = False, title = title, weights=weights)
plt.ylabel('Proportion')
plt.xlabel('Average Score')
bins = np.linspace(-3,3,11)
hist_comments(grouped_dat, bins, 'aggression_score', 'Average Aggressiveness Rating')
bins = np.linspace(0,1,9)
for col in ['attack_score']:
hist_comments(grouped_dat, bins, col, 'Average Attack Rating')
def sorted_comments(df, sort_by, quartile, num, is_ascending = True):
n = df.shape[0]
start_index = int(quartile*n)
return df[['clean_diff', 'aggression_score', 'attack_score']].sort_values(
by=sort_by, ascending = is_ascending)[start_index:start_index + num]
# Least aggressive comments
sorted_comments(grouped_dat, 'aggression_score', 0, 5)
# Median aggressive comments
sorted_comments(grouped_dat, 'aggression_score', 0.5, 5)
# Most aggressive comments
sorted_comments(grouped_dat, 'aggression_score', 0, 5, False)
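# Inter-annotator agreement below is Krippendorff's alpha = 1 - D_o/D_e, where D_o is the
# observed disagreement taken from the coincidence matrix and D_e the disagreement
# expected by chance.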
def add_row_to_coincidence(o, row, columns):
m_u = row.sum(1)
for i in columns:
for j in columns:
if i == j:
o[i][j] = o[i][j] + row[i]*(row[i]-1)/(m_u-1)
else:
o[i][j] = o[i][j] + row[i]*row[j]/(m_u-1)
return o
def make_coincidence_matrix(df, columns):
df = df[columns]
n = df.shape[0]
num_cols = len(columns)
o = pd.DataFrame(np.zeros((num_cols,num_cols)), index = columns, columns=columns)
for i in xrange(n):
o = add_row_to_coincidence(o, df[i:i+1], columns)
return o
def binary_distance(i,j):
return i!=j
def interval_distance(i,j):
return (int(i)-int(j))**2
def e(n, i, j):
if i == j:
# expected coincidence: divide by (total - 1), not (total) - 1
return n[i]*(n[i]-1)/(sum(n)-1)
else:
return n[i]*n[j]/(sum(n)-1)
def D_e(o, columns, distance):
n = o.sum(1)
output = 0
for i in columns:
for j in columns:
output = output + e(n,i,j)*distance(i,j)
return output
def D_o(o, columns, distance):
output = 0
for i in columns:
for j in columns:
output = output + o[i][j]*distance(i,j)
return output
def Krippendorf_alpha(df, columns, distance = binary_distance, o = None):
if o is None:
o = make_coincidence_matrix(df, columns)
d_o = D_o(o, columns, distance)
d_e = D_e(o, columns, distance)
return (1 - d_o/d_e)
print "Krippendorf's Alpha (aggressiveness): "
Krippendorf_alpha(grouped_dat, aggressive_columns, distance = interval_distance)
print "Krippendorf's Alpha (attack): "
Krippendorf_alpha(grouped_dat, ['not_attack_0', 'not_attack_1'])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Model-based parametric regression
Step2: 2.2. Summary
Step3: Fit a Bayesian linear regression model assuming ${\bf z}={\bf x}$ and
Step4: To do so, compute the posterior weight distribution using the first $k$ samples in the complete dataset, for $k = 1,2,4,8,\ldots, 128$. Draw all these posteriors along with the prior distribution in the same plot.
Step5: Exercise 2
Step6: 3.5 Maximum likelihood vs Bayesian Inference. Making predictions
Step7: Posterior distribution of the target
Step8: Not only do we obtain a better predictive model, but we also have confidence intervals (error bars) for the predictions.
Step9: The above curve may change the position of its maximum from run to run.
| <ASSISTANT_TASK:>
Python Code:
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
%matplotlib inline
from IPython import display
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
import time
n_points = 20
n_grid = 200
frec = 3
std_n = 0.2
degree = 3
nplots = 20
#Prior distribution parameters
sigma_eps = 0.1
mean_w = np.zeros((degree+1,))
sigma_w = 0.03 ### Try increasing this value
var_w = sigma_w * np.eye(degree+1)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
xmin = np.min(X_tr)
xmax = np.max(X_tr)
X_grid = np.linspace(xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin),n_grid)
S_grid = - np.cos(frec*X_grid) #Noise free for the true model
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
for k in range(nplots):
#Draw weigths fromt the prior distribution
w_iter = np.random.multivariate_normal(mean_w,var_w)
S_grid_iter = np.polyval(w_iter,X_grid)
ax.plot(X_grid,S_grid_iter,'g-')
ax.set_xlim(xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin))
ax.set_ylim(S_tr[0]-2,S_tr[-1]+2)
ax.set_xlabel('$x$')
ax.set_ylabel('$s$')
plt.show()
# True data parameters
w_true = 3
std_n = 0.4
# Generate the whole dataset
n_max = 64
X_tr = 3 * np.random.random((n_max,1)) - 0.5
S_tr = w_true * X_tr + std_n * np.random.randn(n_max,1)
# Model parameters
sigma_eps = 0.1
mean_w = np.zeros((1,))
sigma_p = 1e6 * np.eye(1)
# No. of points to analyze
n_points = [1, 2, 4, 8, 16, 32, 64]
# Prepare plots
w_grid = np.linspace(2.7, 3.4, 5000) # Sample the w axis
plt.figure()
# Plot the prior distribution
# p = <FILL IN>
plt.plot(w_grid, p.flatten(),'g-')
for k in n_points:
# Select the first k samples
Zk = X_tr[0:k, :]
Sk = S_tr[0:k]
# Compute the parameters of the posterior distribution
# Sigma_w = <FILL IN>
# w_MSE = <FILL IN>
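# A possible solution sketch for the <FILL IN> lines above, kept as comments so the
# exercise placeholders stay intact; it mirrors the closed-form Gaussian posterior used
# later in this notebook (scipy.stats would be needed for the pdf):
#   Sigma_w = np.linalg.inv(Zk.T.dot(Zk)/(sigma_eps**2) + np.linalg.inv(sigma_p))
#   w_MSE   = Sigma_w.dot(Zk.T).dot(Sk)/(sigma_eps**2)
#   p       = scipy.stats.norm.pdf(w_grid, w_MSE, np.sqrt(Sigma_w[0, 0]))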
w_MSE = np.array(w_MSE).flatten()
# Draw weights from the posterior distribution
# p = <FILL IN>
p = p.flatten()
plt.plot(w_grid, p,'g-')
plt.fill_between(w_grid, 0, p, alpha=0.8, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=1, antialiased=True)
plt.xlim(w_grid[0], w_grid[-1])
plt.ylim(0, np.max(p))
plt.xlabel('$w$')
plt.ylabel('$p(w|s)$')
display.clear_output(wait=True)
display.display(plt.gcf())
time.sleep(0.5)
# Remove the temporary plots and fix the last one
display.clear_output(wait=True)
plt.show()
# Print the weight estimate based on the whole dataset
print w_MSE
# <SOL>
# </SOL>
n_points = 15
n_grid = 200
frec = 3
std_n = 0.2
degree = 12
nplots = 6
#Prior distribution parameters
sigma_eps = 0.1
mean_w = np.zeros((degree+1,))
sigma_p = .3 * np.eye(degree+1)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
X_grid = np.linspace(-.5,2.5,n_grid)
S_grid = - np.cos(frec*X_grid) #Noise free for the true model
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
# Compute matrix with training input data for the polynomial model
Z = []
for x_val in X_tr.tolist():
Z.append([x_val[0]**k for k in range(degree+1)])
Z=np.asmatrix(Z)
#Compute posterior distribution parameters
Sigma_w = np.linalg.inv(np.dot(Z.T,Z)/(sigma_eps**2) + np.linalg.inv(sigma_p))
posterior_mean = Sigma_w.dot(Z.T).dot(S_tr)/(sigma_eps**2)
posterior_mean = np.array(posterior_mean).flatten()
for k in range(nplots):
#Draw weights from the posterior distribution
w_iter = np.random.multivariate_normal(posterior_mean,Sigma_w)
#Note that polyval assumes the first element of weight vector is the coefficient of
#the highest degree term. Thus, we need to reverse w_iter
S_grid_iter = np.polyval(w_iter[::-1],X_grid)
ax.plot(X_grid,S_grid_iter,'g-')
#We plot also the least square solution
w_LS = np.polyfit(X_tr.flatten(), S_tr.flatten(), degree)
S_grid_iter = np.polyval(w_LS,X_grid)
ax.plot(X_grid,S_grid_iter,'m-',label='LS regression')
ax.set_xlim(-.5,2.5)
ax.set_ylim(S_tr[0]-2,S_tr[-1]+2)
ax.legend(loc='best')
plt.show()
n_points = 15
n_grid = 200
frec = 3
std_n = 0.2
degree = 12
nplots = 6
#Prior distribution parameters
sigma_eps = 0.1
mean_w = np.zeros((degree+1,))
sigma_p = .5 * np.eye(degree+1)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
X_grid = np.linspace(-1,3,n_grid)
S_grid = - np.cos(frec*X_grid) #Noise free for the true model
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
#Compute matrix with training input data for the polynomial model
Z = []
for x_val in X_tr.tolist():
Z.append([x_val[0]**k for k in range(degree+1)])
Z=np.asmatrix(Z)
#Compute posterior distribution parameters
Sigma_w = np.linalg.inv(np.dot(Z.T,Z)/(sigma_eps**2) + np.linalg.inv(sigma_p))
posterior_mean = Sigma_w.dot(Z.T).dot(S_tr)/(sigma_eps**2)
posterior_mean = np.array(posterior_mean).flatten()
#Plot the posterior mean
#Note that polyval assumes the first element of weight vector is the coefficient of
#the highest degree term. Thus, we need to reverse w_iter
S_grid_iter = np.polyval(posterior_mean[::-1],X_grid)
ax.plot(X_grid,S_grid_iter,'g-',label='Predictive mean, BI')
#Plot confidence intervals for the Bayesian Inference
std_x = []
for el in X_grid:
x_ast = np.array([el**k for k in range(degree+1)])
std_x.append(np.sqrt(x_ast.dot(Sigma_w).dot(x_ast)[0,0]))
std_x = np.array(std_x)
plt.fill_between(X_grid, S_grid_iter-std_x, S_grid_iter+std_x,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=4, linestyle='dashdot', antialiased=True)
#We plot also the least square solution
w_LS = np.polyfit(X_tr.flatten(), S_tr.flatten(), degree)
S_grid_iter = np.polyval(w_LS,X_grid)
ax.plot(X_grid,S_grid_iter,'m-',label='LS regression')
ax.set_xlim(-1,3)
ax.set_ylim(S_tr[0]-2,S_tr[-1]+2)
ax.legend(loc='best')
from math import pi
n_points = 15
frec = 3
std_n = 0.2
max_degree = 12
#Prior distribution parameters
sigma_eps = 0.2
mean_w = np.zeros((degree+1,))
sigma_p = 0.5
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Compute matrix with training input data for the polynomial model
Z = []
for x_val in X_tr.tolist():
Z.append([x_val[0]**k for k in range(degree+1)])
Z=np.asmatrix(Z)
#Evaluate the posterior evidence
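# The evidence is the Gaussian marginal likelihood of the targets,
# s ~ N(0, sigma_p^2 Z Z^T + sigma_eps^2 I); the degree with the largest
# log-evidence is the one preferred by Bayesian model selection.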
logE = []
for deg in range(max_degree):
Z_iter = Z[:,:deg+1]
logE_iter = -((deg+1)*np.log(2*pi)/2) \
-np.log(np.linalg.det((sigma_p**2)*Z_iter.dot(Z_iter.T) + (sigma_eps**2)*np.eye(n_points)))/2 \
-S_tr.T.dot(np.linalg.inv((sigma_p**2)*Z_iter.dot(Z_iter.T) + (sigma_eps**2)*np.eye(n_points))).dot(S_tr)/2
logE.append(logE_iter[0,0])
plt.plot(np.array(range(max_degree))+1,logE)
plt.xlabel('Polynomia degree')
plt.ylabel('log evidence')
n_points = 15
n_grid = 200
frec = 3
std_n = 0.2
degree = 5 #M-1
nplots = 6
#Prior distribution parameters
sigma_eps = 0.1
mean_w = np.zeros((degree+1,))
sigma_p = .5 * np.eye(degree+1)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
X_grid = np.linspace(-1,3,n_grid)
S_grid = - np.cos(frec*X_grid) #Noise free for the true model
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
#Compute matrix with training input data for the polynomial model
Z = []
for x_val in X_tr.tolist():
Z.append([x_val[0]**k for k in range(degree+1)])
Z=np.asmatrix(Z)
#Compute posterior distribution parameters
Sigma_w = np.linalg.inv(np.dot(Z.T,Z)/(sigma_eps**2) + np.linalg.inv(sigma_p))
posterior_mean = Sigma_w.dot(Z.T).dot(S_tr)/(sigma_eps**2)
posterior_mean = np.array(posterior_mean).flatten()
#Plot the posterior mean
#Note that polyval assumes the first element of weight vector is the coefficient of
#the highest degree term. Thus, we need to reverse w_iter
S_grid_iter = np.polyval(posterior_mean[::-1],X_grid)
ax.plot(X_grid,S_grid_iter,'g-',label='Predictive mean, BI')
#Plot confidence intervals for the Bayesian Inference
std_x = []
for el in X_grid:
x_ast = np.array([el**k for k in range(degree+1)])
std_x.append(np.sqrt(x_ast.dot(Sigma_w).dot(x_ast)[0,0]))
std_x = np.array(std_x)
plt.fill_between(X_grid, S_grid_iter-std_x, S_grid_iter+std_x,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=4, linestyle='dashdot', antialiased=True)
#We plot also the least square solution
w_LS = np.polyfit(X_tr.flatten(), S_tr.flatten(), degree)
S_grid_iter = np.polyval(w_LS,X_grid)
ax.plot(X_grid,S_grid_iter,'m-',label='LS regression')
ax.set_xlim(-1,3)
ax.set_ylim(S_tr[0]-2,S_tr[-1]+2)
ax.legend(loc='best')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow Lattice and shape constraints
Step2: Import the required packages
Step3: Default values used in this guide
Step4: Training dataset for restaurant ranking
Step6: Let's look at the contour plot of this CTR function.
Step7: Preparing the data
Step8: Let's create the training, validation, and test datasets. When a restaurant is shown in search results, we can record the user's engagement (click or no click) as a sample point.
Step9: Define the input_fns used for training and evaluation
Step10: Fitting gradient boosted trees
Step11: We can fit a TensorFlow gradient boosted decision tree to the dataset.
Step12: Although the model captures the general shape of the true CTR and has reasonable validation metrics, it behaves counter-intuitively in several parts of the input space: the estimated CTR decreases as the average rating or the number of reviews increases. This is because sample points are scarce in regions not well covered by the training dataset, and the model has no way to infer the correct behavior from the data alone.
Step13: Shape constraints
Step14: Using CalibratedLatticeConfig creates a canned classifier that first applies a calibrator to each input (a piecewise-linear function for numeric features) and then a lattice layer to non-linearly fuse the calibrated features. We can visualize the model with tfl.visualization; in particular, the following plot shows the two trained calibrators included in the canned estimator.
Step15: With the constraints added, the estimated CTR always increases as the average rating or the number of reviews increases. This is done by making sure the calibrators and the lattice are monotonic.
Step16: See how the test metrics improve by adding the concavity constraint. The prediction plot also resembles the ground truth more closely.
Step17: The following plot shows the trained lattice function. Because of the trust constraint, we expect larger values of the calibrated num_reviews to force a higher slope with respect to the calibrated avg_rating, resulting in a more significant move in the lattice output.
Step18: Smoothing Calibrator
Step19: The calibrators are now smooth, and the overall estimated CTR better matches the ground truth. This is reflected in both the test metrics and the contour plots.
Step20: To include the third feature, dollar_rating, recall that categorical features require slightly different treatment in TFL, both in the feature column and in the feature config. Here we enforce the partial monotonicity constraint that, with all other inputs fixed, the output for 'DD' restaurants should be larger than for 'D' restaurants. This is done using the monotonicity setting in the feature config.
Step21: The categorical calibrator shows the preference in the model output, DD > D > DDD > DDDD, which is consistent with our setup. There is also a column for missing values: although there are no missing features in the training and test data, the model provides an imputation for missing values that may occur during downstream model serving.
| <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install tensorflow-lattice
import tensorflow as tf
from IPython.core.pylabtools import figsize
import itertools
import logging
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
logging.disable(sys.maxsize)
NUM_EPOCHS = 1000
BATCH_SIZE = 64
LEARNING_RATE=0.01
def click_through_rate(avg_ratings, num_reviews, dollar_ratings):
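# Ground-truth CTR model: a logistic function that increases with avg_rating and with
# log(1 + num_reviews), shifted down by a baseline that depends on the dollar rating.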
dollar_rating_baseline = {"D": 3, "DD": 2, "DDD": 4, "DDDD": 4.5}
return 1 / (1 + np.exp(
np.array([dollar_rating_baseline[d] for d in dollar_ratings]) -
avg_ratings * np.log1p(num_reviews) / 4))
def color_bar():
bar = matplotlib.cm.ScalarMappable(
norm=matplotlib.colors.Normalize(0, 1, True),
cmap="viridis",
)
bar.set_array([0, 1])
return bar
def plot_fns(fns, split_by_dollar=False, res=25):
"""Generates contour plots for a list of (name, fn) functions."""
num_reviews, avg_ratings = np.meshgrid(
np.linspace(0, 200, num=res),
np.linspace(1, 5, num=res),
)
if split_by_dollar:
dollar_rating_splits = ["D", "DD", "DDD", "DDDD"]
else:
dollar_rating_splits = [None]
if len(fns) == 1:
fig, axes = plt.subplots(2, 2, sharey=True, tight_layout=False)
else:
fig, axes = plt.subplots(
len(dollar_rating_splits), len(fns), sharey=True, tight_layout=False)
axes = axes.flatten()
axes_index = 0
for dollar_rating_split in dollar_rating_splits:
for title, fn in fns:
if dollar_rating_split is not None:
dollar_ratings = np.repeat(dollar_rating_split, res**2)
values = fn(avg_ratings.flatten(), num_reviews.flatten(),
dollar_ratings)
title = "{}: dollar_rating={}".format(title, dollar_rating_split)
else:
values = fn(avg_ratings.flatten(), num_reviews.flatten())
subplot = axes[axes_index]
axes_index += 1
subplot.contourf(
avg_ratings,
num_reviews,
np.reshape(values, (res, res)),
vmin=0,
vmax=1)
subplot.title.set_text(title)
subplot.set(xlabel="Average Rating")
subplot.set(ylabel="Number of Reviews")
subplot.set(xlim=(1, 5))
_ = fig.colorbar(color_bar(), cax=fig.add_axes([0.95, 0.2, 0.01, 0.6]))
figsize(11, 11)
plot_fns([("CTR", click_through_rate)], split_by_dollar=True)
def sample_restaurants(n):
avg_ratings = np.random.uniform(1.0, 5.0, n)
num_reviews = np.round(np.exp(np.random.uniform(0.0, np.log(200), n)))
dollar_ratings = np.random.choice(["D", "DD", "DDD", "DDDD"], n)
ctr_labels = click_through_rate(avg_ratings, num_reviews, dollar_ratings)
return avg_ratings, num_reviews, dollar_ratings, ctr_labels
np.random.seed(42)
avg_ratings, num_reviews, dollar_ratings, ctr_labels = sample_restaurants(2000)
figsize(5, 5)
fig, axs = plt.subplots(1, 1, sharey=False, tight_layout=False)
for rating, marker in [("D", "o"), ("DD", "^"), ("DDD", "+"), ("DDDD", "x")]:
plt.scatter(
x=avg_ratings[np.where(dollar_ratings == rating)],
y=num_reviews[np.where(dollar_ratings == rating)],
c=ctr_labels[np.where(dollar_ratings == rating)],
vmin=0,
vmax=1,
marker=marker,
label=rating)
plt.xlabel("Average Rating")
plt.ylabel("Number of Reviews")
plt.legend()
plt.xlim((1, 5))
plt.title("Distribution of restaurants")
_ = fig.colorbar(color_bar(), cax=fig.add_axes([0.95, 0.2, 0.01, 0.6]))
def sample_dataset(n, testing_set):
(avg_ratings, num_reviews, dollar_ratings, ctr_labels) = sample_restaurants(n)
if testing_set:
# Testing has a more uniform distribution over all restaurants.
num_views = np.random.poisson(lam=3, size=n)
else:
# Training/validation datasets have more views on popular restaurants.
num_views = np.random.poisson(lam=ctr_labels * num_reviews / 50.0, size=n)
return pd.DataFrame({
"avg_rating": np.repeat(avg_ratings, num_views),
"num_reviews": np.repeat(num_reviews, num_views),
"dollar_rating": np.repeat(dollar_ratings, num_views),
"clicked": np.random.binomial(n=1, p=np.repeat(ctr_labels, num_views))
})
# Generate datasets.
np.random.seed(42)
data_train = sample_dataset(500, testing_set=False)
data_val = sample_dataset(500, testing_set=False)
data_test = sample_dataset(500, testing_set=True)
# Plotting dataset densities.
figsize(12, 5)
fig, axs = plt.subplots(1, 2, sharey=False, tight_layout=False)
for ax, data, title in [(axs[0], data_train, "training"),
(axs[1], data_test, "testing")]:
_, _, _, density = ax.hist2d(
x=data["avg_rating"],
y=data["num_reviews"],
bins=(np.linspace(1, 5, num=21), np.linspace(0, 200, num=21)),
density=True,
cmap="Blues",
)
ax.set(xlim=(1, 5))
ax.set(ylim=(0, 200))
ax.set(xlabel="Average Rating")
ax.set(ylabel="Number of Reviews")
ax.title.set_text("Density of {} examples".format(title))
_ = fig.colorbar(density, ax=ax)
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=data_train,
y=data_train["clicked"],
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
shuffle=False,
)
# feature_analysis_input_fn is used for TF Lattice estimators.
feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=data_train,
y=data_train["clicked"],
batch_size=BATCH_SIZE,
num_epochs=1,
shuffle=False,
)
val_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=data_val,
y=data_val["clicked"],
batch_size=BATCH_SIZE,
num_epochs=1,
shuffle=False,
)
test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=data_test,
y=data_test["clicked"],
batch_size=BATCH_SIZE,
num_epochs=1,
shuffle=False,
)
def analyze_two_d_estimator(estimator, name):
# Extract validation metrics.
metric = estimator.evaluate(input_fn=val_input_fn)
print("Validation AUC: {}".format(metric["auc"]))
metric = estimator.evaluate(input_fn=test_input_fn)
print("Testing AUC: {}".format(metric["auc"]))
def two_d_pred(avg_ratings, num_reviews):
results = estimator.predict(
tf.compat.v1.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({
"avg_rating": avg_ratings,
"num_reviews": num_reviews,
}),
shuffle=False,
))
return [x["logistic"][0] for x in results]
def two_d_click_through_rate(avg_ratings, num_reviews):
return np.mean([
click_through_rate(avg_ratings, num_reviews,
np.repeat(d, len(avg_ratings)))
for d in ["D", "DD", "DDD", "DDDD"]
],
axis=0)
figsize(11, 5)
plot_fns([("{} Estimated CTR".format(name), two_d_pred),
("CTR", two_d_click_through_rate)],
split_by_dollar=False)
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
gbt_estimator = tf.estimator.BoostedTreesClassifier(
feature_columns=feature_columns,
# Hyper-params optimized on validation set.
n_batches_per_layer=1,
max_depth=2,
n_trees=50,
learning_rate=0.05,
config=tf.estimator.RunConfig(tf_random_seed=42),
)
gbt_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(gbt_estimator, "GBT")
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
dnn_estimator = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
# Hyper-params optimized on validation set.
hidden_units=[16, 8, 8],
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
dnn_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(dnn_estimator, "DNN")
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
)
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(tfl_estimator, "TF Lattice")
def save_and_visualize_lattice(tfl_estimator):
saved_model_path = tfl_estimator.export_saved_model(
"/tmp/TensorFlow_Lattice_101/",
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec=tf.feature_column.make_parse_example_spec(
feature_columns)))
model_graph = tfl.estimators.get_model_graph(saved_model_path)
figsize(8, 8)
tfl.visualization.draw_model_graph(model_graph)
return model_graph
_ = save_and_visualize_lattice(tfl_estimator)
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_convexity="concave",
pwl_calibration_num_keypoints=20,
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
)
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(tfl_estimator, "TF Lattice")
_ = save_and_visualize_lattice(tfl_estimator)
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_convexity="concave",
pwl_calibration_num_keypoints=20,
# Larger num_reviews indicating more trust in avg_rating.
reflects_trust_in=[
tfl.configs.TrustConfig(
feature_name="avg_rating", trust_type="edgeworth"),
],
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
)
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(tfl_estimator, "TF Lattice")
model_graph = save_and_visualize_lattice(tfl_estimator)
lat_mesh_n = 12
lat_mesh_x, lat_mesh_y = tfl.test_utils.two_dim_mesh_grid(
lat_mesh_n**2, 0, 0, 1, 1)
lat_mesh_fn = tfl.test_utils.get_hypercube_interpolation_fn(
model_graph.output_node.weights.flatten())
lat_mesh_z = [
lat_mesh_fn([lat_mesh_x.flatten()[i],
lat_mesh_y.flatten()[i]]) for i in range(lat_mesh_n**2)
]
trust_plt = tfl.visualization.plot_outputs(
(lat_mesh_x, lat_mesh_y),
{"Lattice Lookup": lat_mesh_z},
figsize=(6, 6),
)
trust_plt.title("Trust")
trust_plt.xlabel("Calibrated avg_rating")
trust_plt.ylabel("Calibrated num_reviews")
trust_plt.show()
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
]
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_convexity="concave",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
reflects_trust_in=[
tfl.configs.TrustConfig(
feature_name="avg_rating", trust_type="edgeworth"),
],
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
)
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_two_d_estimator(tfl_estimator, "TF Lattice")
_ = save_and_visualize_lattice(tfl_estimator)
def analyze_three_d_estimator(estimator, name):
# Extract validation metrics.
metric = estimator.evaluate(input_fn=val_input_fn)
print("Validation AUC: {}".format(metric["auc"]))
metric = estimator.evaluate(input_fn=test_input_fn)
print("Testing AUC: {}".format(metric["auc"]))
def three_d_pred(avg_ratings, num_reviews, dollar_rating):
results = estimator.predict(
tf.compat.v1.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({
"avg_rating": avg_ratings,
"num_reviews": num_reviews,
"dollar_rating": dollar_rating,
}),
shuffle=False,
))
return [x["logistic"][0] for x in results]
figsize(11, 22)
plot_fns([("{} Estimated CTR".format(name), three_d_pred),
("CTR", click_through_rate)],
split_by_dollar=True)
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
tf.feature_column.categorical_column_with_vocabulary_list(
"dollar_rating",
vocabulary_list=["D", "DD", "DDD", "DDDD"],
dtype=tf.string,
default_value=0),
]
model_config = tfl.configs.CalibratedLatticeConfig(
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_convexity="concave",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
reflects_trust_in=[
tfl.configs.TrustConfig(
feature_name="avg_rating", trust_type="edgeworth"),
],
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
),
tfl.configs.FeatureConfig(
name="dollar_rating",
lattice_size=2,
pwl_calibration_num_keypoints=4,
# Here we only specify one monotonicity:
# `D` restaurants have a smaller value than `DD` restaurants
monotonicity=[("D", "DD")],
),
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_three_d_estimator(tfl_estimator, "TF Lattice")
_ = save_and_visualize_lattice(tfl_estimator)
feature_columns = [
tf.feature_column.numeric_column("num_reviews"),
tf.feature_column.numeric_column("avg_rating"),
tf.feature_column.categorical_column_with_vocabulary_list(
"dollar_rating",
vocabulary_list=["D", "DD", "DDD", "DDDD"],
dtype=tf.string,
default_value=0),
]
model_config = tfl.configs.CalibratedLatticeConfig(
output_calibration=True,
output_calibration_num_keypoints=5,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="output_calib_wrinkle", l2=0.1),
],
feature_configs=[
tfl.configs.FeatureConfig(
name="num_reviews",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_convexity="concave",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
reflects_trust_in=[
tfl.configs.TrustConfig(
feature_name="avg_rating", trust_type="edgeworth"),
],
),
tfl.configs.FeatureConfig(
name="avg_rating",
lattice_size=2,
monotonicity="increasing",
pwl_calibration_num_keypoints=20,
regularizer_configs=[
tfl.configs.RegularizerConfig(name="calib_wrinkle", l2=1.0),
],
),
tfl.configs.FeatureConfig(
name="dollar_rating",
lattice_size=2,
pwl_calibration_num_keypoints=4,
# Here we only specify one monotonicity:
# `D` restaurants have a smaller value than `DD` restaurants
monotonicity=[("D", "DD")],
),
])
tfl_estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=feature_analysis_input_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
config=tf.estimator.RunConfig(tf_random_seed=42),
)
tfl_estimator.train(input_fn=train_input_fn)
analyze_three_d_estimator(tfl_estimator, "TF Lattice")
_ = save_and_visualize_lattice(tfl_estimator)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 10.1. Spatiocyte Lattice-based Method
Step2: There is a distinct difference in the second argument of spatiocyte.World. This is called the voxel radius.
Step3: 10.2. The Diffusion Movement of Single Molecule
Step4: The new_particle method tries to place a particle at a position in the spatiocyte.World. It returns the particle's ParticleID (pid). If new_particle fails to place the particle, it returns None instead of a ParticleID.
Step5: The get_particle method receives a particle ID and returns the ID and the particle (of course, the returned ID is the same as the given one).
Step6: You can use FixedIntervalTrajectoryObserver to track the trajectory of the molecular diffusion process.
Step7: Here we visualized the trajectory with the plotting.plot_trajectory method; you can also obtain it as a list of Real3 with the data() method.
Step8: The data() method returns a nested list.
Step9: Please remember the list_particles_exact method; like add_molecules, it can be used with the other World classes as well.
Step10: Although we used a faster kinetic constant than before, the result is the same.
Step11: 10.4. The Structure in the Spatiocyte Method
Step12: Visualize the state of the World.
Step13: The first argument of the Sphere class is the center of the sphere, and the second argument is its radius.
Step14: Now we have restricted the trajectories of Species A to the structure of Species C, and add_molecules works accordingly. Note that you need to create the structure before calling add_molecules.
Step15: pid_list is a list of the first 10 ParticleIDs of A molecules.
Step16: It might be hard to see, but the Species B molecules are actually placed only off the surface.
Step17: This means that a Species B becomes A when B collides with the structure M.
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ecell4.prelude import *
with species_attributes():
A | B | C | {'D': 1}
with reaction_rules():
A + B == C | (0.01, 0.3)
m = get_model()
w = spatiocyte.World(ones(), 0.005) # The second argument is 'voxel_radius'.
w.bind_to(m)
w.add_molecules(Species('C'), 60)
sim = spatiocyte.Simulator(w)
obs = FixedIntervalNumberObserver(0.1, ('A', 'B', 'C'))
sim.run(10, obs)
show(obs, step=True)
with species_attributes():
A | {'D': 1}
m = get_model()
w = spatiocyte.World(ones(), 0.005)
w.bind_to(m)
pid = w.new_particle(Species('A'), 0.5 * ones())
pid, p = w.get_particle(pid)
print(p.species().serial()) # must print: A
print(p.radius(), p.D()) # must print: (0.005, 1.0)
print(tuple(p.position())) # must print: (0.49806291436591293, 0.49652123150307814, 0.5)
plotting.plot_world(w)
sim = spatiocyte.Simulator(w)
obs = FixedIntervalTrajectoryObserver(0.002, [pid])
sim.run(1, obs)
plotting.plot_trajectory(obs)
print(len(obs.data())) # => 1
print(len(obs.data()[0])) # => 501
w.add_molecules(Species('A'), 5)
particles = w.list_particles_exact(Species('A'))
for pid, p in particles:
print(p.species().serial(), tuple(p.position()))
with species_attributes():
A | B | C | {'D': 1}
with reaction_rules():
A + B > C | 1.0
m = get_model()
w = spatiocyte.World(Real3(2, 1, 1), 0.005)
w.bind_to(m)
w.add_molecules(Species('A'), 120)
w.add_molecules(Species('B'), 120)
obs = FixedIntervalNumberObserver(0.005, ('A', 'B', 'C'))
sim = spatiocyte.Simulator(w)
sim.run(1.0, obs)
odew = ode.World(Real3(2, 1, 1))
# odew.bind_to(m)
odew.add_molecules(Species('A'), 120)
odew.add_molecules(Species('B'), 120)
odeobs = FixedIntervalNumberObserver(0.005, ('A', 'B', 'C'))
odesim = ode.Simulator(odew, m)
odesim.run(1.0, odeobs)
show(obs, "-", odeobs, "--")
import numpy
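# Diffusion-limited (Smoluchowski) rate: kD = 4*pi*R_tot*D_tot, where R_tot is the sum of
# the two molecular radii (0.005 + 0.005) and D_tot the sum of the diffusion coefficients
# (1 + 1); kf below combines it with the intrinsic rate ka into an effective rate.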
kD = 4 * numpy.pi * (0.005 * 2) * (1 * 2)
ka = 1.0
kf = ka * kD / (ka + kD)
with reaction_rules():
A + B > C | kf
odeobs = run_simulation(1, y0={'A': 120, 'B': 120}, volume=2, return_type='observer')
show(obs, "-", odeobs, "--")
with species_attributes():
A | {'D': 1, 'location': 'C', 'dimension': 3}
C | {'dimension': 3}
m = get_model()
w = spatiocyte.SpatiocyteWorld(ones(), 0.005)
w.bind_to(m)
sph = Sphere(0.5 * ones(), 0.45)
print(w.add_structure(Species('C'), sph)) # will print 539805
show(w)
w.add_molecules(Species('A'), 120)
show(w, species_list=['A']) # visualize A-molecules only
pid_list = [pid for pid, p in w.list_particles(Species('A'))[: 10]]
obs = FixedIntervalTrajectoryObserver(1e-3, pid_list)
sim = spatiocyte.Simulator(w)
sim.run(1, obs)
show(obs)
with species_attributes():
A | {'D': 0.1, 'location': 'M', 'dimension': 2}
B | {'D': 1}
M | {'dimension': 2}
m = get_model()
w = spatiocyte.World(ones())
w.bind_to(m)
origin = Real3(0, 0, 0.5)
w.add_structure(
Species('M'), PlanarSurface(origin, unitx(), unity())) # Create a structure first
w.add_molecules(Species('B'), 480) # Throw-in B-molecules
show(w, species_list=['B', 'M'])
with species_attributes():
A | {'D': 0.1, 'location': 'M', 'dimension': 2}
B | {'D': 1}
M | {'dimension': 2}
with reaction_rules():
B + M == A | (1.0, 1.5)
m = get_model()
w.bind_to(m)
sim = spatiocyte.Simulator(w)
obs = NumberObserver(('A', 'B'))
sim.run(2, obs)
show(obs)
show(w, species_list=['A', 'B'])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Create new features
Step3: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.
Step4: Applying L1 penalty requires adding an extra parameter (l1_penalty) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set l2_penalty=0 to ensure we don't introduce an additional L2 penalty.
Step5: Find what features had non-zero weight.
Step6: Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
Step7: Next, we write a loop that does the following
Step8: QUIZ QUESTIONS
Step9: Exploring the larger range of values to find a narrow range with the desired sparsity
Step10: Now, implement a loop that search through this space of possible l1_penalty values
Step11: Out of this large range, we want to find the two ends of our desired narrow range of l1_penalty. At one end, we will have l1_penalty values that have too few non-zeros, and at the other end, we will have an l1_penalty that has too many non-zeros.
Step12: QUIZ QUESTIONS
Step13: For l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20)
Step14: QUIZ QUESTIONS
| <ASSISTANT_TASK:>
Python Code:
import graphlab
graphlab.product_key.set_product_key("C0C2-04B4-D94B-70F6-8771-86F9-C6E1-E122")
sales = graphlab.SFrame('kc_house_data.gl/kc_house_data.gl')
from math import log, sqrt
sales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)
sales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)
sales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']
# In the dataset, 'floors' was defined with type string,
# so we'll convert them to float, before creating a new feature.
sales['floors'] = sales['floors'].astype(float)
sales['floors_square'] = sales['floors']*sales['floors']
all_features = ['bedrooms', 'bedrooms_square',
'bathrooms',
'sqft_living', 'sqft_living_sqrt',
'sqft_lot', 'sqft_lot_sqrt',
'floors', 'floors_square',
'waterfront', 'view', 'condition', 'grade',
'sqft_above',
'sqft_basement',
'yr_built', 'yr_renovated']
model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,
validation_set=None,
l2_penalty=0., l1_penalty=1e10)
model_all.get("coefficients").print_rows(num_rows=18, num_columns=3)
model_all['coefficients']['value'].nnz()
(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split
(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate
import numpy as np
def get_RSS(prediction, output):
residual = output - prediction
# square the residuals and add them up
RS = residual*residual
RSS = RS.sum()
return(RSS)
for l1_penalty in np.logspace(1, 7, num=13):
A = 0
print l1_penalty
model_all = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0, l1_penalty=l1_penalty)
predictions=model_all.predict(validation)
A = get_RSS(predictions,validation['price'])
print A
model_all = graphlab.linear_regression.create(testing, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0, l1_penalty=10)
model_all['coefficients']['value'].nnz()
max_nonzeros = 7
l1_penalty_values = np.logspace(8, 10, num=20)
for l1_penalty in np.logspace(8, 10, num=20):
A = 0
predictions = 0
print l1_penalty
model_all = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0, l1_penalty=l1_penalty)
predictions=model_all.predict(validation)
A = get_RSS(predictions,validation['price'])
print A
print model_all['coefficients']['value'].nnz()
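# The bounds below are presumably read off from the printed output of the loop above:
# the largest penalty that still leaves more than max_nonzeros non-zero weights and the
# smallest penalty that leaves fewer (two consecutive values of the logspace grid).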
l1_penalty_min = 2976351441.63
l1_penalty_max = 3792690190.73
l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)
for l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20):
A = 0
predictions = 0
model_all = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0, l1_penalty=l1_penalty)
predictions=model_all.predict(validation)
A = get_RSS(predictions,validation['price'])
if model_all['coefficients']['value'].nnz() <= 7:
print l1_penalty
print A
print model_all['coefficients']['value'].nnz()
model_all = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, verbose = False,
l2_penalty=0, l1_penalty=3448968612.16)
# model_all['coefficients']['value'].nnz()
# print np.linspace(l1_penalty_min,l1_penalty_max,20)[0]
print model_all['coefficients'].print_rows(num_rows=18, num_columns=3)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
A, B = load_data()
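# load_data() is assumed to be provided elsewhere in the exercise; the line below counts
# the positions where the second halves of A and B differ.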
cnt_not_equal = int((A[int(len(A) / 2):] != B[int(len(A) / 2):]).sum())
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The global collection of tide gauge records at the PSMSL is used to access the data. The other way to access the data is to ask the data service desk at Rijkswaterstaat. There are two types of datasets: the "Revised Local Reference" and the "Metric". For the Netherlands the difference is that the "Revised Local Reference" undoes the corrections from the 2014 NAP revision, to get a dataset that is consistent over time.
Step5: Now that we have defined which tide gauges we are monitoring we can start downloading the relevant data.
Step6: Now that we have all data downloaded we can compute the mean.
Step7: Methods
Step8: Is there a sea-level acceleration?
Step9: Conclusions
| <ASSISTANT_TASK:>
Python Code:
# this is a list of packages that are used in this notebook
# these come with python
import io
import zipfile
import functools
# you can install these packages using pip or anaconda
# (requests numpy pandas bokeh pyproj statsmodels)
# for downloading
import requests
# computation libraries
import numpy as np
import pandas
# coordinate systems
import pyproj
# statistics
import statsmodels.api as sm
# plotting
import bokeh.charts
import bokeh.io
import bokeh.plotting
import bokeh.tile_providers
import bokeh.palettes
# displaying things
from ipywidgets import Image
import IPython.display
# Some coordinate systems
WEBMERCATOR = pyproj.Proj(init='epsg:3857')
WGS84 = pyproj.Proj(init='epsg:4326')
# If this notebook is not showing up with figures, you can use the following url:
# https://nbviewer.ipython.org/github/openearth/notebooks/blob/master/sealevelmonitor.ipynb
bokeh.io.output_notebook()
urls = {
'metric_monthly': 'http://www.psmsl.org/data/obtaining/met.monthly.data/met_monthly.zip',
'rlr_monthly': 'http://www.psmsl.org/data/obtaining/rlr.annual.data/rlr_monthly.zip',
'rlr_annual': 'http://www.psmsl.org/data/obtaining/rlr.annual.data/rlr_annual.zip'
}
dataset_name = 'rlr_annual'
# these compute the rlr back to NAP (ignoring the undoing of the NAP correction)
main_stations = {
20: {
'name': 'Vlissingen',
'rlr2nap': lambda x: x - (6976-46)
},
22: {
'name': 'Hoek van Holland',
'rlr2nap': lambda x:x - (6994 - 121)
},
23: {
'name': 'Den Helder',
'rlr2nap': lambda x: x - (6988-42)
},
24: {
'name': 'Delfzijl',
'rlr2nap': lambda x: x - (6978-155)
},
25: {
'name': 'Harlingen',
'rlr2nap': lambda x: x - (7036-122)
},
32: {
'name': 'IJmuiden',
'rlr2nap': lambda x: x - (7033-83)
}
}
# the main stations are defined by their ids
main_stations_idx = list(main_stations.keys())
main_stations_idx
# download the zipfile
resp = requests.get(urls[dataset_name])
# we can read the zipfile
stream = io.BytesIO(resp.content)
zf = zipfile.ZipFile(stream)
# this list contains a table of
# station ID, latitude, longitude, station name, coastline code, station code, and quality flag
csvtext = zf.read('{}/filelist.txt'.format(dataset_name))
stations = pandas.read_csv(
io.BytesIO(csvtext),
sep=';',
names=('id', 'lat', 'lon', 'name', 'coastline_code', 'station_code', 'quality'),
converters={
'name': str.strip,
'quality': str.strip
}
)
stations = stations.set_index('id')
# the dutch stations in the PSMSL database, make a copy
# or use stations.coastline_code == 150 for all dutch stations
selected_stations = stations.ix[main_stations_idx].copy()
# set the main stations, this should be a list of 6 stations
selected_stations
# show all the stations on a map
# compute the bounds of the plot
sw = (50, -5)
ne = (55, 10)
# transform to web mercator
sw_wm = pyproj.transform(WGS84, WEBMERCATOR, sw[1], sw[0])
ne_wm = pyproj.transform(WGS84, WEBMERCATOR, ne[1], ne[0])
# create a plot
fig = bokeh.plotting.figure(tools='pan, wheel_zoom', plot_width=600, plot_height=200, x_range=(sw_wm[0], ne_wm[0]), y_range=(sw_wm[1], ne_wm[1]))
fig.axis.visible = False
# add some background tiles
fig.add_tile(bokeh.tile_providers.STAMEN_TERRAIN)
# add the stations
x, y = pyproj.transform(WGS84, WEBMERCATOR, np.array(stations.lon), np.array(stations.lat))
fig.circle(x, y)
x, y = pyproj.transform(WGS84, WEBMERCATOR, np.array(selected_stations.lon), np.array(selected_stations.lat))
_ = fig.circle(x, y, color='red')
# show the plot
bokeh.io.show(fig)
# each station has a number of files that you can look at.
# here we define a template for each filename
# stations that we are using for our computation
# define the name formats for the relevant files
names = {
'datum': '{dataset}/RLR_info/{id}.txt',
'diagram': '{dataset}/RLR_info/{id}.png',
'url': 'http://www.psmsl.org/data/obtaining/rlr.diagrams/{id}.php',
'data': '{dataset}/data/{id}.rlrdata',
'doc': '{dataset}/docu/{id}.txt',
'contact': '{dataset}/docu/{id}_auth.txt'
}
def get_url(station, dataset):
"""Return the url of the station information (diagram and datum)."""
info = dict(
dataset=dataset,
id=station.name
)
url = names['url'].format(**info)
return url
# fill in the dataset parameter using the global dataset_name
f = functools.partial(get_url, dataset=dataset_name)
# compute the url for each station
selected_stations['url'] = selected_stations.apply(f, axis=1)
selected_stations
def missing2nan(value, missing=-99999):
    '''convert the value to nan if the float of value equals the missing value'''
value = float(value)
if value == missing:
return np.nan
return value
def get_data(station, dataset):
    '''get data for the station (pandas record) from the dataset (url)'''
info = dict(
dataset=dataset,
id=station.name
)
bytes = zf.read(names['data'].format(**info))
df = pandas.read_csv(
io.BytesIO(bytes),
sep=';',
names=('year', 'height', 'interpolated', 'flags'),
converters={
"height": lambda x: main_stations[station.name]['rlr2nap'](missing2nan(x)),
"interpolated": str.strip,
}
)
df['station'] = station.name
return df
# get data for all stations
f = functools.partial(get_data, dataset=dataset_name)
# look up the data for each station
selected_stations['data'] = [f(station) for _, station in selected_stations.iterrows()]
# we now have data for each station
selected_stations[['name', 'data']]
# compute the mean
grouped = pandas.concat(selected_stations['data'].tolist())[['year', 'height']].groupby('year')
mean_df = grouped.mean().reset_index()
# filter out non-trusted part (before NAP)
mean_df = mean_df[mean_df['year'] >= 1890].copy()
# these are the mean waterlevels
mean_df.tail()
# show all the stations, including the mean
title = 'Sea-surface height for Dutch tide gauges [{year_min} - {year_max}]'.format(
year_min=mean_df.year.min(),
year_max=mean_df.year.max()
)
fig = bokeh.plotting.figure(title=title, x_range=(1860, 2020), plot_width=900, plot_height=400)
colors = bokeh.palettes.Accent6
for color, (id_, station) in zip(colors, selected_stations.iterrows()):
data = station['data']
fig.circle(data.year, data.height, color=color, legend=station['name'], alpha=0.5)
fig.line(mean_df.year, mean_df.height, line_width=3, alpha=0.7, color='black', legend='Mean')
fig.legend.location = "bottom_right"
fig.yaxis.axis_label = 'waterlevel [mm] above NAP'
fig.xaxis.axis_label = 'year'
bokeh.io.show(fig)
# define the statistical model
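# height is modelled as a constant, plus a linear trend in (year - 1970), plus a
# nodal-tide term: the 18.613-year lunar nodal cycle enters through a cosine/sine
# pair, so its amplitude and phase are fitted via the two nodal coefficients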
y = mean_df['height']
X = np.c_[
mean_df['year']-1970,
np.cos(2*np.pi*(mean_df['year']-1970)/18.613),
np.sin(2*np.pi*(mean_df['year']-1970)/18.613)
]
X = sm.add_constant(X)
model = sm.OLS(y, X)
fit = model.fit()
fit.summary(yname='Sea-surface height', xname=['Constant', 'Trend', 'Nodal U', 'Nodal V'])
# things to check:
# Durbin Watson should be >1 for no worries, >2 for no autocorrelation
# JB should be non-significant for normal residuals
# abs(x2.t) + abs(x3.t) should be > 3, otherwise adding nodal is not useful
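# A quick numeric check of the diagnostics listed above (a small illustrative
# addition; it assumes the fitted results object `fit` from the cell above).
from statsmodels.stats.stattools import durbin_watson, jarque_bera
print('Durbin-Watson: {:0.2f}'.format(durbin_watson(fit.resid)))
jb_stat, jb_p, _, _ = jarque_bera(fit.resid)
print('Jarque-Bera: stat={:0.2f}, p={:0.3f}'.format(jb_stat, jb_p))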
fig = bokeh.plotting.figure(x_range=(1860, 2020), plot_width=900, plot_height=400)
for color, (id_, station) in zip(colors, selected_stations.iterrows()):
data = station['data']
fig.circle(data.year, data.height, color=color, legend=station['name'], alpha=0.8)
fig.circle(mean_df.year, mean_df.height, line_width=3, legend='Mean', color='black', alpha=0.5)
fig.line(mean_df.year, fit.predict(), line_width=3, legend='Current')
fig.legend.location = "bottom_right"
fig.yaxis.axis_label = 'waterlevel [mm] above N.A.P.'
fig.xaxis.axis_label = 'year'
bokeh.io.show(fig)
# define the statistical model
y = mean_df['height']
X = np.c_[
mean_df['year']-1970,
(mean_df['year'] > 1993) * (mean_df['year'] - 1993),
np.cos(2*np.pi*(mean_df['year']-1970)/18.613),
np.sin(2*np.pi*(mean_df['year']-1970)/18.613)
]
X = sm.add_constant(X)
model_broken_linear = sm.OLS(y, X)
fit_broken_linear = model_broken_linear.fit()
# define the statistical model
y = mean_df['height']
X = np.c_[
mean_df['year']-1970,
(mean_df['year'] - 1970) * (mean_df['year'] - 1970),
np.cos(2*np.pi*(mean_df['year']-1970)/18.613),
np.sin(2*np.pi*(mean_df['year']-1970)/18.613)
]
X = sm.add_constant(X)
model_quadratic = sm.OLS(y, X)
fit_quadratic = model_quadratic.fit()
fit_broken_linear.summary(yname='Sea-surface height', xname=['Constant', 'Trend', 'Trend(year > 1990)', 'Nodal U', 'Nodal V'])
fit_quadratic.summary(yname='Sea-surface height', xname=['Constant', 'Trend', 'Trend**2', 'Nodal U', 'Nodal V'])
fig = bokeh.plotting.figure(x_range=(1860, 2020), plot_width=900, plot_height=400)
for color, (id_, station) in zip(colors, selected_stations.iterrows()):
data = station['data']
fig.circle(data.year, data.height, color=color, legend=station['name'], alpha=0.8)
fig.circle(mean_df.year, mean_df.height, line_width=3, legend='Mean', color='black', alpha=0.5)
fig.line(mean_df.year, fit.predict(), line_width=3, legend='Current')
fig.line(mean_df.year, fit_broken_linear.predict(), line_width=3, color='#33bb33', legend='Broken')
fig.line(mean_df.year, fit_quadratic.predict(), line_width=3, color='#3333bb', legend='Quadratic')
fig.legend.location = "top_left"
fig.yaxis.axis_label = 'waterlevel [mm] above N.A.P.'
fig.xaxis.axis_label = 'year'
bokeh.io.show(fig)
msg = '''The current average waterlevel above NAP (in cm),
based on the 6 main tide gauges for the year {year} is {height:.1f} cm.
The current sea-level rise is {rate:.0f} cm/century'''
print(msg.format(year=mean_df['year'].iloc[-1], height=fit.predict()[-1]/10.0, rate=fit.params.x1*100.0/10))
if (fit.aic < fit_broken_linear.aic):
print('The linear model is a higher quality model (smaller AIC) than the broken linear model.')
else:
print('The broken linear model is a higher quality model (smaller AIC) than the linear model.')
if (fit_broken_linear.pvalues['x2'] < 0.05):
print('The trend break is bigger than we would have expected under the assumption that there was no trend break.')
else:
print('Under the assumption that there is no trend break, we would have expected a trend break as big as we have seen.')
if (fit.aic < fit_quadratic.aic):
print('The linear model is a higher quality model (smaller AIC) than the quadratic model.')
else:
print('The quadratic model is a higher quality model (smaller AIC) than the linear model.')
if (fit_quadratic.pvalues['x2'] < 0.05):
print('The quadratic term is bigger than we would have expected under the assumption that there was no quadraticness.')
else:
print('Under the assumption that there is no quadraticness, we would have expected a quadratic term as big as we have seen.')
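# For reference, a compact side-by-side AIC comparison of the three fits
# (lower is better); this is a small illustrative addition to the pairwise
# checks above.
for label, f in [('linear', fit), ('broken linear', fit_broken_linear), ('quadratic', fit_quadratic)]:
    print('{:15s} AIC: {:0.1f}'.format(label, f.aic))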
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The code above also defines a standard conformal $\mathbb{R}^{N+1,1}$ layout without this new basis vector. This is primarily to support rendering with pyganja, which doesn't support the presence of this extra vector. BasisVectorMap defaults to preserving vectors by name between one algebra and another, while throwing away blades containing vectors missing from the destination algebra.
Step2: Before we start looking at specified dimensions of euclidean space, we build a helper to construct conformal dual circles and spheres, with the word round being a general term intended to cover both circles and spheres.
Step3: In order to render with pyganja, we'll need a helper to convert from our custom $\mathbb{R}^{N+1,2}$ layout into a standard conformal $\mathbb{R}^{N+1,1}$ layout. clifford maps indices in .value to basis blades via layout._basis_blade_order.index_to_bitmap, which we can use to convert the indices in one layout to the indices in another.
Step4: Apollonius' problem in $\mathbb{R}^2$ with circles
Step5: This gives us the Layout l2 with the desired metric,
Step6: Now we can build some dual circles
Step7: Compute the space orthogonal to all of them, which is an object of grade 2
Step8: We hypothesize that this object is of the form l2.ups(c4) ^ l2.ups(c5).
Step9: And finally, plot our circles
Step10: This works for collinear circles too
Step11: Apollonius' problem in $\mathbb{R}^3$ with spheres
Step12: Again, we can check the metric
Step13: And apply the solution to some spheres, noting that we now need 4 in order to constrain our solution
| <ASSISTANT_TASK:>
Python Code:
from clifford import ConformalLayout, BasisVectorIds, MultiVector, transformations
class OurCustomLayout(ConformalLayout):
def __init__(self, ndims):
self.ndims = ndims
euclidean_vectors = [str(i + 1) for i in range(ndims)]
conformal_vectors = ['m2', 'm1']
# Construct our custom algebra. Note that ConformalLayout requires the e- and e+ basis vectors to be last.
ConformalLayout.__init__(
self,
[1]*ndims + [-1] + [1, -1],
ids=BasisVectorIds(euclidean_vectors + ['np1'] + conformal_vectors)
)
self.enp1 = self.basis_vectors_lst[ndims]
# Construct a base algebra without the extra `enp1`, which would not be understood by pyganja.
self.conformal_base = ConformalLayout(
[1]*ndims + [1, -1],
ids=BasisVectorIds(euclidean_vectors + conformal_vectors)
)
# this lets us convert between the two layouts
self.to_conformal = transformations.between_basis_vectors(self, self.conformal_base)
def ups(self, s):
return s + self.enp1*abs(s)
OurCustomLayout.ups = ups; del ups
def downs(self, mv):
if (mv | self.enp1)[()] > 0:
mv = -mv
return mv
OurCustomLayout.downs = downs; del downs
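# dual_round builds the conformal dual of a circle/sphere: the up-projected
# center minus (r**2 / 2) * e_infinity. Flipping its overall sign flips the
# orientation, which is used below to control the circle directions.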
def dual_round(at, r):
l = at.layout
return l.up(at) - 0.5*l.einf*r*r
import itertools
from pyganja import GanjaScene, draw
def plot_rounds(in_rounds, out_rounds, scale=1):
colors = itertools.cycle([
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(0, 255, 255),
])
# note: .dual() needed here because we're passing in dual rounds, but ganja expects direct rounds
s = GanjaScene()
for r, color in zip(in_rounds, colors):
s.add_object(r.layout.to_conformal(r).dual(), color=color)
for r in out_rounds:
s.add_object(r.layout.to_conformal(r).dual(), color=(64, 64, 64))
draw(s, sig=r.layout.conformal_base.sig, scale=scale)
l2 = OurCustomLayout(ndims=2)
e1, e2 = l2.basis_vectors_lst[:2]
import pandas as pd # convenient but somewhat slow trick for showing tables
pd.DataFrame(l2.metric, index=l2.basis_names, columns=l2.basis_names)
# add minus signs before `dual_round` to flip circle directions
c1 = dual_round(-e1-e2, 1)
c2 = dual_round(e1-e2, 0.75)
c3 = dual_round(e2, 0.5)
pp = (l2.ups(c1) ^ l2.ups(c2) ^ l2.ups(c3)).dual()
pp.grades()
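# pp_ends splits this grade-2 object into its two ends: (1 + pp.normal())/2 acts
# as a projector applied from the left (and its reverse from the right) to
# pp | enp1, giving the up-projections of the two solution rounds, which are
# mapped back down with downs() before plotting.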
def pp_ends(pp):
P = (1 + pp.normal()) / 2
return P * (pp | pp.layout.enp1), ~P * (pp | pp.layout.enp1)
c4u, c5u = pp_ends(pp)
plot_rounds([c1, c2, c3], [l2.downs(c4u), l2.downs(c5u)], scale=0.75)
c1 = dual_round(-1.5*e1, 0.5)
c2 = dual_round(e1*0, 0.5)
c3 = dual_round(1.5*e1, 0.5)
c4u, c5u = pp_ends((l2.ups(c1) ^ l2.ups(c2) ^ l2.ups(c3)).dual())
plot_rounds([c1, c2, c3], [l2.downs(c4u), l2.downs(c5u)])
c1 = dual_round(-3*e1, 1.5)
c2 = dual_round(-2*e1, 1)
c3 = -dual_round(2*e1, 1)
c4u, c5u = pp_ends((l2.ups(c1) ^ l2.ups(c2) ^ l2.ups(c3)).dual())
plot_rounds([c1, c2, c3], [l2.downs(c4u), l2.downs(c5u)])
l3 = OurCustomLayout(ndims=3)
e1, e2, e3 = l3.basis_vectors_lst[:3]
pd.DataFrame(l3.metric, index=l3.basis_names, columns=l3.basis_names)
c1 = dual_round(e1+e2+e3, 1)
c2 = dual_round(-e1+e2-e3, 0.25)
c3 = dual_round(e1-e2-e3, 0.5)
c4 = dual_round(-e1-e2+e3, 1)
c5u, c6u = pp_ends((l3.ups(c1) ^ l3.ups(c2) ^ l3.ups(c3) ^ l3.ups(c4)).dual())
plot_rounds([c1, c2, c3, c4], [l3.downs(c6u), l3.downs(c5u)], scale=0.25)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the database webserver.
Step2: Import the web interface and initialize it.
Step3: The instructions below assume that these three steps have been carried out.
Step4: Insert Time Series
Step5: Upsert Metadata
Step6: Delete Time Series
Step7: Select
Step8: Augmented Select
Step9: Add Trigger
Step10: Remove Trigger
Step11: Add Vantage Point
Step12: Delete Vantage Point
Step13: Vantage Point Similarity Search
Step14: iSAX Tree Similarity Search
Step15: iSAX Tree Representation
Step16: Termination
Step17: Remember to terminate any outstanding processes!
| <ASSISTANT_TASK:>
Python Code:
# you must specify the length of the time series when loading the database
ts_length = 100
# when running from the terminal
# python go_server_persistent.py --ts_length 100 --db_name 'demo'
# here we load the server as a subprocess for demonstration purposes
server = subprocess.Popen(['python', '../go_server_persistent.py',
'--ts_length', str(ts_length), '--data_dir', '../db_files', '--db_name', 'demo'])
time.sleep(5) # make sure it loads completely
# when running from the terminal
# python go_webserver.py
# here we load the server as a subprocess for demonstration purposes
webserver = subprocess.Popen(['python', '../go_webserver.py'])
time.sleep(5) # make sure it loads completely
from webserver import *
web_interface = WebInterface()
from timeseries import *
def tsmaker(m, s, j):
'''
Helper function: randomly generates a time series for testing.
Parameters
----------
m : float
Mean value for generating time series data
s : float
Standard deviation value for generating time series data
j : float
Quantifies the "jitter" to add to the time series data
Returns
-------
A time series and associated meta data.
'''
# generate metadata
meta = {}
meta['order'] = int(np.random.choice(
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
meta['blarg'] = int(np.random.choice([1, 2]))
# generate time series data
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j * np.random.randn(ts_length)
# return time series and metadata
return meta, TimeSeries(t, v)
# generate sample time series
num_ts = 50
mus = np.random.uniform(low=0.0, high=1.0, size=num_ts)
sigs = np.random.uniform(low=0.05, high=0.4, size=num_ts)
jits = np.random.uniform(low=0.05, high=0.2, size=num_ts)
# initialize dictionaries for time series and their metadata
primary_keys = []
tsdict = {}
metadict = {}
# fill dictionaries with randomly generated entries for database
for i, m, s, j in zip(range(num_ts), mus, sigs, jits):
meta, tsrs = tsmaker(m, s, j) # generate data
pk = "ts-{}".format(i) # generate primary key
primary_keys.append(pk) # keep track of all primary keys
tsdict[pk] = tsrs # store time series data
metadict[pk] = meta # store metadata
# insert all the time series
for k in primary_keys:
web_interface.insert_ts(pk=k, ts=tsdict[k])
# check what is in the database
web_interface.select(fields=None, additional={'sort_by': '+pk', 'limit': 10})
# successfully inserting data will yield a success code
web_interface.insert_ts(pk='sample1', ts=tsdict[primary_keys[0]])
# errors will yield an error code (e.g. attempting to insert the same primary key twice)
web_interface.insert_ts(pk='sample1', ts=tsdict[primary_keys[0]])
# let's remove the test time series
web_interface.delete_ts('sample1')
# upsert the metadata
for k in primary_keys:
web_interface.upsert_meta(k, metadict[k])
# let's check the first five entries in the database - they should include metadata
web_interface.select(fields=[], additional={'sort_by': '+pk', 'limit': 5})
# example primary key to delete
primary_keys[0]
# delete an existing time series
web_interface.delete_ts(primary_keys[0])
# check what is in the database - should not include the deleted key
# note: select operations return dictionaries, so you can use the keys(), values(), and items() methods
web_interface.select(additional={'sort_by': '+pk'}).keys()
# double-check!
primary_keys[0] in web_interface.select(additional={'sort_by': '+pk'}).keys()
# add the time series and metadata back in
web_interface.insert_ts(primary_keys[0], tsdict[primary_keys[0]])
web_interface.upsert_meta(primary_keys[0], metadict[primary_keys[0]])
# check what is in the database - should include the newly added key
web_interface.select(additional={'sort_by': '+pk'}).keys()
# select all database entries; no metadata fields
web_interface.select(additional={'sort_by': '+pk', 'limit': 10})
# select all database entries; all metadata fields
web_interface.select(fields=[], additional={'sort_by': '+pk', 'limit': 10})
# select a specific time series; all metadata fields
web_interface.select(md={'pk': 'ts-0'}, fields=[])
# return a specific time series and the result of the 'stats' function (mean and standard deviation)
web_interface.augmented_select(
proc='stats', target=['mean', 'std'], arg=None, md={'pk': 'ts-0'}, additional=None)
# add trigger
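# the 'stats' procedure will now run on every insert_ts event, and its outputs
# ('mean', 'std') are stored as extra metadata fields on the inserted series
# (verified by the select calls below)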
web_interface.add_trigger('stats', 'insert_ts', ['mean', 'std'], None)
# add a new time series with the trigger (note: not adding metadata)
web_interface.insert_ts('test', tsdict[primary_keys[0]])
# inspect the results of the trigger - should include mean and std fields
web_interface.select(md={'pk': 'test'}, fields=[])
# delete back out
web_interface.delete_ts('test')
# remove trigger
web_interface.remove_trigger('stats', 'insert_ts')
# add a new time series without the trigger (note: not adding metadata)
web_interface.insert_ts('sample2', tsdict[primary_keys[0]])
# inspect the results of the trigger - should not include mean and std fields
web_interface.select(md={'pk': 'sample2'}, fields=[])
# delete back out
web_interface.delete_ts('sample2')
# randomly choose time series as vantage points
num_vps = 5
random_vps = np.random.choice(range(num_ts), size=num_vps, replace=False)
vpkeys = ['ts-{}'.format(i) for i in random_vps]
# add the time series as vantage points
for i in range(num_vps):
web_interface.insert_vp(vpkeys[i])
# delete one of the vantage points
web_interface.delete_vp(vpkeys[0])
# add it back in
web_interface.insert_vp(vpkeys[0])
# run similarity search on a time series already in the database
# should return the same time series
primary_keys[0], web_interface.vp_similarity_search(tsdict[primary_keys[0]], 1)
# create dummy time series for demonstration purposes
_, query = tsmaker(np.random.uniform(low=0.0, high=1.0),
np.random.uniform(low=0.05, high=0.4),
np.random.uniform(low=0.05, high=0.2))
results = web_interface.vp_similarity_search(query, 1)
results
# visualize the results
plt.plot(query, label='Query TS')
plt.plot(tsdict[list(results.keys())[0]], label='Closest TS')
plt.legend(loc='best')
plt.xticks([])
plt.show()
# run similarity search on a time series already in the database
# should return the same time series
primary_keys[0], web_interface.isax_similarity_search(tsdict[primary_keys[0]])
# create dummy time series for demonstration purposes
_, query = tsmaker(np.random.uniform(low=0.0, high=1.0),
np.random.uniform(low=0.05, high=0.4),
np.random.uniform(low=0.05, high=0.2))
# note: because this is an approximate search, it will not be able
# to find a match for all query time series
results = web_interface.isax_similarity_search(query)
results
# visualize the results
plt.plot(query, label='Query TS')
plt.plot(tsdict[list(results.keys())[0]], label='Closest TS')
plt.legend(loc='best')
plt.xticks([])
plt.show()
# note: print() is required to visualize the tree correctly with carriage returns
print(web_interface.isax_tree())
# insert all the time series
for k in primary_keys:
web_interface.delete_ts(pk=k)
# check that no data is left
web_interface.select()
# terminate processes before exiting
os.kill(server.pid, signal.SIGINT)
time.sleep(5) # give it time to terminate
web_interface = None
webserver.terminate()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1.4.2 Code Management with Git
Step4: 1.5.6 SDSS DR7 Quasar Catalog
Step5: Access BOSS spectra and metadata
Step6: Read the DR12 quasar catalog
Step8: 1.6.1 Plotting Two-Dimensional Representations of Large Data Sets
Step9: Use the same technique to plot the r-i vs. redshift quasar plot above
Step11: 1.6.3 Plotting Representations of Data on the Sky
Step12: You can make nicer sky plots using the Basemap map-projections library. This example is borrowed from the bossdata docs and shows the number density of BOSS DR12 quasars on the sky
Step13: Graphing Extras
| <ASSISTANT_TASK:>
Python Code:
%pylab inline
import astroML
print astroML.__version__
"""
SDSS Spectrum Example
---------------------
Figure 1.2.
An example of an SDSS spectrum (the specific flux plotted as a function of
wavelength) loaded from the SDSS SQL server in real time using Python tools
provided here (this spectrum is uniquely described by SDSS parameters
plate=1615, fiber=513, and mjd=53166).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_spectrum
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch single spectrum
plate = 1615
mjd = 53166
fiber = 513
spec = fetch_sdss_spectrum(plate, mjd, fiber)
#------------------------------------------------------------
# Plot the resulting spectrum
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(spec.wavelength(), spec.spectrum, '-k', lw=1)
ax.set_xlim(3000, 10000)
ax.set_ylim(25, 300)
ax.set_xlabel(r'$\lambda {(\rm \AA)}$')
ax.set_ylabel('Flux')
ax.set_title('Plate = %(plate)i, MJD = %(mjd)i, Fiber = %(fiber)i' % locals())
plt.show()
"""
SDSS DR7 Quasars
----------------
Figure 1.4.
The r-i color vs. redshift diagram for the first 10,000 entries from the
SDSS Data Release 7 Quasar Catalog. The color variation is due to emission
lines entering and exiting the r and i band wavelength windows.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_dr7_quasar
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the quasar data
data = fetch_dr7_quasar()
# select the first 10000 points
data = data[:10000]
r = data['mag_r']
i = data['mag_i']
z = data['redshift']
#------------------------------------------------------------
# Plot the quasar data
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(z, r - i, marker='.', markersize=2, linestyle='none', color='black')
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
import bossdata
print bossdata.__version__
quasar_catalog = bossdata.meta.Database(quasar_catalog=True)
dr12q = quasar_catalog.select_all(what='RA,DEC,Z_VI,PSFMAG_2,PSFMAG_3', max_rows=0)
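# PSFMAG is ordered u, g, r, i, z, so PSFMAG_2 and PSFMAG_3 are the r- and
# i-band PSF magnitudes used as r and i below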
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(z, r - i, marker='.', markersize=2, linestyle='none', color='black')
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
SDSS Stripe 82 Standard Stars
-----------------------------
Figure 1.9.
Scatter plot with contours over dense regions. This is a color-color diagram
of the entire set of SDSS Stripe 82 standard stars; cf. figure 1.6.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.plotting import scatter_contour
from astroML.datasets import fetch_sdss_S82standards
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the Stripe 82 standard star catalog
data = fetch_sdss_S82standards()
g = data['mmu_g']
r = data['mmu_r']
i = data['mmu_i']
#------------------------------------------------------------
# plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(g - r, r - i, threshold=200, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlabel(r'${\rm g - r}$')
ax.set_ylabel(r'${\rm r - i}$')
ax.set_xlim(-0.6, 2.5)
ax.set_ylim(-0.6, 2.5)
plt.show()
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(z, r - i, threshold=1000, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
Example of HealPix pixellization
--------------------------------
Figure 1.15.
The top panel shows HEALPix pixels in nested order. The 12 fundamental sky
divisions can be seen, as well as the hierarchical nature of the smaller
pixels. This shows a pixelization with nside = 4, that is, each of the 12
large regions has 4 x 4 pixels, for a total of 192 pixels. The lower panel
shows a seven-year co-add of raw WMAP data, plotted using the HEALPix
projection using the HealPy package. This particular realization has
nside = 512, for a total of 3,145,728 pixels. The pixels are roughly
6.8 arcminutes on a side.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
# warning: due to a bug in healpy, importing it before pylab can cause
# a segmentation fault in some circumstances.
import healpy as hp
from astroML.datasets import fetch_wmap_temperatures
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Next plot the wmap pixellization
wmap_unmasked = fetch_wmap_temperatures(masked=False)
# plot the unmasked map
fig = plt.figure(2, figsize=(10, 7.5))
hp.mollview(wmap_unmasked, min=-1, max=1, title='Raw WMAP data',
unit=r'$\Delta$T (mK)', fig=2)
plt.show()
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import PolyCollection
def plot_sky(ra, dec, data=None, nside=16, label='', projection='eck4', cmap=plt.get_cmap('jet'), norm=None,
hide_galactic_plane=False):
# get pixel area in degrees
pixel_area = hp.pixelfunc.nside2pixarea(nside, degrees=True)
# find healpixels associated with input vectors
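    # healpy uses colatitude theta = pi/2 - dec and longitude phi = ra, both in radians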
pixels = hp.ang2pix(nside, 0.5*np.pi-np.radians(dec), np.radians(ra))
# find unique pixels
unique_pixels = np.unique(pixels)
# count number of points in each pixel
bincounts = np.bincount(pixels)
# if no data provided, show counts per sq degree
# otherwise, show mean per pixel
if data is None:
values = bincounts[unique_pixels]/pixel_area
else:
weighted_counts = np.bincount(pixels, weights=data)
values = weighted_counts[unique_pixels]/bincounts[unique_pixels]
# find pixel boundaries
corners = hp.boundaries(nside, unique_pixels, step=1)
corner_theta, corner_phi = hp.vec2ang(corners.transpose(0,2,1))
corner_ra, corner_dec = np.degrees(corner_phi), np.degrees(np.pi/2-corner_theta)
# set up basemap
m = Basemap(projection=projection, lon_0=90, resolution='l', celestial=True)
m.drawmeridians(np.arange(0, 360, 30), labels=[0,0,1,0], labelstyle='+/-')
m.drawparallels(np.arange(-90, 90, 15), labels=[1,0,0,0], labelstyle='+/-')
m.drawmapboundary()
# convert sky coords to map coords
x,y = m(corner_ra, corner_dec)
# regroup into pixel corners
verts = np.array([x.reshape(-1,4), y.reshape(-1,4)]).transpose(1,2,0)
# Make the collection and add it to the plot.
coll = PolyCollection(verts, array=values, cmap=cmap, norm=norm, edgecolors='none')
plt.gca().add_collection(coll)
plt.gca().autoscale_view()
if not hide_galactic_plane:
from astropy.coordinates import SkyCoord
import astropy.units as u
# generate vector in galactic coordinates and convert to equatorial coordinates
galactic_l = np.linspace(0, 2*np.pi, 1000)
galactic_plane = SkyCoord(l=galactic_l*u.radian, b=np.zeros_like(galactic_l)*u.radian, frame='galactic').fk5
# project to map coordinates
galactic_x, galactic_y = m(galactic_plane.ra.degree, galactic_plane.dec.degree)
m.scatter(galactic_x, galactic_y, marker='.', s=2, c='k')
# Add a colorbar for the PolyCollection
plt.colorbar(coll, orientation='horizontal', pad=0.01, aspect=40, label=label)
return m
plt.figure(figsize=(12,9))
plot_sky(dr12q['RA'].data, dr12q['DEC'].data, label='Number of quasars per square degree')
plt.show()
import seaborn as sns
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(z, r - i, threshold=1000, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up default parameters. We use 32 subjects, which is the median sample size of the fMRI studies published between 2011 and 2015, as estimated from Neurosynth in the paper. We use a heuristic correction for multiple comparisons of p<0.001 and 10 voxels, like that shown by Eklund et al. (2016, PNAS) to result in Type I error rates of 0.6-0.9.
Step2: In order to recreate the figure from the paper exactly, we need to fix the random seed so that it will generate exactly the same random data. If you wish to generate new data, then set the recreate_paper_figure variable to False and rerun the notebook.
Step3: Use the standard MNI152 2mm brain mask as the mask for the generated data
Step4: Generate a dataset for each subject. fMRI data within the mask are generated using a Gaussian distribution (mean=1000, standard deviation=100). Behavioral data are generated using a Gaussian distribution (mean=100, standard deviation=1).
Step5: Spatially smooth data using a 6 mm FWHM Gaussian kernel
Step6: Use FSL's GLM tool to run a regression at each voxel
Step7: Use FSL's cluster tool to identify clusters of activation that exceed the specified cluster-forming threshold
Step8: Generate a plot showing the brain-behavior relation from the top cluster
Step9: Generate a thresholded statistics image for display
Step10: Generate a figure showing the location of the selected activation focus.
| <ASSISTANT_TASK:>
Python Code:
import numpy
import nibabel
import os
import nilearn.plotting
import matplotlib.pyplot as plt
from statsmodels.regression.linear_model import OLS
import nipype.interfaces.fsl as fsl
import scipy.stats
if not 'FSLDIR' in os.environ.keys():
raise Exception('This notebook requires that FSL is installed and the FSLDIR environment variable is set')
%matplotlib inline
pthresh=0.001 # cluster forming threshold
cthresh=10 # cluster extent threshold
nsubs=32 # number of subjects
recreate_paper_figure=True
if recreate_paper_figure:
seed=61974
else:
seed=numpy.ceil(numpy.random.rand()*100000).astype('int')
print(seed)
numpy.random.seed(seed)
maskimg=os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain_mask.nii.gz')
mask=nibabel.load(maskimg)
maskdata=mask.get_data()
maskvox=numpy.where(maskdata>0)
print('Mask includes %d voxels'%len(maskvox[0]))
imgmean=1000 # mean activation within mask
imgstd=100 # standard deviation of noise within mask
behavmean=100 # mean of behavioral regressor
behavstd=1 # standard deviation of behavioral regressor
data=numpy.zeros((maskdata.shape + (nsubs,)))
for i in range(nsubs):
tmp=numpy.zeros(maskdata.shape)
tmp[maskvox]=numpy.random.randn(len(maskvox[0]))*imgstd+imgmean
data[:,:,:,i]=tmp
newimg=nibabel.Nifti1Image(data,mask.get_affine(),mask.get_header())
newimg.to_filename('fakedata.nii.gz')
regressor=numpy.random.randn(nsubs,1)*behavstd+behavmean
numpy.savetxt('regressor.txt',regressor)
smoothing_fwhm=6 # FWHM in millimeters
smooth=fsl.IsotropicSmooth(fwhm=smoothing_fwhm,
in_file='fakedata.nii.gz',
out_file='fakedata_smooth.nii.gz')
smooth.run()
glm = fsl.GLM(in_file='fakedata_smooth.nii.gz',
design='regressor.txt',
out_t_name='regressor_tstat.nii.gz',
demean=True)
glm.run()
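# convert the one-sided cluster-forming p threshold into the equivalent t
# threshold, using nsubs - 1 degrees of freedom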
tcut=scipy.stats.t.ppf(1-pthresh,nsubs-1)
cl = fsl.Cluster()
cl.inputs.threshold = tcut
cl.inputs.in_file = 'regressor_tstat.nii.gz'
cl.inputs.out_index_file='tstat_cluster_index.nii.gz'
results=cl.run()
clusterimg=nibabel.load(cl.inputs.out_index_file)
clusterdata=clusterimg.get_data()
indices=numpy.unique(clusterdata)
clustersize=numpy.zeros(len(indices))
clustermean=numpy.zeros((len(indices),nsubs))
indvox={}
for c in range(1,len(indices)):
indvox[c]=numpy.where(clusterdata==c)
clustersize[c]=len(indvox[c][0])
for i in range(nsubs):
tmp=data[:,:,:,i]
clustermean[c,i]=numpy.mean(tmp[indvox[c]])
corr=numpy.corrcoef(regressor.T,clustermean[-1])
print('Found %d clusters exceeding p<%0.3f and %d voxel extent threshold'%(c,pthresh,cthresh))
print('Largest cluster: correlation=%0.3f, extent = %d voxels'%(corr[0,1],len(indvox[c][0])))
# set cluster to show - 0 is the largest, 1 the second largest, and so on
cluster_to_show=0
# translate this variable into the index of indvox
cluster_to_show_idx=len(indices)-cluster_to_show-1
# plot the (circular) relation between fMRI signal and
# behavioral regressor in the chosen cluster
plt.scatter(regressor.T,clustermean[cluster_to_show_idx])
plt.title('Correlation = %0.3f'%corr[0,1],fontsize=14)
plt.xlabel('Fake behavioral regressor',fontsize=18)
plt.ylabel('Fake fMRI data',fontsize=18)
m, b = numpy.polyfit(regressor[:,0], clustermean[cluster_to_show_idx], 1)
axes = plt.gca()
X_plot = numpy.linspace(axes.get_xlim()[0],axes.get_xlim()[1],100)
plt.plot(X_plot, m*X_plot + b, '-')
plt.savefig('scatter.png',dpi=600)
tstat=nibabel.load('regressor_tstat.nii.gz').get_data()
thresh_t=clusterdata.copy()
cutoff=numpy.min(numpy.where(clustersize>cthresh))
thresh_t[thresh_t<cutoff]=0
thresh_t=thresh_t*tstat
thresh_t_img=nibabel.Nifti1Image(thresh_t,mask.get_affine(),mask.get_header())
mid=len(indvox[cluster_to_show_idx][0])//2
coords=numpy.array([indvox[cluster_to_show_idx][0][mid],
indvox[cluster_to_show_idx][1][mid],
indvox[cluster_to_show_idx][2][mid],1]).T
mni=mask.get_qform().dot(coords)
nilearn.plotting.plot_stat_map(thresh_t_img,
os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain.nii.gz'),
threshold=cl.inputs.threshold,
cut_coords=mni[:3])
plt.savefig('slices.png',dpi=600)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Approximate inference for STS models with non-Gaussian observations
Step2: Synthetic data
Step3: Model
Step4: Instead of operating on the observed time series, this model operates on the series of Poisson rate parameters that govern the observations.
Step5: To use approximate inference with the non-Gaussian observation model, we encode the STS model as a TFP JointDistribution. The random variables in this joint distribution are the STS model parameters, the time series of latent Poisson rates, and the observed counts.
Step6: Preparing for inference
Step7: We also need a constraining bijector so that inference respects the constraints on the STS model parameters (for example, that scales must be positive).
Step8: Inference with HMC
Step9: First we specify the sampler, then run the sampling kernel with sample_chain to generate samples.
Step10: We sanity-check the inference by examining the parameter traces. In this case, several explanations of the data appear to have been explored, which is good, although more samples would make it easier to judge how well the chains are mixing.
Step11: Finally, we can see the payoff: the posterior distribution over the Poisson rate! We also plot the 80% predictive interval over the observed counts and check that it contains roughly 80% of the counts we actually observed.
Step13: Forecasting
Step14: VI inference
| <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import bijectors as tfb
from tensorflow_probability import distributions as tfd
tf.enable_v2_behavior()
num_timesteps = 30
observed_counts = np.round(3 + np.random.lognormal(np.log(np.linspace(
num_timesteps, 5, num=num_timesteps)), 0.20, size=num_timesteps))
observed_counts = observed_counts.astype(np.float32)
plt.plot(observed_counts)
def build_model(approximate_unconstrained_rates):
trend = tfp.sts.LocalLinearTrend(
observed_time_series=approximate_unconstrained_rates)
return tfp.sts.Sum([trend],
observed_time_series=approximate_unconstrained_rates)
positive_bijector = tfb.Softplus() # Or tfb.Exp()
# Approximate the unconstrained Poisson rate just to set heuristic priors.
# We could avoid this by passing explicit priors on all model params.
approximate_unconstrained_rates = positive_bijector.inverse(
tf.convert_to_tensor(observed_counts) + 0.01)
sts_model = build_model(approximate_unconstrained_rates)
def sts_with_poisson_likelihood_model():
# Encode the parameters of the STS model as random variables.
param_vals = []
for param in sts_model.parameters:
param_val = yield param.prior
param_vals.append(param_val)
# Use the STS model to encode the log- (or inverse-softplus)
# rate of a Poisson.
unconstrained_rate = yield sts_model.make_state_space_model(
num_timesteps, param_vals)
rate = positive_bijector.forward(unconstrained_rate[..., 0])
observed_counts = yield tfd.Poisson(rate, name='observed_counts')
model = tfd.JointDistributionCoroutineAutoBatched(sts_with_poisson_likelihood_model)
pinned_model = model.experimental_pin(observed_counts=observed_counts)
constraining_bijector = pinned_model.experimental_default_event_space_bijector()
#@title Sampler configuration
# Allow external control of sampling to reduce test runtimes.
num_results = 500 # @param { isTemplate: true}
num_results = int(num_results)
num_burnin_steps = 100 # @param { isTemplate: true}
num_burnin_steps = int(num_burnin_steps)
sampler = tfp.mcmc.TransformedTransitionKernel(
tfp.mcmc.NoUTurnSampler(
target_log_prob_fn=pinned_model.unnormalized_log_prob,
step_size=0.1),
bijector=constraining_bijector)
adaptive_sampler = tfp.mcmc.DualAveragingStepSizeAdaptation(
inner_kernel=sampler,
num_adaptation_steps=int(0.8 * num_burnin_steps),
target_accept_prob=0.75)
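# Build an initial state for the chain by drawing standard-normal values with the
# unconstrained (pre-bijector) shape of each model part and mapping them through
# the constraining bijector, so the starting point satisfies the parameter
# constraints.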
initial_state = constraining_bijector.forward(
type(pinned_model.event_shape)(
*(tf.random.normal(part_shape)
for part_shape in constraining_bijector.inverse_event_shape(
pinned_model.event_shape))))
# Speed up sampling by tracing with `tf.function`.
@tf.function(autograph=False, jit_compile=True)
def do_sampling():
return tfp.mcmc.sample_chain(
kernel=adaptive_sampler,
current_state=initial_state,
num_results=num_results,
num_burnin_steps=num_burnin_steps,
trace_fn=None)
t0 = time.time()
samples = do_sampling()
t1 = time.time()
print("Inference ran in {:.2f}s.".format(t1-t0))
f = plt.figure(figsize=(12, 4))
for i, param in enumerate(sts_model.parameters):
ax = f.add_subplot(1, len(sts_model.parameters), i + 1)
ax.plot(samples[i])
ax.set_title("{} samples".format(param.name))
param_samples = samples[:-1]
unconstrained_rate_samples = samples[-1][..., 0]
rate_samples = positive_bijector.forward(unconstrained_rate_samples)
plt.figure(figsize=(10, 4))
mean_lower, mean_upper = np.percentile(rate_samples, [10, 90], axis=0)
pred_lower, pred_upper = np.percentile(np.random.poisson(rate_samples),
[10, 90], axis=0)
_ = plt.plot(observed_counts, color="blue", ls='--', marker='o', label='observed', alpha=0.7)
_ = plt.plot(np.mean(rate_samples, axis=0), label='rate', color="green", ls='dashed', lw=2, alpha=0.7)
_ = plt.fill_between(np.arange(0, 30), mean_lower, mean_upper, color='green', alpha=0.2)
_ = plt.fill_between(np.arange(0, 30), pred_lower, pred_upper, color='grey', label='counts', alpha=0.2)
plt.xlabel("Day")
plt.ylabel("Daily Sample Size")
plt.title("Posterior Mean")
plt.legend()
def sample_forecasted_counts(sts_model, posterior_latent_rates,
posterior_params, num_steps_forecast,
num_sampled_forecasts):
# Forecast the future latent unconstrained rates, given the inferred latent
# unconstrained rates and parameters.
unconstrained_rates_forecast_dist = tfp.sts.forecast(sts_model,
observed_time_series=unconstrained_rate_samples,
parameter_samples=posterior_params,
num_steps_forecast=num_steps_forecast)
# Transform the forecast to positive-valued Poisson rates.
rates_forecast_dist = tfd.TransformedDistribution(
unconstrained_rates_forecast_dist,
positive_bijector)
# Sample from the forecast model following the chain rule:
# P(counts) = P(counts | latent_rates)P(latent_rates)
sampled_latent_rates = rates_forecast_dist.sample(num_sampled_forecasts)
sampled_forecast_counts = tfd.Poisson(rate=sampled_latent_rates).sample()
return sampled_forecast_counts, sampled_latent_rates
forecast_samples, rate_samples = sample_forecasted_counts(
sts_model,
posterior_latent_rates=unconstrained_rate_samples,
posterior_params=param_samples,
# Days to forecast:
num_steps_forecast=30,
num_sampled_forecasts=100)
forecast_samples = np.squeeze(forecast_samples)
def plot_forecast_helper(data, forecast_samples, CI=90):
  '''Plot the observed time series alongside the forecast.'''
plt.figure(figsize=(10, 4))
forecast_median = np.median(forecast_samples, axis=0)
num_steps = len(data)
num_steps_forecast = forecast_median.shape[-1]
plt.plot(np.arange(num_steps), data, lw=2, color='blue', linestyle='--', marker='o',
label='Observed Data', alpha=0.7)
forecast_steps = np.arange(num_steps, num_steps+num_steps_forecast)
CI_interval = [(100 - CI)/2, 100 - (100 - CI)/2]
lower, upper = np.percentile(forecast_samples, CI_interval, axis=0)
plt.plot(forecast_steps, forecast_median, lw=2, ls='--', marker='o', color='orange',
label=str(CI) + '% Forecast Interval', alpha=0.7)
plt.fill_between(forecast_steps,
lower,
upper, color='orange', alpha=0.2)
plt.xlim([0, num_steps+num_steps_forecast])
ymin, ymax = min(np.min(forecast_samples), np.min(data)), max(np.max(forecast_samples), np.max(data))
yrange = ymax-ymin
plt.title("{}".format('Observed time series with ' + str(num_steps_forecast) + ' Day Forecast'))
plt.xlabel('Day')
plt.ylabel('Daily Sample Size')
plt.legend()
plot_forecast_helper(observed_counts, forecast_samples, CI=80)
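# Build a mean-field surrogate posterior: an independent trainable distribution
# for each latent quantity, defined on the unconstrained space and mapped through
# the constraining bijector (a rough description; see the TFP docs for details).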
surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
event_shape=pinned_model.event_shape,
bijector=constraining_bijector)
# Allow external control of optimization to reduce test runtimes.
num_variational_steps = 1000 # @param { isTemplate: true}
num_variational_steps = int(num_variational_steps)
t0 = time.time()
losses = tfp.vi.fit_surrogate_posterior(pinned_model.unnormalized_log_prob,
surrogate_posterior,
optimizer=tf.optimizers.Adam(0.1),
num_steps=num_variational_steps)
t1 = time.time()
print("Inference ran in {:.2f}s.".format(t1-t0))
plt.plot(losses)
plt.title("Variational loss")
_ = plt.xlabel("Steps")
posterior_samples = surrogate_posterior.sample(50)
param_samples = posterior_samples[:-1]
unconstrained_rate_samples = posterior_samples[-1][..., 0]
rate_samples = positive_bijector.forward(unconstrained_rate_samples)
plt.figure(figsize=(10, 4))
mean_lower, mean_upper = np.percentile(rate_samples, [10, 90], axis=0)
pred_lower, pred_upper = np.percentile(
np.random.poisson(rate_samples), [10, 90], axis=0)
_ = plt.plot(observed_counts, color='blue', ls='--', marker='o',
label='observed', alpha=0.7)
_ = plt.plot(np.mean(rate_samples, axis=0), label='rate', color='green',
ls='dashed', lw=2, alpha=0.7)
_ = plt.fill_between(
np.arange(0, 30), mean_lower, mean_upper, color='green', alpha=0.2)
_ = plt.fill_between(np.arange(0, 30), pred_lower, pred_upper, color='grey',
label='counts', alpha=0.2)
plt.xlabel('Day')
plt.ylabel('Daily Sample Size')
plt.title('Posterior Mean')
plt.legend()
forecast_samples, rate_samples = sample_forecasted_counts(
sts_model,
posterior_latent_rates=unconstrained_rate_samples,
posterior_params=param_samples,
# Days to forecast:
num_steps_forecast=30,
num_sampled_forecasts=100)
forecast_samples = np.squeeze(forecast_samples)
plot_forecast_helper(observed_counts, forecast_samples, CI=80)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure environment settings
Step2: Importing the dataset into BigQuery
Step3: Explore the Covertype dataset
Step4: Create training and validation splits
Step5: Create a validation split
Step6: Develop a training application
Step7: Convert all numeric features to float64
Step8: Run the pipeline locally.
Step9: Calculate the trained model's accuracy.
Step10: Prepare the hyperparameter tuning application.
Step11: Write the tuning script.
Step12: Package the script into a docker image.
Step13: Build the docker image.
Step14: Submit an AI Platform hyperparameter tuning job
Step15: Start the hyperparameter tuning job.
Step16: Monitor the job.
Step17: Retrieve HP-tuning results.
Step18: The returned run results are sorted by the value of the optimization metric. The best run is the first item on the returned list.
Step19: Retrain the model with the best hyperparameters
Step20: Examine the training output
Step21: Deploy the model to AI Platform Prediction
Step22: Create a model version
Step23: Serve predictions
Step24: Invoke the model
| <ASSISTANT_TASK:>
Python Code:
import json
import os
import pickle
import tempfile
import time
import uuid
from typing import NamedTuple
import numpy as np
import pandas as pd
from google.cloud import bigquery
from googleapiclient import discovery, errors
from jinja2 import Template
from kfp.components import func_to_container_op
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
!(gsutil ls | grep kubeflow)
REGION = "us-central1"
ARTIFACT_STORE = "gs://qwiklabs-gcp-00-97cd915af2d6-kubeflowpipelines-default"
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
os.environ["PROJECT_ID"] = PROJECT_ID
DATA_ROOT = f"{ARTIFACT_STORE}/data"
JOB_DIR_ROOT = f"{ARTIFACT_STORE}/jobs"
TRAINING_FILE_PATH = "{}/{}/{}".format(DATA_ROOT, "training", "dataset.csv")
VALIDATION_FILE_PATH = "{}/{}/{}".format(DATA_ROOT, "validation", "dataset.csv")
%%bash
DATASET_LOCATION=US
DATASET_ID=covertype_dataset
TABLE_ID=covertype
DATA_SOURCE=gs://workshop-datasets/covertype/small/dataset.csv
SCHEMA=Elevation:INTEGER,\
Aspect:INTEGER,\
Slope:INTEGER,\
Horizontal_Distance_To_Hydrology:INTEGER,\
Vertical_Distance_To_Hydrology:INTEGER,\
Horizontal_Distance_To_Roadways:INTEGER,\
Hillshade_9am:INTEGER,\
Hillshade_Noon:INTEGER,\
Hillshade_3pm:INTEGER,\
Horizontal_Distance_To_Fire_Points:INTEGER,\
Wilderness_Area:STRING,\
Soil_Type:STRING,\
Cover_Type:INTEGER
bq --location=$DATASET_LOCATION --project_id=$PROJECT_ID mk --dataset $DATASET_ID
bq --project_id=$PROJECT_ID --dataset_id=$DATASET_ID load \
--source_format=CSV \
--skip_leading_rows=1 \
--replace \
$TABLE_ID \
$DATA_SOURCE \
$SCHEMA
%%bigquery
SELECT *
FROM `covertype_dataset.covertype`
!bq query \
-n 0 \
--destination_table covertype_dataset.training \
--replace \
--use_legacy_sql=false \
'SELECT * \
FROM `covertype_dataset.covertype` AS cover \
WHERE \
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (1, 2, 3, 4)'
!bq extract \
--destination_format CSV \
covertype_dataset.training \
$TRAINING_FILE_PATH
!bq query \
-n 0 \
--destination_table covertype_dataset.validation \
--replace \
--use_legacy_sql=false \
'SELECT * \
FROM `covertype_dataset.covertype` AS cover \
WHERE \
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (8)'
!bq extract \
--destination_format CSV \
covertype_dataset.validation \
$VALIDATION_FILE_PATH
df_train = pd.read_csv(TRAINING_FILE_PATH)
df_validation = pd.read_csv(VALIDATION_FILE_PATH)
print(df_train.shape)
print(df_validation.shape)
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
("num", StandardScaler(), numeric_feature_indexes),
("cat", OneHotEncoder(), categorical_feature_indexes),
]
)
pipeline = Pipeline(
[
("preprocessor", preprocessor),
("classifier", SGDClassifier(loss="log", tol=1e-3)),
]
)
num_features_type_map = {
feature: "float64" for feature in df_train.columns[numeric_feature_indexes]
}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
X_train = df_train.drop("Cover_Type", axis=1)
y_train = df_train["Cover_Type"]
X_validation = df_validation.drop("Cover_Type", axis=1)
y_validation = df_validation["Cover_Type"]
pipeline.set_params(classifier__alpha=0.001, classifier__max_iter=200)
pipeline.fit(X_train, y_train)
accuracy = pipeline.score(X_validation, y_validation)
print(accuracy)
TRAINING_APP_FOLDER = "training_app"
os.makedirs(TRAINING_APP_FOLDER, exist_ok=True)
%%writefile {TRAINING_APP_FOLDER}/train.py
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import fire
import pickle
import numpy as np
import pandas as pd
import hypertune
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def train_evaluate(job_dir, training_dataset_path, validation_dataset_path, alpha, max_iter, hptune):
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
if not hptune:
df_train = pd.concat([df_train, df_validation])
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
('num', StandardScaler(), numeric_feature_indexes),
('cat', OneHotEncoder(), categorical_feature_indexes)
])
pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log',tol=1e-3))
])
num_features_type_map = {feature: 'float64' for feature in df_train.columns[numeric_feature_indexes]}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
print('Starting training: alpha={}, max_iter={}'.format(alpha, max_iter))
X_train = df_train.drop('Cover_Type', axis=1)
y_train = df_train['Cover_Type']
pipeline.set_params(classifier__alpha=alpha, classifier__max_iter=max_iter)
pipeline.fit(X_train, y_train)
if hptune:
X_validation = df_validation.drop('Cover_Type', axis=1)
y_validation = df_validation['Cover_Type']
accuracy = pipeline.score(X_validation, y_validation)
print('Model accuracy: {}'.format(accuracy))
# Log it with hypertune
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=accuracy
)
# Save the model
if not hptune:
model_filename = 'model.pkl'
with open(model_filename, 'wb') as model_file:
pickle.dump(pipeline, model_file)
gcs_model_path = "{}/{}".format(job_dir, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path], stderr=sys.stdout)
print("Saved model in: {}".format(gcs_model_path))
if __name__ == "__main__":
fire.Fire(train_evaluate)
%%writefile {TRAINING_APP_FOLDER}/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
IMAGE_NAME = "trainer_image"
IMAGE_TAG = "latest"
IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{IMAGE_TAG}"
!gcloud builds submit --tag $IMAGE_URI $TRAINING_APP_FOLDER
%%writefile {TRAINING_APP_FOLDER}/hptuning_config.yaml
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
trainingInput:
hyperparameters:
goal: MAXIMIZE
maxTrials: 4
maxParallelTrials: 4
hyperparameterMetricTag: accuracy
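    # this tag must match the hyperparameter_metric_tag reported via cloudml-hypertune in train.py ('accuracy')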
enableTrialEarlyStopping: TRUE
params:
- parameterName: max_iter
type: DISCRETE
discreteValues: [
200,
500
]
- parameterName: alpha
type: DOUBLE
minValue: 0.00001
maxValue: 0.001
scaleType: UNIT_LINEAR_SCALE
JOB_NAME = "JOB_{}".format(time.strftime("%Y%m%d_%H%M%S"))
JOB_DIR = "{}/{}".format(JOB_DIR_ROOT, JOB_NAME)
SCALE_TIER = "BASIC"
!gcloud ai-platform jobs submit training $JOB_NAME \
--region=$REGION \
--job-dir=$JOB_DIR \
--master-image-uri=$IMAGE_URI \
--scale-tier=$SCALE_TIER \
--config $TRAINING_APP_FOLDER/hptuning_config.yaml \
-- \
--training_dataset_path=$TRAINING_FILE_PATH \
--validation_dataset_path=$VALIDATION_FILE_PATH \
--hptune
!gcloud ai-platform jobs describe $JOB_NAME
!gcloud ai-platform jobs stream-logs $JOB_NAME
ml = discovery.build("ml", "v1")
job_id = f"projects/{PROJECT_ID}/jobs/{JOB_NAME}"
request = ml.projects().jobs().get(name=job_id)
try:
response = request.execute()
except errors.HttpError as err:
print(err)
except:
print("Unexpected error")
response
response["trainingOutput"]["trials"][0]
alpha = response["trainingOutput"]["trials"][0]["hyperparameters"]["alpha"]
max_iter = response["trainingOutput"]["trials"][0]["hyperparameters"][
"max_iter"
]
JOB_NAME = "JOB_{}".format(time.strftime("%Y%m%d_%H%M%S"))
JOB_DIR = "{}/{}".format(JOB_DIR_ROOT, JOB_NAME)
SCALE_TIER = "BASIC"
!gcloud ai-platform jobs submit training $JOB_NAME \
--region=$REGION \
--job-dir=$JOB_DIR \
--master-image-uri=$IMAGE_URI \
--scale-tier=$SCALE_TIER \
-- \
--training_dataset_path=$TRAINING_FILE_PATH \
--validation_dataset_path=$VALIDATION_FILE_PATH \
--alpha=$alpha \
--max_iter=$max_iter \
--nohptune
!gcloud ai-platform jobs stream-logs $JOB_NAME
!gsutil ls $JOB_DIR
model_name = 'forest_cover_classifier'
labels = "task=classifier,domain=forestry"
!gcloud ai-platform models create $model_name \
--regions=$REGION \
--labels=$labels
model_version = 'v01'
!gcloud ai-platform versions create {model_version} \
--model={model_name} \
--origin=$JOB_DIR \
--runtime-version=1.15 \
--framework=scikit-learn \
--python-version=3.7 \
--region=global
input_file = "serving_instances.json"
with open(input_file, "w") as f:
for index, row in X_validation.head().iterrows():
f.write(json.dumps(list(row.values)))
f.write("\n")
!cat $input_file
!gcloud ai-platform predict \
--model $model_name \
--version $model_version \
--json-instances $input_file \
--region global
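# Hedged sketch (not from the original lab): the same online prediction can be issued
# through the Python API client built earlier (`ml`) instead of the gcloud CLI. The
# request body mirrors the serving_instances.json file written above.
instances = [list(row.values) for _, row in X_validation.head().iterrows()]
predict_name = "projects/{}/models/{}/versions/{}".format(PROJECT_ID, model_name, model_version)
predict_request = ml.projects().predict(name=predict_name, body={"instances": instances})
try:
    print(predict_request.execute())
except errors.HttpError as err:
    print(err)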
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load and examine the titanic data with pandas first.
Step2: So we have 891 training examples with 10 information columns given. Of course it is not straightforward to use all of them at this point.
Step3: Here is how we examine how the selection works.
Step4: Gender-Age model
Step5: How many missing values are there?
Step6: SVM does not allow features with missing values, what do we do?
Step7: Can you think of better ways to do this? (A hedged sketch of one alternative appears at the end of the code below.)
Step8: feature rescaling
Step9: Let's examine the selection function of the model.
Step10: Create a submission file with the Gender-Age model
Step11: We notice again that some of the age values are missing in the test data, and we want to fill them in the same way as we did with the training data.
Step12: Note here we give the missing values the mean age of the training data.
Step13: We use the model above to predict survival on our test data.
Step14: Create a file that can be submitted to Kaggle
| <ASSISTANT_TASK:>
Python Code:
#import all the needed package
import numpy as np
import scipy as sp
import re
import pandas as pd
import sklearn
from sklearn.cross_validation import train_test_split,cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import matplotlib
from matplotlib import pyplot as plt
%matplotlib inline
from sklearn.svm import SVC
data = pd.read_csv('data/train.csv')
print data.head()
# our target is the survived column
y= data['Survived']
print data.shape
#add in Sex_male features
data['Sex_male']=data.Sex.map({'female':0,'male':1})
data.head()
#get the features we indented to use
feature_cols=['Pclass','Sex_male']
X=data[feature_cols]
X.head()
#use the default SVM rbf model
model=SVC()
scores=cross_val_score(model,X,y,cv=10,scoring='accuracy')
print scores, np.mean(scores),np.std(scores)
xmin,xmax=X['Pclass'].min()-0.5,X['Pclass'].max()+0.5
ymin,ymax=X['Sex_male'].min()-0.5,X['Sex_male'].max()+0.5
print xmin,xmax,ymin,ymax
xx, yy = np.meshgrid(np.linspace(xmin, xmax, 200), np.linspace(ymin, ymax, 200))
model.fit(X,y)
Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
fig=plt.figure(figsize=(20,10))
ax=fig.add_subplot(111)
ax.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu,alpha=0.5)
ax.scatter(X['Pclass']+np.random.randn(len(X['Pclass']))*0.1, X['Sex_male']+np.random.randn(len(X['Pclass']))*0.05, c=y,s=40, cmap=plt.cm.RdBu_r)
ax.set_xlabel("Pclass")
ax.set_ylabel("Sex_male")
ax.set_xlim([0.5,3.5])
ax.set_ylim([-0.5,1.5])
plt.show()
#use the isnull function to check if there is any missing value in the Age column.
pd.isnull(data['Age']).any()
print len(data['Age'][pd.isnull(data['Age'])])
data['Age'][pd.isnull(data['Age'])]=data['Age'].mean()
#generate our new feature
feature_cols=['Age','Sex_male']
X=data[feature_cols]
X.head()
#use the default SVM rbf model
scores=cross_val_score(model,X,y,cv=10,scoring='accuracy')
print scores, np.mean(scores),np.std(scores)
X['Age']=(X['Age']-X['Age'].median())/X['Age'].std()
#X = StandardScaler().fit_transform(X)
scores=cross_val_score(model,X,y,cv=10,scoring='accuracy')
print scores, np.mean(scores),np.std(scores)
xmin,xmax=X['Age'].min()-0.5,X['Age'].max()+0.5
ymin,ymax=X['Sex_male'].min()-0.5,X['Sex_male'].max()+0.5
print xmin,xmax,ymin,ymax
xx, yy = np.meshgrid(np.linspace(xmin, xmax, 200), np.linspace(ymin, ymax, 200))
model.fit(X,y)
Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
fig=plt.figure(figsize=(20,10))
ax=fig.add_subplot(111)
ax.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu,alpha=0.5)
ax.scatter(X['Age'], X['Sex_male']+np.random.randn(len(X['Age']))*0.05, c=y,s=40, cmap=plt.cm.RdBu_r)
ax.set_xlabel("Normalized Age")
ax.set_ylabel("Sex_male")
ax.set_ylim([-0.5,1.5])
ax.set_xlim([-3,4.5])
plt.show()
test_data = pd.read_csv('data/test.csv')
#print test_data.head()
#add in Sex_male features
test_data['Sex_male']=test_data.Sex.map({'female':0,'male':1})
#use the isnull function to check if there is any missing value in the Age column.
pd.isnull(test_data['Age']).any()
print len(test_data['Age'][pd.isnull(test_data['Age'])])
test_data['Age'][pd.isnull(test_data['Age'])]=data['Age'].mean()
#generate our new feature
X_test=test_data[feature_cols]
X_test['Age']=(X_test['Age']-data['Age'].median())/data['Age'].std()
y_pred=model.predict(X_test)
X_test.head()
samplesubmit = pd.read_csv("data/titanic_submit_example.csv")
#samplesubmit.head()
samplesubmit["Survived"]=y_pred
#samplesubmit.to_csv
samplesubmit.to_csv("data/titanic_submit_gender_age.csv",index=False)
samplesubmit.head()
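# Hedged alternative for Step7 (better ways to fill in missing ages): impute Age with the
# median of each (Pclass, Sex) group instead of the global mean. This is only a sketch and
# re-reads the training CSV, so it does not interfere with the model trained above.
train_alt = pd.read_csv('data/train.csv')
train_alt['Age'] = train_alt.groupby(['Pclass', 'Sex'])['Age'].transform(lambda s: s.fillna(s.median()))
print(train_alt['Age'].isnull().sum())  # should be 0 after the group-wise fill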
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data from Google Cloud Storage
Step2: Prepare data for ARIMA
Step3: Let's create a column for weekly returns. Take the log of the returns to normalize large fluctuations.
Step4: Test for stationarity of the udiff series
Step5: With a p-value < 0.05, we can reject the null hypothesis. This data set is stationary.
Step6: As a rule of thumb for reading these plots: an AR(p) process has an ACF that tails off and a PACF that cuts off after lag p; an MA(q) process has an ACF that cuts off after lag q and a PACF that tails off; for a mixed ARMA process both tail off.
Step7: Our model doesn't do a good job predicting variance in the original data (peaks and valleys).
Step8: Let's make a forecast 2 weeks ahead
| <ASSISTANT_TASK:>
Python Code:
!pip install --user statsmodels
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
%config InlineBackend.figure_format = 'retina'
df = pd.read_csv('gs://cloud-training/ai4f/AAPL10Y.csv')
df['date'] = pd.to_datetime(df['date'])
df.sort_values('date', inplace=True)
df.set_index('date', inplace=True)
print(df.shape)
df.head()
df_week = df.resample('w').mean()
df_week = df_week[['close']]
df_week.head()
df_week['weekly_ret'] = np.log(df_week['close']).diff()
df_week.head()
# drop null rows
df_week.dropna(inplace=True)
df_week.weekly_ret.plot(kind='line', figsize=(12, 6));
udiff = df_week.drop(['close'], axis=1)
udiff.head()
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
rolmean = udiff.rolling(20).mean()
rolstd = udiff.rolling(20).std()
plt.figure(figsize=(12, 6))
orig = plt.plot(udiff, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std Deviation')
plt.title('Rolling Mean & Standard Deviation')
plt.legend(loc='best')
plt.show(block=False)
# Perform Dickey-Fuller test
dftest = sm.tsa.adfuller(udiff.weekly_ret, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value ({0})'.format(key)] = value
dfoutput
from statsmodels.graphics.tsaplots import plot_acf
# the autocorrelation chart provides just the correlation at increasing lags
fig, ax = plt.subplots(figsize=(12,5))
plot_acf(udiff.values, lags=10, ax=ax)
plt.show()
from statsmodels.graphics.tsaplots import plot_pacf
fig, ax = plt.subplots(figsize=(12,5))
plot_pacf(udiff.values, lags=10, ax=ax)
plt.show()
from statsmodels.tsa.arima.model import ARIMA
# Notice that you have to use udiff - the differenced data rather than the original data.
ar1 = ARIMA(udiff.values, order = (3, 0,1)).fit()
ar1.summary()
plt.figure(figsize=(12, 8))
plt.plot(udiff.values, color='blue')
preds = ar1.fittedvalues
plt.plot(preds, color='red')
plt.show()
steps = 2
forecast = ar1.forecast(steps=steps)
plt.figure(figsize=(12, 8))
plt.plot(udiff.values, color='blue')
preds = ar1.fittedvalues
plt.plot(preds, color='red')
plt.plot(pd.DataFrame(np.array([preds[-1],forecast[0]]).T,index=range(len(udiff.values)+1, len(udiff.values)+3)), color='green')
plt.plot(pd.DataFrame(forecast,index=range(len(udiff.values)+1, len(udiff.values)+1+steps)), color='green')
plt.title('Display the predictions with the ARIMA model')
plt.show()
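# Hedged follow-up: since weekly_ret is the first difference of log(close), the 2-step
# forecast of udiff can be mapped back to approximate price levels by cumulatively
# summing onto the last observed log-price. Sketch only, assuming `forecast` from above.
last_log_price = np.log(df_week['close'].iloc[-1])
implied_prices = np.exp(last_log_price + np.cumsum(forecast))
print('Implied close prices for the next {} weeks: {}'.format(steps, implied_prices))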
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We are going to use a simple CNN as our encoder and decoder. In the decoder, we use a SpatialFullConvolution (aka deconvolution or transposed convolution) layer to upsample the image to the original resolution.
Step2: Get the MNIST Dataset
Step3: Define our Training Objective
Step4: Compile the Model
Step5: Start Training
Step6: Let's show the learning curve.
Step7: You can also open tensorboard to see this curve.
Step8: Explore the Latent Space
| <ASSISTANT_TASK:>
Python Code:
# a bit of setup
import numpy as np
from bigdl.nn.criterion import *
from bigdl.dataset import mnist
from zoo.pipeline.api.keras.layers import *
from zoo.pipeline.api.keras.models import Model
from zoo.pipeline.api.keras.utils import *
import datetime as dt
IMAGE_SIZE = 784
IMAGE_ROWS = 28
IMAGE_COLS = 28
IMAGE_CHANNELS = 1
latent_size = 2
from zoo.common.nncontext import *
sc = init_nncontext("Variational Autoencoder Example")
def get_encoder(latent_size):
input0 = Input(shape=(IMAGE_CHANNELS, IMAGE_COLS, IMAGE_ROWS))
#CONV
conv1 = Convolution2D(16, 5, 5, input_shape=(IMAGE_CHANNELS, IMAGE_ROWS, IMAGE_COLS), border_mode='same',
subsample=(2, 2))(input0)
relu1 = LeakyReLU()(conv1)
conv2 = Convolution2D(32, 5, 5, input_shape=(16, 14, 14), border_mode='same', subsample=(2, 2))(relu1)
relu2 = LeakyReLU()(conv2) # 32,7,7
    #fully connected to output mean vector and log-variance vector
    reshape = Reshape([7*7*32])(relu2)
z_mean = Dense(latent_size)(reshape)
z_log_var = Dense(latent_size)(reshape)
model = Model([input0],[z_mean,z_log_var])
return model
def get_decoder(latent_size):
input0 = Input(shape=(latent_size,))
reshape0 = Dense(1568)(input0)
reshape1 = Reshape((32, 7, 7))(reshape0)
relu0 = Activation('relu')(reshape1)
# use resize and conv layer instead of deconv layer
resize1 = ResizeBilinear(14,14)(relu0)
deconv1 = Convolution2D(16, 5, 5, subsample=(1, 1), activation='relu', border_mode = 'same', input_shape=(32, 14, 14))(resize1)
resize2 = ResizeBilinear(28,28)(deconv1)
deconv2 = Convolution2D(1, 5, 5, subsample=(1, 1), input_shape=(16, 28, 28), border_mode = 'same')(resize2)
outputs = Activation('sigmoid')(deconv2)
model = Model([input0],[outputs])
return model
def get_autoencoder(latent_size):
input0 = Input(shape=(IMAGE_CHANNELS, IMAGE_COLS, IMAGE_ROWS))
encoder = get_encoder(latent_size)(input0)
sample = GaussianSampler()(encoder)
decoder_model = get_decoder(latent_size)
decoder = decoder_model(sample)
model = Model([input0],[encoder,decoder])
return model,decoder_model
autoencoder,decoder_model = get_autoencoder(2)
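# Hedged aside in plain NumPy (not part of the BigDL graph): to my understanding the
# GaussianSampler layer applies the reparameterization trick z = mu + exp(0.5*log_var)*eps,
# and KLDCriterion corresponds to -0.5 * sum(1 + log_var - mu^2 - exp(log_var)).
mu_demo = np.zeros(latent_size)
log_var_demo = np.zeros(latent_size)
eps_demo = np.random.randn(latent_size)
z_demo = mu_demo + np.exp(0.5 * log_var_demo) * eps_demo
kl_demo = -0.5 * np.sum(1 + log_var_demo - mu_demo ** 2 - np.exp(log_var_demo))
print(z_demo, kl_demo)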
def get_mnist(sc, mnist_path):
(train_images, train_labels) = mnist.read_data_sets(mnist_path, "train")
train_images = np.reshape(train_images, (60000, 1, 28, 28))
rdd_train_images = sc.parallelize(train_images)
rdd_train_sample = rdd_train_images.map(lambda img:
Sample.from_ndarray(
(img > 128) * 1.0,
[(img > 128) * 1.0, (img > 128) * 1.0]))
return rdd_train_sample
mnist_path = "datasets/mnist" # please replace this
train_data = get_mnist(sc, mnist_path)
# (train_images, train_labels) = mnist.read_data_sets(mnist_path, "train")
batch_size = 100
criterion = ParallelCriterion()
criterion.add(KLDCriterion(), 1.0)
criterion.add(BCECriterion(size_average=False), 1.0/batch_size)
autoencoder.compile(optimizer=Adam(0.001), loss=criterion)
import os
if not os.path.exists("./log"):
os.makedirs("./log")
app_name='vae-digits-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
autoencoder.set_tensorboard(log_dir='./log/',app_name=app_name)
print("Saving logs to ", app_name)
autoencoder.fit(x=train_data,
batch_size=batch_size,
nb_epoch = 6)
import matplotlib
matplotlib.use('Agg')
%pylab inline
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import datetime as dt
train_summary = TrainSummary('./log/', app_name)
loss = np.array(train_summary.read_scalar("Loss"))
plt.figure(figsize = (12,12))
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
from matplotlib.pyplot import imshow
img = np.column_stack([decoder_model.forward(np.random.randn(1,2)).reshape(28,28) for s in range(8)])
imshow(img, cmap='gray')
# This code snippet references this keras example (https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py)
from scipy.stats import norm
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
x_decoded = decoder_model.forward(z_sample)
digit = x_decoded.reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once downloaded, you will find a file called meetingminutes.pdf inside. This is the file we will use next.
Step2: Good. Now let's try a PDF document that I wrote myself in LaTeX.
| <ASSISTANT_TASK:>
Python Code:
!wget https://nostarch.com/download/Automate_the_Boring_Stuff_onlinematerials_v.2.zip
import PyPDF2
with open('./automate_online-materials/meetingminutes.pdf', 'rb') as f:
pdfreader = PyPDF2.PdfFileReader(f)
print(pdfreader.numPages)
page0 = pdfreader.getPage(0)
page0text = page0.extractText()
print(page0text)
import PyPDF2
with open('./polarmetal-v28-letter.pdf', 'rb') as f:
pdfreader = PyPDF2.PdfFileReader(f)
print(pdfreader.numPages)
    for i in range(pdfreader.numPages):
if i==0:
page_number = pdfreader.getPage(i)
page_text = page_number.extractText()
print(page_text[:100])
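# Hedged sketch: to pull the text of every page (rather than only the first), join the
# per-page extractions; the extraction quality is the same as what extractText() shows above.
with open('./polarmetal-v28-letter.pdf', 'rb') as f2:
    reader = PyPDF2.PdfFileReader(f2)
    full_text = '\n'.join(reader.getPage(i).extractText() for i in range(reader.numPages))
print(len(full_text))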
So we should not expect too much from it. The spaces in the title and in names are mostly "swallowed". For long documents, this makes the extraction of little practical use.
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Part 0
Step4: In this lab we will be examining subsets of the tuples we create (e.g., the top rated movies by users). Whenever we examine only a subset of a large dataset, there is the potential that the result will depend on the order we perform operations, such as joins, or how the data is partitioned across the workers. What we want to guarantee is that we always see the same results for a subset, independent of how we manipulate or store the data.
Step6: Even though the two lists contain identical tuples, the difference in ordering sometimes yields a different ordering for the sorted RDD (try running the cell repeatedly and see if the results change or the assertion fails). If we only examined the first two elements of the RDD (e.g., using take(2)), then we would observe different answers - that is a really bad outcome as we want identical input data to always yield identical output. A better technique is to sort the RDD by both the key and value, which we can do by combining the key and value into a single string and then sorting on that string. Since the key is an integer and the value is a unicode string, we can use a function to combine them into a single unicode string (e.g., unicode('%.3f' % key) + ' ' + value) before sorting the RDD using sortBy().
Step7: If we just want to look at the first few elements of the RDD in sorted order, we can use the takeOrdered method with the sortFunction we defined.
Step9: Part 1
Step10: (1b) Movies with Highest Average Ratings
Step11: (1c) Movies with Highest Average Ratings and more than 500 reviews
Step12: Using a threshold on the number of reviews is one way to improve the recommendations, but there are many other good ways to improve quality. For example, you could weight ratings by the number of ratings (a hedged sketch of this weighting appears at the very end of the code below).
Step14: After splitting the dataset, your training set has about 293,000 entries and the validation and test sets each have about 97,000 entries (the exact number of entries in each dataset varies slightly due to the random nature of the randomSplit() transformation).
Step15: (2c) Using ALS.train()
Step16: (2d) Testing Your Model
Step17: (2e) Comparing Your Model
Step18: You now have code to predict how users will rate movies!
Step19: The user ID 0 is unassigned, so we will use it for your ratings. We set the variable myUserID to 0 for you. Next, create a new RDD myRatingsRDD with your ratings for at least 10 movie ratings. Each entry should be formatted as (myUserID, movieID, rating) (i.e., each entry should be formatted in the same way as trainingRDD). As in the original dataset, ratings should be between 1 and 5 (inclusive). If you have not seen at least 10 of these movies, you can increase the parameter passed to take() in the above cell until there are 10 movies that you have seen (or you can also guess what your rating would be for movies you have not seen).
Step20: (3b) Add Your Movies to Training Dataset
Step21: (3c) Train a Model with Your Ratings
Step22: (3d) Check RMSE for the New Model with Your Ratings
Step23: (3e) Predict Your Ratings
Step24: (3f) Predict Your Ratings
| <ASSISTANT_TASK:>
Python Code:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab4', 'small')
ratingsFilename = os.path.join(baseDir, inputPath, 'ratings.dat.gz')
moviesFilename = os.path.join(baseDir, inputPath, 'movies.dat')
numPartitions = 2
rawRatings = sc.textFile(ratingsFilename).repartition(numPartitions)
rawMovies = sc.textFile(moviesFilename)
def get_ratings_tuple(entry):
Parse a line in the ratings dataset
Args:
entry (str): a line in the ratings dataset in the form of UserID::MovieID::Rating::Timestamp
Returns:
tuple: (UserID, MovieID, Rating)
items = entry.split('::')
return int(items[0]), int(items[1]), float(items[2])
def get_movie_tuple(entry):
Parse a line in the movies dataset
Args:
entry (str): a line in the movies dataset in the form of MovieID::Title::Genres
Returns:
tuple: (MovieID, Title)
items = entry.split('::')
return int(items[0]), items[1]
ratingsRDD = rawRatings.map(get_ratings_tuple).cache()
moviesRDD = rawMovies.map(get_movie_tuple).cache()
ratingsCount = ratingsRDD.count()
moviesCount = moviesRDD.count()
print 'There are %s ratings and %s movies in the datasets' % (ratingsCount, moviesCount)
print 'Ratings: %s' % ratingsRDD.take(3)
print 'Movies: %s' % moviesRDD.take(3)
assert ratingsCount == 487650
assert moviesCount == 3883
assert moviesRDD.filter(lambda (id, title): title == 'Toy Story (1995)').count() == 1
assert (ratingsRDD.takeOrdered(1, key=lambda (user, movie, rating): movie)
== [(1, 1, 5.0)])
tmp1 = [(1, u'alpha'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'delta')]
tmp2 = [(1, u'delta'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'alpha')]
oneRDD = sc.parallelize(tmp1)
twoRDD = sc.parallelize(tmp2)
oneSorted = oneRDD.sortByKey(True).collect()
twoSorted = twoRDD.sortByKey(True).collect()
print oneSorted
print twoSorted
assert set(oneSorted) == set(twoSorted) # Note that both lists have the same elements
assert twoSorted[0][0] < twoSorted.pop()[0] # Check that it is sorted by the keys
assert oneSorted[0:2] != twoSorted[0:2] # Note that the subset consisting of the first two elements does not match
def sortFunction(tuple):
Construct the sort string (does not perform actual sorting)
Args:
tuple: (rating, MovieName)
Returns:
sortString: the value to sort with, 'rating MovieName'
key = unicode('%.3f' % tuple[0])
value = tuple[1]
return (key + ' ' + value)
print oneRDD.sortBy(sortFunction, True).collect()
print twoRDD.sortBy(sortFunction, True).collect()
oneSorted1 = oneRDD.takeOrdered(oneRDD.count(),key=sortFunction)
twoSorted1 = twoRDD.takeOrdered(twoRDD.count(),key=sortFunction)
print 'one is %s' % oneSorted1
print 'two is %s' % twoSorted1
assert oneSorted1 == twoSorted1
# TODO: Replace <FILL IN> with appropriate code
# First, implement a helper function `getCountsAndAverages` using only Python
def getCountsAndAverages(IDandRatingsTuple):
Calculate average rating
Args:
IDandRatingsTuple: a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...))
Returns:
tuple: a tuple of (MovieID, (number of ratings, averageRating))
count = 0
tot = 0.0
for rating in IDandRatingsTuple[1]:
tot += rating
count += 1;
return (IDandRatingsTuple[0], (count, tot/count))
# TEST Number of Ratings and Average Ratings for a Movie (1a)
Test.assertEquals(getCountsAndAverages((1, (1, 2, 3, 4))), (1, (4, 2.5)),
'incorrect getCountsAndAverages() with integer list')
Test.assertEquals(getCountsAndAverages((100, (10.0, 20.0, 30.0))), (100, (3, 20.0)),
'incorrect getCountsAndAverages() with float list')
Test.assertEquals(getCountsAndAverages((110, xrange(20))), (110, (20, 9.5)),
'incorrect getCountsAndAverages() with xrange')
# TODO: Replace <FILL IN> with appropriate code
# From ratingsRDD with tuples of (UserID, MovieID, Rating) create an RDD with tuples of
# the (MovieID, iterable of Ratings for that MovieID)
movieIDsWithRatingsRDD = (ratingsRDD
.map(lambda x:(x[1], x[2]))
.groupByKey())
print 'movieIDsWithRatingsRDD: %s\n' % movieIDsWithRatingsRDD.take(3)
# Using `movieIDsWithRatingsRDD`, compute the number of ratings and average rating for each movie to
# yield tuples of the form (MovieID, (number of ratings, average rating))
movieIDsWithAvgRatingsRDD = movieIDsWithRatingsRDD.map(getCountsAndAverages)
print 'movieIDsWithAvgRatingsRDD: %s\n' % movieIDsWithAvgRatingsRDD.take(3)
# To `movieIDsWithAvgRatingsRDD`, apply RDD transformations that use `moviesRDD` to get the movie
# names for `movieIDsWithAvgRatingsRDD`, yielding tuples of the form
# (average rating, movie name, number of ratings)
movieNameWithAvgRatingsRDD = (moviesRDD
.join(movieIDsWithAvgRatingsRDD)
.map(lambda x: (x[1][1][1], x[1][0], x[1][1][0])))
print 'movieNameWithAvgRatingsRDD: %s\n' % movieNameWithAvgRatingsRDD.take(3)
# TEST Movies with Highest Average Ratings (1b)
Test.assertEquals(movieIDsWithRatingsRDD.count(), 3615,
'incorrect movieIDsWithRatingsRDD.count() (expected 3615)')
movieIDsWithRatingsTakeOrdered = movieIDsWithRatingsRDD.takeOrdered(3)
Test.assertTrue(movieIDsWithRatingsTakeOrdered[0][0] == 1 and
len(list(movieIDsWithRatingsTakeOrdered[0][1])) == 993,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[0] (expected 993)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[1][0] == 2 and
len(list(movieIDsWithRatingsTakeOrdered[1][1])) == 332,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[1] (expected 332)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[2][0] == 3 and
len(list(movieIDsWithRatingsTakeOrdered[2][1])) == 299,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[2] (expected 299)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.count(), 3615,
'incorrect movieIDsWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.takeOrdered(3),
[(1, (993, 4.145015105740181)), (2, (332, 3.174698795180723)),
(3, (299, 3.0468227424749164))],
'incorrect movieIDsWithAvgRatingsRDD.takeOrdered(3)')
Test.assertEquals(movieNameWithAvgRatingsRDD.count(), 3615,
'incorrect movieNameWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieNameWithAvgRatingsRDD.takeOrdered(3),
[(1.0, u'Autopsy (Macchie Solari) (1975)', 1), (1.0, u'Better Living (1998)', 1),
(1.0, u'Big Squeeze, The (1996)', 3)],
'incorrect movieNameWithAvgRatingsRDD.takeOrdered(3)')
# TODO: Replace <FILL IN> with appropriate code
# Apply an RDD transformation to `movieNameWithAvgRatingsRDD` to limit the results to movies with
# ratings from more than 500 people. We then use the `sortFunction()` helper function to sort by the
# average rating to get the movies in order of their rating (highest rating first)
movieLimitedAndSortedByRatingRDD = (movieNameWithAvgRatingsRDD
.filter(lambda x: x[2] > 500)
.sortBy(sortFunction, False))
print 'Movies with highest ratings: %s' % movieLimitedAndSortedByRatingRDD.take(20)
# TEST Movies with Highest Average Ratings and more than 500 Reviews (1c)
Test.assertEquals(movieLimitedAndSortedByRatingRDD.count(), 194,
'incorrect movieLimitedAndSortedByRatingRDD.count()')
Test.assertEquals(movieLimitedAndSortedByRatingRDD.take(20),
[(4.5349264705882355, u'Shawshank Redemption, The (1994)', 1088),
(4.515798462852263, u"Schindler's List (1993)", 1171),
(4.512893982808023, u'Godfather, The (1972)', 1047),
(4.510460251046025, u'Raiders of the Lost Ark (1981)', 1195),
(4.505415162454874, u'Usual Suspects, The (1995)', 831),
(4.457256461232604, u'Rear Window (1954)', 503),
(4.45468509984639, u'Dr. Strangelove or: How I Learned to Stop Worrying and Love the Bomb (1963)', 651),
(4.43953006219765, u'Star Wars: Episode IV - A New Hope (1977)', 1447),
(4.4, u'Sixth Sense, The (1999)', 1110), (4.394285714285714, u'North by Northwest (1959)', 700),
(4.379506641366224, u'Citizen Kane (1941)', 527), (4.375, u'Casablanca (1942)', 776),
(4.363975155279503, u'Godfather: Part II, The (1974)', 805),
(4.358816276202219, u"One Flew Over the Cuckoo's Nest (1975)", 811),
(4.358173076923077, u'Silence of the Lambs, The (1991)', 1248),
(4.335826477187734, u'Saving Private Ryan (1998)', 1337),
(4.326241134751773, u'Chinatown (1974)', 564),
(4.325383304940375, u'Life Is Beautiful (La Vita \ufffd bella) (1997)', 587),
(4.324110671936759, u'Monty Python and the Holy Grail (1974)', 759),
(4.3096, u'Matrix, The (1999)', 1250)], 'incorrect sortedByRatingRDD.take(20)')
trainingRDD, validationRDD, testRDD = ratingsRDD.randomSplit([6, 2, 2], seed=0L)
print 'Training: %s, validation: %s, test: %s\n' % (trainingRDD.count(),
validationRDD.count(),
testRDD.count())
print trainingRDD.take(3)
print validationRDD.take(3)
print testRDD.take(3)
assert trainingRDD.count() == 292716
assert validationRDD.count() == 96902
assert testRDD.count() == 98032
assert trainingRDD.filter(lambda t: t == (1, 914, 3.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 2355, 5.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 595, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1287, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 594, 4.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1270, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1193, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 2398, 4.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1035, 5.0)).count() == 1
# TODO: Replace <FILL IN> with appropriate code
import math
def computeError(predictedRDD, actualRDD):
Compute the root mean squared error between predicted and actual
Args:
predictedRDD: predicted ratings for each movie and each user where each entry is in the form
(UserID, MovieID, Rating)
actualRDD: actual ratings where each entry is in the form (UserID, MovieID, Rating)
Returns:
RMSE (float): computed RMSE value
# Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)
predictedReformattedRDD = predictedRDD.map(lambda x: ((x[0], x[1]), x[2]))
# Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)
actualReformattedRDD = actualRDD.map(lambda x: ((x[0], x[1]), x[2]))
# Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each
# RDD) in the reformatted RDDs using RDD transformtions - do not use collect()
squaredErrorsRDD = (predictedReformattedRDD
.join(actualReformattedRDD)
.map(lambda x: (x, (x[1][0] - x[1][1])**2)))
# Compute the total squared error - do not use collect()
totalError = squaredErrorsRDD.values().sum()
# Count the number of entries for which you computed the total squared error
numRatings = squaredErrorsRDD.count()
# Using the total squared error and the number of entries, compute the RMSE
return math.sqrt(float(totalError)/numRatings)
# sc.parallelize turns a Python list into a Spark RDD.
testPredicted = sc.parallelize([
(1, 1, 5),
(1, 2, 3),
(1, 3, 4),
(2, 1, 3),
(2, 2, 2),
(2, 3, 4)])
testActual = sc.parallelize([
(1, 2, 3),
(1, 3, 5),
(2, 1, 5),
(2, 2, 1)])
testPredicted2 = sc.parallelize([
(2, 2, 5),
(1, 2, 5)])
testError = computeError(testPredicted, testActual)
print 'Error for test dataset (should be 1.22474487139): %s' % testError
testError2 = computeError(testPredicted2, testActual)
print 'Error for test dataset2 (should be 3.16227766017): %s' % testError2
testError3 = computeError(testActual, testActual)
print 'Error for testActual dataset (should be 0.0): %s' % testError3
# TEST Root Mean Square Error (2b)
Test.assertTrue(abs(testError - 1.22474487139) < 0.00000001,
'incorrect testError (expected 1.22474487139)')
Test.assertTrue(abs(testError2 - 3.16227766017) < 0.00000001,
'incorrect testError2 result (expected 3.16227766017)')
Test.assertTrue(abs(testError3 - 0.0) < 0.00000001,
'incorrect testActual result (expected 0.0)')
# TODO: Replace <FILL IN> with appropriate code
from pyspark.mllib.recommendation import ALS
validationForPredictRDD = validationRDD.map(lambda x: (x[0], x[1]))
seed = 5L
iterations = 5
regularizationParameter = 0.1
ranks = [4, 8, 12]
errors = [0, 0, 0]
err = 0
tolerance = 0.02
minError = float('inf')
bestRank = -1
bestIteration = -1
for rank in ranks:
model = ALS.train(trainingRDD, rank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
predictedRatingsRDD = model.predictAll(validationForPredictRDD)
error = computeError(predictedRatingsRDD, validationRDD)
errors[err] = error
err += 1
print 'For rank %s the RMSE is %s' % (rank, error)
if error < minError:
minError = error
bestRank = rank
print 'The best model was trained with rank %s' % bestRank
# TEST Using ALS.train (2c)
Test.assertEquals(trainingRDD.getNumPartitions(), 2,
'incorrect number of partitions for trainingRDD (expected 2)')
Test.assertEquals(validationForPredictRDD.count(), 96902,
'incorrect size for validationForPredictRDD (expected 96902)')
Test.assertEquals(validationForPredictRDD.filter(lambda t: t == (1, 1907)).count(), 1,
'incorrect content for validationForPredictRDD')
Test.assertTrue(abs(errors[0] - 0.883710109497) < tolerance, 'incorrect errors[0]')
Test.assertTrue(abs(errors[1] - 0.878486305621) < tolerance, 'incorrect errors[1]')
Test.assertTrue(abs(errors[2] - 0.876832795659) < tolerance, 'incorrect errors[2]')
# TODO: Replace <FILL IN> with appropriate code
myModel = ALS.train(trainingRDD, bestRank, seed=seed, iterations=iterations, lambda_=regularizationParameter)
testForPredictingRDD = testRDD.map(lambda x: (x[0], x[1]))
predictedTestRDD = myModel.predictAll(testForPredictingRDD)
testRMSE = computeError(testRDD, predictedTestRDD)
print 'The model had a RMSE on the test set of %s' % testRMSE
# TEST Testing Your Model (2d)
Test.assertTrue(abs(testRMSE - 0.87809838344) < tolerance, 'incorrect testRMSE')
# TODO: Replace <FILL IN> with appropriate code
trainingAvgRating = float(trainingRDD.map(lambda x: x[2]).sum()) / trainingRDD.count()
print 'The average rating for movies in the training set is %s' % trainingAvgRating
testForAvgRDD = testRDD.map(lambda x: (x[0], x[1], trainingAvgRating))
testAvgRMSE = computeError(testRDD, testForAvgRDD)
print 'The RMSE on the average set is %s' % testAvgRMSE
# TEST Comparing Your Model (2e)
Test.assertTrue(abs(trainingAvgRating - 3.57409571052) < 0.000001,
'incorrect trainingAvgRating (expected 3.57409571052)')
Test.assertTrue(abs(testAvgRMSE - 1.12036693569) < 0.000001,
'incorrect testAvgRMSE (expected 1.12036693569)')
print 'Most rated movies:'
print '(average rating, movie name, number of reviews)'
for ratingsTuple in movieLimitedAndSortedByRatingRDD.take(50):
print ratingsTuple
# TODO: Replace <FILL IN> with appropriate code
myUserID = 0
# Note that the movie IDs are the *last* number on each line. A common error was to use the number of ratings as the movie ID.
myRatedMovies = [
# The format of each line is (myUserID, movie ID, your rating)
# For example, to give the movie "Star Wars: Episode IV - A New Hope (1977)" a five rating, you would add the following line:
# (myUserID, 260, 5),
(myUserID, 54001, 5), # Harry Potter & the Order of the Phoenix
(myUserID, 150, 5), # Apollo 13
(myUserID, 1, 4), # Toy Story
(myUserID, 2953, 4), # Home Alone 2
(myUserID, 1882, 3), # Godzilla (1998)
(myUserID, 5313, 3), # The Scorpion King
(myUserID, 260, 2), # Star Wars: Episode IV
(myUserID, 8731, 2), # Moulin Rouge
(myUserID, 3578, 1), # Gladiator
(myUserID, 2028, 1) # Saving Private Ryan
]
myRatingsRDD = sc.parallelize(myRatedMovies)
print 'My movie ratings: %s' % myRatingsRDD.take(10)
# TODO: Replace <FILL IN> with appropriate code
trainingWithMyRatingsRDD = trainingRDD.union(myRatingsRDD)
print ('The training dataset now has %s more entries than the original training dataset' %
(trainingWithMyRatingsRDD.count() - trainingRDD.count()))
assert (trainingWithMyRatingsRDD.count() - trainingRDD.count()) == myRatingsRDD.count()
# TODO: Replace <FILL IN> with appropriate code
myRatingsModel = ALS.train(trainingWithMyRatingsRDD, bestRank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
# TODO: Replace <FILL IN> with appropriate code
predictedTestMyRatingsRDD = myRatingsModel.predictAll(testForPredictingRDD)
testRMSEMyRatings = computeError(testRDD, predictedTestMyRatingsRDD)
print 'The model had a RMSE on the test set of %s' % testRMSEMyRatings
# TODO: Replace <FILL IN> with appropriate code
# Use the Python list myRatedMovies to transform the moviesRDD into an RDD with entries that are pairs
# of the form (myUserID, Movie ID) and that does not contain any movies that you have rated.
myUnratedMoviesRDD = (moviesRDD
.filter(lambda x: x[0] not in [x[1] for x in myRatedMovies])
.map(lambda x: (myUserID, x[0])))
# Use the input RDD, myUnratedMoviesRDD, with myRatingsModel.predictAll() to predict your ratings for the movies
predictedRatingsRDD = myRatingsModel.predictAll(myUnratedMoviesRDD)
# TODO: Replace <FILL IN> with appropriate code
# Transform movieIDsWithAvgRatingsRDD from part (1b), which has the form (MovieID, (number of ratings, average rating)),
# into and RDD of the form (MovieID, number of ratings)
movieCountsRDD = movieIDsWithAvgRatingsRDD.map(lambda x: (x[0], x[1][0]))
# Transform predictedRatingsRDD into an RDD with entries that are pairs of the form (Movie ID, Predicted Rating)
predictedRDD = predictedRatingsRDD.map(lambda x: (x[1], x[2]))
# Use RDD transformations with predictedRDD and movieCountsRDD to yield an RDD with tuples
# of the form (Movie ID, (Predicted Rating, number of ratings))
predictedWithCountsRDD = (predictedRDD.join(movieCountsRDD))
# Use RDD transformations with PredictedWithCountsRDD and moviesRDD to yield an RDD with tuples
# of the form (Predicted Rating, Movie Name, number of ratings), for movies with more than 75 ratings
ratingsWithNamesRDD = (predictedWithCountsRDD
.filter(lambda x: x[1][1] > 75)
.join(moviesRDD)
.map(lambda x: (x[1][0][0], x[1][1], x[1][0][1])))
predictedHighestRatedMovies = ratingsWithNamesRDD.takeOrdered(20, key=lambda x: -x[0])
print ('My highest rated movies as predicted (for movies with more than 75 reviews):\n%s' %
'\n'.join(map(str, predictedHighestRatedMovies)))
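# Hedged sketch for the thresholding note in part (1c): instead of a hard 500-review cutoff,
# weight each movie's average rating by its review count (a simple IMDB-style weighted score).
# This is not part of the graded lab; the prior weight `prior_m` is an arbitrary choice.
prior_m = 500.0
global_mean = movieIDsWithAvgRatingsRDD.map(lambda x: x[1][1]).mean()
weightedRatingsRDD = (movieIDsWithAvgRatingsRDD
                      .map(lambda x: (x[0], (x[1][0] * x[1][1] + prior_m * global_mean) / (x[1][0] + prior_m)))
                      .join(moviesRDD)
                      .map(lambda x: (x[1][0], x[1][1])))
print('\n'.join(map(str, weightedRatingsRDD.takeOrdered(10, key=lambda x: -x[0]))))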
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we import the data, modify it if needed, and proceed
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from simmit import smartplus as sim
import os
dir = os.path.dirname(os.path.realpath('__file__'))
umat_name = 'MIPLN' #This is the 5 character code for the periodic homogenization for multlayered composite
nstatev = 0 #The number of scalar variables required, everything is stored in sub-phases statevs
rho = 1.12 #The density of the material (overall)
c_p = 1.64 #The specific heat capacity (overall)
nphases = 2 #The number of phases
num_file = 0 #The num of the file that contains the subphases
psi_rve = 0.
theta_rve = 0.
phi_rve = 0.
props = np.array([nphases, num_file])
NPhases_file = dir + '/data/Nlayers0.dat'
NPhases = pd.read_csv(NPhases_file, delimiter=r'\s+', index_col=False, engine='python')
NPhases[::]
path_data = 'data'
path_results = 'results'
pathfile = 'path.txt'
outputfile = 'results_PLN.txt'
sim.solver(umat_name, props, nstatev, psi_rve, theta_rve, phi_rve, rho, c_p, path_data, path_results, pathfile, outputfile)
outputfile_macro = dir + '/' + path_results + '/results_PLN_global-0.txt'
outputfile_micro1 = dir + '/' + path_results + '/results_PLN_global-0-0.txt'
outputfile_micro2 = dir + '/' + path_results + '/results_PLN_global-0-1.txt'
fig = plt.figure()
e11, e22, e33, e12, e13, e23, s11, s22, s33, s12, s13, s23 = np.loadtxt(outputfile_macro, usecols=(8,9,10,11,12,13,14,15,16,17,18,19), unpack=True)
plt.grid(True)
plt.plot(e11,s11, c='black')
e11, e22, e33, e12, e13, e23, s11, s22, s33, s12, s13, s23 = np.loadtxt(outputfile_micro1, usecols=(8,9,10,11,12,13,14,15,16,17,18,19), unpack=True)
plt.grid(True)
plt.plot(e11,s11, c='red')
e11, e22, e33, e12, e13, e23, s11, s22, s33, s12, s13, s23 = np.loadtxt(outputfile_micro2, usecols=(8,9,10,11,12,13,14,15,16,17,18,19), unpack=True)
plt.grid(True)
plt.plot(e11,s11, c='blue')
plt.xlabel('Strain')
plt.ylabel('Stress (MPa)')
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SBML
Step2: There are other dialects of SBML prior to FBC 2 which have previously been used to encode COBRA models. The primary one is the "COBRA" dialect, which used the "notes" fields in SBML files.
Step3: JSON
Step4: MATLAB
Step5: If the mat file contains only a single model, cobra can figure out which variable to read from, and the variable_name parameter is unnecessary.
Step6: Saving models to mat files is also relatively straightforward
| <ASSISTANT_TASK:>
Python Code:
import cobra.test
import os
from os.path import join
data_dir = cobra.test.data_directory
print("mini test files: ")
print(", ".join(i for i in os.listdir(data_dir)
if i.startswith("mini")))
textbook_model = cobra.test.create_test_model("textbook")
ecoli_model = cobra.test.create_test_model("ecoli")
salmonella_model = cobra.test.create_test_model("salmonella")
cobra.io.read_sbml_model(join(data_dir, "mini_fbc2.xml"))
cobra.io.write_sbml_model(textbook_model, "test_fbc2.xml")
cobra.io.read_sbml_model(join(data_dir, "mini_cobra.xml"))
cobra.io.write_sbml_model(textbook_model, "test_cobra.xml",
use_fbc_package=False)
cobra.io.load_json_model(join(data_dir, "mini.json"))
cobra.io.save_json_model(textbook_model, "test.json")
cobra.io.load_matlab_model(join(data_dir, "mini.mat"),
variable_name="mini_textbook")
cobra.io.load_matlab_model(join(data_dir, "mini.mat"))
cobra.io.save_matlab_model(textbook_model, "test.mat")
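# Hedged sketch: the readers and writers compose, so converting between formats is just a
# load followed by a save (here the bundled mini JSON model is re-exported as SBML).
mini_from_json = cobra.io.load_json_model(join(data_dir, "mini.json"))
cobra.io.write_sbml_model(mini_from_json, "mini_roundtrip.xml")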
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's pretend that we feel there are too many features, and many are probably useless. We only want the top 100 for making our model.
Step2: Now let's try fitting a model with the trimmed data set, and see how it performs in cross validation
Step3: Perfect cross validation?
| <ASSISTANT_TASK:>
Python Code:
npoints = 100
nfeatures = int(5e4)
rng = np.random.default_rng()
X = rng.random(size=(npoints, nfeatures));
Y = rng.random(size=(npoints,)) > 0.5
def select_best_features(X, Y, n=100):
corrs = np.zeros(X.shape[1])
for ii in range(X.shape[1]):
corrs[ii] = np.corrcoef(X[:, ii], Y)[0, 1]
top_idxs = np.argsort(np.abs(corrs))[-n:]
return top_idxs
%%time
top_idxs = select_best_features(X, Y, 100)
X100 = X[:, top_idxs]
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_score
import sklearn.linear_model
from sklearn import svm
X_train, X_test, y_train, y_test = train_test_split(X100, Y, test_size=0.4, random_state=0)
clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
print(clf.score(X_test, y_test))
folds = 5
scores = cross_val_score(clf, X100, Y, cv=folds)
print(f"{folds}-fold cross validation scores:")
print(scores)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.4, random_state=0)
print(f"Input X shape: {X_train.shape}")
top_idxs = select_best_features(X_train, y_train, 100)
X100 = X_train[:, top_idxs]
print(f"output shape: {X100.shape}")
# This will be what we check with, after training with the other rows
X_holdout = X_test[:, top_idxs]
clf = svm.SVC(kernel='linear', C=1).fit(X100, y_train)
print(f"Score on holdout set: {clf.score(X_holdout, y_test)}")
scores = cross_val_score(clf, X_holdout, y_test, cv=5)
print(f"{folds}-fold cross validation scores:")
print(scores)
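# Hedged sketch: the leak-free procedure above can also be expressed as a scikit-learn
# Pipeline, so feature selection is re-fit inside every CV fold automatically.
# SelectKBest(f_classif) stands in here for the correlation ranking used above.
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, f_classif

pipe = Pipeline([
    ("select", SelectKBest(f_classif, k=100)),
    ("svc", svm.SVC(kernel="linear", C=1)),
])
pipe_scores = cross_val_score(pipe, X, Y, cv=folds)
print(f"Pipeline {folds}-fold CV scores (should hover near chance): {pipe_scores}")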
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transforming an input to a known output
Step2: relation between input and output is linear
Step3: Defining the model to train
Step5: Defining a layer with a configurable number of neurons and inputs, initialized with random weights
Step6: Output of a single untrained neuron
Step7: Loss - Mean Squared Error
Step8: Minimize Loss by changing parameters of neuron
Step9: Training
Step10: Line drawn by neuron after training
Step11: Prebuilt optimizers do this job (but a bit more efficiently and in a more sophisticated way)
Step12: More data points, more noisy
Step13: Lines model draws over time
Step14: After 500 Steps
Step15: Final Step
Step16: Understanding the effect of activation functions
Step17: Logistic Regression
Step19: We compress output between 0 and 1 using sigmoid to match y
Step20: We have 2d input now
Step21: Reconsidering the loss function
Step22: The same solution using the high-level Keras API
| <ASSISTANT_TASK:>
Python Code:
!pip install -q tf-nightly-gpu-2.0-preview
import tensorflow as tf
print(tf.__version__)
# a small sanity check, does tf seem to work ok?
hello = tf.constant('Hello TF!')
print("This works: {}".format(hello))
# this should return True even on Colab
tf.test.is_gpu_available()
tf.test.is_built_with_cuda()
!nvidia-smi
tf.executing_eagerly()
input = [[-1], [0], [1], [2], [3], [4]]
output = [[2], [1], [0], [-1], [-2], [-3]]
import matplotlib.pyplot as plt
plt.xlabel('input')
plt.ylabel('output')
plt.plot(input, output, 'ro')
plt.plot(input, output)
plt.plot(input, output, 'ro')
w = tf.constant([[1.5], [-2], [1]], dtype='float32')
x = tf.constant([[10, 6, 8]], dtype='float32')
b = tf.constant([6], dtype='float32')
y = tf.matmul(x, w) + b
print(y)
from tensorflow.keras.layers import Layer
class LinearLayer(Layer):
y = w.x + b
def __init__(self, units=1, input_dim=1):
super(LinearLayer, self).__init__()
w_init = tf.random_normal_initializer(stddev=2)
self.w = tf.Variable(
initial_value = w_init(shape=(input_dim, units), dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value = b_init(shape=(units,), dtype='float32'),
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
linear_layer = LinearLayer()
x = tf.constant(input, dtype=tf.float32)
y_true = tf.constant(output, dtype=tf.float32)
y_true
y_pred = linear_layer(x)
y_pred
plt.plot(x, y_pred)
plt.plot(input, output, 'ro')
loss_fn = tf.losses.mean_squared_error
# loss_fn = tf.losses.mean_absolute_error
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred))
print(loss)
tf.keras.losses.mean_squared_error == tf.losses.mean_squared_error
# a simple example
# f(x) = x^2
# f'(x) = 2x
# x = 4
# f(4) = 16
# f'(4) = 8 (that's what we expect)
def tape_sample():
x = tf.constant(4.0)
# open a GradientTape
with tf.GradientTape() as tape:
tape.watch(x)
y = x * x
dy_dx = tape.gradient(y, x)
print(dy_dx)
# just a function in order not to interfere with x on the global scope
tape_sample()
linear_layer = LinearLayer()
linear_layer.w, linear_layer.b
linear_layer.trainable_weights
EPOCHS = 200
learning_rate = 1e-2
losses = []
weights = []
biases = []
weights_gradient = []
biases_gradient = []
for step in range(EPOCHS):
with tf.GradientTape() as tape:
# forward pass
y_pred = linear_layer(x)
# loss value for this batch
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred))
# just for logging
losses.append(loss.numpy())
weights.append(linear_layer.w.numpy()[0][0])
biases.append(linear_layer.b.numpy()[0])
# get gradients of weights wrt the loss
gradients = tape.gradient(loss, linear_layer.trainable_weights)
weights_gradient.append(gradients[0].numpy()[0][0])
biases_gradient.append(gradients[1].numpy()[0])
# backward pass, changing trainable weights
linear_layer.w.assign_sub(learning_rate * gradients[0])
linear_layer.b.assign_sub(learning_rate * gradients[1])
print(loss)
plt.xlabel('epochs')
plt.ylabel('loss')
# plt.yscale('log')
plt.plot(losses)
plt.figure(figsize=(20, 10))
plt.plot(weights)
plt.plot(biases)
plt.plot(weights_gradient)
plt.plot(biases_gradient)
plt.legend(['slope', 'offset', 'gradient slope', 'gradient offset'])
y_pred = linear_layer(x)
y_pred
plt.plot(x, y_pred)
plt.plot(input, output, 'ro')
# single neuron and single input: one weight and one bias
# slope m ~ -1
# y-axis offset y0 ~ 1
# https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form
linear_layer.trainable_weights
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2)
EPOCHS = 500
losses = []
linear_layer = LinearLayer()
for step in range(EPOCHS):
with tf.GradientTape() as tape:
# Forward pass.
y_pred = linear_layer(x)
# Loss value for this batch.
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred))
losses.append(loss)
# Get gradients of weights wrt the loss.
gradients = tape.gradient(loss, linear_layer.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights))
# plt.yscale('log')
plt.ylabel("loss")
plt.xlabel("epochs")
plt.plot(losses)
y_pred = linear_layer(x)
plt.plot(x, y_pred)
plt.plot(input, output, 'ro')
linear_layer.trainable_weights
import numpy as np
a = -1
b = 1
n = 50
x = tf.constant(np.random.uniform(0, 1, n), dtype='float32')
y = tf.constant(a*x+b + 0.1 * np.random.normal(0, 1, n), dtype='float32')
plt.scatter(x, y)
x = tf.reshape(x, (n, 1))
y_true = tf.reshape(y, (n, 1))
linear_layer = LinearLayer()
a = linear_layer.w.numpy()[0][0]
b = linear_layer.b.numpy()[0]
def plot_line(a, b, x, y_true):
fig, ax = plt.subplots()
y_pred = a * x + b
line = ax.plot(x, y_pred)
ax.plot(x, y_true, 'ro')
return fig, line
plot_line(a, b, x, y_true)
# the problem is a little bit harder, train for a little longer
EPOCHS = 2000
losses = []
lines = []
linear_layer = LinearLayer()
for step in range(EPOCHS):
# Open a GradientTape.
with tf.GradientTape() as tape:
# Forward pass.
y_pred = linear_layer(x)
# Loss value for this batch.
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred))
losses.append(loss)
a = linear_layer.w.numpy()[0][0]
b = linear_layer.b.numpy()[0]
lines.append((a, b))
# Get gradients of weights wrt the loss.
gradients = tape.gradient(loss, linear_layer.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights))
print(loss)
# plt.yscale('log')
plt.ylabel("loss")
plt.xlabel("epochs")
plt.plot(losses)
a, b = lines[0]
plot_line(a, b, x, y_true)
a, b = lines[500]
plot_line(a, b, x, y_true)
a, b = lines[1999]
plot_line(a, b, x, y_true)
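# Hedged sanity check (not in the original walkthrough): compare the trained neuron's
# slope/offset against the closed-form least-squares fit from np.polyfit.
slope_ls, offset_ls = np.polyfit(x.numpy().ravel(), y_true.numpy().ravel(), 1)
print("least-squares fit: slope=%.3f, offset=%.3f" % (slope_ls, offset_ls))
print("trained neuron:    slope=%.3f, offset=%.3f" % (linear_layer.w.numpy()[0][0],
                                                      linear_layer.b.numpy()[0]))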
import numpy as np
x = tf.reshape(tf.constant(np.arange(-1, 4, 0.1), dtype='float32'), (50, 1))
y_pred = linear_layer(x)
plt.figure(figsize=(20, 10))
plt.plot(x, y_pred)
y_pred_relu = tf.nn.relu(y_pred)
plt.plot(x, y_pred_relu)
y_pred_sigmoid = tf.nn.sigmoid(y_pred)
plt.plot(x, y_pred_sigmoid)
y_pred_tanh = tf.nn.tanh(y_pred)
plt.plot(x, y_pred_tanh)
plt.plot(input, output, 'ro')
plt.legend(['no activation', 'relu', 'sigmoid', 'tanh'])
from matplotlib.colors import ListedColormap
a = -1
b = 1
n = 100
# all points
X = np.random.uniform(0, 1, (n, 2))
# our line
line_x = np.random.uniform(0, 1, n)
line_y = a*line_x+b
plt.plot(line_x, line_y, 'r')
# below and above line
y = X[:, 1] > a*X[:, 0]+b
y = y.astype(int)
plt.xlabel("x1")
plt.ylabel("x2")
plt.scatter(X[:,0], X[:,1], c=y, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k')
y
class SigmoidLayer(LinearLayer):
y = sigmoid(w.x + b)
def __init__(self, **kwargs):
super(SigmoidLayer, self).__init__(**kwargs)
def call(self, inputs):
return tf.sigmoid(super().call(inputs))
x = tf.constant(X, dtype='float32')
y_true = tf.constant(y, dtype='float32')
x.shape
model = SigmoidLayer(input_dim=2)
loss_fn = tf.losses.binary_crossentropy
# standard optimizer using advanced properties
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-1)
# https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/metrics/Accuracy
m = tf.keras.metrics.Accuracy()
EPOCHS = 1000
losses = []
accuracies = []
for step in range(EPOCHS):
# Open a GradientTape.
with tf.GradientTape() as tape:
# Forward pass.
y_pred = model(x)
# Loss value for this batch.
loss = loss_fn(y_true=tf.squeeze(y_true), y_pred=tf.squeeze(y_pred))
y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float)
m.update_state(tf.squeeze(y_true), y_pred_binary)
accuracy = m.result().numpy()
losses.append(loss)
accuracies.append(accuracy)
# Get gradients of weights wrt the loss.
gradients = tape.gradient(loss, model.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
print(loss)
print(accuracy)
plt.yscale('log')
plt.ylabel("loss")
plt.xlabel("epochs")
plt.plot(losses)
plt.ylabel("accuracy")
plt.xlabel("epochs")
plt.plot(accuracies)
y_pred = model(x)
y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float)
y_pred_binary
y_true - y_pred_binary
# below and above line
plt.xlabel("x1")
plt.ylabel("x2")
plt.scatter(X[:,0], X[:,1], c=y_pred_binary, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k')
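# Hedged sketch: the single sigmoid neuron's decision boundary is the line
# w1*x1 + w2*x2 + b = 0, i.e. x2 = -(w1*x1 + b) / w2. Plot it against the true line.
w1, w2 = model.w.numpy()[:, 0]
b_fit = model.b.numpy()[0]
boundary_x = np.linspace(0, 1, 50)
boundary_y = -(w1 * boundary_x + b_fit) / w2
plt.xlabel("x1")
plt.ylabel("x2")
plt.plot(line_x, line_y, 'r')
plt.plot(boundary_x, boundary_y, 'g')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k')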
from tensorflow.keras.layers import Dense
model = tf.keras.Sequential()
model.add(Dense(units=1, activation='sigmoid', input_dim=2))
model.summary()
%%time
model.compile(loss=loss_fn, # binary cross entropy, unchanged from low level example
optimizer=optimizer, # adam, unchanged from low level example
metrics=['accuracy'])
# does a similar thing internally as our loop from above
history = model.fit(x, y_true, epochs=EPOCHS, verbose=0)
loss, accuracy = model.evaluate(x, y_true)
loss, accuracy
plt.yscale('log')
plt.ylabel("accuracy")
plt.xlabel("epochs")
plt.plot(history.history['accuracy'])
plt.yscale('log')
plt.ylabel("loss")
plt.xlabel("epochs")
plt.plot(history.history['loss'])
y_pred = model.predict(x)
y_pred_binary = (tf.squeeze(y_pred) > 0.5).numpy().astype(float)
# below and above line
plt.xlabel("x1")
plt.ylabel("x2")
plt.scatter(X[:,0], X[:,1], c=y_pred_binary, cmap=ListedColormap(['#AA6666', '#6666AA']), marker='o', edgecolors='k')
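# Hedged aside: the Keras Dense(1, sigmoid) layer holds the same kind of parameters as the
# hand-rolled SigmoidLayer above; get_weights() returns [kernel of shape (2, 1), bias of shape (1,)].
kernel, bias = model.get_weights()
print("keras kernel:\n", kernel)
print("keras bias:", bias)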
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you look above you can see that we are accessing a CSV file that has the locations of the podcasts, taking the HTML data and parsing it to look for any words that end with ".mp3", and then calling our download_mp3 function to download each podcast into its own folder. I originally was downloading from the top 30 podcasts and transcribing 11 episodes.
Step2: If you ran into problems running the code above, you will need to look into installing your dependencies.
Step3: The code above takes the small transcription text file, removes useless words (commonly known as stopwords; NLTK ships a module with 2,400 stopwords), groups by word, and then compares the word counts from CMU Sphinx against those from Watson. 143 of the words had the same count, which works out to 76% agreement (143 / 186). That was enough for me to go with CMU, since I could also parallelize CMU to run multiple podcasts at a time and not pay for API calls to transcribe.
Step4: After a lot of memory errors and heartache I was able to transcribe 150 podcasts from 11 different series. In the future I will be adding many more. I was able to take the transcriptions and write them to a SQLite database where I have the file path, the name of the series, and the transcription. The dataframe is below
Step5: As you can tell it's a fairly small dataframe, but with scaling it has much higher potential (I have 3K podcasts ready to transcribe in an S3 bucket, but need to finish the Spark code).
Step6: Originally I was using scikit-learn and its TfidfVectorizer as my method for calculating the cosine similarity. As usual, scikit-learn makes implementing machine learning models fairly trivial.
Step7: That creates a tfidf_matrix of podcasts by terms (151 podcasts and 35,028 terms). Then we compute the cosine similarities
Step8: You can see above that we were able to use the linear kernel from scikit-learn to transform the tfidf_matrix and get the cosine similarities of every podcast with every other podcast!
Step9: I'm creating two functions here
Step10: Timing
Step11: As mentioned before, I originally implemented this model in the Flask app, but noticed that for each query we had to recompute the cosine similarities, which wasn't sustainable. Thus I discovered Gensim, "Topic Modeling for Humans."
Step12: Here I loaded up all my models built off the podcast transcriptions, and built an LSI model with 50 different topics. Below is a sample of 10 of them.
Step13: Looking through these topics you can immediately see that there are some related themes, like topic 2, which has "republican, conservative, and trump, constitution". In fact, LSI has been tested by having the machine cluster documents and having humans sort the same documents, and the results have been very similar.
Step14: As you'd expect, the lookups are much faster without having to continuously redo the cosine similarity computation. The results also look much more accurate from an anecdotal perspective. MLAR is a popular political podcast, so it makes sense that it's been clustered in that politics topic. We can explore this further
| <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import urllib2
import re
import wget
import os
os.chdir('/Users/sheldon/git/springboard_capstone/acquire_podcasts')
df = pd.read_csv('top100_pcasts_locations.csv')
urls = df.url.tolist()
urls = filter(lambda string: 'feeds.' in string or 'feed.' in string, urls)
urls = urls[2:3]
def homepage(request):
file = urllib2.urlopen(request)
data = file.read()
file.close()
def get_mp3s(data):
data = data.split()
data = filter(lambda word: word.endswith('.mp3"') , data)
data = list(set(data))
return data
data = get_mp3s(data)
def parse_mp3(urlstring):
urlstring = re.split('url=', urlstring)[1]
return urlstring.replace('"','')
data = map(parse_mp3, data)
return data
def download_mp3(podcastseries, urls):
os.chdir('/Users/sheldon/git/springboard_capstone/acquire_podcasts')
os.mkdir(urls.split('/')[-1])
os.chdir(urls.split('/')[-1])
mp3_list = []
def download(episode):
print 'downloading: ',episode
episode = wget.download(episode)
print 'downloaded: ',episode
for number, episode in enumerate(podcastseries):
if len(mp3_list) < 1:
print number, ': ', episode
mp3_list.append(episode)
download(episode)
print 'length: ',len(mp3_list)
else:
break
os.chdir('/Users/sheldon/git/springboard_capstone/acquire_podcasts')
for number, series in enumerate(urls):
print 'starting: ',number, ' - ',series
data = homepage(series)
download_mp3(data, series)
print 'completed: ',number, ' - ',series
from IPython.display import Audio
import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
%%time
print(r.recognize_sphinx(audio))
from nltk.corpus import stopwords
from collections import Counter
import pandas as pd
import numpy as np
import nltk.data
from __future__ import division # Python 2 users only
import nltk, re, pprint
from nltk import word_tokenize
os.chdir("/Users/sheldon/git/springboard_capstone/")
cmu_trans = open('report_assets/transcription_cmu.txt','rU').read()
wat_trans = open('report_assets/transcription_watson_2.txt','rU').read()
stop = set(stopwords.words('english'))
## Tokenize and Lower the Words
def tokenize_and_lower(textfile):
tokens = word_tokenize(textfile)
lower = [w.lower() for w in tokens]
filtered_words = [word for word in lower if word not in stop]
series = pd.Series(filtered_words)
return series
## Compare results with value counts, presuming that Watson is more accurate than CMU
cmu = tokenize_and_lower(cmu_trans)
wat = tokenize_and_lower(wat_trans)
cmu = pd.Series.to_frame(cmu)
wat = pd.Series.to_frame(wat)
cmu.columns = [['words']]
wat.columns = [['words']]
cmu = cmu.groupby('words').size().reset_index()
wat = wat.groupby('words').size().reset_index()
df = pd.merge(cmu, wat, on='words')
df.columns = [['words','cmu','wat']]
df['cmu_diff_wat'] = df.cmu - df.wat
%matplotlib inline
df.cmu_diff_wat.value_counts().plot(kind='bar')
df.cmu_diff_wat.value_counts()
#example code
from pydub import AudioSegment
import glob
from math import ceil
import os
import json
import csv
import sys
import speech_recognition as sr
r = sr.Recognizer()
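# Split an mp3 into 5-minute wav chunks, transcribe each chunk with CMU Sphinx (recognize_sphinx),
# then write the stitched transcription to a <mp3 name>_translation.txt file.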
def transcribe_mp3(AUDIO_FILENAME, AUDIO_SEGMENT_SECONDS):
output_file_name = "{}_translation.txt".format(AUDIO_FILENAME)
#fuction to transform mp3 file to wav for transcription
try:
def transform_mp3_wav(AUDIO_FILENAME, AUDIO_SEGMENT_SECONDS):
filename = AUDIO_FILENAME.replace('.mp3','')
with open(AUDIO_FILENAME):
audio = AudioSegment.from_mp3(AUDIO_FILENAME)
xs = 0
while xs < audio.duration_seconds:
ys = min(xs + AUDIO_SEGMENT_SECONDS, ceil(audio.duration_seconds))
fname = str(xs).rjust(5, '0') + '-' + str(ys).rjust(5, '0') + '.wav'
audio[xs*1000:ys*1000].export(os.getcwd() + '/' + filename + fname, format='wav')
print("Saved", fname)
xs = ys
transform_mp3_wav(AUDIO_FILENAME, 300)
wav_filename = AUDIO_FILENAME.replace('.mp3','.wav')
wav_list = glob.glob('*.wav')
wav_list = filter(lambda x: '.mp3' not in x, wav_list)
trans_list = []
transcription = None
for wav_file in wav_list:
print 'transcribing: ' + wav_file
with sr.AudioFile(wav_file) as source:
audio = r.record(source)
transcription = r.recognize_sphinx(audio)
print 'transcription completed'
trans_list.extend(transcription)
transcription = ''.join(trans_list)
except:
return 'error'
for f in wav_list:
os.remove(f)
file = open(output_file_name,'w')
file.write(transcription)
file.close()
import sqlite3
def connect_db():
return sqlite3.connect('/Users/sheldon/podcasts/test.db')
def create_df_object():
conn = sqlite3.connect('/Users/sheldon/podcasts/test.db')
df = pd.read_sql("select * from podcast",conn)
return df
df = create_df_object()
df.info()
from IPython.display import Image
from IPython.core.display import HTML
Image(url= "http://blog.christianperone.com/wp-content/uploads/2013/09/cosinesimilarityfq1.png")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import linear_kernel
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
tf = TfidfVectorizer(stop_words=stop)
tfidf_matrix = tf.fit_transform(df['transcribed'])
tfidf_matrix
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
print cosine_similarities
print len(cosine_similarities)
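# Given a podcast index, rank all podcasts by cosine similarity and return the top matches
# (note: this recomputes the full similarity matrix on every call).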
def get_related_podcasts_scikit(podcast_number,number_of_similarities):
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
related_pod_index = cosine_similarities.argsort()[podcast_number][::-1]
pod_dict = dict(zip(range(0, len(related_pod_index)),related_pod_index))
pod_dict = pd.DataFrame({'rank':pod_dict.keys()},index=pod_dict.values())
related_podcasts_df = pd.DataFrame.join(pod_dict, df, how='inner')
final_df = related_podcasts_df.sort_values('rank')[0:number_of_similarities+1][['rank','episode','series']]
return final_df
def get_related_podcasts_query_scikit(query, number_of_similarities):
query = query.lower()
query = query.split()
tfidf_matrix_test = tf.fit_transform(query)
tfidf_matrix_train = tf.transform(df['transcribed'])
tfidf_matrix_train.todense()
tfidf_matrix_test.todense()
query_similarities = linear_kernel(tfidf_matrix_test, tfidf_matrix_train)
query_similarities = query_similarities.argsort()[0][::-1]
pod_dict = dict(zip(range(0, len(query_similarities)),query_similarities))
pod_dict = pd.DataFrame({'rank':pod_dict.keys()},index=pod_dict.values())
related_podcasts_df = pd.DataFrame.join(pod_dict, df, how='inner')
final_df = related_podcasts_df.sort_values('rank')[0:number_of_similarities+1][['rank','episode','series']]
return final_df
get_related_podcasts_query_scikit('trump clinton obama guns',5)
get_related_podcasts_scikit(22,5)
%timeit get_related_podcasts_query_scikit('economics math statistics',5)
%timeit get_related_podcasts_scikit(22,5)
import gensim
os.chdir('/Users/sheldon/git/springboard_capstone/apps/')
dictionary = gensim.corpora.Dictionary.load('models/words.dict')
corpus = gensim.corpora.MmCorpus('models/corpus.mm')
tfidf = gensim.models.tfidfmodel.TfidfModel.load('models/tfidf_model')
lsi = gensim.models.lsimodel.LsiModel.load('models/model.lsi')
index = gensim.similarities.MatrixSimilarity.load('models/corpus.index')
corpus_tfidf = tfidf[corpus]
corpus_lsi = lsi[corpus_tfidf]
lsi.print_topics(10)
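# Retrieve related podcasts either from a stored podcast's LSI representation (by index)
# or from a free-text query projected into the LSI space.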
def get_related_podcasts(index):
def getKey(item):
return item[1]
corpus = corpus_lsi[index]
corpus = sorted(corpus, key=getKey, reverse=True)[0:10]
related_df = pd.DataFrame(corpus,columns=['index','score'])
final_df = pd.merge(related_df, df, on='index')[['index','episode','score','series']]
return final_df
def get_related_podcasts_query(query):
query = query.lower()
vec_box = dictionary.doc2bow(query.split())
vec_lsi = lsi[vec_box]
sims = index[vec_lsi]
sims = sorted(enumerate(sims), key=lambda item: -item[1])[0:10]
related_df = pd.DataFrame(sims,columns=['index','score'])
final_df = pd.merge(related_df, df, on='index')[['index','episode','score','series']]
return final_df
get_related_podcasts(1)
get_related_podcasts_query("trump clinton obama guns")
%timeit get_related_podcasts(1)
%timeit get_related_podcasts_query('economics math statistics')
#get list of related podcasts
related_podcasts = list(get_related_podcasts(1)['index'])
def get_topics_per_podcast(podcast_index):
def getKey(item):
return item[1]
topic_ids = [i for i in sorted(corpus_lsi[podcast_index], key=getKey, reverse=True) if i[1] > 0.10]
def get_topic_arrays(topic_ids):
x = []
for id in topic_ids:
list_of_words = sorted(lsi.show_topic(id[0], topn=5),key=getKey, reverse=True)
z = []
for word in list_of_words:
if word[1] > .05:
z.append(word)
x.append(z)
return x
topic_arrays = get_topic_arrays(topic_ids)
return topic_arrays
related_podcasts_topics_words = [[related_podcasts[i],get_topics_per_podcast(related_podcasts[i])] for i in range(0, len(related_podcasts))]
episode_podcasts = list(get_related_podcasts(1)['episode'])
series_podcasts = list(get_related_podcasts(1)['series'])
for i,k in enumerate(related_podcasts_topics_words):
print "Podcast: {}, ID: {}".format(i+1, k[0])
print "Podcast Series: {}".format(series_podcasts[i])
print "Episode Title: {}".format(episode_podcasts[i])
for num, topic in enumerate(k[1]):
print "topic: {}".format(num)
for word in topic:
print "word: {}, score:{}".format(word[0], word[1])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that a similar transformation can be applied with compute_ems
| <ASSISTANT_TASK:>
Python Code:
# Author: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import EMS, compute_ems
from sklearn.model_selection import StratifiedKFold
print(__doc__)
data_path = sample.data_path()
# Preprocess the data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_ids = {'AudL': 1, 'VisL': 3}
# Read data and create epochs
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 45, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_ids, tmin=-0.2, tmax=0.5, picks=picks,
baseline=None, reject=dict(grad=4000e-13, eog=150e-6),
preload=True)
epochs.drop_bad()
epochs.pick_types(meg='grad')
# Setup the data to use it a scikit-learn way:
X = epochs.get_data() # The MEG data
y = epochs.events[:, 2] # The conditions indices
n_epochs, n_channels, n_times = X.shape
# Initialize EMS transformer
ems = EMS()
# Initialize the variables of interest
X_transform = np.zeros((n_epochs, n_times)) # Data after EMS transformation
filters = list() # Spatial filters at each time point
# In the original paper, the cross-validation is a leave-one-out. However,
# we recommend using a Stratified KFold, because leave-one-out tends
# to overfit and cannot be used to estimate the variance of the
# prediction within a given fold.
for train, test in StratifiedKFold(n_splits=5).split(X, y):
# In the original paper, the z-scoring is applied outside the CV.
# However, we recommend to apply this preprocessing inside the CV.
# Note that such scaling should be done separately for each channels if the
# data contains multiple channel types.
X_scaled = X / np.std(X[train])
# Fit and store the spatial filters
ems.fit(X_scaled[train], y[train])
# Store filters for future plotting
filters.append(ems.filters_)
# Generate the transformed data
X_transform[test] = ems.transform(X_scaled[test])
# Average the spatial filters across folds
filters = np.mean(filters, axis=0)
# Plot individual trials
plt.figure()
plt.title('single trial surrogates')
plt.imshow(X_transform[y.argsort()], origin='lower', aspect='auto',
extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)],
cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Trials (reordered by condition)')
# Plot average response
plt.figure()
plt.title('Average EMS signal')
mappings = [(key, value) for key, value in event_ids.items()]
for key, value in mappings:
ems_ave = X_transform[y == value]
plt.plot(epochs.times, ems_ave.mean(0), label=key)
plt.xlabel('Time (ms)')
plt.ylabel('a.u.')
plt.legend(loc='best')
plt.show()
# Visualize spatial filters across time
evoked = EvokedArray(filters, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(time_unit='s', scalings=1)
epochs.equalize_event_counts(event_ids)
X_transform, filters, classes = compute_ems(epochs)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the Data
Step2: Create the Model
Step3: Training the Model
Step4: Store the Model
| <ASSISTANT_TASK:>
Python Code:
# Magic command so that plots are displayed inline in the notebook
%matplotlib inline
# autoreload module
%load_ext autoreload
%autoreload 2
# load local package
import sys
import os
current_path = os.getcwd()
sys.path.append(os.path.join(current_path, "../../")) # load project root
def read_data(path, ignore_columns):
import os
import numpy as np
header = []
y = None
X = None
# read data part
    with open(path, "rb") as f:
header = f.readline().decode("utf-8").replace("\r", "").replace("\n", "").split("\t")
columns = [c for c in range(len(header)) if c not in ignore_columns]
header = [h for i, h in enumerate(header) if i not in [0] + ignore_columns]
data = np.genfromtxt(f, invalid_raise=False, usecols=columns)
y = data[:, 0]
X = data[:, 1:]
return header, y, X
data_file = os.path.join(current_path, "../../data/photo_to_mood.txt")
header, moods, labels = read_data(data_file, [1])
print(header)
print(moods.shape)
print(labels.shape)
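# Rank features with univariate ANOVA F-tests (SelectKBest) and return the k best (name, score) pairs.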
def select_features(feature_count, X, y, header):
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
selector = SelectKBest(f_classif, k=feature_count).fit(X, y)
selected = selector.get_support()
get_headers = lambda s: [i_h[1] for i_h in enumerate(header) if s[i_h[0]]]
kbests = sorted(zip(get_headers(selected), selector.scores_[selected]), key=lambda h_s: h_s[1], reverse=True)
return kbests
scores = select_features(10, labels, moods, header)
selected_features = [header.index(s[0]) for s in scores]
print(scores)
print(selected_features)
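# Grid-search a linear-kernel SVM over C with 2-fold cross-validation, print held-out metrics,
# and return the best estimator.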
def create_model(X, y, selected=()):
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn import svm
X_c = X if len(selected) == 0 else X[:, selected]
x_train, x_test, y_train, y_test = train_test_split(X_c, y, test_size=0.2, random_state=42)
candidates = [{'kernel': ['linear'], 'C': [1, 10, 100]}]
clf = GridSearchCV(svm.SVC(C=1), candidates, cv=2, scoring="f1_weighted")
clf.fit(x_train, y_train)
for params, mean_score, scores in sorted(clf.grid_scores_, key=lambda s: s[1], reverse=True):
print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() / 2, params))
model = clf.best_estimator_
y_predict = model.predict(x_test)
print(classification_report(y_test, y_predict))
return model
model = create_model(labels, moods, selected_features)
print(model)
def save(model):
from sklearn.externals import joblib
joblib.dump(model, "./machine.pkl")
print([header[s] for s in sorted(selected_features)])
save(model)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Introduction
Step2: Or import the matplotlib.pyplot module under the alias plt. This is the more correct approach, since it avoids loading every object into the namespace
Step3: 1.2 MATLAB-like API
Step4: A basic example of using the API.
Step5: Most MATLAB functions are included in pylab.
Step6: This API is limited to rudimentary plots. Matplotlib's object-oriented features should be preferred for more elaborate figures.
Step7: Although a little bit more code is involved, the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure
Step8: If we don't care about being explicit about where our plot axes are placed in the figure canvas, then we can use one of the many axis layout managers in matplotlib. My favorite is subplots, which can be used like this
Step9: That was easy, but it isn't so pretty with overlapping figure axes and labels, right?
Step10: 2.2 Sizes and proportions
Step11: The same arguments can also be passed to layout managers, such as the subplots function
Step12: 2.3 Saving figures
Step13: Here we can also optionally specify the DPI and choose between different output formats
Step14: What formats are available and which ones should be used for best quality?
Step15: Axis labels
Step16: Legends
Step17: The method described above follows the MATLAB API. It is somewhat prone to errors and inflexible if curves are added to or removed from the figure (resulting in a wrongly labelled curve).
Step18: The advantage with this method is that if curves are added or removed from the figure, the legend is automatically updated accordingly.
Step19: The following figure shows how to use the figure title, axis labels and legends described above
Step20: 2.5 Text formatting
Step21: We can also change the global font size and font family, which applies to all text elements in a figure (tick labels, axis labels and titles, legends, etc.)
Step22: A good choice of global fonts is the STIX fonts
Step23: Or, alternatively, we can request that matplotlib uses LaTeX to render the text elements in the figure
Step24: 2.6 Colors, line widths and line types
Step25: We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the color and alpha keyword arguments
Step26: Line and marker styles
Step27: 2.7 Axis control
Step28: Logarithmic scale
Step29: 2.8 Placement of ticks and labels
Step30: There are a number of more advanced methods for controlling major and minor tick placement in matplotlib figures, such as automatic placement according to different policies. See http
Step31: 2.9 Axis spacing formatting
Step32: Axis position adjustments
Step33: 2.10 Grid
Step34: 2.11 Dual y-axes
Step35: 2.12 Centered axes
Step36: 2.13 Other 2D plots
Step37: 2.14 Text annotations
Step38: 2.15 Figures with subplots
Step39: subplot2grid
Step40: gridspec
Step41: add_axes
Step42: 2.16 Contour plots
Step43: pcolor
Step44: imshow
Step45: contour
Step46: 3 3D plots
Step47: 3.1 Surface plots
Step48: Wire-frame plot
Step49: 3.2 Contour plots with projections
Step50: Change the view angle
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pylab import *
# import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
x = np.linspace(0, 5, 10)
y = x ** 2
figure()
plot(x, y, 'r')
xlabel('x')
ylabel('y')
title('titre')
show()
subplot(1,2,1)
plot(x, y, 'r--')
subplot(1,2,2)
plot(y, x, 'g*-');
show()
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
show()
fig = plt.figure()
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes
# main figure
axes1.plot(x, y, 'r')
axes1.set_xlabel('x')
axes1.set_ylabel('y')
axes1.set_title('title')
# insert
axes2.plot(y, x, 'g')
axes2.set_xlabel('y')
axes2.set_ylabel('x')
axes2.set_title('insert title');
show;
fig, axes = plt.subplots()
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
show()
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'r')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
show()
fig, axes = plt.subplots(nrows=1, ncols=2)
for ax in axes:
ax.plot(x, y, 'r')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
fig.tight_layout()
show()
fig = plt.figure(figsize=(8,4), dpi=100)
fig, axes = plt.subplots(figsize=(12,3))
axes.plot(x, y, 'r')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_title('title');
show()
fig.savefig("filename.png")
fig.savefig("filename.png", dpi=200)
ax.set_title("title");
ax.set_xlabel("x")
ax.set_ylabel("y");
ax.legend(["curve1", "curve2", "curve3"]);
ax.plot(x, x**2, label="curve1")
ax.plot(x, x**3, label="curve2")
ax.legend();
ax.legend(loc=0) # let matplotlib decide the optimal location
ax.legend(loc=1) # upper right corner
ax.legend(loc=2) # upper left corner
ax.legend(loc=3) # lower left corner
ax.legend(loc=4) # lower right corner
# .. many more options are available
fig, ax = plt.subplots()
ax.plot(x, x**2, label="y = x**2")
ax.plot(x, x**3, label="y = x**3")
ax.legend(loc=2); # upper left corner
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title');
show()
fig, ax = plt.subplots()
ax.plot(x, x**2, label=r"$y = \alpha^2$")
ax.plot(x, x**3, label=r"$y = \alpha^3$")
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$', fontsize=18)
ax.set_ylabel(r'$y$', fontsize=18)
ax.set_title('title');
show()
# Update the matplotlib configuration parameters:
matplotlib.rcParams.update({'font.size': 18, 'font.family': 'serif'})
fig, ax = plt.subplots()
ax.plot(x, x**2, label=r"$y = \alpha^2$")
ax.plot(x, x**3, label=r"$y = \alpha^3$")
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$y$')
ax.set_title('title');
show()
# Update the matplotlib configuration parameters:
matplotlib.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix'})
fig, ax = plt.subplots()
ax.plot(x, x**2, label=r"$y = \alpha^2$")
ax.plot(x, x**3, label=r"$y = \alpha^3$")
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$y$')
ax.set_title('title');
show()
matplotlib.rcParams.update({'font.size': 18, 'text.usetex': True})
fig, ax = plt.subplots()
ax.plot(x, x**2, label=r"$y = \alpha^2$")
ax.plot(x, x**3, label=r"$y = \alpha^3$")
ax.legend(loc=2) # upper left corner
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$y$')
ax.set_title('title');
show()
# restore
matplotlib.rcParams.update({'font.size': 12, 'font.family': 'sans', 'text.usetex': False})
# MATLAB style line color and style
ax.plot(x, x**2, 'b.-') # blue line with dots
ax.plot(x, x**3, 'g--') # green dashed line
fig, ax = plt.subplots()
ax.plot(x, x+1, color="red", alpha=0.5) # half-transparant red
ax.plot(x, x+2, color="#1155dd") # RGB hex code for a bluish color
ax.plot(x, x+3, color="#15cc55") # RGB hex code for a greenish color
show()
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(x, x+1, color="blue", linewidth=0.25)
ax.plot(x, x+2, color="blue", linewidth=0.50)
ax.plot(x, x+3, color="blue", linewidth=1.00)
ax.plot(x, x+4, color="blue", linewidth=2.00)
# possible linestyle options: '-', '--', '-.', ':', 'steps'
ax.plot(x, x+5, color="red", lw=2, linestyle='-')
ax.plot(x, x+6, color="red", lw=2, ls='-.')
ax.plot(x, x+7, color="red", lw=2, ls=':')
# custom dash
line, = ax.plot(x, x+8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ...
# possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x+ 9, color="green", lw=2, ls='--', marker='+')
ax.plot(x, x+10, color="green", lw=2, ls='--', marker='o')
ax.plot(x, x+11, color="green", lw=2, ls='--', marker='s')
ax.plot(x, x+12, color="green", lw=2, ls='--', marker='1')
# marker size and color
ax.plot(x, x+13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=2, markeredgecolor="blue");
show()
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].plot(x, x**2, x, x**3)
axes[0].set_title("default axes ranges")
axes[1].plot(x, x**2, x, x**3)
axes[1].axis('tight')
axes[1].set_title("tight axes")
axes[2].plot(x, x**2, x, x**3)
axes[2].set_ylim([0, 60])
axes[2].set_xlim([2, 5])
axes[2].set_title("custom axes range");
show()
fig, axes = plt.subplots(1, 2, figsize=(10,4))
axes[0].plot(x, x**2, x, np.exp(x))
axes[0].set_title("Normal scale")
axes[1].plot(x, x**2, x, np.exp(x))
axes[1].set_yscale("log")
axes[1].set_title("Logarithmic scale (y)");
show()
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, x**2, x, x**3, lw=2)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels([r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'], fontsize=18)
yticks = [0, 50, 100, 150]
ax.set_yticks(yticks)
ax.set_yticklabels(["$%.1f$" % y for y in yticks], fontsize=18); # use LaTeX formatted labels
show()
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_title("scientific notation")
ax.set_yticks([0, 50, 100, 150])
from matplotlib import ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
show()
# distance between x and y axis and the numbers on the axes
matplotlib.rcParams['xtick.major.pad'] = 5
matplotlib.rcParams['ytick.major.pad'] = 5
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("label and axis spacing")
# padding between axis label and axis numbers
ax.xaxis.labelpad = 5
ax.yaxis.labelpad = 5
ax.set_xlabel("x")
ax.set_ylabel("y");
show()
# restore defaults
matplotlib.rcParams['xtick.major.pad'] = 3
matplotlib.rcParams['ytick.major.pad'] = 3
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("title")
ax.set_xlabel("x")
ax.set_ylabel("y")
fig.subplots_adjust(left=0.15, right=.9, bottom=0.1, top=0.9);
show()
fig, axes = plt.subplots(1, 2, figsize=(10,3))
# default grid appearance
axes[0].plot(x, x**2, x, x**3, lw=2)
axes[0].grid(True)
# custom grid appearance
axes[1].plot(x, x**2, x, x**3, lw=2)
axes[1].grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
show()
fig, ax1 = plt.subplots()
ax1.plot(x, x**2, lw=2, color="blue")
ax1.set_ylabel(r"area $(m^2)$", fontsize=18, color="blue")
for label in ax1.get_yticklabels():
label.set_color("blue")
ax2 = ax1.twinx()
ax2.plot(x, x**3, lw=2, color="red")
ax2.set_ylabel(r"volume $(m^3)$", fontsize=18, color="red")
for label in ax2.get_yticklabels():
label.set_color("red")
show()
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0)) # set position of x spine to x=0
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0)) # set position of y spine to y=0
xx = np.linspace(-0.75, 1., 100)
ax.plot(xx, xx**3);
show()
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(1, 4, figsize=(12,3))
axes[0].scatter(xx, xx + 0.25*np.random.randn(len(xx)))
axes[0].set_title("scatter")
axes[1].step(n, n**2, lw=2)
axes[1].set_title("step")
axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5)
axes[2].set_title("bar")
axes[3].fill_between(x, x**2, x**3, color="green", alpha=0.5);
axes[3].set_title("fill_between");
show()
# polar plot using add_axes and polar projection
fig = plt.figure()
ax = fig.add_axes([0.0, 0.0, .6, .6], polar=True)
t = np.linspace(0, 2 * np.pi, 100)
ax.plot(t, t, color='blue', lw=3);
show()
# A histogram
n = np.random.randn(100000)
fig, axes = plt.subplots(1, 2, figsize=(12,4))
axes[0].hist(n)
axes[0].set_title("Default histogram")
axes[0].set_xlim((min(n), max(n)))
axes[1].hist(n, cumulative=True, bins=50)
axes[1].set_title("Cumulative detailed histogram")
axes[1].set_xlim((min(n), max(n)));
show()
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
ax.text(0.15, 0.2, r"$y=x^2$", fontsize=20, color="blue")
ax.text(0.65, 0.1, r"$y=x^3$", fontsize=20, color="green");
show()
fig, ax = plt.subplots(2, 3)
fig.tight_layout()
show()
fig = plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2,0))
ax5 = plt.subplot2grid((3,3), (2,1))
fig.tight_layout()
show()
import matplotlib.gridspec as gridspec
fig = plt.figure()
gs = gridspec.GridSpec(2, 3, height_ratios=[2,1], width_ratios=[1,2,1])
for g in gs:
ax = fig.add_subplot(g)
fig.tight_layout()
show()
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
fig.tight_layout()
# inset
inset_ax = fig.add_axes([0.2, 0.55, 0.35, 0.35]) # X, Y, width, height
inset_ax.plot(xx, xx**2, xx, xx**3)
inset_ax.set_title('zoom near origin')
# set axis range
inset_ax.set_xlim(-.2, .2)
inset_ax.set_ylim(-.005, .01)
# set axis tick locations
inset_ax.set_yticks([0, 0.005, 0.01])
inset_ax.set_xticks([-0.1,0,.1]);
show()
alpha = 0.7
phi_ext = 2 * np.pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
return 2 + alpha - 2 * np.cos(phi_p) * np.cos(phi_m) - alpha * np.cos(phi_ext - 2*phi_p)
phi_m = np.linspace(0, 2*np.pi, 100)
phi_p = np.linspace(0, 2*np.pi, 100)
X,Y = np.meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
show()
fig, ax = plt.subplots()
im = ax.imshow(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
im.set_interpolation('bilinear')
cb = fig.colorbar(im, ax=ax)
show()
fig, ax = plt.subplots()
cnt = ax.contour(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
show()
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig = plt.figure(figsize=(14,6))
# `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=4, cstride=4, linewidth=0)
# surface_plot with color grading and color bar
ax = fig.add_subplot(1, 2, 2, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=False)
cb = fig.colorbar(p, shrink=0.5)
show()
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1, 1, 1, projection='3d')
p = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4)
show()
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
cset = ax.contour(X, Y, Z, zdir='z', offset=-np.pi, cmap=matplotlib.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-np.pi, cmap=matplotlib.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=3*np.pi, cmap=matplotlib.cm.coolwarm)
ax.set_xlim3d(-np.pi, 2*np.pi);
ax.set_ylim3d(0, 3*np.pi);
ax.set_zlim3d(-np.pi, 2*np.pi);
show()
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(1,2,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
ax.view_init(30, 45)
ax = fig.add_subplot(1,2,2, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
ax.view_init(70, 30)
fig.tight_layout()
show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define an objective function
Step2: Let's begin by plotting $f$.
Step3: Setting a Gaussian Process prior
Step4: The following helper function update_posterior will take care of updating our gpmodel each time we evaluate $f$ at a new value $x$.
Step5: Define an acquisition function
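Concretely, the acquisition function used in the code below is the lower confidence bound of the GP posterior, $\mathrm{LCB}(x) = \mu(x) - \kappa\,\sigma(x)$ with $\kappa = 2$, and the next query point is chosen by (approximately) minimizing it.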
Step6: The final component we need is a way to find (approximate) minimizing points $x_{\rm min}$ of the acquisition function. There are several ways to proceed, including gradient-based and non-gradient-based techniques. Here we will follow the gradient-based approach. One of the possible drawbacks of gradient descent methods is that the minimization algorithm can get stuck at a local minimum. In this tutorial, we adopt a (very) simple approach to address this issue
Step7: The inner loop of Bayesian Optimization
Step8: Running the algorithm
Step9: Our surrogate model gpmodel already has 4 function evaluations at its disposal; however, we have yet to optimize the GP hyperparameters. So we do that first. Then in a loop we call the next_x and update_posterior functions repeatedly. The following plot illustrates how Gaussian Process posteriors and the corresponding acquisition functions change at each step in the algorithm. Note how query points are chosen both for exploration and exploitation.
| <ASSISTANT_TASK:>
Python Code:
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import torch
import torch.autograd as autograd
import torch.optim as optim
from torch.distributions import constraints, transform_to
import pyro
import pyro.contrib.gp as gp
assert pyro.__version__.startswith('1.7.0')
pyro.set_rng_seed(1)
def f(x):
return (6 * x - 2)**2 * torch.sin(12 * x - 4)
x = torch.linspace(0, 1)
plt.figure(figsize=(8, 4))
plt.plot(x.numpy(), f(x).numpy())
plt.show()
# initialize the model with four input points: 0.0, 0.33, 0.66, 1.0
X = torch.tensor([0.0, 0.33, 0.66, 1.0])
y = f(X)
gpmodel = gp.models.GPRegression(X, y, gp.kernels.Matern52(input_dim=1),
noise=torch.tensor(0.1), jitter=1.0e-4)
def update_posterior(x_new):
y = f(x_new) # evaluate f at new point.
X = torch.cat([gpmodel.X, x_new]) # incorporate new evaluation
y = torch.cat([gpmodel.y, y])
gpmodel.set_data(X, y)
# optimize the GP hyperparameters using Adam with lr=0.001
optimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)
gp.util.train(gpmodel, optimizer)
def lower_confidence_bound(x, kappa=2):
mu, variance = gpmodel(x, full_cov=False, noiseless=False)
sigma = variance.sqrt()
return mu - kappa * sigma
def find_a_candidate(x_init, lower_bound=0, upper_bound=1):
# transform x to an unconstrained domain
constraint = constraints.interval(lower_bound, upper_bound)
unconstrained_x_init = transform_to(constraint).inv(x_init)
unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(True)
minimizer = optim.LBFGS([unconstrained_x], line_search_fn='strong_wolfe')
def closure():
minimizer.zero_grad()
x = transform_to(constraint)(unconstrained_x)
y = lower_confidence_bound(x)
autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
return y
minimizer.step(closure)
# after finding a candidate in the unconstrained domain,
# convert it back to original domain.
x = transform_to(constraint)(unconstrained_x)
return x.detach()
def next_x(lower_bound=0, upper_bound=1, num_candidates=5):
candidates = []
values = []
x_init = gpmodel.X[-1:]
for i in range(num_candidates):
x = find_a_candidate(x_init, lower_bound, upper_bound)
y = lower_confidence_bound(x)
candidates.append(x)
values.append(y)
x_init = x.new_empty(1).uniform_(lower_bound, upper_bound)
argmin = torch.min(torch.cat(values), dim=0)[1].item()
return candidates[argmin]
def plot(gs, xmin, xlabel=None, with_title=True):
xlabel = "xmin" if xlabel is None else "x{}".format(xlabel)
Xnew = torch.linspace(-0.1, 1.1)
ax1 = plt.subplot(gs[0])
ax1.plot(gpmodel.X.numpy(), gpmodel.y.numpy(), "kx") # plot all observed data
with torch.no_grad():
loc, var = gpmodel(Xnew, full_cov=False, noiseless=False)
sd = var.sqrt()
ax1.plot(Xnew.numpy(), loc.numpy(), "r", lw=2) # plot predictive mean
ax1.fill_between(Xnew.numpy(), loc.numpy() - 2*sd.numpy(), loc.numpy() + 2*sd.numpy(),
color="C0", alpha=0.3) # plot uncertainty intervals
ax1.set_xlim(-0.1, 1.1)
ax1.set_title("Find {}".format(xlabel))
if with_title:
ax1.set_ylabel("Gaussian Process Regression")
ax2 = plt.subplot(gs[1])
with torch.no_grad():
# plot the acquisition function
ax2.plot(Xnew.numpy(), lower_confidence_bound(Xnew).numpy())
# plot the new candidate point
ax2.plot(xmin.numpy(), lower_confidence_bound(xmin).numpy(), "^", markersize=10,
label="{} = {:.5f}".format(xlabel, xmin.item()))
ax2.set_xlim(-0.1, 1.1)
if with_title:
ax2.set_ylabel("Acquisition Function")
ax2.legend(loc=1)
plt.figure(figsize=(12, 30))
outer_gs = gridspec.GridSpec(5, 2)
optimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)
gp.util.train(gpmodel, optimizer)
for i in range(8):
xmin = next_x()
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer_gs[i])
plot(gs, xmin, xlabel=i+1, with_title=(i % 2 == 0))
update_posterior(xmin)
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ideal Responses
Step2: Modeling the Offset Transmission Line
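For reference, the standard offset-line model implemented by the offset_gamma_and_zc() helper below is (with $f$ in Hz and the loss term referenced to 1 GHz): $\alpha\ell = \frac{\text{offset\_loss} \cdot \text{offset\_delay}}{2 Z_0}\sqrt{f/10^9}$, $\beta\ell = 2\pi f \cdot \text{offset\_delay} + \alpha\ell$, $\gamma\ell = \alpha\ell + j\beta\ell$, and $Z_c = Z_0 + (1-j)\,\frac{\text{offset\_loss}}{4\pi f}\sqrt{f/10^9}$.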
Step3: The broadcasting feature in numpy is used here. The quantities
Step4: At this point, we already have everything we need to know about this offset line.
Step5: Modeling the Shunt Impedance
Step6: For the open standard, a series medium.shunt_capacitor() terminated by
Step7: Now you can pass these standards into scikit-rf's calibration routines, or use the write_touchstone() method to save them on the disk for future use.
Step8: Finally, let's take a look at the magnitudes and phase shifts of our standards.
Step9: Short
Step10: Conclusion
Step11: Example
Step12: After unit conversion, we can define standards just like how calibration standards in Keysight-style
Step13: Modeling the Shunt Impedance
Step14: Completion
Step15: Plotting
Step16: Short
Step17: Thru
Step18: Conclusion
| <ASSISTANT_TASK:>
Python Code:
import skrf
from skrf.media import DefinedGammaZ0
import numpy as np
freq = skrf.Frequency(1, 9000, 1001, "MHz")
ideal_medium = DefinedGammaZ0(frequency=freq, z0=50)
ideal_open = ideal_medium.open()
ideal_short = ideal_medium.short()
ideal_load = ideal_medium.match()
ideal_thru = ideal_medium.thru()
def offset_gamma_and_zc(offset_delay, offset_loss, offset_z0=50):
alpha_l = (offset_loss * offset_delay) / (2 * offset_z0)
alpha_l *= np.sqrt(freq.f / 1e9)
beta_l = 2 * np.pi * freq.f * offset_delay + alpha_l
gamma_l = alpha_l + 1j * beta_l
zc = (offset_z0) + (1 - 1j) * (offset_loss / (4 * np.pi * freq.f)) * np.sqrt(freq.f / 1e9)
return gamma_l, zc
gamma_l_open, zc_open = offset_gamma_and_zc(29.242e-12, 2.2e9)
gamma_l_short, zc_short = offset_gamma_and_zc(31.785e-12, 2.36e9)
medium_open = DefinedGammaZ0(
frequency=freq,
gamma=gamma_l_open, Z0=zc_open, z0=50
)
line_open = medium_open.line(
d=1, unit='m',
z0=medium_open.Z0, embed=True
)
medium_short = DefinedGammaZ0(
frequency=freq,
gamma=gamma_l_short, Z0=zc_short, z0=50
)
line_short = medium_short.line(
d=1, unit='m',
z0=medium_short.Z0, embed=True
)
# use ideal_medium, not medium_open and medium_short to avoid confusions.
capacitor_poly = np.poly1d([
-0.1597 * 1e-45,
23.17 * 1e-36,
-310.1 * 1e-27,
49.43 * 1e-15
])
capacitor_list = capacitor_poly(freq.f)
shunt_open = ideal_medium.capacitor(capacitor_list) ** ideal_medium.short()
inductor_poly = np.poly1d([
-0.01 * 1e-42,
2.171 * 1e-33,
-108.5 * 1e-24,
2.077 * 1e-12
])
inductor_list = inductor_poly(freq.f)
shunt_short = ideal_medium.inductor(inductor_list) ** ideal_medium.short()
open_std = line_open ** shunt_open
short_std = line_short ** shunt_short
load_std = ideal_medium.match()
thru_std = ideal_medium.thru()
%matplotlib inline
import matplotlib.pyplot as plt
mag = plt.subplot(1, 1, 1)
plt.title("Keysight 85033E Open (S11)")
open_std.plot_s_db(color='red', label="Magnitude")
plt.legend(bbox_to_anchor=(0.73, 1), loc='upper left', borderaxespad=0)
phase = mag.twinx()
open_std.plot_s_deg(color='blue', label="Phase")
plt.legend(bbox_to_anchor=(0.73, 0.9), loc='upper left', borderaxespad=0)
mag = plt.subplot(1, 1, 1)
plt.title("Keysight 85033E Short (S11)")
short_std.plot_s_db(color='red', label="Magnitude")
plt.legend(bbox_to_anchor=(0.73, 1), loc='upper left', borderaxespad=0)
phase = mag.twinx()
short_std.plot_s_deg(color='blue', label="Phase")
plt.legend(bbox_to_anchor=(0.73, 0.9), loc='upper left', borderaxespad=0)
import skrf
from skrf.media import DefinedGammaZ0
import numpy as np
def keysight_calkit_offset_line(freq, offset_delay, offset_loss, offset_z0):
if offset_delay or offset_loss:
alpha_l = (offset_loss * offset_delay) / (2 * offset_z0)
alpha_l *= np.sqrt(freq.f / 1e9)
beta_l = 2 * np.pi * freq.f * offset_delay + alpha_l
zc = offset_z0 + (1 - 1j) * (offset_loss / (4 * np.pi * freq.f)) * np.sqrt(freq.f / 1e9)
gamma_l = alpha_l + beta_l * 1j
medium = DefinedGammaZ0(frequency=freq, z0=offset_z0, Z0=zc, gamma=gamma_l)
offset_line = medium.line(d=1, unit='m', z0=medium.Z0, embed=True)
return medium, offset_line
else:
medium = DefinedGammaZ0(frequency=freq, Z0=offset_z0)
line = medium.line(d=0)
return medium, line
def keysight_calkit_open(freq, offset_delay, offset_loss, c0, c1, c2, c3, offset_z0=50):
medium, line = keysight_calkit_offset_line(freq, offset_delay, offset_loss, offset_z0)
# Capacitance is defined with respect to the port impedance offset_z0, not the lossy
# line impedance. In scikit-rf, the return values of `shunt_capacitor()` and `medium.open()`
# methods are (correctly) referenced to the port impedance.
if c0 or c1 or c2 or c3:
poly = np.poly1d([c3, c2, c1, c0])
capacitance = medium.shunt_capacitor(poly(freq.f)) ** medium.open()
else:
capacitance = medium.open()
return line ** capacitance
def keysight_calkit_short(freq, offset_delay, offset_loss, l0, l1, l2, l3, offset_z0=50):
# Inductance is defined with respect to the port impedance offset_z0, not the lossy
# line impedance. In scikit-rf, the return values of `inductor()` and `medium.short()`
# methods are (correctly) referenced to the port impedance.
medium, line = keysight_calkit_offset_line(freq, offset_delay, offset_loss, offset_z0)
if l0 or l1 or l2 or l3:
poly = np.poly1d([l3, l2, l1, l0])
inductance = medium.inductor(poly(freq.f)) ** medium.short()
else:
inductance = medium.short()
return line ** inductance
def keysight_calkit_load(freq, offset_delay=0, offset_loss=0, offset_z0=50):
medium, line = keysight_calkit_offset_line(freq, offset_delay, offset_loss, offset_z0)
load = medium.match()
return line ** load
def keysight_calkit_thru(freq, offset_delay=0, offset_loss=0, offset_z0=50):
medium, line = keysight_calkit_offset_line(freq, offset_delay, offset_loss, offset_z0)
thru = medium.thru()
return line ** thru
freq = skrf.Frequency(1, 9000, 1001, "MHz")
open_std = keysight_calkit_open(
freq,
offset_delay=29.242e-12, offset_loss=2.2e9,
c0=49.43e-15, c1=-310.1e-27, c2=23.17e-36, c3=-0.1597e-45
)
short_std = keysight_calkit_short(
freq,
offset_delay=31.785e-12, offset_loss=2.36e9,
l0=2.077e-12, l1=-108.5e-24, l2=2.171e-33, l3=-0.01e-42
)
load_std = keysight_calkit_load(freq)
thru_std = keysight_calkit_thru(freq)
def rs_to_keysight(rs_offset_length, rs_offset_loss, offset_z0=50):
offset_delay = rs_offset_length / skrf.constants.c
offset_loss = skrf.mathFunctions.db_2_np(rs_offset_loss * offset_z0 / offset_delay)
return offset_delay, offset_loss
offset_delay, offset_loss = rs_to_keysight(4.344e-3, 0.0033)
gamma_l, zc = offset_gamma_and_zc(offset_delay, offset_loss)
medium_open = DefinedGammaZ0(
frequency=freq,
gamma=gamma_l, Z0=zc, z0=50
)
line_open = medium_open.line(
d=1, unit='m',
z0=medium_open.Z0, embed=True
)
offset_delay, offset_loss = rs_to_keysight(5.0017e-3, 0.0038)
gamma_l, zc = offset_gamma_and_zc(offset_delay, offset_loss)
medium_short = DefinedGammaZ0(
frequency=freq,
gamma=gamma_l, Z0=zc, z0=50
)
line_short = medium_short.line(
d=1, unit='m',
z0=medium_short.Z0, embed=True
)
offset_delay, offset_loss = rs_to_keysight(17.375e-3, 0.0065)
gamma_l, zc = offset_gamma_and_zc(offset_delay, offset_loss)
medium_thru = DefinedGammaZ0(
frequency=freq,
gamma=gamma_l, Z0=zc, z0=50
)
line_thru = medium_thru.line(
d=1, unit='m',
z0=medium_thru.Z0, embed=True
)
capacitor_poly = np.poly1d([
-0.001886 * 1000e-45,
0.1076 * 1000e-36,
-1.284 * 1000e-27,
62.54 * 1e-15
])
capacitor_open = capacitor_poly(freq.f)
shunt_open = ideal_medium.shunt_capacitor(capacitor_open) ** ideal_medium.open()
# or: shunt_open = ideal_medium.shunt_capacitor(capacitor_open) ** ideal_medium.short()
# see the Keysight example for explanation.
shunt_short = ideal_medium.short()
open_std = line_open ** shunt_open
short_std = line_short ** shunt_short
load_std = ideal_medium.match()
thru_std = line_thru
mag = plt.subplot(1, 1, 1)
plt.title("Maury Microwave 8050CK10 Open (S11)")
open_std.plot_s_db(color='red', label="Magnitude")
plt.legend(bbox_to_anchor=(0.73, 1), loc='upper left', borderaxespad=0)
phase = mag.twinx()
open_std.plot_s_deg(color='blue', label="Phase")
plt.legend(bbox_to_anchor=(0.73, 0.9), loc='upper left', borderaxespad=0)
mag = plt.subplot(1, 1, 1)
plt.title("Maury Microwave 8050CK10 Short (S11)")
short_std.plot_s_db(color='red', label="Magnitude")
plt.legend(bbox_to_anchor=(0.73, 1), loc='upper left', borderaxespad=0)
phase = mag.twinx()
short_std.plot_s_deg(color='blue', label="Phase")
plt.legend(bbox_to_anchor=(0.73, 0.9), loc='upper left', borderaxespad=0)
mag = plt.subplot(1, 1, 1)
plt.title("Maury Microwave 8050CK10 Thru (S21)")
thru_std.s21.plot_s_db(color='red', label="Magnitude")
plt.legend(bbox_to_anchor=(0.73, 1), loc='upper left', borderaxespad=0)
phase = mag.twinx()
thru_std.s21.plot_s_deg(color='blue', label="Phase")
plt.legend(bbox_to_anchor=(0.73, 0.9), loc='upper left', borderaxespad=0)
import skrf
from skrf.media import DefinedGammaZ0
import numpy as np
def rs_to_keysight(rs_offset_length, rs_offset_loss, offset_z0=50):
offset_delay = rs_offset_length / skrf.constants.c
offset_loss = skrf.mathFunctions.db_2_np(rs_offset_loss * offset_z0 / offset_delay)
return offset_delay, offset_loss
def rs_calkit_offset_line(freq, rs_offset_length, rs_offset_loss, offset_z0):
if rs_offset_length or rs_offset_loss:
offset_delay, offset_loss = rs_to_keysight(rs_offset_length, rs_offset_loss)
alpha_l = (offset_loss * offset_delay) / (2 * offset_z0)
alpha_l *= np.sqrt(freq.f / 1e9)
beta_l = 2 * np.pi * freq.f * offset_delay + alpha_l
zc = offset_z0 + (1 - 1j) * (offset_loss / (4 * np.pi * freq.f)) * np.sqrt(freq.f / 1e9)
gamma_l = alpha_l + beta_l * 1j
medium = DefinedGammaZ0(frequency=freq, z0=offset_z0, Z0=zc, gamma=gamma_l)
offset_line = medium.line(d=1, unit='m', z0=medium.Z0, embed=True)
return medium, offset_line
else:
medium = DefinedGammaZ0(frequency=freq, Z0=offset_z0)
line = medium.line(d=0)
return medium, line
def rs_calkit_open(freq, offset_length, offset_loss, c0, c1, c2, c3, offset_z0=50):
# Capacitance is defined with respect to the port impedance offset_z0, not the lossy
# line impedance. In scikit-rf, the return values of `shunt_capacitor()` and `medium.open()`
# methods are (correctly) referenced to the port impedance.
medium, line = rs_calkit_offset_line(freq, offset_length, offset_loss, offset_z0)
if c0 or c1 or c2 or c3:
poly = np.poly1d([c3, c2, c1, c0])
capacitance = medium.shunt_capacitor(poly(freq.f)) ** medium.open()
else:
capacitance = medium.open()
return line ** capacitance
def rs_calkit_short(freq, offset_length, offset_loss, l0, l1, l2, l3, offset_z0=50):
# Inductance is defined with respect to the port impedance offset_z0, not the lossy
# line impedance. In scikit-rf, the return values of `inductor()` and `medium.short()`
# methods are (correctly) referenced to the port impedance.
medium, line = rs_calkit_offset_line(freq, offset_length, offset_loss, offset_z0)
if l0 or l1 or l2 or l3:
poly = np.poly1d([l3, l2, l1, l0])
inductance = medium.inductor(poly(freq.f)) ** medium.short()
else:
inductance = medium.short()
return line ** inductance
def rs_calkit_load(freq, offset_length=0, offset_loss=0, offset_z0=50):
medium, line = rs_calkit_offset_line(freq, offset_length, offset_loss, offset_z0)
load = medium.match()
return line ** load
def rs_calkit_thru(freq, offset_length=0, offset_loss=0, offset_z0=50):
medium, line = rs_calkit_offset_line(freq, offset_length, offset_loss, offset_z0)
thru = medium.thru()
return line ** thru
freq = skrf.Frequency(1, 9000, 1001, "MHz")
open_std = rs_calkit_open(
freq,
offset_length=4.344e-3, offset_loss=0.0033,
# Due to unit differences, the numerical values of c1, c2 and c3
# must be multiplied by 1000 from the R&S datasheet value. For
# Anritsu, this is not needed. Check the units on your datasheet!
c0=62.54 * 1e-15,
c1=-1.284 * 1000e-27,
c2=0.1076 * 1000e-36,
c3=-0.001886 * 1000e-45
)
short_std = rs_calkit_short(
freq,
offset_length=5.0017e-3, offset_loss=0.0038,
l0=0, l1=0, l2=0, l3=0
)
load_std = rs_calkit_load(freq)
thru_std = rs_calkit_thru(
freq,
offset_length=17.375e-3, offset_loss=0.0065
)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Exercise 1
Step3: Since the p-value for the Levene test is less than our $\alpha$ level (0.05), we can reject the null hypothesis that the variability of the two groups is equal, implying that the variances are unequal.
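In code, the decision rule applied here is just a comparison of the Levene p-value against $\alpha = 0.05$; a minimal sketch using the variables defined below:
W, pval = stats.levene(xle_returns, xli_returns)
equal_variances = pval >= 0.05   # False here, so Welch's t-test (equal_var=False) is used next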
Step4: Exercise 3
Step5: The skewness of the XLE returns is greater than 0, which means there is more weight in the right tail of the distribution. The same reading applies to the XLI returns: a positive skewness puts more weight in the right tail (a negative value would instead indicate more weight in the left tail).
| <ASSISTANT_TASK:>
Python Code:
# Useful functions
def normal_test(X):
z, pval = stats.normaltest(X)
if pval < 0.05:
print 'Values are not normally distributed.'
else:
print 'Values are normally distributed.'
return
# Useful Libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
# Get pricing data for an energy (XLE) and industrial (XLI) ETF
xle = get_pricing('XLE', fields = 'price', start_date = '2016-01-01', end_date = '2017-01-01')
xli = get_pricing('XLI', fields = 'price', start_date = '2016-01-01', end_date = '2017-01-01')
# Compute returns
xle_returns = xle.pct_change()[1:]
xli_returns = xli.pct_change()[1:]
xle = plt.hist(xle_returns, bins=30)
xli = plt.hist(xli_returns, bins=30, color='r')
plt.xlabel('returns')
plt.ylabel('Frequency')
plt.title('Histogram of the returns of XLE and XLI')
plt.legend(['XLE returns', 'XLI returns']);
# Checking for normality using function above.
print 'XLE'
normal_test(xle_returns)
print 'XLI'
normal_test(xli_returns)
# Because the data is not normally distributed, we must use the levene and not the F-test of variance.
stats.levene(xle_returns, xli_returns)
# Manually calculating the t-statistic
N1 = len(xle_returns)
N2 = len(xli_returns)
m1 = xle_returns.mean()
m2 = xli_returns.mean()
s1 = xle_returns.std()
s2 = xli_returns.std()
test_statistic = (m1 - m2) / (s1**2 / N1 + s2**2 / N2)**0.5
print 't-test statistic:', test_statistic
# Alternative form, using the scipy library on python.
stats.ttest_ind(xle_returns, xli_returns, equal_var=False)
# Calculate the mean and median of xle and xli using the numpy library
xle_mean = np.mean(xle_returns)
xle_median = np.median(xle_returns)
print 'Mean of XLE returns = ', xle_mean, '; median = ', xle_median
xli_mean = np.mean(xli_returns)
xli_median = np.median(xli_returns)
print 'Mean of XLI returns = ', xli_mean, '; median = ', xli_median
# Print values of Skewness for xle and xli returns
print 'Skew of XLE returns:', stats.skew(xle_returns)
print 'Skew of XLI returns:', stats.skew(xli_returns)
# Print value of Kurtosis for xle and xli returns
print 'kurtosis:', stats.kurtosis(xle_returns)
print 'kurtosis:', stats.kurtosis(xli_returns)
# Distribution plot of XLE returns in red (for Kurtosis of 1.6).
# Distribution plot of XLI returns in blue (for Kurtosis of 2.0).
xle = sns.distplot(xle_returns, color = 'r', axlabel = 'xle')
xli = sns.distplot(xli_returns, axlabel = 'xli');
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview <a class="anchor" id="Overview"></a>
Step2: However under the enumeration interpretation, the same sample site will return a fully enumerated set of values, based on its distribution's .enumerate_support() method.
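A quick standalone check of what that enumeration returns for the categorical site used below (a sketch, not part of the tutorial code):
import torch
import pyro.distributions as dist
support = dist.Categorical(torch.ones(5)).enumerate_support()
print(support)        # tensor([0, 1, 2, 3, 4])
print(support.shape)  # torch.Size([5])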
Step3: Note that we've used "parallel" enumeration to enumerate along a new tensor dimension. This is cheap and allows Pyro to parallelize computation, but requires downstream program structure to avoid branching on the value of z. To support dynamic program structure, you can instead use "sequential" enumeration, which runs the entire (model, guide) pair once per sample value, but requires running the model multiple times.
Step4: Parallel enumeration is cheaper but more complex than sequential enumeration, so we'll focus the rest of this tutorial on the parallel variant. Note that both forms can be interleaved.
Step5: Examining discrete latent states <a class="anchor" id="Examining-discrete-latent-states"></a>
Step6: Notice that under the hood infer_discrete runs the model twice
Step7: When enumerating within a plate (as described in the next section), Vindex can also be used together with capturing the plate index via with pyro.plate(...) as i to index into batch dimensions. Here's an example with nontrivial event dimensions due to the Dirichlet distribution.
Step8: Plates and enumeration <a class="anchor" id="Plates-and-enumeration"></a>
Step9: Observe that during inference the model is run twice, first by the AutoNormal to trace sample sites, and second by elbo to compute loss. In the first run, x has the standard interpretation of one sample per datum, hence shape (10,). In the second run enumeration can use the same three values (3,1) for all data points, and relies on broadcasting for any dependent sample or observe sites that depend on data. For example, in the pyro.sample("obs",...) statement, the distribution has shape (3,1), the data has shape (10,), and the broadcasted log probability tensor has shape (3,10).
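A standalone check of that broadcasting rule (shapes only; the values are arbitrary):
import torch
log_prob = torch.randn(3, 1) + torch.randn(10)   # enumerated shape (3, 1) against data shape (10,)
assert log_prob.shape == (3, 10)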
Step10: We can learn the global parameters using SVI with an autoguide.
Step11: Notice that the model was run twice here
| <ASSISTANT_TASK:>
Python Code:
import os
import torch
import pyro
import pyro.distributions as dist
from torch.distributions import constraints
from pyro import poutine
from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, config_enumerate, infer_discrete
from pyro.infer.autoguide import AutoNormal
from pyro.ops.indexing import Vindex
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('1.7.0')
pyro.set_rng_seed(0)
def model():
z = pyro.sample("z", dist.Categorical(torch.ones(5)))
print(f"model z = {z}")
def guide():
z = pyro.sample("z", dist.Categorical(torch.ones(5)))
print(f"guide z = {z}")
elbo = Trace_ELBO()
elbo.loss(model, guide);
elbo = TraceEnum_ELBO(max_plate_nesting=0)
elbo.loss(model, config_enumerate(guide, "parallel"));
elbo = TraceEnum_ELBO(max_plate_nesting=0)
elbo.loss(model, config_enumerate(guide, "sequential"));
@config_enumerate
def model():
p = pyro.param("p", torch.randn(3, 3).exp(), constraint=constraints.simplex)
x = pyro.sample("x", dist.Categorical(p[0]))
y = pyro.sample("y", dist.Categorical(p[x]))
z = pyro.sample("z", dist.Categorical(p[y]))
print(f" model x.shape = {x.shape}")
print(f" model y.shape = {y.shape}")
print(f" model z.shape = {z.shape}")
return x, y, z
def guide():
pass
pyro.clear_param_store()
print("Sampling:")
model()
print("Enumerated Inference:")
elbo = TraceEnum_ELBO(max_plate_nesting=0)
elbo.loss(model, guide);
serving_model = infer_discrete(model, first_available_dim=-1)
x, y, z = serving_model() # takes the same args as model(), here no args
print(f"x = {x}")
print(f"y = {y}")
print(f"z = {z}")
@config_enumerate
def model():
p = pyro.param("p", torch.randn(5, 4, 3, 2).exp(), constraint=constraints.simplex)
x = pyro.sample("x", dist.Categorical(torch.ones(4)))
y = pyro.sample("y", dist.Categorical(torch.ones(3)))
with pyro.plate("z_plate", 5):
p_xy = Vindex(p)[..., x, y, :]
z = pyro.sample("z", dist.Categorical(p_xy))
print(f" p.shape = {p.shape}")
print(f" x.shape = {x.shape}")
print(f" y.shape = {y.shape}")
print(f" p_xy.shape = {p_xy.shape}")
print(f" z.shape = {z.shape}")
return x, y, z
def guide():
pass
pyro.clear_param_store()
print("Sampling:")
model()
print("Enumerated Inference:")
elbo = TraceEnum_ELBO(max_plate_nesting=1)
elbo.loss(model, guide);
@config_enumerate
def model():
data_plate = pyro.plate("data_plate", 6, dim=-1)
feature_plate = pyro.plate("feature_plate", 5, dim=-2)
component_plate = pyro.plate("component_plate", 4, dim=-1)
with feature_plate:
with component_plate:
p = pyro.sample("p", dist.Dirichlet(torch.ones(3)))
with data_plate:
c = pyro.sample("c", dist.Categorical(torch.ones(4)))
with feature_plate as vdx: # Capture plate index.
pc = Vindex(p)[vdx[..., None], c, :] # Reshape it and use in Vindex.
x = pyro.sample("x", dist.Categorical(pc),
obs=torch.zeros(5, 6, dtype=torch.long))
print(f" p.shape = {p.shape}")
print(f" c.shape = {c.shape}")
print(f" vdx.shape = {vdx.shape}")
print(f" pc.shape = {pc.shape}")
print(f" x.shape = {x.shape}")
def guide():
feature_plate = pyro.plate("feature_plate", 5, dim=-2)
component_plate = pyro.plate("component_plate", 4, dim=-1)
with feature_plate, component_plate:
pyro.sample("p", dist.Dirichlet(torch.ones(3)))
pyro.clear_param_store()
print("Sampling:")
model()
print("Enumerated Inference:")
elbo = TraceEnum_ELBO(max_plate_nesting=2)
elbo.loss(model, guide);
@config_enumerate
def model(data, num_components=3):
print(f" Running model with {len(data)} data points")
p = pyro.sample("p", dist.Dirichlet(0.5 * torch.ones(num_components)))
scale = pyro.sample("scale", dist.LogNormal(0, num_components))
with pyro.plate("components", num_components):
loc = pyro.sample("loc", dist.Normal(0, 10))
with pyro.plate("data", len(data)):
x = pyro.sample("x", dist.Categorical(p))
print(" x.shape = {}".format(x.shape))
pyro.sample("obs", dist.Normal(loc[x], scale), obs=data)
print(" dist.Normal(loc[x], scale).batch_shape = {}".format(
dist.Normal(loc[x], scale).batch_shape))
guide = AutoNormal(poutine.block(model, hide=["x", "data"]))
data = torch.randn(10)
pyro.clear_param_store()
print("Sampling:")
model(data)
print("Enumerated Inference:")
elbo = TraceEnum_ELBO(max_plate_nesting=1)
elbo.loss(model, guide, data);
data_dim = 4
num_steps = 10
data = dist.Categorical(torch.ones(num_steps, data_dim)).sample()
def hmm_model(data, data_dim, hidden_dim=10):
print(f"Running for {len(data)} time steps")
# Sample global matrices wrt a Jeffreys prior.
with pyro.plate("hidden_state", hidden_dim):
transition = pyro.sample("transition", dist.Dirichlet(0.5 * torch.ones(hidden_dim)))
emission = pyro.sample("emission", dist.Dirichlet(0.5 * torch.ones(data_dim)))
x = 0 # initial state
for t, y in enumerate(data):
x = pyro.sample(f"x_{t}", dist.Categorical(transition[x]),
infer={"enumerate": "parallel"})
pyro.sample(f" y_{t}", dist.Categorical(emission[x]), obs=y)
print(f" x_{t}.shape = {x.shape}")
hmm_guide = AutoNormal(poutine.block(hmm_model, expose=["transition", "emission"]))
pyro.clear_param_store()
elbo = TraceEnum_ELBO(max_plate_nesting=1)
elbo.loss(hmm_model, hmm_guide, data, data_dim=data_dim);
def hmm_model(data, data_dim, hidden_dim=10):
with pyro.plate("hidden_state", hidden_dim):
transition = pyro.sample("transition", dist.Dirichlet(0.5 * torch.ones(hidden_dim)))
emission = pyro.sample("emission", dist.Dirichlet(0.5 * torch.ones(data_dim)))
x = 0 # initial state
for t, y in pyro.markov(enumerate(data)):
x = pyro.sample(f"x_{t}", dist.Categorical(transition[x]),
infer={"enumerate": "parallel"})
pyro.sample(f"y_{t}", dist.Categorical(emission[x]), obs=y)
print(f"x_{t}.shape = {x.shape}")
# We'll reuse the same guide and elbo.
elbo.loss(hmm_model, hmm_guide, data, data_dim=data_dim);
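# pyro.markov declares that x_t depends only on x_{t-1}, letting TraceEnum_ELBO recycle
# enumeration dimensions so memory use stays bounded as the number of time steps grows.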
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create dataframe
Step2: Make plot
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
raw_data = {'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'pre_score': [4, 24, 31, 2, 3],
'mid_score': [25, 94, 57, 62, 70],
'post_score': [5, 43, 23, 23, 51]}
df = pd.DataFrame(raw_data, columns = ['first_name', 'pre_score', 'mid_score', 'post_score'])
df
# input data, specifically the second and
# third rows, skipping the first column
x1 = df.iloc[1, 1:]
x2 = df.iloc[2, 1:]
# Create the bar labels
bar_labels = ['Pre Score', 'Mid Score', 'Post Score']
# Create a figure
fig = plt.figure(figsize=(8,6))
# Set the y position
y_pos = np.arange(len(x1))
y_pos = [x for x in y_pos]
plt.yticks(y_pos, bar_labels, fontsize=10)
# Create a horizontal bar in the position y_pos
plt.barh(y_pos,
# using x1 data
x1,
# that is centered
align='center',
# with alpha 0.4
alpha=0.4,
# and color green
color='#263F13')
# Create a horizontal bar in the position y_pos
plt.barh(y_pos,
# using NEGATIVE x2 data
-x2,
# that is centered
align='center',
# with alpha 0.4
alpha=0.4,
# and color green
color='#77A61D')
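# Negating x2 mirrors Tina's bars to the left of zero, giving a back-to-back
# (tornado-style) comparison against Molly's bars on the right.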
# annotation and labels
plt.xlabel('Tina\'s Score: Light Green. Molly\'s Score: Dark Green')
t = plt.title('Comparison of Molly and Tina\'s Score')
plt.ylim([-1,len(x1)+0.1])
plt.xlim([-max(x2)-10, max(x1)+10])
plt.grid()
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train the model
Step2: Some crude attempts at sentiment analysis
Step3: Proof of principle - ish
| <ASSISTANT_TASK:>
Python Code:
import Load_Text_Set as l_data
import run_Word2Vec as w2v
words = l_data.text_8(200000)
embeddings = w2v.run_embeddings()
import numpy as np
import regex as re
joy_words = ['happy','joy','pleasure','glee']
sad_words = ['sad','unhappy','gloomy']
scary_words = ['scary','frightening','terrifying', 'horrifying']
disgust_words = ['disgust', 'distaste', 'repulsion']
anger_words = ['anger','rage','irritated']
def syn_average(word, list_words = []):
to_ret = 0
count = 0 #use this in case a word isnt in dict
for syn in list_words:
if syn in words.dictionary:
syn_id = words.dictionary[syn]
to_ret+=np.matmul(embeddings[word].reshape(1,128), embeddings[syn_id].reshape(128,1))
count +=1
else:
print(syn," is not in dict")
return to_ret/count
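# syn_average: mean dot-product similarity between a word's embedding and the embeddings
# of the listed synonyms, skipping any synonym that is missing from the vocabulary.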
def test(string_words):
happy = words.dictionary['joy']
sad = words.dictionary['fear']
scary = words.dictionary['sad']
disgust = words.dictionary['disgust']
anger = words.dictionary['anger']
d2happy = 0
d2sad = 0
d2scary = 0
d2disgust = 0
d2anger = 0
for a in string_words:
if a in words.dictionary:
in_dict = words.dictionary[a]
d2happy += syn_average(in_dict,joy_words)
d2sad += syn_average(in_dict,sad_words)
d2scary += syn_average(in_dict,scary_words)
d2disgust += syn_average(in_dict,disgust_words)
d2anger += syn_average(in_dict,anger_words )
d2happy = d2happy/len(string_words)
d2sad = d2sad/len(string_words)
d2scary = d2scary/len(string_words)
d2disgust = d2disgust/len(string_words)
d2anger = d2anger/len(string_words)
print( max(d2happy,0),"\t",max(d2sad,0),"\t", max(d2scary,0),"\t", max(d2disgust,0),"\t", max(d2anger,0))
def plot_emotions(top = 8):
emotions= [ words.dictionary['joy'], words.dictionary['fear'],
words.dictionary['sad'], words.dictionary['disgust'], words.dictionary['anger'] ]
for i,i_word in enumerate(emotions):
sim = embeddings.similarity(embeddings)
nearest = (-sim[i_word, :]).argsort()[1:top+1]
print('Nearest to ', emotions[i], ": ")
for k in range(top):
close_word = words.reverse_dictionary(nearest[k])
print('\t',close_word)
happy_string_ = "Even Harry, who knew nothing about the different brooms, thought it looked wonderful. Sleek and shiny, with a mahogany handle, it had a long tail of neat, straight twigs and Nimbus Two Thousand written in gold near the top. As seven o'clock drew nearer, Harry left the castle and set off in the dusk toward the Quidditch field. Held never been inside the stadium before. Hundreds of seats were raised in stands around the field so that the spectators were high enough to see what was going on. At either end of the field were three golden poles with hoops on the end. They reminded Harry of the little plastic sticks Muggle children blew bubbles through, except that they were fifty feet high. Too eager to fly again to wait for Wood, Harry mounted his broomstick and kicked off from the ground. What a feeling -- he swooped in and out of the goal posts and then sped up and down the field. The Nimbus Two Thousand turned wherever he wanted at his lightest touch."
scary_string = "and the next second, Harry felt Quirrell's hand close on his wrist. At once, a needle-sharp pain seared across Harry's scar; his head felt as though it was about to split in two; he yelled, struggling with all his might, and to his surprise, Quirrell let go of him. The pain in his head lessened -- he looked around wildly to see where Quirrell had gone, and saw him hunched in pain, looking at his fingers -- they were blistering before his eyes."
angry_string = 'He’d forgotten all about the people in cloaks until he passed a group of them next to the baker’s. He eyed them angrily as he passed. He didn’t know why, but they made him uneasy. This bunch were whispering excitedly, too, and he couldn’t see a single collectingtin. It was on his way back past them, clutching a large doughnut in a bag, that he caught a few words of what they were saying.'
happy_string_words = re.sub(r"\p{P}+", "", happy_string_).split()
scary_string_words = re.sub(r"\p{P}+", "", scary_string).split()
angry_string_words = re.sub(r"\p{P}+", "",angry_string).split()
print("\n")
print("Sentence: ")
print(happy_string_)
print("Similarity to: ")
print("happy \t\t sad \t\t scary \t\t disgust \t\t anger")
test(happy_string_words)
print("\n")
print("Sentence: ")
print(scary_string)
print("Similarity to: ")
print("happy \t\t sad \t\t scary \t\t disgust \t\t anger")
test(scary_string_words)
print("\n")
print("Sentence: ")
print(angry_string)
print("Similarity to: ")
print("happy \t\t sad \t\t scary \t\t disgust \t\t anger")
test(angry_string_words)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comparing the Errors
| <ASSISTANT_TASK:>
Python Code:
def compute_value_dct(theta_lst, features):
return [{s: np.dot(theta, x) for s, x in features.items()} for theta in theta_lst]
def compute_values(theta_lst, X):
return [np.dot(X, theta) for theta in theta_lst]
def compute_errors(value_lst, error_func):
return [error_func(v) for v in value_lst]
def rmse_factory(true_values, d=None):
true_values = np.ravel(true_values)
# sensible default for weighting distribution
if d is None:
d = np.ones_like(true_values)
else:
d = np.ravel(d)
assert(len(d) == len(true_values))
# the actual root-mean square error
def func(v):
diff = true_values - v
return np.sqrt(np.mean(d*diff**2))
return func
# define the experiment
num_states = 8
num_features = 6
num_active = 3
num_runs = 10
max_steps = 10000
# set up environment
env = chicken.Chicken(num_states)
# Define the target policy
pol_pi = policy.FixedPolicy({s: {0: 1} for s in env.states})
# Define the behavior policy
pol_mu = policy.FixedPolicy({s: {0: 1} if s < 4 else {0: 0.5, 1: 0.5} for s in env.states})
# state-dependent gamma
gm_dct = {s: 0.9 for s in env.states}
gm_dct[0] = 0
gm_func = parametric.MapState(gm_dct)
gm_p_func = parametric.MapNextState(gm_dct)
# set up algorithm parameters
update_params = {
'alpha': 0.02,
'beta': 0.002,
'gm': gm_func,
'gm_p': gm_p_func,
'lm': 0.0,
'lm_p': 0.0,
'interest': 1.0,
}
# Run all available algorithms
data = dict()
for name, alg in algos.algo_registry.items():
print(name)
run_lst = []
for i in range(num_runs):
print("Run: %d"%i, end="\r")
episode_data = dict()
# Want to use random features
phi = features.RandomBinary(num_features, num_active)
episode_data['features'] = {s: phi(s) for s in env.states}
# Set up the agent
_update_params = update_params.copy()
if name == 'ETD':
_update_params['alpha'] = 0.002
agent = OffPolicyAgent(alg(phi.length), pol_pi, pol_mu, phi, _update_params)
# Run the experiment
episode_data['steps'] = run_contextual(agent, env, max_steps)
run_lst.append(episode_data)
data[name] = run_lst
# True values & associated stationary distribution
theta_ls = np.array([ 0.4782969, 0.531441 , 0.59049, 0.6561, 0.729, 0.81, 0.9, 1.])
d_pi = np.ones(num_states)/num_states
D_pi = np.diag(d_pi)
# define the error/objective function
err_func = rmse_factory(theta_ls, d=d_pi)
baseline = err_func(np.zeros(num_states))
for name, experiment in data.items():
print(name)
errors = []
for episode in experiment:
        feats = episode['features']
        X = np.array([feats[k] for k in sorted(feats.keys())])
        steps = episode['steps']
thetas = list(pluck('theta', steps))
# compute the values at each step
val_lst = compute_values(thetas, X)
# compute the errors at each step
err_lst = compute_errors(val_lst, err_func)
errors.append(err_lst)
# calculate the average error
clipped_errs = np.clip(errors, 0, 100)
avg_err = np.mean(clipped_errs, axis=0)
# plot the errors
fig, ax = plt.subplots()
ax.plot(avg_err)
# format the graph
ax.set_ylim(1e-2, 2)
ax.axhline(baseline, c='red')
ax.set_yscale('log')
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look at the tables and columns we have for analysis.
Step2: Price History
Step3: Compare Google to the S&P
Step4: Learning objective 2
Step5: Let's see how the price of stocks changes over time on a yearly basis. Using the LAG function, we can compute the change in stock price year-over-year.
Step6: Compute the year-over-year percentage increase.
Step7: Let's visualize some yearly stock changes
Step8: There have been some major fluctuations in individual stocks. For example, there were major drops during the early 2000s for tech companies.
Step9: Stock splits can also impact our data - causing a stock price to rapidly drop. In practice, we would need to clean all of our stock data to account for this. This would be a major effort! Fortunately, in the case of IBM, for example, all stock splits occurred before the year 2000.
Step10: S&P companies list
Step11: We can join the price histories table with the S&P 500 table to compare industries
Step12: Using pandas we can "unstack" our table so that each industry has its own column. This will be useful for plotting.
Step13: Let's scale each industry using min/max scaling. This will put all of the stocks on the same scale. Currently it can be hard to see the changes in stocks over time across industries.
Step14: We can also create a smoothed version of the plot above using a rolling mean. This is a useful transformation to make when visualizing time-series data.
| <ASSISTANT_TASK:>
Python Code:
PROJECT = 'qwiklabs-gcp-ml-489e10787faf' # Change to your project.
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from google.cloud import bigquery
from IPython.core.magic import register_cell_magic
from IPython import get_ipython
bq = bigquery.Client(project=PROJECT)
# Allow you to easily have Python variables in SQL query.
@register_cell_magic('with_globals')
def with_globals(line, cell):
contents = cell.format(**globals())
if 'print' in line:
print(contents)
get_ipython().run_cell(contents)
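# The decorator above fills {name} placeholders in a cell body from global Python
# variables via str.format before running it -- e.g. {PROJECT} in the cells below.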
%%with_globals
%%bigquery --project {PROJECT}
SELECT table_name, column_name, data_type
FROM `asl-ml-immersion.stock_src.INFORMATION_SCHEMA.COLUMNS`
ORDER BY table_name, ordinal_position
def query_stock(symbol):
return bq.query('''
SELECT *
FROM `asl-ml-immersion.stock_src.price_history`
WHERE symbol="{0}"
ORDER BY Date
'''.format(symbol)).to_dataframe()
df_stock = query_stock('GOOG')
df_stock.Date = pd.to_datetime(df_stock.Date)
ax = df_stock.plot(x='Date', y='Close', title='Google stock')
# Add smoothed plot.
df_stock['Close_smoothed'] = df_stock.Close.rolling(100, center=True).mean()
df_stock.plot(x='Date', y='Close_smoothed', ax=ax);
df_sp = query_stock('gspc')
def plot_with_sp(symbol):
df_stock = query_stock(symbol)
    df_stock.Date = pd.to_datetime(df_stock.Date)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax = df_sp.plot(x='Date', y='Close', label='S&P', color='green', ax=ax1,
alpha=0.7)
ax = df_stock.plot(x='Date', y='Close', label=symbol,
title=symbol + ' and S&P index', ax=ax2, alpha=0.7)
ax1.legend(loc=3)
ax2.legend(loc=4)
ax1.set_ylabel('S&P price')
ax2.set_ylabel(symbol + ' price')
ax.set_xlim(pd.to_datetime('2004-08-05'), pd.to_datetime('2013-08-05'))
plot_with_sp('GOOG')
plot_with_sp('IBM')
%%with_globals
%%bigquery --project {PROJECT} df
WITH
with_year AS
(
SELECT symbol,
EXTRACT(YEAR FROM date) AS year,
close
FROM `asl-ml-immersion.stock_src.price_history`
WHERE symbol in (SELECT symbol FROM `asl-ml-immersion.stock_src.snp500`)
),
year_aggregated AS
(
SELECT year, symbol, AVG(close) as avg_close
FROM with_year
WHERE year >= 2000
GROUP BY year, symbol
)
SELECT year, symbol, avg_close as close,
(LAG(avg_close, 1) OVER (PARTITION BY symbol order by year DESC))
AS next_yr_close
FROM year_aggregated
ORDER BY symbol, year
df.dropna(inplace=True)
df['percent_increase'] = (df.next_yr_close - df.close) / df.close
def get_random_stocks(n=5):
random_stocks = df.symbol.sample(n=n, random_state=3)
rand = df.merge(random_stocks)
return rand[['year', 'symbol', 'percent_increase']]
rand = get_random_stocks()
for symbol, _df in rand.groupby('symbol'):
plt.figure()
sns.barplot(x='year', y="percent_increase", data=_df)
plt.title(symbol)
df.sort_values('percent_increase').head()
stock_symbol = 'YHOO'
%%with_globals
%%bigquery --project {PROJECT} df
SELECT date, close
FROM `asl-ml-immersion.stock_src.price_history`
WHERE symbol='{stock_symbol}'
ORDER BY date
ax = df.plot(x='date', y='close')
stock_symbol = 'IBM'
%%with_globals
%%bigquery --project {PROJECT} df
SELECT date, close
FROM `asl-ml-immersion.stock_src.price_history`
WHERE symbol='{stock_symbol}'
ORDER BY date
IBM_STOCK_SPLIT_DATE = '1979-05-10'
ax = df.plot(x='date', y='close')
ax.vlines(pd.to_datetime(IBM_STOCK_SPLIT_DATE),
0, 500, linestyle='dashed', color='grey', alpha=0.7);
%%with_globals
%%bigquery --project {PROJECT} df
SELECT *
FROM `asl-ml-immersion.stock_src.snp500`
df.industry.value_counts().plot(kind='barh');
%%with_globals
%%bigquery --project {PROJECT} df
WITH sp_prices AS
(
SELECT a.*, b.industry
FROM `asl-ml-immersion.stock_src.price_history` a
JOIN `asl-ml-immersion.stock_src.snp500` b
USING (symbol)
WHERE date >= "2000-01-01"
)
SELECT Date, industry, AVG(close) as close
FROM sp_prices
GROUP BY Date, industry
ORDER BY industry, Date
df.head()
# Pandas `unstack` to make each industry a column. Useful for plotting.
df_ind = df.set_index(['industry', 'Date']).unstack(0).dropna()
df_ind.columns = [c[1] for c in df_ind.columns]
df_ind.head()
ax = df_ind.plot(figsize=(16, 8))
# Move legend down.
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2)
def min_max_scale(df):
    # Rescale each column to the [0, 1] range.
    return (df - df.min()) / (df.max() - df.min())
scaled = min_max_scale(df_ind)
ax = scaled.plot(figsize=(16, 8))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2);
SMOOTHING_WINDOW = 30 # Days.
rolling = scaled.copy()
for col in scaled.columns:
rolling[col] = scaled[col].rolling(SMOOTHING_WINDOW).mean()
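# Equivalent vectorized form (pandas applies rolling means column-wise):
# rolling = scaled.rolling(SMOOTHING_WINDOW).mean()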
ax = rolling.plot(figsize=(16, 8))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2);
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Generate x = a sequence of points, y = sin(x)+a where a is a small random error.
Step2: 2. Draw a scatter plot of x and y.
Step3: 3. Use linear regression model to predict y, with only one feature--x. Please print out the training and validation score of your model and the mathematical formula of your model.
Step4: How should we interpret negative cv_scores?
Step5: 5. Try to build a linear model using two features--x and x^2. Please print out the training and validation score and mathematical formula.
Step6: 6. Try to build linear models with features x, x^2, x^3, ..., x^15, and plot how the training score and validation score change as the number of features gets larger. According to the result you get, what's the best number of features here?
| <ASSISTANT_TASK:>
Python Code:
%%javascript
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')
# import the necessary package at the very beginning
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import sklearn
## Type Your Answer Below ##
np.random.seed(1)
X = np.random.random([100, 1]).ravel()*10 # generate a set of 100 random float in range [0, 10]
X[:5]
random_error = np.random.randn(100) # genrate a set of 100 random error from a standard normal distribution
random_error[:5]
Y = np.sin(X) + random_error # y = sin(x)+a where a is a small random error
Y[:5]
## Type Your Answer Below ##
plt.scatter(x=X, y=Y, marker='o', alpha=0.4, color='b')
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Y=sin(X) + random_error')
print('X: ', X.shape, ' ', 'Y: ', Y.shape )
## Type Your Answer Below ##
# reshape X from row vector in shape(100, ) to column vector in shape (100, 1)
X_re = X.reshape(X.shape[0], 1)
X_re.shape
# initiate a linear regression model
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr
# Use train_test_split to train and test lr
from sklearn import model_selection
Xtrain, Xtest, Ytrain, Ytest = model_selection.train_test_split(X_re, Y, train_size=70, random_state=1)
print(Xtrain.shape, Xtest.shape, Ytrain.shape, Ytest.shape)
lr.fit(Xtrain, Ytrain)
Ypred = lr.predict(Xtest)
print('The mathematical formula of linear regression model: ', 'Y = ' + str(lr.coef_) + '*' + 'X + ' + str(lr.intercept_), '\n')
print('The coefficient of determination R^2 of the training set: ', lr.score(Xtrain, Ytrain), '\n')
print('The coefficient of determination R^2 of the testing set: ', lr.score(Xtest, Ytest), '\n')
plt.scatter(Ytest, Ypred, marker='o', alpha=0.5)
plt.xlabel('Ytest')
plt.ylabel('Ypred')
plt.title('Linear regression model performance')
# Get the training and validation score of your model
# What exactly do the "training score" and "validation score" refer to here?
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(lr, X_re, Y, cv=3) # 3-fold cross validation
print('cv_scores: ', cv_scores)
print('mean of cv_scores: ', cv_scores.mean())
#The mean score and the 95% confidence interval of the score estimate are hence given by:
print("Accuracy: %0.2f (+/- %0.2f)" % (cv_scores.mean(), cv_scores.std() * 2))
## Type Your Answer Below ##
# show predicted y in red color
Ypred = lr.predict(X_re)
plt.plot(X, Ypred, label='Predicted Y', color='r')
# show real y in blue color
plt.scatter(X, Y, label='Real Y', color='b')
# show ground truth - sin(X) in green color
Yground = np.sin(X)
plt.scatter(X, Yground, label='Ground truth Y', color='g')
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Three types of Y in a plot')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
## Type Your Answer Below ##
X2 = X_re**2
X2 = np.hstack([X_re, X2])
print(X2.shape)
lr2 = LinearRegression()
lr2.fit(X2, Y)
cv_scores2 = cross_val_score(lr2, X2, Y, cv=3)
print('cv_scores for model using x and x^2: ', cv_scores2)
print('mean of cv_scores for model using x and x^2: ', cv_scores2.mean())
#The mean score and the 95% confidence interval of the score estimate are hence given by:
print("Accuracy: %0.2f (+/- %0.2f)" % (cv_scores2.mean(), cv_scores2.std() * 2))
print('The mathematical formula of linear regression model: ', 'Y = ' + str(lr2.coef_[0]) + '*X + ' + str(lr2.coef_[1]) + "*X^2 + " + str(lr2.intercept_), '\n')
# visualize new set of Ypred, Y, Yground_truth
Ypred2 = lr2.predict(X2)
Yground = np.sin(X)
plt.scatter(X, Ypred2, label='predicted y using x and x**2', color='r')
plt.scatter(X, Y, label='real y', color='b')
plt.scatter(X, Yground, label='ground truth - sin(x)', color='g')
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Three types of Y in a plot')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
from sklearn.model_selection import validation_curve
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
index =[] # generate an array with number 1 to 15
for i in range(1, 16):
index.append(i)
df = pd.DataFrame(columns = index) # create a new dataframe with 15 columns
df.iloc[:, 0] = X # the 1st column is X**1
mean_cv_scores = []
mean_train_scores = []
mean_valid_scores= []
for i in index:
print("################ Adding " + "x**" + str(i) + " ######################")
df.loc[:, i] = X**i # Add a new column of values
lr = LinearRegression() # start a new linear regression model with the new column taking into consideration
#lr.fit(df.iloc[:, :i], Y)
#Ypredict = lr.predict(df.iloc[:, :i])
cv_scores = cross_val_score(lr, df.iloc[:, :i], Y, cv=3)
print("mean cv score for the model is:", np.mean(cv_scores))
mean_cv_scores.append(np.mean(cv_scores))
train_score, valid_score = validation_curve(Ridge(), df.iloc[:, :i], Y, "alpha", np.logspace(-7, 3, 3))
print("mean train score is: ", np.mean(train_score))
print("mean valid score is: ", np.mean(valid_score))
mean_train_scores.append(np.mean(train_score))
mean_valid_scores.append(np.mean(valid_score))
print()
plt.plot(df.columns, mean_train_scores, c='b', label='mean train scores') #plot the training score and validation score showing what happens when feature set gets larger
plt.plot(df.columns, mean_valid_scores, c='r', label = 'mean valid scores')
plt.xlabel('feature')
plt.ylabel('mean of evaluation scores')
plt.legend(loc=0)
plt.plot(df.columns, mean_cv_scores, label='mean cv scores') #plot the training score and validation score showing what happens when feature set gets larger
plt.xlabel('feature')
plt.ylabel('mean of cross validation score')
plt.legend(loc=0)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
Step2: Adding Spots
Step3: Spot Parameters
Step4: The 'colat' parameter defines the latitude on the star measured from its North Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the observer at t0.
Step5: If we set t0 to 5 instead of zero, then the spot will cross the line-of-sight at t=5 (since the spot's longitude is 0).
Step6: And if we change the inclination to 0, we'll be looking at the north pole of the star. This clearly illustrates the right-handed rotation of the star. At time=t0=5 the spot will now be pointing in the negative y-direction.
| <ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.0,<2.1"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_star()
b.add_spot(radius=30, colat=80, long=0, relteff=0.9)
print b['spot']
times = np.linspace(0, 10, 11)
b.set_value('period', 10)
b.add_dataset('mesh', times=times)
b.run_compute(distortion_method='rotstar', irrad_method='none')
b.animate(x='xs', y='ys', facecolor='teffs')
b.set_value('t0', 5)
b.run_compute(distortion_method='rotstar', irrad_method='none')
b.animate(x='xs', y='ys', facecolor='teffs')
b.set_value('incl', 0)
b.run_compute(distortion_method='rotstar', irrad_method='none')
b.animate(x='xs', y='ys', facecolor='teffs')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now import the version in bruges for comparison
Step2: Plot the two together
Step3: We can use the inspect module ot compare the source code
Step4: Ormsby wavelet
Step5: Aki-Richards
Step6: Exact Zoeppritz
Step7: Bonus
Step8: Yet another aside
| <ASSISTANT_TASK:>
Python Code:
def r(l,d,f):import numpy as n;t=n.arange(-l/2,l/2,d);k=(n.pi*f*t)**2;return t,(1-2*k)/n.exp(k)
from bruges.filters import ricker
import matplotlib.pyplot as plt
t, w = r(0.128, 0.004, 25)
plt.figure(figsize=(10,3))
plt.plot(t, ricker(0.128, 0.004, 25), 'o') # Compare to bruges.
plt.plot(t, w)
plt.xlim(-0.07, 0.07)
plt.show()
import inspect
inspect.getsource(r).strip()
len(inspect.getsource(r).strip())
len(inspect.getsource(ricker).strip())
def o(l,d,f):import numpy as n;t=n.arange(-l/2,l/2,d);p=n.pi;f,g,h,i=f;λ=lambda ϕ,t:(n.sinc(ϕ*t)*p*ϕ)**2;A=(λ(i,t)-λ(h,t))/(i-h)-(λ(g,t)-λ(f,t))/(g-f);return t,A/max(A)
from bruges.filters import ormsby
import matplotlib.pyplot as plt
%matplotlib inline
t, w = o(0.128, 0.004, [8, 12, 60, 80])
plt.figure(figsize=(10,3))
plt.plot(t, ormsby(0.128, 0.004, [8, 12, 60, 80]), 'o') # Compare to bruges.
plt.plot(t, w)
plt.xlim(-0.07, 0.07)
plt.show()
inspect.getsource(o).strip()
len(inspect.getsource(o).strip())
len(inspect.getsource(ormsby).strip())
def a(α,β,ρ,χ,ψ,ω,t):import numpy as n;w=ω-ρ;x=ω+ρ;y=ψ+β;p=n.pi*t/180;s=n.sin(p);return w/x-(y/α)**2*w/x*s**2+(χ-α)/(χ+α)/n.cos((p+n.arcsin(χ/α*s))/2)**2-(y/α)**2*(2*(ψ-β)/y)*s**2
from bruges.reflection import akirichards
# 4-term Aki-Richards equation in 255 characters!
# http://subsurfwiki.org/wiki/Aki–Richards_equation
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rock1 = (2300, 1200, 2500) # Vp, Vs, rho for layer 1
rock2 = (2400, 1250, 2450) # Vp, Vs, rho for layer 2
theta1 = np.arange(40)
result = a(*rock1, *rock2, theta1)
plt.figure(figsize=(10,4))
plt.plot(akirichards(*rock1, *rock2, theta1), 'o')
plt.plot(result)
plt.show()
inspect.getsource(a).strip()
len(inspect.getsource(a).strip())
len(inspect.getsource(akirichards).strip())
def z(α,β,ρ,χ,ψ,ω,t):import numpy as n;t=n.pi*t/180;C=n.cos;A=n.arcsin;S=n.sin;p=S(t)/α;u=A(p*χ);ϕ=A(p*β);υ=A(p*ψ);v=lambda w,x,y,z: y-2*y*S(z)**2+2*w*S(x)**2;a=ω-2*ω*S(υ)**2-ρ+2*ρ*S(ϕ)**2;b=v(ρ,ϕ,ω,υ);x=v(ω,υ,ρ,ϕ);d=2*(ω*ψ**2-ρ*β**2);E=b*C(t)/α+x*C(u)/χ;F=b*C(ϕ)/β+x*C(υ)/ψ;G=a-d*C(t)/α*C(υ)/ψ;H=a-d*C(u)/χ*C(ϕ)/β;return(F*(b*C(t)/α-x*C(u)/χ)-H*p**2*(a+d*C(t)/α*C(υ)/ψ))/(E*F+G*H*p**2)
from bruges.reflection import zoeppritz_rpp
result = z(*rock1, *rock2, np.arange(40))
plt.figure(figsize=(10,4))
plt.plot(zoeppritz_rpp(*rock1, *rock2, theta1), 'o')
plt.plot(result)
plt.show()
inspect.getsource(z).strip()
len(inspect.getsource(z).strip())
len(inspect.getsource(zoeppritz_rpp).strip())
def generate_data(bias, length=501):
return bias + 10 * np.convolve(np.ones(20), np.random.randn(length) - 0.5, mode='same')
vp = generate_data(2600)
rhob = generate_data(2800)
plt.figure(figsize=(10,3))
plt.plot(vp)
plt.plot(rhob)
plt.show()
def s(v,ρ,l,d,f):import numpy as n;i=v*ρ;t,w=r(l,d,f);return n.convolve(w,n.diff(i)/(i[1:]+i[:-1]),mode='same')
t = np.arange(0, 0.5, 0.001)
length, dt, freq = (0.1, 0.001, 75)
x = s(vp, rhob, length, dt, freq)
fig = plt.figure(figsize=(3, 10))
ax = fig.add_subplot(111)
ax.plot(x, t, 'k')
ax.fill_betweenx(t, x, 0, x>0, color='k')
ax.invert_yaxis()
ax.set_xticks([])
plt.show()
inspect.getsource(s).strip()
len(inspect.getsource(s).strip())
def s(v,ρ,l,d,f):import numpy as n;r=lambda l,d,f:(1-2*(n.pi*f*n.arange(-l/2,l/2,d))**2)/n.exp((n.pi*f*n.arange(-l/2,l/2,d))**2);i=v*ρ;return n.convolve(r(.1,.001,75),n.diff(i)/(i[1:]+i[:-1]),mode='same')
fig = plt.figure(figsize=(3, 10))
ax = fig.add_subplot(111)
ax.plot(x, t, 'k')
ax.fill_betweenx(t, x, 0, x>0, color='k')
ax.invert_yaxis()
ax.set_xticks([])
plt.show()
inspect.getsource(s).strip()
len(inspect.getsource(s).strip())
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multiplying Numpy Arrays
Step2: LAB CHALLENGE
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
one_dimensional = np.array([1,1,1,2,3,3,3,3,3])
one_dimensional
one_dimensional.shape # not yet rows & columns
one_dimensional.reshape((9,-1)) # let numpy figure out how many columns
one_dimensional # still the same
one_dimensional.ndim
two_dimensional = one_dimensional.reshape(1,9) # recycle same name
two_dimensional.shape # is now 2D even if just the one row
two_dimensional.ndim
class M:
    """Symbolic representation of multiply, add."""
def __init__(self, s):
self.s = str(s)
def __mul__(self, other):
return M(self.s + " * " + other.s) # string
def __add__(self, other):
return M(self.s + " + " + other.s)
def __repr__(self):
return self.s
#Demo
one = M(1)
two = M(2)
print(one * two)
A,B,C = map(M, ['A','B','C']) # create three M type objects
m_array = np.array([A,B,C]) # put them in a numpy array
m_array.dtype # infers type (Object)
m_array = m_array.reshape((-1, len(m_array))) # make this 2 dimensional
m_array.shape # transpose works for > 1 dimension
m_array.T # stand it up (3,1) vs (1,3) shape
m_array.dot(m_array.T) # row dot column i.e. self * self.T
m_array.T[1,0] = M('Z') # transpose is not a copy
m_array # original has changes
m_array * m_array # dot versus element-wise
from pandas import Series
A = Series(np.arange(10))
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you did not already have the training set, download and unpack the tarball.
Step2: Problem 1b
Step3: Problem 1c
Step4: Problem 1d
Step5: Problem 1e
Step6: Problem 1f
Step7: Problem 1g
Step8: Problem 2) Machine Learning Classification
Step9: The provided training set comes with 3 features
Step10: Problem 3) Feature Engineering
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as GridSpec
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

def lc_plot(t, m, m_unc, period=0.0):
if period == 0.0:
fig, ax = plt.subplots()
ax.errorbar(t, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax.set_xlabel('HJD (d)')
ax.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
fig.gca().invert_yaxis()
elif period != 0.0:
fig = plt.figure()
gs = GridSpec.GridSpec(5, 1)
ax_full = plt.subplot(gs[:2, :])
ax_full.errorbar(t, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax_full.set_xlabel('HJD (d)')
ax_full.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
plt.gca().invert_yaxis()
ax_phase = plt.subplot(gs[2:, :])
for repeat in [-1, 0, 1]:
ax_phase.errorbar(t/period % 1 + repeat, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax_phase.axvline(x=0, ls='--', color='0.8', lw=1, zorder=3)
ax_phase.axvline(x=1, ls='--', color='0.8', lw=1, zorder=3)
ax_phase.set_xlim(-0.2, 1.2)
ax_phase.set_xlabel('Phase')
ax_phase.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
plt.gca().invert_yaxis()
plt.tight_layout()
def read_lc(filename):
hjd, mag, mag_unc = np.loadtxt(filename, unpack=True)
return hjd, mag, mag_unc
# Mira example
t, m, m_unc = read_lc("./training_lcs/181637+0341.6")
lc_plot(t, m, m_unc, period=150.461188)
# RRL example
t, m, m_unc = read_lc("./training_lcs/011815-3912.8")
lc_plot(t, m, m_unc, period=0.510918)
# dEB example
t, m, m_unc = read_lc("./training_lcs/153835-6727.8")
lc_plot(t, m, m_unc, period=2*1.107174)
# aEB example
t, m, m_unc = read_lc("./training_lcs/141748-5311.2")
lc_plot(t, m, m_unc, period=1.514158)
# WU example
t, m, m_unc = read_lc("./training_lcs/193546-1136.3")
lc_plot(t, m, m_unc, period=0.424015)
# Cepheid example
t, m, m_unc = read_lc("./training_lcs/065640+0011.4")
lc_plot(t, m, m_unc, period=4.022837)
# R Cor Bor example
t, m, m_unc = read_lc("./training_lcs/163242-5315.6")
lc_plot(t, m, m_unc, period=0.0)
train_df = pd.read_csv("training_sources.csv")
X_train = np.array(train_df[["mean", "nobs", "duration"]])
y_train = np.array(train_df["Class"])
def calc_cv_score(X, y):
rf_clf = RandomForestClassifier(n_estimators=150, min_samples_leaf=1)
cv_score = cross_val_score(rf_clf, X, y, cv=10, n_jobs=-1)
print("These features have CV accuracy = {:.4f} +/- {:.4f}".format(np.mean(cv_score), np.std(cv_score, ddof=1)))
calc_cv_score(X_train, y_train)  # complete
def calc_feature(df, train=True):
if train==True:
lc_dir = "./training_lcs/"
else:
lc_dir = "./test_lcs/"
feature = np.empty(len(df))
for source_num, asas_id in enumerate(df["ASAS_ID"]):
t, m, mu = read_lc(lc_dir+asas_id)
# feature calculations
# feature calculations
# feature calculations
feature[source_num] = feat_val
return feature
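# Illustrative sketch only (not the assignment solution): one possible feature is the
# light-curve amplitude, e.g. the spread between the 95th and 5th percentile magnitudes.
# Any statistic of (t, m, mu) can be slotted into the placeholder above the same way.
def example_amplitude_feature(df, train=True):
    if train==True:
        lc_dir = "./training_lcs/"
    else:
        lc_dir = "./test_lcs/"
    feature = np.empty(len(df))
    for source_num, asas_id in enumerate(df["ASAS_ID"]):
        t, m, mu = read_lc(lc_dir+asas_id)
        feature[source_num] = np.percentile(m, 95) - np.percentile(m, 5)
    return feature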
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download & Process Security Dataset
Step2: Analytic I
Step3: Analytic II
| <ASSISTANT_TASK:>
Python Code:
from openhunt.mordorutils import *
spark = get_spark()
sd_file = "https://raw.githubusercontent.com/OTRF/Security-Datasets/master/datasets/atomic/windows/defense_evasion/host/empire_powerview_ldap_ntsecuritydescriptor.zip"
registerMordorSQLTable(spark, sd_file, "sdTable")
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, SubjectUserName, ObjectName, OperationType
FROM sdTable
WHERE LOWER(Channel) = "security"
AND EventID = 4662
AND ObjectServer = "DS"
AND AccessMask = "0x40000"
AND ObjectType LIKE "%19195a5b_6da0_11d0_afd3_00c04fd930c9%"
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, SubjectUserName, ObjectDN, AttributeLDAPDisplayName
FROM sdTable
WHERE LOWER(Channel) = "security"
AND EventID = 5136
AND lower(AttributeLDAPDisplayName) = "ntsecuritydescriptor"
AND (AttributeValue LIKE "%1131f6aa_9c07_11d1_f79f_00c04fc2dcd2%"
OR AttributeValue LIKE "%1131f6ad_9c07_11d1_f79f_00c04fc2dcd2%"
OR AttributeValue LIKE "%89e95b76_444d_4c62_991a_0facbeda640c%")
'''
)
df.show(10,False)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input data
Step2: Create a plot function for a visual comparison of the time series
Step3: Plot an example series - in this case the temperature
Step4: Tune a hierarchical aggregation with segments in combination with duration representation
Step5: And determine the Pareto-optimal aggregation up to 100 total time steps. This may take some time...
Step6: And show the results for the last aggregation
| <ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import copy
import os
import pandas as pd
import matplotlib.pyplot as plt
import tsam.timeseriesaggregation as tsam
import tsam.hyperparametertuning as tune
import tqdm
%matplotlib inline
raw = pd.read_csv('testdata.csv', index_col = 0)
raw=raw.rename(columns={'T': 'Temperature [°C]', 'Load':'Load [kW]', 'Wind':'Wind [m/s]', 'GHI': 'Solar [W/m²]'})
raw.drop(columns=['Wind [m/s]',], inplace=True)
def plotTS(plot_data, raw_data, periodlength=24):
fig, axes = plt.subplots(figsize = [7, 6], dpi = 100, nrows = raw_data.shape[1], ncols = 1)
for i, column in enumerate(raw.columns):
data = plot_data[column]
stacked, timeindex = tsam.unstackToPeriods(copy.deepcopy(data), periodlength)
cax = axes[i].imshow(stacked.values.T, interpolation = 'nearest', vmin = raw_data[column].min(), vmax = raw_data[column].max(), origin='lower')
axes[i].set_aspect('auto')
axes[i].set_ylabel('Hour')
plt.xlabel('Day in the year')
cbar=plt.colorbar(cax, ax=axes[i], pad=0.01, aspect=7)
cbar.set_label(column)
fig.subplots_adjust(right = 1.1, hspace = 0.05)
plotTS(raw,raw,periodlength=24)
tunedAggregations = tune.HyperTunedAggregations(
tsam.TimeSeriesAggregation(
raw,
hoursPerPeriod=24,
clusterMethod="hierarchical",
representationMethod="durationRepresentation",
distributionPeriodWise=False,
rescaleClusterPeriods=False,
segmentation=True,
)
)
tunedAggregations.identifyParetoOptimalAggregation(untilTotalTimeSteps=100)
predictedPeriods = tunedAggregations.aggregationHistory[-1].predictOriginalData()
plotTS(predictedPeriods,raw,periodlength=24)
tunedAggregations._segmentHistory[-1]
tunedAggregations._periodHistory[-1]
aggregation=tsam.TimeSeriesAggregation(
raw,
hoursPerPeriod=24,
noSegments=8,
noTypicalPeriods=14,
clusterMethod="hierarchical",
rescaleClusterPeriods=False,
segmentation=True,
representationMethod="distributionAndMinMaxRepresentation",
distributionPeriodWise=False
)
plotTS(aggregation.predictOriginalData(), raw,periodlength=24)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Verify that models.OneParameterLogisticModel can recover parameters. We would only expect this to be possible when USING_2PL = False.
Step2: Verify that models.TwoParameterLogisticModel can recover parameters. We would only expect this to be possible when USING_2PL = True.
Step3: Verify that models.MIRTModel can recover parameters
Step4: Verify that all models achieve similar training AUCs
Step5: Construct a synthetic embedding
Step6: Sample interactions from the synthetic embedding
Step7: Estimate an embedding from the sampled interactions
Step12: Visualize the estimated embedding vs. the true embedding
| <ASSISTANT_TASK:>
Python Code:
num_students = 2000
num_assessments = 3000
num_ixns_per_student = 1000
USING_2PL = False # False => using 1PL
proficiencies = np.random.normal(0, 1, num_students)
difficulties = np.random.normal(0, 1, num_assessments)
if USING_2PL:
discriminabilities = np.random.normal(0, 1, num_assessments)
else:
discriminabilities = np.ones(num_assessments)
student_ids = ['S'+str(x) for x in xrange(num_students)]
assessment_ids = ['A'+str(x) for x in xrange(num_assessments)]
ixns = [None] * (num_students * num_ixns_per_student)
assessment_idxes = range(num_assessments)
for student_idx, student_id in enumerate(student_ids):
for t in xrange(num_ixns_per_student):
module_idx = random.choice(assessment_idxes)
pass_likelihood = 1 / (1 + math.exp(-(discriminabilities[module_idx]*proficiencies[student_idx] + difficulties[module_idx])))
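        # 2PL item response function; with discriminabilities fixed at 1 (USING_2PL=False)
        # this reduces to the 1PL / Rasch model.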
ixns[student_idx * num_ixns_per_student + t] = {
'student_id' : student_id,
'module_id' : assessment_ids[module_idx],
'module_type' : datatools.AssessmentInteraction.MODULETYPE,
'outcome' : np.random.random() < pass_likelihood,
'timestep' : t+1
}
history = datatools.InteractionHistory(pd.DataFrame(ixns))
history.idx_of_student_id = lambda x: int(x[1:])
history.idx_of_assessment_id = lambda x: int(x[1:])
mirt_model = models.MIRTModel(history, dims=1, using_assessment_factors=USING_2PL)
estimator = est.MIRTMAPEstimator(
regularization_constant=1e-3,
ftol=1e-5,
debug_mode_on=True)
mirt_model.fit(estimator)
onepl_model = models.OneParameterLogisticModel(
history.data, select_regularization_constant=True)
onepl_model.fit()
twopl_model = models.TwoParameterLogisticModel(
history.data, select_regularization_constant=True)
twopl_model.fit()
student_idxes = [int(k[1:]) for k in history.data['student_id'].unique()]
assessment_idxes = [int(k[1:]) for k in history.data['module_id'].unique()]
plt.xlabel('True difficulties')
plt.ylabel('Estimated difficulties')
plt.scatter(difficulties[assessment_idxes], onepl_model.model.coef_[0, num_students:])
plt.show()
plt.xlabel('Estimated difficulty - true difficulty')
plt.ylabel('Frequency (number of assessments)')
plt.hist(onepl_model.model.coef_[0, num_students:] - difficulties[assessment_idxes], bins=20)
plt.show()
plt.xlabel('True proficiencies')
plt.ylabel('Estimated proficiencies')
plt.scatter(proficiencies[student_idxes], onepl_model.model.coef_[0, :num_students])
plt.show()
plt.xlabel('Estimated proficiency - true proficiency')
plt.ylabel('Frequency (number of students)')
plt.hist(onepl_model.model.coef_[0, :num_students] - proficiencies[student_idxes], bins=20)
plt.show()
plt.xlabel('True difficulties')
plt.ylabel('Estimated difficulties')
plt.scatter(difficulties[assessment_idxes], twopl_model.model.coef_[0, (num_students*num_assessments):])
plt.show()
plt.xlabel('Estimated difficulty - true difficulty')
plt.ylabel('Frequency (number of assessments)')
plt.hist(twopl_model.model.coef_[0, (num_students*num_assessments):] - difficulties[assessment_idxes], bins=20)
plt.show()
est_params = twopl_model.model.coef_[0, :(num_students*num_assessments)]
true_params = discriminabilities[:, None].dot(proficiencies[:, None].T).ravel()
plt.xlabel('True proficiency*discriminability')
plt.ylabel('Estimated proficiency*discriminability')
plt.scatter(true_params, est_params)
plt.show()
plt.xlabel('Estimated proficiency*discriminability - true proficiency*discriminability')
plt.ylabel('Frequency (number of student-assessment pairs)')
plt.hist(est_params - true_params, bins=20)
plt.show()
plt.xlabel('True difficulties')
plt.ylabel('Estimated difficulties')
plt.scatter(difficulties, mirt_model.assessment_offsets)
plt.show()
plt.xlabel('Estimated difficulty - true difficulty')
plt.ylabel('Frequency (number of assessments)')
plt.hist(mirt_model.assessment_offsets - difficulties, bins=20)
plt.show()
plt.xlabel('True proficiencies')
plt.ylabel('Estimated proficiencies')
plt.scatter(proficiencies, mirt_model.student_factors[:, 0])
plt.show()
plt.xlabel('Estimated proficiency - true proficiency')
plt.ylabel('Frequency (number of students)')
plt.hist(mirt_model.student_factors[:, 0] - proficiencies, bins=20)
plt.show()
plt.xlabel('True discriminabilities')
plt.ylabel('Estimated discriminabilities')
plt.scatter(discriminabilities, mirt_model.assessment_factors[:, 0])
plt.show()
plt.xlabel('Estimated discriminability - true discriminability')
plt.ylabel('Frequency (number of assessments)')
plt.hist(mirt_model.assessment_factors[:, 0] - discriminabilities, bins=20)
plt.show()
# models.OneParameterLogisticModel
evaluate.training_auc(onepl_model, history, plot_roc_curve=True)
# models.TwoParameterLogisticModel
evaluate.training_auc(twopl_model, history, plot_roc_curve=True)
# models.MIRTModel
evaluate.training_auc(mirt_model, history, plot_roc_curve=True)
# true model
true_model = copy.deepcopy(mirt_model)
true_model.student_factors[:, 0] = proficiencies
true_model.assessment_factors[:, 0] = discriminabilities
true_model.assessment_offsets = difficulties
evaluate.training_auc(true_model, history, plot_roc_curve=True)
num_students = 10000
num_assessment_interactions_per_step = 100
grid_size = 5
embedding_dimension = 2
num_assessments = grid_size ** 2
num_lessons = 2 * grid_size * (grid_size - 1)
num_lesson_interactions_per_student = 2 * (grid_size - 1) + 2
S = np.zeros((num_students, embedding_dimension, num_lesson_interactions_per_student))
A = np.zeros((num_assessments, embedding_dimension))
L = np.zeros((num_lessons, embedding_dimension))
Q = np.zeros((num_lessons, embedding_dimension))
lesson_idx_of_loc = {}
assessment_idx_of_loc = {}
cell_size = 10 / (grid_size - 1)
lesson_count = 0
for i in xrange(grid_size):
for j in xrange(grid_size):
A[grid_size * i + j, :] = [i, j]
assessment_idx_of_loc[(i, j)] = grid_size * i + j
if j < grid_size - 1:
Q[lesson_count, :] = [i, j]
L[lesson_count, :] = [0, 1]
lesson_idx_of_loc[(i, j, 0, 1)] = lesson_count
lesson_count += 1
if i < grid_size - 1:
Q[lesson_count, :] = [i, j]
L[lesson_count, :] = [1, 0]
lesson_idx_of_loc[(i, j, 1, 0)] = lesson_count
lesson_count += 1
A *= cell_size
Q *= cell_size
L *= cell_size
A = np.maximum(1e-3, A)
Q = np.maximum(1e-3, Q)
lesson_loc_of_idx = {v: k for k, v in lesson_idx_of_loc.iteritems()}
assessment_loc_of_idx = {v: k for k, v in assessment_idx_of_loc.iteritems()}
id_of_loc = lambda x: '-'.join(str(z) for z in x)
id_of_assessment_idx = lambda idx: id_of_loc(assessment_loc_of_idx[idx])  # used to label assessment points in the plots below
data = []
for student_idx in xrange(num_students):
student_id = 'S' + str(student_idx)
steps = ([(0, 1)] * (grid_size - 1)) + ([(1, 0)] * (grid_size - 1))
random.shuffle(steps)
x, y = 0, 0
t = 1
assessment_idx = assessment_idx_of_loc[(0, 0)]
assessment_id = id_of_loc(assessment_loc_of_idx[assessment_idx])
pass_likelihood = 1 / (1 + math.exp(-(np.dot(S[student_idx, :, t], A[assessment_idx, :]) / np.linalg.norm(A[assessment_idx, :]) - np.linalg.norm(A[assessment_idx, :]))))
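    # Pass probability increases with the projection of the student's skill vector onto
    # the assessment direction and decreases with the assessment's difficulty (its norm).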
outcome = random.random() < pass_likelihood
data.append({
'student_id' : student_id,
'module_id' : assessment_id,
'module_type' : datatools.AssessmentInteraction.MODULETYPE,
'timestep' : t,
'outcome' : outcome})
for i, j in steps:
lesson_idx = lesson_idx_of_loc[(x, y, i, j)]
lesson_id = id_of_loc(lesson_loc_of_idx[lesson_idx])
data.append({
'student_id' : student_id,
'module_id' : lesson_id,
'module_type' : datatools.LessonInteraction.MODULETYPE,
'timestep' : t,
'outcome' : None})
x += i
y += j
# DEBUG
S[student_idx, :, t+1] = S[student_idx, :, t] + L[lesson_idx, :]# / (1 + math.exp(-(np.dot(S[student_idx, :, t], Q[lesson_idx, :]) / np.linalg.norm(Q[lesson_idx, :]) - np.linalg.norm(Q[lesson_idx, :]))))
t += 1
for _ in xrange(num_assessment_interactions_per_step):
assessment_idx = random.randint(0, num_assessments - 1)
assessment_id = id_of_loc(assessment_loc_of_idx[assessment_idx])
pass_likelihood = 1 / (1 + math.exp(-(np.dot(S[student_idx, :, t], A[assessment_idx, :]) / np.linalg.norm(A[assessment_idx, :]) - np.linalg.norm(A[assessment_idx, :]))))
outcome = random.random() < pass_likelihood
# BEGIN DEBUG
if assessment_idx_of_loc[(0, 0)] == assessment_idx:
outcome = random.random() < 0.1
# END DEBUG
data.append({
'student_id' : student_id,
'module_id' : assessment_id,
'module_type' : datatools.AssessmentInteraction.MODULETYPE,
'timestep' : t,
'outcome' : outcome})
history = datatools.InteractionHistory(pd.DataFrame(data))
assessment_idx_map = {id_of_loc(loc): idx for idx, loc in assessment_loc_of_idx.iteritems()}
lesson_idx_map = {id_of_loc(loc): idx for idx, loc in lesson_loc_of_idx.iteritems()}
history.compute_idx_maps(assessment_idx=assessment_idx_map, lesson_idx=lesson_idx_map)
len(history.data)
history_path = os.path.join('data', 'lse_synthetic_history.pkl')
with open(history_path, 'wb') as f:
pickle.dump(history, f, pickle.HIGHEST_PROTOCOL)
model = models.EmbeddingModel(
history, embedding_dimension=2,
using_lessons=True, using_prereqs=False, using_bias=True,
learning_update_variance_constant=0.5)
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-3, using_scipy=True,
debug_mode_on=True, ftol=1e-4)
model.fit(estimator)
model = models.OneParameterLogisticModel(history.data, select_regularization_constant=True)
model.fit()
evaluate.training_auc(model, history, plot_roc_curve=True)
plt.scatter(A[:, 0], A[:, 1])
for assessment_idx in xrange(num_assessments):
plt.annotate(id_of_assessment_idx(assessment_idx), (A[assessment_idx, 0], A[assessment_idx, 1]))
for i in xrange(grid_size):
for j in xrange(grid_size):
if j < grid_size - 1:
assessment_idxes = [assessment_idx_of_loc[(i, j)], assessment_idx_of_loc[(i, j + 1)]]
plt.plot(A[assessment_idxes, 0], A[assessment_idxes, 1], c='black')
if i < grid_size - 1:
assessment_idxes = [assessment_idx_of_loc[(i, j)], assessment_idx_of_loc[(i + 1, j)]]
plt.plot(A[assessment_idxes, 0], A[assessment_idxes, 1], c='black')
plt.show()
plt.scatter(model.assessment_embeddings[:, 0], model.assessment_embeddings[:, 1])
for assessment_idx in xrange(num_assessments):
plt.annotate(id_of_assessment_idx(assessment_idx), (model.assessment_embeddings[assessment_idx, 0], model.assessment_embeddings[assessment_idx, 1]))
for i in xrange(grid_size):
for j in xrange(grid_size):
if j < grid_size - 1:
assessment_idxes = [assessment_idx_of_loc[(i, j)], assessment_idx_of_loc[(i, j + 1)]]
plt.plot(model.assessment_embeddings[assessment_idxes, 0], model.assessment_embeddings[assessment_idxes, 1], c='black')
if i < grid_size - 1:
assessment_idxes = [assessment_idx_of_loc[(i, j)], assessment_idx_of_loc[(i + 1, j)]]
plt.plot(model.assessment_embeddings[assessment_idxes, 0], model.assessment_embeddings[assessment_idxes, 1], c='black')
plt.show()
plt.quiver(Q[:, 0], Q[:, 1], L[:, 0], L[:, 1], pivot='tail', color='black')
for i in xrange(grid_size):
for j in xrange(grid_size):
if j < grid_size - 1:
lesson_idxes = [lesson_idx_of_loc[(i, j)], lesson_idx_of_loc[(i, j + 1)]]
plt.plot(Q[lesson_idxes, 0], Q[lesson_idxes, 1], c='black')
if i < grid_size - 1:
lesson_idxes = [lesson_idx_of_loc[(i, j)], lesson_idx_of_loc[(i + 1, j)]]
plt.plot(Q[lesson_idxes, 0], Q[lesson_idxes, 1], c='black')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.xlim([-1, 11])
plt.ylim([-1, 11])
plt.show()
plt.quiver(model.prereq_embeddings[:, 0], model.prereq_embeddings[:, 1], model.lesson_embeddings[:, 0], model.lesson_embeddings[:, 1], pivot='tail', color='black')
for i in xrange(grid_size):
for j in xrange(grid_size):
if j < grid_size - 1:
lesson_idxes = [lesson_idx_of_loc[(i, j)], lesson_idx_of_loc[(i, j + 1)]]
plt.plot(model.prereq_embeddings[lesson_idxes, 0], model.prereq_embeddings[lesson_idxes, 1], c='black')
if i < grid_size - 1:
lesson_idxes = [lesson_idx_of_loc[(i, j)], lesson_idx_of_loc[(i + 1, j)]]
plt.plot(model.prereq_embeddings[lesson_idxes, 0], model.prereq_embeddings[lesson_idxes, 1], c='black')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.xlim([-1, 11])
plt.ylim([-1, 11])
plt.show()
right_lesson_idxes = [lesson_idx_of_loc[(i, j, 1, 0)] for i in xrange(grid_size) for j in xrange(grid_size) if (i, j, 1, 0) in lesson_idx_of_loc]
up_lesson_idxes = [lesson_idx_of_loc[(i, j, 0, 1)] for i in xrange(grid_size) for j in xrange(grid_size) if (i, j, 0, 1) in lesson_idx_of_loc]
plt.quiver(0, 0, L[right_lesson_idxes, 0], L[right_lesson_idxes, 1], pivot='tail', color='red', alpha=0.25)
plt.quiver(0, 0, L[up_lesson_idxes, 0], L[up_lesson_idxes, 1], pivot='tail', color='blue', alpha=0.25)
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.xlim([-1, 11])
plt.ylim([-1, 11])
plt.show()
plt.quiver(0, 0, model.lesson_embeddings[right_lesson_idxes, 0], model.lesson_embeddings[right_lesson_idxes, 1], pivot='tail', color='red', alpha=0.25)
plt.quiver(0, 0, model.lesson_embeddings[up_lesson_idxes, 0], model.lesson_embeddings[up_lesson_idxes, 1], pivot='tail', color='blue', alpha=0.25)
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.xlim([-1, 11])
plt.ylim([-1, 11])
plt.show()
plt.scatter(L[right_lesson_idxes, 0], L[right_lesson_idxes, 1], color='red', label='1-0')
plt.scatter(L[up_lesson_idxes, 0], L[up_lesson_idxes, 1], color='blue', label='0-1')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.legend(loc='best')
plt.show()
plt.scatter(model.lesson_embeddings[right_lesson_idxes, 0], model.lesson_embeddings[right_lesson_idxes, 1], color='red', label='1-0')
plt.scatter(model.lesson_embeddings[up_lesson_idxes, 0], model.lesson_embeddings[up_lesson_idxes, 1], color='blue', label='0-1')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.legend(loc='best')
plt.show()
student_idxes = random.sample(range(num_students), 10)
for student_idx in student_idxes:
plt.scatter(S[student_idx, 0, :], S[student_idx, 1, :], c='black')
for i in xrange(num_lesson_interactions_per_student):
plt.plot(S[student_idx, 0, i:(i+2)], S[student_idx, 1, i:(i+2)], c='black')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.title('student_id = %s' % history.id_of_student_idx(student_idx))
plt.show()
for student_idx in student_idxes:
plt.scatter(model.student_embeddings[student_idx, 0, :], model.student_embeddings[student_idx, 1, :], c='black')
for i in xrange(num_lesson_interactions_per_student):
plt.plot(model.student_embeddings[student_idx, 0, i:(i+2)], model.student_embeddings[student_idx, 1, i:(i+2)], c='black')
plt.xlabel('Skill 1')
plt.ylabel('Skill 2')
plt.title('student_id = %s' % history.id_of_student_idx(student_idx))
plt.show()
for student_idx in student_idxes:
for i in xrange(embedding_dimension):
plt.plot(S[student_idx, i, :], '-s', label='Skill 1')
plt.xlabel('Timestep')
plt.ylabel('Skill')
plt.title('student_id = %s' % history.id_of_student_idx(student_idx))
plt.legend(loc='best')
plt.show()
for student_idx in student_idxes:
for i in xrange(embedding_dimension):
plt.plot(model.student_embeddings[student_idx, i, :], '-s', label='Skill 1')
plt.xlabel('Timestep')
plt.ylabel('Skill')
plt.title('student_id = %s' % history.id_of_student_idx(student_idx))
plt.legend(loc='best')
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each variable holds data, a gradient, and information about the function that created it.
Step2: Compare the above computations to the graph below, and then the one below that!
Step3: Since the derivative of w*x wrt x is w, and vice versa
Step4: $ y = 2w + b$, since $x = 2$
Step5: Let's compute, $\displaystyle\frac{\partial}{\partial a}(3a^2 + 2a + 1)$ when $a = 2$ <br>
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import torch

x = torch.Tensor(5, 3)
print(x)
x.zero_()
torch.Tensor([[1, 2, 3], # rank 2 tensor
[4, 5, 6],
[7, 8, 9]])
x.size()
x = torch.rand(5, 3)
print(x)
npy = np.random.rand(5, 3)
y = torch.from_numpy(npy)
print(y)
z = x + y #can we do this addition?
x.type(), y.type()
z = x + y.float()
print(z)
torch.add(x, y.float())
x
x.add_(1)
x
x[:2, :2]
x * y.float()
torch.exp(x)
torch.transpose(x, 0, 1)
#transposing, indexing, slicing, mathematical operations, linear algebra, random numbers
torch.trace(x)
x.numpy()
torch.cuda.is_available()
if torch.cuda.is_available():
x_gpu = x.cuda()
print(x_gpu)
from torch.autograd import Variable
x = torch.rand(5, 3)
print(x)
x = Variable(torch.rand(5, 3))
print(x)
x.data
# should be nothing right now
assert x.grad_fn is None
assert x.grad is None
x = Variable(torch.Tensor([2]), requires_grad=True)
w = Variable(torch.Tensor([3]), requires_grad=True)
b = Variable(torch.Tensor([1]), requires_grad=True)
z = x * w
y = z + b
y
y.backward()
y, b
w.grad, w
x.grad, x
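# Expected analytic gradients at x=2, w=3, b=1: dy/dw = x = 2, dy/dx = w = 3, dy/db = 1.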
a = Variable(torch.Tensor([2]), requires_grad=True)
y = 3*a*a + 2*a + 1
y.backward()
a.grad
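# Expected analytic gradient: dy/da = 6*a + 2 = 14 at a = 2.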
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameterization within SQL queries
Step2: This defined a SQL query with a string parameter named endpoint, which can be filled when executing the query. Let's give it some value in a separate cell
Step3: In order to reference the variable defined above, Google Cloud Datalab offers the $var syntax, which can be invoked in the magic command
Step4: This can also be achieved using the Python API instead of the magic commands (%%bq). This is how we will create and execute a parameterized query using the API
| <ASSISTANT_TASK:>
Python Code:
%%bq query -n logs_query
SELECT * FROM `cloud-datalab-samples.httplogs.logs_20140615`
%bq sample -q logs_query --count 10
%%bq query
SELECT endpoint FROM `cloud-datalab-samples.httplogs.logs_20140615` GROUP BY endpoint
%%bq query -n endpoint_stats
SELECT *
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = @endpoint
LIMIT 10
%%bq execute -q endpoint_stats
parameters:
- name: endpoint
type: STRING
value: Interact2
endpoint_val = 'Interact3'
%%bq execute -q endpoint_stats
parameters:
- name: endpoint
type: STRING
value: $endpoint_val
import google.datalab.bigquery as bq
endpoint_stats2 = bq.Query(sql='''
SELECT *
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = @endpoint
LIMIT 10
''')
endpoint_value = 'Interact3'
query_parameters = [
{
'name': 'endpoint',
'parameterType': {'type': 'STRING'},
'parameterValue': {'value': endpoint_value}
}
]
job = endpoint_stats2.execute(query_params=query_parameters)
job.result()
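# Illustrative follow-up (assumes the Datalab results object exposes a
# to_dataframe() helper, as recent google.datalab releases do): convert the
# query results to a pandas DataFrame for further analysis.
df = job.result().to_dataframe()
df.head()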
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Estimating the angle of rotation
Step2: The function can be applied as follows.
Step3: Estimating the angle of rotation and the translation
Step4: The following code finds the angle of rotation and the translation. Then, the original image is recovered
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import sys,os
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
%matplotlib inline
import matplotlib.image as mpimg
#f = ia.normalize(ia.gaussian((151,151), [[75],[75]], [[800,0],[0,800]]), [0,200]).astype(uint8)
f = mpimg.imread("../data/astablet.tif")
H,W = f.shape
f = f[:,H//2:H//2+H]
#ia.adshow(ia.isolines(f,10,3), "Image in cartesian coordinates")
g = ia.polar(f,(150,200),2*np.pi)
ia.adshow(f)
ia.adshow(g)
#ia.adshow(ia.isolines(g.astype(int),10,3), "Image in polar coordinates")
#adshow(g, "Image in polar coordinates")
f1 = f
f2 = f.T[:,::-1]
g2 = ia.polar(f2,(150,200),2*np.pi)
ia.adshow(f2)
ia.adshow(g2)
nb = ia.nbshow(2)
nb.nbshow(g)
nb.nbshow(g2)
nb.nbshow()
h = ia.phasecorr(g,g2)
print(h.shape)
ia.adshow(ia.normalize(h))
i = np.argmax(h)
row,col = np.unravel_index(i,h.shape)
v = h[row,col]
print(np.array(g.shape) - np.array((row,col)))
print(v)
def rotphasecorr2d(f,h):
F = np.fft.fftn(f)
H = np.fft.fftn(h)
pF = ia.polar(ia.dftview(F),(F.shape[0]/2,360),np.pi)
pH = ia.polar(ia.dftview(H),(H.shape[0]/2,360),np.pi)
return ia.phasecorr(pF, pH)
f = mpimg.imread("../data/cameraman.tif")
print(f.dtype)
t = np.zeros(np.array(f.shape)+200,dtype=np.uint8)
t[100:f.shape[0]+100,100:f.shape[1]+100] = f
f = t
t1 = np.array([
[1,0,-f.shape[0]/2.],
[0,1,-f.shape[1]/2.],
[0,0,1]]);
t2 = np.array([
[1,0,f.shape[0]/2.],
[0,1,f.shape[1]/2.],
[0,0,1]]);
theta = np.radians(30)
r1 = np.array([
[np.cos(theta),-np.sin(theta),0],
[np.sin(theta),np.cos(theta),0],
[0,0,1]]);
T = t2.dot(r1).dot(t1)
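# Note (illustrative): T first shifts the image centre to the origin (t1), then
# rotates by theta (r1), then shifts back (t2); composing homogeneous transforms
# is just a matrix product, applied right-to-left.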
print(f.dtype)
f1 = ia.affine(f,T,0)
#f1.shape = f.shape
nb.nbshow(f, "f:Original image")
nb.nbshow(f1, "f1:Image rotated by 30°")
nb.nbshow()
nb = ia.nbshow(2)
F = np.fft.fftn(f)
F1 = np.fft.fftn(f1)
FS = ia.dftview(F)
F1S = ia.dftview(F1)
nb.nbshow(FS,'FS')
nb.nbshow(F1S,'F1S')
nb.nbshow()
pFS = ia.polar(FS,(FS.shape[0]//2,360),np.pi)
pF1S = ia.polar(F1S,(F1S.shape[0]//2,360),np.pi)
nb.nbshow(ia.normalize(pFS),'polar FS')
nb.nbshow(ia.normalize(pF1S),'polar F1S')
nb.nbshow()
pg = ia.phasecorr(pFS,pF1S)
ia.adshow(ia.normalize(pg))
peak = np.unravel_index(np.argmax(pg), pg.shape)
# Calculate the angle
ang = (float(peak[1])/pg.shape[1])*180
print(ang)
import scipy
def trphasecorr2d(f,h):
rg = ia.rotphasecorr2d(f,h)
    peak = np.unravel_index(np.argmax(rg), rg.shape)
ang = (float(peak[1])/rg.shape[1])*180
h_rot = scipy.ndimage.interpolation.rotate(h, -ang, reshape=False)
g = ia.phasecorr(f,h_rot)
return g, rg
t3 = np.array([
[1,0,50],
[0,1,32],
[0,0,1]]);
T = np.dot(t3,T)
h = ia.affine(f,T,0)
h.shape = f.shape
ia.adshow(f, "Original image")
ia.adshow(h, "Image rotated by 30° and translated by (50,32)")
g, rg = trphasecorr2d(f,h)
g = ia.normalize(g)
rg = ia.normalize(rg)
trans_peak = np.unravel_index(np.argmax(g), g.shape)
rot_peak = np.unravel_index(np.argmax(rg), rg.shape)
ang = (float(rot_peak[1])/rg.shape[1])*180
trans = (np.array(h.shape)-np.array(trans_peak))
ia.adshow(g, "Translation correlation map - Peak %s, \n corresponds to translation %s"%(str(trans_peak), str(tuple(trans))))
ia.adshow(ia.normalize(rg), "Rotation correlation map - Peak %s, corresponds to angle %f°"%(str(rot_peak),ang))
t4 = np.array([
[1,0,-trans[0]],
[0,1,-trans[1]],
[0,0,1]]);
theta1 = np.radians(-ang)
r2 = np.array([
[np.cos(theta1),-np.sin(theta1),0],
[np.sin(theta1),np.cos(theta1),0],
[0,0,1]]);
T1 = np.dot(t4, np.dot(t2, np.dot(r2, t1)))
f1 = ia.affine(h,T1,0)
f1.shape = h.shape
ia.adshow(f1, "Sample image rotated and translated by %f° and %s, respectively"%(-ang,tuple(-trans)))
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we plot the two classes as a scatter plot!
Step2: Now we want to classify this synthetic data using the perceptron model, which will be trained on this data and then tested on the same data (this is called a self-classification test). To proceed we first need to train the perceptron model using the theory above.
Step3: Now how do we solve for the parameters? Easy: we apply simple gradient descent on the objective function (the function of the parameters to be estimated, which is to be minimized). Taking the derivative of equation (2) gives the update rule $w \leftarrow w + \eta \sum_{n \in \mathcal{M}} \phi(x_n)\, t_n$, where $\mathcal{M}$ is the set of currently misclassified points and $\eta$ is the step size.
Step4: We can see that this perceptron model classifies the data very well
Step5: Try changing N and see how the prediction changes! Next we will see how this helps with neural networks. If the fit fails, try adjusting eta and the number of iterations (they may need tuning, since we generate different random data each time). Also note that the two lines need not coincide, because the same partition can be produced by many different lines.
Step9: Now let's start with the class definition
Step10: That concludes our code for a general neural network! Now let us look at how it works; first let's take a very simple example.
Step11: We see this works pretty well where the perceptron totally fails! Now let's do something more challenging.
Step12: Now let's again apply the three-layer neural network to this classification task and see how the self- and cross-classification errors behave.
Step13: The plot looks very similar; we compute the classification error from the absolute difference between the two label maps.
Step14: That was all simulated data, so let us look at some real-life applications of neural networks. Even basic neural networks make a big impact in practice: image classification, digit recognition, financial data analysis, time series prediction, weather prediction, etc.
Step15: We will test the self-classification and see how it performs. The parameters below were set after extensive experimentation.
Step16: This classification does not work particularly well, but there are many additions to basic neural networks (momentum, for example, is worth reading about) that can improve it substantially. The same problem is tackled here with such additions to reach the required accuracy; it may also simply work better with other parameter settings.
Step17: Let's do some stylish plotting
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# now we genrate the data
N = 30
x = np.zeros(N, dtype=np.float64)
y = np.zeros(N, dtype=np.float64)
for k in range(N):
x[k], y[k] = [np.random.uniform(-1,1) for i in range(2)]
a = np.random.uniform(-1,1)
b = np.random.uniform(-1,1)
c = np.random.uniform(-1,1)
label = np.ones(N) # stores the labels for two classes, 1 for C1 and -1 for C2
xa = []
ya = []
xb = []
yb = []
N1 = 0
N2 = 0
# the random line divides the points into two classes of size N1 and N2
for k in range(N):
temp = a*x[k] + b*y[k] + c
if temp > 0:
xa.append(x[k])
ya.append(y[k])
N1 += 1
else:
label[k] = -1
xb.append(x[k])
yb.append(y[k])
N2 += 1
plt.scatter(xa, ya, color = 'b')
plt.hold(True)
plt.scatter(xb, yb, color = 'r')
plt.title('Scatter plot of the data, N = 30')
w = np.ones(3, dtype=np.float64) # the weights
iter_max = 2000 # maximum number of iterations
error = 1000.0 # randomly initilize the classification error
it = 0 # variable to store the iteration number
eta = 0.001 # the step size (try varying this)
classified_labels = np.ones(N)
while (error != 0 and it < iter_max):
# Update Rules
    temp = np.zeros(3, dtype=np.float64)
for i in range(N):
if label[i] != classified_labels[i]:
temp += eta * np.array([x[i], y[i], 1]) * label[i]
w += temp
# recompute the classification
for i in range(N):
temp1 = w[0]*x[i] + w[1]*y[i] + w[2]
if temp1 > 0:
classified_labels[i] = 1
else:
classified_labels[i] = -1
# compute the misclassification error
error = 0
for i in range(N):
temp1 = w[0]*x[i] + w[1]*y[i] + w[2]
if label[i] != classified_labels[i]:
error += - label[i] * temp1
w = w / np.linalg.norm(w, ord=2)
it +=1
print("Done!")
x = np.linspace(-1,1,100)
y = -(w[0] * x + w[2]) / w[1]
plt.scatter(xa, ya, color = 'b')
plt.hold(True)
plt.scatter(xb, yb, color = 'r')
plt.plot(x,y, color='k')
plt.title('Perceptron classified data (the line)')
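# Illustrative check: self-classification accuracy of the trained perceptron on
# the data it was fit on; for separable data this should be close to 1.0.
print('self-classification accuracy:', np.mean(classified_labels == label))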
x = np.linspace(-1,1,100)
y = -(w[0] * x + w[2]) / w[1]
plt.plot(x,y,color='b')
x = np.linspace(-1,1,100)
y = -(a * x + c) / b
plt.hold(True)
plt.plot(x,y,color='r')
plt.legend(['predicted', 'original'])
def tanh(x):
return np.tanh(x)
def tanh_derv(x):
# work the math out
return 1.0 - np.tanh(x)**2
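# Illustrative check: compare tanh_derv against a central finite difference
# at a few points to confirm d/dx tanh(x) = 1 - tanh(x)^2.
_xs = np.array([-1.0, 0.0, 0.5])
_eps = 1e-6
print(np.allclose(tanh_derv(_xs), (tanh(_xs + _eps) - tanh(_xs - _eps)) / (2 * _eps), atol=1e-6))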
class neural_network:
def __init__(self, layers):
layers: A list containing the number of units in each layer.
Should be at least two values (this also includes the input layer)
actfn : String to select the type of activation function
self.act_fn = tanh # the activation function of the hidden layers
self.act_fn_derv = tanh_derv # the derivative of the activation function of the hidden layers
self.weights = [] # list of the weights
# now we need to initialize these weights
# We have to keep in mind that every layer except for the output will have one extra bias unit
# we initialize the weights randomly between [-0.5,0.5]
for i in range(1,len(layers) - 1):
self.weights.append(0.5 * (2 * np.random.random([layers[i-1] + 1, layers[i] + 1]) - 1))
# and for between the last hidden layer and the output layer
self.weights.append(0.5 * (2 * np.random.random([layers[i] + 1, layers[i + 1]]) - 1))
# This is the training function
def train(self, X, t, eta = 0.05, max_iter = 10000):
X: Training data input set X = Nxdi, di= dimension of the input data > 1, N = number of data points
t: Training data output set, t = Nxdo , do = dimension of the output data
eta : double
it is the step size for the stochastic gradient descent
max_iter : int
Number of iterations of the gradient descent
# first we need to add another unit to the input
# as the input we have provided does not have a
# bias input (which we set as one)
X = np.atleast_2d(X) # google this command! it's just for smooth computation of matrices
temp = np.ones((X.shape[0], X.shape[1] + 1))
temp[:, 0:X.shape[1]] = X
X = temp
# Now for the stochastic gradient descent iterations
for k in range(max_iter):
# choose one of the input randomly (which is what we do in the stochastic gradient descent)
i = np.random.randint(X.shape[0])
# First the forward pass to get all the estimates
a = [X[i]] # this is the input we will use, it will also store all the activations and inputs
# for each hidden + output layer
for j in range(len(self.weights)):
# get the units and pass them through activation function
z = self.act_fn(a[j].dot(self.weights[j]))
# append to the activation list
a.append(z)
# compute the error from the target data
err = t[i] - a[-1] # the -1 gives the last element = output of the net
# Backward pass to estimate all deltas
# first generate a list of delts for each layer
deltas = [err * self.act_fn_derv(a[-1])] # this is by combining eq(12) and (14)
# we start from the second to last layer and go till the input (backward is the word)
for j in range(len(a) - 2, 0, -1):
# first apply backpropogation formula (14)
bp = self.act_fn_derv(a[j]) * deltas[-1].dot(np.transpose(self.weights[j]))
# now append it to deltas
deltas.append(bp)
# the deltas are in from output to input order so we reverse it
deltas.reverse()
# compute the gradient and apply the update
for j in range(len(self.weights)):
# Some precessing for the product computation and robustness, google the command atleast_2d
frl = np.atleast_2d(a[j])
bwl = np.atleast_2d(deltas[j])
# Application of equation (11) and (15)
self.weights[j] += eta * np.transpose(frl).dot(bwl)
# now the prediction step
def predict_out(self, x):
x: The input data to which we have to give the output
# this is a simple forward pass like w did in the training except just the output is needed
x = np.array(x)
temp = np.ones(x.shape[0] + 1)
temp[0:x.shape[0]] = x
a = temp
for j in range(len(self.weights)):
a = self.act_fn(a.dot(self.weights[j]))
return a
nn = neural_network([2,2,1])
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([0, 1, 1, 0])
# now train on this data
nn.train(inputs, targets)
for i in [[0, 0], [0, 1], [1, 0], [1,1]]:
print(i,nn.predict_out(i))
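# Illustrative note: the outputs above are tanh activations in (-1, 1);
# thresholding them turns the network into a hard classifier.
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, (nn.predict_out(i) > 0.5).astype(int))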
N = 1000
x = np.zeros(N, dtype=np.float64)
y = np.zeros(N, dtype=np.float64)
for k in range(N):
x[k], y[k] = [np.random.uniform(-1,1) for i in range(2)]
label = np.ones(N) # stores the labels for two classes, 1 for C1 and -1 for C2
xa = []
ya = []
xb = []
yb = []
N1 = 0
N2 = 0
# lets take the function y = 2x^2 - 1 to divide the points
for k in range(N):
temp = y[k] - 2*x[k]**2 + 1
if temp > 0:
xa.append(x[k])
ya.append(y[k])
N1 += 1
else:
label[k] = -1
xb.append(x[k])
yb.append(y[k])
N2 += 1
plt.scatter(xa, ya, color = 'b')
plt.hold(True)
plt.scatter(xb, yb, color = 'r')
plt.title('Scatter plot of the data, N = 500')
nn = neural_network([2, 2, 1])
inputs = np.zeros([N, 2])
inputs[:,0] = x
inputs[:,1] = y
nn.train(inputs, label, eta = 0.05)
labelout = np.ones(N)
xao = []
yao = []
xbo = []
ybo = []
for i in range(N):
out = nn.predict_out(inputs[i,:])
if(out > 0):
xao.append(inputs[i,0])
yao.append(inputs[i,1])
else:
xbo.append(inputs[i,0])
ybo.append(inputs[i,1])
labelout[i] = -1
plt.scatter(xao, yao, color = 'b')
plt.hold(True)
plt.scatter(xbo, ybo, color = 'r')
plt.title('Scatter plot of output')
# Self classification error
err = np.sum(np.abs(label - labelout))
err = err / len(label)
print('Self Classification Error = ', err)
import xlrd
book = xlrd.open_workbook('BreastTissue.xls')
# this has two sheets in it first one is of description and the second is data (which is what we want)
print(book.sheet_names())
sh = book.sheet_by_index(1) # the sheet for data
# we see the dimentions
print('number of rows', sh.nrows, 'number of coloumns', sh.ncols)
# we do not want the 1st row and the first coloumn (SEE THE DATA FILE)
# The second coloumn is of the class and the rest are feature points
# So we define the input and output
X = np.zeros([sh.nrows - 1, sh.ncols - 2]) # input to NN
y = np.zeros([sh.nrows - 1 , 6] , dtype=np.float64) # output of NN for training
for i in range(1,sh.nrows):
temp = np.str(sh.row(i)[1].value)
if temp == 'car':
y[i-1, 0] = 1
if temp == 'fad':
y[i-1, 1] = 1
if temp == 'mas':
y[i-1, 2] = 1
if temp == 'gla':
y[i-1, 3] = 1
if temp == 'con':
y[i-1, 4] = 1
if temp == 'adi':
y[i-1, 5] = 1
for j in range(2,sh.ncols):
X[i-1,j - 2] = sh.row(i)[j].value
# we also have to normalize the data before using it
for i in range(9):
X[:,i] = 1 - (X[:,i] - np.min(X[:,i])) / (np.max(X[:,i]) - np.min(X[:,i]))
nn = neural_network([9, 12, 6])
nn.train(X, y, eta = 0.1, max_iter = 100000)
misshits = 0
# First the self classification error
for i in range(X.shape[0]):
temp = nn.predict_out(X[i,:])
temp[temp != np.max(temp)] = 0
temp[temp > 0] = 1
misshits += np.sum(np.abs(temp - y[i,:])) / 2.0 # think why this would be misshits
# print("predicted", temp)
# print("actual", y[i,:])
print("Self Classification numer of misshits out of 106", misshits)
import sklearn
from sklearn.metrics import confusion_matrix
# Figure out why our confusion matrix will be of size 6x6
predict_arr = []
actual_arr = []
for i in range(X.shape[0]):
temp = nn.predict_out(X[i,:])
temp[temp != np.max(temp)] = 0
temp[temp > 0] = 1
if temp[0] == 1.0:
predict_arr.append('car')
if temp[1] == 1.0:
predict_arr.append('fad')
if temp[2] == 1.0:
predict_arr.append('mas')
if temp[3] == 1.0:
predict_arr.append('glu')
if temp[4] == 1.0:
predict_arr.append('con')
if temp[5] == 1.0:
predict_arr.append('adi')
temp = y[i,:]
if temp[0] == 1:
actual_arr.append('car')
if temp[1] == 1:
actual_arr.append('fad')
if temp[2] == 1:
actual_arr.append('mas')
if temp[3] == 1:
actual_arr.append('glu')
if temp[4] == 1:
actual_arr.append('con')
if temp[5] == 1:
actual_arr.append('adi')
conf_mat = confusion_matrix(actual_arr, predict_arr, labels = ['car', 'fad', 'mas', 'glu', 'con', 'adi'])
conf_mat
conf_arr = conf_mat
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.Spectral,
interpolation='none')
ax.grid('off')
width, height = np.array(conf_arr).shape
for x in range(width):
    for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
cb = fig.colorbar(res)
alphabet = ['car', 'fad','mas', 'glu', 'con', 'adi']
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Lorenz system
Step4: Write a function solve_lorenz that solves the Lorenz system above for a particular initial condition $[x(0),y(0),z(0)]$. Your function should return a tuple of the solution array and time array.
Step6: Write a function plot_lorentz that plots [x(t), z(t)] trajectories of the Lorenz system for N random initial conditions drawn uniformly from [-15, 15].
Step7: Use interact to explore your plot_lorenz function with sliders for N, max_time, sigma and rho (keeping beta fixed at 8/3).
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
def lorentz_derivs(yvec, t, sigma, rho, beta):
    Compute the derivatives for the Lorenz system at yvec(t).
x,y,z = yvec[0], yvec[1], yvec[2]
sol = [0,0,0]
sol[0] = sigma*(y-x)
sol[1] = x*(rho-z)-y
sol[2] = x*y - beta*z
return sol
np.zeros((2,3))
assert np.allclose(lorentz_derivs((1,1,1),0, 1.0, 1.0, 2.0),[0.0,-1.0,-1.0])
def solve_lorentz(ic, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
Solve the Lorenz system for a single initial condition.
Parameters
----------
ic : array, list, tuple
Initial conditions [x,y,z].
max_time: float
The max time to use. Integrate with 250 points per time unit.
sigma, rho, beta: float
Parameters of the differential equation.
Returns
-------
soln : np.ndarray
The array of the solution. Each row will be the solution vector at that time.
t : np.ndarray
The array of time points used.
t = np.linspace(0,max_time,int(250*max_time))
ans = odeint(lorentz_derivs, ic, t, args=(sigma, rho, beta))
return ans, t
solve_lorentz([0, 0, 0])
assert True # leave this to grade solve_lorenz
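# Illustrative smoke test: the solution and time arrays should have matching
# lengths, with 250 points per unit of max_time.
_soln, _t = solve_lorentz([1.0, 1.0, 1.0], max_time=2.0)
print(_soln.shape, _t.shape)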
N = 5
colors = plt.cm.hot(np.linspace(0,1,N))
for i in range(N):
# To use these colors with plt.plot, pass them as the color argument
print(colors[i])
np.random.seed(1)
30 * np.random.random_sample((3,)) - 15
30 * np.random.random_sample((3,)) - 15
def plot_lorentz(N=10, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
Plot [x(t),z(t)] for the Lorenz system.
Parameters
----------
N : int
Number of initial conditions and trajectories to plot.
max_time: float
Maximum time to use.
sigma, rho, beta: float
Parameters of the differential equation.
np.random.seed(1)
colors = plt.cm.hot(np.linspace(0,1,N))
for i in range(N):
ic = 30 * np.random.random_sample((3,)) - 15
ans, t = solve_lorentz(ic, max_time, sigma, rho, beta)
c = colors[i]
        plt.plot(ans[:, 0], ans[:, 2], color=c)
plt.xlim(-15,15)
plt.ylim(-15,20)
plt.xlabel('x(t)')
plt.ylabel('z(t)')
plt.title('Lorentz System')
plot_lorentz()
assert True # leave this to grade the plot_lorenz function
interact(plot_lorentz,N=(1,50,1), max_time = (1,10,1), sigma = (0.0,50.0), rho=(0.0,50.0), beta =fixed(8/3) )
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Client-efficient large-model federated learning via federated_select and sparse aggregation
Step2: Each client will federated_select the rows of the model weights for at most this many unique tokens. This upper-bounds the size of the client's local model and the amount of server -> client (federated_select) and client -> server (federated_aggregate) communication performed.
Step3: We also define a few constants for various types. For this colab, a token is an integer identifier for a particular word after parsing the dataset.
Step6: Setting up the problem
Step7: A tiny toy dataset
Step8: Now, we create 3 clients with small local datasets. If you are running this tutorial in colab, it may be useful to use the "mirror cell in tab" feature to pin this cell and its output in order to interpret/check the output of the functions developed below.
Step9: Define constants for the raw numbers of input features (tokens/words) and labels (post tags). Our actual input/output spaces are NUM_OOV_BUCKETS = 1 larger because we add an OOV token / tag.
Step10: Create batched versions of the datasets, and individual batches, which will be useful in testing code as we go.
Step11: Define a model with sparse inputs
Step12: Let's make sure it works, first by making predictions
Step13: And some simple centralized training
Step15: Building blocks for the federated computation
Step17: We will select the model parameters corresponding to the MAX_TOKENS_SELECTED_PER_CLIENT most frequently occuring tokens on device. If
Step18: Map global tokens to local tokens
Step20: Train the local (sub)model on each client
Step21: We now have all the components we need to define a simple local training loop which will run on each client.
Step23: Aggregate IndexedSlices
Step24: Construct a minimal federated_computation as a test
Step25: Putting it all together in a federated_computation
Step26: We use a basic server training function based on Federated Averaging, applying the update with a server learning rate of 1.0. It is important that we apply an update (delta) to the model, rather than simply averaging client-supplied models, as otherwise if a given slice of the model wasn't trained on by any client on a given round its coefficients could be zeroed out.
Step27: We need a couple more tff.tf_computation components
Step28: We're now ready to put all the pieces together!
Step29: Let's train a model!
| <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import itertools
import numpy as np
from typing import Callable, List, Tuple
import tensorflow as tf
import tensorflow_federated as tff
tff.backends.native.set_local_python_execution_context()
MAX_TOKENS_SELECTED_PER_CLIENT = 6
# There are some constraints on types
# here that will require some explicit type conversions:
# - `tff.federated_select` requires int32
# - `tf.SparseTensor` requires int64 indices.
TOKEN_DTYPE = tf.int64
SELECT_KEY_DTYPE = tf.int32
# Type for counts of token occurrences.
TOKEN_COUNT_DTYPE = tf.int32
# A sparse feature vector can be thought of as a map
# from TOKEN_DTYPE to FEATURE_DTYPE.
# Our features are {0, 1} indicators, so we could potentially
# use tf.int8 as an optimization.
FEATURE_DTYPE = tf.int32
NUM_OOV_BUCKETS = 1
BatchType = collections.namedtuple('BatchType', ['tokens', 'tags'])
def build_to_ids_fn(word_vocab: List[str],
tag_vocab: List[str]) -> Callable[[tf.Tensor], tf.Tensor]:
Constructs a function mapping examples to sequences of token indices.
word_table_values = np.arange(len(word_vocab), dtype=np.int64)
word_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(word_vocab, word_table_values),
num_oov_buckets=NUM_OOV_BUCKETS)
tag_table_values = np.arange(len(tag_vocab), dtype=np.int64)
tag_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(tag_vocab, tag_table_values),
num_oov_buckets=NUM_OOV_BUCKETS)
def to_ids(example):
Converts a Stack Overflow example to a bag-of-words/tags format.
sentence = tf.strings.join([example['tokens'], example['title']],
separator=' ')
# We represent that label (output tags) densely.
raw_tags = example['tags']
tags = tf.strings.split(raw_tags, sep='|')
tags = tag_table.lookup(tags)
tags, _ = tf.unique(tags)
tags = tf.one_hot(tags, len(tag_vocab) + NUM_OOV_BUCKETS)
tags = tf.reduce_max(tags, axis=0)
# We represent the features as a SparseTensor of {0, 1}s.
words = tf.strings.split(sentence)
tokens = word_table.lookup(words)
tokens, _ = tf.unique(tokens)
# Note: We could choose to use the word counts as the feature vector
# instead of just {0, 1} values (see tf.unique_with_counts).
tokens = tf.reshape(tokens, shape=(tf.size(tokens), 1))
tokens_st = tf.SparseTensor(
tokens,
tf.ones(tf.size(tokens), dtype=FEATURE_DTYPE),
dense_shape=(len(word_vocab) + NUM_OOV_BUCKETS,))
tokens_st = tf.sparse.reorder(tokens_st)
return BatchType(tokens_st, tags)
return to_ids
def build_preprocess_fn(word_vocab, tag_vocab):
@tf.function
def preprocess_fn(dataset):
to_ids = build_to_ids_fn(word_vocab, tag_vocab)
# We *don't* shuffle in order to make this colab deterministic for
# easier testing and reproducibility.
# But real-world training should use `.shuffle()`.
return dataset.map(to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return preprocess_fn
# Features
FRUIT_WORDS = ['apple', 'orange', 'pear', 'kiwi']
VEGETABLE_WORDS = ['carrot', 'broccoli', 'arugula', 'peas']
FISH_WORDS = ['trout', 'tuna', 'cod', 'salmon']
WORD_VOCAB = FRUIT_WORDS + VEGETABLE_WORDS + FISH_WORDS
# Labels
TAG_VOCAB = ['FRUIT', 'VEGETABLE', 'FISH']
preprocess_fn = build_preprocess_fn(WORD_VOCAB, TAG_VOCAB)
def make_dataset(raw):
d = tf.data.Dataset.from_tensor_slices(
# Matches the StackOverflow formatting
collections.OrderedDict(
tokens=tf.constant([t[0] for t in raw]),
tags=tf.constant([t[1] for t in raw]),
title=['' for _ in raw]))
d = preprocess_fn(d)
return d
# 4 distinct tokens
CLIENT1_DATASET = make_dataset([
('apple orange apple orange', 'FRUIT'),
('carrot trout', 'VEGETABLE|FISH'),
('orange apple', 'FRUIT'),
('orange', 'ORANGE|CITRUS') # 2 OOV tag
])
# 6 distinct tokens
CLIENT2_DATASET = make_dataset([
('pear cod', 'FRUIT|FISH'),
('arugula peas', 'VEGETABLE'),
('kiwi pear', 'FRUIT'),
('sturgeon', 'FISH'), # OOV word
('sturgeon bass', 'FISH') # 2 OOV words
])
# A client with all possible words & tags (13 distinct tokens).
# With MAX_TOKENS_SELECTED_PER_CLIENT = 6, we won't download the model
# slices for all tokens that occur on this client.
CLIENT3_DATASET = make_dataset([
(' '.join(WORD_VOCAB + ['oovword']), '|'.join(TAG_VOCAB)),
# Mathe the OOV token and 'salmon' occur in the largest number
# of examples on this client:
('salmon oovword', 'FISH|OOVTAG')
])
print('Word vocab')
for i, word in enumerate(WORD_VOCAB):
print(f'{i:2d} {word}')
print('\nTag vocab')
for i, tag in enumerate(TAG_VOCAB):
print(f'{i:2d} {tag}')
NUM_WORDS = len(WORD_VOCAB)
NUM_TAGS = len(TAG_VOCAB)
WORD_VOCAB_SIZE = NUM_WORDS + NUM_OOV_BUCKETS
TAG_VOCAB_SIZE = NUM_TAGS + NUM_OOV_BUCKETS
batched_dataset1 = CLIENT1_DATASET.batch(2)
batched_dataset2 = CLIENT2_DATASET.batch(3)
batched_dataset3 = CLIENT3_DATASET.batch(2)
batch1 = next(iter(batched_dataset1))
batch2 = next(iter(batched_dataset2))
batch3 = next(iter(batched_dataset3))
def create_logistic_model(word_vocab_size: int, vocab_tags_size: int):
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(word_vocab_size,), sparse=True),
tf.keras.layers.Dense(
vocab_tags_size,
activation='sigmoid',
kernel_initializer=tf.keras.initializers.zeros,
# For simplicity, don't use a bias vector; this means the model
# is a single tensor, and we only need sparse aggregation of
# the per-token slices of the model. Generalizing to also handle
# other model weights that are fully updated
# (non-dense broadcast and aggregate) would be a good exercise.
use_bias=False),
])
return model
model = create_logistic_model(WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)
p = model.predict(batch1.tokens)
print(p)
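# Illustrative check: with zero-initialised weights every logit is 0, so the
# sigmoid outputs 0.5 for every tag.
print(np.allclose(p, 0.5))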
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.001),
loss=tf.keras.losses.BinaryCrossentropy())
model.train_on_batch(batch1.tokens, batch1.tags)
@tf.function
def token_count_fn(token_counts, batch):
Adds counts from `batch` to the running `token_counts` sum.
# Sum across the batch dimension.
flat_tokens = tf.sparse.reduce_sum(
batch.tokens, axis=0, output_is_sparse=True)
flat_tokens = tf.cast(flat_tokens, dtype=TOKEN_COUNT_DTYPE)
return tf.sparse.add(token_counts, flat_tokens)
# Simple tests
# Create the initial zero token counts using empty tensors.
initial_token_counts = tf.SparseTensor(
indices=tf.zeros(shape=(0, 1), dtype=TOKEN_DTYPE),
values=tf.zeros(shape=(0,), dtype=TOKEN_COUNT_DTYPE),
dense_shape=(WORD_VOCAB_SIZE,))
client_token_counts = batched_dataset1.reduce(initial_token_counts,
token_count_fn)
tokens = tf.reshape(client_token_counts.indices, (-1,)).numpy()
print('tokens:', tokens)
np.testing.assert_array_equal(tokens, [0, 1, 4, 8])
# The count is the number of *examples* in which the token/word
# occurs, not the total number of occurrences, since we still featurize
# multiple occurrences in the same example as a "1".
counts = client_token_counts.values.numpy()
print('counts:', counts)
np.testing.assert_array_equal(counts, [2, 3, 1, 1])
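# The same reduction works for any client dataset; client 3 touches every word
# in the vocabulary (illustrative, reusing the zero counts defined above).
client3_token_counts = batched_dataset3.reduce(initial_token_counts, token_count_fn)
print('client 3 tokens:', tf.reshape(client3_token_counts.indices, (-1,)).numpy())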
@tf.function
def keys_for_client(client_dataset, max_tokens_per_client):
Computes a set of max_tokens_per_client keys.
initial_token_counts = tf.SparseTensor(
indices=tf.zeros((0, 1), dtype=TOKEN_DTYPE),
values=tf.zeros((0,), dtype=TOKEN_COUNT_DTYPE),
dense_shape=(WORD_VOCAB_SIZE,))
client_token_counts = client_dataset.reduce(initial_token_counts,
token_count_fn)
# Find the most-frequently occuring tokens
tokens = tf.reshape(client_token_counts.indices, shape=(-1,))
counts = client_token_counts.values
perm = tf.argsort(counts, direction='DESCENDING')
tokens = tf.gather(tokens, perm)
counts = tf.gather(counts, perm)
num_raw_tokens = tf.shape(tokens)[0]
actual_num_tokens = tf.minimum(max_tokens_per_client, num_raw_tokens)
selected_tokens = tokens[:actual_num_tokens]
paddings = [[0, max_tokens_per_client - tf.shape(selected_tokens)[0]]]
padded_tokens = tf.pad(selected_tokens, paddings=paddings)
# Make sure the type is statically determined
padded_tokens = tf.reshape(padded_tokens, shape=(max_tokens_per_client,))
# We will pass these tokens as keys into `federated_select`, which
# requires SELECT_KEY_DTYPE=tf.int32 keys.
padded_tokens = tf.cast(padded_tokens, dtype=SELECT_KEY_DTYPE)
return padded_tokens, actual_num_tokens
# Simple test
# Case 1: actual_num_tokens > max_tokens_per_client
selected_tokens, actual_num_tokens = keys_for_client(batched_dataset1, 3)
assert tf.size(selected_tokens) == 3
assert actual_num_tokens == 3
# Case 2: actual_num_tokens < max_tokens_per_client
selected_tokens, actual_num_tokens = keys_for_client(batched_dataset1, 10)
assert tf.size(selected_tokens) == 10
assert actual_num_tokens == 4
@tf.function
def map_to_local_token_ids(client_data, client_keys):
global_to_local = tf.lookup.StaticHashTable(
# Note int32 -> int64 maps are not supported
tf.lookup.KeyValueTensorInitializer(
keys=tf.cast(client_keys, dtype=TOKEN_DTYPE),
# Note we need to use tf.shape, not the static
# shape client_keys.shape[0]
values=tf.range(0, limit=tf.shape(client_keys)[0],
dtype=TOKEN_DTYPE)),
# We use -1 for tokens that were not selected, which can occur for clients
# with more than MAX_TOKENS_SELECTED_PER_CLIENT distinct tokens.
# We will simply remove these invalid indices from the batch below.
default_value=-1)
def to_local_ids(sparse_tokens):
indices_t = tf.transpose(sparse_tokens.indices)
batch_indices = indices_t[0] # First column
tokens = indices_t[1] # Second column
tokens = tf.map_fn(
lambda global_token_id: global_to_local.lookup(global_token_id), tokens)
# Remove tokens that aren't actually available (looked up as -1):
available_tokens = tokens >= 0
tokens = tokens[available_tokens]
batch_indices = batch_indices[available_tokens]
updated_indices = tf.transpose(
tf.concat([[batch_indices], [tokens]], axis=0))
st = tf.sparse.SparseTensor(
updated_indices,
tf.ones(tf.size(tokens), dtype=FEATURE_DTYPE),
dense_shape=sparse_tokens.dense_shape)
st = tf.sparse.reorder(st)
return st
return client_data.map(lambda b: BatchType(to_local_ids(b.tokens), b.tags))
# Simple test
client_keys, actual_num_tokens = keys_for_client(
batched_dataset3, MAX_TOKENS_SELECTED_PER_CLIENT)
client_keys = client_keys[:actual_num_tokens]
d = map_to_local_token_ids(batched_dataset3, client_keys)
batch = next(iter(d))
all_tokens = tf.gather(batch.tokens.indices, indices=1, axis=1)
# Confirm we have local indices in the range [0, MAX):
assert tf.math.reduce_max(all_tokens) < MAX_TOKENS_SELECTED_PER_CLIENT
assert tf.math.reduce_max(all_tokens) >= 0
@tf.function
def slices_dataset_to_tensor(slices_dataset):
Convert a dataset of slices to a tensor.
# Use batching to gather all of the slices into a single tensor.
d = slices_dataset.batch(MAX_TOKENS_SELECTED_PER_CLIENT,
drop_remainder=False)
iter_d = iter(d)
tensor = next(iter_d)
# Make sure we have consumed everything
opt = iter_d.get_next_as_optional()
tf.Assert(tf.logical_not(opt.has_value()), data=[''], name='CHECK_EMPTY')
return tensor
# Simple test
weights = np.random.random(
size=(MAX_TOKENS_SELECTED_PER_CLIENT, TAG_VOCAB_SIZE)).astype(np.float32)
model_slices_as_dataset = tf.data.Dataset.from_tensor_slices(weights)
weights2 = slices_dataset_to_tensor(model_slices_as_dataset)
np.testing.assert_array_equal(weights, weights2)
@tf.function
def client_train_fn(model, client_optimizer,
model_slices_as_dataset, client_data,
client_keys, actual_num_tokens):
initial_model_weights = slices_dataset_to_tensor(model_slices_as_dataset)
assert len(model.trainable_variables) == 1
model.trainable_variables[0].assign(initial_model_weights)
# Only keep the "real" (unpadded) keys.
client_keys = client_keys[:actual_num_tokens]
client_data = map_to_local_token_ids(client_data, client_keys)
loss_fn = tf.keras.losses.BinaryCrossentropy()
for features, labels in client_data:
with tf.GradientTape() as tape:
predictions = model(features)
loss = loss_fn(labels, predictions)
grads = tape.gradient(loss, model.trainable_variables)
client_optimizer.apply_gradients(zip(grads, model.trainable_variables))
model_weights_delta = model.trainable_weights[0] - initial_model_weights
model_weights_delta = tf.slice(model_weights_delta, begin=[0, 0],
size=[actual_num_tokens, -1])
return client_keys, model_weights_delta
# Simple test
# Note if you execute this cell a second time, you need to also re-execute
# the preceeding cell to avoid "tf.function-decorated function tried to
# create variables on non-first call" errors.
on_device_model = create_logistic_model(MAX_TOKENS_SELECTED_PER_CLIENT,
TAG_VOCAB_SIZE)
client_optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
client_keys, actual_num_tokens = keys_for_client(
batched_dataset2, MAX_TOKENS_SELECTED_PER_CLIENT)
model_slices_as_dataset = tf.data.Dataset.from_tensor_slices(
np.zeros((MAX_TOKENS_SELECTED_PER_CLIENT, TAG_VOCAB_SIZE),
dtype=np.float32))
keys, delta = client_train_fn(
on_device_model,
client_optimizer,
model_slices_as_dataset,
client_data=batched_dataset3,
client_keys=client_keys,
actual_num_tokens=actual_num_tokens)
print(delta)
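# Illustrative check: the returned update only covers the client's selected
# tokens, i.e. shapes (actual_num_tokens,) and (actual_num_tokens, TAG_VOCAB_SIZE).
print(keys.shape, delta.shape)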
def federated_indexed_slices_sum(slice_indices, slice_values, dense_shape):
  Sums IndexedSlices@CLIENTS to a dense @SERVER Tensor.
Intermediate aggregation is performed by converting to a dense representation,
which may not be suitable for all applications.
Args:
slice_indices: An IndexedSlices.indices tensor @CLIENTS.
slice_values: An IndexedSlices.values tensor @CLIENTS.
dense_shape: A statically known dense shape.
Returns:
A dense tensor placed @SERVER representing the sum of the client's
    IndexedSlices.
slices_dtype = slice_values.type_signature.member.dtype
zero = tff.tf_computation(
lambda: tf.zeros(dense_shape, dtype=slices_dtype))()
@tf.function
def accumulate_slices(dense, client_value):
indices, slices = client_value
# There is no built-in way to add `IndexedSlices`, but
# tf.convert_to_tensor is a quick way to convert to a dense representation
# so we can add them.
return dense + tf.convert_to_tensor(
tf.IndexedSlices(slices, indices, dense_shape))
return tff.federated_aggregate(
(slice_indices, slice_values),
zero=zero,
accumulate=tff.tf_computation(accumulate_slices),
merge=tff.tf_computation(lambda d1, d2: tf.add(d1, d2, name='merge')),
report=tff.tf_computation(lambda d: d))
dense_shape = (6, 2)
indices_type = tff.TensorType(tf.int64, (None,))
values_type = tff.TensorType(tf.float32, (None, 2))
client_slice_type = tff.type_at_clients(
(indices_type, values_type))
@tff.federated_computation(client_slice_type)
def test_sum_indexed_slices(indices_values_at_client):
indices, values = indices_values_at_client
return federated_indexed_slices_sum(indices, values, dense_shape)
print(test_sum_indexed_slices.type_signature)
x = tf.IndexedSlices(
values=np.array([[2., 2.1], [0., 0.1], [1., 1.1], [5., 5.1]],
dtype=np.float32),
indices=[2, 0, 1, 5],
dense_shape=dense_shape)
y = tf.IndexedSlices(
values=np.array([[0., 0.3], [3.1, 3.2]], dtype=np.float32),
indices=[1, 3],
dense_shape=dense_shape)
# Sum one.
result = test_sum_indexed_slices([(x.indices, x.values)])
np.testing.assert_array_equal(tf.convert_to_tensor(x), result)
# Sum two.
expected = [[0., 0.1], [1., 1.4], [2., 2.1], [3.1, 3.2], [0., 0.], [5., 5.1]]
result = test_sum_indexed_slices([(x.indices, x.values), (y.indices, y.values)])
np.testing.assert_array_almost_equal(expected, result)
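# Illustrative: tf.convert_to_tensor densifies an IndexedSlices, which is exactly
# what the accumulate step above relies on.
print(tf.convert_to_tensor(x))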
DENSE_MODEL_SHAPE = (WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)
client_data_type = tff.SequenceType(batched_dataset1.element_spec)
model_type = tff.TensorType(tf.float32, shape=DENSE_MODEL_SHAPE)
@tff.tf_computation
def server_update(current_model_weights, update_sum, num_clients):
average_update = update_sum / num_clients
return current_model_weights + average_update
# Function to select slices from the model weights in federated_select:
select_fn = tff.tf_computation(
lambda model_weights, index: tf.gather(model_weights, index))
# We need to wrap `client_train_fn` as a `tff.tf_computation`, making
# sure we do any operations that might construct `tf.Variable`s outside
# of the `tf.function` we are wrapping.
@tff.tf_computation
def client_train_fn_tff(model_slices_as_dataset, client_data, client_keys,
actual_num_tokens):
  # Note this is smaller than the global model, using
  # MAX_TOKENS_SELECTED_PER_CLIENT which is much smaller than WORD_VOCAB_SIZE.
  # We would like a model of size `actual_num_tokens`, but we
# can't build the model dynamically, so we will slice off the padded
# weights at the end.
client_model = create_logistic_model(MAX_TOKENS_SELECTED_PER_CLIENT,
TAG_VOCAB_SIZE)
client_optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
return client_train_fn(client_model, client_optimizer,
model_slices_as_dataset, client_data, client_keys,
actual_num_tokens)
@tff.tf_computation
def keys_for_client_tff(client_data):
return keys_for_client(client_data, MAX_TOKENS_SELECTED_PER_CLIENT)
@tff.federated_computation(
tff.type_at_server(model_type), tff.type_at_clients(client_data_type))
def sparse_model_update(server_model, client_data):
max_tokens = tff.federated_value(MAX_TOKENS_SELECTED_PER_CLIENT, tff.SERVER)
keys_at_clients, actual_num_tokens = tff.federated_map(
keys_for_client_tff, client_data)
model_slices = tff.federated_select(keys_at_clients, max_tokens, server_model,
select_fn)
update_keys, update_slices = tff.federated_map(
client_train_fn_tff,
(model_slices, client_data, keys_at_clients, actual_num_tokens))
dense_update_sum = federated_indexed_slices_sum(update_keys, update_slices,
DENSE_MODEL_SHAPE)
num_clients = tff.federated_sum(tff.federated_value(1.0, tff.CLIENTS))
updated_server_model = tff.federated_map(
server_update, (server_model, dense_update_sum, num_clients))
return updated_server_model
print(sparse_model_update.type_signature)
server_model = create_logistic_model(WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)
server_model.compile( # Compile to make evaluation easy.
optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.0), # Unused
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Recall(top_k=2, name='recall_at_2'),
])
def evaluate(model, dataset, name):
metrics = model.evaluate(dataset, verbose=0)
metrics_str = ', '.join([f'{k}={v:.2f}' for k, v in
(zip(server_model.metrics_names, metrics))])
print(f'{name}: {metrics_str}')
print('Before training')
evaluate(server_model, batched_dataset1, 'Client 1')
evaluate(server_model, batched_dataset2, 'Client 2')
evaluate(server_model, batched_dataset3, 'Client 3')
model_weights = server_model.trainable_weights[0]
client_datasets = [batched_dataset1, batched_dataset2, batched_dataset3]
for _ in range(10): # Run 10 rounds of FedAvg
# We train on 1, 2, or 3 clients per round, selecting
# randomly.
cohort_size = np.random.randint(1, 4)
clients = np.random.choice([0, 1, 2], cohort_size, replace=False)
print('Training on clients', clients)
model_weights = sparse_model_update(
model_weights, [client_datasets[i] for i in clients])
server_model.set_weights([model_weights])
print('After training')
evaluate(server_model, batched_dataset1, 'Client 1')
evaluate(server_model, batched_dataset2, 'Client 2')
evaluate(server_model, batched_dataset3, 'Client 3')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constrained problem
Step2: Modeling and joint acquisition function
Step3: Initial belief
Step4: Running Bayesian Optimizer
Step5: Results
Step6: If we inspect the sampling distribution, we can see that the number of samples in the infeasible regions is limited. The optimization has focused on the feasible areas. In addition, it has been active mostly in two optimal regions.
Step7: Finally, the evolution of the best value over the number of iterations clearly shows a very good solution is already found after only a few evaluations.
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import gpflow
import gpflowopt
import numpy as np
# Objective & constraint
def townsend(X):
return -(np.cos((X[:,0]-0.1)*X[:,1])**2 + X[:,0] * np.sin(3*X[:,0]+X[:,1]))[:,None]
def constraint(X):
return -(-np.cos(1.5*X[:,0]+np.pi)*np.cos(1.5*X[:,1])+np.sin(1.5*X[:,0]+np.pi)*np.sin(1.5*X[:,1]))[:,None]
# Setup input domain
domain = gpflowopt.domain.ContinuousParameter('x1', -2.25, 2.5) + \
gpflowopt.domain.ContinuousParameter('x2', -2.5, 1.75)
# Plot
def plotfx():
X = gpflowopt.design.FactorialDesign(101, domain).generate()
Zo = townsend(X)
Zc = constraint(X)
mask = Zc>=0
Zc[mask] = np.nan
Zc[np.logical_not(mask)] = 1
Z = Zo * Zc
shape = (101, 101)
f, axes = plt.subplots(1, 1, figsize=(7, 5))
axes.contourf(X[:,0].reshape(shape), X[:,1].reshape(shape), Z.reshape(shape))
axes.set_xlabel('x1')
axes.set_ylabel('x2')
axes.set_xlim([domain.lower[0], domain.upper[0]])
axes.set_ylim([domain.lower[1], domain.upper[1]])
return axes
plotfx();
# Initial evaluations
design = gpflowopt.design.LatinHyperCube(11, domain)
X = design.generate()
Yo = townsend(X)
Yc = constraint(X)
# Models
objective_model = gpflow.gpr.GPR(X, Yo, gpflow.kernels.Matern52(2, ARD=True))
objective_model.likelihood.variance = 0.01
constraint_model = gpflow.gpr.GPR(np.copy(X), Yc, gpflow.kernels.Matern52(2, ARD=True))
constraint_model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)
constraint_model.likelihood.variance = 0.01
constraint_model.likelihood.variance.prior = gpflow.priors.Gamma(1./4.,1.0)
# Setup
ei = gpflowopt.acquisition.ExpectedImprovement(objective_model)
pof = gpflowopt.acquisition.ProbabilityOfFeasibility(constraint_model)
joint = ei * pof
def plot():
Xeval = gpflowopt.design.FactorialDesign(101, domain).generate()
Yevala,_ = joint.operands[0].models[0].predict_f(Xeval)
Yevalb,_ = joint.operands[1].models[0].predict_f(Xeval)
Yevalc = np.maximum(ei.evaluate(Xeval), 0)
Yevald = pof.evaluate(Xeval)
Yevale = np.maximum(joint.evaluate(Xeval), 0)
shape = (101, 101)
plots = [('Objective model', Yevala), ('Constraint model', Yevalb),
('EI', Yevalc), ('PoF', Yevald),
('EI * PoF', Yevale)]
plt.figure(figsize=(10,10))
for i, plot in enumerate(plots):
if i == 4:
ax = plt.subplot2grid((3, 4), (2, 1), colspan=2)
else:
ax = plt.subplot2grid((3, 2), (int(i/2), i % 2))
ax.contourf(Xeval[:,0].reshape(shape), Xeval[:,1].reshape(shape), plot[1].reshape(shape))
ax.scatter(joint.data[0][:,0], joint.data[0][:,1], c='w')
ax.set_title(plot[0])
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xlim([domain.lower[0], domain.upper[0]])
ax.set_ylim([domain.lower[1], domain.upper[1]])
plt.tight_layout()
# Plot representing the model belief, and the belief mapped to EI and PoF
plot()
print(constraint_model)
# First setup the optimization strategy for the acquisition function
# Combining MC step followed by L-BFGS-B
acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 200),
gpflowopt.optim.SciPyOptimizer(domain)])
# Then run the BayesianOptimizer for 50 iterations
optimizer = gpflowopt.BayesianOptimizer(domain, joint, optimizer=acquisition_opt, verbose=True)
result = optimizer.optimize([townsend, constraint], n_iter=50)
print(result)
# Plotting belief again
print(constraint_model)
plot()
# Plot function, overlayed by the constraint. Also plot the samples
axes = plotfx()
valid = joint.feasible_data_index()
axes.scatter(joint.data[0][valid,0], joint.data[0][valid,1], label='feasible data', c='w')
axes.scatter(joint.data[0][np.logical_not(valid),0], joint.data[0][np.logical_not(valid),1], label='data', c='r');
axes.legend()
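# Illustrative: the best feasible objective value can be read off directly from
# the evaluated data (first output column, restricted to feasible points).
print('best feasible objective:', joint.data[1][valid, 0].min())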
f, axes = plt.subplots(1, 1, figsize=(7, 5))
f = joint.data[1][:,0]
f[joint.data[1][:,1] > 0] = np.inf
axes.plot(np.arange(0, joint.data[0].shape[0]), np.minimum.accumulate(f))
axes.set_ylabel('fmin')
axes.set_xlabel('Number of evaluated points');
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Curve data
Step2: You can also access curves by index (remembering that the first index is zero but the first curve is the depth).
Step3: All the curves are stored as a two-dimensional ndarray under the data attribute
Step4: Header information
Step5: In order from top to bottom as they usually are in LAS files, the header sections are ~Version, ~Well, ~Curves, ~Parameter and ~Other.
Step6: To change metadata information you have to edit the relevant field of the HeaderItem object
Step7: The other section with header items is very similar...
Step8: For convenience there's a special attribute which brings together all the header metadata mnemonics and values
Step9: The ~Other section is left as raw text.
Step10: Note that commented-out lines beginning with # elsewhere in the file are not read in at all at the moment (this will be fixed in the future).
Step11: Notice of course that whereas l["NEUT"] returns you a numpy.ndarray directly, using the get_curve method gives you the whole Curve object, with the unit, value (usually an API curve type code), and descr attributes as well as the data.
| <ASSISTANT_TASK:>
Python Code:
import os
import lasio
l = lasio.read(os.path.join("..", "tests", "examples", "6038187_v1.2.las"))
print(type(l))
print(l._text)
l.keys()
l['NEUT']
print(l['GAMN'])
print(l[2])
print(l["COND"])
print(l[-1])
print(l.data.shape)
print(l.data)
from lasio.las import HeaderItem
example = HeaderItem(mnemonic="ROP", unit="m/min", value=2.3, descr="Rate of penetration")
print(example)
print(example.mnemonic)
print(example.unit)
print(example.value)
print(example.descr)
l.version
l.version['VERS'].value
l.well
l.well['SRVC'].value = "My logging company -- not anyone else's"
print(l.well["SRVC"])
l.params
l.metadata
print(l.other)
l.curves
neutron_log = l.get_curve("NEUT")
print(neutron_log)
print(l["DEPT"])
print(l.index)
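# Illustrative: because curves are plain numpy arrays, summary statistics are
# straightforward (null values, if any, may need masking first).
print(l['GAMN'].min(), l['GAMN'].max())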
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
fig = plt.figure(figsize=(18, 5))
gs = gridspec.GridSpec(1, len(l.curves)-1, wspace=0.2)
for i, curve in enumerate(l.curves[1:]):
if i > 0:
shareax = ax
else:
shareax = False
ax = fig.add_subplot(gs[i], sharey=None)
ax.plot(curve.data, l.index)
ax.set_title(curve.mnemonic)
if i > 0:
ax.set_yticklabels([])
ax.set_ylim(*ax.get_ylim()[::-1])
ax.set_xticklabels(["%.0f" % xi if xi in ax.get_xlim() else "" for xi in ax.get_xticks()], rotation=30)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python Basics (2016-09-09)
Step2: Data Types
Step3: What will be the data type of a+b?
Step4: It's a float. But why? If it was an integer, information would get lost. Python chooses the simplest data type that preserves all information. However, this can be tricky if you don't pay attention...
Step5: Other common data types
Step6: Simple Arithmetics
Step7: Again, be aware of the data types
Step8: For more complex mathematical operations, refer to the math or numpy modules, for instance.
| <ASSISTANT_TASK:>
Python Code:
# this is a single line comment
this is a
multi line
comment
a = 5.1
print 'a', type(a)
b = 3
print 'b', type(b)
print a+b, type(a+b)
print '3/2', 3/2 # integer divided by integer
print '3./2', 3./2 # float divided by integer
c = 'g'
print 'c', type(c)
d = 'stuff'
print 'd', type(d)
e = True
print 'e', type(e)
print a+b # addition
print a-b # subtraction
print a*b # multiplication
print a/b # division
print a//b # floor division
print a%b # modulus
print a**2 # power
#print a+d # results in a TypeError
print 'abc' + "def" # concatenate strings (' and " can both be used, but not mixed)
print 'thereisabunnyhidinginhere'.find('bunny') # index where the bunny hides
print 'thereisabunnyhidinginhere'.find('wolf') # return -1 if failed
print ' lots of space '.strip() # strip blanks
print '____lots of underscores____'.strip('_') # strip underscores
print 'string_with_underscores'.replace('_', ' ') # replace underscores
print 'list,of,things,like,in,a,csv,file'.split(',') # split in to list
print 'one\nword\nper\nline' # use '\n' to represent a line break
print '123abc'.isalpha() # check if string consists of letters only
print '123'.isdigit() # check if string consists of numbers only
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Jupyter notebooks
Step4: Some common terminology
Step5: Vandermonde matrices
Step6: This type of matrix is very common; we can also create it with numpy.vander.
Step7: Now suppose we know the value of a polynomial at a few points.
Step8: Evidently $p(x) = 12.983 x^3 - 1.748 x^2 - 9.476 x + 3.352$ is the unique cubic polynomial that interpolates those points.
Step9: Gram-Schmidt Orthogonalization
Step10: Theorem
Step11: Classical Gram-Schmidt is highly parallel, but unstable, as evidenced by the lack of orthogonality in $Q$.
Step13: Householder triangularization
Step14: Choice of two projections
Step15: Inside qr_householder1, we have the lines
Step16: The error $QR - A$ is still $10^{-8}$ for this very well-conditioned matrix so something else must be at play here.
Step18: We now have a usable implementation of Householder QR. There are some further concerns for factoring rank-deficient matrices. We will visit the concept of pivoting later, in the context of LU and Cholesky factorization.
Step19: Cost of Householder factorization
Step20: The Singular Value Decomposition
| <ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import numpy
from matplotlib import pyplot
def matmult1(A, x):
Entries of y are dot products of rows of A with x
y = numpy.zeros_like(A[:,0])
for i in range(len(A)):
row = A[i,:]
for j in range(len(row)):
y[i] += row[j] * x[j]
return y
A = numpy.array([[1,2],[3,5],[7,11]])
x = numpy.array([10,20])
matmult1(A, x)
def matmult2(A, x):
Same idea, but more compactly
y = numpy.zeros_like(A[:,0])
for i,row in enumerate(A):
y[i] = row.dot(x)
return y
matmult2(A, x)
def matmult3(A, x):
y is a linear expansion of the columns of A
y = numpy.zeros_like(A[:,0])
for j,col in enumerate(A.T):
y += col * x[j]
return y
matmult3(A, x)
# We will use this version
A.dot(x)
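# Illustrative check: all the formulations above compute the same product.
print(numpy.allclose(matmult1(A, x), A.dot(x)),
      numpy.allclose(matmult2(A, x), A.dot(x)),
      numpy.allclose(matmult3(A, x), A.dot(x)))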
B = numpy.array([[2, 3],[0, 4]])
print(B)
print(B.dot(B.T), B.T.dot(B))
Binv = numpy.linalg.inv(B)
Binv.dot(B), B.dot(Binv)
x = numpy.linspace(-1,1)
A = numpy.array([x**3, x**2, x, 1+0*x]).T
print('shape =', A.shape) # This is a tall matrix with 4 columns
pyplot.style.use('ggplot')
pyplot.figure()
pyplot.plot(x, A)
pyplot.ylim((-1.1,1.1))
pyplot.show()
pyplot.figure()
p = numpy.array([5,0,-3,0])
pyplot.plot(x, A.dot(p))
x1 = numpy.array([-0.9, 0.1, 0.5, 0.8]) # points where we know values
y = numpy.array([1, 2.4, -0.2, 1.3]) # values at those points
pyplot.figure()
pyplot.plot(x1, y, '*')
B = numpy.vander(x1) # Vandermonde matrix at the known points
p = numpy.linalg.solve(B, y) # Compute the polynomial coefficients
print(p)
pyplot.plot(x, A.dot(p)) # Plot the polynomial evaluated at all points
print('B =', B, '\np =', p)
# Make some polynomials
q0 = A.dot(numpy.array([0,0,0,.5])) # .5
q1 = A.dot(numpy.array([0,0,1,0])) # x
q2 = A.dot(numpy.array([0,1,0,0])) # x^2
pyplot.figure()
pyplot.plot(x, numpy.array([q0, q1, q2]).T)
# Inner products of even and odd functions
q0 = q0 / numpy.linalg.norm(q0)
q1.dot(q0), q2.dot(q0), q2.dot(q1)
# What is the constant component of q2?
pyplot.figure()
pyplot.plot(x, q2.dot(q0)*q0)
# Let's project that away so that q2 is orthogonal to q0
q2 = q2 - q2.dot(q0)*q0
Q = numpy.array([q0, q1, q2]).T
print(Q.T.dot(Q))
pyplot.figure()
pyplot.plot(x, Q)
def gram_schmidt_naive(X):
Q = numpy.zeros_like(X)
R = numpy.zeros((len(X.T),len(X.T)))
for i in range(len(Q.T)):
v = X[:,i].copy()
for j in range(i):
r = v.dot(Q[:,j])
R[j,i] = r
v -= r * Q[:,j] # "modified Gram-Schmidt" - remove each component before next dot product
R[i,i] = numpy.linalg.norm(v)
Q[:,i] = v / R[i,i]
return Q, R
Q, R = gram_schmidt_naive(A)
print(Q.T.dot(Q))
print(numpy.linalg.norm(Q.dot(R)-A))
pyplot.figure()
pyplot.plot(x, Q)
Q, R = gram_schmidt_naive(numpy.vander(x, 4, increasing=True))
pyplot.figure()
pyplot.plot(x, Q)
x1 = numpy.array([-0.9, 0.1, 0.5, 0.8]) # points where we know values
y = numpy.array([1, 2.4, -0.2, 1.3]) # values at those points
pyplot.figure()
pyplot.plot(x1, y, '*')
B = numpy.vander(x1, 2) # Vandermonde matrix at the known points
Q, R = gram_schmidt_naive(B)
p = numpy.linalg.solve(R, Q.T.dot(y)) # Compute the polynomial coefficients
print(p)
pyplot.plot(x, numpy.vander(x,2).dot(p)) # Plot the polynomial evaluated at all points
print('B =', B, '\np =', p)
m = 20
V = numpy.vander(numpy.linspace(-1,1,m), increasing=False)
Q, R = gram_schmidt_naive(V)
def qr_test(qr, V):
Q, R = qr(V)
m = len(Q.T)
print(qr.__name__, numpy.linalg.norm(Q.dot(R) - V), numpy.linalg.norm(Q.T.dot(Q) - numpy.eye(m)))
qr_test(gram_schmidt_naive, V)
qr_test(numpy.linalg.qr, V)
def gram_schmidt_classical(X):
Q = numpy.zeros_like(X)
R = numpy.zeros((len(X.T),len(X.T)))
for i in range(len(Q.T)):
v = X[:,i].copy()
R[:i,i] = Q[:,:i].T.dot(v)
v -= Q[:,:i].dot(R[:i,i])
R[i,i] = numpy.linalg.norm(v)
Q[:,i] = v / R[i,i]
return Q, R
qr_test(gram_schmidt_classical, V)
def gram_schmidt_modified(X):
Q = X.copy()
R = numpy.zeros((len(X.T), len(X.T)))
for i in range(len(Q.T)):
R[i,i] = numpy.linalg.norm(Q[:,i])
Q[:,i] /= R[i,i]
R[i,i+1:] = Q[:,i+1:].T.dot(Q[:,i])
Q[:,i+1:] -= numpy.outer(Q[:,i], R[i,i+1:])
return Q, R
qr_test(gram_schmidt_modified, V)
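# Added comparison (assumes V is still the 20-column Vandermonde matrix built
# above): classical Gram-Schmidt loses orthogonality on this ill-conditioned
# input, while the modified variant does much better.
for qr_fn in (gram_schmidt_classical, gram_schmidt_modified):
    Qc, _ = qr_fn(V)
    print(qr_fn.__name__, 'orthogonality error:',
          numpy.linalg.norm(Qc.T.dot(Qc) - numpy.eye(len(Qc.T))))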
def householder_Q_times(V, x):
"""Apply orthogonal matrix represented as list of Householder reflectors."""
y = x.copy()
for i in reversed(range(len(V))):
y[i:] -= 2 * V[i] * V[i].dot(y[i:])
return y
def qr_householder1(A):
"Compute QR factorization using naive Householder reflection"
m, n = A.shape
R = A.copy()
V = []
for i in range(n):
x = R[i:,i]
v = -x
v[0] += numpy.linalg.norm(x)
v = v/numpy.linalg.norm(v) # Normalized reflector plane
R[i:,i:] -= 2 * numpy.outer(v, v.dot(R[i:,i:]))
V.append(v) # Storing reflectors is equivalent to storing orthogonal matrix
Q = numpy.eye(m, n)
for i in range(n):
Q[:,i] = householder_Q_times(V, Q[:,i])
return Q, numpy.triu(R[:n,:])
qr_test(qr_householder1, numpy.array([[1.,2],[3,4],[5,6]]))
qr_test(qr_householder1, V)
qr_test(numpy.linalg.qr, V)
qr_test(qr_householder1, numpy.eye(1))
qr_test(qr_householder1, numpy.eye(3,2))
qr_test(qr_householder1, numpy.array([[1.,1], [2e-8,1]]))
print(qr_householder1(numpy.array([[1.,1], [2e-8,1]])))
def qr_householder2(A):
"Compute QR factorization using Householder reflection"
m, n = A.shape
R = A.copy()
V = []
for i in range(n):
v = R[i:,i].copy()
v[0] += numpy.sign(v[0])*numpy.linalg.norm(v) # Choose the further of the two reflections
v = v/numpy.linalg.norm(v) # Normalized reflector plane
R[i:,i:] -= 2 * numpy.outer(v, v.dot(R[i:,i:]))
V.append(v) # Storing reflectors is equivalent to storing orthogonal matrix
Q = numpy.eye(m, n)
for i in range(n):
Q[:,i] = householder_Q_times(V, Q[:,i])
return Q, numpy.triu(R[:n,:])
qr_test(qr_householder2, numpy.eye(3,2))
qr_test(qr_householder2, numpy.array([[1.,1], [1e-8,1]]))
print(qr_householder2(numpy.array([[1.,1], [1e-8,1]])))
qr_test(qr_householder2, V)
def R_solve(R, b):
"""Solve Rx = b using back substitution."""
x = b.copy()
m = len(b)
for i in reversed(range(m)):
x[i] -= R[i,i+1:].dot(x[i+1:])
x[i] /= R[i,i]
return x
Q, R = numpy.linalg.qr(A)
b = Q.T.dot(A.dot(numpy.array([1,2,3,4])))
numpy.linalg.norm(R_solve(R, b) - numpy.linalg.solve(R, b))
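# Added note: b was built as Q^T A p with p = [1, 2, 3, 4], so the back
# substitution below should recover those coefficients.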
R_solve(R, b)
# Test accuracy of solver for an ill-conditioned square matrix
x = numpy.linspace(-1,1,19)
A = numpy.vander(x)
print('cond(A) = ',numpy.linalg.cond(A))
Q, R = numpy.linalg.qr(A)
print('cond(R^{-1} Q^T A) =', numpy.linalg.cond(numpy.linalg.solve(R, Q.T.dot(A))))
L = numpy.linalg.cholesky(A.T.dot(A))
print('cond(L^{-T} L^{-1} A^T A) =', numpy.linalg.cond(numpy.linalg.solve(L.T, numpy.linalg.solve(L, A.T.dot(A)))))
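# Added note: the normal equations roughly square the condition number, which
# is why the Cholesky route above is so much less accurate than QR here.
print('cond(A)^2 =', numpy.linalg.cond(A)**2)
print('cond(A^T A) =', numpy.linalg.cond(A.T.dot(A)))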
m = 10
x = numpy.cos(numpy.linspace(0,numpy.pi,m))
f = 1.0*(x > 0) + (x < 0.5)
A = numpy.vander(x)
Q, R = numpy.linalg.qr(A)
p = numpy.linalg.solve(R, Q.T.dot(f))
y = numpy.linspace(-1,1,50)
g = numpy.vander(y, m).dot(p)
pyplot.figure()
pyplot.plot(x, f, '*')
pyplot.plot(y, g)
print(numpy.linalg.cond(A))
'%10e' % numpy.linalg.cond(numpy.vander(numpy.linspace(-1,1,100),20))
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: The pipeline uses a mix of custom and pre-built components.
Step7: The custom components execute in a container image defined in base_image/Dockerfile.
Step8: The training step in the pipeline employs the AI Platform Training component to schedule an AI Platform Training job in a custom training container. The custom training image is defined in trainer_image/Dockerfile.
Step9: Building and deploying the pipeline
Step10: HINT
Step11: Build the trainer image
Step12: Note
Step13: Build the base image for custom components
Step14: Compile the pipeline
Step15: Use the CLI compiler to compile the pipeline
Step16: The result is the covertype_training_pipeline.yaml file.
Step17: Deploy the pipeline package
Step18: Submitting pipeline runs
Step19: Submit a run
Step20: Run the pipeline using the kfp command line by retrieving the variables from the environment to pass to the pipeline where
| <ASSISTANT_TASK:>
Python Code:
!grep 'BASE_IMAGE =' -A 5 pipeline/covertype_training_pipeline.py
%%writefile ./pipeline/covertype_training_pipeline.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS = {
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
"""Prepares the data sampling query."""
sampling_query_template = """
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
"""
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
name='Covertype Classifier Training',
description='The pipeline training and deploying the Covertype classifier'
)
def covertype_train(project_id,
region,
source_table_name,
gcs_root,
dataset_id,
evaluation_metric_name,
evaluation_metric_threshold,
model_id,
version_id,
replace_existing_version,
hypertune_settings=HYPERTUNE_SETTINGS,
dataset_location='US'):
"""Orchestrates training and deployment of an sklearn model."""
# Create the training split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
create_training_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=training_file_path,
dataset_location=dataset_location)
# Create the validation split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[8])
validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
create_validation_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=validation_file_path,
dataset_location=dataset_location)
# Create the testing split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[9])
testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
create_testing_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=testing_file_path,
dataset_location=dataset_location)
# Tune hyperparameters
tune_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
]
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
kfp.dsl.RUN_ID_PLACEHOLDER)
hypertune = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=tune_args,
training_input=hypertune_settings)
# Retrieve the best trial
get_best_trial = retrieve_best_run_op(
project_id, hypertune.outputs['job_id'])
# Train the model on a combined training and validation datasets
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
train_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--alpha',
get_best_trial.outputs['alpha'], '--max_iter',
get_best_trial.outputs['max_iter'], '--hptune', 'False'
]
train_model = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=train_args)
# Evaluate the model on the testing split
eval_model = evaluate_model_op(
dataset_path=str(create_testing_split.outputs['output_gcs_path']),
model_path=str(train_model.outputs['job_dir']),
metric_name=evaluation_metric_name)
# Deploy the model if the primary metric is better than threshold
with kfp.dsl.Condition(eval_model.outputs['metric_value'] > evaluation_metric_threshold):
deploy_model = mlengine_deploy_op(
model_uri=train_model.outputs['job_dir'],
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=RUNTIME_VERSION,
python_version=PYTHON_VERSION,
replace_existing_version=replace_existing_version)
# Configure the pipeline to run using the service account defined
# in the user-gcp-sa k8s secret
if USE_KFP_SA == 'True':
kfp.dsl.get_pipeline_conf().add_op_transformer(
use_gcp_secret('user-gcp-sa'))
!cat base_image/Dockerfile
!cat trainer_image/Dockerfile
!gsutil ls
REGION = 'us-central1'
ENDPOINT = '337dd39580cbcbd2-dot-us-central2.pipelines.googleusercontent.com' # TO DO: REPLACE WITH YOUR ENDPOINT
ARTIFACT_STORE_URI = 'gs://qwiklabs-gcp-xx-xxxxxxx-kubeflowpipelines-default' # TO DO: REPLACE WITH YOUR ARTIFACT_STORE NAME
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
IMAGE_NAME='trainer_image'
TAG='latest'
TRAINER_IMAGE='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, TAG)
!gcloud builds submit --timeout 15m --tag $TRAINER_IMAGE trainer_image
IMAGE_NAME='base_image'
TAG='latest'
BASE_IMAGE='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, TAG)
!gcloud builds submit --timeout 15m --tag $BASE_IMAGE base_image
USE_KFP_SA = False
COMPONENT_URL_SEARCH_PREFIX = 'https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/'
RUNTIME_VERSION = '1.15'
PYTHON_VERSION = '3.7'
%env USE_KFP_SA={USE_KFP_SA}
%env BASE_IMAGE={BASE_IMAGE}
%env TRAINER_IMAGE={TRAINER_IMAGE}
%env COMPONENT_URL_SEARCH_PREFIX={COMPONENT_URL_SEARCH_PREFIX}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERSION={PYTHON_VERSION}
!dsl-compile --py pipeline/covertype_training_pipeline.py --output covertype_training_pipeline.yaml
!head covertype_training_pipeline.yaml
PIPELINE_NAME='covertype_continuous_training'
!kfp --endpoint $ENDPOINT pipeline upload \
-p $PIPELINE_NAME \
covertype_training_pipeline.yaml
!kfp --endpoint $ENDPOINT pipeline list
PIPELINE_ID='0918568d-758c-46cf-9752-e04a4403cd84' # TO DO: REPLACE WITH YOUR PIPELINE ID
EXPERIMENT_NAME = 'Covertype_Classifier_Training'
RUN_ID = 'Run_001'
SOURCE_TABLE = 'covertype_dataset.covertype'
DATASET_ID = 'splits'
EVALUATION_METRIC = 'accuracy'
EVALUATION_METRIC_THRESHOLD = '0.69'
MODEL_ID = 'covertype_classifier'
VERSION_ID = 'v01'
REPLACE_EXISTING_VERSION = 'True'
GCS_STAGING_PATH = '{}/staging'.format(ARTIFACT_STORE_URI)
!kfp --endpoint $ENDPOINT run submit \
-e $EXPERIMENT_NAME \
-r $RUN_ID \
-p $PIPELINE_ID \
project_id=$PROJECT_ID \
gcs_root=$GCS_STAGING_PATH \
region=$REGION \
source_table_name=$SOURCE_TABLE \
dataset_id=$DATASET_ID \
evaluation_metric_name=$EVALUATION_METRIC \
evaluation_metric_threshold=$EVALUATION_METRIC_THRESHOLD \
model_id=$MODEL_ID \
version_id=$VERSION_ID \
replace_existing_version=$REPLACE_EXISTING_VERSION
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Exercise 2
Step3: Exercise 3
| <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%pylab inline
#Your code here...
file1 = pd.read_json('../../data/data1.json')
file2 = pd.read_json('../../data/data2.json')
file3 = pd.read_json('../../data/data3.json') # add orient=columns
file4 = pd.read_json('../../data/data4.json', orient='split')
combined = pd.concat([file1, file2.T, file3, file4], ignore_index=True)
combined
import apache_log_parser
from user_agents import parse
def parse_ua(line):
parsed_data = parse(line)
return str(parsed_data).split('/')[1]
def parse_ua_2(line):
parsed_data = parse(line)
return str(parsed_data).split('/')[2]
#Read in the log file
line_parser = apache_log_parser.make_parser("%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"")
server_log = open("../../data/hackers-access.httpd", "r")
parsed_server_data = []
for line in server_log:
data = {}
data = line_parser(line)
parsed_server_data.append( data )
server_df = pd.DataFrame(parsed_server_data)
server_df['OS'] = server_df['request_header_user_agent'].apply(parse_ua)
server_df['Browser'] = server_df['request_header_user_agent'].apply(parse_ua_2)
server_df['OS'].value_counts().head(10)
#Apply the functions to the dataframe
#Get the top 10 values
#Your code here...
bots = pd.read_csv('../../data/dailybots.csv')
gov_bots = bots[['botfam', 'hosts']][bots['industry'] == 'Government/Politics']
gov_bots.groupby(['botfam']).size()
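# Added sketch (an assumption about the exercise's intent): if the goal is the
# total number of infected hosts per bot family rather than a row count,
# aggregate the 'hosts' column instead.
gov_bots.groupby('botfam')['hosts'].sum().sort_values(ascending=False)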
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare non-python dependencies by calling "nbinstall"
Step2: Set default options by calling "set_defaults" (optional)
Step3: Demo 1 - Rendering a DataFrame returned by Yahoo Finance
Step4: BEFORE - Here's IPython's default representation of our 'spy' DataFrame
Step5: AFTER - Here's the same 'spy' DataFrame being rendered as a qgrid
Step6: Configuring your grid using SlickGrid options
Step7: Demo 2 - Rendering a sample DataFrame that includes a MultiIndex
Step8: BEFORE - Here's IPython's default representation of our 'multi_index_df' DataFrame
Step9: AFTER - Here's the same 'multi_index_df' DataFrame being rendered as a qgrid
Step10: API & Usage
Step11: Prepare non-python dependencies by calling 'nbinstall'
| <ASSISTANT_TASK:>
Python Code:
import qgrid
qgrid.nbinstall(overwrite=True) # copies javascript dependencies to your /nbextensions folder
qgrid.set_defaults(remote_js=True, precision=4)
import pandas as pd
import numpy as np
randn = np.random.randn
pd.set_option('display.max_rows', 8)
from pandas.io.data import get_data_yahoo
spy = get_data_yahoo(
symbols='SPY',
start=pd.Timestamp('2011-01-01'),
end=pd.Timestamp('2014-01-01'),
adjust_price=True,
)
spy
qgrid.show_grid(spy)
qgrid.show_grid(spy, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
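# Added example (uses only the qgrid calls already shown above): the defaults
# can be re-applied to change the display precision before rendering another grid.
qgrid.set_defaults(remote_js=True, precision=2)
qgrid.show_grid(spy.head(100))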
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
multi_index_df = pd.DataFrame(randn(8, 2), index=index, columns=['A', 'B'])
multi_index_df
qgrid.show_grid(multi_index_df)
import qgrid
qgrid.nbinstall(overwrite=True) # use overwrite=True to keep your /nbextensions folder up to date during development
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Jax Model Conversion For TFLite
Step3: Data Preparation
Step4: Build the MNIST model with Jax
Step5: Train & Evaluate the model
Step6: Convert to TFLite model.
Step7: Check the Converted TFLite Model
Step8: Optimize the Model
Step9: Evaluate the Optimized Model
Step10: Compare the Quantized Model size
| <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tf-nightly --upgrade
!pip install jax --upgrade
!pip install jaxlib --upgrade
import numpy as np
import tensorflow as tf
import functools
import time
import itertools
import numpy.random as npr
import jax.numpy as jnp
from jax import jit, grad, random
from jax.experimental import optimizers
from jax.experimental import stax
def _one_hot(x, k, dtype=np.float32):
"""Create a one-hot encoding of x of size k."""
return np.array(x[:, None] == np.arange(k), dtype)
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
train_images = train_images.astype(np.float32)
test_images = test_images.astype(np.float32)
train_labels = _one_hot(train_labels, 10)
test_labels = _one_hot(test_labels, 10)
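# Added check: confirm the shapes produced by the preprocessing above.
print(train_images.shape, train_labels.shape, test_images.shape, test_labels.shape)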
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -jnp.mean(jnp.sum(preds * targets, axis=1))
def accuracy(params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(predict(params, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
init_random_params, predict = stax.serial(
stax.Flatten,
stax.Dense(1024), stax.Relu,
stax.Dense(1024), stax.Relu,
stax.Dense(10), stax.LogSoftmax)
rng = random.PRNGKey(0)
step_size = 0.001
num_epochs = 10
batch_size = 128
momentum_mass = 0.9
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass)
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
_, init_params = init_random_params(rng, (-1, 28 * 28))
opt_state = opt_init(init_params)
itercount = itertools.count()
print("\nStarting training...")
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
opt_state = update(next(itercount), opt_state, next(batches))
epoch_time = time.time() - start_time
params = get_params(opt_state)
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
print("Training set accuracy {}".format(train_acc))
print("Test set accuracy {}".format(test_acc))
serving_func = functools.partial(predict, params)
x_input = jnp.zeros((1, 28, 28))
converter = tf.lite.TFLiteConverter.experimental_from_jax(
[serving_func], [[('input1', x_input)]])
tflite_model = converter.convert()
with open('jax_mnist.tflite', 'wb') as f:
f.write(tflite_model)
expected = serving_func(train_images[0:1])
# Run the model with TensorFlow Lite
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"], train_images[0:1, :, :])
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])
# Assert if the result of TFLite model is consistent with the JAX model.
np.testing.assert_almost_equal(expected, result, 1e-5)
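# Added sketch (not part of the original conversion flow): estimate the float
# TFLite model's accuracy on a small slice of the test set by looping over the
# interpreter one example at a time.
correct = 0
n_eval = 100  # kept small so the loop stays fast
for i in range(n_eval):
    interpreter.set_tensor(input_details[0]["index"], test_images[i:i + 1, :, :])
    interpreter.invoke()
    pred = interpreter.get_tensor(output_details[0]["index"])
    correct += int(np.argmax(pred) == np.argmax(test_labels[i]))
print("TFLite accuracy on", n_eval, "test examples:", correct / n_eval)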
def representative_dataset():
for i in range(1000):
x = train_images[i:i+1]
yield [x]
converter = tf.lite.TFLiteConverter.experimental_from_jax(
[serving_func], [[('x', x_input)]])
tflite_model = converter.convert()
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_quant_model = converter.convert()
with open('jax_mnist_quant.tflite', 'wb') as f:
f.write(tflite_quant_model)
expected = serving_func(train_images[0:1])
# Run the model with TensorFlow Lite
interpreter = tf.lite.Interpreter(model_content=tflite_quant_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"], train_images[0:1, :, :])
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])
# Assert if the result of TFLite model is consistent with the Jax model.
np.testing.assert_almost_equal(expected, result, 1e-5)
!du -h jax_mnist.tflite
!du -h jax_mnist_quant.tflite
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and prepare the dataset
Step2: Limit the size of the dataset to experiment faster (optional)
Step3: Create a tf.data dataset
Step4: Write the encoder and decoder model
Step5: Define the optimizer and the loss function
Step6: Checkpoints (Object-based saving)
Step7: Training
Step8: Translate
Step9: Restore the latest checkpoint and test
| <ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.10 and enable eager execution
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import time
print(tf.__version__)
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# adding a start and an end token to the sentence
# so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return word_pairs
# This class creates a word -> index mapping (e.g., "dad" -> 5) and vice-versa
# (e.g., 5 -> "dad") for each language,
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
self.create_index()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
def max_length(tensor):
return max(len(t) for t in tensor)
def load_dataset(path, num_examples):
# creating cleaned input, output pairs
pairs = create_dataset(path, num_examples)
# index language using the class defined above
inp_lang = LanguageIndex(sp for en, sp in pairs)
targ_lang = LanguageIndex(en for en, sp in pairs)
# Vectorize the input and target languages
# Spanish sentences
input_tensor = [[inp_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs]
# English sentences
target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs]
# Calculate max_length of input and output tensor
# Here, we'll set those to the longest sentence in the dataset
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
# Try experimenting with the size of that dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples)
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
N_BATCH = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
def gru(units):
# If you have a GPU, we recommend using CuDNNGRU (provides a 3x speedup over GRU);
# the code automatically does that.
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying tanh(FC(EO) + FC(H)) to self.V
score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size * 1, vocab)
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
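# Added shape check (a sketch that assumes the dataset defined above): run one
# batch through the freshly built encoder and decoder to confirm tensor shapes
# before training starts.
example_input_batch, example_target_batch = next(iter(dataset))
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print('Encoder output shape (batch, length, units):', sample_output.shape)
start_tokens = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
sample_decoder_output, _, _ = decoder(start_tokens, sample_hidden, sample_output)
print('Decoder output shape (batch, vocab size):', sample_decoder_output.shape)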
optimizer = tf.train.AdamOptimizer()
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
total_loss += batch_loss
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / N_BATCH))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
def evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
plt.show()
def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
print('Input: {}'.format(sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate(u'esta es mi vida.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate(u'todavia estan en casa?', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# wrong translation
translate(u'trata de averiguarlo.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is just a simple sample to show how Bollinger Bands work.
Step2: What is a 'Bollinger Band®'
Step4: In this example of Bollinger Bands®, the price of the stock is bracketed by an upper and lower band along with a 20-day simple moving average. Because standard deviation is a measure of volatility, when the markets become more volatile, the bands widen; during less volatile periods, the bands contract.
| <ASSISTANT_TASK:>
Python Code:
import quantiacsToolbox
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
F_AD = pd.read_csv('./tickerData/F_AD.txt')
CLOSE = np.array(F_AD.loc[:252-1, [' CLOSE']])
plt.plot(CLOSE)
def bollingerBands(a, n=20):
sma = np.nansum(a[-n:]) / n
std = np.std(a[-n:])
return sma, sma + 2 * std, sma - 2 * std
upperBand, lowerBand = np.zeros(252 - 20 + 1), np.zeros(252 - 20 + 1)
for i in range(252 - 20 + 1):
_, upperBand[i], lowerBand[i] = bollingerBands(CLOSE[i:i+20])
plt.plot(upperBand)
plt.plot(lowerBand)
plt.plot(CLOSE[20:])
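# Added sketch: the band value at index i is computed from the 20-day window
# CLOSE[i:i+20], so it lines up with the close price at index i+19. A toy
# band-breach count using that alignment:
close_aligned = CLOSE[19:, 0]
signal = np.where(close_aligned >= upperBand, -1,
                  np.where(close_aligned <= lowerBand, 1, 0))
print("upper-band breaches:", (signal == -1).sum(),
      "lower-band breaches:", (signal == 1).sum())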
class myStrategy(object):
def myTradingSystem(self, DATE, OPEN, HIGH, LOW, CLOSE, VOL, OI, P, R, RINFO, exposure, equity, settings):
def bollingerBands(a, n=20):
sma = np.nansum(a[-n:]) / n
std = np.std(a[-n:], ddof=1)
return sma, sma + 2 * std, sma - 2 * std
nMarkets = len(settings['markets'])
threshold = settings['threshold']
pos = np.zeros((1, nMarkets), dtype=np.float)
for market in range(nMarkets):
sma, upperBand, lowerBand = bollingerBands(CLOSE[:, market])
currentPrice = CLOSE[-1, market]
if currentPrice >= upperBand + (upperBand - lowerBand) * threshold:
pos[0, market] = -1
elif currentPrice <= lowerBand - (upperBand - lowerBand) * threshold:
pos[0, market] = 1
return pos, settings
def mySettings(self):
"""Define your trading system settings here."""
settings = {}
# Futures Contracts
settings['markets'] = ['CASH', 'F_AD', 'F_BO', 'F_BP', 'F_C', 'F_CC', 'F_CD',
'F_CL', 'F_CT', 'F_DX', 'F_EC', 'F_ED', 'F_ES', 'F_FC', 'F_FV', 'F_GC',
'F_HG', 'F_HO', 'F_JY', 'F_KC', 'F_LB', 'F_LC', 'F_LN', 'F_MD', 'F_MP',
'F_NG', 'F_NQ', 'F_NR', 'F_O', 'F_OJ', 'F_PA', 'F_PL', 'F_RB', 'F_RU',
'F_S', 'F_SB', 'F_SF', 'F_SI', 'F_SM', 'F_TU', 'F_TY', 'F_US', 'F_W', 'F_XX',
'F_YM']
settings['beginInSample'] = '19900101'
settings['endInSample'] = '20170522'
settings['lookback'] = 20
settings['budget'] = 10 ** 6
settings['slippage'] = 0.05
settings['threshold'] = 0.4
return settings
result = quantiacsToolbox.runts(myStrategy)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creation of Dictionary
Step3: Recalibration of Dictionary
| <ASSISTANT_TASK:>
Python Code:
file_path = '../data/2011.0.00419.S/sg_ouss_id/group_ouss_id/member_ouss_2013-03-06_id/product/IRAS16547-4247_Jet_CH3OH7-6.clean.fits'
noise_pixel = (15, 4)
train_pixels = [(133, 135),(134, 135),(133, 136),(134, 136)]
img = fits.open(file_path)
meta = img[0].data
hdr = img[0].header
# V axis
naxisv = hdr['NAXIS3']
onevpix = hdr['CDELT3']*0.000001
v0 = hdr['CRVAL3']*0.000001
v0pix = int(hdr['CRPIX3'])
vaxis = onevpix * (np.arange(naxisv)+1-v0pix) + v0
values = meta[0, :, train_pixels[0][0], train_pixels[0][1]] - np.mean(meta[0, :, train_pixels[0][0], train_pixels[0][1]])
values = values/np.max(values)
plt.plot(vaxis, values)
plt.xlim(np.min(vaxis), np.max(vaxis))
plt.ylim(-1, 1)
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
noise = meta[0, :, noise_pixel[0], noise_pixel[1]] - np.mean(meta[0, :, noise_pixel[0], noise_pixel[1]])
noise = noise/np.max(noise)
plt.plot(vaxis, noise)
plt.ylim(-1, 1)
plt.xlim(np.min(vaxis), np.max(vaxis))
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
cube_params = {
'freq' : vaxis[naxisv/2],
'alpha' : 0,
'delta' : 0,
'spe_bw' : naxisv*onevpix,
'spe_res' : onevpix*v0pix,
's_f' : 8,
's_a' : 0}
dictionary = gen_all_words(cube_params, True)
prediction = pd.DataFrame([])
for train_pixel in train_pixels:
dictionary_recal, detected_peaks = recal_words(file_path, dictionary, cube_params,
train_pixel, noise_pixel)
X = get_values_filtered_normalized(file_path, train_pixel, cube_params)
y_train = get_fortran_array(np.asmatrix(X))
dictionary_recal_fa = np.asfortranarray(dictionary_recal,
dtype= np.double)
lambda_param = 0
for idx in range(0, len(detected_peaks)):
if detected_peaks[idx] != 0:
lambda_param += 1
param = {
'lambda1' : lambda_param,
# 'L': 1,
'pos' : True,
'mode' : 0,
'ols' : True,
'numThreads' : -1}
alpha = spams.lasso(y_train, dictionary_recal_fa, **param).toarray()
total = np.inner(dictionary_recal_fa, alpha.T)
for i in range(0, len(alpha)):
iso_col = dictionary_recal.columns[i]
if(not prediction.columns.isin([iso_col]).any()):
prediction[iso_col] = alpha[i]
else:
prediction[iso_col] = prediction[iso_col]*alpha[i]
for p in prediction.columns:
if(prediction[p][0] != 0):
print(prediction[p])
latexify(8)
# Step 1: Read Cube
ax = plt.subplot(6, 1, 1)
data = get_data_from_fits(file_path)
y = data[0, :, train_pixel[0], train_pixel[1]]
plt.xticks([])
plt.plot(vaxis, y)
lines = get_lines_from_fits(file_path)
for line in lines:
# Shows lines really present
isotope_frequency = int(line[1])
isotope_name = line[0] + "-f" + str(line[1])
plt.axvline(x=isotope_frequency, ymin=0, ymax= 3, color='g')
# 2. Normalize, filter dada
ax = plt.subplot(6, 1, 2)
plt.ylim(ymin =0,ymax = 1.15)
y = get_values_filtered_normalized(file_path, train_pixel, cube_params)
plt.xticks([])
plt.plot(vaxis, y)
# 3. Possible Words
ax = plt.subplot(6, 1, 3)
plt.ylim(ymin =0,ymax = 1.15)
plt.xticks([])
plt.plot(vaxis, dictionary)
# 4. Detect Lines
ax = plt.subplot(6, 1, 4)
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, y)
plt.xticks([])
plt.ylabel("Temperature")
for idx in range(0, len(detected_peaks)):
if detected_peaks[idx] != 0:
plt.axvline(x=vaxis[idx], ymin=0, ymax= 1, color='r')
# 6. Recalibrate Dictionary
ax = plt.subplot(6, 1, 5)
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, dictionary_recal_fa)
plt.xticks([])
# 6. Recover Signal
ax = plt.subplot(6, 1, 6)
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, total)
plt.xlabel("Frequency [MHz]")
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
fig_width = 4.89 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 24.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
'text.latex.preamble': [r'\usepackage{gensymb}'],
'axes.labelsize': 8, # fontsize for x and y labels (was 10)
'axes.titlesize': 8,
'text.fontsize': 8, # was 10
'legend.fontsize': 8, # was 10
'xtick.labelsize': 10,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': [fig_width,fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
for i in range(0, len((alpha > 0))):
if((alpha > 0)[i]):
print(dictionary_recal.columns[i])
print(prediction)
for i in range(0, len(dictionary.index)):
print(calculate_probability(alpha, dictionary.index[i], dictionary_recal))
print(dictionary.index[i])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Delta hedging
Step2: Construct efficient frontier based on return in infinitesimal future time $\mathrm{d}t$
Step3: The most risk averse portfolio is exactly the delta hedging portfolio.
| <ASSISTANT_TASK:>
Python Code:
from scipy.stats import norm
import numpy as np
def bs_put(df, fwd, t, vol, k):
log_fwd_moneyness = np.log(fwd/k)
d1 = (log_fwd_moneyness + vol**2 /2.0 * t) / vol / np.sqrt(t)
d2 = d1 - vol * np.sqrt(t)
return df * (-norm.cdf(-d1) * fwd + norm.cdf(-d2) * k), norm.cdf(-d1)
vol = 0.3
s0 = 10.0
k = 10.0
t = 2.0
r = 0.2 # riskless return
put_now, hedge = bs_put(np.exp(-r*t), s0*np.exp(r*t), t, vol, k)
w_stock_delta_hedging = s0*hedge/(s0*hedge+put_now)
w_put_delta_hedging = 1.0 - w_stock_delta_hedging
print("Weight of stock in delta hedging portfolio: "+str(w_stock_delta_hedging))
print("Weight of put option in delta hedging portfolio: " + str(w_put_delta_hedging))
from pymcef import SimpleEFp, RiskMeasure
dt = 0.001
riskless_reward = np.exp(r * dt) - 1.0
reward_portfolio = []
stock_ws_portfolio = []
put_ws_portfolio = []
r_ps = [-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3]# r under physical measure
for r_p in r_ps:
Zs = norm.rvs(size=50000) # number of samples in MC simulation
S = s0 * np.exp((r_p - vol** 2 / 2.0) * dt + vol* np.sqrt(dt) * Zs)
Fwd = S * np.exp(r * (t-dt))
puts, _ = bs_put(np.exp(-r*(t-dt)), Fwd, t-dt, vol, k)
return_s = S/s0 - 1
return_put = puts/put_now - 1
returns = np.vstack((return_s, return_put))
sol = SimpleEFp(training_set = returns, risk_measure = RiskMeasure.AbsoluteSemiDeviation)
prt = sol.frontier_in_sample[-1] # most risk averse portfolio
reward_portfolio.append(prt['reward'])
stock_ws_portfolio.append(prt['weight'][0])
put_ws_portfolio.append(prt['weight'][1])
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(9, 4))
# return of portfolio
ax = fig.add_subplot(1,2,1)
ax.plot(r_ps, reward_portfolio, 'og', label='return of min-risk portfolio')
ax.axhline(y=riskless_reward, color='r', linestyle='-', label='riskless return')
ax.set_ylim((0, riskless_reward*1.5))
ax.set_xlabel('return rate (r) under physical measure')
ax.set_ylabel('return of investment')
ax.legend(loc='best')
# weight of the stock and put
ax = fig.add_subplot(1,2,2)
ax.plot(r_ps, stock_ws_portfolio, 'ob', label='stock weight in min-risk portfolio')
ax.plot(r_ps, put_ws_portfolio, 'oy', label='put weight in min-risk portfolio')
ax.set_xlabel('return rate (r) under physical measure')
ax.axhline(y=w_stock_delta_hedging, color='r', linestyle='-', label='stock weight in delta hedging')
ax.axhline(y=w_put_delta_hedging, color='c', linestyle='-', label='put weight in delta hedging')
ax.set_ylim((0, w_stock_delta_hedging*1.5))
ax.set_ylabel('weight in portfolio')
ax.legend(loc='best')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: quickly add lots of similar procedures
Step2: Error handling
Step3: Advanced usage
| <ASSISTANT_TASK:>
Python Code:
import os
import sys
import ruamel.yaml as yaml
from ddf_utils.chef.api import Chef
# at the beginning, initial the chef
chef = Chef()
# adding configs and metadata(to the info section)
(chef.add_config(ddf_dir=os.path.expanduser('~/src/work/Gapminder/datasets'))
.add_metadata(id='test_dataset',
base=['ddf--transpint--corrupton']))
# adding ingredient, parameters are just like in recipe
i = '''
id: cpi-datapoints
dataset: ddf--transpint--corrupton
key: country, year
value: "*"
'''
d = yaml.round_trip_load(i)
chef.add_ingredient(**d)
# add an other ingredient
chef.add_ingredient(id='bp-datapoints', dataset='ddf--bp--energy', key='geo, year', value='*')
# add procedures, parameters are same as in recipe
chef.add_procedure(collection='datapoints', # which part of cooking section the procedure is in
procedure='translate_header',
ingredients=['bp-datapoints'],
result='bp-datapoints-translate',
options={'dictionary': {'geo': 'country'}})
# add an other procedure
chef.add_procedure(collection='datapoints',
procedure='merge',
ingredients=['bp-datapoints-translate', 'cpi-datapoints'],
result='res')
# you can print the recipe in yaml format
a = chef.to_recipe()
# also in graph
chef.to_graph()
# to run the recipe and get the output:
res = chef.run()
res
res[0].get_data().keys()
res[0].get_data()['biofuels_production_kboed'].sample(5)
res[0].get_data()['cpi'].sample(5)
# we can also create a Chef instance from existing recipes
recipe_file = '../tests/recipes_pass/test_flatten.yml'
print(open(os.path.abspath(recipe_file)).read())
chef = Chef.from_recipe(os.path.expandvars(recipe_file),
ddf_dir=os.path.expanduser('/Users/semio/src/work/Gapminder/libs/ddf_utils/tests/datasets'))
chef.to_recipe()
res = chef.run()
res[0].get_data().keys()
res[0].get_data()['agriculture_percentage_f']
from ddf_utils.chef.helpers import gen_sym
chef = Chef()
chef.add_config(ddf_dir=os.path.expanduser('~/src/work/Gapminder/datasets'))
chef.add_ingredient(id='population_by_age_dps',
dataset='ddf--unpop--wpp_population_semio',
key='country_code,year,age',
value=['population'])
collection = 'datapoints'
groups = [list(map(str, range(0, 5))),
list(map(str, range(5, 10))),
list(map(str, range(10, 20)))
]
names = ['population_0_4', 'population_5_9', 'population_10_19']
ingredients_0 = ['population_by_age_dps']
to_merge = []
for g, n in zip(groups, names):
procedure = 'filter_row'
options = {
'filters': {
'population': {
'age': g}}}
result = gen_sym('filter_row', ingredients_0, options)
chef.add_procedure(collection=collection,
procedure=procedure,
ingredients=ingredients_0,
options=options,
result=result
)
ingredients = [result]
procedure = 'groupby'
options = {
'groupby': ['country_code', 'year'],
'aggregate': {'population': 'sum'}
}
result = gen_sym(procedure, ingredients, options)
chef.add_procedure(collection=collection,
procedure=procedure,
ingredients=ingredients,
options=options,
result=result
)
ingredients = [result]
procedure = 'translate_header'
options = {
'dictionary': {
'population': n
}
}
result = gen_sym(procedure, ingredients, options)
chef.add_procedure(collection=collection,
procedure=procedure,
ingredients=ingredients,
options=options,
result=result
)
to_merge.append(result)
chef.add_procedure(collection=collection,
procedure='merge',
ingredients=to_merge,
result='result'
)
chef.to_recipe()
chef.to_graph()
res = chef.run()
res
res[0].get_data().keys()
chef = Chef()
i = '''
id: cpi-datapoints
dataset: ddf--transpint--corrupton
key: country, year
value: "*"
snieot: 'soneot'
'''
d = yaml.round_trip_load(i)
chef.add_ingredient(**d)
i = '''
id: cpi-datapoints_
dataset: ddf--transpint--corrupton
oh_my_interesting_key: country, year # error
value: "*"
'''
d = yaml.round_trip_load(i)
try:
chef.add_ingredient(**d)
except KeyError as e:
print(str(e))
i = '''
procedure: my_new_procedure
ingredients:
- testing
options:
opt: val
result: result
'''
d = yaml.round_trip_load(i)
chef.add_procedure('datapoints', **d)
from ddf_utils.chef.ingredient import Ingredient, ProcedureResult
def multiply_1000(chef, ingredients, result, **options):
ingredients = [chef.dag.get_node(x) for x in ingredients]
ingredient = ingredients[0].evaluate()
new_data = dict()
for k, df in ingredient.get_data().items():
df_ = df.copy()
df_[k] = df_[k] * 1000
new_data[k] = df_
return ProcedureResult(chef, result, ingredient.key, new_data)
chef = Chef()
chef.add_config(ddf_dir=os.path.expanduser('~/src/work/Gapminder/datasets'))
i = '''
id: cpi-datapoints
dataset: ddf--transpint--corrupton
key: country, year
value: "*"
'''
d = yaml.round_trip_load(i)
chef.add_ingredient(**d)
chef.register_procedure(multiply_1000)
chef.add_procedure(collection='datapoints',
procedure='multiply_1000',
result='res',
ingredients=['cpi-datapoints']
)
res = chef.run()
res[0].get_data()['cpi'].head(5)
chef.ingredients
chef.ingredients[0].get_data()['cpi'].head() # the original
chef.to_graph()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a grid and set boundary conditions.
Step2: Here we make the initial grid of elevation of zeros with a very small amount of noise to make a more pleasing network.
Step3: Set parameters related to time.
Step4: Set parameters for incision and intializing all of the process components that do the work. We also initialize tools for quantifying the landscape.
Step5: Initialize rock uplift rate. This will need to be changed later.
Step6: Now for the code loop.
Step7: Plot the topography.
Step8: Plot the slope and area data at each point on the landscape (in log-log space). We will only plot the core nodes because the boundary nodes have slopes that are influenced by the boundary conditions.
Step9: It is slightly easier to interpret slope-area data when we look at a single channel, rather than the entire landscape. Below we plot the profile and slope-area data for the three largest channels on the landscape.
Step10: The chi index is a useful way to quantitatively interpret fluvial channels. Below we plot the chi index in the three largest channels and also a chi map across the entire landscape.
Step11: The channel steepness index is another useful index to quantify fluvial channels. Below we plot the steepness index in the same three largest channels, and also plot steepness index across the grid.
Step12: If you have a grid that you want to export, uncomment and edit the appropriate lines below and run the code block.
Step13: After running every code block once, has the landscape reached steady state? Answer
| <ASSISTANT_TASK:>
Python Code:
# Code block 1
import copy
import numpy as np
from matplotlib import pyplot as plt
from landlab import RasterModelGrid, imshow_grid
from landlab.components import (
ChannelProfiler,
ChiFinder,
FlowAccumulator,
SteepnessFinder,
StreamPowerEroder,
)
from landlab.io import write_esri_ascii
# Code Block 2
number_of_rows = 50 # number of raster cells in vertical direction (y)
number_of_columns = 100 # number of raster cells in horizontal direction (x)
dxy = 200 # side length of a raster model cell, or resolution [m]
# Below is a raster (square cells) grid, with equal width and height
mg1 = RasterModelGrid((number_of_rows, number_of_columns), dxy)
# Set boundary conditions - only the south side of the grid is open.
# Boolean parameters are sent to function in order of
# east, north, west, south.
mg1.set_closed_boundaries_at_grid_edges(True, True, True, False)
# Code Block 3
np.random.seed(35) # seed set so our figures are reproducible
mg1_noise = (
np.random.rand(mg1.number_of_nodes) / 1000.0
) # initial noise on the elevation grid
# set up the elevation on the grid
z1 = mg1.add_zeros("topographic__elevation", at="node")
z1 += mg1_noise
# Code Block 4
tmax = 5e5 # time for the model to run [yr] (Original value was 5E5 yr)
dt = 1000 # time step [yr] (Original value was 100 yr)
total_time = 0 # amount of time the landscape has evolved [yr]
# total_time will increase as you keep running the code.
t = np.arange(0, tmax, dt) # each of the time steps that the code will run
# Code Block 5
# Original K_sp value is 1e-5
K_sp = 1.0e-5 # units vary depending on m_sp and n_sp
m_sp = 0.5 # exponent on drainage area in stream power equation
n_sp = 1.0 # exponent on slope in stream power equation
frr = FlowAccumulator(mg1, flow_director="FlowDirectorD8") # initializing flow routing
spr = StreamPowerEroder(
mg1, K_sp=K_sp, m_sp=m_sp, n_sp=n_sp, threshold_sp=0.0
) # initializing stream power incision
theta = m_sp / n_sp
# initialize the component that will calculate channel steepness
sf = SteepnessFinder(mg1, reference_concavity=theta, min_drainage_area=1000.0)
# initialize the component that will calculate the chi index
cf = ChiFinder(
mg1, min_drainage_area=1000.0, reference_concavity=theta, use_true_dx=True
)
# Code Block 6
# uplift_rate [m/yr] (Original value is 0.0001 m/yr)
uplift_rate = np.ones(mg1.number_of_nodes) * 0.0001
# Code Block 7
for ti in t:
z1[mg1.core_nodes] += uplift_rate[mg1.core_nodes] * dt # uplift the landscape
frr.run_one_step() # route flow
spr.run_one_step(dt) # fluvial incision
total_time += dt # update time keeper
print(total_time)
# Code Block 8
imshow_grid(
mg1, "topographic__elevation", grid_units=("m", "m"), var_name="Elevation (m)"
)
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
max_elev = np.max(z1)
print("Maximum elevation is ", np.max(z1))
# Code Block 9
plt.loglog(
mg1.at_node["drainage_area"][mg1.core_nodes],
mg1.at_node["topographic__steepest_slope"][mg1.core_nodes],
"b.",
)
plt.ylabel("Topographic slope")
plt.xlabel("Drainage area (m^2)")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
# Code Block 10
# profile the largest channels, set initially to find the mainstem channel in the three biggest watersheds
# you can change the number of watersheds, or choose to plot all the channel segments in the watershed that
# have drainage area below the threshold (here we have set the threshold to the area of a grid cell).
prf = ChannelProfiler(
mg1,
number_of_watersheds=3,
main_channel_only=True,
minimum_channel_threshold=dxy ** 2,
)
prf.run_one_step()
# plot the elevation as a function of distance upstream
plt.figure(1)
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
prf.plot_profiles(
xlabel="distance upstream (m)", ylabel="elevation (m)", title=title_text
)
# plot the location of the channels in map view
plt.figure(2)
prf.plot_profiles_in_map_view()
# slope-area data in just the profiled channels
plt.figure(3)
for i, outlet_id in enumerate(prf.data_structure):
for j, segment_id in enumerate(prf.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
color = segment["color"]
plt.loglog(
mg1.at_node["drainage_area"][profile_ids],
mg1.at_node["topographic__steepest_slope"][profile_ids],
".",
color=color,
label=label,
)
plt.legend(loc="lower left")
plt.xlabel("drainage area (m^2)")
plt.ylabel("channel slope [m/m]")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
# Code Block 11
# calculate the chi index
cf.calculate_chi()
# chi-elevation plots in the profiled channels
plt.figure(4)
for i, outlet_id in enumerate(prf.data_structure):
for j, segment_id in enumerate(prf.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
color = segment["color"]
plt.plot(
mg1.at_node["channel__chi_index"][profile_ids],
mg1.at_node["topographic__elevation"][profile_ids],
color=color,
label=label,
)
plt.xlabel("chi index (m)")
plt.ylabel("elevation (m)")
plt.legend(loc="lower right")
title_text = (
f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
)
plt.title(title_text)
# chi map
plt.figure(5)
imshow_grid(
mg1,
"channel__chi_index",
grid_units=("m", "m"),
var_name="Chi index (m)",
cmap="jet",
)
title_text = (
f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
)
plt.title(title_text)
# Code Block 12
# calculate channel steepness
sf.calculate_steepnesses()
# plots of steepness vs. distance upstream in the profiled channels
plt.figure(6)
for i, outlet_id in enumerate(prf.data_structure):
for j, segment_id in enumerate(prf.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
distance_upstream = segment["distances"]
color = segment["color"]
plt.plot(
distance_upstream,
mg1.at_node["channel__steepness_index"][profile_ids],
"x",
color=color,
label=label,
)
plt.xlabel("distance upstream (m)")
plt.ylabel("steepness index")
plt.legend(loc="upper left")
plt.title(f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}")
# channel steepness map
plt.figure(7)
imshow_grid(
mg1,
"channel__steepness_index",
grid_units=("m", "m"),
var_name="Steepness index ",
cmap="jet",
)
plt.title(f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}")
# Code Block 13
## Below has the name of the file that data will be written to.
## You need to change the name of the file every time that you want
## to write data, otherwise you will get an error.
## This will write to the directory that you are running the code in.
# write_file_name = 'data_file.txt'
## Below is writing elevation data in the ESRI ascii format so that it can
## easily be read into Arc GIS or back into Landlab.
# write_esri_ascii(write_file_name, mg1, 'topographic__elevation')
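## Optional (a minimal added sketch, kept commented out like the write step above):
## to read the saved elevations back into Landlab later, read_esri_ascii can be used.
## This assumes write_file_name points at a file that was actually written.
# from landlab.io import read_esri_ascii
# mg_reloaded, z_reloaded = read_esri_ascii(write_file_name, name='topographic__elevation')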
# Code Block 14
number_of_rows = 50 # number of raster cells in vertical direction (y)
number_of_columns = 100 # number of raster cells in horizontal direction (x)
dxy2 = 200 # side length of a raster model cell, or resolution [m]
# Below is a raster (square cells) grid, with equal width and height
mg2 = RasterModelGrid((number_of_rows, number_of_columns), dxy2)
# Set boundary conditions - only the south side of the grid is open.
# Boolean parameters are sent to function in order of
# east, north, west, south.
mg2.set_closed_boundaries_at_grid_edges(True, True, True, False)
z2 = copy.copy(z1) # initialize the elevations with the steady state
# topography produced for question 1
z2 = mg2.add_field("topographic__elevation", z2, at="node")
# K_sp value for base landscape is 1e-5
K_sp2 = 1e-5 # units vary depending on m_sp and n_sp
m_sp2 = 0.5 # exponent on drainage area in stream power equation
n_sp2 = 1.0 # exponent on slope in stream power equation
frr2 = FlowAccumulator(mg2, flow_director="FlowDirectorD8")  # initializing flow routing
spr2 = StreamPowerEroder(
mg2, K_sp=K_sp2, m_sp=m_sp2, n_sp=n_sp2, threshold_sp=0.0
) # initializing stream power incision
theta2 = m_sp2 / n_sp2
# initialize the component that will calculate channel steepness
sf2 = SteepnessFinder(mg2, reference_concavity=theta2, min_drainage_area=1000.0)
# initialize the component that will calculate the chi index
cf2 = ChiFinder(
mg2, min_drainage_area=1000.0, reference_concavity=theta2, use_true_dx=True
)
# Code Block 15
tmax = 1e5 # time for the model to run [yr] (Original value was 5E5 yr)
dt = 500 # time step [yr] (Original value was 500 yr)
total_time = 0 # amount of time the landscape has evolved [yr]
# total_time will increase as you keep running the code.
t = np.arange(0, tmax, dt) # each of the time steps that the code will run
# Code Block 16
# uplift_rate [m/yr] (value was 0.0001 m/yr for base landscape)
uplift_rate = np.ones(mg2.number_of_nodes) * 0.0001
## If you want to add a one-time event that uplifts only part of the
## landscape, uncomment the 3 lines below
# fault_location = 4000 # [m]
# uplift_amount = 10 # [m]
# z2[np.nonzero(mg2.node_y>fault_location)] += uplift_amount
## IMPORTANT! To use the below fault generator, comment the one-time
## uplift event above if it isn't already commented out.
## Code below creates a fault horizontally across the grid.
## Uplift rates are greater where y values > fault location.
## To use, uncomment the 5 code lines below and edit to your values
# fault_location = 4000 # [m]
# low_uplift_rate = 0.0001 # [m/yr]
# high_uplift_rate = 0.0004 # [m/yr]
# uplift_rate[np.nonzero(mg2.node_y<fault_location)] = low_uplift_rate
# uplift_rate[np.nonzero(mg2.node_y>fault_location)] = high_uplift_rate
## IMPORTANT! To use below rock uplift gradient, comment the two
## uplift options above if they aren't already commented out.
## If you want a linear gradient in uplift rate
## (increasing uplift into the range),
## uncomment the 4 code lines below and edit to your values.
# low_uplift_rate = 0.0001 # [m/yr]
# high_uplift_rate = 0.0004 # [m/yr]
## below is uplift gradient per node row index, NOT row value in meters
# uplift_rate_gradient = (high_uplift_rate - low_uplift_rate)/(number_of_rows-3)
# uplift_rate = low_uplift_rate + ((mg2.node_y / dxy)-1) * uplift_rate_gradient
# Code Block 17
for ti in t:
z2[mg2.core_nodes] += uplift_rate[mg2.core_nodes] * dt # uplift the landscape
frr2.run_one_step() # route flow
spr2.run_one_step(dt) # fluvial incision
total_time += dt # update time keeper
print(total_time)
# Code Block 18
# Plot topography
plt.figure(8)
imshow_grid(
mg2, "topographic__elevation", grid_units=("m", "m"), var_name="Elevation (m)"
)
plt.title(f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m")
max_elev = np.max(z2)
print("Maximum elevation is ", np.max(z2))
# Code Block 19
# Plot Channel Profiles and slope-area data along the channels
prf2 = ChannelProfiler(
mg2,
number_of_watersheds=3,
main_channel_only=True,
    minimum_channel_threshold=dxy2 ** 2,
)
prf2.run_one_step()
# plot the elevation as a function of distance upstream
plt.figure(9)
title_text = f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m"
prf2.plot_profiles(
xlabel="distance upstream (m)", ylabel="elevation (m)", title=title_text
)
# plot the location of the channels in map view
plt.figure(10)
prf2.plot_profiles_in_map_view()
# slope-area data in just the profiled channels
plt.figure(11)
for i, outlet_id in enumerate(prf2.data_structure):
for j, segment_id in enumerate(prf2.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf2.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
color = segment["color"]
plt.loglog(
mg2.at_node["drainage_area"][profile_ids],
mg2.at_node["topographic__steepest_slope"][profile_ids],
".",
color=color,
label=label,
)
plt.legend(loc="lower left")
plt.xlabel("drainage area (m^2)")
plt.ylabel("channel slope [m/m]")
title_text = f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m"
plt.title(title_text)
# Code Block 20
# Chi Plots
# calculate the chi index
cf2.calculate_chi()
# chi-elevation plots in the profiled channels
plt.figure(12)
for i, outlet_id in enumerate(prf2.data_structure):
for j, segment_id in enumerate(prf2.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf2.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
color = segment["color"]
plt.plot(
mg2.at_node["channel__chi_index"][profile_ids],
mg2.at_node["topographic__elevation"][profile_ids],
color=color,
label=label,
)
plt.xlabel("chi index (m)")
plt.ylabel("elevation (m)")
plt.legend(loc="lower right")
title_text = (
f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
plt.title(title_text)
# chi map
plt.figure(13)
imshow_grid(
mg2,
"channel__chi_index",
grid_units=("m", "m"),
var_name="Chi index (m)",
cmap="jet",
)
plt.title(
f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
# Code Block 21
# Plot channel steepness along profiles and across the landscape
# calculate channel steepness
sf2.calculate_steepnesses()
# plots of steepness vs. distance upstream in the profiled channels
plt.figure(14)
for i, outlet_id in enumerate(prf2.data_structure):
for j, segment_id in enumerate(prf2.data_structure[outlet_id]):
if j == 0:
label = "channel {i}".format(i=i + 1)
else:
label = "_nolegend_"
segment = prf2.data_structure[outlet_id][segment_id]
profile_ids = segment["ids"]
distance_upstream = segment["distances"]
color = segment["color"]
plt.plot(
distance_upstream,
mg2.at_node["channel__steepness_index"][profile_ids],
"x",
color=color,
label=label,
)
plt.xlabel("distance upstream (m)")
plt.ylabel("steepness index")
plt.legend(loc="upper left")
plt.title(
f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
# channel steepness map
plt.figure(15)
imshow_grid(
mg2,
"channel__steepness_index",
grid_units=("m", "m"),
var_name="Steepness index ",
cmap="jet",
)
plt.title(
f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
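# Optional added sketch (not part of the original exercise): map the elevation
# change between the modified run and the base landscape. This assumes
# imshow_grid accepts an array of node values (in addition to a field name)
# and that z1 and z2 live on grids with the same dimensions.
plt.figure(16)
imshow_grid(mg2, z2 - z1, grid_units=("m", "m"), var_name="Elevation change (m)")
plt.title(f"Elevation change after {total_time} yr of the modified run")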
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Working with ndarray
Step2: Examining ndarray
Step3: Why use numpy?
Step4: Basic Operations
Step5: Most Common Functions
Step6: Multi-Dimensional Array
Step7: zeros(shape, dtype=float, order='C') and ones(shape, dtype=float, order='C')
Step8: np.linspace(start, stop, num=50, endpoint=True, retstep=False)
Step9: random_sample(size=None)
Step10: Statistical Analysis
Step11: np.max(a, axis=None, out=None, keepdims=False)
Step12: np.min(a, axis=None, out=None, keepdims=False)
Step13: np.mean(a, axis=None, dtype=None, out=None, keepdims=False)
Step14: np.median(a, axis=None, out=None, overwrite_input=False)
Step15: np.std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Step16: np.sum(a, axis=None, dtype=None, out=None, keepdims=False)
Step17: Reshaping
Step18: np.ravel(a, order='C')
Step19: Slicing
Step20: Slicing a range
Step21: Stepping
Step22: Matrix Operations
Step23: Addition
Step24: Subtraction
Step25: Multiplication (Element by Element)
Step26: Multiplication (Matrix Multiplication)
Step27: Division
Step28: Square
Step29: Power
Step30: Transpose
Step31: Inverse
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
np.arange(10)
np.arange(1,10)
np.arange(1,10, 0.5)
np.arange(1,10, 3)
np.arange(1,10, 2, dtype=np.float64)
ds = np.arange(1,10,2)
ds.ndim
ds.shape
ds.size
ds.dtype
ds.itemsize
x=ds.data
list(x)
ds
# Memory Usage
ds.size * ds.itemsize
%%capture timeit_results
# Regular Python
%timeit python_list_1 = range(1,1000)
python_list_1 = range(1,1000)
python_list_2 = range(1,1000)
#Numpy
%timeit numpy_list_1 = np.arange(1,1000)
numpy_list_1 = np.arange(1,1000)
numpy_list_2 = np.arange(1,1000)
print timeit_results
# Function to calculate time in seconds
def return_time(timeit_result):
temp_time = float(timeit_result.split(" ")[5])
temp_unit = timeit_result.split(" ")[6]
if temp_unit == "ms":
temp_time = temp_time * 1e-3
elif temp_unit == "us":
temp_time = temp_time * 1e-6
elif temp_unit == "ns":
temp_time = temp_time * 1e-9
return temp_time
python_time = return_time(timeit_results.stdout.split("\n")[0])
numpy_time = return_time(timeit_results.stdout.split("\n")[1])
print "Python/NumPy: %.1f" % (python_time/numpy_time)
%%capture timeit_python
%%timeit
# Regular Python
[(x + y) for x, y in zip(python_list_1, python_list_2)]
[(x - y) for x, y in zip(python_list_1, python_list_2)]
[(x * y) for x, y in zip(python_list_1, python_list_2)]
[(x / y) for x, y in zip(python_list_1, python_list_2)];
print timeit_python
%%capture timeit_numpy
%%timeit
#Numpy
numpy_list_1 + numpy_list_2
numpy_list_1 - numpy_list_2
numpy_list_1 * numpy_list_2
numpy_list_1 / numpy_list_2;
print timeit_numpy
python_time = return_time(timeit_python.stdout)
numpy_time = return_time(timeit_numpy.stdout)
print "Python/NumPy: %.1f" % (python_time/numpy_time)
np.array([1,2,3,4,5])
np.array([[1,2],[3,4],[5,6]])
np.zeros((3,4))
np.zeros((3,4), dtype=np.int64)
np.ones((3,4))
np.linspace(1,5)
np.linspace(0,2,num=4)
np.linspace(0,2,num=4,endpoint=False)
np.random.random((2,3))
np.random.random_sample((2,3))
data_set = np.random.random((2,3))
data_set
np.max(data_set)
np.max(data_set, axis=0)
np.max(data_set, axis=1)
np.min(data_set)
np.mean(data_set)
np.median(data_set)
np.std(data_set)
np.sum(data_set)
np.reshape(data_set, (3,2))
np.reshape(data_set, (6,1))
np.reshape(data_set, (6))
np.ravel(data_set)
data_set = np.random.random((5,10))
data_set
data_set[1]
data_set[1][0]
data_set[1,0]
data_set[2:4]
data_set[2:4,0]
data_set[2:4,0:2]
data_set[:,0]
data_set[2:4:1]
data_set[::]
data_set[::2]
data_set[2:4]
data_set[2:4,::2]
import numpy as np
# Matrix A
A = np.array([[1,2],[3,4]])
# Matrix B
B = np.array([[3,4],[5,6]])
A+B
A-B
A*B
A.dot(B)
A/B
np.square(A)
np.power(A,3) #cube of matrix
A.transpose()
np.linalg.inv(A)
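# Quick added sanity check (a minimal sketch): the product of A and its
# inverse should be the identity matrix, up to floating-point error.
np.allclose(A.dot(np.linalg.inv(A)), np.eye(2))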
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to gradients and automatic differentiation
Step2: Computing gradients
Step3: Once you have recorded some operations, use GradientTape.gradient(target, sources) to calculate the gradient of some target (often a loss) relative to some sources (often the model's variables).
Step4: The example above uses scalars, but tf.GradientTape works just as easily on any tensor.
Step5: To get the gradient of the loss with respect to both variables, you can pass both as sources to the gradient method. The tape is flexible about how sources are passed and will accept any nested combination of lists or dictionaries, returning the gradients structured the same way (see tf.nest).
Step6: The gradient with respect to each source has the shape of that source.
Step7: Here is the gradient calculation again, this time passing a dictionary of variables.
Step8: Gradients with respect to a model
Step9: <a id="watches"></a>
Step10: You can list the variables being watched by the tape using the GradientTape.watched_variables method.
Step11: tf.GradientTape provides hooks that give the user control over what is or is not watched.
Step12: Conversely, to disable the default behavior of watching all tf.Variables, set watch_accessed_variables=False when creating the gradient tape. This calculation uses two variables, but only connects the gradient for one of them.
Step13: Since GradientTape.watch was not called on x0, no gradient is computed with respect to it.
Step14: Intermediate results
Step15: By default, the resources held by a GradientTape are released as soon as the GradientTape.gradient method is called. To compute multiple gradients over the same computation, create a gradient tape with persistent=True. This allows multiple calls to the gradient method, with resources released when the tape object is garbage collected. For example:
Step16: Notes on performance
Step17: Thus, if you request the gradients of multiple targets, the result for each source is as follows.
Step18: Similarly, if the target(s) are not scalar, the gradient of the sum is calculated.
Step19: This makes it simple to take the gradient of the sum of a collection of losses, or the gradient of the sum of an element-wise loss calculation.
Step20: Control flow
Step21: Because control statements themselves are not differentiable, they are invisible to gradient-based optimizers.
Step22: Getting a gradient of None
Step23: Here z is obviously not connected to x, but there are several less-obvious ways that a gradient can be disconnected.
Step24: 2. The calculation was done outside of TensorFlow.
Step25: 3. Took gradients through an integer or string.
Step26: TensorFlow doesn't automatically cast between types, so in practice you'll often get a type error instead of a missing gradient.
Step27: Similarly, tf.data.Dataset iterators and tf.queues are stateful, and will stop all gradients on tensors that pass through them.
Step28: If you need to differentiate through this op, you'll either need to implement the gradient and register it (using tf.RegisterGradient) or re-implement the function using other ops.
| <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
x = tf.Variable(3.0)
with tf.GradientTape() as tape:
y = x**2
# dy = 2x * dx
dy_dx = tape.gradient(y, x)
dy_dx.numpy()
w = tf.Variable(tf.random.normal((3, 2)), name='w')
b = tf.Variable(tf.zeros(2, dtype=tf.float32), name='b')
x = [[1., 2., 3.]]
with tf.GradientTape(persistent=True) as tape:
y = x @ w + b
loss = tf.reduce_mean(y**2)
[dl_dw, dl_db] = tape.gradient(loss, [w, b])
print(w.shape)
print(dl_dw.shape)
my_vars = {
'w': w,
'b': b
}
grad = tape.gradient(loss, my_vars)
grad['b']
layer = tf.keras.layers.Dense(2, activation='relu')
x = tf.constant([[1., 2., 3.]])
with tf.GradientTape() as tape:
# Forward pass
y = layer(x)
loss = tf.reduce_mean(y**2)
# Calculate gradients with respect to every trainable variable
grad = tape.gradient(loss, layer.trainable_variables)
for var, g in zip(layer.trainable_variables, grad):
print(f'{var.name}, shape: {g.shape}')
# A trainable variable
x0 = tf.Variable(3.0, name='x0')
# Not trainable
x1 = tf.Variable(3.0, name='x1', trainable=False)
# Not a Variable: A variable + tensor returns a tensor.
x2 = tf.Variable(2.0, name='x2') + 1.0
# Not a variable
x3 = tf.constant(3.0, name='x3')
with tf.GradientTape() as tape:
y = (x0**2) + (x1**2) + (x2**2)
grad = tape.gradient(y, [x0, x1, x2, x3])
for g in grad:
print(g)
[var.name for var in tape.watched_variables()]
x = tf.constant(3.0)
with tf.GradientTape() as tape:
tape.watch(x)
y = x**2
# dy = 2x * dx
dy_dx = tape.gradient(y, x)
print(dy_dx.numpy())
x0 = tf.Variable(0.0)
x1 = tf.Variable(10.0)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(x1)
y0 = tf.math.sin(x0)
y1 = tf.nn.softplus(x1)
y = y0 + y1
ys = tf.reduce_sum(y)
# dys/dx1 = exp(x1) / (1 + exp(x1)) = sigmoid(x1)
grad = tape.gradient(ys, {'x0': x0, 'x1': x1})
print('dy/dx0:', grad['x0'])
print('dy/dx1:', grad['x1'].numpy())
x = tf.constant(3.0)
with tf.GradientTape() as tape:
tape.watch(x)
y = x * x
z = y * y
# Use the tape to compute the gradient of z with respect to the
# intermediate value y.
# dz_dy = 2 * y and y = x ** 2 = 9
print(tape.gradient(z, y).numpy())
x = tf.constant([1, 3.0])
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
y = x * x
z = y * y
print(tape.gradient(z, x).numpy()) # 108.0 (4 * x**3 at x = 3)
print(tape.gradient(y, x).numpy()) # 6.0 (2 * x)
del tape # Drop the reference to the tape
x = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
y0 = x**2
y1 = 1 / x
print(tape.gradient(y0, x).numpy())
print(tape.gradient(y1, x).numpy())
x = tf.Variable(2.0)
with tf.GradientTape() as tape:
y0 = x**2
y1 = 1 / x
print(tape.gradient({'y0': y0, 'y1': y1}, x).numpy())
x = tf.Variable(2.)
with tf.GradientTape() as tape:
y = x * [3., 4.]
print(tape.gradient(y, x).numpy())
x = tf.linspace(-10.0, 10.0, 200+1)
with tf.GradientTape() as tape:
tape.watch(x)
y = tf.nn.sigmoid(x)
dy_dx = tape.gradient(y, x)
plt.plot(x, y, label='y')
plt.plot(x, dy_dx, label='dy/dx')
plt.legend()
_ = plt.xlabel('x')
x = tf.constant(1.0)
v0 = tf.Variable(2.0)
v1 = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
if x > 0.0:
result = v0
else:
result = v1**2
dv0, dv1 = tape.gradient(result, [v0, v1])
print(dv0)
print(dv1)
dx = tape.gradient(result, x)
print(dx)
x = tf.Variable(2.)
y = tf.Variable(3.)
with tf.GradientTape() as tape:
z = y * y
print(tape.gradient(z, x))
x = tf.Variable(2.0)
for epoch in range(2):
with tf.GradientTape() as tape:
y = x+1
print(type(x).__name__, ":", tape.gradient(y, x))
x = x + 1 # This should be `x.assign_add(1)`
x = tf.Variable([[1.0, 2.0],
[3.0, 4.0]], dtype=tf.float32)
with tf.GradientTape() as tape:
x2 = x**2
# This step is calculated with NumPy
y = np.mean(x2, axis=0)
# Like most ops, reduce_mean will cast the NumPy array to a constant tensor
# using `tf.convert_to_tensor`.
y = tf.reduce_mean(y, axis=0)
print(tape.gradient(y, x))
x = tf.constant(10)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
print(g.gradient(y, x))
x0 = tf.Variable(3.0)
x1 = tf.Variable(0.0)
with tf.GradientTape() as tape:
# Update x1 = x1 + x0.
x1.assign_add(x0)
# The tape starts recording from x1.
y = x1**2 # y = (x1 + x0)**2
# This doesn't work.
print(tape.gradient(y, x0)) #dy/dx0 = 2*(x1 + x0)
image = tf.Variable([[[0.5, 0.0, 0.0]]])
delta = tf.Variable(0.1)
with tf.GradientTape() as tape:
new_image = tf.image.adjust_contrast(image, delta)
try:
print(tape.gradient(new_image, [image, delta]))
assert False # This should not happen.
except LookupError as e:
print(f'{type(e).__name__}: {e}')
x = tf.Variable([2., 2.])
y = tf.Variable(3.)
with tf.GradientTape() as tape:
z = y**2
print(tape.gradient(z, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
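# A short added sketch (not part of the original guide): gradient tapes can be
# nested to compute higher-order derivatives, e.g. the second derivative of
# y = x**3 with respect to x.
x = tf.Variable(1.0)
with tf.GradientTape() as t2:
    with tf.GradientTape() as t1:
        y = x * x * x
    dy_dx = t1.gradient(y, x)      # 3 * x**2 -> 3.0
d2y_dx2 = t2.gradient(dy_dx, x)    # 6 * x    -> 6.0
print(dy_dx.numpy(), d2y_dx2.numpy())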
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we must generate a dictionary which maps vocabulary into a number
| <ASSISTANT_TASK:>
Python Code:
from itertools import chain
from collections import OrderedDict
reviews_merged = OrderedDict()
# Flatten the reviews, so each review is just a single list of words.
n_reviews = -1
for bus_id in set(review.business_id.values[:n_reviews]):
# This horrible line first collapses each review of a corresponding business into a list
# of lists, and then collapses the list of sentences to a long list of words
reviews_merged[bus_id] = list(chain.from_iterable(
chain.from_iterable( review.cleaned_tokenized[review.business_id==bus_id] )))
import time
from itertools import chain
print 'Generating vector dictionary....'
# Review level LDA
# review_flatten = list(chain.from_iterable(review.cleaned_tokenized.iloc[:]))
# id2word_wiki = corpora.Dictionary(review_flatten)
start = time.time()
# Business level LDA (all reviews for a business merged)
id2word_wiki = corpora.Dictionary(reviews_merged.values())
print 'Dictionary generated in %1.2f seconds'%(time.time()-start)
# Convert corpus to bag of words for use with gensim...
# See https://radimrehurek.com/gensim/tut1.html#from-strings-to-vectors
#corpus = map(lambda doc: id2word_wiki.doc2bow(doc), review_flatten)
corpus = map(lambda doc: id2word_wiki.doc2bow(doc), reviews_merged.values())
corpora.MmCorpus.serialize('../output/bar_corpus.mm', corpus)
# Can load the corpus with
# from gensim import corpora
# corpus = corpora.MmCorpus('../output/bar_corpus.mm')
import gensim
print 'Fitting LDA Model'
start = time.time()
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10,
id2word=id2word_wiki, passes=5,)
print 'LDA fit in %1.2f seconds'%(time.time()-start)
for topic in ldamodel.print_topics(num_topics=10, num_words=8):
print topic
from sklearn.decomposition import LatentDirichletAllocation, nmf
lda = LatentDirichletAllocation(n_topics=10, evaluate_every=1000, n_jobs=12, verbose=True)
# scikit-learn expects a document-term matrix (n_docs x n_terms), so convert
# the gensim bag-of-words corpus to a sparse matrix before fitting.
from gensim import matutils
doc_term_matrix = matutils.corpus2csc(corpus[:2000], num_terms=len(id2word_wiki)).T
lda.fit(doc_term_matrix)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step3: Include an exploratory visualization of the dataset
Step7: Histogram of the data shows that the training data is unevenly distributed. This might affect the training of the CNN model.
Step 2
Step13: Model Architecture
Step14: Train, Validate and Test the Model
Step15: Step 3
Step16: Predict the Sign Type for Each Image/Analyze Performance/ Output Soft Max
Step17: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step18: Project Writeup
| <ASSISTANT_TASK:>
Python Code:
# Import all the relevant modules.
import cv2
import csv
import matplotlib.image as mpimg
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from random import randint
import seaborn as sns
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow.contrib.layers import flatten
#Load the data
training_file = "train.p"
validation_file= "valid.p"
testing_file = "test.p"
with open(training_file, mode='rb') as file:
train = pickle.load(file)
with open(validation_file, mode='rb') as file:
valid = pickle.load(file)
with open(testing_file, mode='rb') as file:
test = pickle.load(file)
X_train_ori, y_train_ori = train['features'], train['labels']
#Create a array large enough to hold the new agumented images
#which will be created in the pre processing section
X_train = np.empty((3*X_train_ori.shape[0],X_train_ori.shape[1],X_train_ori.shape[2],X_train_ori.shape[3]))
y_train = np.empty((3*y_train_ori.shape[0]))
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
#Number of original training examples
n_train_ori = X_train_ori.shape[0]
print("Number of original training examples =", n_train_ori)
# Number of training examples after image agumentation
n_train = X_train.shape[0]
print("Number of training examples =", n_train)
# Number of validation examples
n_validation = X_valid.shape[0]
print("Number of validation examples =", n_validation)
# Number of testing examples.
n_test = X_test.shape[0]
print("Number of testing examples =", n_test)
# Shape of an traffic sign image
image_shape = X_train.shape[1:]
print("Image data shape =", image_shape)
# Unique classes/labels there are in the dataset.
n_classes = len(set(y_train_ori))
print("Number of classes =", n_classes)
### Data exploration visualization
# Visualizations will be shown in the notebook.
%matplotlib inline
def plotTrafficSign(n_rows, n_cols):
    This function displays random images from the training data set.
fig, axes = plt.subplots(nrows = n_rows, ncols = n_cols, figsize=(60,30))
for row in axes:
for col in row:
index = randint(0,n_train_ori)
col.imshow(X_train_ori[index,:,:,:])
col.set_title(y_train_ori[index])
#Plot traffic signs for visualization
plotTrafficSign(10, 5)
#Plot distribution of data
sns.distplot(y_train_ori, kde=False, bins=n_classes)
sns.distplot(y_valid, kde=False, bins=n_classes)
sns.distplot(y_test, kde=False, bins=n_classes)
### Preprocess the data.
def dataGeneration():
    This function augments the training data by creating new data (via image rotation)
global X_train
global y_train
global y_train_ori
global X_train_ori
global n_train_ori
    #Copy the original images; new data is created below by rotating them by +/-10 degrees
X_train[0:n_train_ori,:,:,:] = X_train_ori[:,:,:,:]
y_train[0:n_train_ori] = y_train_ori[:]
width = X_train.shape[1]
height = X_train.shape[2]
center = (width/ 2, height/ 2)
for index in range(n_train_ori):
#Rotate by 10 degrees
rotation = cv2.getRotationMatrix2D(center, 10, 1.0)
X_train[n_train_ori+index,:,:,:] = cv2.warpAffine(X_train_ori[index,:,:,:], rotation, (width, height))
y_train[n_train_ori+index] = y_train_ori[index]
        #Rotate by -10 degrees
rotation = cv2.getRotationMatrix2D(center, -10, 1.0)
X_train[2*n_train_ori+index,:,:,:] = cv2.warpAffine(X_train_ori[index,:,:,:], rotation, (width, height))
y_train[2*n_train_ori+index] = y_train_ori[index]
def normalize(X_input):
This function normalizes the data
#Min-Max normalization of data
range_min = 0.1
range_max = 0.9
data_min = 0
data_max = 255
X_input = range_min + (((X_input - data_min)*(range_max - range_min) )/(data_max - data_min))
return X_input
def randomize(X_input, y_input):
This function randomizes the data.
#Randomize the data
X_input, y_input = shuffle(X_input, y_input)
return X_input, y_input
dataGeneration()
X_train = normalize(X_train)
X_valid = normalize(X_valid)
X_test = normalize(X_test)
X_train, y_train = randomize(X_train, y_train)
def LeNet(x, keep_prob=1.0):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.1
global n_classes
# Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# Activation.
conv1 = tf.nn.relu(conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
#Dropout
conv1 = tf.nn.dropout(conv1, keep_prob)
# Layer 2: Convolutional. Output = 10x10x16.
conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
# Activation.
conv2 = tf.nn.relu(conv2)
# Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
#Dropout
conv2 = tf.nn.dropout(conv2, keep_prob)
# Flatten. Input = 5x5x16. Output = 400.
fc0 = flatten(conv2)
# Layer 3: Fully Connected. Input = 400. Output = 300.
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 300), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(300))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# Activation.
fc1 = tf.nn.relu(fc1)
#Dropout
fc1 = tf.nn.dropout(fc1, keep_prob)
# Layer 4: Fully Connected. Input = 300. Output = 200.
fc2_W = tf.Variable(tf.truncated_normal(shape=(300, 200), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(200))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# Activation.
fc2 = tf.nn.relu(fc2)
#Dropout
fc2 = tf.nn.dropout(fc2, keep_prob)
# Layer 5: Fully Connected. Input = 200. Output = n_classes.
fc3_W = tf.Variable(tf.truncated_normal(shape=(200, n_classes), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(n_classes))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
class CModel:
def __init__(self, input_conv, target, learning_rate = 0.001,
epochs = 10, batch_size = 128, keep_prob=1.0, debug_logging = False):
This is the ctor for the class CModel.
It initializes various hyper parameters required for training.
self.learning_rate = learning_rate
self.epoch = epochs
self.batch_size = batch_size
self.debug_logging = debug_logging
self.input_conv = input_conv
self.target = target
self.logits = None
self.one_hot_out_class = None
self.keep_prob = keep_prob
def __loss(self):
This function calculates the loss.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.one_hot_out_class, logits=self.logits)
loss_operation = tf.reduce_mean(cross_entropy)
return loss_operation
def __optimize(self, loss_operation):
This function runs the optimizer to train the weights.
optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
minimize_loss = optimizer.minimize(loss_operation)
return minimize_loss
def trainLeNet(self):
This function trains the LeNet network.
print("n_classes ",n_classes)
self.logits = LeNet(self.input_conv,self.keep_prob)
self.one_hot_out_class = tf.one_hot(self.target, n_classes)
loss_operation = self.__loss()
minimize_loss = self.__optimize(loss_operation)
return minimize_loss
def accuracy(self):
This function calculates the accuracy of the model.
prediction, _ = self.prediction()
correct_prediction = tf.equal(prediction, tf.argmax(self.one_hot_out_class, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy_operation
def prediction(self):
return tf.argmax(self.logits, 1), tf.nn.top_k(tf.nn.softmax(self.logits), k=5)
#Model training
class CEvaluate:
def __init__(self, learning_rate=0.001, epoch=10, batch_size=128):
self.input_conv = tf.placeholder(tf.float32, (None, 32, 32, 3))
self.target = tf.placeholder(tf.int32, (None))
self.keep_prob = tf.placeholder(tf.float32)
self.model = CModel(self.input_conv, self.target, learning_rate, epoch, batch_size, self.keep_prob)
self.train = self.model.trainLeNet()
self.accuracy_operation = self.model.accuracy()
self.epoch = epoch
self.batch_size = batch_size
self.saver = tf.train.Saver()
self.prediction = self.model.prediction()
def __evaluate(self, X_data, y_data, keep_prob=1):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, self.batch_size):
batch_x, batch_y = X_data[offset:offset+self.batch_size], y_data[offset:offset+self.batch_size]
accuracy = sess.run(self.accuracy_operation, feed_dict={self.input_conv: batch_x, \
self.target: batch_y, self.keep_prob: 1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
def test(self):
global X_test
global y_test
with tf.Session() as sess:
self.saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = self.__evaluate(X_test, y_test)
print("Test Accuracy = ", test_accuracy)
def predictions(self, test_images):
with tf.Session() as sess:
self.saver.restore(sess, './lenet')
predict, top_k_softmax = sess.run(self.prediction, feed_dict={self.input_conv: test_images, self.keep_prob: 1.0})
return predict, top_k_softmax
def run(self):
global X_train
global y_train
global X_valid
global y_valid
validation_accuracy = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
for i in range(self.epoch):
print("Epoch == ", i)
for offset in range(0, num_examples, self.batch_size):
end = offset + self.batch_size
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(self.train, feed_dict={self.input_conv: batch_x, self.target: batch_y, self.keep_prob: 0.9})
validation_accuracy.append(self.__evaluate(X_valid, y_valid))
print("Validation Accuracy == ", validation_accuracy[i])
self.saver.save(sess, './lenet')
plt.plot(validation_accuracy)
plt.xlabel("Epoch")
plt.ylabel("Validation Accuracy")
plt.title("Tracking of validation accuracy")
plt.show()
learning_rate = 0.001
epoch = 30
batch_size = 128
eval_model = CEvaluate(learning_rate, epoch, batch_size)
eval_model.run()
eval_model.test()
### Load the images and plot them.
import os
test_images = os.listdir('test_images')
num_test_images = 5
X_new_test = np.empty((num_test_images, 32, 32, 3))
y_new_test = np.empty(num_test_images)
dic = {"60.jpg":3, "70.jpg":4, "roadwork.jpg":25, "stop.jpg":14, "yield.jpg":13}
for index, image_name in enumerate(test_images):
image_path = os.path.join('test_images', image_name)
original_image = mpimg.imread(image_path)
X_new_test[index,:,:,:] = cv2.resize(original_image,(32,32),interpolation=cv2.INTER_AREA)
y_new_test[index] = dic[image_name]
plt.imshow(X_new_test[index,:,:,:])
plt.show()
with open('signnames.csv', mode='r') as file:
reader = csv.reader(file)
sign_mapping = {rows[0]:rows[1] for rows in reader}
X_new_test = normalize(X_new_test)
predict, top_k_softmax = eval_model.predictions(X_new_test)
for output,expected in zip(predict,y_new_test):
print("Expected {} ...... Output {}".format(sign_mapping[str(int(expected))], sign_mapping[str(output)]))
### Calculate the accuracy for these 5 new images.
count = 0
for result, expectation in zip(predict, y_new_test):
if result == expectation:
count = count+1
accuracy = count/num_test_images
print("accuracy of the prediction of new test images", accuracy)
print("top_k_softmax == ", top_k_softmax)
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
if activation_min != -1 & activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you want the confidence intervals
| <ASSISTANT_TASK:>
Python Code:
from lmfit.models import GaussianModel
# initialize the gaussian model
gm = GaussianModel()
# take a look at the parameter names
print gm.param_names
# I get RuntimeError since my numpy version is a little old
# guess parameters
par_guess = gm.guess(n,x=xpos)
# fit data
result = gm.fit(n, par_guess, x=xpos, method='leastsq')
# quick look at result
print result.fit_report()
# get best fit error and stderr
print result.params['amplitude'].value,result.params['amplitude'].stderr
print result.params['center'].value,result.params['center'].stderr
print result.params['sigma'].value,result.params['sigma'].stderr
fig = plt.figure()
plt.hist(xdata, bins=bins)
plt.plot(xpos, result.best_fit, 'green')
import lmfit
def my_gaussian_model(p, x, y):
a = np.float(p['a'])
b = np.float(p['b'])
c = np.float(p['c'])
return a/np.sqrt(2.*c) * np.exp( -np.power(x-b,2.)/2./np.power(c, 2.)) - y
pars = lmfit.Parameters()
pars.add_many(('a',0.1), ('b',0.1), ('c',0.1))
# initialize the minimizer
mini = lmfit.Minimizer(my_gaussian_model, pars, (xpos, n))
# do the minimization
result = mini.minimize(method='leastsq')
# print the fit report
print lmfit.fit_report(mini.params)
# NOTE
# the parameter 'a' in function my_gaussian_model is different from the built-in model in lmfit
# so the amplitude value is a little different
# predit the confidence interval of all parameters
ci, trace = lmfit.conf_interval(mini, sigmas=[0.68,0.95],
trace=True, verbose=False)
# ci = lmfit.conf_interval(mini)
lmfit.printfuncs.report_ci(ci)
print ci.values()
a,b,prob = trace['a']['a'], trace['a']['b'], trace['a']['prob']
cx, cy, grid = lmfit.conf_interval2d(mini, 'a','b',30,30)
plt.contourf(cx, cy, grid, np.linspace(0,1,11))
plt.xlabel('a')
plt.colorbar()
plt.ylabel('b')
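# A brief added sketch: overlay the best-fit curve on the binned data. This
# assumes result.params (or mini.params, depending on the lmfit version) holds
# the best-fit parameters, and reuses the residual function with y=0 to recover
# the model values; numpy is assumed to be imported as np, as used above.
best_model = my_gaussian_model(result.params, xpos, np.zeros_like(xpos))
plt.figure()
plt.plot(xpos, n, 'k.', label='binned data')
plt.plot(xpos, best_model, 'r-', label='best fit')
plt.legend()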
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Building the policy network
Step2: Loss function and updates
Step5: Computing cumulative rewards
Step7: Playing the game
Step9: Results & video
| <ASSISTANT_TASK:>
Python Code:
# This code creates a virtual display to draw game images on.
# If you are running locally, just ignore it
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
env = gym.make("CartPole-v0")
# gym compatibility: unwrap TimeLimit
if hasattr(env,'env'):
env=env.env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
import tensorflow as tf
tf.reset_default_graph()
# create input variables. We only need <s,a,R> for REINFORCE
states = tf.placeholder('float32', (None,)+state_dim, name="states")
actions = tf.placeholder('int32', name="action_ids")
cumulative_rewards = tf.placeholder('float32', name="cumulative_returns")
import keras
import keras.layers as L
#sess = tf.InteractiveSession()
#keras.backend.set_session(sess)
#<define network graph using raw tf or any deep learning library>
#network = keras.models.Sequential()
#network.add(L.InputLayer(state_dim))
#network.add(L.Dense(200, activation='relu'))
#network.add(L.Dense(200, activation='relu'))
#network.add(L.Dense(n_actions, activation='linear'))
network = keras.models.Sequential()
network.add(L.Dense(256, activation="relu", input_shape=state_dim, name="layer_1"))
network.add(L.Dense(n_actions, activation="linear", name="layer_2"))
print(network.summary())
#question: counting from the beginning of the model, the logits are in layer #9: model.layers[9].output
#logits = network.layers[2].output #<linear outputs (symbolic) of your network>
logits = network(states)
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
# utility function to pick action in one given state
def get_action_proba(s):
return policy.eval({states: [s]})[0]
# select log-probabilities for chosen actions, log pi(a_i|s_i)
indices = tf.stack([tf.range(tf.shape(log_policy)[0]), actions], axis=-1)
log_policy_for_actions = tf.gather_nd(log_policy, indices)
# REINFORCE objective function
# hint: you need to use log_policy_for_actions to get log probabilities for actions taken
J = tf.reduce_mean((log_policy_for_actions * cumulative_rewards), axis=-1)# <policy objective as in the last formula. Please use mean, not sum.>
# regularize with entropy
entropy = -tf.reduce_mean(tf.reduce_sum(policy * log_policy, axis=-1))  # entropy H = -sum(p * log p), averaged over states
# all network weights
all_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) #<a list of all trainable weights in your network>
# weight updates. maximizing J is same as minimizing -J. Adding negative entropy.
loss = -J - 0.1*entropy
update = tf.train.AdamOptimizer().minimize(loss, var_list=all_weights)
def get_cumulative_rewards(rewards, # rewards at each step
gamma=0.99 # discount for reward
):
take a list of immediate rewards r(s,a) for the whole session
compute cumulative rewards R(s,a) (a.k.a. G(s,a) in Sutton '16)
R_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...
The simple way to compute cumulative rewards is to iterate from last to first time tick
and compute R_t = r_t + gamma*R_{t+1} recurrently
You must return an array/list of cumulative rewards with as many elements as in the initial rewards.
#<your code here>
cumulative_rewards = np.zeros((len(rewards)))
cumulative_rewards[-1] = rewards[-1]
for t in range(len(rewards)-2, -1, -1):
cumulative_rewards[t] = rewards[t] + gamma * cumulative_rewards[t + 1]
return cumulative_rewards #< array of cumulative rewards>
assert len(get_cumulative_rewards(range(100))) == 100
assert np.allclose(get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9),
[1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
assert np.allclose(get_cumulative_rewards([0, 0, 1, -2, 3, -4, 0], gamma=0.5),
[0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])
assert np.allclose(get_cumulative_rewards([0, 0, 1, 2, 3, 4, 0], gamma=0),
[0, 0, 1, 2, 3, 4, 0])
print("looks good!")
def train_step(_states, _actions, _rewards):
given full session, trains agent with policy gradient
_cumulative_rewards = get_cumulative_rewards(_rewards)
update.run({states: _states, actions: _actions,
cumulative_rewards: _cumulative_rewards})
def generate_session(t_max=1000):
play env with REINFORCE agent and train at the session end
# arrays to record session
states, actions, rewards = [], [], []
s = env.reset()
for t in range(t_max):
# action probabilities array aka pi(a|s)
action_probas = get_action_proba(s)
a = np.random.choice(a=len(action_probas), p=action_probas) #<pick random action using action_probas>
new_s, r, done, info = env.step(a)
# record session history to train later
states.append(s)
actions.append(a)
rewards.append(r)
s = new_s
if done:
break
train_step(states, actions, rewards)
# technical: return session rewards to print them later
return sum(rewards)
s = tf.InteractiveSession()
s.run(tf.global_variables_initializer())
for i in range(100):
rewards = [generate_session() for _ in range(100)] # generate new sessions
print("mean reward:%.3f" % (np.mean(rewards)))
if np.mean(rewards) > 300:
print("You Win!") # but you can train even further
break
# record sessions
import gym.wrappers
env = gym.wrappers.Monitor(gym.make("CartPole-v0"),
directory="videos", force=True)
sessions = [generate_session() for _ in range(100)]
env.close()
# show video
from IPython.display import HTML
import os
video_names = list(
filter(lambda s: s.endswith(".mp4"), os.listdir("./videos/")))
HTML(
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
.format("./videos/"+video_names[-1])) # this may or may not be _last_ video. Try other indices
from submit import submit_cartpole
submit_cartpole(generate_session, "[email protected]", "Cecc5rcVxaVUYtsQ")
# That's all, thank you for your attention!
# Not having enough? There's an actor-critic waiting for you in the honor section.
# But make sure you've seen the videos first.
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A univariate example
Step2: We create a bimodal distribution
Step3: The simplest non-parametric technique for density estimation is the histogram.
Step4: Fitting with the default arguments
Step5: We present a figure of the fit, as well as the true distribution.
Step6: In the code above, default arguments were used. We can also vary the bandwidth of the kernel, as we will now see.
Step7: Comparing kernel functions
Step8: The available kernel functions
Step9: The available kernel functions on three data points
Step10: A more difficult case
Step11: The KDE is a distribution
Step12: Cumulative distribution, its inverse, and the survival function
Step13: The Cumulative Hazard Function
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
np.random.seed(12345) # Seed the random number generator for reproducible results
# Location, scale and weight for the two distributions
dist1_loc, dist1_scale, weight1 = -1 , .5, .25
dist2_loc, dist2_scale, weight2 = 1 , .5, .75
# Sample from a mixture of distributions
obs_dist = mixture_rvs(prob=[weight1, weight2], size=250,
dist=[stats.norm, stats.norm],
kwargs = (dict(loc=dist1_loc, scale=dist1_scale),
dict(loc=dist2_loc, scale=dist2_scale)))
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Scatter plot of data samples and histogram
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size)),
zorder=15, color='red', marker='x', alpha=0.5, label='Samples')
lines = ax.hist(obs_dist, bins=20, edgecolor='k', label='Histogram')
ax.legend(loc='best')
ax.grid(True, zorder=-5)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit() # Estimate the densities
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(obs_dist, bins=20, density=True, label='Histogram from samples',
zorder=5, edgecolor='k', alpha=0.5)
# Plot the KDE as fitted using the default arguments
ax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)
# Plot the true distribution
true_values = (stats.norm.pdf(loc=dist1_loc, scale=dist1_scale, x=kde.support)*weight1
+ stats.norm.pdf(loc=dist2_loc, scale=dist2_scale, x=kde.support)*weight2)
ax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)
# Plot the samples
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/40,
marker='x', color='red', zorder=20, label='Samples', alpha=0.5)
ax.legend(loc='best')
ax.grid(True, zorder=-5)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(obs_dist, bins=25, label='Histogram from samples',
zorder=5, edgecolor='k', density=True, alpha=0.5)
# Plot the KDE for various bandwidths
for bandwidth in [0.1, 0.2, 0.4]:
kde.fit(bw=bandwidth) # Estimate the densities
ax.plot(kde.support, kde.density, '--', lw=2, color='k', zorder=10,
label='KDE from samples, bw = {}'.format(round(bandwidth, 2)))
# Plot the true distribution
ax.plot(kde.support, true_values, lw=3, label='True distribution', zorder=15)
# Plot the samples
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,
marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)
ax.legend(loc='best')
ax.set_xlim([-3, 3])
ax.grid(True, zorder=-5)
from statsmodels.nonparametric.kde import kernel_switch
list(kernel_switch.keys())
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, (ker_name, ker_class) in enumerate(kernel_switch.items()):
# Initialize the kernel object
kernel = ker_class()
# Sample from the domain
domain = kernel.domain or [-3, 3]
x_vals = np.linspace(*domain, num=2**10)
y_vals = kernel(x_vals)
# Create a subplot, set the title
ax = fig.add_subplot(2, 4, i + 1)
ax.set_title('Kernel function "{}"'.format(ker_name))
ax.plot(x_vals, y_vals, lw=3, label='{}'.format(ker_name))
ax.scatter([0], [0], marker='x', color='red')
plt.grid(True, zorder=-5)
ax.set_xlim(domain)
plt.tight_layout()
# Create three equidistant points
data = np.linspace(-1, 1, 3)
kde = sm.nonparametric.KDEUnivariate(data)
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, kernel in enumerate(kernel_switch.keys()):
# Create a subplot, set the title
ax = fig.add_subplot(2, 4, i + 1)
ax.set_title('Kernel function "{}"'.format(kernel))
# Fit the model (estimate densities)
kde.fit(kernel=kernel, fft=False, gridsize=2**10)
# Create the plot
ax.plot(kde.support, kde.density, lw=3, label='KDE from samples', zorder=10)
ax.scatter(data, np.zeros_like(data), marker='x', color='red')
plt.grid(True, zorder=-5)
ax.set_xlim([-3, 3])
plt.tight_layout()
obs_dist = mixture_rvs([.25, .75], size=250, dist=[stats.norm, stats.beta],
kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=1, args=(1, .5))))
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit()
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.hist(obs_dist, bins=20, density=True, edgecolor='k', zorder=4, alpha=0.5)
ax.plot(kde.support, kde.density, lw=3, zorder=7)
# Plot the samples
ax.scatter(obs_dist, np.abs(np.random.randn(obs_dist.size))/50,
marker='x', color='red', zorder=20, label='Data samples', alpha=0.5)
ax.grid(True, zorder=-5)
obs_dist = mixture_rvs([.25, .75], size=1000, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5), dict(loc=1, scale=.5)))
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit(gridsize=2**10)
kde.entropy
kde.evaluate(-1)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cdf, lw=3, label='CDF')
ax.plot(np.linspace(0, 1, num = kde.icdf.size), kde.icdf, lw=3, label='Inverse CDF')
ax.plot(kde.support, kde.sf, lw=3, label='Survival function')
ax.legend(loc = 'best')
ax.grid(True, zorder=-5)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cumhazard, lw=3, label='Cumulative Hazard Function')
ax.legend(loc = 'best')
ax.grid(True, zorder=-5)
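# A short added example: the estimated inverse CDF can be used for
# inverse-transform sampling, i.e. drawing new samples that follow the KDE.
# This assumes kde.icdf is evaluated on an evenly spaced grid of probabilities.
u = np.random.uniform(size=500)
new_samples = np.interp(u, np.linspace(0, 1, num=kde.icdf.size), kde.icdf)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.hist(new_samples, bins=50, density=True, alpha=0.5, label='Samples drawn from the KDE')
ax.plot(kde.support, kde.density, lw=3, label='KDE density')
ax.legend(loc='best')
ax.grid(True, zorder=-5)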
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ... or call all the methods individually
| <ASSISTANT_TASK:>
Python Code:
# NOTE: This will take several minutes depending on the performance of your machine
audio_features = audioAnalyzer.analyze(audio_filename)
# plot the features
plt.rcParams['figure.figsize'] = [20, 8]
audioAnalyzer.plot(audio_features)
plt.show()
# audio metadata extraction
metadata = audioAnalyzer.crawl_musicbrainz_metadata(audio_filename)
# predominant melody extraction
pitch = audioAnalyzer.extract_pitch(audio_filename)
# pitch post filtering
pitch_filtered = audioAnalyzer.filter_pitch(pitch)
# histogram computation
pitch_distribution = audioAnalyzer.compute_pitch_distribution(pitch_filtered)
pitch_class_distribution = copy.deepcopy(pitch_distribution)
pitch_class_distribution.to_pcd()
# tonic identification
tonic = audioAnalyzer.identify_tonic(pitch_filtered)
# get the makam from metadata if possible else apply makam recognition
makams = audioAnalyzer.get_makams(metadata, pitch_filtered, tonic)
makam = list(makams)[0] # for now get the first makam
# transposition (ahenk) identification
transposition = audioAnalyzer.identify_transposition(tonic, makam)
# stable note extraction (tuning analysis)
note_models = audioAnalyzer.compute_note_models(pitch_distribution, tonic, makam)
# get the melodic progression model
melodic_progression = audioAnalyzer.compute_melodic_progression(pitch_filtered)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Create Classifier Pipeline
Step3: Cross Validation
Step4: Evaluate Model
Step5: To get an good measure of the model's accuracy, we calculate the mean of the three scores. This is our measure of model accuracy.
| <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing
from sklearn import cross_validation
from sklearn import svm
# Load the iris test data
iris = load_iris()
# View the iris data features for the first three rows
iris.data[0:3]
# View the iris data target for first three rows. '0' means it flower is of the setosa species.
iris.target[0:3]
# Create a pipeline that scales the data then trains a support vector classifier
classifier_pipeline = make_pipeline(preprocessing.StandardScaler(), svm.SVC(C=1))
# KFold/StratifiedKFold cross validation with 3 folds (the default)
# applying the classifier pipeline to the feature and target data
scores = cross_validation.cross_val_score(classifier_pipeline, iris.data, iris.target, cv=3)
scores
scores.mean()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Make a function for importing the station info for each line
Step3: Import the data for each line
Step4: Turn the list of dicts into a dict with line (key) and stations (value)
Step5: Use pickle to save the dictionary of lines with the station locations
| <ASSISTANT_TASK:>
Python Code:
import http.client, urllib.request, urllib.parse, urllib.error, base64
import urllib
import json
import pickle
def metro_line_import(line):
    Import station data from the WMATA API and format it as a dictionary with station name as the key and the
[latitude, longitude] as the values
headers = {
# Request headers
'api_key': '7f97903f83b24fcaa6dcc929bdb73437',
}
params = urllib.parse.urlencode({
# Request parameters
'LineCode': line,
})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/Rail.svc/json/jStations?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
#print(data)
conn.close()
data_js = json.loads(data)
line_dic = dict()
for ii in range(len(data_js['Stations'])):
line_dic[data_js['Stations'][ii]['Name']] = [data_js['Stations'][ii]['Lat'], data_js['Stations'][ii]['Lon']]
#print(line_dic)
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
return line_dic
lines = ['RD', 'YL', 'GR','BL', 'OR', 'SV']
line_stations = []
for ii in range(len(lines)):
line_stations.append(metro_line_import(lines[ii]))
station_data = dict()
for ii in range(len(lines)):
station_data[lines[ii]] = line_stations[ii]
pickle.dump( station_data, open( "station_data.p", "wb" ) )
station_data['RD']['Silver Spring']
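# A quick added check: the pickled dictionary can be loaded back like this
# (assuming the file written above is in the working directory).
with open("station_data.p", "rb") as f:
    reloaded = pickle.load(f)
reloaded['RD']['Silver Spring']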
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Problem 1) An (oversimplified) 1-D Model
Step3: Problem 1b
Step4: Problem 1c
Step5: Problem 2) Add Noise
Step6: Problem 2b
Step7: Problem 3) Flux Measurement
Step8: Problem 3b
Step9: Problem 3c
Step10: Problem 4) PSF Flux measurement
Step11: Problem 4b
Step12: Problem 4c
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
def phi(x, mu, fwhm):
    """Evaluate the 1D PSF N(mu, sigma^2) along x"""
sigma = fwhm/2.3548
flux = 1/np.sqrt(2*np.pi*sigma**2)*np.exp(-(x - mu)**2/(2*sigma**2))
return flux
x = np.linspace(0,20,21)
plt.plot(x, phi(x, 10, 3))
print("The flux of the star is: {:.3f}".format(sum(phi(x, 10, 3))))
S = 100 * np.ones_like(x)
F = 500
plt.plot(x, S + F*phi(x, 10, 3))
no_noise = S + F*phi(x, 10, 3)
noisy_flux = np.random.normal(no_noise, np.sqrt(no_noise))
plt.plot(x, no_noise)
plt.errorbar(x, noisy_flux, noisy_flux - no_noise, fmt = 'o')
def simulate(x, mu, fwhm, S, F):
source = F * phi(x, mu, fwhm)
sky_plus_source = S * np.ones_like(x) + source
noisy_flux = np.random.normal(sky_plus_source, np.sqrt(sky_plus_source))
return noisy_flux
x = np.linspace(-20,20,41)
sim_star = simulate(x, 0, 5, 100, 1000)
ap_flux = np.sum((sim_star - 100)[np.where(np.abs(x) <= 5)])
print("The star has flux = {:.3f}".format(ap_flux))
sim_fluxes = np.empty(1000)
for sim_num, dummy in enumerate(sim_fluxes):
sim_star = simulate(x, 0, 5, 100, 1000)
ap_flux = np.sum((sim_star - 100)[np.where(np.abs(x) <= 5)])
sim_fluxes[sim_num] = ap_flux
print("The mean flux = {:.3f} with variance = {:.3f}".format(np.mean(sim_fluxes),
np.var(sim_fluxes, ddof=1)))
psf = phi(x, 0, 5)
psf /= sum(psf**2) # normalization
sim_star = simulate(x, 0, 5, 100, 1000)
psf_flux = np.sum((sim_star-100)*psf)
print("The PSF flux is {:.3f}".format(psf_flux))
sim_fluxes = np.empty(1000)
for sim_num, dummy in enumerate(sim_fluxes):
sim_star = simulate(x, 0, 5, 100, 1000)
psf_flux = np.sum((sim_star-100)*psf)
sim_fluxes[sim_num] = psf_flux
print("The mean flux = {:.3f} with variance = {:.3f}".format(np.mean(sim_fluxes),
np.var(sim_fluxes, ddof=1)))
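# Illustrative comparison (not part of the original solution): measure the same simulated
# stars with both estimators to see the aperture vs. PSF flux scatter side by side.
ap_sims = np.empty(1000)
psf_sims = np.empty(1000)
for sim_num in range(1000):
    sim_star = simulate(x, 0, 5, 100, 1000)
    ap_sims[sim_num] = np.sum((sim_star - 100)[np.where(np.abs(x) <= 5)])
    psf_sims[sim_num] = np.sum((sim_star - 100)*psf)
print("Aperture flux std = {:.3f}, PSF flux std = {:.3f}".format(np.std(ap_sims, ddof=1),
                                                                 np.std(psf_sims, ddof=1)))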
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We are using TensorFlow-GPU 0.12.1 on Python 3.5.2, running on Windows 10 with Cuda 8.0.
Step2: Definitions
Step3: Data load
Step4: Model definition
Step5: Training and evaluation
Step6: Inspecting the result
Step7: Examples of correct predictions
Step8: Examples of incorrect predictions
| <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# We don't really need to import TensorFlow here since it's handled by Keras,
# but we do it in order to output the version we are using.
tf.__version__
from IPython.display import Image
from util import Util
u = Util()
import numpy as np
# Explicit random seed for reproducibility
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.datasets import mnist
batch_size = 512
nb_classes = 10
nb_epoch = 25
# path of the model graph
model_image_path = 'images/model_01_MNIST.png'
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
# dense layer size
dense_layer_size = 128
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
u.plot_images(X_train[0:9], y_train[0:9])
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape, name='covolution_1_' + str(nb_filters) + '_filters'))
model.add(Activation('relu', name='activation_1_relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], name='covolution_2_' + str(nb_filters) + '_filters'))
model.add(Activation('relu', name='activation_2_relu'))
model.add(MaxPooling2D(pool_size=pool_size, name='max_pooling_1_' + str(pool_size) + '_pool_size'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(dense_layer_size, name='fully_connected_1_' + str(dense_layer_size) + '_neurons'))
model.add(Activation('relu', name='activation_3_relu'))
model.add(Dropout(0.4))
model.add(Dense(nb_classes, name='output_' + str(nb_classes) + '_neurons'))
model.add(Activation('softmax', name='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
Image(u.maybe_save_network(model, model_image_path), width=300)
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
u.plot_history(history)
u.plot_history(history, metric='loss', loc='upper left')
# The predict_classes function outputs the highest probability class
# according to the trained classifier for each input example.
predicted_classes = model.predict_classes(X_test)
# Check which items we got right / wrong
correct_indices = np.nonzero(predicted_classes == y_test)[0]
incorrect_indices = np.nonzero(predicted_classes != y_test)[0]
u.plot_confusion_matrix(y_test, nb_classes, predicted_classes)
u.plot_images(X_test[correct_indices[:9]], y_test[correct_indices[:9]],
predicted_classes[correct_indices[:9]])
u.plot_images(X_test[incorrect_indices[:9]], y_test[incorrect_indices[:9]],
predicted_classes[incorrect_indices[:9]])
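# Illustrative sketch (not part of the original notebook): persist the trained network so it
# can be reused without retraining; the file names here are arbitrary choices.
with open('mnist_cnn_architecture.json', 'w') as f:
    f.write(model.to_json())
model.save_weights('mnist_cnn_weights.h5')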
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above is the program that performs the numerical computation.
Step2: 3. Visualization
Step3: Running animfunc saves an animation movie such as pendulum.mp4.
Step4: <video width="432" height="288" controls>
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.integrate import odeint
from math import sin
''' constants '''
m = 1 # mass of the pendulum [kg]
l = 1 # length of the pendulum [m]
g = 10 # Gravitational acceleration [m/s^2]
''' time setting '''
t_end = 10 # simulation time [s]
t_fps = 50 # frame per second. This value means smoothness of produced graph and animation
t_step = 1/t_fps
t = np.arange(0, t_end, t_step)
''' initial value '''
theta_init = 0 # initial value of theta [rad]
dtheta_init = 1 # initial value of dot theta [rad/s]
s_init = np.r_[theta_init, dtheta_init]
def odefunc(s, t):
theta = s[0]
dtheta = s[1]
ddtheta = -g/l*sin(theta) # <- Equation of motion
return np.r_[dtheta, ddtheta]
s = odeint(odefunc, s_init, t)
print('ODE calculation finished.')
print(np.c_[t, s])
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import cos
def animfunc(s, t):
''' Create mp4 movie file of a pendulum '''
plt.close()
fig = plt.figure()
plt.axis('scaled')
plt.xlim(-1, 1)
plt.ylim(-1.5, .5)
plt.grid('on')
draw_ceiling, = plt.plot([-2, 2], [0, 0], c='k', lw=2)
draw_pendulum, = plt.plot([], [], lw=4, c='b')
draw_mass, = plt.plot([], [], lw=2, marker='o', ms=20, mew=4, mec='b', mfc='c')
indicate_time = plt.text(-0.3, 0.25, [], fontsize=12)
def update_figure(i):
''' Set data of each movie frame '''
mass_x = l*sin(s[i, 0])
mass_y = - l*cos(s[i, 0])
pendlum_x = [0, mass_x]
pendlum_y = [0, mass_y]
draw_pendulum.set_data(pendlum_x, pendlum_y)
draw_mass.set_data(mass_x, mass_y)
indicate_time.set_text('t = {0:4.2f} [s]'.format(t[i]))
''' Create a movie file '''
line_ani = animation.FuncAnimation(fig, update_figure, frames=len(t))
line_ani.save('./pendulum.mp4', fps=t_fps)
print('pendulum.mp4 created')
animfunc(s, t)
plt.figure()
plt.plot(t, s[:, 0])
plt.xlabel('t [s]')
plt.ylabel('theta [rad]')
plt.savefig('pendulum_graph.png')
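# Illustrative check (not in the original notebook): with the pivot as the zero of potential
# energy, the total energy E = (1/2)*m*(l*dtheta)^2 - m*g*l*cos(theta) should stay nearly
# constant if the integration is accurate.
theta, dtheta = s[:, 0], s[:, 1]
E = 0.5*m*(l*dtheta)**2 - m*g*l*np.cos(theta)
plt.figure()
plt.plot(t, E)
plt.xlabel('t [s]')
plt.ylabel('E [J]')
plt.savefig('pendulum_energy.png')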
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters are given as follows. D and radius mean a diffusion constant and a radius of molecules, respectively. Dimensions of length and time are assumed to be micro-meter and second.
Step2: Make a model for all algorithms. No birth reaction with more than one product is accepted.
Step3: Save a result with ode as obs, and plot it
Step4: Simulating with gillespie (Bars represent standard error of the mean)
Step5: Simulating with meso
Step6: Simulating with spatiocyte
Step7: Simulating with egfrd
Step8: Simulating with bd
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ecell4.prelude import *
D = 1 # 0.01
radius = 0.005
N = 20 # a number of samples
y0 = {} # {'A': 60}
duration = 3
V = 8
with species_attributes():
A | {'radius': radius, 'D': D}
with reaction_rules():
~A > A | 45.0
A > ~A | 1.5
m = get_model()
ret1 = run_simulation(duration, y0=y0, volume=V, model=m)
ret1
ret2 = ensemble_simulations(duration, ndiv=20, y0=y0, volume=V, model=m, solver='gillespie', repeat=N)
ret2.plot('o', ret1, '-')
ret2 = ensemble_simulations(
duration, ndiv=20, y0=y0, volume=V, model=m, solver=('meso', Integer3(3, 3, 3), 0.25), repeat=N)
ret2.plot('o', ret1, '-')
ret2 = ensemble_simulations(
duration, ndiv=20, y0=y0, volume=V, model=m, solver=('spatiocyte', radius), repeat=N)
ret2.plot('o', ret1, '-')
ret2 = ensemble_simulations(
duration, ndiv=20, y0=y0, volume=V, model=m, solver=('egfrd', Integer3(8, 8, 8)), repeat=N)
ret2.plot('o', ret1, '-')
ret2 = ensemble_simulations(
duration, ndiv=20, y0=y0, volume=V, model=m, solver=('bd', Integer3(8, 8, 8), 0.1), repeat=N)
ret2.plot('o', ret1, '-')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 2
Step2: Question 4
| <ASSISTANT_TASK:>
Python Code:
##write the function here##
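# One possible sketch (assumption: horaValida should return True for a valid "HH:MM:SS"
# string such as '13:00:00' and False otherwise; the exact specification is not shown here,
# so adapt as needed).
def horaValida(hora):
    partes = hora.split(':')
    if len(partes) != 3 or not all(p.isdigit() and len(p) == 2 for p in partes):
        return False
    h, m, s = (int(p) for p in partes)
    return 0 <= h <= 23 and 0 <= m <= 59 and 0 <= s <= 59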
horaValida('13:00:00')
rut = input("enter your RUT: ")
##your code goes here##
import random
random.seed(int(rut))
##your code goes here##
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: STOCK PICKING 3.1 - FORECASTING ARIMAs
Step4: ARIMA
Step5: Massive Predictions
Step6: Reloading full results
Step7: Stock picking
Step8: Notice that we should not buy SNN (Smith & Nephew PLC) because the predicted cumulative return is too small vs. the actual cumulative return. Is this reasonable?
Step9: Note #2
Step10: Gain/Loss
Step11: The percentage of mistakes would be
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader.data as web
import datetime
def get_data(symbols,
add_ref=True,
data_source='yahoo',
price='Adj Close',
start='1/21/2010',
end='4/15/2016'):
    """Read stock data (adjusted close) for the given symbols from the given data source."""
if add_ref and 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
df = web.DataReader(symbols,
data_source=data_source,
start=start,
end=end)
return df[price,:,:]
def compute_daily_returns(df):
    """Compute and return the daily return values."""
    # Note: the returned DataFrame must have the same number of rows as the input
daily_returns = (df / df.shift(1)) - 1
daily_returns.ix[0,:] = 0
return daily_returns
def fill_missing_values(df_data):
    """Fill missing values in data frame, in place."""
df_data.fillna(method='ffill',inplace=True)
df_data.fillna(method='backfill',inplace=True)
return df_data
def cumulative_returns(df):
return df/df.ix[0,:] - 1
df = fill_missing_values(get_data(symbols=['GOOG','SPY','IBM','GLD'],
start='4/21/2015',
end='7/15/2016'))
df.plot()
plt.show(1)
df.head()
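# Quick illustration (not part of the original flow): the cumulative_returns helper defined
# above can be applied to the same frame to compare the symbols on a common scale.
cumulative_returns(df).plot()
plt.show()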
simbols = ["A","AA","AAL","AAP","AAPL","ABB","ABBV","ABC","ABMD","ABT","ABX","ACC","ACGL","ACM","ACN","ADBE","ADI","ADM","ADNT","ADP","ADS","ADSK","AEE","AEG","AEM","AEP","AER","AES","AET","AFG","AFL","AGCO","AGN","AGNC","AGR","AGU","AIG","AIV","AIZ","AJG","AKAM","ALB","ALGN","ALK","ALKS","ALL","ALLE","ALLY","ALSN","ALV","ALXN","AM","AMAT","AMD","AME","AMG","AMGN","AMH","AMP","AMT","AMTD","AMX","AMZN","ANET","ANSS","ANTM","AON","AOS","APA","APC","APD","APH","APO","AR","ARCC","ARD","ARE","ARMK","ARNC","ARRS","ARW","ASH","ASML","ASR","ASX","ATH","ATO","ATR","ATVI","AVB","AVGO","AVY","AWK","AXP","AXS","AXTA","AYI","AZN","AZO","BA","BABA","BAC","BAH","BAM","BAP","BAX","BBBY","BBD","BBL","BBRY","BBT","BBVA","BBY","BC","BCE","BCH","BCR","BCS","BDX","BEN","BERY","BF.B","BG","BHI","BHP","BIDU","BIIB","BIO","BIP","BIVV","BK","BKFS","BLK","BLL","BMA","BMO","BMRN","BMY","BNS","BOKF","BP","BPL","BPY","BR","BRCD","BRFS","BRK.A","BRO","BRX","BSAC","BSBR","BSMX","BSX","BT","BUD","BURL","BWA","BX","BXP","C","CA","CAG","CAH","CAJ","CAT","CB","CBG","CBOE","CBS","CBSH","CC","CCE","CCI","CCK","CCL","CDK","CDNS","CDW","CE","CELG","CEO","CERN","CF","CFG","CFR","CG","CGNX","CHA","CHD","CHK","CHKP","CHL","CHRW","CHT","CHTR","CHU","CI","CINF","CIT","CL","CLNS","CLR","CLX","CM","CMA","CMCSA","CME","CMG","CMI","CMS","CNA","CNC","CNHI","CNI","CNK","CNP","CNQ","COF","COG","COH","COHR","COL","COMM","COO","COP","COST","COTY","CP","CPA","CPB","CPL","CPRT","CPT","CRH","CRM","CS","CSCO","CSGP","CSL","CSX","CTAS","CTL","CTRP","CTSH","CTXS","CUK","CVE","CVS","CVX","CX","CXO","D","DAL","DB","DCI","DCM","DCP","DD","DE","DEI","DEO","DFS","DG","DGX","DHI","DHR","DIS","DISH","DKS","DLB","DLPH","DLR","DLTR","DNKN","DOV","DOW","DOX","DPS","DPZ","DRE","DRI","DTE","DUK","DVA","DVMT","DVN","DXC","DXCM","E","EA","EBAY","EC","ECA","ECL","ED","EDU","EEP","EFX","EGN","EIX","EL","ELS","EMN","EMR","ENB","ENBL","ENIA","ENIC","ENLK","EOCC","EOG","EPD","EPR","EQGP","EQIX","EQM","EQR","EQT","ERIC","ERIE","ES","ESLT","ESRX","ESS","ETE","ETFC","ETN","ETR","EV","EVHC","EW","EWBC","EXC","EXEL","EXPD","EXPE","EXR","F","FANG","FAST","FB","FBHS","FBR","FCE.A","FCX","FDC","FDS","FDX","FE","FFIV","FIS","FISV","FITB","FL","FLEX","FLIR","FLR","FLS","FLT","FMC","FMS","FMX","FNF","FOXA","FRC","FRT","FTI","FTNT","FTV","G","GD","GDDY","GE","GG","GGG","GGP","GIB","GIL","GILD","GIS","GLPI","GLW","GM","GNTX","GOLD","GOOGL","GPC","GPN","GPS","GRMN","GS","GSK","GT","GWW","GXP","H","HAL","HAS","HBAN","HBI","HCA","HCN","HCP","HD","HDB","HDS","HES","HFC","HHC","HIG","HII","HIW","HLF","HLT","HMC","HOG","HOLX","HON","HP","HPE","HPP","HPQ","HRB","HRL","HRS","HSBC","HSIC","HST","HSY","HTA","HTHT","HUBB","HUM","HUN","IAC","IBKR","IBM","IBN","ICE","IDXX","IEP","IEX","IFF","IHG","ILMN","INCY","INFO","INFY","ING","INGR","INTC","INTU","INVH","IONS","IP","IPG","IPGP","IR","IRM","ISRG","IT","ITUB","ITW","IVZ","IX","JAZZ","JBHT","JBL","JBLU","JD","JEC","JHX","JKHY","JLL","JNJ","JNPR","JPM","JWN","K","KAR","KB","KEP","KEY","KEYS","KHC","KIM","KKR","KLAC","KMB","KMI","KMX","KO","KORS","KR","KRC","KSS","KSU","KT","KYO","L","LAMR","LAZ","LB","LBTYA","LDOS","LEA","LECO","LEG","LEN","LFC","LFL","LH","LII","LKQ","LLL","LLY","LMT","LN","LNC","LNT","LOGI","LOGM","LOW","LPL","LPT","LRCX","LUK","LULU","LUV","LUX","LVLT","LVS","LW","LYB","LYG","LYV","M","MA","MAA","MAC","MAN","MAR","MAS","MAT","MBLY","MBT","MCD","MCHP","MCK","MCO","MD","MDLZ","MDT","MDU","MELI","MET","MFC","MFG","MGA","MGM","MHK","MIC","MIDD","MJN","MKC","MKL","MKTX","MLCO","MLM","MMC","MMM","MMP","MNST","MO","MOMO","MON","M
OS","MPC","MPLX","MRK","MRO","MRVL","MS","MSCC","MSCI","MSFT","MSI","MSM","MT","MTB","MTD","MTN","MTU","MU","MXIM","MYL","NBL","NCLH","NCR","NDAQ","NDSN","NEE","NEM","NEU","NFLX","NFX","NGG","NI","NKE","NLSN","NLY","NMR","NNN","NOC","NOK","NOV","NOW","NRZ","NSC","NTAP","NTES","NTRS","NUAN","NUE","NVDA","NVO","NVR","NVS","NWL","NXPI","NYCB","O","OA","OAK","OC","ODFL","OGE","OHI","OKE","OKS","OLED","OLN","OMC","ON","ORAN","ORCL","ORI","ORLY","OSK","OTEX","OXY","OZRK","PAA","PAC","PACW","PAGP","PANW","PAYX","PBCT","PBR","PCAR","PCG","PCLN","PE","PEG","PEP","PF","PFE","PFG","PG","PGR","PH","PHG","PHI","PHM","PII","PK","PKG","PKI","PKX","PLD","PM","PNC","PNR","PNRA","PNW","POOL","POST","POT","PPC","PPG","PPL","PRGO","PRU","PSA","PSO","PSX","PSXP","PTC","PTR","PUK","PVH","PWR","PX","PXD","PYPL","Q","QCOM","QGEN","QRVO","QVCA","RACE","RAI","RBS","RCI","RCL","RDS.A","RDY","RE","REG","REGN","RELX","RENX","RF","RGA","RHI","RHT","RIO","RJF","RL","RMD","RNR","ROK","ROL","ROP","ROST","RPM","RRC","RS","RSG","RSPP","RTN","RY","RYAAY","S","SABR","SAN","SAP","SATS","SBAC","SBNY","SBS","SBUX","SCCO","SCG","SCHW","SCI","SEE","SEIC","SEP","SERV","SGEN","SHG","SHLX","SHOP","SHPG","SHW","SINA","SIRI","SIVB","SIX","SJM","SJR","SKM","SLB","SLF","SLG","SLM","SLW","SMFG","SMG","SMI","SNA","SNAP","SNE","SNI","SNN","SNP","SNPS","SNV","SNY","SO","SON","SPB","SPG","SPGI","SPLK","SPLS","SPR","SQ","SRCL","SRE","SSL","SSNC","ST","STE","STI","STLD","STM","STO","STT","STWD","STX","STZ","SU","SUI","SWK","SWKS","SYF","SYK","SYMC","SYT","SYY","T","TAL","TAP","TD","TDG","TEAM","TECK","TEF","TEL","TER","TEVA","TFX","TGNA","TGT","TI","TIF","TJX","TKC","TLK","TLLP","TM","TMK","TMO","TMUS","TOL","TOT","TRGP","TRI","TRIP","TRMB","TROW","TRP","TRQ","TRU","TRV","TS","TSCO","TSLA","TSM","TSN","TSO","TSRO","TSS","TSU","TTC","TTM","TTWO","TU","TV","TWTR","TWX","TXN","TXT","TYL","UAL","UBS","UDR","UGI","UGP","UHAL","UHS","UL","ULTA","ULTI","UMC","UN","UNH","UNM","UNP","UPS","URI","USB","USFD","UTHR","UTX","V","VAL","VALE","VAR","VEDL","VEEV","VEON","VER","VFC","VIAB","VIPS","VIV","VLO","VMC","VMW","VNO","VNTV","VOD","VOYA","VRSK","VRSN","VRTX","VTR","VZ","W","WAB","WAL","WAT","WB","WBA","WBC","WBK","WCG","WCN","WDAY","WDC","WEC","WES","WF","WFC","WFM","WFT","WGP","WHR","WIT","WLK","WLTW","WM","WMB","WMT","WOOF","WPC","WPPGY","WPZ","WR","WRB","WRK","WST","WTR","WU","WUBA","WY","WYN","WYNN","XEC","XEL","XL","XLNX","XOM","XPO","XRAY","XRX","XYL","Y","YHOO","YNDX","YPF","YUM","YUMC","ZAYO","ZBH","ZBRA","ZION","ZTO","ZTS"]
len(simbols)
DELTA = 45 ## delay in days
start_date = (datetime.date.today() - datetime.timedelta(DELTA)).strftime("%m-%d-%Y")
print("start_date",start_date)
end_date = (datetime.date.today()).strftime("%m-%d-%Y")
print("end_date",end_date)
df = fill_missing_values(get_data(symbols=simbols,
start=start_date,
end=end_date))
df.shape
df.head()
pd.isnull(df).sum().sum()
to_remove = pd.isnull(df).sum()[pd.isnull(df).sum() > 0]
to_remove
to_remove.index
df = df.drop(to_remove.index,axis=1)
pd.isnull(df).sum().sum()
df.shape
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
## let's start with an example
series = df["AAPL"]
series = series.tolist()
pd.Series(series).plot()
plt.show()
p_tr = 0.66
tr_size = int(len(series) * p_tr)
train, test = series[0:tr_size], series[tr_size:len(series)]
print("tr_size:",tr_size,"xval_size:",(len(series)-tr_size))
from pandas import datetime
from pandas import DataFrame
from statsmodels.tsa.arima_model import ARIMA
from matplotlib import pyplot
# fit model
model = ARIMA(train, order=(5,0,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = DataFrame(model_fit.resid)
residuals.plot()
pyplot.show()
residuals.plot(kind='kde')
pyplot.show()
print(residuals.describe())
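# Additional diagnostic (illustrative, not in the original notebook): if the ARIMA fit has
# captured the structure, the residual autocorrelation should look like white noise.
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(model_fit.resid)
pyplot.show()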
model_fit.aic
from numpy.linalg import LinAlgError
def get_best_arima(sr,maxord = [6,6,6]):
best_aic = 1000000
best_ord = maxord
best_model = None
for p in range(maxord[0]):
for q in range(maxord[1]):
for d in range(maxord[2]):
try:
model = ARIMA(sr, order=(p,q,d))
model_fit = model.fit(disp=0)
if (best_aic > model_fit.aic):
best_aic = model_fit.aic
best_ord = [p,q,d]
best_model = model_fit
except:
pass
return best_aic, best_ord , best_model
# supress warnings
import warnings
warnings.filterwarnings('ignore')
best_aic, best_ord , best_model = get_best_arima(train)
print("best_aic:",best_aic)
print("best_ord:",best_ord)
def predict_arima(train,steps,best_order,verbose=True,do_plot=True):
history = [x for x in train]
predictions = list()
yhat = train[len(train)-1]
for t in range(steps):
model = ARIMA(history, order=(best_order[0],best_order[1],best_order[2]))
try:
model_fit = model.fit(disp=0)
output = model_fit.forecast()
except:
pass
if not(np.isnan(output[0] )) and not(np.isinf(output[0])):
yhat = np.asscalar(output[0])
predictions.append(yhat)
history.append(yhat)
return predictions
predictions = predict_arima(train,len(test),best_ord,verbose=True,do_plot=True)
MSE = mean_squared_error(test, predictions)
print("MSE:",MSE)
pyplot.plot(test , label="test data")
pyplot.plot(predictions, color='red' , label="prediction")
pyplot.legend()
pyplot.show()
def predict_arima_worse(train,steps,best_order,verbose=True,do_plot=True):
history = [x for x in train]
predictions = list()
model = ARIMA(history, order=(best_order[0],best_order[1],best_order[2]))
try:
model_fit = model.fit(disp=0)
output = model_fit.forecast(steps=steps)
except:
pass
yhat = history[len(history)-1]
for i in range(steps):
if not(np.isnan(output[0][i] )) and not(np.isinf(output[0][i])):
yhat = np.asscalar(output[0][i] )
predictions.append(yhat)
return predictions
predictions = predict_arima_worse(train,len(test),best_ord,verbose=True,do_plot=True)
MSE = mean_squared_error(test, predictions)
print("MSE:",MSE)
pyplot.plot(test , label="test data")
pyplot.plot(predictions, color='red' , label="prediction")
pyplot.legend()
pyplot.show()
arima_perf = DataFrame({'security': df.columns.tolist(),
'cumulative_return_test': 0,
'ARIMA_pred_cumulative_return_test': 0,
'ARIMA_MSE_cumulative_return_test': 0,
'ARIMA_R2_cumulative_return_test': 0,
'ARIMA_MSE_price_test': 0 ,
'ARIMA_R2_price_test': 0 ,
'cumulative_return_xval': 0,
'ARIMA_pred_cumulative_return_xval': 0,
'ARIMA_MSE_cumulative_return_xval': 0,
'ARIMA_R2_cumulative_return_xval': 0,
'ARIMA_MSE_price_xval': 0 ,
'ARIMA_R2_price_xval': 0 ,
'ARIMA_best_order_0': 0,
'ARIMA_best_order_1': 0,
'ARIMA_best_order_2': 0} )
arima_perf = arima_perf.set_index(['security'])
p_tr = 0.60
p_xval = 0.20
p_test = 1-p_tr-p_xval
for i,sec in enumerate(arima_perf.index.tolist()):
if (i % 100 == 0) or (i ==10):
print(i,sec)
## data
series = df[sec]
series = series.tolist()
tr_size = int(len(series) * p_tr)
xval_size = int(len(series) * p_xval)
train, txval, test = series[0:tr_size], series[tr_size:(tr_size+xval_size)] , series[(tr_size+xval_size):len(series)]
## fit model
best_aic, best_ord , best_model = get_best_arima(train)
## predict, assess
predictions = predict_arima(train,(len(txval)+len(test)),best_ord,verbose=False,do_plot=False)
## store
arima_perf.loc[sec,'ARIMA_best_order_0'] = best_ord[0]
arima_perf.loc[sec,'ARIMA_best_order_1'] = best_ord[1]
arima_perf.loc[sec,'ARIMA_best_order_2'] = best_ord[2]
# xval
pred_cumulative_returns_xval = cumulative_returns(pd.Series(predictions[0:xval_size]))
cumulative_returns_xval = cumulative_returns(pd.Series(txval))
arima_perf.loc[sec,'cumulative_return_xval'] = cumulative_returns_xval[len(cumulative_returns_xval)-1]
arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval'] = pred_cumulative_returns_xval[len(pred_cumulative_returns_xval)-1]
arima_perf.loc[sec,'ARIMA_MSE_cumulative_return_xval'] = mean_squared_error(cumulative_returns_xval, pred_cumulative_returns_xval)
arima_perf.loc[sec,'ARIMA_R2_cumulative_return_xval'] = r2_score(cumulative_returns_xval, pred_cumulative_returns_xval)
arima_perf.loc[sec,'ARIMA_MSE_price_xval'] = mean_squared_error(txval, predictions[0:xval_size])
arima_perf.loc[sec,'ARIMA_R2_price_xval'] = r2_score(txval, predictions[0:xval_size])
# test
pred_cumulative_returns_test = cumulative_returns(pd.Series(predictions[xval_size:]))
cumulative_returns_test = cumulative_returns(pd.Series(test))
arima_perf.loc[sec,'cumulative_return_test'] = cumulative_returns_test[len(cumulative_returns_test)-1]
arima_perf.loc[sec,'ARIMA_pred_cumulative_return_test'] = pred_cumulative_returns_test[len(pred_cumulative_returns_test)-1]
arima_perf.loc[sec,'ARIMA_MSE_cumulative_return_test'] = mean_squared_error(cumulative_returns_test, pred_cumulative_returns_test)
arima_perf.loc[sec,'ARIMA_R2_cumulative_return_test'] = r2_score(cumulative_returns_test, pred_cumulative_returns_test)
arima_perf.loc[sec,'ARIMA_MSE_price_test'] = mean_squared_error(test, predictions[xval_size:])
arima_perf.loc[sec,'ARIMA_R2_price_test'] = r2_score(test, predictions[xval_size:])
arima_perf.sort_values(by='ARIMA_MSE_price_test' , ascending=False).head(5)
#arima_perf.to_csv('data/arima_perf.csv')
arima_perf = pd.read_csv('data/arima_perf.csv')
arima_perf = arima_perf.set_index('security')
arima_perf.sort_values(by='ARIMA_R2_cumulative_return_xval' , ascending=False).head(10)
arima_perf['ACTION'] = ''
for i,sec in enumerate(arima_perf.index.tolist()):
if arima_perf.loc[sec,'ARIMA_R2_cumulative_return_xval'] > 0.65:
if arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval'] > 0 and arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval']/arima_perf.loc[sec,'cumulative_return_xval'] >= 0.7 and arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval']/arima_perf.loc[sec,'cumulative_return_xval'] <= 2:
arima_perf.loc[sec,'ACTION'] = 'BUY'
if arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval'] < 0 and arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval']/arima_perf.loc[sec,'cumulative_return_xval'] >= 0.7 and arima_perf.loc[sec,'ARIMA_pred_cumulative_return_xval']/arima_perf.loc[sec,'cumulative_return_xval'] <= 2:
arima_perf.loc[sec,'ACTION'] = 'SELL'
arima_perf = arima_perf.sort_values(by='ARIMA_R2_cumulative_return_xval' , ascending=False)
arima_perf[['ACTION','ARIMA_R2_cumulative_return_xval','ARIMA_pred_cumulative_return_xval','cumulative_return_xval']][:20]
arima_perf = arima_perf.sort_values(by='ARIMA_pred_cumulative_return_xval' , ascending=False)
loss = arima_perf[['ACTION','ARIMA_R2_cumulative_return_xval','ARIMA_pred_cumulative_return_xval','cumulative_return_xval']][:20]
loss['UNEXPECTED_LOSS'] = loss['cumulative_return_xval'] - loss['ARIMA_pred_cumulative_return_xval']
loss
arima_perf = arima_perf.sort_values(by='ARIMA_MSE_cumulative_return_xval' , ascending=True)
flat = arima_perf[['ACTION','ARIMA_R2_cumulative_return_xval','ARIMA_MSE_cumulative_return_xval','ARIMA_pred_cumulative_return_xval','cumulative_return_xval']][:20]
flat
arima_perf = arima_perf.sort_values(by='ARIMA_R2_cumulative_return_xval' , ascending=False)
gain_loss = arima_perf[arima_perf['ACTION'] != ''][['ACTION','ARIMA_R2_cumulative_return_test','ARIMA_pred_cumulative_return_test','cumulative_return_test']]
gain_loss['EX_POST'] = ''
for i,sec in enumerate(gain_loss.index.tolist()):
if gain_loss.loc[sec,'cumulative_return_test'] * gain_loss.loc[sec,'ARIMA_pred_cumulative_return_test'] < 0:
gain_loss.loc[sec,'EX_POST'] = 'WRONG'
elif np.absolute(gain_loss.loc[sec,'cumulative_return_test']) < np.absolute(gain_loss.loc[sec,'ARIMA_pred_cumulative_return_test']):
gain_loss.loc[sec,'EX_POST'] = 'ALMOST CORRECT'
else:
gain_loss.loc[sec,'EX_POST'] = 'CORRECT'
gain_loss
gain_loss[gain_loss['EX_POST'] == 'WRONG'].shape[0]/gain_loss.shape[0]
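# Illustrative shortcut (not part of the original analysis): the same error rate can be read
# off a normalized breakdown of the EX_POST labels.
gain_loss['EX_POST'].value_counts(normalize=True)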
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Exercise 2
Step3: Exercise 3
Step4: b. Confidence Intervals.
Step5: Exercise 4
| <ASSISTANT_TASK:>
Python Code:
# Useful Functions
class DiscreteRandomVariable:
def __init__(self, a=0, b=1):
self.variableType = ""
self.low = a
self.high = b
return
def draw(self, numberOfSamples):
samples = np.random.randint(self.low, self.high, numberOfSamples)
return samples
class BinomialRandomVariable(DiscreteRandomVariable):
def __init__(self, numberOfTrials = 10, probabilityOfSuccess = 0.5):
self.variableType = "Binomial"
self.numberOfTrials = numberOfTrials
self.probabilityOfSuccess = probabilityOfSuccess
return
def draw(self, numberOfSamples):
samples = np.random.binomial(self.numberOfTrials, self.probabilityOfSuccess, numberOfSamples)
return samples
def factorial(n):return reduce(lambda x,y:x*y,[1]+range(1,n+1))
# Useful Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.stats as stats
from statsmodels.stats import stattools
from __future__ import division
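# Quick illustration (not one of the exercise solutions): drawing samples from the helper
# classes defined above, e.g. a fair coin as a discrete variable on {0, 1}.
coin = DiscreteRandomVariable(0, 2)
print coin.draw(10)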
# Histograms with 10 tosses.
## Your code goes here
plt.xlabel('Value')
plt.ylabel('Occurences')
plt.legend(['Coin Tosses']);
# Histograms with 1000000 tosses.
## Your code goes here
plt.xlabel('Value')
plt.ylabel('Occurences')
plt.legend(['Coin Tosses']);
# Binomial distribution with p=0.25 and n=20
## Your code goes here.
plt.title('Binomial Distribution with p=0.25 and n=20')
plt.xlabel('Value')
plt.ylabel('Occurences')
plt.legend(['Die Rolls']);
# Finding x which occurs most often
## Your code goes here
# Calculating the probability of finding x.
## Your code goes here
# Graphing a normal distribution PDF.
## Your code goes here
mu =
sigma =
x = np.linspace(-30, 30, 200)
y =
plt.plot(x, y)
plt.title('Graph of PDF with mu = 0 and sigma = 5')
plt.xlabel('Value')
plt.ylabel('Probability');
# Finding the 1st, 2nd, and 3rd confidence intervals.
## Your code goes here
first_ci =
second_ci =
third_ci =
print '1-sigma -> mu +/-', sigma
print '2-sigma -> mu +/-', second_ci[1]
print '3-sigma -> mu +/-', third_ci[1]
## Graphing.
## Your code goes here.
plt.title('Graph of PDF with 3 confidence intervals.')
plt.legend();
# Collect prices and returns.
prices = get_pricing('SPY', start_date = '2016-01-01', end_date='2016-05-01',
fields = 'price')
returns = prices.pct_change()[1:]
# Calculating the mean and standard deviation.
## Your code goes here
sample_mean =
sample_std_dev =
x = np.linspace(-(sample_mean + 4 * sample_std_dev), (sample_mean + 4 * sample_std_dev), len(returns))
sample_distribution = ((1/(sample_std_dev * np.sqrt(2 * np.pi))) *
np.exp(-(x - sample_mean)*(x - sample_mean) / (2 * sample_std_dev * sample_std_dev)))
# Plotting histograms and confidence intervals.
## Your code goes here
plt.title('Graph of returns with fitted PDF and the 3 confidence intervals. ')
plt.legend();
# Run the JB test for normality.
## Your code goes here
print "The JB test p-value is: ", p_value
print "We reject the hypothesis that the data are normally distributed ", p_value < cutoff
print "The skewness of the returns is: ", skewness
print "The kurtosis of the returns is: ", kurtosis
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Request Data
Step3: Now that we have a function, we can run a query like this
Step4: Pandas Dataframes
Step5: Additional operations
Step6: Ordering the data
Step7: Grouping the data
Step9: Reset Scored Connections
Step10: Sandbox
| <ASSISTANT_TASK:>
Python Code:
import datetime
import pandas as pd
import numpy as np
import linecache, bisect
import os
spath = os.getcwd()
path = spath.split("/")
date = path[len(path)-1]
def makeGraphqlRequest(query, variables):
return GraphQLClient.request(query, variables)
suspicious_query = """query($date:SpotDateType) {
flow {
suspicious(date:$date)
{
srcIp
dstIp
srcPort
dstPort
score
srcIp_domain
dstIp_rep
protocol
outBytes
inPkts
srcIp_rep
inBytes
srcIp_isInternal
rank
dstIp_geoloc
tstart
outPkts
dstIp_isInternal
dstIp_domain
}
}
}"""
##If you want to use a different date for your query, switch the
##commented/uncommented following lines
variables={
'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# 'date': "2016-10-08"
}
suspicious_request = makeGraphqlRequest(suspicious_query,variables)
##The variable suspicious_request will contain the resulting data from the query.
results = suspicious_request['data']['flow']['suspicious']
df = pd.read_json(json.dumps(results))
##Printing only the selected column list from the dataframe
##By default it will only print the first 15 results
print df[['srcIp','dstIp','srcPort','dstPort','score']]
##Filter results where the destination port = 3389
##The resulting data will be stored in df2
df2 = df[df['dstPort'].isin(['3389'])]
print df2[['srcIp','dstIp','srcPort','dstPort','score']]
srtd = df.sort_values(by="rank")
print srtd[['rank','srcIp','dstIp','srcPort','dstPort','score']]
## This command will group the results by pairs of source-destination IP
## summarizing all other columns
grpd = df.groupby(['srcIp','dstIp']).sum()
## This will print the resulting dataframe displaying the input and output bytes columnns
print grpd[["inBytes","inPkts"]]
# reset_scores = mutation($date:SpotDateType!) {
# flow{
# resetScoredConnections(date:$date){
# success
# }
# }
# }
# variables={
# 'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# }
# request = makeGraphqlRequest(reset_scores,variables)
# print request['data']['flow']['resetScoredConnections ']['success']
#Your code here
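# Example sandbox query (illustrative only): the ten connections that moved the most data,
# using the inBytes column returned by the GraphQL query above.
df.sort_values(by='inBytes', ascending=False)[['srcIp','dstIp','inBytes','outBytes']].head(10)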
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Dataset Parameters
Step3: times
Step4: Compute Options
Step5: dynamics_method
Step6: The 'dynamics_method' parameter controls how stars and components are placed in the coordinate system as a function of time and has several choices
Step7: The 'ltte' parameter sets whether light travel time effects (Roemer delay) are included. If set to False, the positions and velocities are returned as they actually are for that given object at that given time. If set to True, they are instead returned as they were or will be when their light reaches the origin of the coordinate system.
Step8: Plotting
Step9: As always, you have access to any of the arrays for either axes, so if you want to plot 'vus' vs 'times'
Step10: We can also plot the orbit in 3D.
| <ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.1,<2.2"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('orb')
print b.filter(kind='orb')
print b['times']
print b['compute']
print b['dynamics_method']
print b['ltte']
b.set_value_all('times', np.linspace(0,3,201))
b.run_compute()
b['orb@model'].twigs
print b['times@primary@orb01@orb@model']
print b['us@primary@orb01@orb@model']
print b['vus@primary@orb01@orb@model']
afig, mplfig = b['orb@model'].plot(show=True)
afig, mplfig = b['orb@model'].plot(x='times', y='vus', show=True)
afig, mplfig = b['orb@model'].plot(projection='3d', xlim=(-4,4), ylim=(-4,4), zlim=(-4,4), show=True)
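# Extra illustration (not in the original tutorial): the u, v positions are plain arrays, so
# they can also be pulled out with get_value and plotted directly with matplotlib (assuming
# a 'vs@primary' twig exists alongside the 'us@primary' twig shown above).
us = b.get_value('us@primary@orb01@orb@model')
vs = b.get_value('vs@primary@orb01@orb@model')
plt.figure()
plt.plot(us, vs)
plt.xlabel('u')
plt.ylabel('v')
plt.show()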
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
| <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As with previous notebooks we will use an action selection function to give us an exploration strategy. Feel free to change this function and play with it to see what happens. For now, let me just provide a couple of functions.
Step2: Did you see that neuro_q_learning function??? I had to add comments to this one to make it easier to understand. There are a couple of things that you might want to read about. Check out the "Further reading" sections on the README.md files for that.
Step4: Hope you enjoyed your vacations.
Step5: Let's close up the environment and upload (in case you want to).
Step6: Now, let's look at some of the statistics
Step8: Cool, not bad at all!
Step9: Your turn
Step10: Let's test this puppy.
Step12: Wake up!!!
Step13: You happy with it????? Are you sure????
Step15: How about running your fully-trained greedy agent?
| <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tempfile
import base64
import pprint
import random
import json
import sys
import gym
import io
from gym import wrappers
from collections import deque
from subprocess import check_output
from IPython.display import HTML
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
def action_selection(state, model, episode, n_episodes):
epsilon = 0.99 if episode < n_episodes//4 else 0.33 if episode < n_episodes//2 else 0.
values = model.predict(state.reshape(1, state.shape[0]))[0]
if np.random.random() < epsilon:
action = np.random.randint(len(values))
else:
action = np.argmax(values)
return action, epsilon
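# Alternative exploration strategy (illustrative sketch, not used below): a softmax /
# Boltzmann selection that samples actions in proportion to exp(Q / temperature).
def action_selection_softmax(state, model, temperature=1.0):
    values = model.predict(state.reshape(1, state.shape[0]))[0]
    prefs = np.exp((values - np.max(values)) / temperature)
    probs = prefs / np.sum(prefs)
    action = np.random.choice(len(values), p=probs)
    return action, temperature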
def neuro_q_learning(env, gamma = 0.99):
nS = env.observation_space.shape[0]
nA = env.env.action_space.n
# memory bank
memory_bank = deque()
memory_bank_size = 100000
# function approximator
model = Sequential()
model.add(Dense(64, input_dim=nS, activation='relu'))
model.add(Dense(nA, activation='linear'))
model.compile(loss='mse', optimizer='adam')
# constant values
n_episodes = 50000
batch_size = 256
training_frequency = 20
# for statistics
epsilons = []
states = []
actions = []
# interactions
for episode in range(n_episodes):
state = env.reset()
done = False
# each episode
while not done:
states.append(state)
# select action
action, epsilon = action_selection(state, model, episode, n_episodes)
epsilons.append(epsilon)
actions.append(action)
# save history in memory bank
nstate, reward, done, info = env.step(action)
memory_bank.append((state, action, reward, nstate, done))
if len(memory_bank) > memory_bank_size:
memory_bank.popleft()
# iterate to next state
state = nstate
# only every few episodes enter training and update neural network weights
if episode % training_frequency == 0 and len(memory_bank) == memory_bank_size:
# randomly select batches of samples from the history
# for training to prevent values spiking due to high
# correlation of sequential values
minibatch = np.array(random.sample(memory_bank, batch_size))
# extract values by type from the minibatch
state_batch = np.array(minibatch[:,0].tolist())
action_batch = np.array(minibatch[:,1].tolist())
rewards_batch = np.array(minibatch[:,2].tolist())
state_prime_batch = np.array(minibatch[:,3].tolist())
is_terminal_batch = np.array(minibatch[:,4].tolist())
# use the current neural network to predict
# current state values and next state values
state_value_batch = model.predict(state_batch)
next_state_value_batch = model.predict(state_prime_batch)
# update the state values given the batch
for i in range(len(minibatch)):
if is_terminal_batch[i]:
state_value_batch[i, action_batch[i]] = rewards_batch[i]
else:
state_value_batch[i, action_batch[i]] = rewards_batch[i] + gamma * np.max(next_state_value_batch[i])
# update the neural network weights
model.train_on_batch(state_batch, state_value_batch)
return model, (epsilons, states, actions)
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
model, stats = neuro_q_learning(env)
videos = np.array(env.videos)
n_videos = 4
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
with open(meta_path) as data_file:
meta = json.load(data_file)
    html_tag = """
    <h2>{0}<h2/>
    <video width="960" height="540" controls>
        <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>
    """
strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
env.close()
gym.upload(mdir, api_key='<YOUR API KEY>')
epsilons, states, actions = stats
plt.plot(np.arange(len(epsilons)), epsilons, '.')
hist, bins = np.histogram(actions, bins=3)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
for episode in range(100):
state = env.reset()
done = False
while not done:
action = np.argmax(model.predict(state.reshape(1, 4))[0])
nstate, reward, done, info = env.step(action)
state = nstate
videos = np.array(env.videos)
n_videos = 3
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
with open(meta_path) as data_file:
meta = json.load(data_file)
    html_tag = """
    <h2>{0}<h2/>
    <video width="960" height="540" controls>
        <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>
    """
strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
env.close()
gym.upload(mdir, api_key='<YOUR API KEY>')
def action_selection(state, model, episode, n_episodes):
epsilon = 0.99 if episode < n_episodes//4 else 0.33 if episode < n_episodes//2 else 0.
values = model.predict(state.reshape(1, 4))[0]
if np.random.random() < epsilon:
action = np.random.randint(len(values))
else:
action = np.argmax(values)
return action, epsilon
def neuro_q_learning(env, gamma = 0.99):
nS = env.observation_space.shape[0]
nA = env.env.action_space.n
# memory bank
memory_bank = deque()
memory_bank_size = 100000
# function approximator
#### THIS IS A WEAK NEURAL NETWORK
#### CAN YOU HELP THE AGENT LEARN MORE
#### COMPLEX OBSERVATIONS?????
model = Sequential()
model.add(Dense(64, input_dim=nS, activation='relu'))
model.add(Dense(nA, activation='linear'))
model.compile(loss='mse', optimizer='adam')
# constant values
n_episodes = 50000
batch_size = 256
training_frequency = 20
# for statistics
epsilons = []
states = []
actions = []
# interactions
for episode in range(n_episodes):
state = env.reset()
done = False
# each episode
while not done:
states.append(state)
# select action
action, epsilon = action_selection(state, model, episode, n_episodes)
epsilons.append(epsilon)
actions.append(action)
# save history in memory bank
nstate, reward, done, info = env.step(action)
memory_bank.append((state, action, reward, nstate, done))
if len(memory_bank) > memory_bank_size:
memory_bank.popleft()
# iterate to next state
state = nstate
# only every few episodes enter training and update neural network weights
if episode % training_frequency == 0 and len(memory_bank) == memory_bank_size:
# randomly select batches of samples from the history
# for training to prevent values spiking due to high
# correlation of sequential values
minibatch = np.array(random.sample(memory_bank, batch_size))
# extract values by type from the minibatch
state_batch = np.array(minibatch[:,0].tolist())
action_batch = np.array(minibatch[:,1].tolist())
rewards_batch = np.array(minibatch[:,2].tolist())
state_prime_batch = np.array(minibatch[:,3].tolist())
is_terminal_batch = np.array(minibatch[:,4].tolist())
# use the current neural network to predict
# current state values and next state values
state_value_batch = model.predict(state_batch)
next_state_value_batch = model.predict(state_prime_batch)
# update the state values given the batch
for i in range(len(minibatch)):
if is_terminal_batch[i]:
state_value_batch[i, action_batch[i]] = rewards_batch[i]
else:
state_value_batch[i, action_batch[i]] = rewards_batch[i] + gamma * np.max(next_state_value_batch[i])
# update the neural network weights
model.train_on_batch(state_batch, state_value_batch)
return model, (epsilons, states, actions)
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
model, stats = neuro_q_learning(env)
videos = np.array(env.videos)
n_videos = 4
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
with open(meta_path) as data_file:
meta = json.load(data_file)
    html_tag = """
    <h2>{0}<h2/>
    <video width="960" height="540" controls>
        <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>
    """
strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
env.close()
gym.upload(mdir, api_key='<YOUR API KEY>')
epsilons, states, actions = stats
plt.plot(np.arange(len(epsilons)), epsilons, '.')
hist, bins = np.histogram(actions, bins=3)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
for episode in range(100):
state = env.reset()
done = False
while not done:
action = np.argmax(model.predict(state.reshape(1, 4))[0])
nstate, reward, done, info = env.step(action)
state = nstate
videos = np.array(env.videos)
n_videos = 3
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
video = io.open(video_path, 'r+b').read()
encoded = base64.b64encode(video)
with open(meta_path) as data_file:
meta = json.load(data_file)
    html_tag = """
    <h2>{0}<h2/>
    <video width="960" height="540" controls>
        <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>
    """
strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
env.close()
gym.upload(mdir, api_key='<YOUR API KEY>')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initial set-up
Step2: Combine model and experiments to produce the observations, the model function, and the summary statistics
Step3: Set up prior ranges for each parameter in the model.
Step4: Run ABC calibration
Step5: Results analysis
| <ASSISTANT_TASK:>
Python Code:
import os, tempfile
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from ionchannelABC import theoretical_population_size
from ionchannelABC import IonChannelDistance, EfficientMultivariateNormalTransition, IonChannelAcceptor
from ionchannelABC.experiment import setup
from ionchannelABC.visualization import plot_sim_results, plot_kde_matrix_custom
import myokit
from pyabc import Distribution, RV, History, ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
from pyabc.populationstrategy import ConstantPopulationSize
from experiments.ina_sakakibara import (sakakibara_act_nyg_adjust,
sakakibara_inact_nyg_adjust,
sakakibara_inact_kin_nyg_adjust,
sakakibara_rec_nyg_adjust)
from experiments.ina_schneider import schneider_taum_nyg_adjust
modelfile = 'models/standardised_ina.mmt'
observations, model, summary_statistics = setup(modelfile,
sakakibara_act_nyg_adjust,
sakakibara_inact_nyg_adjust,
schneider_taum_nyg_adjust,
sakakibara_inact_kin_nyg_adjust,
sakakibara_rec_nyg_adjust)
assert len(observations)==len(summary_statistics(model({})))
g = plot_sim_results(modelfile,
sakakibara_act_nyg_adjust,
schneider_taum_nyg_adjust,
sakakibara_inact_nyg_adjust,
sakakibara_inact_kin_nyg_adjust,
sakakibara_rec_nyg_adjust)
limits = {'log_ina.A': (0., 1.),
'log_ina.p_1': (1., 5.),
'ina.p_2': (1e-7, 0.2),
'log_ina.p_3': (-3., 1.),
'ina.p_4': (1e-7, 0.4),
'log_ina.p_5': (-1., 3.),
'ina.p_6': (1e-7, 0.2),
'log_ina.p_7': (-4., 0.),
'ina.p_8': (1e-7, 0.2)}
prior = Distribution(**{key: RV("uniform", a, b - a)
for key, (a,b) in limits.items()})
# Test this works correctly with set-up functions
assert len(observations) == len(summary_statistics(model(prior.rvs())))
db_path = ("sqlite:///" + os.path.join(tempfile.gettempdir(), "standardised_ina_unified.db"))
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
abc = ABCSMC(models=model,
parameter_priors=prior,
distance_function=IonChannelDistance(
exp_id=list(observations.exp_id),
variance=list(observations.variance),
delta=0.05),
population_size=ConstantPopulationSize(1000),
summary_statistics=summary_statistics,
transitions=EfficientMultivariateNormalTransition(),
eps=MedianEpsilon(initial_epsilon=100),
sampler=MulticoreEvalParallelSampler(n_procs=16),
acceptor=IonChannelAcceptor())
obs = observations.to_dict()['y']
obs = {str(k): v for k, v in obs.items()}
abc_id = abc.new(db_path, obs)
history = abc.run(minimum_epsilon=0., max_nr_populations=100, min_acceptance_rate=0.01)
history = abc.run(minimum_epsilon=0., max_nr_populations=100, min_acceptance_rate=0.01)
history = History(db_path)
history.all_runs() # most recent is relevant
df, w = history.get_distribution(m=0)
df.describe()
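# A small summary (not part of the original notebook): the particles in `df` come
# with importance weights `w`, so weighted averages give the posterior means.
posterior_means = np.average(df.values, weights=w, axis=0)
for name, value in zip(df.columns, posterior_means):
    print(name, value)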
sns.set_context('poster')
mpl.rcParams['font.size'] = 14
mpl.rcParams['legend.fontsize'] = 14
g = plot_sim_results(modelfile,
sakakibara_act_nyg_adjust,
schneider_taum_nyg_adjust,
sakakibara_inact_nyg_adjust,
sakakibara_inact_kin_nyg_adjust,
sakakibara_rec_nyg_adjust,
df=df, w=w)
plt.tight_layout()
m,_,_ = myokit.load(modelfile)
sns.set_context('paper')
g = plot_kde_matrix_custom(df, w, limits=limits)
plt.tight_layout()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a survey plan
Step3: Transient model
Step4: Transient Generator
Step5: Number of injections: you can either fix the number of generated transients or draw it from a rate. The rate should always be specified, even when ntransient is not None.
Step6: SimulSurvey
Step7: Save
Step8: Output
| <ASSISTANT_TASK:>
Python Code:
import os
home_dir = os.getcwd()
# Please enter the path to where you have placed the Schlegel, Finkbeiner & Davis (1998) dust map files
# You can also set the environment variable SFD_DIR to this path (in that case the variable below should be None)
sfd98_dir = os.path.join(home_dir, 'data/sfd98')
import simsurvey
import numpy as np
import matplotlib.pyplot as plt
import sncosmo
from astropy.cosmology import Planck15
import simsurvey_tools as sst
from scipy.interpolate import RectBivariateSpline as Spline2d
import ligo.skymap.plot
simsurvey.__version__
# Load the ZTF fields, CCD corners and filters
fields = sst.load_ztf_fields()
sst.load_ztf_filters()
# Load the ZTF CCD corners
ccds = sst.load_ztf_ccds()
# Load the ZTF quadrants corners
ccds = sst.load_ztf_ccds(filename='data/ZTF_corners_rcid.txt', num_segs=64)
obs = {'time': [], 'field': [], 'band': [], 'maglim': [], 'skynoise': [], 'comment': [], 'zp': []}
mjd_start = 58239.5
for k in range(0, 61, 3):
obs['time'].extend([mjd_start + k + l/24. for l in range(3)])
obs['field'].extend([683 for l in range(3)])
obs['band'].extend(['ztfg', 'ztfr', 'ztfi'])
obs['maglim'].extend([22 for l in range(3)])
obs['zp'].extend([30 for l in range(3)])
obs['comment'].extend(['' for l in range(3)])
obs['skynoise'] = 10**(-0.4 * (np.array(obs['maglim']) - 30)) / 5
plan = simsurvey.SurveyPlan(time=obs['time'],
band=obs['band'],
skynoise=obs['skynoise'],
obs_field=obs['field'],
obs_ccd=None,
zp=obs['zp'],
comment=obs['comment'],
fields=fields,
ccds=ccds
)
mjd_range = (plan.pointings['time'].min() - 30, plan.pointings['time'].max() + 30)
plan.pointings
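# Quick look at the cadence (a sketch, not part of the original plan set-up):
# a histogram of the pointing times stored in the survey plan.
plt.hist(np.array(plan.pointings['time']), bins=30)
plt.xlabel('MJD')
plt.ylabel('Number of pointings')
plt.show()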
! git clone https://github.com/mbulla/kilonova_models.git
def Bullamodel(dynwind=False, dataDir='kilonova_models/02_Dhawan2019/', mej=0.04, phi=30, temp=5000):
l = dataDir+'nph1.0e+06_mej'+'{:.2f}'.format(mej)+'_phi'+'{:.0f}'.format(phi)+'_T'+'{:.1e}'.format(temp)+'.txt'
f = open(l)
lines = f.readlines()
nobs = int(lines[0])
nwave = float(lines[1])
line3 = (lines[2]).split(' ')
ntime = int(line3[0])
t_i = float(line3[1])
t_f = float(line3[2])
cos_theta = np.linspace(0, 1, nobs) # 11 viewing angles
phase = np.linspace(t_i, t_f, ntime) # epochs
file_ = np.genfromtxt(l, skip_header=3)
wave = file_[0:int(nwave),0]
flux = []
for i in range(int(nobs)):
flux.append(file_[i*int(nwave):i*int(nwave)+int(nwave),1:])
flux = np.array(flux).T
return phase, wave, cos_theta, flux
# AngularTimeSeriesSource classdefined to create an angle dependent time serie source.
class AngularTimeSeriesSource(sncosmo.Source):
    r"""A single-component spectral time series model.
    The spectral flux density of this model is given by
    .. math::
       F(t, \lambda) = A \times M(t, \lambda)
    where _M_ is the flux defined on a grid in phase and wavelength
    and _A_ (amplitude) is the single free parameter of the model. The
    amplitude _A_ is a simple unitless scaling factor applied to
    whatever flux values are used to initialize the
    ``TimeSeriesSource``. Therefore, the _A_ parameter has no
    intrinsic meaning. It can only be interpreted in conjunction with
    the model values. Thus, it is meaningless to compare the _A_
    parameter between two different ``TimeSeriesSource`` instances with
    different model data.
    Parameters
    ----------
    phase : `~numpy.ndarray`
        Phases in days.
    wave : `~numpy.ndarray`
        Wavelengths in Angstroms.
    cos_theta : `~numpy.ndarray`
        Cosine of the viewing angle.
    flux : `~numpy.ndarray`
        Model spectral flux density in erg / s / cm^2 / Angstrom.
        Must have shape ``(num_phases, num_wave, num_cos_theta)``.
    zero_before : bool, optional
        If True, flux at phases before minimum phase will be zeroed. The
        default is False, in which case the flux at such phases will be equal
        to the flux at the minimum phase (``flux[0, :]`` in the input array).
    name : str, optional
        Name of the model. Default is `None`.
    version : str, optional
        Version of the model. Default is `None`.
    """
_param_names = ['amplitude', 'theta']
param_names_latex = ['A', r'\theta']
def __init__(self, phase, wave, cos_theta, flux, zero_before=True, zero_after=True, name=None,
version=None):
self.name = name
self.version = version
self._phase = phase
self._wave = wave
self._cos_theta = cos_theta
self._flux_array = flux
self._parameters = np.array([1., 0.])
self._current_theta = 0.
self._zero_before = zero_before
self._zero_after = zero_after
self._set_theta()
def _set_theta(self):
logflux_ = np.zeros(self._flux_array.shape[:2])
for k in range(len(self._phase)):
adding = 1e-10 # Here we are adding 1e-10 to avoid problems with null values
f_tmp = Spline2d(self._wave, self._cos_theta, np.log(self._flux_array[k]+adding),
kx=1, ky=1)
logflux_[k] = f_tmp(self._wave, np.cos(self._parameters[1]*np.pi/180)).T
self._model_flux = Spline2d(self._phase, self._wave, logflux_, kx=1, ky=1)
self._current_theta = self._parameters[1]
def _flux(self, phase, wave):
if self._current_theta != self._parameters[1]:
self._set_theta()
f = self._parameters[0] * (np.exp(self._model_flux(phase, wave)))
if self._zero_before:
mask = np.atleast_1d(phase) < self.minphase()
f[mask, :] = 0.
if self._zero_after:
mask = np.atleast_1d(phase) > self.maxphase()
f[mask, :] = 0.
return f
phase, wave, cos_theta, flux = Bullamodel()
source = AngularTimeSeriesSource(phase, wave, cos_theta, flux)
dust = sncosmo.CCM89Dust()
model = sncosmo.Model(source=source,effects=[dust, dust], effect_names=['host', 'MW'], effect_frames=['rest', 'obs'])
# Distribution of viewing angles
thetadist = 'uniform in cosine' # 'uniform in cosine', 'uniform in degrees', 'fixed theta'
def random_parameters(redshifts, model,r_v=2., ebv_rate=0.11,**kwargs):
# Amplitude
amp = []
for z in redshifts:
amp.append(10**(-0.4*Planck15.distmod(z).value))
if thetadist=='uniform in cosine':
theta = np.arccos(np.random.random(len(redshifts))) / np.pi * 180
elif thetadist=='uniform in degrees':
theta = np.random.uniform(0, 90,size=len(redshifts))
elif thetadist=='fixed theta':
theta = np.array([20]*len(redshifts)) # Viewing angle fixed to 20 degrees
return {
'amplitude': np.array(amp),
'theta': theta,
'hostr_v': r_v * np.ones(len(redshifts)),
'hostebv': np.random.exponential(ebv_rate, len(redshifts))
}
transientprop = dict(lcmodel=model, lcsimul_func=random_parameters)
ntransient = 1000
rate = 1000 * 1e-6 # Mpc-3 yr-1
dec_range=(plan.pointings['Dec'].min()-10,plan.pointings['Dec'].max()+10)
ra_range=(plan.pointings['RA'].min()-10,plan.pointings['RA'].max()+10)
tr = simsurvey.get_transient_generator([0, 0.05],
ntransient=ntransient,
ratefunc=lambda z: rate,
dec_range=dec_range,
ra_range=ra_range,
mjd_range=(mjd_range[0],
mjd_range[1]),
transientprop=transientprop,
sfd98_dir=sfd98_dir
)
# With sourcenoise==False, the flux error will correspond to the skynoise. Sourcenoise==True add an extra term in the flux errors from the brightness of the source.
survey = simsurvey.SimulSurvey(generator=tr, plan=plan, n_det=2, threshold=5., sourcenoise=False)
lcs = survey.get_lightcurves(
progress_bar=True, notebook=True # If you get an error because of the progress_bar, delete this line.
)
len(lcs.lcs)
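# Rough recovery statistics (not part of the original notebook): compare the number
# of detected light curves against the rejected and unobserved transients.
n_det = len(lcs.meta)
n_rej = len(lcs.meta_rejected)
n_out = len(lcs.meta_notobserved)
print('detected: %d, rejected: %d, not observed: %d' % (n_det, n_rej, n_out))
print('recovered fraction: %.2f' % (n_det / float(n_det + n_rej + n_out)))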
lcs.save('lcs.pkl')
_ = sncosmo.plot_lc(lcs[0])
# Redshift distribution
plt.hist(lcs.meta_full['z'], lw=1, histtype='step', range=(0,0.05), bins=20, label='all')
plt.hist(lcs.meta['z'], lw=2, histtype='step', range=(0,0.05), bins=20, label='detected')
plt.xlabel('Redshift', fontsize='x-large')
plt.ylabel(r'$N_{KNe}$', fontsize='x-large')
plt.xlim((0, 0.05))
plt.legend()
plt.hist(lcs.stats['p_det'], lw=2, histtype='step', range=(0,10), bins=20)
plt.xlabel('Detection phase (observer-frame)', fontsize='x-large')
_ = plt.ylabel(r'$N_{KNe}$', fontsize='x-large')
plt.figure()
ax = plt.axes()
ax.grid()
ax.scatter(lcs.meta_notobserved['ra'], lcs.meta_notobserved['dec'], marker='*', label='meta_notobserved', alpha=0.7)
ax.scatter(lcs.meta_rejected['ra'], lcs.meta_rejected['dec'], marker='*', label='meta_rejected', alpha=0.7)
ax.scatter(lcs.meta['ra'], lcs.meta['dec'], marker='*', label='meta_detected', alpha=0.7)
#ax.legend(loc='center left', bbox_to_anchor=(0.9, .5))
ax.legend(loc=0)
ax.set_ylabel('DEC (deg)')
ax.set_xlabel('RA (deg)')
plt.tight_layout()
plt.show()
plt.figure()
ax = plt.axes(
[0.05, 0.05, 0.9, 0.9],
projection='geo degrees mollweide'
)
ax.grid()
ax.scatter(lcs.meta_notobserved['ra'], lcs.meta_notobserved['dec'], transform=ax.get_transform('world'), marker='*', label='meta_notobserved', alpha=0.7)
ax.scatter(lcs.meta_rejected['ra'], lcs.meta_rejected['dec'], transform=ax.get_transform('world'), marker='*', label='meta_rejected', alpha=0.7)
ax.scatter(lcs.meta['ra'], lcs.meta['dec'], transform=ax.get_transform('world'), marker='*', label='meta_detected', alpha=0.7)
#ax.legend(loc='center left', bbox_to_anchor=(0.9, .5))
ax.legend(loc=0)
ax.set_ylabel('DEC (deg)')
ax.set_xlabel('RA (deg)')
plt.tight_layout()
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: ALS implementation using DML
Step3: Running the Algorithm
Step5: Predictions
Step6: Just for Fun!
| <ASSISTANT_TASK:>
Python Code:
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from systemml import MLContext, dml
spark = SparkSession\
.builder\
.appName("als-example")\
.getOrCreate()
schema = StructType([StructField("movieId", IntegerType(), True),
StructField("userId", IntegerType(), True),
StructField("rating", IntegerType(), True),
StructField("date", StringType(), True)])
ratings = spark.read.csv("./netflix/training_set_normalized/mv_0*.txt", schema = schema)
ratings = ratings.select('userId', 'movieId', 'rating')
ratings.show(10)
ratings.describe().show()
#-----------------------------------------------------------------
# Create kernel in SystemML's DSL using the R-like syntax for ALS
# Algorithms available at : https://systemml.apache.org/algorithms
# Below algorithm based on ALS-CG.dml
#-----------------------------------------------------------------
als_dml = """
# Default values of some parameters
r = rank
max_iter = 50
check = TRUE
thr = 0.01
R = table(X[,1], X[,2], X[,3])
# check the input matrix R, if some rows or columns contain only zeros remove them from R
R_nonzero_ind = R != 0;
row_nonzeros = rowSums(R_nonzero_ind);
col_nonzeros = t(colSums (R_nonzero_ind));
orig_nonzero_rows_ind = row_nonzeros != 0;
orig_nonzero_cols_ind = col_nonzeros != 0;
num_zero_rows = nrow(R) - sum(orig_nonzero_rows_ind);
num_zero_cols = ncol(R) - sum(orig_nonzero_cols_ind);
if (num_zero_rows > 0) {
print("Matrix R contains empty rows! These rows will be removed.");
R = removeEmpty(target = R, margin = "rows");
}
if (num_zero_cols > 0) {
print ("Matrix R contains empty columns! These columns will be removed.");
R = removeEmpty(target = R, margin = "cols");
}
if (num_zero_rows > 0 | num_zero_cols > 0) {
print("Recomputing nonzero rows and columns!");
R_nonzero_ind = R != 0;
row_nonzeros = rowSums(R_nonzero_ind);
col_nonzeros = t(colSums (R_nonzero_ind));
}
###### MAIN PART ######
m = nrow(R);
n = ncol(R);
# initializing factor matrices
U = rand(rows = m, cols = r, min = -0.5, max = 0.5);
M = rand(rows = n, cols = r, min = -0.5, max = 0.5);
# initializing transformed matrices
Rt = t(R);
loss = matrix(0, rows=max_iter+1, cols=1)
if (check) {
loss[1,] = sum(R_nonzero_ind * (R - (U %*% t(M)))^2) + lambda * (sum((U^2) * row_nonzeros) +
sum((M^2) * col_nonzeros));
print("----- Initial train loss: " + toString(loss[1,1]) + " -----");
}
lambda_I = diag (matrix (lambda, rows = r, cols = 1));
it = 0;
converged = FALSE;
while ((it < max_iter) & (!converged)) {
it = it + 1;
# keep M fixed and update U
parfor (i in 1:m) {
M_nonzero_ind = t(R[i,] != 0);
M_nonzero = removeEmpty(target=M * M_nonzero_ind, margin="rows");
A1 = (t(M_nonzero) %*% M_nonzero) + (as.scalar(row_nonzeros[i,1]) * lambda_I); # coefficient matrix
U[i,] = t(solve(A1, t(R[i,] %*% M)));
}
# keep U fixed and update M
parfor (j in 1:n) {
U_nonzero_ind = t(Rt[j,] != 0)
U_nonzero = removeEmpty(target=U * U_nonzero_ind, margin="rows");
A2 = (t(U_nonzero) %*% U_nonzero) + (as.scalar(col_nonzeros[j,1]) * lambda_I); # coefficient matrix
M[j,] = t(solve(A2, t(Rt[j,] %*% U)));
}
# check for convergence
if (check) {
loss_init = as.scalar(loss[it,1])
loss_cur = sum(R_nonzero_ind * (R - (U %*% t(M)))^2) + lambda * (sum((U^2) * row_nonzeros) +
sum((M^2) * col_nonzeros));
loss_dec = (loss_init - loss_cur) / loss_init;
print("Train loss at iteration (M) " + it + ": " + loss_cur + " loss-dec " + loss_dec);
if (loss_dec >= 0 & loss_dec < thr | loss_init == 0) {
print("----- ALS converged after " + it + " iterations!");
converged = TRUE;
}
loss[it+1,1] = loss_cur
}
} # end of while loop
loss = loss[1:it+1,1]
if (check) {
print("----- Final train loss: " + toString(loss[it+1,1]) + " -----");
}
if (!converged) {
print("Max iteration achieved but not converged!");
}
# inject 0s in U if original R had empty rows
if (num_zero_rows > 0) {
U = removeEmpty(target = diag(orig_nonzero_rows_ind), margin = "cols") %*% U;
}
# inject 0s in R if original V had empty rows
if (num_zero_cols > 0) {
M = removeEmpty(target = diag(orig_nonzero_cols_ind), margin = "cols") %*% M;
}
M = t(M);
"""
ml = MLContext(sc)
# Define input/output variables for DML script
alsScript = dml(als_dml).input("X", ratings) \
.input("lambda", 0.01) \
.input("rank", 100) \
.output("U", "M", "loss")
# Execute script
res = ml.execute(alsScript)
U, M, loss = res.get('U','M', "loss")
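# The handles returned by MLContext are SystemML matrices; a small sketch (not in
# the original notebook) of pulling the factors into numpy to check their shapes,
# mirroring the toNumPy() call used for the predictions later on.
U_np = U.toNumPy()
M_np = M.toNumPy()
print(U_np.shape, M_np.shape)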
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(loss.toNumPy(), 'o');
predict_dml = """
R = table(R[,1], R[,2], R[,3])
K = 5
Rrows = nrow(R);
Rcols = ncol(R);
zero_cols_ind = (colSums(M != 0)) == 0;
K = min(Rcols - sum(zero_cols_ind), K);
n = nrow(X);
Urows = nrow(U);
Mcols = ncol(M);
X_user_max = max(X[,1]);
if (X_user_max > Rrows) {
stop("Predictions cannot be provided. Maximum user-id exceeds the number of rows of R.");
}
if (Urows != Rrows | Mcols != Rcols) {
stop("Number of rows of U (columns of M) does not match the number of rows (column) of R.");
}
# creats projection matrix to select users
s = seq(1, n);
ones = matrix(1, rows = n, cols = 1);
P = table(s, X[,1], ones, n, Urows);
# selects users from factor U
U_prime = P %*% U;
# calculate rating matrix for selected users
R_prime = U_prime %*% M;
# selects users from original R
R_users = P %*% R;
# create indictor matrix to remove existing ratings for given users
I = R_users == 0;
# removes already recommended items and creating user2item matrix
R_prime = R_prime * I;
# stores sorted movies for selected users
R_top_indices = matrix(0, rows = nrow (R_prime), cols = K);
R_top_values = matrix(0, rows = nrow (R_prime), cols = K);
# a large number to mask the max ratings
range = max(R_prime) - min(R_prime) + 1;
# uses rowIndexMax/rowMaxs to update kth ratings
for (i in 1:K){
rowIndexMax = rowIndexMax(R_prime);
rowMaxs = rowMaxs(R_prime);
R_top_indices[,i] = rowIndexMax;
R_top_values[,i] = rowMaxs;
R_prime = R_prime - range * table(seq (1, nrow(R_prime), 1), rowIndexMax, nrow(R_prime), ncol(R_prime));
}
R_top_indices = R_top_indices * (R_top_values > 0);
# cbind users as a first column
R_top_indices = cbind(X[,1], R_top_indices);
R_top_values = cbind(X[,1], R_top_values);
"""
# user for which we want to recommend movies
ids = [116,126,130,131,133,142,149,158,164,168,169,177,178,183,188,189,192,195,199,201,215,231,242,247,248,
250,261,265,266,267,268,283,291,296,298,299,301,302,304,305,307,308,310,312,314,330,331,333,352,358,363,
368,369,379,383,384,385,392,413,416,424,437,439,440,442,453,462,466,470,471,477,478,479,481,485,490,491]
users = spark.createDataFrame([[i] for i in ids])
predScript = dml(predict_dml).input("R", ratings) \
.input("X", users) \
.input("U", U) \
.input("M", M) \
.output("R_top_indices")
pred = ml.execute(predScript).get("R_top_indices")
pred = pred.toNumPy()
import pandas as pd
titles = pd.read_csv("./netflix/movie_titles.csv", header=None, sep=';', names=['movieID', 'year', 'title'])
import re
import wikipedia as wiki
from bs4 import BeautifulSoup as bs
import requests as rq
from IPython.core.display import Image, display
def get_poster(title):
if title.endswith('Bonus Material'):
title = title.strip('Bonus Material')
title = re.sub(r'[^\w\s]','',title)
matches = wiki.search(title)
if matches is None:
return
film = [s for s in matches if 'film)' in s]
film = film[0] if len(film) > 0 else matches[0]
try:
url = wiki.page(film).url
except:
return
html = rq.get(url)
if html.status_code == 200:
soup = bs(html.content, 'html.parser')
infobox = soup.find('table', class_="infobox")
if (infobox):
img = infobox.find('img')
if img:
display(Image('http:' + img['src']))
def show_recommendations(userId, preds):
for row in preds:
if int(row[0]) == userId:
print("\nrecommendations for userId", int(row[0]) )
for title in titles.title[row[1:]].values:
print(title)
get_poster(title)
break
show_recommendations(192, preds=pred)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Paths
Step2: Config parameters
Step3: Data Loading
Step4: experimenting with Fastai
Step5: Misc / Asides / Notes
| <ASSISTANT_TASK:>
Python Code:
import pathlib
import os
import torchtext
# from torchtext.data import Field
from torchtext import data
# import spacy
import pandas as pd
import numpy as np
# from torchtext.data import TabularDataset
data_path = pathlib.Path('../../data')
comp_path = pathlib.Path(data_path/'competitions/jigsaw-toxic-comment-classification-challenge')
EMBEDDING_FILE = 'glove/glove.6B.50d.txt'
TRAIN_DATA_FILE= 'train.csv'
TEST_DATA_FILE = 'test.csv'
embed_sz = 50 # embedding vector columns (factors)
max_feat = 20000 # embedding vector rows (words)
maxlen = 100 # words in comment to use
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
# train = pd.read_csv(comp_path/TRAIN_DATA_FILE)
# test = pd.read_csv(comp/TEST_DATA_FILE)
# SEE: Aside 1, Aside 2
# TEXT = Field(sequential=True, tokenize='spacy', lower=True)
TEXT = data.Field(sequential=True, tokenize= lambda x: x.split(), lower=True, )
LABEL = data.Field(sequential=False, use_vocab=False)
# trainval_datafields = [("id",None),("comment_text",TEXT)]
# trainval_datafields.extend((clss, LABEL) for clss in list_classes)
# test_datafields = [("id",None), ("comment_text",TEXT)]
# train_dataset = data.TabularDataset(
# path=comp_path/TRAIN_DATA_FILE, format='csv',
# skip_header=True, fields=trainval_datafields,
# sort_within_batch=True)
# test_dataset = data.TabularDataset(
# path=comp_path/TEST_DATA_FILE, format='csv',
# skip_header=True, fields=test_datafields)
# # TEXT.build_vocab(train_dataset)
from fastai.nlp import *
train_df = pd.read_csv(comp_path/TRAIN_DATA_FILE)
# SEE: Aside 3
model = LanguageModelData.from_dataframes(
path=comp_path, field=TEXT, col="comment_text",
train_df=train_df, val_df=train_df, test_df=train_df,
bs=64, min_freq=3)
em_sz = 200
nh = 500
nl = 3
opt_fn = partial(optim.Adam, betas=(0.7, 0.99))
learner = model.get_model(opt_fn, em_sz, nh, nl,
dropouti=0.05, dropout=0.05, wdrop=0.1,
dropoute=0.02, dropouth=0.05)
learner.clip = 0.3 # gradient clipping
learner.model.parameters
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
train = pd.read_csv(comp_path/TRAIN_DATA_FILE)
# test = pd.read_csv(comp/TEST_DATA_FILE)
train[list_classes][55:65]
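# A small sanity check (not part of the original notebook): the fraction of
# comments carrying each toxicity label in the training set.
train[list_classes].mean()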
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the MNIST database
Step2: Each digit is represented by a one hot vector where the index of the 1 represents the number
Step3: Load the saved weight matrices that were created by training the model
Step4: Functions to perform the inhibition of each ensemble
Step5: The network where the mental imagery and rotation occur
Step6: The following is not part of the brain model; it is used to view the output of the ensemble
Step7: Pickle the probe's output if it takes a long time to run
Step8: Testing
Step9: Just for fun
| <ASSISTANT_TASK:>
Python Code:
import nengo
import numpy as np
import cPickle
from nengo_extras.data import load_mnist
from nengo_extras.vision import Gabor, Mask
from matplotlib import pylab
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import linalg
# --- load the data
img_rows, img_cols = 28, 28
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = 2 * X_train - 1 # normalize to -1 to 1
X_test = 2 * X_test - 1 # normalize to -1 to 1
temp = np.diag([1]*10)
ZERO = temp[0]
ONE = temp[1]
TWO = temp[2]
THREE= temp[3]
FOUR = temp[4]
FIVE = temp[5]
SIX = temp[6]
SEVEN =temp[7]
EIGHT= temp[8]
NINE = temp[9]
labels =[ZERO,ONE,TWO,THREE,FOUR,FIVE,SIX,SEVEN,EIGHT,NINE]
dim =28
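# Quick sanity check (not part of the original notebook): display one training
# image and its label to confirm the data loaded and normalised correctly.
plt.figure(figsize=(2, 2))
pylab.imshow(np.reshape(X_train[0], (dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
print("label:", y_train[0])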
label_weights = cPickle.load(open("label_weights5000.p", "rb"))
activity_to_img_weights = cPickle.load(open("activity_to_img_weights5000.p", "rb"))
rotated_clockwise_after_encoder_weights = cPickle.load(open("rotated_after_encoder_weights_clockwise5000.p", "r"))
rotated_counter_after_encoder_weights = cPickle.load(open("rotated_after_encoder_weights5000.p", "r"))
#scale_up_after_encoder_weights = cPickle.load(open("scale_up_after_encoder_weights1000.p","r"))
#scale_down_after_encoder_weights = cPickle.load(open("scale_down_after_encoder_weights1000.p","r"))
#translate_up_after_encoder_weights = cPickle.load(open("translate_up_after_encoder_weights1000.p","r"))
#translate_down_after_encoder_weights = cPickle.load(open("translate_down_after_encoder_weights1000.p","r"))
#translate_left_after_encoder_weights = cPickle.load(open("translate_left_after_encoder_weights1000.p","r"))
#translate_right_after_encoder_weights = cPickle.load(open("translate_right_after_encoder_weights1000.p","r"))
#identity_after_encoder_weights = cPickle.load(open("identity_after_encoder_weights1000.p","r"))
#A value of zero gives no inhibition
def inhibit_rotate_clockwise(t):
if t < 1:
return dim**2
else:
return 0
def inhibit_rotate_counter(t):
if t < 1:
return 0
else:
return dim**2
def inhibit_identity(t):
if t < 1:
return dim**2
else:
return dim**2
def inhibit_scale_up(t):
return dim**2
def inhibit_scale_down(t):
return dim**2
def inhibit_translate_up(t):
return dim**2
def inhibit_translate_down(t):
return dim**2
def inhibit_translate_left(t):
return dim**2
def inhibit_translate_right(t):
return dim**2
def add_manipulation(main_ens,weights,inhibition_func):
#create ensemble for manipulation
ens_manipulation = nengo.Ensemble(n_hid,dim**2,seed=3,encoders=encoders, **ens_params)
#create node for inhibition
inhib_manipulation = nengo.Node(inhibition_func)
#Connect the main ensemble to each manipulation ensemble and back with appropriate transformation
nengo.Connection(main_ens.neurons, ens_manipulation.neurons, transform = weights.T, synapse=0.1)
nengo.Connection(ens_manipulation.neurons, main_ens.neurons, transform = weights.T,synapse = 0.1)
#connect inhibition
nengo.Connection(inhib_manipulation, ens_manipulation.neurons, transform=[[-1]] * n_hid)
#return ens_manipulation,inhib_manipulation
rng = np.random.RandomState(9)
n_hid = 1000
model = nengo.Network(seed=3)
with model:
#Stimulus only shows for brief period of time
    stim = nengo.Node(lambda t: ONE if t < 0.1 else np.zeros(10)) # output size must stay constant #nengo.processes.PresentInput(labels,1))#
ens_params = dict(
eval_points=X_train,
neuron_type=nengo.LIF(), #Why not use LIF?
intercepts=nengo.dists.Choice([-0.5]),
max_rates=nengo.dists.Choice([100]),
)
# linear filter used for edge detection as encoders, more plausible for human visual system
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)
#Ensemble that represents the image with different transformations applied to it
ens = nengo.Ensemble(n_hid, dim**2, seed=3, encoders=encoders, **ens_params)
#Connect stimulus to ensemble, transform using learned weight matrices
nengo.Connection(stim, ens, transform = np.dot(label_weights,activity_to_img_weights).T)
#Recurrent connection on the neurons of the ensemble to perform the rotation
#nengo.Connection(ens.neurons, ens.neurons, transform = rotated_counter_after_encoder_weights.T, synapse=0.1)
#add_manipulation(ens,rotated_clockwise_after_encoder_weights, inhibit_rotate_clockwise)
add_manipulation(ens,rotated_counter_after_encoder_weights, inhibit_rotate_counter)
    #add_manipulation(ens,scale_up_after_encoder_weights, inhibit_scale_up)  # weights not loaded above, so left commented out like the other manipulations
#add_manipulation(ens,scale_down_after_encoder_weights, inhibit_scale_down)
#add_manipulation(ens,translate_up_after_encoder_weights, inhibit_translate_up)
#add_manipulation(ens,translate_down_after_encoder_weights, inhibit_translate_down)
#add_manipulation(ens,translate_left_after_encoder_weights, inhibit_translate_left)
#add_manipulation(ens,translate_right_after_encoder_weights, inhibit_translate_right)
#Collect output, use synapse for smoothing
probe = nengo.Probe(ens.neurons,synapse=0.1)
sim = nengo.Simulator(model)
sim.run(5)
'''Animation for Probe output'''
fig = plt.figure()
output_acts = []
for act in sim.data[probe]:
output_acts.append(np.dot(act,activity_to_img_weights))
def updatefig(i):
im = pylab.imshow(np.reshape(output_acts[i],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'),animated=True)
return im,
ani = animation.FuncAnimation(fig, updatefig, interval=100, blit=True)
plt.show()
print(len(sim.data[probe]))
plt.subplot(161)
plt.title("100")
pylab.imshow(np.reshape(output_acts[100],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(162)
plt.title("500")
pylab.imshow(np.reshape(output_acts[500],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(163)
plt.title("1000")
pylab.imshow(np.reshape(output_acts[1000],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(164)
plt.title("1500")
pylab.imshow(np.reshape(output_acts[1500],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(165)
plt.title("2000")
pylab.imshow(np.reshape(output_acts[2000],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(166)
plt.title("2500")
pylab.imshow(np.reshape(output_acts[2500],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
#The filename includes the number of neurons and which digit is being rotated
filename = "mental_rotation_output_ONE_" + str(n_hid) + ".p"
cPickle.dump(sim.data[probe], open( filename , "wb" ) )
testing = np.dot(ONE,np.dot(label_weights,activity_to_img_weights))
plt.subplot(121)
pylab.imshow(np.reshape(testing,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
#Get image
testing = np.dot(ONE,np.dot(label_weights,activity_to_img_weights))
#Get activity of image
_, testing_act = nengo.utils.ensemble.tuning_curves(ens, sim, inputs=testing)
#Get rotated encoder outputs
testing_rotate = np.dot(testing_act,rotated_after_encoder_weights)
#Get activities
testing_rotate = ens.neuron_type.rates(testing_rotate, sim.data[ens].gain, sim.data[ens].bias)
for i in range(5):
testing_rotate = np.dot(testing_rotate,rotated_after_encoder_weights)
testing_rotate = ens.neuron_type.rates(testing_rotate, sim.data[ens].gain, sim.data[ens].bias)
#testing_rotate = np.dot(testing_rotate,rotation_weights)
testing_rotate = np.dot(testing_rotate,activity_to_img_weights)
plt.subplot(122)
pylab.imshow(np.reshape(testing_rotate,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
plt.subplot(121)
pylab.imshow(np.reshape(X_train[0],(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
#Get activity of image
_, testing_act = nengo.utils.ensemble.tuning_curves(ens, sim, inputs=X_train[0])
testing_rotate = np.dot(testing_act,activity_to_img_weights)
plt.subplot(122)
pylab.imshow(np.reshape(testing_rotate,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
letterO = np.dot(ZERO,np.dot(label_weights,activity_to_img_weights))
plt.subplot(161)
pylab.imshow(np.reshape(letterO,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
letterL = np.dot(SEVEN,label_weights)
for _ in range(30):
letterL = np.dot(letterL,rotation_weights)
letterL = np.dot(letterL,activity_to_img_weights)
plt.subplot(162)
pylab.imshow(np.reshape(letterL,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
letterI = np.dot(ONE,np.dot(label_weights,activity_to_img_weights))
plt.subplot(163)
pylab.imshow(np.reshape(letterI,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.subplot(165)
pylab.imshow(np.reshape(letterI,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
letterV = np.dot(SEVEN,label_weights)
for _ in range(40):
letterV = np.dot(letterV,rotation_weights)
letterV = np.dot(letterV,activity_to_img_weights)
plt.subplot(164)
pylab.imshow(np.reshape(letterV,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
letterA = np.dot(SEVEN,label_weights)
for _ in range(10):
letterA = np.dot(letterA,rotation_weights)
letterA = np.dot(letterA,activity_to_img_weights)
plt.subplot(166)
pylab.imshow(np.reshape(letterA,(dim, dim), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Processing the text using TRIPS
Step2: We can pass the block of text defined above to the TRIPS processor
Step3: Here tp is a TripsProcessor object which contains the extacted INDRA Statements as a list. We can inspect the statements extracted by TRIPS to make sure that all of the information was extracted.
Step4: Assembling a PySB model
Step5: Next, we instantiate a PySB assembler object.
Step6: The assembler takes a list of INDRA Statements as input in order to build a model.
Step7: We finally call the assembler's make_model method to produce the PySB model.
Step8: Simulating the model
Step9: We add some initial active ATM to start off the reaction network.
Step10: Below are the parameters we define for the simulation (these override the nominal parameters automatically defined by INDRA's PySB Assembler).
Step11: Now we set up an observable which monitors the amount of active p53 over time so that we can plot this quantity later.
Step12: We want to simulate the model over a relevant length of time.
Step13: We now instantiate a numerical ODE solver and run it with the model for the specified time span.
Step14: Finally, we plot the time course of active p53.
| <ASSISTANT_TASK:>
Python Code:
model_text = \
'''
Active ATM phosphorylates ATM, and phosphorylated ATM is active.
Active ATM activates p53.
p53 is transcribed and active p53 transcribes MDM2.
MDM2 is degraded.
Active p53 activates Wip1.
Active Wip1 inactivates p53.
Active Wip1 dephosphorylates ATM.
MDM2 ubiquitinates p53 and ubiquitinated p53 is degraded.
HIPK2 inactivates Wip1.
'''
from indra.sources import trips
tp = trips.process_text(model_text)
tp.statements
from indra.assemblers.pysb import PysbAssembler
pa = PysbAssembler()
pa.add_statements(tp.statements)
model = pa.make_model()
model.name = 'p53_DSB_model'
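# A quick look (not part of the original analysis) at what the assembler produced:
# a PySB model exposes its components, so we can list the monomers and rules.
print(model.monomers)
print(model.rules)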
from pysb import Parameter, Observable
model.add_component(Parameter('ATMa_0', 1))
atm_atr_m = model.monomers['ATM']
model.initial(atm_atr_m(phospho='p'),model.parameters['ATMa_0'])
parameters = {
"kf_aa_phosphorylation_1": 5e-07,
"kf_pa_dephosphorylation_1": 1e-05,
"kf_mt_ubiquitination_1": 1e-06,
"kf_at_act_1": 1e-07,
"kf_tp_act_1": 1e-07,
"kf_pt_act_1": 5e-07,
"kf_hp_act_1": 1e-07,
"kf_m_deg_1": 0.08,
"kf_t_deg_1": 2e-05,
"kf_t_synth_1": 2.0,
"kf_tm_synth_1": 0.02,
"HIPK2_0": 10000.0,
"MDM2_0": 0,
"ATM_0": 10000.0,
"TP53_0": 10000.0,
"PPM1D_0": 10000.0,
"ATMa_0": 1.0,
}
for name, value in parameters.items():
model.parameters[name].value = value
# Add active p53 observable
p53 = model.monomers['TP53']
obs = Observable('p53_active', p53(activity='active'))
model.add_component(obs)
import numpy as np
sim_hours = 24
ts = np.linspace(0, sim_hours*3600, sim_hours*60)
from pysb.integrate import Solver
solver = Solver(model, ts)
solver.run()
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure()
plt.plot(ts, solver.yobs['p53_active'], 'r')
plt.xticks([])
plt.xlabel('Time (a.u.)')
plt.ylabel('Active p53')
plt.yticks([])
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the Data Processing Pipelines section we discovered how to declare a DynamicMap and control multiple processing steps with the use of custom streams as described in the Responding to Events guide. Here we will use the same example exploring a dataset of stock timeseries and build a small dashboard using the Panel library, which allows us to easily declare custom widgets and link them to our streams. We will begin by once again declaring our function that loads the stock data
Step2: Building dashboards
Step3: You will have noticed the param.depends decorator on the load_symbol method above; this declares that the method depends on those two parameters. When we pass the method to a DynamicMap, it will automatically listen for changes to the 'symbol' and 'variable' parameters. To generate a set of widgets to control these parameters we can simply supply the explorer.param accessor to a panel layout, and combining the two we can quickly build a little GUI
Step4: The rolling_window parameter is not yet connected to anything however, so just like in the Data Processing Pipelines section we will see how we can get the widget to control the parameters of an operation. Both the rolling and rolling_outlier_std operations accept a rolling_window parameter, so we create a Params stream to listen to that parameter and then pass it to the operations. Finally we compose everything into a panel Row
Step5: Replacing the output
Step6: In the previous example we explicitly called the view method, but to allow panel to update the plot when the datashade parameter is toggled we instead pass it the actual view method. Whenever the datashade parameter is toggled panel will call the method and update the plot with whatever is returned
| <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import holoviews as hv
from bokeh.sampledata import stocks
from holoviews.operation.timeseries import rolling, rolling_outlier_std
hv.extension('bokeh')
def load_symbol(symbol, variable='adj_close', **kwargs):
df = pd.DataFrame(getattr(stocks, symbol))
df['date'] = df.date.astype('datetime64[ns]')
return hv.Curve(df, ('date', 'Date'), variable)
stock_symbols = ['AAPL', 'IBM', 'FB', 'GOOG', 'MSFT']
dmap = hv.DynamicMap(load_symbol, kdims='Symbol').redim.values(Symbol=stock_symbols)
dmap.opts(framewise=True)
import param
import panel as pn
from holoviews.streams import Params
class StockExplorer(param.Parameterized):
rolling_window = param.Integer(default=10, bounds=(1, 365))
symbol = param.ObjectSelector(default='AAPL', objects=stock_symbols)
variable = param.ObjectSelector(default='adj_close', objects=[
'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close'])
@param.depends('symbol', 'variable')
def load_symbol(self):
df = pd.DataFrame(getattr(stocks, self.symbol))
df['date'] = df.date.astype('datetime64[ns]')
return hv.Curve(df, ('date', 'Date'), self.variable).opts(framewise=True)
explorer = StockExplorer()
stock_dmap = hv.DynamicMap(explorer.load_symbol)
pn.Row(explorer.param, stock_dmap)
# Apply rolling mean
window = Params(explorer, ['rolling_window'])
smoothed = rolling(stock_dmap, streams=[window])
# Find outliers
outliers = rolling_outlier_std(stock_dmap, streams=[window]).opts(
hv.opts.Scatter(color='red', marker='triangle')
)
pn.Row(explorer.param, (smoothed * outliers).opts(width=600))
from holoviews.operation.datashader import datashade, dynspread
class AdvancedStockExplorer(StockExplorer):
datashade = param.Boolean(default=False)
@param.depends('datashade')
def view(self):
stocks = hv.DynamicMap(self.load_symbol)
# Apply rolling mean
window = Params(self, ['rolling_window'])
smoothed = rolling(stocks, streams=[window])
if self.datashade:
smoothed = dynspread(datashade(smoothed)).opts(framewise=True)
# Find outliers
outliers = rolling_outlier_std(stocks, streams=[window]).opts(
width=600, color='red', marker='triangle', framewise=True)
return (smoothed * outliers)
explorer = AdvancedStockExplorer()
pn.Row(explorer.param, explorer.view)
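# To go beyond the notebook, the same layout can be served as a standalone app
# (a sketch; launch it with `panel serve` on this notebook or an equivalent script).
dashboard = pn.Row(explorer.param, explorer.view)
dashboard.servable()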
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enter Your Project and GCS Bucket
Step2: Set Your Application Name, Task Name, and Directories.
Step3: Set a GCS Prefix
Step7: Stack Overflow Data
Step8: Download Data
Step9: Prepare CSV Files for Creating Managed Dataset
Step10: Create Custom Training Python Package
Step11: Write the Training Script
Step12: Build Package
Step13: Upload the Package to GCS
Step14: Create TensorFlow Serving Container
Step15: Create a tag for registering the image and register the image with Cloud Container Registry (gcr.io).
Step16: Run Custom Python Package Training with Managed Text Dataset
Step17: Create a Dataset on Vertex AI
Step18: Option 1
Step19: Option 2
Step20: You will need to specify the Python package that was built and uploaded to GCS, the module name within the package, and the pre-built training container image URI for training; in this example, we use the TensorFlow Serving container for prediction.
Step21: Run the Training Job
Step22: Deploy a Model and Create an Endpoint on Vertex AI
Step23: Predict on the Endpoint
Step25: Batch Prediction Job on the Model
| <ASSISTANT_TASK:>
Python Code:
!pip3 uninstall -y google-cloud-aiplatform
!pip3 install google-cloud-aiplatform
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
MY_PROJECT = "YOUR PROJECT ID"
MY_STAGING_BUCKET = "gs://YOUR BUCKET" # bucket should be in same region as ucaip
APP_NAME = "keras-text-class-stack-overflow-tag"
TASK_TYPE = "mbsdk_custom-py-pkg-training"
TASK_NAME = f"{TASK_TYPE}_{APP_NAME}"
TASK_DIR = f"./{TASK_NAME}"
DATA_DIR = f"{TASK_DIR}/data"
print(f"Task Name: {TASK_NAME}")
print(f"Task Directory: {TASK_DIR}")
print(f"Data Directory: {DATA_DIR}")
BUCKET_NAME = MY_STAGING_BUCKET.split("gs://")[1]
GCS_PREFIX = f"{TASK_TYPE}/{APP_NAME}"
print(f"Bucket Name: {BUCKET_NAME}")
print(f"GCS Prefix: {GCS_PREFIX}")
import csv
import os
from google.cloud import storage
from tensorflow.keras import utils
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Uploads a file to the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
destination_file_name = os.path.join("gs://", bucket_name, destination_blob_name)
return destination_file_name
def download_data(data_dir):
    """Download data."""
if not os.path.exists(data_dir):
os.makedirs(data_dir)
url = "https://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz"
dataset = utils.get_file(
"stack_overflow_16k.tar.gz",
url,
untar=True,
cache_dir=data_dir,
cache_subdir="",
)
data_dir = os.path.join(os.path.dirname(dataset))
return data_dir
def upload_train_data_to_gcs(train_data_dir, bucket_name, destination_blob_prefix):
    """Create CSV file using train data content."""
train_data_dir = os.path.join(data_dir, "train")
train_data_fn = os.path.join(data_dir, "train.csv")
fp = open(train_data_fn, "w", encoding="utf8")
writer = csv.writer(
fp, delimiter=",", quotechar='"', quoting=csv.QUOTE_ALL, lineterminator="\n"
)
for root, _, files in os.walk(train_data_dir):
for file in files:
if file.endswith(".txt"):
class_name = root.split("/")[-1]
file_fn = os.path.join(root, file)
with open(file_fn, "r") as f:
content = f.readlines()
lines = [x.strip().strip('"') for x in content]
writer.writerow((lines[0], class_name))
fp.close()
train_gcs_url = upload_blob(
bucket_name, train_data_fn, os.path.join(destination_blob_prefix, "train.csv")
)
return train_gcs_url
data_dir = download_data(DATA_DIR)
print(f"Data is downloaded to: {data_dir}")
!ls $data_dir
!ls $data_dir/train
gcs_source_train_url = upload_train_data_to_gcs(
train_data_dir=os.path.join(data_dir, "train"),
bucket_name=BUCKET_NAME,
destination_blob_prefix=f"{GCS_PREFIX}/data",
)
print(f"Train data content is loaded to {gcs_source_train_url}")
!gsutil ls gs://$BUCKET_NAME/$GCS_PREFIX/data
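# Optional sanity check (not part of the original notebook): preview the generated
# training CSV before creating the managed dataset. The column names below are
# only for readability; the file itself is written without a header.
import pandas as pd
pd.read_csv(os.path.join(data_dir, "train.csv"), header=None, names=["text", "label"]).head()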
PYTHON_PACKAGE_APPLICATION_DIR = f"{TASK_NAME}/trainer"
!mkdir -p $PYTHON_PACKAGE_APPLICATION_DIR
!touch $PYTHON_PACKAGE_APPLICATION_DIR/__init__.py
%%writefile {PYTHON_PACKAGE_APPLICATION_DIR}/task.py
import os
import argparse
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import json
import tqdm
VOCAB_SIZE = 10000
MAX_SEQUENCE_LENGTH = 250
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def build_model(num_classes, loss, optimizer, metrics, vectorize_layer):
# vocab_size is VOCAB_SIZE + 1 since 0 is used additionally for padding.
model = tf.keras.Sequential([
vectorize_layer,
layers.Embedding(VOCAB_SIZE + 1, 64, mask_zero=True),
layers.Conv1D(64, 5, padding="valid", activation="relu", strides=2),
layers.GlobalMaxPooling1D(),
layers.Dense(num_classes),
layers.Activation('softmax')
])
model.compile(
loss=loss,
optimizer=optimizer,
metrics=metrics)
return model
def get_string_labels(predicted_scores_batch, class_names):
predicted_labels = tf.argmax(predicted_scores_batch, axis=1)
predicted_labels = tf.gather(class_names, predicted_labels)
return predicted_labels
def predict(export_model, class_names, inputs):
predicted_scores = export_model.predict(inputs)
predicted_labels = get_string_labels(predicted_scores, class_names)
return predicted_labels
def parse_args():
parser = argparse.ArgumentParser(
description='Keras Text Classification on Stack Overflow Questions')
parser.add_argument(
'--epochs', default=25, type=int, help='number of training epochs')
parser.add_argument(
'--batch-size', default=16, type=int, help='mini-batch size')
parser.add_argument(
'--model-dir', default=os.getenv('AIP_MODEL_DIR'), type=str, help='model directory')
parser.add_argument(
'--data-dir', default='./data', type=str, help='data directory')
parser.add_argument(
'--test-run', default=False, type=str2bool, help='test run the training application, i.e. 1 epoch for training using sample dataset')
parser.add_argument(
'--model-version', default=1, type=int, help='model version')
args = parser.parse_args()
return args
def load_aip_dataset(aip_data_uri_pattern, batch_size, class_names, test_run, shuffle=True, seed=42):
data_file_urls = list()
labels = list()
class_indices = dict(zip(class_names, range(len(class_names))))
num_classes = len(class_names)
for aip_data_uri in tqdm.tqdm(tf.io.gfile.glob(pattern=aip_data_uri_pattern)):
with tf.io.gfile.GFile(name=aip_data_uri, mode='r') as gfile:
for line in gfile.readlines():
line = json.loads(line)
data_file_urls.append(line['textContent'])
classification_annotation = line['classificationAnnotations'][0]
label = classification_annotation['displayName']
labels.append(class_indices[label])
if test_run:
break
data = list()
for data_file_url in tqdm.tqdm(data_file_urls):
with tf.io.gfile.GFile(name=data_file_url, mode='r') as gf:
txt = gf.read()
data.append(txt)
print(f' data files count: {len(data_file_urls)}')
print(f' data count: {len(data)}')
print(f' labels count: {len(labels)}')
dataset = tf.data.Dataset.from_tensor_slices(data)
label_ds = tf.data.Dataset.from_tensor_slices(labels)
label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes))
dataset = tf.data.Dataset.zip((dataset, label_ds))
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
# Users may need to reference `class_names`.
dataset.class_names = class_names
return dataset
def main():
args = parse_args()
class_names = ['csharp', 'java', 'javascript', 'python']
class_indices = dict(zip(class_names, range(len(class_names))))
num_classes = len(class_names)
print(f' class names: {class_names}')
print(f' class indices: {class_indices}')
print(f' num classes: {num_classes}')
epochs = 1 if args.test_run else args.epochs
aip_model_dir = os.environ.get('AIP_MODEL_DIR')
aip_data_format = os.environ.get('AIP_DATA_FORMAT')
aip_training_data_uri = os.environ.get('AIP_TRAINING_DATA_URI')
aip_validation_data_uri = os.environ.get('AIP_VALIDATION_DATA_URI')
aip_test_data_uri = os.environ.get('AIP_TEST_DATA_URI')
print(f"aip_model_dir: {aip_model_dir}")
print(f"aip_data_format: {aip_data_format}")
print(f"aip_training_data_uri: {aip_training_data_uri}")
print(f"aip_validation_data_uri: {aip_validation_data_uri}")
print(f"aip_test_data_uri: {aip_test_data_uri}")
print('Loading AIP dataset')
train_ds = load_aip_dataset(
aip_training_data_uri, args.batch_size, class_names, args.test_run)
print('AIP training dataset is loaded')
val_ds = load_aip_dataset(
aip_validation_data_uri, 1, class_names, args.test_run)
print('AIP validation dataset is loaded')
test_ds = load_aip_dataset(
aip_test_data_uri, 1, class_names, args.test_run)
print('AIP test dataset is loaded')
vectorize_layer = TextVectorization(
max_tokens=VOCAB_SIZE,
output_mode='int',
output_sequence_length=MAX_SEQUENCE_LENGTH)
train_text = train_ds.map(lambda text, labels: text)
vectorize_layer.adapt(train_text)
print('The vectorize_layer is adapted')
print('Build model')
optimizer = 'adam'
metrics = ['accuracy']
model = build_model(
    num_classes, losses.CategoricalCrossentropy(from_logits=False), optimizer, metrics, vectorize_layer)  # the model already ends in a softmax, so the loss must not treat its outputs as logits
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)
history = history.history
print('Training accuracy: {acc}, loss: {loss}'.format(
acc=history['accuracy'][-1], loss=history['loss'][-1]))
print('Validation accuracy: {acc}, loss: {loss}'.format(
acc=history['val_accuracy'][-1], loss=history['val_loss'][-1]))
loss, accuracy = model.evaluate(test_ds)
print('Test accuracy: {acc}, loss: {loss}'.format(
acc=accuracy, loss=loss))
inputs = [
"how do I extract keys from a dict into a list?", # python
"debug public static void main(string[] args) {...}", # java
]
predicted_labels = predict(model, class_names, inputs)
for input, label in zip(inputs, predicted_labels):
print(f'Question: {input}')
print(f'Predicted label: {label.numpy()}')
model_export_path = os.path.join(args.model_dir, str(args.model_version))
model.save(model_export_path)
print(f'Model version {args.model_version} is exported to {args.model_dir}')
loaded = tf.saved_model.load(model_export_path)
input_name = list(loaded.signatures['serving_default'].structured_input_signature[1].keys())[0]
print(f'Serving function input: {input_name}')
return
if __name__ == '__main__':
main()
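# Note (illustrative, not from the original script): main() relies on the AIP_* environment
# variables (AIP_MODEL_DIR, AIP_TRAINING_DATA_URI, ...) that the managed training service injects
# at run time. For a local smoke test one could export those variables to local paths and run
# something like: python -m trainer.task --test-run true --epochs 1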
%%writefile {TASK_DIR}/setup.py
from setuptools import find_packages
from setuptools import setup
setup(
name='trainer',
version='0.1',
packages=find_packages(),
install_requires=(),
include_package_data=True,
description='My training application.'
)
!ls $TASK_DIR
!cd $TASK_DIR && python3 setup.py sdist --formats=gztar
!ls -ltr $TASK_DIR/dist/trainer-0.1.tar.gz
destination_blob_name = f"custom-training-python-package/{APP_NAME}/trainer-0.1.tar.gz"
source_file_name = f"{TASK_DIR}/dist/trainer-0.1.tar.gz"
python_package_gcs_uri = upload_blob(
BUCKET_NAME, source_file_name, destination_blob_name
)
python_module_name = "trainer.task"
print(f"Custom Training Python Package is uploaded to: {python_package_gcs_uri}")
!docker pull tensorflow/serving:latest
TF_SERVING_CONTAINER_IMAGE_URI = f"gcr.io/{MY_PROJECT}/tf-serving"
!docker tag tensorflow/serving $TF_SERVING_CONTAINER_IMAGE_URI
!docker push $TF_SERVING_CONTAINER_IMAGE_URI
from google.cloud import aiplatform
aiplatform.init(project=MY_PROJECT, staging_bucket=MY_STAGING_BUCKET)
dataset_display_name = f"temp-{APP_NAME}-content"
gcs_source = gcs_source_train_url
dataset = aiplatform.TextDataset.create(
display_name=dataset_display_name,
gcs_source=gcs_source,
import_schema_uri=aiplatform.schema.dataset.ioformat.text.single_label_classification,
sync=False,
)
MODEL_NAME = APP_NAME
PRE_BUILT_TRAINING_CONTAINER_IMAGE_URI = (
"gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest"
)
job = aiplatform.CustomPythonPackageTrainingJob(
display_name=f"temp_{TASK_NAME}_tf-serving",
python_package_gcs_uri=python_package_gcs_uri,
python_module_name=python_module_name,
container_uri=PRE_BUILT_TRAINING_CONTAINER_IMAGE_URI,
model_serving_container_image_uri=TF_SERVING_CONTAINER_IMAGE_URI,
model_serving_container_command=["/usr/bin/tensorflow_model_server"],
model_serving_container_args=[
f"--model_name={MODEL_NAME}",
"--model_base_path=$(AIP_STORAGE_URI)",
"--rest_api_port=8080",
"--port=8500",
"--file_system_poll_wait_seconds=31540000",
],
model_serving_container_predict_route=f"/v1/models/{MODEL_NAME}:predict",
model_serving_container_health_route=f"/v1/models/{MODEL_NAME}",
)
model = job.run(
dataset=dataset,
annotation_schema_uri=aiplatform.schema.dataset.annotation.text.classification,
args=["--epochs", "50"],
replica_count=1,
model_display_name=f"temp_{TASK_NAME}_tf-serving",
sync=False,
)
model.wait()
endpoint = model.deploy(machine_type="n1-standard-4", sync=False)
endpoint.wait()
class_names = ["csharp", "java", "javascript", "python"]
class_ids = range(len(class_names))
class_indices = dict(zip(class_names, class_ids))
class_maps = dict(zip(class_ids, class_names))
print(f"Class Indices: {class_indices}")
print(f"Class Maps: {class_maps}")
text_inputs = [
"how do I extract keys from a dict into a list?", # python
"debug public static void main(string[] args) {...}", # java
]
import numpy as np
predictions = endpoint.predict(instances=[[text] for text in text_inputs])
for text, predicted_scores in zip(text_inputs, predictions.predictions):
class_id = np.argmax(predicted_scores)
class_name = class_maps[class_id]
print(f"Question: {text}")
print(f"Predicted Tag: {class_name}\n")
import json
import tensorflow as tf
def upload_test_data_to_gcs(test_data_dir, test_gcs_url):
    """Create JSON file using test data content."""
input_name = "text_vectorization_input"
with tf.io.gfile.GFile(test_gcs_url, "w") as gf:
for root, _, files in os.walk(test_data_dir):
for file in files:
if file.endswith(".txt"):
file_fn = os.path.join(root, file)
with open(file_fn, "r") as f:
content = f.readlines()
lines = [x.strip().strip('"') for x in content]
data = {input_name: [lines[0]]}
gf.write(json.dumps(data))
gf.write("\n")
return
gcs_source_test_url = f"gs://{BUCKET_NAME}/{GCS_PREFIX}/data/test.json"
upload_test_data_to_gcs(
test_data_dir=os.path.join(data_dir, "test"), test_gcs_url=gcs_source_test_url
)
print(f"Test data content is loaded to {gcs_source_test_url}")
!gsutil ls $gcs_source_test_url
batch_predict_job = model.batch_predict(
job_display_name=f"temp_{TASK_NAME}_tf-serving",
gcs_source=gcs_source_test_url,
gcs_destination_prefix=f"gs://{BUCKET_NAME}/{GCS_PREFIX}/batch_prediction",
machine_type="n1-standard-4",
sync=False,
)
batch_predict_job.wait()
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_errors_stats = list()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction.errors_stats"):
prediction_errors_stats.append(blob.name)
if blob.name.split("/")[-1].startswith("prediction.results"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
line = json.loads(line)
text = line["instance"]["text_vectorization_input"][0]
prediction = line["prediction"]
class_id = np.argmax(prediction)
class_name = class_maps[class_id]
tags.append([text, class_name])
import pandas as pd
tags_df = pd.DataFrame(tags, columns=["question", "tag"])
tags_df.head()
tags_df["tag"].value_counts()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Symbolic differentiation
Step2: You can even ask sympy to give you a formula you can evaluate in a coding language
Step3: And sympy can evaluate the formula itself
Step4: A more complicated function
Step5: Another complicated function
Step6: But $f$ is just a polynomial! Is sympy clever enough to do the right thing? No.
Step7: sympy does the right thing if we give it some help...
Step8: Hand-coding derivatives
Step9: This code is pretty mechanical to write
Step10: This is fairly mechanical, similar to forward-mode
Step12: If we use forward mode, we can only compute one direction at a time, effectively
Step13: We've now traversed the loop with our work as many times as there are components in the vector. The forward evaluation for dot costs $O(n)$ and computing the gradient costs $O(n^2)$ because we have to do $O(n)$ work for each direction and there are $n$ directions.
Step15: We get the same values in only $O(n)$ work!
Step16: Example with linear function
Step17: another example
Step18: Software
| <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import sympy
from sympy.abc import x
f = sympy.cos(x**sympy.pi) * sympy.log(x)
f
sympy.diff(f, x)
sympy.ccode(f, 'y')
sympy.fcode(f, 'y')
f.evalf(40, subs={x: 1.9})
def g(x, m=np):
y = x
for i in range(2):
# a = m.log(y)
# b = y ** m.pi
# c = m.cos(b)
# y = c * a
y = m.cos(y**m.pi) * m.log(y)
return y
gexpr = g(x, m=sympy)
gexpr
sympy.diff(gexpr, x)
m = 8
#t = np.random.randn(m)
t = np.arange(1,m+1)
def f(x):
y = 1
for i in range(m):
y *= x - t[i]
return y
f(x)
sympy.diff(f(x), x)
sympy.expand( f(x) )
sympy.diff(sympy.expand( f(x) ), x)
def gprime(x):
y = x
dy = 1
for i in range(2):
a = np.log(y)
da = 1/y * dy
b = y ** np.pi
db = np.pi * y ** (np.pi - 1) * dy
c = np.cos(b)
dc = -np.sin(b) * db
y = c * a
dy = dc * a + c * da
return y, dy
print('by hand', gprime(1.9))
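# Illustrative sanity check (added): compare the hand-coded forward-mode derivative
# against a centered finite-difference approximation of g.
eps = 1e-6
print('finite difference', (g(1.9 + eps) - g(1.9 - eps)) / (2 * eps))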
def gprime_rev(x):
# First compute all the values by going through the iteration forwards
# I'm unrolling two iterations here for clarity ("static single assignment" form)
# It is possible to write code that keeps the loop structure.
a1 = np.log(x)
b1 = x ** np.pi
c1 = np.cos(b1)
y1 = c1 * a1
a2 = np.log(y1)
b2 = y1 ** np.pi
c2 = np.cos(b2)
y = c2 * a2 # Result
# Now go backwards computing dy/d_ for each variable
y_ = 1
y_c2 = y_ * a2
y_a2 = c2 * y_
y_b2 = -y_c2 * np.sin(b2) # dy/db2 = dy/dc2 dc2/db2
y_y1 = y_b2 * np.pi * y1 ** (np.pi - 1) + y_a2 / y1
y_c1 = y_y1 * a1
y_a1 = c1 * y_y1
y_b1 = -y_c1 * np.sin(b1)
y_x = y_b1 * np.pi * x ** (np.pi - 1) + y_a1 / x
return y, y_x
print('forward', gprime(1.9))
print('reverse', gprime_rev(1.9))
def dot(c, x):
n = len(c)
sum = 0
for i in range(n):
sum += c[i] * x[i]
return sum
n = 20
c = np.random.randn(n)
x = np.random.randn(n)
f = dot(c, x)
f
def dot_x(c, x, dx):
    """Compute derivative in direction dx"""
n = len(c)
dsum = 0
for i in range(n):
dsum += c[i] * dx[i]
return dsum
def grad_dot(c, x):
n = len(c)
I = np.eye(n)
grad = np.zeros(n)
for j in range(n):
dx = I[:,j]
grad[j] = dot_x(c, x, dx)
return grad
grad_dot(c, x)
def grad_dot_rev(c, x):
n = len(c)
sum_ = np.zeros(n)
for i in range(n):
sum_[i] = c[i]
return sum_
grad_dot_rev(c, x)
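# Illustrative check (added): the O(n) reverse-mode gradient agrees with the O(n^2) forward-mode one.
print(np.allclose(grad_dot(c, x), grad_dot_rev(c, x)))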
# ! pip install jax jaxlib
import jax
import jax.numpy as jnp
# def g_jax(x):
# Same function as before, but using jnp in place of np.
# y = x
# for i in range(2):
# y = jnp.cos(y**jnp.pi) * jnp.log(y)
# return y
g_jax = lambda x : g(x, m=jnp)
gprime_jax = jax.grad(g_jax)
print(gprime_jax(1.9))
print(gprime(1.9)[1])
n = 20
c = np.random.randn(n)
y = np.random.randn(n)
# SRB changing example a bit
# Let h(y) = dot(y,c), then grad h = c
h = lambda y : jnp.vdot(y,c)
print("Gradient via JAX AD is")
print( jax.grad(h)(y) )
# Alternatively,
# jax.grad(jmp.vdot)(y,c) will pass in both y and c to the function
# but only differentiation with respect to the first input (in this case, y)
print("Gradient worked out by hand is")
print( c )
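# Aside (added for illustration): jax.grad uses reverse mode, which is why a single call recovers
# the full gradient of h; jax.jacfwd and jax.jacrev expose forward- and reverse-mode differentiation
# explicitly and agree on this example.
print(np.allclose(jax.jacfwd(h)(y), jax.jacrev(h)(y)))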
m = 30
t = np.random.randn(m)
def f(x):
y = 1
for i in range(m):
y *= x - t[i]
return y
f(x)
x0 = t[0] + 1e-15
sympy.diff(f(x), x).evalf(25,subs={x: x0})
jax.grad(f)(x0)
%%timeit
sympy.diff(f(x), x).evalf(15,subs={x: x0})
%%timeit
jax.grad(f)(x0)
z = jnp.zeros(3)
z[1] = 1
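# The assignment above raises a TypeError because JAX arrays are immutable.
# Recent JAX releases provide a functional update instead (illustrative fix):
z = jnp.zeros(3)
z = z.at[1].set(1)
print(z)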
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: At this point, we have our stations and trips data loaded into memory.
Step2: Then, let's iterate over the stations DataFrame, and add in the node attributes.
Step3: In order to answer the question of "which stations are important", we need to specify things a bit more. Perhaps a measure such as betweenness centrality or degree centrality may be appropriate here.
Step4: Exercise
Step5: Exercise
Step6: Let's now try drawing the graph.
Step7: Exercise
Step8: In this visual, nodes are sorted from highest connectivity to lowest connectivity in the unfiltered graph.
| <ASSISTANT_TASK:>
Python Code:
# This block of code checks to make sure that a particular directory is present.
if "divvy_2013" not in os.listdir('datasets/'):
print('Unzip the divvy_2013.zip file in the datasets folder.')
stations = pd.read_csv('datasets/divvy_2013/Divvy_Stations_2013.csv', parse_dates=['online date'], index_col='id', encoding='utf-8')
stations
trips = pd.read_csv('datasets/divvy_2013/Divvy_Trips_2013.csv',
parse_dates=['starttime', 'stoptime'],
index_col=['trip_id'])
trips = trips.sort_index()  # DataFrame.sort() no longer exists in pandas; sort by the trip_id index instead
trips
G = nx.DiGraph()
for r, d in stations.iterrows(): # call the pandas DataFrame row-by-row iterator
G.add_node(r, attr_dict=d.to_dict())
# # Run the following code at your own risk :)
# for r, d in trips.iterrows():
# start = d['from_station_id']
# end = d['to_station_id']
# if (start, end) not in G.edges():
# G.add_edge(start, end, count=1)
# else:
# G.edge[start][end]['count'] += 1
for (start, stop), d in trips.groupby(['from_station_id', 'to_station_id']):
G.add_edge(start, stop, count=len(d))
G.edges(data=True)
len(G.edges())
len(G.nodes())
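# Illustrative addition: the notes above mention degree or betweenness centrality as possible
# measures of station importance; here are the five stations with the highest degree centrality.
deg_cent = nx.degree_centrality(G)
print(sorted(deg_cent.items(), key=lambda kv: kv[1], reverse=True)[:5])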
from collections import Counter
# Count the number of edges that have x trips recorded on them.
trip_count_distr = Counter([d['count'] for _, _, d in G.edges(data=True)])  # one possible solution to the exercise
# Then plot the distribution of these
plt.scatter(list(trip_count_distr.keys()), list(trip_count_distr.values()), alpha=0.1)
plt.yscale('log')
plt.xlabel('num. of trips')
plt.ylabel('num. of edges')
# Filter the edges to just those with more than 100 trips.
G_filtered = G.copy()
for u, v, d in G.edges(data=True):
    # One possible solution to the exercise: drop edges with 100 or fewer recorded trips.
    if d['count'] <= 100:
        G_filtered.remove_edge(u, v)
len(G_filtered.edges())
# One possible solution: order nodes by connectivity in the unfiltered graph and draw the filtered edges.
nodes = sorted(G.nodes(), key=lambda x: G.degree(x), reverse=True)
edges = G_filtered.edges()
edgeprops = dict(alpha=0.1)
nodecolor = plt.cm.viridis(np.arange(len(nodes)) / len(nodes))
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, edgeprops=edgeprops, nodecolor=nodecolor)
c.draw()
plt.savefig('images/divvy.png', dpi=300)
nx.write_gpickle(G, 'datasets/divvy_2013/divvy_graph.pkl')
G = nx.read_gpickle('datasets/divvy_2013/divvy_graph.pkl')
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load house sales data
Step2: Split data into training and testing
Step3: Useful SFrame summary functions
Step4: As we see we get the same answer both ways
Step5: Aside
Step6: We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line
Step7: Now that we know it works let's build a regression model for predicting price based on sqft_living. Remember that we train on train_data!
Step8: Predicting Values
Step9: Now that we can calculate a prediction given the slope and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estimated above.
Step10: Residual Sum of Squares
Step11: Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero!
Step12: Now use your function to calculate the RSS on training data from the squarefeet model calculated above.
Step13: Predict the squarefeet given price
Step14: Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that costs $800,000 to be.
Step15: New Model
Step16: Test your Linear Regression Algorithm
| <ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
train_data,test_data = sales.random_split(.8,seed=0)
# Let's compute the mean of the House Prices in King County in 2 different ways.
prices = sales['price'] # extract the price column of the sales SFrame -- this is now an SArray
# recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses:
sum_prices = prices.sum()
num_houses = prices.size() # when prices is an SArray .size() returns its length
avg_price_1 = sum_prices/num_houses
avg_price_2 = prices.mean() # if you just want the average, the .mean() function
print "average price via method 1: " + str(avg_price_1)
print "average price via method 2: " + str(avg_price_2)
# if we want to multiply every price by 0.5 it's a simple as:
half_prices = 0.5*prices
# Let's compute the sum of squares of price. We can multiply two SArrays of the same length elementwise also with *
prices_squared = prices*prices
sum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up.
print "the sum of price squared is: " + str(sum_prices_squared)
def simple_linear_regression(input_feature, output):
# compute the mean of input_feature and output
x = input_feature
y = output
avg_x = x.mean()
avg_y = y.mean()
n = x.size()
    # use the closed-form least-squares formula for the slope:
    # slope = sum(y * (x - mean(x))) / sum(x * (x - mean(x)))
x_err = x-avg_x
slope = (y*x_err).sum()/(x*x_err).sum()
# use the formula for the intercept
intercept = y.mean() - x.mean()*slope
return (intercept, slope)
test_feature = graphlab.SArray(range(5))
test_output = graphlab.SArray(1 + 1*test_feature)
(test_intercept, test_slope) = simple_linear_regression(test_feature, test_output)
print "Intercept: " + str(test_intercept)
print "Slope: " + str(test_slope)
sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price'])
print "Intercept: " + str(sqft_intercept)
print "Slope: " + str(sqft_slope)
def get_regression_predictions(input_feature, intercept, slope):
# calculate the predicted values:
predicted_values = intercept + slope * input_feature
return predicted_values
my_house_sqft = 2650
estimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope)
print "The estimated price for a house with %d squarefeet is $%.2f" % (my_house_sqft, estimated_price)
def get_residual_sum_of_squares(input_feature, output, intercept, slope):
# First get the predictions
predictions = get_regression_predictions(input_feature, intercept, slope)
# then compute the residuals (since we are squaring it doesn't matter which order you subtract)
resd = predictions-output
# square the residuals and add them up
RSS = (resd*resd).sum()
return(RSS)
print get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope) # should be 0.0
rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope)
print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)
def inverse_regression_predictions(output, intercept, slope):
# solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions:
estimated_feature = (output - intercept)/slope
return estimated_feature
my_house_price = 800000
estimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope)
print "The estimated squarefeet for a house worth $%.2f is %d" % (my_house_price, estimated_squarefeet)
# Estimate the slope and intercept for predicting 'price' based on 'bedrooms'
bedrooms_intercept, bedrooms_slope = simple_linear_regression(train_data['bedrooms'], train_data['price'])
# Compute RSS when using bedrooms on TEST data:
get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'], bedrooms_intercept, bedrooms_slope)
# Compute RSS when using squarefeet on TEST data:
get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'], sqft_intercept, sqft_slope)
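# Illustrative wrap-up (added): name the two test-set RSS values and compare the models directly.
rss_bedrooms_test = get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'], bedrooms_intercept, bedrooms_slope)
rss_sqft_test = get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'], sqft_intercept, sqft_slope)
print('Test RSS using bedrooms:   ' + str(rss_bedrooms_test))
print('Test RSS using squarefeet: ' + str(rss_sqft_test))
print('Squarefeet model has lower RSS: ' + str(rss_sqft_test < rss_bedrooms_test))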
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises - Methods and Functions
| <ASSISTANT_TASK:>
Python Code:
# Python language version
from platform import python_version
print('Python language version used in this Jupyter Notebook:', python_version())
# Exercise 1 - Create a function that prints the sequence of even numbers between 1 and 20 (the function takes no parameter)
# and then call the function to list the numbers
def listaPar():
for i in range(2, 21, 2):
print(i)
listaPar()
# Exercise 2 - Create a function that takes a string as an argument and returns the same string in uppercase letters.
# Call the function, passing a string as the parameter
def listaString(texto):
print(texto.upper())
return
listaString('Rumo à Análise de Dados')
# Exercise 3 - Create a function that takes a 4-element list as a parameter, adds 2 elements to the list and
# prints the list
def novaLista(lista):
    # list.append returns None, so append first and then print the list itself
    lista.append(5)
    lista.append(6)
    print(lista)
lista1 = [1, 2, 3, 4]
novaLista(lista1)
print(lista1)
# Exercise 4 - Create a function that takes one formal argument and an optional list of elements. Call the
# function twice, first with only 1 element and then with 4 elements
def printNum( arg1, *lista ):
print (arg1)
for i in lista:
print (i)
return;
# Call the function
printNum( 100 )
printNum( 'A', 'B', 'C' )
# Exercise 5 - Create an anonymous function and assign its return value to a variable called soma. The expression
# takes 2 numbers as parameters and returns their sum
soma = lambda arg1, arg2: arg1 + arg2
print("The sum is: ", soma(452, 298))
# Exercise 6 - Run the code below and make sure you understand the difference between global and local variables
total = 0
def soma(arg1, arg2):
    total = arg1 + arg2
    print("Inside the function the total is: ", total)
    return total
soma(10, 20)
print("Outside the function the total is: ", total)
# Exercise 7 - Below you find a list of temperatures in degrees Celsius
# Create an anonymous function that converts each temperature to Fahrenheit
# Hint: to solve this exercise, you should create your lambda function inside another function
# (which will be covered in the next chapter). That lets you apply your function to each element of the list
# How do you find the mathematical formula that converts Celsius to Fahrenheit? Look it up!
Celsius = [39.2, 36.5, 37.3, 37.8]
Fahrenheit = map(lambda x: (float(9)/5)*x + 32, Celsius)
print (list(Fahrenheit))
# Exercise 8
# Create a dictionary and list all of the dictionary's methods and attributes
dic = {'k1': 'Natal', 'k2': 'Recife'}
dir(dic)
import pandas as pd
pd.__version__
# Exercise 9
# Below you find the import of Pandas, one of the main Python packages for data analysis.
# Look carefully at all the available methods. You will use one of them in the next exercise.
import pandas as pd
dir(pd)
# ************* Challenge ************* (look it up in the Python documentation)
# Exercise 10 - Create a function that takes the file below as an argument and returns a descriptive statistical
# summary of the file. Hint: use Pandas and one of its methods, describe()
# File: "binary.csv"
import pandas as pd
file_name = "binary.csv"
def retornaArq(file_name):
df = pd.read_csv(file_name)
return df.describe()
retornaArq(file_name)
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting Histogram
Step2: By default, hist uses 10 equal-width bins to plot the data; we can change this number with bins=n
Step3: Another important keyword parameter of hist is "normed". "normed" is optional and the default value is 'False'. If it is set to 'True', the first element of the returned tuple will be the counts normalized to form a probability density.
Step4: If both the parameters 'normed' and 'stacked' are set to 'True', the sum of the histograms is normalized to 1.
Step5: We can plot it as a cumulative distribution function as well by setting the parameter 'cumulative'
Step6: Bar Plots
| <ASSISTANT_TASK:>
Python Code:
# import
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# generating some data points
X = np.random.randint(20, 51, 1000)  # np.random.random_integers was removed from NumPy; randint's upper bound is exclusive
Y = np.random.randint(20, 51, 1000)
plt.hist(X)
plt.xlabel("Value of X")
plt.ylabel("Freq")
gaussian_numbers = np.random.normal(size=10000)
plt.hist(gaussian_numbers)
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
n, bins, patches = plt.hist(gaussian_numbers)
print("n: ",n, np.sum(n)) # freq
print("bins: ", bins)
print("patches: ", patches)
for p in patches:
print(p,)
n, bins, patches = plt.hist(gaussian_numbers, bins=100)
n, bins, patches = plt.hist(gaussian_numbers, bins=100, normed=True)
plt.hist(gaussian_numbers,
bins=100,
normed=True,
stacked=True,
edgecolor="#6A9662",
color="#DDFFDD")
plt.show()
plt.hist(gaussian_numbers,
bins=100,
normed=True,
stacked=True,
cumulative=True)
plt.show()
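# Note for newer matplotlib releases (3.x): the 'normed' keyword used above has been removed;
# 'density=True' is the equivalent way to normalize the histogram. Illustrative example:
plt.hist(gaussian_numbers, bins=100, density=True)
plt.show()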
bars = plt.bar([1,2,3,4], [1,4,9,16])
bars[0].set_color('green')
plt.show()
f=plt.figure()
ax=f.add_subplot(1,1,1)
ax.bar([1,2,3,4], [1,4,9,16])
children = ax.get_children()
children[3].set_color('g')
years = ('2010', '2011', '2012', '2013', '2014')
visitors = (1241, 50927, 162242, 222093, 296665 / 8 * 12)
index = np.arange(len(visitors))
bar_width = 1.0
plt.bar(index, visitors, bar_width, color="green")
plt.xticks(index + bar_width / 2, years) # labels get centered
plt.show()
<END_TASK> |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
| <ASSISTANT_TASK:>
Python Code:
# one hot encode target sequence
def encode_output(sequences, vocab_size):
ylist = list()
for sequence in sequences:
encoded = to_categorical(sequence, num_classes=vocab_size)
ylist.append(encoded)
y = array(ylist)
y = y.reshape(sequences.shape[0], sequences.shape[1], vocab_size)
return y
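# Minimal usage sketch (added): the fragment above assumes NumPy's `array` and Keras'
# `to_categorical`; the exact import paths below are an assumption, not part of the original.
from numpy import array
from tensorflow.keras.utils import to_categorical
sequences = array([[1, 2, 0], [3, 1, 0]])
print(encode_output(sequences, vocab_size=4).shape)  # -> (2, 3, 4)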
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2 - Compound Interest
Step2: Exercise 3 - Income Tax
Step3: Exercise 4 - Summation
Step4: Exercise 5 - Palindrome
Step5: Exercise 6 - Smallest and largest in the list
Step6: Exercise 7 - N consecutive odd numbers
| <ASSISTANT_TASK:>
Python Code:
def CaixaEletronico(valor):
notas50 = valor // 50
valor = valor % 50
notas20 = valor // 20
valor = valor % 20
notas10 = valor // 10
valor = valor % 10
notas5 = valor // 5
valor = valor % 5
notas1 = valor // 1
return (notas50, notas20, notas10, notas5, notas1)
CaixaEletronico(184)
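# Illustrative alternative (added): the same greedy note breakdown written as a loop over the
# denominations, using divmod instead of repeated // and % pairs.
def caixa_eletronico_loop(valor, notas=(50, 20, 10, 5, 1)):
    quantidades = []
    for nota in notas:
        quantidade, valor = divmod(valor, nota)
        quantidades.append(quantidade)
    return tuple(quantidades)
print(caixa_eletronico_loop(184))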
def JurosCompostos(montante,taxa,meses):
correcao = 1
for i in range(meses):
correcao *= (1+(taxa/100))
return montante*correcao, (correcao - 1)*100
print(JurosCompostos(1000,1,12))
def ImpostoRenda(salario):
salario = salario * 0.89
if (salario <= 24000):
return salario
else:
return salario - (0.15 * (salario-24000))
print(ImpostoRenda(30000))
def Soma(n):
soma = 0
for i in range(1,n+1):
soma = soma + i*( (-1)**i)
return soma
print(Soma(10))
def Palindromo(nome):
n = len(nome)
k = 0
while k<n/2:
if nome[k] != nome[n-k-1]:
return False
k = k+1
return True
print(Palindromo('RADAR'))
print(Palindromo('Python'))
def MaiorMenor(lista):
menor = lista[0]
maior = lista[0]
for elemento in lista:
if elemento > maior:
maior = elemento
if elemento < menor:
menor = elemento
return maior, menor
lista = [1,2,4,16,32,64,-128]
print(MaiorMenor(lista))
def Consecutivos(n):
soma = 0
for i in range(1,n+1):
soma += i
impares = [x for x in range(2*soma) if x%2!=0 ]
return impares[soma-n:soma]
print(Consecutivos(4), sum(Consecutivos(4)), 4**3)
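# Quick check (added): the sum of the n consecutive odd numbers returned above equals n**3,
# which is the identity this exercise illustrates.
for n in range(1, 8):
    assert sum(Consecutivos(n)) == n ** 3
print('sum(Consecutivos(n)) == n**3 holds for n = 1..7')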
type( (1,2))
<END_TASK> |