path (string, 8-399 chars) | content_id (string, 40 chars) | detected_licenses (sequence) | license_type (string, 2 classes) | repo_name (string, 6-109 chars) | repo_url (string, 25-128 chars) | star_events_count (int64, 0-52.9k) | fork_events_count (int64, 0-7.07k) | gha_license_id (string, 9 classes) | gha_event_created_at (timestamp[us]) | gha_updated_at (timestamp[us]) | gha_language (string, 28 classes) | language (string, 1 class) | is_generated (bool, 1 class) | is_vendor (bool, 1 class) | conversion_extension (string, 17 classes) | size (int64, 317-10.5M) | script (string, 245-9.7M chars) | script_size (int64, 245-9.7M)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
/Auto AI Predicting Life Expectancy - P3 notebook.ipynb | b66091b122b6071b42e91f028ba55652a90c7eed | [] | no_license | SmartPracticeschool/llSPS-INT-2664-Predicting-Life-Expectancy-using-Machine-Learning | https://github.com/SmartPracticeschool/llSPS-INT-2664-Predicting-Life-Expectancy-using-Machine-Learning | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 48,784 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (sandboxed)
# language: python
# name: python3
# ---
# # Bloomberg BQuant Spotlight Webinar Series: Balancing Act
# This is a companion notebook to the "Understanding the Financial Statement Impact of Operating Leases" webinar.
# + deletable=true editable=true
import bqplot as bqp
from bqplot.interacts import BrushSelector
import pandas as pd
import numpy as np
from ipywidgets import Dropdown, HBox, VBox, HTML
import bqwidgets as bqw
import bql
bq=bql.Service()
# + deletable=true editable=true
params={'currency':'USD','fa_filing_status':'MRXP'}
qparams={'currency':'USD','fa_filing_status':'MRXP','fa_period_type':'Q'}
# + [markdown] deletable=true editable=true
# <h5 style='color:LIGHTSKYBLUE'>Proxy Metric - Operating Lease Percentage of Liabilities</h5>
# + deletable=true editable=true
total_ol_liability=bq.func.znav(bq.data.bs_total_operating_lease_liabs(**params))
debt=bq.func.znav(bq.data.bs_tot_liab2(**params))
op_lease_pct_of_liabilities=bq.func.if_(bq.func.or_(bq.func.equals(total_ol_liability,0),bq.func.equals(debt,0)),
0,
bq.func.dropna(total_ol_liability/debt))*100
# + [markdown] deletable=true editable=true
# <h5 style='color:LightSeaGreen'>Pre-ASC842 EBITDA</h5>
# + deletable=true editable=true
def bq_old_ebitda(params):
grid=bq.data.eqy_fund_ind()
da=bq.data.cf_depr_amort(**params)
oi=bq.data.is_oper_inc(**params)
reit_oi=bq.data.ebit(**params)
industrial=oi+da
financial=oi+da+bq.func.znav(bq.data.is_int_expenses(**params))
utility=oi+da+bq.func.znav(bq.data.is_total_d_and_a_adjustment(**params))
reit=reit_oi+da
old_ebitda=bq.func.if_(bq.func.in_(grid,['Industrial']),industrial,
bq.func.if_(bq.func.in_(grid,['Financial']),financial,
bq.func.if_(bq.func.in_(grid,['Utility']),utility,
bq.func.if_(bq.func.in_(grid,['REIT']),reit,bql.NA))))
return old_ebitda
old_ebitda=bq_old_ebitda(params)
old_qebitda=bq_old_ebitda(qparams)
# + [markdown] deletable=true editable=true
# <h5 style='color:LightSeaGreen'>Pre-ASC842 Enterprise Value</h5>
# + deletable=true editable=true
old_ev=bq.data.curr_entp_val(**params)-bq.func.znav(bq.data.bs_total_operating_lease_liabs(**params))
# + [markdown] deletable=true editable=true
# <h5 style='color:LightSeaGreen'>Pre-ASC842 Total Debt</h5>
# + deletable=true editable=true
old_debt=bq.data.short_and_long_term_debt(**params)-bq.func.znav(bq.data.bs_total_operating_lease_liabs(**params))
# + [markdown] deletable=true editable=true
# <h5 style='color:LIGHTSKYBLUE'>Post-ASC842 vs. Pre-ASC842</h5>
# + deletable=true editable=true
ev=bq.data.curr_entp_val(**params)
ebitda=bq.data.ebitda(**params)
qebitda=bq.data.ebitda(**qparams)
sales=bq.data.sales_rev_turn(**params)
debt=bq.data.short_and_long_term_debt(**params)
ev_ebitda=bq.func.if_(ebitda>0,ev/ebitda,bql.NA)
old_ev_ebitda=bq.func.if_(old_ebitda>0,old_ev/old_ebitda,bql.NA)
disc_ev_ebitda=bq.func.if_(ev_ebitda/old_ev_ebitda>0,
ev_ebitda/old_ev_ebitda-1,
bql.NA)*100
ebitda_margin=bq.func.if_(sales>0,ebitda/sales,bql.NA)
old_ebitda_margin=bq.func.if_(sales>0,old_ebitda/sales,bql.NA)
disc_ebitda_margin=bq.func.if_(ebitda_margin/old_ebitda_margin>0,
ebitda_margin/old_ebitda_margin-1,
bql.NA)*100
debt_ebitda=bq.func.if_(ebitda>0,debt/ebitda,bql.NA)
old_debt_ebitda=bq.func.if_(old_ebitda>0,old_debt/old_ebitda,bql.NA)
disc_debt_to_ebitda=(debt_ebitda/old_debt_ebitda-1)*100
disc_10q_ebitda=bq.func.if_(bq.func.and_(bq.func.equals(old_qebitda,0)==False,qebitda/old_qebitda>0),
qebitda/old_qebitda-1,
bql.NA)*100
# + [markdown] deletable=true editable=true
# <h5 style='color:ORANGE'>Data Request</h5>
# + deletable=true editable=true
req_d={'Name':bq.data.name(),
'Sector':bq.data.gics_sector_name(),
'Industry':bq.data.gics_industry_name(),
'FA Class':bq.data.eqy_fund_ind(),
'Op Lease Pct of Liabilities':op_lease_pct_of_liabilities,
'EV to EBITDA':disc_ev_ebitda,
'EBITDA Margin':disc_ebitda_margin,
'Debt to EBITDA':disc_debt_to_ebitda,
'Last 10-Q EBITDA':disc_10q_ebitda}
univ_filter_criteria=bq.func.and_(bq.func.znav(bq.data.bs_total_operating_lease_liabs(**params))>0,
bq.func.in_(bq.data.eqy_fund_ind(),['Industrial','REIT','Utility','Financial']))
univ=bq.univ.filter(bq.univ.members('SPX Index'),univ_filter_criteria)
req=bql.Request(univ,req_d)
# + deletable=true editable=true
bqexec=bq.execute(req)
# + deletable=true editable=true
reference_columns=['Name','Sector','Industry','FA Class','Op Lease Pct of Liabilities']
discrepancy_columns=['Last 10-Q EBITDA', 'EV to EBITDA','EBITDA Margin', 'Debt to EBITDA']
# + deletable=true editable=true
df_cols=[]
for col in reference_columns+discrepancy_columns:
df_cols.append(bqexec.get(col).df()[col])
data=pd.concat(df_cols,axis=1).reset_index().rename(columns={'ID':'Ticker'})
# + deletable=true editable=true
data.head()
# + deletable=true editable=true
data_clean=data.copy()
for col in discrepancy_columns:
data_clean[col]=data_clean[col].clip(lower=data[col].quantile(0.05),upper=data[col].quantile(0.95))
# + deletable=true editable=true
data_clean=data_clean.round(decimals=2)
# + [markdown] deletable=true editable=true
# <h5 style='color:ORANGE'>Post-ASC842 vs. Pre-ASC842 Visualization</h5>
# + deletable=true editable=true
# Data source
# data_clean
data_cols=['Op Lease Pct of Liabilities']+discrepancy_columns
# Create scales
scale_x = bqp.LinearScale()
scale_y = bqp.LinearScale()
c_sc=bqp.OrdinalColorScale()
ttp_flds=['name','color']
ttp_lbls=['Name','Sector']
ttp=bqp.Tooltip(fields=ttp_flds,labels=ttp_lbls)
# Create marks
mark_scatter = bqp.Scatter(x=data_clean[data_cols[0]],
y=data_clean[data_cols[1]],
scales={'x': scale_x, 'y': scale_y,'color':c_sc},
default_size=48,
color=data_clean['Industry'],
names=data_clean['Name'],
display_names=False,
tooltip=ttp)
# Create Axes
axis_x = bqp.Axis(scale=scale_x, label=data_cols[0])
axis_y = bqp.Axis(scale=scale_y,
orientation='vertical',
tick_format='0.0f',
label=data_cols[1])
# Create selector
selector = BrushSelector(x_scale=scale_x,
y_scale=scale_y,
marks=[mark_scatter])
# Create Figure
figure = bqp.Figure(marks=[mark_scatter],
axes=[axis_x, axis_y],
animation_duration=500,
layout={'width':'99%', 'height':'400px'},
padding_x=0.05,
title='S&P 500 ASC 842 Impact',
title_style={'font-size': '22px'},
padding_y=0.05,
interaction=selector,
fig_margin={'top': 50, 'bottom': 60,
'left': 50, 'right':30})
# Create dropdown widgets
dropdown_x = Dropdown(description='X axis',
options=data_cols,
value=data_cols[0])
dropdown_y = Dropdown(description='Y axis',
options=data_cols,
value=data_cols[1])
# Define callback function for dropdown widgets
def update_plot(evt):
if evt is not None:
new_value = evt['new']
if evt['owner'] == dropdown_x:
mark_scatter.x = data_clean[new_value]
axis_x.label = new_value
elif evt['owner'] == dropdown_y:
mark_scatter.y = data_clean[new_value]
axis_y.label = new_value
# Define callback function for selections
def on_select(evt):
if evt is not None and evt['new'] is not None:
indices = evt['new']
datagrid.data = data_clean.iloc[indices].reset_index()
# Bind callback to the dropdown widgets
dropdown_x.observe(update_plot, names=['value'])
dropdown_y.observe(update_plot, names=['value'])
mark_scatter.observe(on_select, names=['selected'])
# Create datagrid
col_defs=[{'children': [{'field': 'Ticker', 'headerName': 'Ticker', 'width': 170},
{'field': 'Name', 'headerName': 'Name', 'width': 190},
{'field': 'Sector', 'headerName': 'Sector', 'width': 190},
{'field': 'Industry', 'headerName': 'Industry', 'width': 190},
{'field': 'FA Class', 'headerName': 'FA Class', 'width': 96},
{'field': 'Op Lease Pct of Liabilities',
'headerName': 'Op Lease Pct of Liabilities',
'width': 240}],
'headerName': 'Company'},
{'children': [{'field': 'Last 10-Q EBITDA',
'headerName': 'Last 10-Q EBITDA',
'width': 192},
{'field': 'EV to EBITDA', 'headerName': 'EV to EBITDA', 'width': 144},
{'field': 'EBITDA Margin', 'headerName': 'EBITDA Margin', 'width': 156},
{'field': 'Debt to EBITDA', 'headerName': 'Debt to EBITDA', 'width': 168}],
'headerName': '% Discrepancy in ASU 2016-02 Impacted Data'}]
datagrid = bqw.DataGrid(data=data_clean,column_defs=col_defs)
# Create Box containers
widget_box = HBox([dropdown_x, dropdown_y], layout={'margin': '10px'})
app_container = VBox([figure, widget_box, datagrid],
layout={'width':'100%'})
# Display the visualization
app_container
| 9,933 |
/minhpham/.ipynb_checkpoints/DataPrep-checkpoint.ipynb | 00745c1fa9e39ff55386587ddaedfc441a79395f | [] | no_license | saigontrade88/IEEEBigData21_RL | https://github.com/saigontrade88/IEEEBigData21_RL | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 10,817 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.datasets import make_blobs
X, y =make_blobs(centers=4, random_state =8)
y = y%2
# +
from IPython.display import display
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
import mglearn
import platform
from matplotlib import font_manager, rc
plt.rcParams['axes.unicode_minus']=False
if platform.system()=='Darwin':
rc('font', family='AppleGothic')
elif platform.system() =='Windows':
path = 'c:/Windows/Fonts/malgun.ttf'
font_name = font_manager.FontProperties(fname=path).get_name()
rc('font', family=font_name)
else:
print('Unknown system... sorry~~~~')
# -
mglearn.discrete_scatter(X[:,0], X[:,1],y)
plt.xlabel('Feature 0')
plt.ylabel('Feature 1')
from sklearn.svm import LinearSVC
linear_svm = LinearSVC().fit(X,y)
mglearn.plots.plot_2d_separator(linear_svm, X)
mglearn.discrete_scatter(X[:,0], X[:,1],y)
plt.xlabel('Feature 0')
plt.ylabel('Feature 1')
# Add the squared second feature.
X_new = np.hstack([X, X[:,1:]**2])
from mpl_toolkits.mplot3d import Axes3D, axes3d
figure = plt.figure()
# 3D plot
ax = Axes3D(figure, elev=-152, azim =-26)
# Plot the points with y == 0 first, then the points with y == 1.
mask = y ==0
ax.scatter(X_new[mask,0], X_new[mask, 1], X_new[mask, 2], c='b',
cmap=mglearn.cm2, s=60, edgecolor='k')
ax.scatter(X_new[~mask,0], X_new[~mask,1], X_new[~mask, 2], c='r', marker='^',
cmap=mglearn.cm2, s=60, edgecolor='k')
ax.set_xlabel('Feature 0')
ax.set_ylabel('Feature 1')
ax.set_zlabel('Feature 1 ** 2')
# +
linear_svm_3d = LinearSVC().fit(X_new, y)
coef, intercept = linear_svm_3d.coef_.ravel(), linear_svm_3d.intercept_
# Draw the linear decision boundary
figure = plt.figure()
ax = Axes3D(figure, elev=-152, azim = -26)
xx = np.linspace(X_new[:,0].min()- 2,X_new[:,0].max() +2, 50)
yy = np.linspace(X_new[:,1].min()-2, X_new[:,1].max() +2, 50)
XX,YY = np.meshgrid(xx,yy)
ZZ =(coef[0] * XX +coef[1]*YY +intercept) / -coef[2]
ax.plot_surface(XX,YY,ZZ,rstride=8, cstride=8, alpha=0.3)
ax.scatter(X_new[mask, 0], X_new[mask,1], X_new[mask, 2],c ='b',
cmap=mglearn.cm2, s =60, edgecolor='k')
ax.scatter(X_new[~mask,0], X_new[~mask,1], X_new[~mask,2], c='r', marker='^',
cmap = mglearn.cm2, s=60, edgecolor='k')
ax.set_xlabel('Feature 0')
ax.set_ylabel('Feature 1')
ax.set_zlabel('Feature 1 ** 2')
# -
ZZ = YY **2
dec = linear_svm_3d.decision_function(np.c_[XX.ravel(), YY.ravel(), ZZ.ravel()])
plt.contour(XX, YY, dec.reshape(XX.shape), levels = [dec.min(), 0, dec.max()],
cmap=mglearn.cm2, alpha =0.5)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
plt.xlabel('Feature 0')
plt.ylabel('Feature 1')
# ### The kernel trick
# ### Understanding SVMs
from sklearn.svm import SVC
X,y = mglearn.tools.make_handcrafted_dataset()
svm = SVC(kernel ='rbf', C=10, gamma = 0.1).fit(X,y)
mglearn.plots.plot_2d_separator(svm, X, eps=.5)
# Plot the data points
mglearn.discrete_scatter(X[:, 0], X[:,1], y)
# Support vectors
sv = svm.support_vectors_
# The class labels of the support vectors are determined by the sign of dual_coef_.
sv_labels = svm.dual_coef_.ravel()>0
mglearn.discrete_scatter(sv[:,0], sv[:,1], sv_labels, s=15, markeredgewidth=3)
plt.xlabel('Feature 0')
plt.ylabel('Feature 1')
# ### Tuning SVM parameters
# +
fig, axes = plt.subplots(3,3, figsize=(15,10))
for ax, C in zip(axes, [-1,0,3]):
for a, gamma in zip(ax, range(-1,2)):
mglearn.plots.plot_svm(log_C=C, log_gamma=gamma, ax=a)
axes[0,0].legend(['Class 0', 'Class 1', 'Class 0 support vectors', 'Class 1 support vectors'], ncol=4, loc=(.9, 1.2))
# -
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
svc = SVC()
svc.fit(X_train, y_train)
print('Training set accuracy: {:.3f}'.format(svc.score(X_train, y_train)))
print('Test set accuracy: {:.3f}'.format(svc.score(X_test, y_test)))
plt.boxplot(X_train)
plt.yscale('symlog')
plt.xlabel('Feature index')
plt.ylabel('Feature magnitude')
# ### Data preprocessing for SVMs
# +
# Compute the per-feature minimum on the training set
min_on_training = X_train.min(axis =0)
# Compute the per-feature range (max - min) on the training set
range_on_training = (X_train - min_on_training).max(axis =0)
# Subtracting the training-set minimum and dividing by the range
# gives each feature a minimum of 0 and a maximum of 1.
X_train_scaled = (X_train - min_on_training) / range_on_training
print('Per-feature minimum\n', X_train_scaled.min(axis=0))
print('Per-feature maximum\n', X_train_scaled.max(axis=0))
# -
# Apply the same transformation to the test set,
# but use the minimum and range computed on the training set.
X_test_scaled = (X_test - min_on_training) / range_on_training
# +
svc = SVC()
svc.fit(X_train_scaled, y_train)
print('Training set accuracy: {:.3f}'.format(svc.score(X_train_scaled, y_train)))
print('Test set accuracy: {:.3f}'.format(svc.score(X_test_scaled, y_test)))
# -
svc = SVC(C=1000)
svc.fit(X_train_scaled, y_train)
print('Training set accuracy: {:.3f}'.format(svc.score(X_train_scaled, y_train)))
print('Test set accuracy: {:.3f}'.format(svc.score(X_test_scaled, y_test)))
= (rawTestSet.shape[0], N_ITEMS+N_USER_PORTRAITS)), columns = colNames)
# parse each line in parallel
# first objects in shared memory for input and output
print('creating shared memory objects ... ')
mpManager = mp.Manager()
inputSharedList = mpManager.list(rawTestSet.values.tolist()) # for memory efficiency
outputSharedList = mpManager.list(output.values.tolist()) # shared output as a list (because DataFrame can't)
p = mp.Pool(N_THREADS)
print('multiprocessing ... ')
for i in tqdm(range(rawTestSet.shape[0])):
p.apply_async(parseUserFeaturesOneLine, [i, inputSharedList, outputSharedList])
p.close()
p.join()
# convert outputSharedList back to DataFrame
print('convert to DataFrame ...')
output = pd.DataFrame(data = outputSharedList, columns = colNames)
# write to pkl file
output.to_pickle('/tf/shared/data/UserFeaturesTestSet.pkl')
test = prepareUserFeaturesTrainSet()
print(test)
# preparePurchasedItemsTrainSet()
def getUserFeaturesTrainSet():
"""
return: DataFrame with N_ITEMS+N_USER_PORTRAITS columns
first N_ITEMS cols: one hot encoding of clicked items
last N_USER_PORTRAITS cols: normalized user portraits
"""
return pd.read_pickle('./data/UserFeaturesTrainSet.pkl')
def getPurchasedItemsTrainSet():
"""
return: a list, each element is a list of purchased item by a user
list length is same as PurchasedItemsTrainSet's nrow
"""
file = open('/tf/minhpham/data/PurchasedItemsTrainSet.pkl', 'rb')
data = pickle.load(file)
file.close()
return data
# -
from sklearn.preprocessing import MinMaxScaler
UserFeaturesTrainSet = getUserFeaturesTrainSet()
for i in range(N_USER_PORTRAITS):
colName = 'userPortrait' + str(i+1)
scaler = MinMaxScaler()
UserFeaturesTrainSet[colName] = scaler.fit_transform(UserFeaturesTrainSet[colName].values.reshape(-1,1))
UserFeaturesTrainSet.to_pickle('./data/UserFeaturesTrainSet.pkl')
test = getPurchasedItemsTrainSet()
print(test[:10])
| 7,072 |
/chap010_numeric_limits/chap010_001_numeric_limits.ipynb | 35da385d399d31ebcd58fd0dfe6b7b2751dfd63c | [] | no_license | dandrewmyers/python-econometrics | https://github.com/dandrewmyers/python-econometrics | 6 | 4 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,396 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Numeric Limits
import numpy as np
np.finfo(float).max
np.finfo(float).min
np.finfo(float).tiny
np.finfo(float).eps
x = 1.0
eps = np.finfo(float).eps
x = x+eps/2
x == 1
x - 1
x = 1 + 2*eps
x == 1
x - 1
x = 10
x + 2*eps
x - 10
(x - 10) == 0
(1e120 - 1e103) == 1e120
1e103 / 1e120
rn int(PHI ** n / m.sqrt(5) + 0.5)
print("fib =",fib(8))
| 630 |
/Lasso Regression.ipynb | e11f9b9d269dc74d2773e66bc560bf4c5661e4bc | [] | no_license | krishnan166/ML-for-Antenna-Optimisation | https://github.com/krishnan166/ML-for-Antenna-Optimisation | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,231 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RedaElmar/Course_intro_to_TF_for_DL/blob/master/l02c01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="HnKx50tv5aZD"
# ##### Copyright 2018 The TensorFlow Authors.
# + [markdown] colab_type="text" id="YHI3vyhv5p85"
# # The Basics: Training Your First Model
# + [markdown] colab_type="text" id="F8YVA_634OFk"
# Welcome to this Colab where you will train your first Machine Learning model!
#
# We'll try to keep things simple here, and only introduce basic concepts. Later Colabs will cover more advanced problems.
#
# The problem we will solve is to convert from Celsius to Fahrenheit, where the approximate formula is:
#
# $$ f = c \times 1.8 + 32 $$
#
#
# Of course, it would be simple enough to create a conventional Python function that directly performs this calculation, but that wouldn't be machine learning.
#
#
# Instead, we will give TensorFlow some sample Celsius values (0, 8, 15, 22, 38) and their corresponding Fahrenheit values (32, 46, 59, 72, 100).
# Then, we will train a model that figures out the above formula through the training process.
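# For reference, the conventional (non-ML) version mentioned above is just a direct application of the formula. This is a minimal sketch, not part of the original notebook:
#
# +
def celsius_to_fahrenheit(c):
    """Convert degrees Celsius to degrees Fahrenheit using the exact formula."""
    return c * 1.8 + 32

print(celsius_to_fahrenheit(38))  # 100.4 -- close to the rounded sample pair (38, 100) used below
# -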
# + [markdown] colab_type="text" id="fA93WUy1zzWf"
# ## Import dependencies
#
# First, import TensorFlow. Here, we're calling it `tf` for ease of use. We also tell it to only display errors.
#
# Next, import [NumPy](http://www.numpy.org/) as `np`. Numpy helps us to represent our data as highly performant lists.
# + colab_type="code" id="-ZMgCvSRFqxE" colab={}
import tensorflow as tf
# + colab_type="code" id="y_WQEM5MGmg3" colab={}
import numpy as np
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# + [markdown] colab_type="text" id="AC3EQFi20buB"
# ## Set up training data
#
# As we saw before, supervised Machine Learning is all about figuring out an algorithm given a set of inputs and outputs. Since the task in this Codelab is to create a model that can give the temperature in Fahrenheit when given the degrees in Celsius, we create two lists `celsius_q` and `fahrenheit_a` that we can use to train our model.
# + colab_type="code" id="gg4pn6aI1vms" colab={}
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i,c in enumerate(celsius_q):
print("{} degrees Celsius = {} degrees Fahrenheit".format(c, fahrenheit_a[i]))
# + [markdown] colab_type="text" id="wwJGmDrQ0EoB"
# ### Some Machine Learning terminology
#
# - **Feature** - The input(s) to our model. In this case, a single value - the degrees in Celsius.
#
# - **Labels** - The output our model predicts. In this case, a single value - the degrees in Fahrenheit.
#
# - **Example** - A pair of inputs/outputs used during training. In our case a pair of values from `celsius_q` and `fahrenheit_a` at a specific index, such as `(22,72)`.
#
# + [markdown] colab_type="text" id="VM7_9Klvq7MO"
# ## Create the model
#
# Next, create the model. We will use the simplest possible model we can, a Dense network. Since the problem is straightforward, this network will require only a single layer, with a single neuron.
#
# ### Build a layer
#
# We'll call the layer `l0` and create it by instantiating `tf.keras.layers.Dense` with the following configuration:
#
# * `input_shape=[1]` - This specifies that the input to this layer is a single value. That is, the shape is a one-dimensional array with one member. Since this is the first (and only) layer, that input shape is the input shape of the entire model. The single value is a floating point number, representing degrees Celsius.
#
# * `units=1` - This specifies the number of ne
# Test accuracy: 0.6851
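# A minimal sketch of building and training the single-unit Dense model described above. It is consistent with the later cells (which refer to `l0`, `history`, 500 training epochs and `Adam(0.1)`), but the exact arguments here are assumptions rather than the notebook's original cell:
#
# +
l0 = tf.keras.layers.Dense(units=1, input_shape=[1])  # one neuron taking one input value
model = tf.keras.Sequential([l0])
model.compile(loss='mean_squared_error',
              optimizer=tf.keras.optimizers.Adam(0.1))
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
# -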
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Activation('elu'))
model.add(Dense(1000))
model.add(Activation('elu'))
model.add(Dense(100))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
"""
"""
#Training MLP took 1835.7603313922882 seconds
#Test loss: 2.2038055744171143
#Test accuracy: 0.6506
model = Sequential()
# https://stackoverflow.com/questions/34619177/what-does-tf-nn-conv2d-do-in-tensorflow
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
# https://www.quora.com/What-is-max-pooling-in-convolutional-neural-networks
model.add(MaxPooling2D(pool_size=(2, 2)))
# Noise... more or less
model.add(Dropout(0.25))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Activation('elu'))
model.add(Dense(1000))
model.add(Activation('elu'))
model.add(Dense(100))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
"""
"""
# Test loss: 1.401836185646057
# Test accuracy: 0.7064
# Training MLP took 2226.8992404937744 seconds
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Activation('elu'))
model.add(Dense(1000))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
"""
"""
# With 64 filters going out and 32 coming in:
# Test loss: 1.0052009043216705
# Test accuracy: 0.7138
# Training MLP took 2096.7810397148132 seconds
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu'))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Activation('elu'))
model.add(Dense(1000))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
"""
# With 32 filters going out and 32 coming in:
# Test loss: 1.035853784275055
# Test accuracy: 0.7291
# Training MLP took 2071.0573613643646 seconds
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(32, kernel_size=(3, 3), activation='elu'))
model.add(Conv2D(64, kernel_size=(3, 3), activation='elu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Activation('elu'))
model.add(Dense(1000))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 3060} colab_type="code" id="NJ7U0u3IpuPj" outputId="57b74713-afd3-49fb-b0ec-f20d6ba9ab5a"
# Training
import time
start = time.time()
history = model.fit(X_train, Y_train, batch_size=128, epochs=20, verbose=1, validation_data=(X_test, Y_test))
end = time.time()
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', loss)
print('Test accuracy:', acc)
plot_model_history(history)
print("Training MLP took " + str(end - start) + " seconds")
# + colab={} colab_type="code" id="R4547GWaFX-c"
# rks internally.
# + [markdown] colab_type="text" id="0-QsNCLD4MJZ"
# ## Display training statistics
#
# The `fit` method returns a history object. We can use this object to plot how the loss of our model goes down after each training epoch. A high loss means that the Fahrenheit degrees the model predicts is far from the corresponding value in `fahrenheit_a`.
#
# We'll use [Matplotlib](https://matplotlib.org/) to visualize this (you could use another tool). As you can see, our model improves very quickly at first, and then has a steady, slow improvement until it is very near "perfect" towards the end.
#
# + colab_type="code" id="IeK6BzfbdO6_" colab={}
import matplotlib.pyplot as plt
plt.xlabel('Epoch Number')
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
# + [markdown] colab_type="text" id="LtQGDMob5LOD"
# ## Use the model to predict values
#
# Now you have a model that has been trained to learn the relationship between `celsius_q` and `fahrenheit_a`. You can use the predict method to have it calculate the Fahrenheit degrees for a previously unknown Celsius value.
#
# So, for example, if the Celsius value is 100, what do you think the Fahrenheit result will be? Take a guess before you run this code.
# + colab_type="code" id="oxNzL4lS2Gui" colab={}
print(model.predict([100.0]))
# + [markdown] colab_type="text" id="jApk6tZ1fBg1"
# The correct answer is $100 \times 1.8 + 32 = 212$, so our model is doing really well.
#
# ### To review
#
#
# * We created a model with a Dense layer
# * We trained it with 3500 examples (7 pairs, over 500 epochs).
#
# Our model tuned the variables (weights) in the Dense layer until it was able to return the correct Fahrenheit value for any Celsius value. (Remember, 100 Celsius was not part of our training data.)
#
# + [markdown] colab_type="text" id="zRrOky5gm20Z"
# ## Looking at the layer weights
#
# Finally, let's print the internal variables of the Dense layer.
# + colab_type="code" id="kmIkVdkbnZJI" colab={}
print("These are the layer variables: {}".format(l0.get_weights()))
# + [markdown] colab_type="text" id="RSplSnMvnWC-"
# The first variable is close to ~1.8 and the second to ~32. These values (1.8 and 32) are the actual variables in the real conversion formula.
#
# This is really close to the values in the conversion formula. We'll explain this in an upcoming video where we show how a Dense layer works, but for a single neuron with a single input and a single output, the internal math looks the same as [the equation for a line](https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form), $y = mx + b$, which has the same form as the conversion equation, $f = 1.8c + 32$.
#
# Since the form is the same, the variables should converge on the standard values of 1.8 and 32, which is exactly what happened.
#
# With additional neurons, additional inputs, and additional outputs, the formula becomes much more complex, but the idea is the same.
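# As a quick sanity check (a sketch, assuming `l0` holds weights close to 1.8 and 32 as printed above), the prediction for 100 degrees Celsius can be reproduced by hand from the two fitted variables:
#
# +
w, b = l0.get_weights()       # kernel of shape (1, 1) and bias of shape (1,)
print(w[0][0] * 100 + b[0])   # roughly 1.8 * 100 + 32 = 212
# -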
#
# ### A little experiment
#
# Just for fun, what if we created more Dense layers with different units, which therefore also has more variables?
# + colab_type="code" id="Y2zTA-rDS5Xk" colab={}
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
print(model.predict([100.0]))
print("Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit".format(model.predict([100.0])))
print("These are the l0 variables: {}".format(l0.get_weights()))
print("These are the l1 variables: {}".format(l1.get_weights()))
print("These are the l2 variables: {}".format(l2.get_weights()))
# + [markdown] colab_type="text" id="xrpFFlgYhCty"
# As you can see, this model is also able to predict the corresponding Fahrenheit value really well. But when you look at the variables (weights) in the `l0` and `l1` layers, they are nothing even close to ~1.8 and ~32. The added complexity hides the "simple" form of the conversion equation.
#
# Stay tuned for the upcoming video on how Dense layers work for the explanation.
# + id="wOW7URUlvKfZ" colab_type="code" colab={}
| 12,935 |
/notebook/run.ipynb | 0c9f70e94844cfbb60e7c08a32e85c9cfb3b34c1 | [] | no_license | 900groove/lstm-crf | https://github.com/900groove/lstm-crf | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,000 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
import pickle
import torch
sys.path.append(os.pardir)
from lstm_crf.lstm_crf import BiLSTM_CRF
from lstm_crf.util import parse_text
# +
START_TAG = "<START>"
STOP_TAG = "<STOP>"
with open('../data/word_to_ix_nikkei.pickle', mode='rb') as f:
word_to_ix = pickle.load(f)
tag_to_ix = {'*': 0,
'ใตๅคๆฅ็ถ': 1,
'ไธ่ฌ': 2,
'ไบบๅ': 3,
'ๅฏ่ฉๅฏ่ฝ': 4,
'ๅฉๅ่ฉ่ชๅนน': 5,
'ๅฉๆฐ่ฉ': 6,
'ๅฐๅ': 7,
'ๅผ็จ': 8,
'ๅฝขๅฎนๅ่ฉ่ชๅนน': 9,
'็นๆฎ': 10,
'็ต็น': 11,
'้ฃ่ช': 12,
START_TAG: 13,
STOP_TAG: 14}
ix_to_tag = {i:t for t, i in tag_to_ix.items()}
# +
EMBEDDING_DIM = 100
HIDDEN_DIM = 200
TRAINIG_EPOCH = 5
BATCH_SIZE = 128
# DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
DEVICE = 'cpu'
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
model.load_state_dict(torch.load('../model/trained_model.model'))
# -
def make_input(text):
words = parse_text(text)[0]
#word_id = [word_to_ix[w] for w in words]
input = []
result = []
for w in words:
try:
input.append(w)
result.append(word_to_ix[w])
except KeyError:
pass
return input, torch.tensor(result, dtype=torch.long)
def text_to_id(text):
text = text.replace('ใ', ' ')
text = [word_to_ix[s] for s in list(text)]
return torch.tensor(text, dtype=torch.long)
new_text = "2019ๅนด4ๆ23ๆฅใใกใซใซใชใฎๆฑบๆธๅญไผ็คพใงใใใกใซใใคใใชใณใฉใคใณใงๆฌไบบ็ขบ่ชใงใใใตใผใในใ้ๅงใใใ็ฟ24ๆฅใซใฏLINE Payใ2019ๅนด5ๆๅๆฌใซๅๆงใฎใตใผใในใในใฟใผใใใใจ็บ่กจใใใในใใๆฑบๆธใๅฉ็จใใใใใงๅฟ่ฆใ ใๆ้ใฎใใใๆฌไบบ็ขบ่ชๆ็ถใใ็ฐก็ด ใซๅคใใๅฉ็จ่็ฒๅพใๅชไฝใซ้ฒใใ็ใใใใใ่ๆฏใซใใใฎใฏ2018ๅนด็งใซ่กใใใๆณๆนๆญฃใ ใ"
# +
input_word, input_id = make_input(new_text)
result = model.forward(input_id)
for text, label in zip(input_word, result[1]):
print(f'{text}: {ix_to_tag[label]}')
# -
a, b = parse_text(new_text)
for aa, bb in zip(a, b):
print(aa, bb)
# divide by thousand, as default starting parameters for gamma are wrong
ts = Tseries(-IN.series/1000, Gamma, IN.name)
ml.add_tseries(ts)
# Add well extraction 3
IN = next(x for x in meny.IN if x.name == 'Extraction 3')
# extraction amount counts for the previous month
IN.series = IN.series.resample('d').bfill()
IN.name = IN.name.replace(' ','_')
# divide by thousand, as default starting parameters for gamma are wrong
ts = Tseries(-IN.series/1000, Gamma, IN.name)
ml.add_tseries(ts)
# Add noise model
n = NoiseModel()
ml.add_noisemodel(n)
# Solve
ml.solve()
# -
# ## 3. Plot the decomposition
# Show the decomposition of the groundwater head, by plotting the influence on groundwater head of each of the stresses.
ml.plot_decomposition()
| 2,914 |
/apache_spark/notebooks/spark_examples.ipynb | 62467b43c4c136384e393bc4514ad0263322ddf4 | [] | no_license | LabutinIgor/MLBD | https://github.com/LabutinIgor/MLBD | 0 | 0 | null | 2020-03-28T15:07:57 | 2020-03-28T14:29:35 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 411,460 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
gandalf = [10, 11, 13, 30, 22, 11, 10, 33, 22, 22]
saruman = [23, 66, 12, 43, 12, 10, 44, 23, 12, 17]
comb_spells = gandalf + saruman
comb_spells
# +
gandalf_wins = [0, 1, 2, 3, 4, 5]
saruman_wins = [0,1,2,3]
len(gandalf_wins)
# -
len(saruman_wins)
# +
for gandalf_wins in gandalf_wins:
print(gandalf_wins)
# -
for saruman_wins in saruman_wins:
print(saruman_wins)
# +
POWER = {'Fireball': 50,
'Lightning bolt': 40,
'Magic arrow': 10,
'Black Tentacles': 25,
'Contagion': 45}
gandalf = ['Fireball', 'Lightning bolt', 'Lightning bolt', 'Magic arrow', 'Fireball',
'Magic arrow', 'Lightning bolt', 'Fireball', 'Fireball', 'Fireball']
saruman = ['Contagion', 'Contagion', 'Black Tentacles', 'Fireball', 'Black Tentacles',
'Lightning bolt', 'Magic arrow', 'Contagion', 'Magic arrow', 'Magic arrow']
comb_spells = gandalf + saruman
comb_spells
# +
POWER = {'Fireball': 50,
'Lightning bolt': 40,
'Magic arrow': 10,
'Black Tentacles': 25,
'Contagion': 45}
gandalf = [50, 40, 40, 10, 50, 10, 40, 50, 50, 50]
saruman = [45, 45, 25, 50, 25, 40, 10, 45, 10, 10]
gandalf_wins=[0,1,2,3,4,5,6]
sarum_wins=[0,1,2]
len(gandalf_wins)
# +
len(sarum_wins)
# +
gandalf_power = [('Fireball', 50), ('Lightning bolt', 40), ('Lightning bolt', 40), ('Magic arrow', 10), ('Fireball', 50),
                 ('Magic arrow', 10), ('Lightning bolt', 40), ('Fireball', 50), ('Fireball', 50), ('Fireball', 50)]
saruman_power = [('Contagion', 45), ('Contagion', 45), ('Black Tentacles', 25), ('Fireball', 50), ('Black Tentacles', 25),
                 ('Lightning bolt', 40), ('Magic arrow', 10), ('Contagion', 45), ('Magic arrow', 10), ('Magic arrow', 10)]
# -
# The data needs to be stored in hdfs.
#
# 1. Launch a terminal through Jupyter
# 2. `hdfs dfs -copyFromLocal data .`
#
#
# Check that all the data is in place
# !hdfs dfs -copyFromLocal ../../data .
# !hdfs dfs -ls data/ml-25m/
# ### Reading the data
#
# *Note*: the file `ml-25m-README.htm` contains a description of the data
# +
DATA_PATH = 'data/ml-25m'
RATINGS_PATH = os.path.join(DATA_PATH, 'ratings.csv')
MOVIES_PATH = os.path.join(DATA_PATH, 'movies.csv')
TAGS_PATH = os.path.join(DATA_PATH, 'tags.csv')
# -
ratings = sc.textFile(RATINGS_PATH)
ratings.take(5)
ratings.getNumPartitions()
ratings = ratings \
.map(lambda s: s.split(',')) \
.filter(lambda arr: arr[0].isdigit()) \
.map(lambda arr: Rating(user_id=int(arr[0]),
movie_id=int(arr[1]),
rating=float(arr[2]),
timestamp=int(arr[3])))
ratings.count()
# Number of users
# +
# %%time
ratings \
.map(lambda r: r.user_id)\
.distinct()\
.count()
# -
# Cache the dataset in memory
ratings = ratings.persist()
# +
# %%time
ratings \
.map(lambda r: r.user_id)\
.distinct()\
.count()
# -
# Number of movies
ratings \
.map(lambda r: r.movie_id)\
.distinct()\
.count()
# ## Exercises
# ### Movies with the highest average rating
#
# Find the 10 movies with the highest average rating. Output their titles and average ratings
movies = sc.textFile(MOVIES_PATH)
movies.take(5)
# +
movies = movies \
.map(lambda s: s.split(',')[:2]) \
.filter(lambda arr: arr[0].isdigit()) \
.keyBy(lambda arr: int(arr[0]))
movie_avg_rating = ratings \
.map(lambda r: (r.movie_id, (r.rating, 1))) \
.reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1])) \
.mapValues(lambda ratings: ratings[0] / ratings[1])
movie_avg_rating \
.join(movies) \
.sortBy(lambda key_value: key_value[1][0], ascending=False)\
.take(10)
# -
# Save an rdd consisting of lines of the form `<movie_id>,<average_rating>` to hdfs as a text file
# +
# movie_avg_rating\
# .repartition(10) \
# .saveAsTextFile(os.path.join(DATA_PATH, 'movie_avg_rating'))
# -
# ! hdfs dfs -ls data/ml-25m/movie_avg_rating
# ### Tag popularity
#
# Find the 20 most popular tags
tags = sc.textFile(TAGS_PATH)
tags.take(5)
# +
tags_count = tags\
.map(lambda s: (s.split(',')[2], 1))\
.reduceByKey(lambda a, b: a + b)\
.collect()
len(tags_count)
# -
tags_count = sorted(tags_count, key=lambda tag_count: tag_count[1], reverse=True)
# +
keys, values = zip(*tags_count[:20])
f, ax = plt.subplots(figsize=(10, 6))
plt.xticks(rotation=85, fontsize=15)
plt.bar(keys, values, align="center")
plt.show()
# -
# ### Movies with the most ratings
#
# Find the 10 movies with the largest number of ratings. Output their titles and the number of ratings
movies.take(5)
# +
movie_cnt_rating = ratings \
.map(lambda r: (r.movie_id, 1)) \
.reduceByKey(lambda a, b: a + b) \
.join(movies) \
.sortBy(lambda key_value: key_value[1][0], ascending=False) \
.map(lambda key_value: (key_value[1][1][1], key_value[1][0])) \
movie_cnt_rating.take(10)
# -
# ### Movies with the most 5-star ratings
#
# Find the 10 movies with the largest number of 5s given as a rating. Output their titles and the number of 5s
# +
movie_cnt_rating_5 = ratings \
.filter(lambda r: r.rating == 5) \
.map(lambda r: (r.movie_id, 1)) \
.reduceByKey(lambda a, b: a + b)
movie_cnt_rating_5 \
.join(movies) \
.sortBy(lambda key_value: key_value[1][0], ascending=False) \
.map(lambda key_value: (key_value[1][1][1], key_value[1][0])) \
.take(10)
# -
# ### Distribution of movie average ratings
#
# Plot the distribution of movies by average rating (histogram)
# +
movie_avg_rating_to_hist = movie_avg_rating \
.join(movies) \
.sortBy(lambda key_value: key_value[1][0], ascending=False) \
.map(lambda key_value: key_value[1][0]) \
.collect()
movie_avg_rating_to_hist[:5]
# -
plt.hist(movie_avg_rating_to_hist, 20)
plt.show()
# ### Distribution of the number of ratings per movie
#
# Plot the distribution of movies by number of ratings
# +
movie_cnt_rating_to_hist = movie_cnt_rating \
.map(lambda key_value: key_value[1]) \
.collect()
plt.hist(movie_cnt_rating_to_hist, 20)
plt.show()
# -
# ### Distribution of movies by genre
#
# Plot a histogram of the distribution of movies by genre. Note that a movie may have more than one genre listed.
# +
movies = sc.textFile(MOVIES_PATH)
movies = movies \
.map(lambda s: s.split(',')) \
.filter(lambda arr: arr[0].isdigit())
genres = movies \
.flatMap(lambda arr: arr[-1].split('|')) \
.map(lambda g: (g, 1)) \
.reduceByKey(lambda a, b: a + b) \
.sortBy(lambda key_value: key_value[1], ascending=False) \
.collect()
keys, values = zip(*genres[:20])
f, ax = plt.subplots(figsize=(10, 6))
plt.xticks(rotation=85, fontsize=15)
plt.bar(keys, values, align="center")
plt.show()
# -
# ### Actors
#
# To solve the tasks below you need to use the files `ratings.csv`, `movies.csv`, `links.csv` and `tmdb.json`.
#
# * `links.csv` - defines the mapping from `movie_id` to `tmdb_movie_id` (detailed description in `ml-25m-README.htm`)
# * `tmdb.json` - contains a large amount of data about the movies in json format (a separate json on each line)
#
# Tasks
#
# 1. Find all movies in which `"id":31, "name":"Tom Hanks"` played
# 2. Find the 10 actors who appeared in the largest number of movies. Output their names and the number of movies they appeared in (a sketch for this task follows the Tom Hanks query below)
# +
LINKS_PATH = os.path.join(DATA_PATH, 'links.csv')
TMBD_PATH = os.path.join(DATA_PATH, 'tmdb.json')
links = sc.textFile(LINKS_PATH)
links = links \
.map(lambda s: s.split(','))\
.filter(lambda arr: arr[0].isdigit())\
.keyBy(lambda arr: int(arr[0]))
links.take(10)
# -
tmdb = sc.textFile(TMBD_PATH)
tmdb.take(5)
# +
import json
def hasTomHanks(casts):
for cast in casts.values():
for c in cast:
if c['id'] == 31:
return True
return False
movies_with_tom_hanks = tmdb \
.map(lambda a: json.loads(a)) \
.filter(lambda a: 'casts' in a and hasTomHanks(a['casts'])) \
.map(lambda a: a['original_title'])
movies_with_tom_hanks.take(20)
# -
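# A possible sketch for task 2 (the 10 actors appearing in the most movies). It assumes each `casts` entry carries a `name` field, as implied by the task statement above; this is not part of the original notebook.
# +
def actor_names(record):
    # distinct people credited for one movie
    names = set()
    for cast in record.get('casts', {}).values():
        for member in cast:
            if 'name' in member:
                names.add(member['name'])
    return list(names)

top_actors = tmdb \
    .map(lambda s: json.loads(s)) \
    .filter(lambda a: 'casts' in a) \
    .flatMap(actor_names) \
    .map(lambda name: (name, 1)) \
    .reduceByKey(lambda a, b: a + b) \
    .sortBy(lambda name_count: name_count[1], ascending=False)

top_actors.take(10)
# -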
# ### Genre share over time
#
# For each genre, plot how the share of released movies of that genre changed over time relative to all other genres (a sketch is given after the placeholder cell below).
#
# The release date of a movie can be taken from the `tmdb.json` file.
#
# (See `plt.stackplot`)
# +
######################################
######### YOUR CODE HERE #############
######################################
# -
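# A possible sketch for this exercise (not part of the original notebook). It assumes each `tmdb.json` record carries a `genres` list of `{"name": ...}` objects and a `release_date` string starting with the year; records missing either field are skipped.
# +
def year_genre_pairs(record):
    date = record.get('release_date') or ''
    genres = record.get('genres') or []
    if len(date) < 4 or not date[:4].isdigit():
        return []
    year = int(date[:4])
    return [((year, g['name']), 1) for g in genres if 'name' in g]

year_genre_counts = tmdb \
    .map(lambda s: json.loads(s)) \
    .flatMap(year_genre_pairs) \
    .reduceByKey(lambda a, b: a + b) \
    .collect()

# turn the (year, genre) counts into per-year shares and stack them
years = sorted({y for ((y, g), c) in year_genre_counts})
genres = sorted({g for ((y, g), c) in year_genre_counts})
count_map = dict(year_genre_counts)
totals = {y: sum(count_map.get((y, g), 0) for g in genres) for y in years}
shares = [[count_map.get((y, g), 0) / totals[y] if totals[y] else 0 for y in years] for g in genres]

f, ax = plt.subplots(figsize=(14, 6))
ax.stackplot(years, shares, labels=genres)
ax.legend(loc='upper left', ncol=3, fontsize=8)
ax.set_xlabel('release year')
ax.set_ylabel('share of released movies')
plt.show()
# -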
# ### Movie profitability (ROI)
#
# For each genre, compute `ROI = mean(revenue) / mean(budget)` and build a `barplot` with the genre name on the x axis and the `ROI` on the y axis (a sketch is given after the placeholder cell below)
#
# Data on `revenue` and `budget` can be found in the `tmdb.json` file.
# +
######################################
######### YOUR CODE HERE #############
######################################
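# A possible sketch for the ROI exercise (not part of the original notebook). It assumes each `tmdb.json` record carries numeric `budget` and `revenue` fields plus a `genres` list of `{"name": ...}` objects; movies with a missing or zero budget/revenue are skipped. Note that mean(revenue) / mean(budget) over a genre reduces to sum(revenue) / sum(budget).
# +
def genre_money(record):
    budget = record.get('budget') or 0
    revenue = record.get('revenue') or 0
    genres = record.get('genres') or []
    if budget <= 0 or revenue <= 0:
        return []
    return [(g['name'], (revenue, budget)) for g in genres if 'name' in g]

genre_roi = tmdb \
    .map(lambda s: json.loads(s)) \
    .flatMap(genre_money) \
    .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1])) \
    .mapValues(lambda rev_bud: rev_bud[0] / rev_bud[1]) \
    .sortBy(lambda kv: kv[1], ascending=False) \
    .collect()

names, rois = zip(*genre_roi)
f, ax = plt.subplots(figsize=(10, 6))
plt.xticks(rotation=85, fontsize=12)
plt.bar(names, rois, align="center")
plt.ylabel('ROI = mean(revenue) / mean(budget)')
plt.show()
# -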
| 8,870 |
/br_fake_news_detection.ipynb | ab340c93a7236ae50397c3d318a11af996e3017f | ["MIT"] | permissive | jeffersonscampos/br_fake_news_detection | https://github.com/jeffersonscampos/br_fake_news_detection | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 93,577 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Talendar/br_fake_news_detection/blob/main/br_fake_news_detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="x4mHP2h67TNN"
# # Fake News Detection
#
# In this notebook, we'll use *deep learning* to classify texts written in Brazilian Portuguese as true or fake. The *corpus* used was created by NILC researchers and is available [here](https://github.com/roneysco/Fake.br-Corpus). Let's start by downloading the data directly from GitHub.
# + id="jE-6bkw0Yks3" outputId="16a9a0f2-fc0e-43a6-cd16-6d96e97e96b7" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !git clone https://github.com/roneysco/Fake.br-Corpus
DATA_PATH = "./Fake.br-Corpus/size_normalized_texts"
# + [markdown] id="48n43eJVYrP1"
# Dealing with the project's dependencies:
# + id="xqjGIQgVjyOz"
import warnings
warnings.filterwarnings(action='once')
import numpy as np
import pandas as pd
import os
import re
import zipfile
# %tensorflow_version 2.x
import tensorflow as tf
from sklearn.utils import shuffle
from tensorflow.keras.callbacks import Callback
from IPython.display import clear_output
from gensim.models import KeyedVectors
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('rslp')
# this class will be used later on
class ClearCallback(Callback):
""" Handles the cleaning of the log during the training of a model. """
def __init__(self, current_k, total_k):
self._current_k = current_k
self._total_k = total_k
def on_epoch_end(self, epoch, logs=None):
""" Clears the log. Called when a training epoch ends. """
clear_output(wait=True)
print("Running %d-folds cross-validation. Current fold: %d.\n" % (self._total_k, self._current_k))
# + [markdown] id="u7OAYYqsFCWL"
# TO_DO: Load and explore data
# + id="xXDnyaoZBQc4" outputId="be0fd655-7d12-4b05-c98b-41b5bf636448" colab={"base_uri": "https://localhost:8080/", "height": 419}
def load_txts(path):
txts = []
for filename in sorted(os.listdir(path), key=lambda x: int(re.match("[0-9]+", x).group())):
with open(os.path.join(path, filename)) as f:
txts.append(f.read())
return txts
true_txts = load_txts(os.path.join(DATA_PATH, "true"))
fake_txts = load_txts(os.path.join(DATA_PATH, "fake"))
assert(len(true_txts) == len(fake_txts))
data = pd.DataFrame( [{"text": t, "label": 0} for t in true_txts] + [{"text": f, "label": 1} for f in fake_txts] ).sample(frac=1)
# %xdel true_txts
# %xdel fake_txts
pd.set_option('max_colwidth', 200)
data
# + [markdown] id="9RTUm-YNoKU8"
# # BAG-OF-WORDS
# + id="4-TfcLz2n_qJ"
import string
from sklearn.feature_extraction.text import CountVectorizer
STOPWORDS = nltk.corpus.stopwords.words('portuguese')
STEMMER = nltk.stem.RSLPStemmer()
# + id="JYTNTVKxl-Mp" outputId="d4b7df3a-a097-4201-c292-21df3faf72bc" colab={"base_uri": "https://localhost:8080/", "height": 436}
def normalize_texts(corpus, stem):
processed_texts = []
counter = 0
for i, row in corpus.iterrows():
clear_output(wait=True)
print("[%.2f%%] Processing text %d of %d." % (100*(counter+1)/len(corpus), counter+1, len(corpus)))
counter += 1
text = " ".join( [
(w if not stem else STEMMER.stem(w))
for w in nltk.tokenize.word_tokenize(row["text"]) if w not in STOPWORDS and w not in string.punctuation
] )
processed_texts.append({"text": text, "label": row["label"]})
return pd.DataFrame(processed_texts)
norm_data = normalize_texts(data, stem=True)
norm_data
# + id="n4e4fDFOQUWp" outputId="6cbbbbce-4568-4b43-ba9b-7909d0cdced6" colab={"base_uri": "https://localhost:8080/", "height": 153}
# k-fold cross-validation
k = 10
folds = np.split(norm_data.sample(frac=1), k)
accuracies = []
for i in range(len(folds)):
# separating data
test_data, test_labels = folds[i]["text"].values, folds[i]["label"].values
training_data = np.concatenate( [folds[j]["text"].values for j in range(len(folds)) if j != i] )
training_labels = np.concatenate( [folds[j]["label"].values for j in range(len(folds)) if j != i] )
# extracting features
vectorizer = CountVectorizer(max_features=1000)
training_data = vectorizer.fit_transform(training_data).toarray() # fit the vectorizer to the training corpus
test_data = vectorizer.transform(test_data).toarray() # words of the test corpus that don't appear in the training corpus will be ignored!
# preparing model
model = tf.keras.Sequential([
tf.keras.layers.Dense(32, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(1e-3)),
tf.keras.layers.Dense(1, activation="sigmoid")
])
model.compile(loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(1e-3),
metrics=["accuracy"])
# training
model.fit(training_data, training_labels, epochs=50,
validation_data=(test_data, test_labels),
callbacks=[ClearCallback(i + 1, k)])
# evaluating
loss, acc = model.evaluate(test_data, test_labels)
accuracies.append(acc)
clear_output(wait=True)
print("\n Cross-validation finished! Results:")
print(" . Mean accuracy: %.2f%%" % (100*np.mean(accuracies)))
print(" . Accuracies std: %.2f%%" % (100*np.std(accuracies)))
# + [markdown] id="k7aUVc5Wy9ID"
# # WORD EMBEDDINGS #
# + id="DC5drzH1Vq_z"
WVECTORS_LEN = 100 # dimenson of the word embeddings
MAX_TEXT_TOKENS = 200
# + [markdown] id="832MEJTbVabB"
# OPTION 1: download vectors
# + id="ZGp9L0XM7-X4"
# downloading vectors
if ("glove_s%d.zip" % WVECTORS_LEN) not in os.listdir():
# !wget -O {"glove_s%d.zip" % WVECTORS_LEN} {"http://143.107.183.175:22980/download.php?file=embeddings/glove/glove_s%d.zip" % WVECTORS_LEN}
if ("glove_s%d.txt" % WVECTORS_LEN) not in os.listdir():
with zipfile.ZipFile("glove_s%d.zip" % WVECTORS_LEN, 'r') as zip_ref:
zip_ref.extractall()
wv_pathname = "glove_s%d.txt" % WVECTORS_LEN
# + [markdown] id="sgElaTlMVyen"
# OPTION 2: load vectors from drive
# + id="tqDkyTJqVx55" outputId="6ee46a89-e18c-4137-824b-811a41d5de69" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
wv_pathname = "/content/gdrive/My Drive/Colab Notebooks//ml_data/glove_s%d.txt" % WVECTORS_LEN
# + [markdown] id="cGWFAYxdWV9I"
# Loading glove model (this might take a while)
# + id="Cn8o6-GIzGF2"
word_vectors = KeyedVectors.load_word2vec_format(wv_pathname)
# + [markdown] id="FeMg7k7gZO9N"
# Auxiliary functions:
# + id="pJ4JaN6IZM_k"
def vec_to_word(wv):
"""
Returns the closest word (string) to the given word vector. This is an
expensive operation.
"""
return word_vectors.most_similar(positive=[wv], topn=1)[0][0]
def vecs_to_txt(wv_list):
"""
Receives a list of word vectors and returns a list of words corresponding to
each vector (one word per vector). This is an expensive operation.
"""
txt = []
for v in wv_list:
txt.append(vec_to_word(v))
return txt
def txt_to_vecs(txt):
"""
Receives a list of tokens (words) and returns a numpy array with word vectors
corresponding to those tokens. If some word isn't found in the vocabulary,
it will be ignored.
"""
vecs, ignored = [], []
for word in txt:
try:
v = word_vectors[word]
vecs.append(v)
except KeyError:
ignored.append(word)
return np.array(vecs), set(ignored)
def pad(txts, mask_value):
"""
Pad sequences shorter than the max length seuquence using the given mask value.
"""
# find max len
max = 0
for t in txts:
max = len(t) if len(t) > max else max
# pad
for i, t in enumerate(txts):
if len(t) < max:
z = np.full(shape=(max - len(t) , WVECTORS_LEN), fill_value=mask_value)
txts[i] = np.concatenate((t, z))
return np.array(txts)
def build_wv_data(corpus, mask_value):
features, labels, ignored_tokens = [], [], []
count = 0
for i, row in corpus.iterrows():
print("[%.1f%%] Processing text %d of %d." % ( 100 * (count)/len(corpus), count+1, len(corpus) ))
count += 1
tokens = nltk.tokenize.word_tokenize(row["text"].lower())
if len(tokens) > MAX_TEXT_TOKENS:
tokens = tokens[:MAX_TEXT_TOKENS]
vecs, ign = txt_to_vecs( tokens )
features.append(vecs)
labels.append(row["label"])
ignored_tokens += ign
clear_output(wait=True)
print("Padding texts...")
return pad(features, mask_value), np.array(labels), \
set(ignored_tokens)
# + id="Rn91LPP-e9ps" outputId="7d03d393-b1b2-4be0-ddb4-4d152d3aefa0" colab={"base_uri": "https://localhost:8080/", "height": 122}
MASK_VALUE = -0.123 # value to be used for the masking procedure (ignore padding)
wv_data, wv_labels, ignored_tokens = build_wv_data(corpus=data, mask_value=MASK_VALUE)
print("All texts processed! \nIgnored tokens (unique): %d\n" % len(ignored_tokens))
print(ignored_tokens)
# freeing memory
# %xdel data
# %xdel word_vectors
# + id="KDQo6l-8jVlU" outputId="04b652e6-9425-48db-e662-d246b08e7d33" colab={"base_uri": "https://localhost:8080/", "height": 85}
# k-fold cross-validation
k = 10
folds, folds_labels = shuffle(wv_data, wv_labels)
folds = np.array_split(folds, k)
folds_labels = np.array_split(folds_labels, k)
accuracies = []
for i in range(len(folds)):
# separating data
test_data, test_labels = folds[i], folds_labels[i]
training_data = np.concatenate( [folds[j] for j in range(len(folds)) if j != i] )
training_labels = np.concatenate( [folds_labels[j] for j in range(len(folds)) if j != i] )
# preparing model
model = tf.keras.Sequential([
tf.keras.layers.Masking(mask_value=MASK_VALUE),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)), #, kernel_regularizer=tf.keras.regularizers.l2(3))),
#tf.keras.layers.Dropout(0.25),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), #, kernel_regularizer=tf.keras.regularizers.l2(3))),
#tf.keras.layers.Dropout(0.25),
tf.keras.layers.Dense(32, activation="relu"),
tf.keras.layers.Dense(1, activation="sigmoid")
])
model.compile(loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(1e-3),
metrics=["accuracy"])
# training
model.fit(training_data, training_labels, epochs=10,
validation_data=(test_data, test_labels),
callbacks=[ClearCallback(i + 1, k)])
# evaluating
loss, acc = model.evaluate(test_data, test_labels)
accuracies.append(acc)
clear_output(wait=True)
print("\n Cross-validation finished! Results:")
print(" . Mean accuracy: %.2f%%" % (100*np.mean(accuracies)))
print(" . Accuracies std: %.2f%%" % (100*np.std(accuracies)))
| 11,414 |
/ๆไปถไธIO/.ipynb_checkpoints/5.20 ไธไธฒ่ก็ซฏๅฃ็ๆฐๆฎ้ไฟก-checkpoint.ipynb | 67de47532a2df9975df986ff80d47c95c8a17688 | [] | no_license | Asunqingwen/cookbook | https://github.com/Asunqingwen/cookbook | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,965 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import StandardScaler
# %matplotlib inline
data = pd.read_csv('./data/Sensor_Weather_Data_Challenge.csv', index_col=0, parse_dates=True)
data.head()
data['date'] = data.index
d = data.index[0]
data['day'] = data['date'].apply(lambda x:x.weekday())
x_cols = data.columns[0:14]
data_summary = pd.DataFrame({'features': x_cols, "tot_reading": np.sum(data[x_cols]).values})
# +
data_summary['day_avg'] = data[x_cols].mean().values
days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
for i in range(0,7):
data_summary[days[i]] = data[data['day'] == i][x_cols].sum().values/data_summary["tot_reading"]*100
data_summary['weekday']=data[(data['day']!=5) & (data['day']!=6)][x_cols].sum().values/data_summary["tot_reading"]*100
data_summary['weekend']=data[(data['day']==5) | (data['day']==6)][x_cols].sum().values/data_summary["tot_reading"]*100
# -
data_summary
data_summary.isna().sum()
scaler = StandardScaler()
scaled_mat = pd.DataFrame(scaler.fit_transform(data_summary.iloc[:, 1:]), columns = data_summary.columns[1:])
scaled_mat.index = data_summary.features
corr = scaled_mat.corr()
fig, ax = plt.subplots(figsize=(8, 6))
cax=ax.matshow(corr,vmin=-1,vmax=1)
ax.matshow(corr)
plt.xticks(range(len(corr.columns)), corr.columns)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.xticks(rotation=90)
plt.colorbar(cax)
# ### segmentation on time of day
data['hour'] = data['date'].apply(lambda x: round(x.hour/3))
data_hour = data.groupby(['day', 'hour']).mean()
data_hour.index = [''.join(str(idx[0])+'-'+str(idx[1])) for idx in data_hour.index.values]
data_hour = data_hour.transpose()
scaler = StandardScaler()
scaled_mat = pd.DataFrame(scaler.fit_transform(df), columns = df.columns, index=df.index)
scaled_mat
plt.figure(figsize=(8,20))
data_hour.transpose().iloc[:,0:14].plot()
def plot_BIC(matrix,K):
from sklearn import mixture
BIC=[]
for k in K:
model=mixture.GaussianMixture(n_components=k,init_params='kmeans')
model.fit(matrix)
BIC.append(model.bic(matrix))
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(K,BIC,'-cx')
plt.ylabel("BIC score")
plt.xlabel("k")
plt.title("BIC scoring for K-means cell's behaviour")
return(BIC)
scaled_mat.shape
K = range(2,22)
BIC = plot_BIC(scaled_mat,K)
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
cluster = KMeans(n_clusters=5,random_state=217)
scaled_mat['cluster'] = cluster.fit_predict(scaled_mat.iloc[:, 0:14])
print(scaled_mat.cluster.value_counts())
cluster.cluster_centers_.shape
from sklearn.metrics.pairwise import euclidean_distances
distance = euclidean_distances(cluster.cluster_centers_, cluster.cluster_centers_)
print(distance)
cluster.cluster_centers_.shape
pca.transform(scaled_mat).shape
# +
# Reduce the dimensionality of the data using PCA (fit on the 14 feature columns only,
# so that the cluster centers below can be projected with the same transform)
pca = PCA(n_components=3)
components = pca.fit_transform(scaled_mat.iloc[:, 0:14])
scaled_mat['x'] = components[:,0]
scaled_mat['y'] = components[:,1]
scaled_mat['z'] = components[:,2]
# Getting the center of each cluster for plotting
cluster_centers = pca.transform(cluster.cluster_centers_)
cluster_centers = pd.DataFrame(cluster_centers, columns=['x', 'y', 'z'])
cluster_centers['cluster'] = range(0, len(cluster_centers))
print(cluster_centers)
# +
corr = scaled_mat.iloc[0:14,:].corr()
fig, ax = plt.subplots(figsize=(8, 6))
cax=ax.matshow(corr,vmin=-1,vmax=1)
ax.matshow(corr)
plt.xticks(range(len(corr.columns)), corr.columns)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.xticks(rotation=90)
plt.colorbar(cax)
# -
# ### Clustering Data Points
from statsmodels.tsa.seasonal import seasonal_decompose
seasonal_decompose(pd.Series(data.iloc[:,0], index=data.index), model = "additive")
temp = pd.Series(data.iloc[:, 14])
april = data.iloc[:, 0:16]
df = april.resample('4D').mean().dropna().transpose()
scaler = StandardScaler()
scaled_mat = pd.DataFrame(scaler.fit_transform(df), columns = df.columns, index=df.index)
K = range(2,16)
BIC = plot_BIC(scaled_mat,K)
cluster = KMeans(n_clusters=3,random_state=217)
scaled_mat['cluster'] = cluster.fit_predict(scaled_mat)
print(scaled_mat.cluster.value_counts())
cluster.cluster_centers_
scaled_mat
# Reduce the dimensionality of the data using PCA (fit on the feature columns, excluding the cluster label)
pca = PCA(n_components=3)
df_pca = pca.fit_transform(scaled_mat.drop('cluster', axis=1))
scaled_mat['x'] = df_pca[:,0]
scaled_mat['y'] = df_pca[:,1]
# Getting the center of each cluster for plotting
cluster_centers = pca.transform(cluster.cluster_centers_)
cluster_centers = pd.DataFrame(cluster_centers, columns=['x', 'y', 'z'])
matrix = scaled_mat
fig, ax = plt.subplots(figsize=(8, 6))
scatter=ax.scatter(matrix['x'],matrix['y'],c=matrix['cluster'],s=21,cmap=plt.cm.Set1_r)
ax.scatter(cluster_centers['x'],cluster_centers['y'],s=70,c='blue',marker='+')
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.colorbar(scatter)
plt.title('Data Segmentation')
day_sampled=data.resample('1W').mean().iloc[:, np.concatenate((np.arange(0, 17), [19, 20]))]
#day_sampled=data.resample('1W').mean().loc
day_sampled=day_sampled.fillna(0)
sc = StandardScaler()
day_sampled.iloc[:, 0].plot()
day_scaled = pd.DataFrame(sc.fit_transform(day_sampled), columns=day_sampled.columns, index=day_sampled.index)
day_scaled.iloc[:, 0].plot()
day_scaled = day_scaled.transpose()
plot_BIC(day_scaled, range(2, day_scaled.shape[0]))
k_means = KMeans(n_clusters=4, random_state=666)
k_means.fit(day_scaled)
scaled_centers = k_means.cluster_centers_
pca = PCA(n_components=3)
day_pca = pca.fit_transform(day_scaled)
pca.explained_variance_ratio_, sum(pca.explained_variance_ratio_)
pca_centers = pd.DataFrame(pca.transform(scaled_centers), columns=['x', 'y', 'z'])
pca_centers['cluster_id'] = pca_centers.index
day_pca = pd.DataFrame(day_pca, columns=['x', 'y', 'z'], index=day_scaled.index)
day_pca['cluster_id'] = pd.Categorical(k_means.predict(day_scaled))
plt.figure(figsize=(10, 8))
plt.scatter(x=day_pca['x'], y=day_pca['y'], c=day_pca['cluster_id'])
#plt.scatter(x=pca_centers['x'], y=pca_centers['y'], c=pca_centers['cluster_id'], marker='+', s=200)
for name in day_pca.index:
plt.annotate(s=name, xy=(day_pca.loc[name]['x'] + 0.05, day_pca.loc[name]['y'] - 0.05))
day_pca.cluster_id = day_pca.cluster_id.astype("int")
plt.figure(figsize=(10, 8))
sns.scatterplot(x='x', y='y', alpha = 0.6,hue='cluster_id', s=100, data=day_pca, palette=sns.color_palette("muted", n_colors=4))
for name in day_pca.index:
plt.annotate(s=name, xy=(day_pca.loc[name]['x'], day_pca.loc[name]['y'] + 0.05))
plt.figure(figsize=(10, 8))
plt.scatter(x=day_pca['y'], y=day_pca['z'], c=day_pca['cluster_id'])
plt.scatter(x=pca_centers['y'], y=pca_centers['z'], c=pca_centers['cluster_id'], marker='+', s=200)
for name in day_pca.index:
plt.annotate(s=name, xy=(day_pca.loc[name]['y'], day_pca.loc[name]['z'] + 0.05))
plt.figure(figsize=(10, 8))
plt.scatter(x=day_pca['x'], y=day_pca['z'], c=day_pca['cluster_id'])
plt.scatter(x=pca_centers['x'], y=pca_centers['z'], c=pca_centers['cluster_id'], marker='+', s=200)
for name in day_pca.index:
plt.annotate(s=name, xy=(day_pca.loc[name]['x'], day_pca.loc[name]['z'] + 0.05))
| 7,593 |
/mapbox_upload.ipynb | cc5bc09cf47f2070153db0d4fb65b8762c2328f0 | [
"MIT"
] | permissive | xdze2/arbresdegrenoble | https://github.com/xdze2/arbresdegrenoble | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,327 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import tensorflow.contrib.layers as layers
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import seaborn as sns
# %matplotlib inline
boston = datasets.load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['target'] = boston.target
df.describe()
# Plotting correlation colormap
_ , ax = plt.subplots( figsize =( 12 , 10 ) )
corr = df.corr(method='pearson')
cmap = sns.diverging_palette( 220 , 10 , as_cmap = True )
_ = sns.heatmap( corr, cmap = cmap, square=True, cbar_kws={ 'shrink' : .9 }, ax=ax, annot = True, annot_kws = { 'fontsize' : 12 })
# +
# Create Test Train Split
X_train, X_test, y_train, y_test = train_test_split(df [['RM', 'LSTAT', 'PTRATIO']], df[['target']], test_size=0.3, random_state=0)
# Normalize data
X_train = MinMaxScaler().fit_transform(X_train)
y_train = MinMaxScaler().fit_transform(y_train)
X_test = MinMaxScaler().fit_transform(X_test)
Y_test = MinMaxScaler().fit_transform(y_test)
# +
# Network parameters
m = len(X_train)
n = 3 # Number of features
n_hidden = 20 # Number of hidden neurons
# Hyperparameters
batch_size = 200
eta = 0.01
max_epoch = 1000
# -
def multilayer_perceptron(x):
fc1 = layers.fully_connected(x, n_hidden, activation_fn=tf.nn.relu, scope='fc1')
out = layers.fully_connected(fc1, 1, activation_fn=tf.sigmoid, scope='out')
return out
def accuracy(a,b):
correct_prediction = tf.square(a -b)
return tf.reduce_mean(tf.cast(correct_prediction, "float"))
# +
# build model, loss, and train op
x = tf.placeholder(tf.float32, name='X', shape=[m,n])
y = tf.placeholder(tf.float32, name='Y')
y_hat = multilayer_perceptron(x)
mse = accuracy(y, y_hat)
train = tf.train.AdamOptimizer(learning_rate= eta).minimize(mse)
init = tf.global_variables_initializer()
# -
# Computation Graph
with tf.Session() as sess:
# Initialize variables
sess.run(init)
writer = tf.summary.FileWriter('graphs', sess.graph)
    # train the model for max_epoch epochs
for i in range(max_epoch):
_, l, p = sess.run([train, mse, y_hat], feed_dict={x: X_train, y: y_train})
if i%100 == 0:
print('Epoch {0}: Loss {1}'.format(i, l))
print("Training Done")
print("Optimization Finished!")
# Calculate accuracy
print(" Mean Squared Error (Train data):", mse.eval({x: X_train, y: y_train}))
plt.scatter(p,y_train)
plt.ylabel('Estimated Price')
plt.xlabel('Actual Price')
plt.title('Estimated vs Actual Price Train Data')
writer.close()
# The opening of this cell is missing above; the loop header below is reconstructed as an assumption
# (the standard MiniSom marker-plotting pattern), relying on `som`, `X`, `y`, `markers`, `colors`
# and `plot` having been defined in the cells that are not included here.
for i, x in enumerate(X):
    w = som.winner(x)                        # winning node for customer i
    plot(w[0] + 0.5,
         w[1] + 0.5,
         markers[y[i]],
         markeredgecolor = colors[y[i]],     # defining colors
         markerfacecolor = 'None',           # no color inside of the marker
         markersize = 10,                    # size of marker
         markeredgewidth = 2)                # edge width
# -
# ## Let's find the Frauds
mappings = som.win_map(data=X)
frauds = np.concatenate((mappings[(6,3)], mappings[(4,7)], mappings[(4,8)]), axis=0)
#these frauds are potentially cheater so let's transform values as it was before
frauds = sc.inverse_transform(frauds)
fraud_customers = pd.DataFrame(frauds)
fraud_customers.columns=['CustomerID', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
'A10', 'A11', 'A12', 'A13', 'A14']
# ## The customers listed below are potential frauds (or have a higher chance of being so) that were not flagged by the bank. This data will help the bank take a closer look at these customers.
fraud_customers = fraud_customers.astype(int)
fraud_customers.shape
# ### Let's create a variable that records these potential frauds and add it to the main dataset so that we can apply supervised deep learning.
# creating the matrix of features
customers = dataset.iloc[:, :].values
# creating the dependent variable, looping through whole dataset
is_fraud = np.zeros(len(dataset))
for i in range(len(dataset)):
if dataset.iloc[i,0] in frauds:
is_fraud[i] = 1
customers = pd.DataFrame(customers)
is_fraud = pd.DataFrame(is_fraud)
bank_customers = pd.concat([customers, is_fraud], axis=1)
bank_customers
bank_customers.columns=['CustomerID','A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
'A10', 'A11', 'A12', 'A13', 'A14','Class1','Class']
bank_customers = bank_customers.astype(int)
bank_customers.head()
bank_customers.to_csv('bank_customers.csv', index=False)
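# ### Added sketch: a possible supervised follow-up
# The markdown above says these labels are meant for supervised deep learning, but that step is not
# included here. The cell below is an assumption of what it could look like (a small Keras classifier
# on the 14 customer attributes with the `is_fraud` flag as target), not the author's original code.
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Features: columns A1-A14 (drop CustomerID and the label columns); target: the is_fraud flag ('Class')
X_sup = StandardScaler().fit_transform(bank_customers.iloc[:, 1:15].values)
y_sup = bank_customers['Class'].values
classifier = Sequential([
    Dense(8, activation='relu', input_shape=(X_sup.shape[1],)),
    Dense(1, activation='sigmoid'),
])
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifier.fit(X_sup, y_sup, batch_size=16, epochs=5)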
| 4,751 |
/Simulator/Power Spectral Models.ipynb | 74773334711cc699fe1785ce04ff7291d4c250a6 | [
"MIT"
] | permissive | StingraySoftware/notebooks | https://github.com/StingraySoftware/notebooks | 18 | 34 | MIT | 2023-08-31T20:53:44 | 2023-08-22T13:03:01 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 84,524 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Natural Language Processing Assignment 2
# ## Minimum Edit Distance
#
# Name: Zoe Tagboto
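#
# *(Added note, not part of the submitted assignment.)* The function below fills a dynamic-programming
# table $D$ with $D[i,0]=i$, $D[0,j]=j$ and, for $i,j>0$,
#
# $$D[i,j]=\begin{cases}D[i-1,j-1] & \text{if } source_i = target_j\\ \min\big(D[i-1,j]+1,\; D[i,j-1]+1,\; D[i-1,j-1]+2\big) & \text{otherwise,}\end{cases}$$
#
# i.e. insertions and deletions cost 1 and substitutions cost 2 (the variant used in Jurafsky &
# Martin), which gives a distance of 8 for "intention" vs. "execution".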
import numpy as np
def min_edit_distance(source_word, target_word):
# First I find the length of my words and create a matrix
source_word_len = len(source_word)
target_word_len = len(target_word)
matrix = np.zeros ((source_word_len+1, target_word_len+1),np.int64)
#These are the costs associated with deletion insertion
#and substitution
del_cost = 1
ins_cost = 1
sub_cost = 2
for x in range(source_word_len+1):
matrix [x, 0] = x
for y in range(target_word_len+1):
matrix [0, y] = y
#This is to compute the minimum edit distance
for x in range(1, source_word_len+1):
for y in range(1, target_word_len+1):
if source_word[x-1] == target_word[y-1]:
matrix [x,y] = matrix[x-1, y-1]
else:
matrix [x,y] = min(
matrix[x-1,y] + del_cost,
matrix[x,y-1] + ins_cost,
matrix[x-1,y-1] + sub_cost)
print ("The minimum edit distance between "+source_word+" and the "+target_word+" is " +
str(matrix[source_word_len, target_word_len]))
min_edit_distance("intention", "execution")
| 1,616 |
/notebooks/heart_disease_analysis.ipynb | cb914022b5fbe34b2a5e35b68e9666ecc2e6036c | [
"MIT"
] | permissive | mahnoorbaig/heart-disease-data-analysis | https://github.com/mahnoorbaig/heart-disease-data-analysis | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,104,364 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="smQWTwI7k4Bf"
# # Step 1
# **Object Detection API setup**: in this step the object detection model is downloaded, and a few copies and reference deletions are performed so that the whole environment is left configured.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="XnBVJiIzYune"
# !git clone https://github.com/tensorflow/models.git
# !apt-get -qq install libprotobuf-java protobuf-compiler
# !protoc ./models/research/object_detection/protos/string_int_label_map.proto --python_out=.
# !cp -R models/research/object_detection/ object_detection/
# !rm -rf models
# + [markdown] colab_type="text" id="qwWt0kSihqCv"
# # Step 2
# **Imports** needed to run the Object Detection API demo
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="YspILW_rZu0v"
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# + [markdown] colab_type="text" id="kGx_08UcmtOF"
# # Step 3
# **Configuration** of the model to use, the path to the pre-trained model, and additional configuration items for the Object Detection API implementation.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="8n_alUkLZ1gl"
MODEL_NAME = 'faster_rcnn_inception_resnet_v2_atrous_coco_2018_01_28'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# + [markdown] colab_type="text" id="PbXKPFiWh1jG"
# # Step 4
# Section with the demo images
# +
# !mkdir images
# this image URL should be replaced with your own; it is only an example stored in a personal folder
# !wget https://storage.googleapis.com/demostration_images/image.jpg -O images/image_1.jpg
PATH_TO_TEST_IMAGES_DIR = 'images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image_{}.jpg'.format(i)) for i in range(1, 2) ]
IMAGE_SIZE = (15, 11)
# + [markdown] colab_type="text" id="_Vvi4-2fm2qe"
# # Step 5
# Implementation piece that performs the actual detection by calling the TF session
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="q9FZsaZkaPUz"
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
image_np = load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=3)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
| 5,158 |
/Module 4.ipynb | 97612888e9c9c8907a0270757f389512576605f0 | [] | no_license | Vigneshbaalaji/PreSec | https://github.com/Vigneshbaalaji/PreSec | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 12,474 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Probe attacks: Prediction
import pandas as p
# load the dataset
df = p.read_csv("data.csv")
# feature names
features = ["duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "Wrong_fragment", "Urgent", "hot", "num_failed_login", "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root", "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds", "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate", "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate", "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count", "dst_host_same_srv_rate", "dst_host_diff_ srv_rate", "dst_host_same_src_port_rate", "dst_host_srv_diff_host _rate", "dst_host_serror_rate", "dst_host_srv_serror_rate", "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "class"]
df = p.read_csv("data.csv", names = features)
df['Probe'] = df['class'].map({'normal.':0, 'snmpgetattack.':0, 'named.':0, 'xlock.':0, 'smurf.':0,
'ipsweep.':1, 'multihop.':0, 'xsnoop.':0, 'sendmail.':0, 'guess_passwd.':0,
'saint.':1, 'buffer_overflow.':0, 'portsweep.':1, 'pod.':0, 'apache2.':0,
'phf.':0, 'udpstorm.':0, 'warezmaster.':0, 'perl.':0, 'satan.':1, 'xterm.':0,
'mscan.':1, 'processtable.':0, 'ps.':0, 'nmap.':1, 'rootkit.':0, 'neptune.':0,
'loadmodule.':0, 'imap.':0, 'back.':0, 'httptunnel.':0, 'worm.':0,
'mailbomb.':0, 'ftp_write.':0, 'teardrop.':0, 'land.':0, 'sqlattack.':0,
'snmpguess.':0})
from sklearn.preprocessing import LabelEncoder
var_mod = ['duration', 'protocol_type', 'service', 'flag', 'src_bytes',
'dst_bytes', 'land', 'Wrong_fragment', 'Urgent', 'hot',
'num_failed_login', 'logged_in', 'num_compromised', 'root_shell',
'su_attempted', 'num_root', 'num_file_creations', 'num_shells',
'num_access_files', 'num_outbound_cmds', 'is_host_login',
'is_guest_login', 'count', 'srv_count', 'serror_rate',
'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate',
'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',
'dst_host_srv_count', 'dst_host_same_srv_rate',
'dst_host_diff_ srv_rate', 'dst_host_same_src_port_rate',
'dst_host_srv_diff_host _rate', 'dst_host_serror_rate',
'dst_host_srv_serror_rate', 'dst_host_rerror_rate'
]
le = LabelEncoder()
for i in var_mod:
df[i] = le.fit_transform(df[i]).astype(str)
del df["dst_host_srv_rerror_rate"]
del df["class"]
X = df.drop(labels='Probe', axis=1)
#Response variable
y = df.loc[:,'Probe']
del df
#We'll use a test size of 30%. We also stratify the split on the response variable, which is very important to do because there are so few fraudulent transactions.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
# For convenience, delete the X and y variables after the split to avoid confusion
del X, y
# Prevent view warnings
X_train.is_copy = False
X_test.is_copy = False
# According to cross-validated MCC scores (not computed in this notebook), the random forest is expected to be the best-performing model, so now let's evaluate each classifier's performance on the test set.
from sklearn.metrics import confusion_matrix, classification_report, matthews_corrcoef, cohen_kappa_score, accuracy_score, average_precision_score, roc_auc_score
# +
from sklearn.linear_model import LogisticRegression
logR= LogisticRegression()
logR.fit(X_train,y_train)
predictR = logR.predict(X_test)
print(classification_report(y_test,predictR))
x = (accuracy_score(y_test,predictR)*100)
print('Accuracy result is', x)
print("")
print(confusion_matrix(y_test,predictR))
# +
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
predictDT = dtree.predict(X_test)
print(classification_report(y_test,predictDT))
x = (accuracy_score(y_test,predictDT)*100)
print('Accuracy result is', x)
print("")
print(confusion_matrix(y_test,predictDT))
# +
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
predictrf = rf.predict(X_test)
print(classification_report(y_test,predictrf
))
x = (accuracy_score(y_test,predictrf)*100)
print('Accuracy result is', x)
print("")
print(confusion_matrix(y_test,predictrf))
# +
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier()
neigh.fit(X_train, y_train)
predictknn = neigh.predict(X_test)
print(classification_report(y_test,predictknn
))
x = (accuracy_score(y_test,predictknn)*100)
print('Accuracy result is', x)
print("")
print(confusion_matrix(y_test,predictknn))
# -
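# +
# Added sketch: the cross-validated MCC scores mentioned before the model comparison are not
# actually computed in this notebook. Assuming the standard scikit-learn workflow was intended,
# they could be obtained as below (this cell is an addition, not part of the original analysis):
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer

mcc_scorer = make_scorer(matthews_corrcoef)
for name, clf in [("Logistic Regression", LogisticRegression()),
                  ("Decision Tree", DecisionTreeClassifier()),
                  ("Random Forest", RandomForestClassifier()),
                  ("K-Nearest Neighbors", KNeighborsClassifier())]:
    scores = cross_val_score(clf, X_train, y_train, cv=5, scoring=mcc_scorer)
    print("%s - mean cross-validated MCC: %.3f" % (name, scores.mean()))
# -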
| 5,329 |
/2019 Twins vs. World Series Twins.ipynb | fdae6b340b63bfae172a38608f1a7d6a06ceccd6 | [] | no_license | parkererickson/baseballDataScience | https://github.com/parkererickson/baseballDataScience | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 230,645 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developing a movie-rating algorithm in Spark
# # Project Goal
# The goal is to develop a gradient descent method in Spark in order to solve a collaborative filtering problem, and to compare it with a method from the MLlib library. This notebook covers the development and validation of the approach, before it is integrated and used within the infrastructure developed in the project. For reference, many versions of this problem exist on the web.
# # Problem statement
# We have at our disposal a "ratings" RDD of the form (userID, movieID, rating). The data come from the file `ratings.dat`, stored in the following format:
# ```
# UserID::MovieID::Rating::Timestamp
# ```
#
# This RDD can be stored in a matrix $R$ in which "rating" is found at the intersection of row "userID" and column "movieID".
# If the matrix $R$ has size $m \times n$, we look for $P \in R^{m,k}$ and $Q \in R^{n,k}$ such that $R \approx \hat{R} = PQ^T$.
# To do so, we consider the problem
# $$ \min_{P,Q} \sum_{i,j : r_{ij} \text{ exists}} \ell_{i,j}(R,P,Q), $$
# where
# $$ \ell_{i,j}(R,P,Q)= \left(r_{ij} - q_{j}^{\top}p_{i}\right)^2 + \lambda(|| p_{i} ||^{2}_2 + || q_{j} ||^2_2 ) $$ and $(p_i)_{1\leq i\leq m}$ and $(q_j)_{1\leq j\leq n}$ are the rows of the matrices $P$ and $Q$ respectively. The parameter $\lambda\geq 0$ is a regularization parameter.
#
# The problem we solve here is a so-called "collaborative filtering" problem, which provides one possible solution to the Netflix problem. The data come from "The MovieLens Datasets" database:
#
# F. Maxwell Harper and Joseph A. Konstan. 2015. The MovieLens Datasets: History and Context. ACM Transactions on Interactive Intelligent Systems (TiiS) 5, 4: 19:1โ19:19
#
# +
# Libraries
import numpy as np
from scipy import sparse
import findspark
findspark.init()
# Spark environment
from pyspark import SparkContext, SparkConf
conf = SparkConf()
conf.setMaster("local[*]")
conf.setAppName("Matrix Factorization")
sc = SparkContext(conf = conf)
# -
# #### Creating the RDD and first statistics on the dataset.
# +
# Directory containing the dataset
movieLensHomeDir="data/"
# ratings is an RDD of the form (userID, movieID, rating)
def parseRating(line):
fields = line.split('::')
return int(fields[0]), int(fields[1]), float(fields[2])
ratingsRDD = sc.textFile(movieLensHomeDir + "ratings.dat").map(parseRating).setName("ratings").cache()
# Compute the number of ratings
numRatings = ratingsRDD.count()
# Compute the number of distinct users
numUsers = ratingsRDD.map(lambda r: r[0]).distinct().count()
# Compute the number of distinct movies
numMovies = ratingsRDD.map(lambda r: r[1]).distinct().count()
print("We have %d ratings from %d users on %d movies.\n" % (numRatings, numUsers, numMovies))
# Dimensions of the matrix R
M = ratingsRDD.map(lambda r: r[0]).max()
N = ratingsRDD.map(lambda r: r[1]).max()
matrixSparsity = float(numRatings)/float(M*N)
print("We have %d users, %d movies and the rating matrix has %f percent of non-zero value.\n" % (M, N, 100*matrixSparsity))
# -
# We will use the ALS.train() routine from the [MLlib](http://spark.apache.org/docs/latest/ml-guide.html) library and evaluate its performance by computing the Mean Squared Error of the predicted ratings.
# +
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
# Build the recommendation model using the Alternating Least Squares approach
rank = 10
numIterations = 10
# Parameters of the Alternating Least Squares (ALS) method
# ratings - RDD of Rating or tuple (userID, productID, rating).
# rank - Rank of the model matrix.
# iterations - Number of iterations. (default: 10)
# lambda_ - Regularization parameter. (default: 0.01)
# Build the recommendation model using ALS
model = ALS.train(ratingsRDD, rank, iterations=numIterations, lambda_=0.02)
# Evaluate the model on the full ratings dataset
testdata = ratingsRDD.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
ratesAndPreds = ratingsRDD.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
print("Mean Squared Error = " + str(MSE))
# -
# # Gradient descent algorithms
#
# The goal of this section is
# 1. to compute the gradient of the loss function,
# 2. to implement a gradient descent method,
# 3. to measure the accuracy of this method
#
# __Steps:__
#
# > Split the dataset into a training set (70%) and a test set, using the randomSplit function ( http://spark.apache.org/docs/2.0.0/api/python/pyspark.html )
#
# > Complete the routine below that returns the predicted rating. Create an RDD containing `(i,j,true rating,predicted rating)`.
#
# > Complete the routine that computes the Mean Square Error (MSE) on the dataset.
#
# > Then test the MSE routine with random matrices $P$ and $Q$ (use np.random.rand(M,K)) and compute a few predicted ratings.
#
#
# +
# Split the dataset into a training set and a test set
# Size of the training set (as a fraction)
learningWeight = 0.7
# Create the "training" and "test" RDDs using the randomSplit function
trainRDD, testRDD = ratingsRDD.randomSplit([learningWeight, 1 - learningWeight], seed = None)
# Compute the predicted rating.
def predictedRating(x, P, Q):
"""
This function computes predicted rating
Args:
x: tuple (UserID, MovieID, Rating)
P: user's features matrix (M by K)
Q: item's features matrix (N by K)
Returns:
predicted rating: l
"""
return (x[0], x[1], x[2], np.dot(P[x[0] - 1,:], Q[x[1] - 1,:].T))
# Compute the MSE
def computeMSE(rdd, P, Q):
"""
This function computes Mean Square Error (MSE)
Args:
rdd: RDD(UserID, MovieID, Rating)
P: user's features matrix (M by K)
Q: item's features matrix (N by K)
Returns:
mse: mean square error
"""
r = rdd.collect()
s = sum((r[i][2] - predictedRating(r[i], P, Q)[3])**2 for i in range(len(r)))
return s/len(r)
# +
# Sizes of the training and test datasets.
print("Size of the training dataset:", trainRDD.count())
print("Size of the testing dataset:", testRDD.count())
# Create random matrices of dimensions (M,K) and (N,K)
K = 20
P = np.random.rand(M,K)
Q = np.random.rand(N,K)
# Compute and display the MSE for these random matrices
MSE = computeMSE(ratingsRDD, P, Q)
print("\nMSE (training set) = ", MSE)
# Display 10 predicted ratings obtained from these matrices
print("\n(userID, movieID, rating, predicted)")
r = ratingsRDD.collect()
for x in np.random.randint(len(r), size=10) :
print(predictedRating(r[x], P, Q))
# -
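# Added sketch: the steps above ask for an RDD containing (i, j, true rating, predicted rating);
# the cells above only print a few samples, so the line below (an addition, not original code)
# shows one way to build that RDD with the helper defined earlier:
predictedRDD = ratingsRDD.map(lambda t: predictedRating(t, P, Q))
predictedRDD.take(3)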
# Steps:
#
# > Give the formula of the derivatives of the functions $\ell_{i,j}$ with respect to $p_t$ and $q_s$, with $1\leq t\leq m$ and $1\leq s\leq n$.
#
# > Implement the gradient algorithm on the training set. Use a step size of $\gamma=0.001$ and stop after a maximum number of iterations.
#
# > Comment on the convergence plots and on the prediction quality indicators as a function of the latent dimension (rank of $P$ and $Q$).
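# *(Added answer sketch for the first step, written to match the sign and step conventions of the
# implementation below.)* For an observed entry $r_{ij}$, writing $e_{ij} = r_{ij} - q_j^\top p_i$,
#
# $$\frac{\partial \ell_{i,j}}{\partial p_i} = -2\,e_{ij}\,q_j + 2\lambda\,p_i, \qquad
# \frac{\partial \ell_{i,j}}{\partial q_j} = -2\,e_{ij}\,p_i + 2\lambda\,q_j,$$
#
# and the derivative with respect to any other row $p_t$ ($t\neq i$) or $q_s$ ($s\neq j$) is zero.
# The fixed-step updates $p_i \leftarrow p_i + \gamma\,(e_{ij}\,q_j - \lambda\,p_i)$ and
# $q_j \leftarrow q_j + \gamma\,(e_{ij}\,p_i - \lambda\,q_j)$ therefore move against the gradient,
# with the factor 2 absorbed into the step size $\gamma$.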
# Gradient descent algorithm for matrix factorization
def GD(trainRDD, K=10, MAXITER=50, GAMMA=0.001, LAMBDA=0.05):
    # Build the (sparse) ratings matrix R
row=[]
col=[]
data=[]
for part in trainRDD.collect():
row.append(part[0]-1)
col.append(part[1]-1)
data.append(part[2])
R=sparse.csr_matrix((data, (row, col)))
    # Random initialization of the matrices P and Q
M,N = R.shape
P = np.random.rand(M,K)
Q = np.random.rand(N,K)
    # Compute the initial MSE
    mse=[]
    mse_tmp = computeMSE(trainRDD, P, Q)
    mse.append(mse_tmp)
    print("epoch: ", str(0), " - MSE: ", str(mse_tmp))
    # Main loop over epochs
nonzero = R.nonzero()
nbNonZero = R.nonzero()[0].size
I,J = nonzero[0], nonzero[1]
for epoch in range(MAXITER):
for i,j in zip(I,J):
            # Update P[i,:] and Q[j,:] with one fixed-step gradient descent step
e = R[i, j] - np.dot(P[i, :], Q[j, :])
P[i,:] += GAMMA*(e*Q[j,:] - LAMBDA*P[i,:])
Q[j,:] += GAMMA*(e*P[i,:] - LAMBDA*Q[j,:])
        # Compute the current MSE once per epoch and store it in the mse list
        mse_tmp = computeMSE(trainRDD, P, Q)
        mse.append(mse_tmp)
        print("epoch: ", str(epoch + 1), " - MSE: ", str(mse_tmp))
return P, Q, mse
# Compute P, Q and the MSE history
P,Q,mse = GD(trainRDD, K=10, MAXITER=10, GAMMA=0.001, LAMBDA=0.05)
# +
import matplotlib.pyplot as plt
# Display the MSE on the test set
print('mse = ', computeMSE(testRDD,P,Q))
# -
# Steps:
#
# > Compute, in an RDD, the ratings predicted by the gradient method solution.
#
# > On the test set, compare the predicted values to the actual ratings on 10 random samples.
# Compute and display the predicted ratings
for i in range(10):
print(predictedRating(testRDD.collect()[i],P,Q))
#
r_vol
,order_price
,mot1_oco1.ordertype_name as ordertype_oco1
,order_vol_oco1
,order_price_oco1
,mot1_oco2.ordertype_name as ordertype_oco2
,order_vol_oco2
,order_price_oco2
,call_order_time
,mot2.ordertype_name as call_ordertype
,call_order_vol
,call_order_price
,execution_order_time
,mot3.ordertype_name as execution_ordertype
,execution_order_type as e_ordertype
,mos.orderstatus_name as execution_order_status
,execution_order_vol
,execution_order_price
,execution_order_time2
,mot4.ordertype_name as execution_ordertype2
,execution_order_type2 as e_ordertype2
,mos2.orderstatus_name as execution_order_status2
,execution_order_vol2
,execution_order_price2
,mpt.positiontype_name
,cash
,pos_vol
,pos_price
,total_value
,profit_value
,profit_rate
,position_count
,case
when total_value > total_deposit then total_deposit
when total_value <= total_deposit then total_value
end as real_deposit
,total_unrealized_value
,leverage
,max_drawdown
,fee
,spread_fee
,regist_time
,entry_strategy
,exit_strategy
from backtest_history as bh
inner join m_ordertype as mot1
on bh.order_type = mot1.ordertype_id
inner join m_ordertype as mot2
on bh.call_order_type = mot2.ordertype_id
inner join m_ordertype as mot3
on bh.execution_order_type = mot3.ordertype_id
inner join m_ordertype as mot4
on bh.execution_order_type2 = mot4.ordertype_id
inner join m_positiontype as mpt
on bh.position = mpt.positiontype_id
inner join m_orderstatus as mos
on bh.execution_order_status = mos.orderstatus_id
inner join m_orderstatus as mos2
on bh.execution_order_status2 = mos2.orderstatus_id
inner join m_ordertype as mot1_oco1
on bh.order_type_oco1 = mot1_oco1.ordertype_id
inner join m_ordertype as mot1_oco2
on bh.order_type_oco2 = mot1_oco2.ordertype_id
where symbol = '{}'
and leg = '1d'
and date(time) between '{}' and '{}'
order by time
"""
# +
def draw_backtest_history(df, ylim1, ylim2):
x_size = df.shape[0] / 20
fig = plt.figure(figsize=(6 * x_size, 12))
ax = plt.subplot(4, 1, 1)
candlestick2_ohlc(ax, df["open"], df["high"], df["low"], df["close"], width=0.9, colorup="b", colordown="r")
ax.set_xlim([0, df.shape[0]])
ax.set_xticklabels([(df["time"][x].strftime("%Y%m%d") if x <= df.shape[0] else x) for x in ax.get_xticks()], rotation=30)
ax.set_ylim(ylim1, ylim2)
    # entry_indicators
ax.plot(df['entry_indicator1'], color="blue")
ax.plot(df['entry_indicator2'], color="mediumblue")
ax.plot(df['entry_indicator3'], color="mediumslateblue")
ax.plot(df['entry_indicator4'], color="purple")
ax.plot(df['entry_indicator5'], color="fuchsia")
ax.plot(df['entry_indicator6'], color="orchid")
ax.plot(df['entry_indicator7'], color="navy")
# exit_indicators
ax.plot(df['exit_indicator1'], color="orange")
ax.plot(df['exit_indicator2'], color="tan")
ax.plot(df['exit_indicator3'], color="moccasin")
ax.plot(df['exit_indicator4'], color="brown")
ax.plot(df['exit_indicator5'], color="maroon")
ax.plot(df['exit_indicator6'], color="sandybrown")
ax.plot(df['exit_indicator7'], color="tomato")
    # Filled (executed) orders: entry fills plotted in green, exit fills in red
entry_order = [1,2,3,4,5,6,7,8]
exit_order = [9,10,11,12,13,14,15,16]
for x in range(len(df.index)):
if (df['execution_order_status'][x] == '็ดๅฎ' and df['execution_order_price'][x] != 0):
if df['e_ordertype'][x] in entry_order:
ax.plot(df.index[x], df['execution_order_price'][x], color="green", marker="D")
else:
ax.plot(df.index[x], df['execution_order_price'][x], color="red", marker="D")
for x in range(len(df.index)):
if (df['execution_order_status2'][x] == '็ดๅฎ' and df['execution_order_price2'][x] != 0):
if df['e_ordertype2'][x] in entry_order:
ax.plot(df.index[x], df['execution_order_price2'][x], color="green", marker="D")
else:
ax.plot(df.index[x], df['execution_order_price2'][x], color="red", marker="D")
    # Profit and loss per bar
ax2 = plt.subplot(4, 1, 2)
ax2.bar(df.index, df['profit_value'], color="orange")
ax2.set_xlim([0, df.shape[0]])
# ax2.set_xticklabels([(df["time"][x].strftime("%Y%m%d") if x <= df.shape[0] else x) for x in ax.get_xticks()], rotation=30)
    # Position count
ax3 = plt.subplot(4, 1, 3)
ax3.plot(df.index, df['position_count'], color="grey")
ax3.set_xlim([0, df.shape[0]])
# ax3.set_xticklabels([(df["time"][x].strftime("%Y%m%d") if x <= df.shape[0] else x) for x in ax.get_xticks()], rotation=30)
    # Total assets
ax4 = plt.subplot(4, 1, 4)
ax4.bar(df.index, df['total_value'], color="purple")
ax4.set_xlim([0, df.shape[0]])
# ax4.set_xticklabels([(df["time"][x].strftime("%Y%m%d") if x <= df.shape[0] else x) for x in ax.get_xticks()], rotation=30)
# deposit
ax4.bar(df.index, df['real_deposit'], color="yellow")
ax4.bar(df.index, df['cash'], color="deepskyblue")
# ax4.set_xlim([0, df.shape[0]])
ax4.set_xticklabels([(df["time"][x].strftime("%Y%m%d") if x <= df.shape[0] else x) for x in ax.get_xticks()], rotation=30)
pd.set_option('display.max_columns', 100)
# -
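# Added usage sketch: the cell that executes the query is not included above, so the commented lines
# below are only an assumption about how the pieces fit together -- the SQL string is assumed to be
# stored in a variable named `sql`, `conn` to be an open database connection, and the symbol and
# date range are placeholders, not values from the original notebook.
# df_history = pd.read_sql(sql.format('USDJPY', '2020-01-01', '2020-12-31'), conn)
# draw_backtest_history(df_history, ylim1=df_history['low'].min() * 0.99, ylim2=df_history['high'].max() * 1.01)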
| 14,876 |
/.ipynb_checkpoints/DLND Your first neural network-checkpoint.ipynb | 4b51e091c359ebf802a4caf6d00c49065dfc033e | [] | no_license | jeffthardy/ml_proj1 | https://github.com/jeffthardy/ml_proj1 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 328,063 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Your first neural network
#
# In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
#
#
# + deletable=true editable=true
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + [markdown] deletable=true editable=true
# ## Load and prepare the data
#
# A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
# + deletable=true editable=true
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# + deletable=true editable=true
rides.head()
# + [markdown] deletable=true editable=true
# ## Checking out the data
#
# This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.
#
# Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
# + deletable=true editable=true
rides[:24*10].plot(x='dteday', y='cnt')
# + [markdown] deletable=true editable=true
# ### Dummy variables
# Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
# + deletable=true editable=true
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# + [markdown] deletable=true editable=true
# ### Scaling target variables
# To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
#
# The scaling factors are saved so we can go backwards when we use the network for predictions.
# + deletable=true editable=true
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# + [markdown] deletable=true editable=true
# ### Splitting the data into training, testing, and validation sets
#
# We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
# + deletable=true editable=true
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# + [markdown] deletable=true editable=true
# We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
# + deletable=true editable=true
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# + [markdown] deletable=true editable=true
# ## Time to build the network
#
# Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
#
# <img src="assets/neural_network.png" width=300px>
#
# The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
#
# We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
#
# > **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
# Below, you have these tasks:
# 1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
# 2. Implement the forward pass in the `train` method.
# 3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.
# 4. Implement the forward pass in the `run` method.
#
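# + [markdown] deletable=true editable=true
# *Added note (a sketch of the update rules, written to match the implementation below rather than
# quoted from the project statement):* with hidden activations $h = \sigma(W_{ih}^\top x)$ and output
# $\hat{y} = W_{ho}^\top h$, the error terms used for the squared-error loss are
#
# $$\delta_o = y - \hat{y}, \qquad \delta_h = (W_{ho}\,\delta_o)\odot h\odot(1-h),$$
#
# since $f(x)=x$ has derivative 1 at the output. The weight steps accumulated over a batch of $N$
# records are then applied as $W_{ho} \mathrel{+}= \tfrac{\eta}{N}\sum h\,\delta_o^\top$ and
# $W_{ih} \mathrel{+}= \tfrac{\eta}{N}\sum x\,\delta_h^\top$ (the constant factor from the squared
# error is absorbed into the learning rate $\eta$).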
# + deletable=true editable=true
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#Sigmoid Activation Function
self.activation_function = lambda x : 1/(1+np.exp(-x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
### Forward pass ###
hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
### Backward pass ###
# Output error
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output,error)
# Backpropagated error terms
output_error_term = error # error * f'(x) , f'(x) = 1
hidden_error_term = hidden_error*hidden_outputs*(1-hidden_outputs) # error * f'(x), f'(x) = f(x)*(1-f(x))
# Scaling weight delta by learning rate / record count
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term*X[:,None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:,None]
# Update the weights
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Prediction forward pass ####
# Hidden layer
hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# Output layer
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
# + deletable=true editable=true
def MSE(y, Y):
return np.mean((y-Y)**2)
# + [markdown] deletable=true editable=true
# ## Unit tests
#
# Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you starting trying to train it. These tests must all be successful to pass the project.
# + deletable=true editable=true
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# + [markdown] deletable=true editable=true
# ## Training the network
#
# Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
#
# You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
#
# ### Choose the number of iterations
# This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.
#
# ### Choose the learning rate
# This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
#
# ### Choose the number of hidden nodes
# The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
# + deletable=true editable=true
import sys
### Set the hyperparameters here ###
iterations = 20000
learning_rate = 0.11
hidden_nodes = 5
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# + [markdown] deletable=true editable=true
# **Hyperparameter Testing Notes**
#
# Initially I tried picking a few values at random and came to use 3 hidden nodes with a 0.1 learning rate and 20,000 iterations, but I wanted to get a more systematic view of that region, so I decided to loop through some options:
#
# ~~~~
# Initially trying a loop through various hidden node counts range(1,20,2) using 2000 iter and .1 learn rate:
# Progress: 100.0% ... Training loss: 0.790 ... Validation loss: 1.292
# H#=1
# Progress: 100.0% ... Training loss: 0.262 ... Validation loss: 0.429
# H#=3
# Progress: 100.0% ... Training loss: 0.265 ... Validation loss: 0.435
# H#=5
# Progress: 100.0% ... Training loss: 0.265 ... Validation loss: 0.436
# H#=7
# Progress: 100.0% ... Training loss: 0.274 ... Validation loss: 0.446
# H#=9
# Progress: 100.0% ... Training loss: 0.290 ... Validation loss: 0.458
# H#=11
# Progress: 100.0% ... Training loss: 0.283 ... Validation loss: 0.445
# H#=13
# Progress: 100.0% ... Training loss: 0.278 ... Validation loss: 0.451
# H#=15
# Progress: 100.0% ... Training loss: 0.277 ... Validation loss: 0.447
# H#=17
# Progress: 100.0% ... Training loss: 0.276 ... Validation loss: 0.448
# H#=19
#
# Since the last value is decreasing from the previous I'll run another 10 hidden node cases to see if I find a different minimum.
#
# Progress: 100.0% ... Training loss: 0.291 ... Validation loss: 0.447
# H#=21
# Progress: 100.0% ... Training loss: 0.285 ... Validation loss: 0.449
# H#=23
# Progress: 100.0% ... Training loss: 0.290 ... Validation loss: 0.464
# H#=25
# Progress: 100.0% ... Training loss: 0.292 ... Validation loss: 0.451
# H#=27
# Progress: 100.0% ... Training loss: 0.281 ... Validation loss: 0.440
# H#=29
# Progress: 100.0% ... Training loss: 0.297 ... Validation loss: 0.461
# H#=31
#
# It seems like the best option is around 2-5 nodes, which makes sense to me since this data isn't a huge number of inputs. Next I will loop between 2-5 nodes and use a higher number of iterations to see finer detail.
#
# iterations = 20,000 , learn rate = .1
#
# Progress: 100.0% ... Training loss: 0.512 ... Validation loss: 0.614
# H#=1
# Progress: 100.0% ... Training loss: 0.195 ... Validation loss: 0.356
# H#=2
# Progress: 100.0% ... Training loss: 0.185 ... Validation loss: 0.330
# H#=3
# Progress: 100.0% ... Training loss: 0.064 ... Validation loss: 0.144
# H#=4
# Progress: 100.0% ... Training loss: 0.067 ... Validation loss: 0.141
# H#=5
# Progress: 100.0% ... Training loss: 0.069 ... Validation loss: 0.152
# H#=6
# Progress: 100.0% ... Training loss: 0.080 ... Validation loss: 0.168
# H#=7
# ~~~~
#
#
# From this it actually looks like we should focus on 4 or 5. 4 had a lower training loss, but higher validation loss. My guess is that 5 may be the better choice, but they are pretty close. Next I'll try these two numbers but with a few different learning rates.
#
# ~~~~
# Learning Rate=0.05
# H#=4
# Progress: 100.0% ... Training loss: 0.163 ... Validation loss: 0.295
# H#=5
# Progress: 100.0% ... Training loss: 0.193 ... Validation loss: 0.357
#
# Learning Rate=0.07
# H#=4
# Progress: 100.0% ... Training loss: 0.093 ... Validation loss: 0.187
# H#=5
# Progress: 100.0% ... Training loss: 0.077 ... Validation loss: 0.165
#
# Learning Rate=0.09
# H#=4
# Progress: 100.0% ... Training loss: 0.086 ... Validation loss: 0.170
# H#=5
# Progress: 100.0% ... Training loss: 0.076 ... Validation loss: 0.162
#
# Learning Rate=0.1
# H#=4
# Progress: 100.0% ... Training loss: 0.090 ... Validation loss: 0.165
# H#=5
# Progress: 100.0% ... Training loss: 0.074 ... Validation loss: 0.163
#
# Learning Rate=0.11
# H#=4
# Progress: 100.0% ... Training loss: 0.068 ... Validation loss: 0.165
# H#=5
# Progress: 100.0% ... Training loss: 0.069 ... Validation loss: 0.156
#
# Learning Rate=0.13
# H#=4
# Progress: 100.0% ... Training loss: 0.081 ... Validation loss: 0.167
# H#=5
# Progress: 100.0% ... Training loss: 0.076 ... Validation loss: 0.176
#
# Learning Rate=0.15
# H#=4
# Progress: 100.0% ... Training loss: 0.078 ... Validation loss: 0.198
# H#=5
# Progress: 100.0% ... Training loss: 0.067 ... Validation loss: 0.182
# ~~~~
#
# After seeing these results it seems that I am getting some statistical randomness as far as how good these settings work. When testing various hidden node values I saw the best result at 5 with only .141 validation loss. When I tried various learning rates over the 4 and 5 node setting I didn't even see a result as good as the previous 4 result, but did see the best results at .11 learning rate. Next I will stick to 5 nodes, .11 learning rate, and 20,000 iterations but run the test 5 times to see what kind of variance I get.
#
# ~~~~
# test 0
# Progress: 100.0% ... Training loss: 0.078 ... Validation loss: 0.165
# test 1
# Progress: 100.0% ... Training loss: 0.075 ... Validation loss: 0.171
# test 2
# Progress: 100.0% ... Training loss: 0.060 ... Validation loss: 0.146
# test 3
# Progress: 100.0% ... Training loss: 0.063 ... Validation loss: 0.140
# test 4
# Progress: 100.0% ... Training loss: 0.068 ... Validation loss: 0.149
# test 5
# Progress: 100.0% ... Training loss: 0.075 ... Validation loss: 0.164
#
# ~~~~
#
# I've decided these parameters are ok, but there is a bunch of variation from run to run. Maybe some longer runs would improve things, but it generally gives pretty good results.
#
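# To make the sweep above concrete, here is a minimal sketch of the kind of grid/repeat loop described in these notes. The `run_training` function is only a stand-in (it returns synthetic losses) for the actual `NeuralNetwork` training loop used earlier in this notebook, so the printed numbers are meaningless -- the point is just the sweep-and-repeat structure used to estimate run-to-run variance.

# + deletable=true editable=true
import numpy as np

def run_training(hidden_nodes, learning_rate, iterations, seed):
    """Stand-in for the real training loop: returns synthetic (train, validation) losses."""
    rng = np.random.RandomState(seed)
    base = 0.06 + 0.01 * abs(hidden_nodes - 5) + 0.2 * abs(learning_rate - 0.11)
    return base + 0.02 * rng.rand(), base + 0.08 + 0.03 * rng.rand()

for hidden_nodes in (4, 5):
    for learning_rate in (0.09, 0.11, 0.13):
        # repeat each setting several times to estimate run-to-run variance
        val_losses = [run_training(hidden_nodes, learning_rate, 20000, seed)[1]
                      for seed in range(5)]
        print('H#={} lr={}: val loss mean={:.3f} std={:.3f}'.format(
            hidden_nodes, learning_rate, np.mean(val_losses), np.std(val_losses)))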
# + deletable=true editable=true
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
# + [markdown] deletable=true editable=true
# ## Check out your predictions
#
# Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
# + deletable=true editable=true
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# + [markdown] deletable=true editable=true
# ## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).
#
# Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
#
# > **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
#
# #### answer:
#
# This model doesn't predict results extremely accurately, but it usually captures the general shape of the data. It fails on some of the test data because that period shows behaviour that is abnormal relative to the rest of the training data. It looks like this has to do with Christmas and a typical period of vacation. You can't expect a model to predict abnormal events that it hasn't seen in training, which I think is the case here. For the more normal weeks before Christmas you see fairly accurate predictions, but once the Christmas holiday period starts you see differently shaped data and, as a result, poorer predictions.
| 24,364 |
/notebooks/4_molecular_visualization/1_Visualization_Tutorial.ipynb | 5dfc38667826b21ad8b5fa4607411d0ec71590b4 | [
"BSD-3-Clause"
] | permissive | celinedurniak/python-course-ikon | https://github.com/celinedurniak/python-course-ikon | 0 | 0 | null | 2020-02-07T08:37:11 | 2020-02-05T12:22:36 | null | Jupyter Notebook | false | false | .py | 12,952 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Molecular Visualization Tutorial
# ## Introduction
# Jupyter Notebook can be used for visualization of both molecular and periodic structures.
# Multiple viewers have been ported for use in the Notebook - here we will show how to construct a simple molecule and visualize it using several viewers.
# This tutorial will also present operations on larger structures, including big PDB files.
#
# To create simple molecular systems we will use the ASE package.
#
# ASE is an Atomic Simulation Environment written in the Python programming language with the aim of setting up, steering, and analyzing atomistic simulations.
#
# ASE contains objects and structures for atomic structures as well as for calculators.
#
#
# ### This tutorial
#
# In this tutorial we will learn about the basic concepts of visualizing structures using the Atomic Simulation Environment:
#
# 1. The Atom and Atoms objects and how to construct atomic structures with and without periodic boundary conditions
# 2. Visualization of molecules and periodic systems with x3d and NGLView
# 3. Visualization of larger structures with NGLView
#
#
# The tutorial requires that the following python modules are installed:
# 1. ase
# 2. matplotlib
# 3. nglview
#
#
# These packages should be pre-installed on the virtual machine
# ### Constructing a water molecule from scratch
#
#
# For this task we will use ASE. To create molecules we need to first define atoms which are its constituents.
from ase import Atom
from ase import Atoms
# A water molecule is fundamentally a tetrahedral structure with the oxygen atom in the center and the two hydrogen atoms and two lone pairs at the corners. The angle between two bonds in a fully symmetrical tetrahedron is 109 degrees. The lengths of the O-H bonds can be estimated from the atoms' covalent radii. We will place the oxygen at the origin and the two hydrogen atoms in the yz plane symmetrically around the z-axis.
# +
import numpy as np
water = Atoms('OH2') # placing oxygen first
# calculate bond length
from ase.data import covalent_radii
radius_h = covalent_radii[1] # indexed by atomic number
radius_o = covalent_radii[8]
bondlength = radius_h + radius_o
# calculate y and z projections of the unit vector pointing along the O-H bond
angle = 109.*np.pi/180. # converting to radians
xu = np.cos(angle/2)
yu = np.sin(angle/2)
# set positions
# method 1 (indexing on atoms)
water[1].position = bondlength*np.array([0, xu, yu])
water[2].position = bondlength*np.array([0, xu, -yu])
water.positions
# -
# A quicker one-liner for numpy users
water.positions[1:, 1:] = bondlength*np.array( [[xu, yu], [xu, -yu]])
water.positions
# ## Visualization
#
# ASE supports many molecular viewers. For embedded views in Jupyter notebooks it supports two, the x3d and nglviewer.
from ase.visualize import view
# The simple ase gui is default, but it pops out as a separate window, if this notebook is run locally.
view([water, water])
# A notebook-embedded representation of a structure can be viewed with the `x3d` viewer, also internal to ASE.
view(water, viewer='x3d')
# The `x3d` viewer does not offer any scripting capability and the only operations you can perform are rotation (left mouse button), translation (Ctrl+left mouse button) and zoom (mouse wheel)
# For some more bling directly in the Jupyter notebook we can use the `nglviewer`
view(water, viewer='ngl')
# Alternatively, we may read the ASE structure directly from NGLView.
import nglview as nv
v = nv.show_structure_file("dna.pdb")
v
# NGLviewer is a powerful utility. We can control many aspects of the display quality.
# +
# set size of the widget
v._remote_call("setSize", target="Widget", args=["400px", "400px"])
# center the view
v.center()
# change the color of the background
v.background='#ffc'
# modify the z-clipping distance
v.parameters=dict(clipDist=-10)
# -
# ### Exercise 1: Construct a molecule
# H2S has an H-S-H angle of 90 degrees, construct an ASE molecule representing H2S using bond lengths based on covalent radius.
#
# *Hint* See below, in case you don't know the atomic_number of sulfur (but who doesn't?)
sulfur = Atom('S')
sulfur.number # atomic number
# alternatively
from ase.data import atomic_numbers
atomic_numbers['S']
sh2 = 'replace this string with your code'
# ### Exercise 2 using both x3d and NGLView viewers
#
# Visualize your SH2 molecule:
'replace with your code'
# ## Crystals
# We can use the viewers to look at not just 0D materials (molecules), but also for periodic systems: 1D (e.g. wires), 2D (e.g. surfaces), and 3D materials (e.g. crystals)
#
# Here we will focus on crystals
#
# Let's read in the NaH structure from a file, already present in the right location.
from ase import io
nah = io.read('NaH.cif')
view(nah, viewer='ngl')
# Repeating the cell three times along each axis is as easy as using a simple method on the loaded structure.
view(nah.repeat(3), viewer='ngl')
# ### Building
# Like for the molecule a crystal can be generated by building from scratch, or reading it from a file as above, or by using predefined structures.
# Let's build a crystal for silver using the ASE `bulk` module
from ase.build import bulk
ag = bulk('Ag')
# Note that ASE automatically assigned the crystal symmetry (fcc) and lattice constant.
#
# This structure can now be nicely visualized.
view(ag, viewer='ngl')
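# To double-check what `bulk` assigned, we can inspect the `Atoms` object directly. This is a quick sanity check added alongside the tutorial, using only standard `Atoms` methods:
# +
print(ag.get_chemical_symbols())  # a single Ag atom in the primitive cell
print(ag.cell)                    # the fcc primitive cell vectors
print(ag.get_volume())            # cell volume in Angstrom^3
# -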
# ### Databases
#
# Multiple databases can be queried for systems so we don't need to manually create them!
#
# One example is ASE's own `builder` database
from ase import build
ch3no2 = build.molecule('CH3NO2')
view(ch3no2, viewer='ngl')
# Jupyter Notebook can also be used to visualize entries from external databases, like the PDB database, which we can query by PDB ID.
# Let's have a look at the main protease of the 2019-nCoV coronavirus.
import nglview
import os
os.environ['HTTPS_PROXY']='http://172.18.12.30:8123'
os.environ['HTTP_PROXY']='http://172.18.12.30:8123'
# This command will query the online database for the given PDB ID
view = nglview.show_pdbid("6lu7")
view.render_image()
view
# Notice how clicking on the protein shows the clicked atom's exact location (residue).
# NGLView offers a large number of options to allow for customized view.
# +
view.add_cartoon(selection="protein")
view.add_surface(selection="protein", opacity=0.3)
# specify color
view.add_cartoon(selection="protein", color='blue')
view.camera = 'orthographic'
view.background = 'yellow'
# -
# We can of course load local files for viewing as well.
view = nglview.show_structure_file("dna.pdb")
view.add_cartoon()
view
| 6,834 |
/spam_classifier.ipynb | c17d7782ed0f2a325ed5bf53fe3408c515ba6eb1 | [] | no_license | ArtyomMinsk/spambase | https://github.com/ArtyomMinsk/spambase | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 11,461 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from column_names import column_list
from pandas import DataFrame, Series
# Loading in the data file:
spam_df = pd.read_csv('spambase.data', names = column_list, index_col = False)
# Since we are going to split the given dataset into train and test datasets, let's find out the shape of the original data so that later we can check whether the `train_test_split` method split the data according to the parameters we set:
spam_df.shape
# Before we split the data we need to assign the data from the original dataset to the $X$ and $y$ variables. Since our goal is to classify spam, the very last Series (1 - spam; 0 - not spam) of the original DataFrame has to be assigned to variable $y$, and the rest of the DataFrame is assigned to variable $X$.
#X = spam_df.drop('spam', axis = 1)
X = spam_df[column_list[:-1]]
y = spam_df['spam']
# Splitting the original dataset into training and testing datasets (60/40):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.6, random_state = 42)
X_train.shape
# Let's check the `train_test_split` function: 2760 rows is 60% of 4601 rows of the original dataset (2760 / 4601 = 0.599)
# Training our model on the training set of data
clf = MultinomialNB()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
# Making predictions on the testing set of data
list_1_0 = clf.predict(X_test)
list_1_0
# `list_1_0` is a list of values 1, 0 that represent spam and not spam. Let's find what percentage of the predictions are spam, first with plain Python and then with pandas methods:
count_spam = 0
for item in list_1_0:
if item == 1:
count_spam += 1
count_spam
spam_precentage = count_spam / len(list_1_0) * 100
spam_precentage
df_list = DataFrame(list_1_0)
df_list.columns = ['spam']
df_list.head()
df_list.spam.value_counts()
len(df_list)
703 / 1841 * 100
# ### Advanced Mode
#
# Let's eliminate the features `capital_run_length_average`, `capital_run_length_longest` and `capital_run_length_total` of the original dataset and check how the score changes.
X = spam_df[column_list[:-4]]
y = spam_df['spam']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.6, random_state = 42)
X_train.shape
clf = MultinomialNB()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
# Result: By eliminating the features `capital_run_length_average`, `capital_run_length_longest` and `capital_run_length_total` of the original dataset the score goes up from 0.78 to 0.87.
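# As a quick check beyond raw accuracy (a small addition to the original assignment), we can also look at the confusion matrix and per-class precision/recall of the reduced-feature model:
from sklearn.metrics import confusion_matrix, classification_report
y_pred = clf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))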
| 2,916 |
/Recommendation Engine/Collaborative_Filtering.ipynb | fcc4000f465909dc06ef4e42108cf612d9b60492 | [] | no_license | pittssp/DataScience-Scratch | https://github.com/pittssp/DataScience-Scratch | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 114,464 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Collaborative Filtering
#
# Collaborative Filtering is a commonly used approach for recommender systems. In this notebook I will be using the "movie lens 100k" data set to further my understanding of how recommender systems are constructed. This includes constructing a sparse user-item (user-movie) matrix, weighing the pros and cons of various imputation methods (mean, zero, KNN; SGD was not implemented), using the sparse matrix to find similarities between users, and ultimately scoring movies to recommend based on user-user similarities.
#
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# #### Import Data
movies_df = pd.read_csv('ml-latest-small/movies.csv')
ratings_df = pd.read_csv('ml-latest-small/ratings.csv')
movies_df.head()
movies_df.shape
ratings_df.describe()
rating_count = ratings_df.groupby('movieId')['rating'].count()
plt.hist(rating_count, bins= 100, log=True)
plt.show()
rating_count.quantile(np.arange(1, 0.6, -0.05))
# obviously very skewed data ... let's only keep movies above the 85th-percentile rating count
movies_keep = rating_count[rating_count > 17].index
ratings_df = ratings_df[ratings_df['movieId'].isin(movies_keep)]
ratings_df.shape
rating_count = ratings_df.groupby('movieId')['rating'].count()
plt.hist(rating_count, bins= 100)
plt.show()
avg_ratings = ratings_df.groupby('userId', as_index=False)['rating'].mean()
ratings_df = ratings_df.merge(avg_ratings, on='userId')
ratings_df.head()
df = ratings_df.merge(movies_df, on='movieId')
df['norm_rating'] = df['rating_y'] - df['rating_x']
df.sort_values(['userId', 'movieId']).head()
check = df.pivot(index='userId', columns='movieId', values='rating_x')
plt.figure(figsize=(12,12))
plt.spy(check)
plt.show()
# I just think this is cool :) Sparsity Visualization
feats_df = df.pivot(index='userId', columns='movieId', values='norm_rating')
feats_df.head()
# #### Filling Sparse Array
#
# When creating a User-Item matrix, many users will not have had the opportunity to review more than 5% of items. To deal with this extremely sparse matrix, we will try multiple methods to fill in missing values, each coming with trade offs that impact how recommendations are made.
# +
# Replacing NaN by Movie (column) Average
final_movie = feats_df.fillna(feats_df.mean(axis=0))
# Replacing NaN by User (row) Average
final_user = feats_df.apply(lambda row: row.fillna(row.mean()), axis=1)
# Replacing NaN with zero
final_zero = feats_df.fillna(0)
# +
from sklearn.impute import KNNImputer
final_knn = pd.DataFrame(KNNImputer(missing_values=np.nan, n_neighbors=4,
weights='distance').fit_transform(check),
columns=feats_df.columns)
# -
set(final_zero.columns) - set(check.columns) # checking columns
# +
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
# calculate the cosine similarity matrix between all users; results depend on how we filled the df
def get_sim(df):
"""
Calculate user-user similarity scores
"""
temp = cosine_similarity(df)
np.fill_diagonal(temp, 0)
temp = pd.DataFrame(temp, index=df.index)
temp.columns = df.index
return temp
# find users with highest cosine similarity scores between 1 user and the next
def find_knn(df,n):
return df.apply(lambda x: pd.Series(x.sort_values(ascending=False)
.iloc[:n].index,
index=['top{}'.format(i) for i in range(1, n+1)]), axis=1)
# -
# #### checking get_sim and find_knn with sample data
#
# checking cosine similarity logic
# +
cosine_data = pd.DataFrame([[1, 5, 5],
[5, 1, 1],
[1, 4, 4]])
# user 0 - most similar to user 2
# user 1 - more similar to user 2 than user 0, but dissimilar to both
# user 2 - more similar to user 0 than user 1
get_sim(cosine_data)
# look at find neighbors for further exploration
# -
find_knn(get_sim(cosine_data),2)
# checking how cosine similarity changes with sparse matrix (zeros)
# +
sparse_data = pd.DataFrame([[5,1,0],
[5,0,5],
[0,1,5]])
get_sim(sparse_data)
# -
find_knn(get_sim(sparse_data),2)
# +
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
# Test Sklearn Nearest Neighbors vs above find_knn method
pd.DataFrame(NearestNeighbors(n_jobs=-1, n_neighbors=2, algorithm='brute', metric='cosine'). \
fit(csr_matrix(sparse_data)). \
kneighbors(return_distance=False))
# -
# Test KNN Imputer
pd.DataFrame(KNNImputer(missing_values=0, n_neighbors=4,
weights='distance').fit_transform(sparse_data))
# ---
# ### Recommender Algorithm
# +
# converting MovieId to string for joining to list
ratings_df = ratings_df.astype({"movieId": str})
# creating dataframe where for each user we have list of movies seen
movie_user = ratings_df.groupby(by = 'userId')['movieId'].apply(lambda x:','.join(x))
movie_user.head()
# -
def find_rec(user, sparse_matrix):
"""
    find_rec is dependent on the choice of sparse_matrix, i.e. which method was used to fill in the sparse matrix
    - ideally I want to try an SGD method for filling in the sparse matrix w/ SVD or PCA
    to determine the best features for movie recommendation
    http://nicolas-hug.com/blog/matrix_facto_1
    :param: user - userId for whom you wish to retrieve the list of movie recommendations
    :param: sparse_matrix - user-item matrix with NaNs filled in;
    must be either 'final_movie', 'final_user', 'final_zero', or 'final_knn'
    each differs in how NaN values are filled: movie avg, user avg, zero value, or knn_imputer
"""
# get list of movies user has seen
movies_seen = check.columns[check[check.index==user].notna().any()].tolist()
# get list of similar users based on cosine similarity
# results are largely dependent on which method is used to fill in for NaNs
sim_df = get_sim(sparse_matrix) # cosine similarity
    knn_df = find_knn(sim_df, 10) # top 10 users based on similarity
close_users = knn_df[knn_df.index == user].values[0].tolist()
# map nearby neighbors to find which movies should be under consideration
similarly_seen = movie_user[movie_user.index.isin(close_users)].values
similarly_seen = ','.join(similarly_seen).split(',')
# take difference from what user has seen and what close user's have seen
not_seen = set(similarly_seen) - set(movies_seen)
not_seen = list(map(int, not_seen))
# take only movies that user has not seen and what neighbors have rated
# use final_zero so that we only capture movies that similar users have reviewed
# i.e. movies that similar users have not seen are 0 and hold no weight for ranking propensity
related_movies = final_zero.loc[:, not_seen]
# select only users (rows) that are neighbors to given user
related_movies = related_movies[related_movies.index.isin(close_users)]
related_movies = related_movies[related_movies.notnull()]
# select user cosine similarity w/ respect to other users
corr = sim_df.loc[user, :]
fin = pd.concat([related_movies, corr], axis=1).dropna()
# multiply given user cosine similarity and similar users' movie ratings
fin_corr_x_score = pd.DataFrame([fin.iloc[:,i]*fin.iloc[:,-1]
for i in range(len(fin.columns[:-1]))]).T
fin_corr_x_score.columns = fin.columns[:-1].tolist()
# sum user corr_x_score i.e. (cosine sim x similar users' ratings)
pre_score = fin_corr_x_score.sum()
    # sort the summed scores to get the final ranking
final_score = pd.DataFrame((pre_score).sort_values(ascending=False),
columns=['score'])
final_score = final_score.reset_index()
    recommendations = final_score.join(movies_df.set_index('movieId'), on='index').dropna()  # look up titles by movieId
return recommendations[['title', 'score']]
# return sim_matrix
find_rec(1, final_zero)
# final_zero
find_rec(1, final_movie)
# +
# find_rec(2, final_user) # results identical to final_zero, bc sim_matrix nearly identical
# -
find_rec(1, final_knn)
# ### Drawbacks of this approach
# - very common items (movies) tend to consistently appear in recommendations
#
# - users with high ratings in common look more similar than users with low ratings in common
#
# - the method used to fill in the sparse matrix gives very different results
#
# - the fill method very likely under-captures users' true ratings
#     - an SGD matrix-factorization method is one counter to this (http://nicolas-hug.com/blog/matrix_facto_1); a toy sketch follows this list
#     - put min function here ...
#
# - cosine similarity may not fully capture the 'distances' between users/movies
#     - SVD to reduce dimension
#     - DNN to do something else idk
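# As a rough illustration of the SGD idea referenced above (toy data rather than the MovieLens matrix, and only a sketch of the approach described in the linked post, not its exact code), the cell below factorizes a small ratings matrix using only the observed entries:

# +
import numpy as np

# toy ratings matrix; 0 means "not rated"
ratings = np.array([[5, 3, 0, 1],
                    [4, 0, 0, 1],
                    [1, 1, 0, 5],
                    [0, 1, 5, 4]], dtype=float)
n_users, n_items = ratings.shape
n_factors, lr, reg, n_epochs = 3, 0.01, 0.02, 2000

rng = np.random.RandomState(0)
p = rng.normal(scale=0.1, size=(n_users, n_factors))  # user factors
q = rng.normal(scale=0.1, size=(n_items, n_factors))  # item factors

observed = [(u, i) for u in range(n_users) for i in range(n_items) if ratings[u, i] > 0]
for _ in range(n_epochs):
    for u, i in observed:
        pu, qi = p[u].copy(), q[i].copy()
        err = ratings[u, i] - pu @ qi
        p[u] += lr * (err * qi - reg * pu)
        q[i] += lr * (err * pu - reg * qi)

# dense reconstruction, including the previously missing cells
print(np.round(p @ q.T, 2))
# -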
| 9,164 |
/Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | 16f4b519933b3951fb03c03731e1633053234434 | [] | no_license | AndreasC93/Natural_language_processing | https://github.com/AndreasC93/Natural_language_processing | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 470,895 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <img src="http://files.oproject.org/img/HeaderOpenData.png">
#
# # CMS Open Data Example #1: Di-Muons
# ## Import Modules and Turn on Javascript
# +
from ROOT import TFile, TTree, TCanvas, TH1F
# %jsroot on
# -
# ## Read in Data from Input File
# +
file = TFile("data/Dimuons.root","READ")
Dimuons = file.Get("Dimuons")
# -
# ## Plot Muon Charge and Momentum
# ### Setup the Canvas
# +
canvas = TCanvas()
canvas.Divide(2,2)
# -
# ### Plot First Muon Charge and Momentum
# +
canvas.cd(1)
Dimuons.Draw("Muon1_Px")
canvas.cd(2);
Dimuons.Draw("Muon1_Py")
canvas.cd(3);
Dimuons.Draw("Muon1_Pz")
canvas.cd(4);
Dimuons.Draw("Muon1_Charge");
canvas.Draw();
# -
# ### Plot Second Muon Charge and Momentum
# +
canvas.cd(1)
Dimuons.Draw("Muon2_Px")
canvas.cd(2)
Dimuons.Draw("Muon2_Py")
canvas.cd(3)
Dimuons.Draw("Muon2_Pz")
canvas.cd(4)
Dimuons.Draw("Muon2_Charge")
canvas.Draw()
# -
# ## Apply Muon Quality Selection:
# <img src="http://cms.web.cern.ch/sites/cms.web.cern.ch/files/styles/large/public/field/image/2011-bs-1-2.jpg?itok=k5_hcnFt"></img> <BR>
# Muon_Global = 1 is a Global Muon (Global Muons have a higher probability of being real muons)<BR>
# Muon_Global = 0 is not a Global Muon <BR>
Selection = "Muon1_Global == 1 && Muon2_Global == 1"
# # Compute Di-Muon Invariant Mass
# Let's calculate the invariant mass $M$ of the two muons using the formula $M = \sqrt{(E_1 + E_2)^2 - (\vec{p}_1 + \vec{p}_2)^2}$ (in natural units)
# ## Declare Histogram
InvariantMass = TH1F("InvariantMass","#mu#mu mass; #mu#mu mass [GeV];Events", 900, 2, 120)
# ## Define Invariant Mass Formula
InvariantMassFormula ="sqrt((Muon1_Energy + Muon2_Energy)^2 - (Muon1_Px + Muon2_Px)^2 - (Muon1_Py + Muon2_Py)^2 - (Muon1_Pz + Muon2_Pz)^2)"
# ## Plot Results
# +
Canvas = TCanvas()
Dimuons.Draw( InvariantMassFormula + ">>InvariantMass", Selection)
Canvas.SetLogy()
Canvas.SetLogx()
Canvas.Draw()
# -
# ## Exercise 1a: Can you spot any Di-Muon Resonances by eye?
# ## Exercise 1b: Toggle Logarithmic scale on/off with Your Mouse
# ## Exercise 1c: Zoom In on a One of the Resonances with Your Mouse
# ## Exercise 2: Repeat exercise 1b by modifying code in cell [12]
InvariantMass2 = TH1F("InvariantMass2","#mu#mu mass; #mu#mu mass [GeV];Events", 3000, 2, 120)
InvariantMass2Formula ="sqrt((Muon1_Energy + Muon2_Energy)^2 - (Muon1_Px + Muon2_Px)^2 - (Muon1_Py + Muon2_Py)^2 - (Muon1_Pz + Muon2_Pz)^2)"
# +
Canvas = TCanvas()
Dimuons.Draw( InvariantMass2Formula + ">>InvariantMass2")
Canvas.SetLogy()
Canvas.SetLogx()
Canvas.Draw()
# -
# Describe the Bitcoin Sentiment
btc_df.describe()
# Describe the Ethereum Sentiment
eth_df.describe()
# ### Questions:
#
# Q: Which coin had the highest mean positive score?
#
# A:
#
# Q: Which coin had the highest compound score?
#
# A:
#
# Q. Which coin had the highest positive score?
#
# A:
# ---
# # Tokenizer
#
# In this section, you will use NLTK and Python to tokenize the text for each coin. Be sure to:
# 1. Lowercase each word
# 2. Remove Punctuation
# 3. Remove Stopwords
# +
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from string import punctuation
import re
import nltk
lemmatizer = WordNetLemmatizer()
# +
# Tokenize each article into sentences (the default stopwords list is used as-is later)
sentence_tokenized = [sent_tokenize(i) for i in btc_df["text"]]
print(sentence_tokenized)
# -
# Complete the tokenizer function
def tokenizer(text):
"""Tokenizes text."""
sw = set(stopwords.words('english'))
regex = re.compile("[^a-zA-Z ]")
#regex = re.compile("btc_df["text"]")
re_clean = regex.sub('', text)
words = word_tokenize(re_clean)
lem = [lemmatizer.lemmatize(word) for word in words]
output = [word.lower() for word in lem if word.lower() not in sw]
# Create a list of the words
# Convert the words to lowercase
# Remove the punctuation
# Remove the stop words
# Lemmatize Words into root words
return output
# +
# Create a new tokens column for bitcoin
btc_df["btc_tokenized"] = btc_df["text"].apply(tokenizer)
btc_df
# -
# Create a new tokens column for ethereum
eth_df["eth_tokenized"] = eth_df["text"].apply(tokenizer)
eth_df
# ---
# # NGrams and Frequency Analysis
#
# In this section you will look at the ngrams and word frequency for each coin.
#
# 1. Use NLTK to produce the n-grams for N = 2.
# 2. List the top 10 words for each coin.
from collections import Counter
from nltk import ngrams
# Generate the Bitcoin N-grams where N=2
#def bigram_counter(btc_tokenized):
# Combine all articles in corpus into one large string
big_string_btc = ' '.join(btc_df.text)  # join with spaces so article boundaries don't fuse words
#processed = process_text(big_string)
bigrams = ngrams(big_string_btc.split(), n=2)
top_10 = dict(Counter(bigrams).most_common(10))
pd.DataFrame(list(top_10.items()), columns=['bigram', 'count'])
# Generate the Ethereum N-grams where N=2
#def bigram_counter(btc_tokenized):
# Combine all articles in corpus into one large string
big_string_eth = ' '.join(eth_df.text)  # join with spaces so article boundaries don't fuse words
#processed = process_text(big_string)
bigrams = ngrams(big_string_eth.split(), n=2)
top_10 = dict(Counter(bigrams).most_common(10))
pd.DataFrame(list(top_10.items()), columns=['bigram', 'count'])
# Use the token_count function to generate the top 10 words from each coin
def token_count(tokens, N=10):
"""Returns the top N tokens from the frequency count"""
return Counter(tokens).most_common(N)
# Get the top 10 words for Bitcoin
token_count(tokenizer(big_string_btc))
# Get the top 10 words for Ethereum
token_count(tokenizer(big_string_eth))
# # Word Clouds
#
# In this section, you will generate word clouds for each coin to summarize the news for each coin
from wordcloud import WordCloud
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [20.0, 10.0]
# Generate the Bitcoin word cloud
wc = WordCloud().generate(" ".join(tokenizer(big_string_btc)))
plt.imshow(wc)
# Generate the Ethereum word cloud
wc = WordCloud().generate(" ".join(tokenizer(big_string_eth)))
plt.imshow(wc)
# # Named Entity Recognition
#
# In this section, you will build a named entity recognition model for both coins and visualize the tags using SpaCy.
import spacy
from spacy import displacy
# +
# Optional - download a language model for SpaCy
# # !python -m spacy download en_core_web_sm
# -
# Load the spaCy model
nlp = spacy.load('en_core_web_sm')
# ## Bitcoin NER
# Concatenate all of the bitcoin text together
print(big_string_btc)
# +
# Run the NER processor on all of the text
doc = nlp(big_string_btc)
doc.user_data["title"]= "BTC NER"
displacy.render(doc, style='ent')
# Add a title to the document
#doc.user_data["title"]= "BTC NER"
# +
# Render the visualization
# YOUR CODE HERE!
# -
# List all Entities
for ent in doc.ents:
print(ent.text, ent.label_)
# ---
# ## Ethereum NER
# Concatenate all of the ethereum text together
print(big_string_eth)
# +
# Run the NER processor on all of the text
doc = nlp(big_string_eth)
doc.user_data["title"]= "ETH NER"
displacy.render(doc, style='ent')
# Add a title to the document
# YOUR CODE HERE!
# +
# Render the visualization
# YOUR CODE HERE!
# -
# List all Entities
for ent in doc.ents:
print(ent.text, ent.label_)
| 7,547 |
/clustering/How HDBSCAN Works.ipynb | 4f06e4c302748b811b233d2855b58d377f2b08bf | [] | no_license | ethen8181/programming | https://github.com/ethen8181/programming | 17 | 14 | null | 2022-12-26T20:21:33 | 2022-06-23T20:21:13 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 286,195 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # How HDBSCAN Works
#
# HDBSCAN is a clustering algorithm developed by [Campello, Moulavi, and Sander](http://link.springer.com/chapter/10.1007%2F978-3-642-37456-2_14). It extends DBSCAN by converting it into a hierarchical clustering algorithm, and then using a technique to extract a flat clustering based on the stability of clusters. The goal of this notebook is to give you an overview of how the algorithm works and the motivations behind it. In contrast to the HDBSCAN paper I'm going to describe it without reference to DBSCAN. Instead I'm going to explain how I like to think about the algorithm, which aligns more closely with [Robust Single Linkage](http://cseweb.ucsd.edu/~dasgupta/papers/tree.pdf) with [flat cluster extraction](http://link.springer.com/article/10.1007%2Fs10618-013-0311-4) on top of it.
#
# Before we get started we'll load up most of the libraries we'll need in the background, and set up our plotting (because I believe the best way to understand what is going on is to actually see it working in pictures).
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.datasets as data
# %matplotlib inline
sns.set_context('poster')
sns.set_style('white')
sns.set_color_codes()
plot_kwds = {'alpha' : 0.5, 's' : 80, 'linewidths':0}
# The next thing we'll need is some data. To make for an illustrative example we'll need the data size to be fairly small so we can see what is going on. It will also be useful to have several clusters, preferably of different kinds. Fortunately sklearn has facilities for generating sample clustering data so I'll make use of that and make a dataset of one hundred data points.
moons, _ = data.make_moons(n_samples=50, noise=0.05)
blobs, _ = data.make_blobs(n_samples=50, centers=[(-0.75,2.25), (1.0, 2.0)], cluster_std=0.25)
test_data = np.vstack([moons, blobs])
plt.scatter(test_data.T[0], test_data.T[1], color='b', **plot_kwds)
# Now, the best way to explain HDBSCAN is actually just use it and then go through the steps that occurred along the way teasing out what is happening at each step. So let's load up the [hdbscan library](https://github.com/lmcinnes/hdbscan) and get to work.
import hdbscan
clusterer = hdbscan.HDBSCAN(min_cluster_size=5, gen_min_span_tree=True)
clusterer.fit(test_data)
# So now that we have clustered the data -- what actually happened? We can break it out into a series of steps
#
# 1. Transform the space according to the density/sparsity.
# 2. Build the minimum spanning tree of the distance weighted graph.
# 3. Construct a cluster hierarchy of connected components.
# 4. Condense the cluster hierarchy based on minimum cluster size.
# 5. Extract the stable clusters from the condensed tree.
# ## Transform the space
#
# To find clusters we want to find the islands of higher density amid a sea of sparser noise -- and the assumption of noise is important: real data is messy and has outliers, corrupt data, and noise. The core of the clustering algorithm is single linkage clustering, and it can be quite sensitive to noise: a single noise data point in the wrong place can act as a bridge between islands, gluing them together. Obviously we want our algorithm to be robust against noise so we need to find a way to help 'lower the sea level' before running a single linkage algorithm.
#
# How can we characterize 'sea' and 'land' without doing a clustering? As long as we can get an estimate of density we can consider lower density points as the 'sea'. The goal here is not to perfectly distinguish 'sea' from 'land' -- this is an initial step in clustering, not the ouput -- just to make our clustering core a little more robust to noise. So given an identification of 'sea' we want to lower the sea level. For practical purposes that means making 'sea' points more distant from each other and from the 'land'.
#
# That's just the intuition however. How does it work in practice? We need a very inexpensive estimate of density, and the simplest is the distance to the *k*th nearest neighbor. If we have the distance matrix for our data (which we will need imminently anyway) we can simply read that off; alternatively if our metric is supported (and dimension is low) this is the sort of query that [kd-trees](http://scikit-learn.org/stable/modules/neighbors.html#k-d-tree) are good for. Let's formalise this and (following the DBSCAN, LOF, and HDBSCAN literature) call it the **core distance** defined for parameter *k* for a point *x* and denote as $\mathrm{core}_k(x)$. Now we need a way to spread apart points with low density (correspondingly high core distance). The simple way to do this is to define a new distance metric between points which we will call (again following the literature) the **mutual reachability distance**. We define mutual reachability distance as follows:
#
# <center>$d_{\mathrm{mreach-}k}(a,b) = \max \{\mathrm{core}_k(a), \mathrm{core}_k(b), d(a,b) \}$</center>
#
# where $d(a,b)$ is the original metric distance between *a* and *b*. Under this metric dense points (with low core distance) remain the same distance from each other but sparser points are pushed away to be at least their core distance away from any other point. This effectively 'lowers the sea level' spreading sparse 'sea' points out, while leaving 'land' untouched. The caveat here is that obviously this is dependent upon the choice of *k*; larger *k* values interpret more points as being in the 'sea'. All of this is a little easier to understand with a picture, so let's use a *k* value of five. Then for a given point we can draw a circle for the core distance as the circle that touches the fifth nearest neighbor, like so:
#
# <img src="images/distance1.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# Pick another point and we can do the same thing, this time with a different set of neighbors (one of them even being the first point we picked out).
#
# <img src="images/distance2.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# And we can do that a third time for good measure, with another set of five nearest neighbors and another circle with slightly different radius again.
#
# <img src="images/distance3.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# Now if we want to know the mutual reachability distance between the blue and green points we can start by drawing in an arrow giving the distance between green and blue:
#
# <img src="images/distance4.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# This passes through the blue circle, but not the green circle -- the core distance for green is larger than the distance between blue and green. Thus we need to mark the mutual reachability distance between blue and green as larger -- equal to the radius of the green circle (easiest to picture if we base one end at the green point).
#
# <img src="images/distance4a.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# On the other hand the mutual reachability distance from red to green is simply the distance from red to green since that distance is greater than either core distance (i.e. the distance arrow passes through both circles).
#
# <img src="images/distance5.svg" alt="Diagram demonstrating mutual reachability distance" width=640 height=480>
#
# In general there is [underlying theory](http://arxiv.org/pdf/1506.06422v2.pdf) to demonstrate that mutual reachability distance as a transform works well in allowing single linkage clustering to more closely approximate the hierarchy of level sets of whatever true density distribution our points were sampled from.
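# To make the definitions above concrete, here is a small numpy/scikit-learn check computed directly on our test data (just a sketch alongside the library, not how hdbscan computes this internally):
from sklearn.metrics import pairwise_distances
k = 5
dist = pairwise_distances(test_data)
core_k = np.sort(dist, axis=1)[:, k]  # distance to the k-th nearest neighbour (column 0 is the point itself)
mutual_reachability = np.maximum(np.maximum(core_k[:, None], core_k[None, :]), dist)
mutual_reachability[:3, :3]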
# ## Build the minimum spanning tree
#
# Now that we have a new mutual reachability metric on the data we want to start finding the islands of dense data. Of course dense areas are relative, and different islands may have different densities. Conceptually what we will do is the following: consider the data as a weighted graph with the data points as vertices and an edge between any two points with weight equal to the mutual reachability distance of those points.
#
# Now consider a threshold value, starting high, and steadily being lowered. Drop any edges with weight above that threshold. As we drop edges we will start to disconnect the graph into connected components. Eventually we will have a hierarchy of connected components (from completely connected to completely disconnected) at varying threshold levels.
#
# In practice this is very expensive: there are $n^2$ edges and we don't want to have to run a connected components algorithm that many times. The right thing to do is to find a minimal set of edges such that dropping any edge from the set causes a disconnection of components. But we need more, we need this set to be such that there is no lower weight edge that could connect the components. Fortunately graph theory furnishes us with just such a thing: the minimum spanning tree of the graph.
#
# We can build the minimum spanning tree very efficiently via [Prim's algorithm](https://en.wikipedia.org/wiki/Prim%27s_algorithm) -- we build the tree one edge at a time, always adding the lowest weight edge that connects the current tree to a vertex not yet in the tree. You can see the tree HDBSCAN constructed below; note that this is the minimum spanning tree for *mutual reachability distance* which is different from the pure distance in the graph. In this case we had a *k* value of 5.
#
# In the case that the data lives in a metric space we can use even faster methods, such as Dual Tree Boruvka to build the minimal spanning tree.
clusterer.minimum_spanning_tree_.plot(edge_cmap='viridis',
edge_alpha=0.6,
node_size=80,
edge_linewidth=2)
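# The same tree can be cross-checked with scipy, reusing the `mutual_reachability` matrix computed in the sketch above (again just an illustration, not the library's Boruvka code path):
from scipy.sparse.csgraph import minimum_spanning_tree
mreach = mutual_reachability.copy()
np.fill_diagonal(mreach, 0)  # zero diagonal so there are no self-loops
mst = minimum_spanning_tree(mreach)  # sparse matrix holding the tree edges
print(mst.nnz, 'edges for', test_data.shape[0], 'points')  # a spanning tree has n - 1 edges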
# ## Build the cluster hierarchy
#
# Given the minimal spanning tree, the next step is to convert that into the hierarchy of connected components. This is most easily done in the reverse order: sort the edges of the tree by distance (in increasing order) and then iterate through, creating a new merged cluster for each edge. The only difficult part here is to identify the two clusters each edge will join together, but this is easy enough via a [union-find](https://en.wikipedia.org/wiki/Disjoint-set_data_structure) data structure. We can view the result as a dendrogram as we see below:
clusterer.single_linkage_tree_.plot(cmap='viridis', colorbar=True)
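# As an aside, the union-find structure mentioned above is tiny to write down. Here is a minimal sketch (path compression plus union by rank) of the bookkeeping used when merging components; it is illustrative only, not the library's implementation:
class UnionFind:
    """Minimal disjoint-set structure."""
    def __init__(self, n):
        self.parent = list(range(n))
        self.rank = [0] * n
    def find(self, x):
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]  # path compression
            x = self.parent[x]
        return x
    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return
        if self.rank[ra] < self.rank[rb]:
            ra, rb = rb, ra
        self.parent[rb] = ra  # attach the shallower tree under the deeper one
        if self.rank[ra] == self.rank[rb]:
            self.rank[ra] += 1

uf = UnionFind(4)
uf.union(0, 1); uf.union(2, 3)
print(uf.find(0) == uf.find(1), uf.find(0) == uf.find(2))  # True False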
# This brings us to the point where robust single linkage stops. We want more though; a cluster hierarchy is good, but we really want a set of flat clusters. We could do that by drawing a horizontal line through the above diagram and selecting the clusters that it cuts through. This is in practice what [DBSCAN](http://scikit-learn.org/stable/modules/clustering.html#dbscan) effectively does (declaring any singleton clusters at the cut level as noise). The question is, how do we know where to draw that line? DBSCAN simply leaves that as a (very unintuitive) parameter. Worse, we really want to deal with variable density clusters and any choice of cut line is a choice of mutual reachability distance to cut at, and hence a single fixed density level. Ideally we want to be able to cut the tree at different places to select our clusters. This is where the next steps of HDBSCAN begin and create the difference from robust single linkage.
# ## Condense the cluster tree
#
# The first step in cluster extraction is condensing down the large and complicated cluster hierarchy into a smaller tree with a little more data attached to each node. As you can see in the hierarchy above it is often the case that a cluster split is one or two points splitting off from a cluster; and that is the key point -- rather than seeing it as a cluster splitting into two new clusters we want to view it as a single persistent cluster that is 'losing points'. To make this concrete we need a notion of **minimum cluster size** which we take as a parameter to HDBSCAN. Once we have a value for minimum cluster size we can now walk through the hierarchy and at each split ask if one of the new clusters created by the split has fewer points than the minimum cluster size. If it is the case that we have fewer points than the minimum cluster size we declare it to be 'points falling out of a cluster' and have the larger cluster retain the cluster identity of the parent, marking down which points 'fell out of the cluster' and at what distance value that happened. If on the other hand the split is into two clusters each at least as large as the minimum cluster size then we consider that a true cluster split and let that split persist in the tree. After walking through the whole hierarchy and doing this we end up with a much smaller tree with a small number of nodes, each of which has data about how the size of the cluster at that node decreases over varying distance. We can visualize this as a dendrogram similar to the one above -- again we can have the width of the line represent the number of points in the cluster. This time, however, that width varies over the length of the line as points fall out of the cluster. For our data using a minimum cluster size of 5 the result looks like this:
clusterer.condensed_tree_.plot()
# This is much easier to look at and deal with, particularly in as simple a clustering problem as our current test dataset. However we still need to pick out clusters to use as a flat clustering. Looking at the plot above should give you some ideas about how one might go about doing this.
# ## Extract the clusters
#
# Intuitively we want to choose clusters that persist and have a longer lifetime; short lived clusters are ultimately probably merely artifacts of the single linkage approach. Looking at the previous plot we could say that we want to choose those clusters that have the greatest area of ink in the plot. To make a flat clustering we will need to add a further requirement that, if you select a cluster, then you cannot select any cluster that is a descendant of it. And in fact that intuitive notion of what should be done is exactly what HDBSCAN does. Of course we need to formalise things to make it a concrete algorithm.
#
# First we need a different measure than distance to consider the persistence of clusters; instead we will use $\lambda = \frac{1}{\mathrm{distance}}$. For a given cluster we can then define values $\lambda_{\mathrm{birth}}$ and $\lambda_{\mathrm{death}}$ to be the lambda value when the cluster split off and became its own cluster, and the lambda value (if any) when the cluster split into smaller clusters respectively. In turn, for a given cluster, for each point *p* in that cluster we can define the value $\lambda_p$ as the lambda value at which that point 'fell out of the cluster' which is a value somewhere between $\lambda_{\mathrm{birth}}$ and $\lambda_{\mathrm{death}}$ since the point either falls out of the cluster at some point in the cluster's lifetime, or leaves the cluster when the cluster splits into two smaller clusters. Now, for each cluster compute the **stability** as
#
# $\sum_{p \in \mathrm{cluster}} (\lambda_p - \lambda_{\mathrm{birth}})$.
#
# Declare all leaf nodes to be selected clusters. Now work up through the tree (the reverse topological sort order). If the sum of the stabilities of the child clusters is greater than the stability of the cluster then we set the cluster stability to be the sum of the child stabilities. If, on the other hand, the cluster's stability is greater than the sum of its children then we declare the cluster to be a selected cluster, and unselect all its descendants. Once we reach the root node we call the current set of selected clusters our flat clustering and return that.
#
# Okay, that was wordy and complicated, but it really is simply performing our 'select the clusters in the plot with the largest total ink area' subject to descendant constraints that we explained earlier. We can select the clusters in the condensed tree dendrogram via this algorithm, and you get what you expect:
clusterer.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette())
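# To see the selection rule in isolation, here is a toy sketch of the bottom-up pass on a hand-made tree of stabilities. It is purely illustrative -- the numbers are invented, and the library works on the real condensed tree (by default it also declines to select the root itself):
tree = {
    'root': (1.0, ['A', 'B']),
    'A':    (3.0, ['A1', 'A2']),
    'A1':   (1.0, []),
    'A2':   (1.5, []),
    'B':    (4.0, []),
}

def descendants(node):
    # iterative walk over everything below `node`
    stack = list(tree[node][1])
    found = []
    while stack:
        child = stack.pop()
        found.append(child)
        stack.extend(tree[child][1])
    return found

def select(node, selected):
    """Return the best total stability achievable below `node`, updating `selected`."""
    stability, children = tree[node]
    if not children:
        selected.add(node)
        return stability
    child_total = sum(select(child, selected) for child in children)
    if stability > child_total and node != 'root':
        selected.difference_update(descendants(node))
        selected.add(node)
        return stability
    return child_total

selected = set()
select('root', selected)
print(selected)  # expect {'A', 'B'}: 'A' beats its children (3.0 > 2.5) and 'B' is a selected leaf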
# Now that we have the clusters it is a simple enough matter to turn that into cluster labelling as per the sklearn API. Any point not in a selected cluster is simply a noise point (and assigned the label -1). We can do a little more though: for each cluster we have the $\lambda_p$ for each point *p* in that cluster; if we simply normalize those values (so they range from zero to one) then we have a measure of the strength of cluster membership for each point in the cluster. The hdbscan library returns this as a `probabilities_` attribute of the clusterer object. Thus, with labels and membership strengths in hand we can make the standard plot, choosing a color for points based on cluster label, and desaturating that color according to the strength of membership (and making unclustered points pure gray).
palette = sns.color_palette()
cluster_colors = [sns.desaturate(palette[col], sat)
if col >= 0 else (0.5, 0.5, 0.5) for col, sat in
zip(clusterer.labels_, clusterer.probabilities_)]
plt.scatter(test_data.T[0], test_data.T[1], c=cluster_colors, **plot_kwds)
# And that is how HDBSCAN works. It may seem somewhat complicated -- there are a fair number of moving parts to the algorithm -- but ultimately each part is actually very straightforward and can be optimized well. Hopefully with a better understanding both of the intuitions and some of the implementation details of HDBSCAN you will feel motivated to [try it out](https://github.com/lmcinnes/hdbscan). The library continues to develop, and will provide a base for new ideas including a near parameterless Persistent Density Clustering algorithm, and a new semi-supervised clustering algorithm.
| 18,295 |
/.ipynb_checkpoints/credit_risk_ensemble-checkpoint.ipynb | 665e2ae82f3f3d031a07a0d28e04027a890a557a | [] | no_license | Davisg1179/classification-homework | https://github.com/Davisg1179/classification-homework | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 37,661 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ensemble Learning
#
# ## Initial Imports
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
# ## Read the CSV and Perform Basic Data Cleaning
# +
# Load the data
file_path = Path('LoanStats_2019Q1.csv')
df = pd.read_csv(file_path)
# Preview the data
df.head()
# -
# ## Split the Data into Training and Testing
# +
# Create our features
X = df.copy()
X.drop("loan_status", axis=1, inplace=True)
X = pd.get_dummies(X)
X.head()
# Create our target
y = df["loan_status"]
# -
X.describe()
# Check the balance of our target values
y.value_counts()
# Split the X and y into X_train, X_test, y_train, y_test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=78)
# ## Data Pre-Processing
#
# Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).
# Create the StandardScaler instance
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit the Standard Scaler with the training data
# When fitting scaling functions, only train on the training dataset
X_scaler = scaler.fit(X_train)
# Scale the training and testing data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# ## Ensemble Learners
#
# In this section, you will compare two ensemble algorithms to determine which algorithm results in the best performance. You will train a Balanced Random Forest Classifier and an Easy Ensemble Classifier. For each algorithm, be sure to complete the following steps:
#
# 1. Train the model using the training data.
# 2. Calculate the balanced accuracy score from sklearn.metrics.
# 3. Display the confusion matrix from sklearn.metrics.
# 4. Generate a classification report using `classification_report_imbalanced` from imbalanced-learn.
# 5. For the Balanced Random Forest Classifier only, print the feature importance sorted in descending order (most important feature to least important) along with the feature score
#
# Note: Use a random state of 1 for each algorithm to ensure consistency between tests
# ### Balanced Random Forest Classifier
# Resample the training data with the BalancedRandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
brf = BalancedRandomForestClassifier(n_estimators=100, random_state=1)
brf.fit(X_train, y_train)
# Calculated the balanced accuracy score
from sklearn.metrics import balanced_accuracy_score
y_pred = brf.predict(X_test)
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)
# +
# Print the imbalanced classification report
from imblearn.metrics import classification_report_imbalanced
y_pred = brf.predict(X_test)
print(classification_report_imbalanced(y_test, y_pred))
# -
# List the features sorted in descending order by feature importance
importances = brf.feature_importances_
importances_sorted = sorted(zip(brf.feature_importances_, X.columns), reverse=True)
importances_sorted
# ### Easy Ensemble Classifier
# Train the Classifier
from imblearn.ensemble import EasyEnsembleClassifier
ee = EasyEnsembleClassifier(n_estimators=100, random_state=1)
ee.fit(X_train, y_train)
# Calculated the balanced accuracy score
y_pred = ee.predict(X_test)
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
y_pred = ee.predict(X_test)
print(classification_report_imbalanced(y_test, y_pred))
# ### Final Questions
#
# 1. Which model had the best balanced accuracy score?
#
# YOUR ANSWER HERE.
#
# 2. Which model had the best recall score?
#
# YOUR ANSWER HERE.
#
# 3. Which model had the best geometric mean score?
#
# YOUR ANSWER HERE.
#
# 4. What are the top three features?
#
# YOUR ANSWER HERE.
# 1. The Easy Ensemble model gives the best balanced accuracy score
# 2. The Easy Ensemble model has the best recall score
# 3. The Easy Ensemble model has the best geometric mean score
# 4. The top three features are 'total_rec_prncp', 'total_pymnt', and 'total_pymnt_inv'
| 4,754 |
/gt_exact_stats_soc_ep.ipynb | 42161d76ee8b44ef2aedc21cb7b9dc4139014a90 | [] | no_license | MaximilianPavon/DM_project | https://github.com/MaximilianPavon/DM_project | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,378 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import analysis
import time
filenames = ['soc-Epinions1.txt']
# %%time
g = analysis.load_graph(filenames[0], directed=True)
print('vertices:', g.num_vertices(), 'edges:', g.num_edges())
# +
# %%time
print('=====LSCC=====')
lscc = analysis.calculate_largest_strongly_connected_comp(g)
print('LSCC edges: \t', lscc.num_edges())
print('LSCC nodes: \t', lscc.num_vertices())
lscc_dists = analysis.calculate_distances(lscc)
s_median, s_mean, s_diam, s_eff_diam = analysis.compute_stats(lscc_dists)
print('median distance:\t', s_median)
print('mean distance:\t\t', s_mean)
print('diameter:\t\t', s_diam)
print('effective diameter:\t', s_eff_diam)
# +
# %%time
print('=====LWCC=====')
lwcc = analysis.calculate_largest_weakly_connected_comp(g)
print('LWCC edges: \t', lwcc.num_edges())
print('LWCC nodes: \t', lwcc.num_vertices())
lwcc_dists = analysis.calculate_distances(lwcc)
w_median, w_mean, w_diam, w_eff_diam = analysis.compute_stats(lwcc_dists)
print('median distance:\t', w_median)
print('mean distance:\t\t', w_mean)
print('diameter:\t\t', w_diam)
print('effective diameter:\t', w_eff_diam)
'postag=' + postag,
'postag[:2]=' + postag[:2],
]
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][1]
features.extend([
'-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(),
'-1:postag=' + postag1,
'-1:postag[:2]=' + postag1[:2],
])
else:
features.append('BOS')
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][1]
features.extend([
'+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(),
'+1:postag=' + postag1,
'+1:postag[:2]=' + postag1[:2],
])
else:
features.append('EOS')
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, postag, label in sent]
def sent2tokens(sent):
return [token for token, postag, label in sent]
# -
sent2features(train_sents[0:4])
# +
# %%time
X_train = sent2features(train_sents)
y_train = sent2labels(train_sents)
X_test = sent2features(train_sents)
y_test = sent2labels(train_sents)
# -
# temp=unravelravel(X_train)
# for i in range ...  (incomplete scratch line)
# +
len(X_train),len(y_train)
# +
# # %%time
# X_train = [sent2features(s) for s in train_sents]
# y_train = [sent2labels(s) for s in train_sents]
#
# X_test = [sent2features(s) for s in test_sents]
# y_test = [sent2labels(s) for s in test_sents]
# +
# %%time
trainer = pycrfsuite.Trainer(verbose=False)
trainer.append(X_train, y_train)
# +
# # %%time
# trainer = pycrfsuite.Trainer(verbose=False)
# for xseq, yseq in zip(X_train, y_train):
# trainer.append(xseq, yseq)
# -
trainer.set_params({
'c1': 1.0, # coefficient for L1 penalty
'c2': 1e-3, # coefficient for L2 penalty
'max_iterations': 50, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': True
})
trainer.params()
# %%time
trainer.train('conll2002-esp.crfsuite')
tagger = pycrfsuite.Tagger()
tagger.open('conll2002-esp.crfsuite')
# +
example_sent = test_sents[0:10]
print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
# -
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels = [class_indices[cls] for cls in tagset],
target_names = tagset,
)
# %%time
y_pred = [tagger.tag(xseq) for xseq in X_test]
# %%time
y_pred = tagger.tag(X_test)
def trained_model_performance(predicted_list):
tag_list=[x[1] for x in predicted_list]
NE_list=list(set(tag_list))
print (NE_list)
if 'O' in NE_list:
NE_list.remove('O')
if len(NE_list)!=0:
count=[(x,tag_list.count(x))for x in NE_list]
var_list=[]
for i in NE_list:
var_list=var_list+['tp_'+i,'fp_'+i,'fn_'+i]
variable_dict=dict.fromkeys(var_list, 0)
for lines in predicted_list:
if lines[1]!='O':
if lines[1]==lines[2]:
variable_dict['tp_'+str(lines[1])]=variable_dict['tp_'+str(lines[1])]+1
elif lines[1]!=lines[2]:
if lines[2]=='O':
variable_dict['fn_'+str(lines[1])]=variable_dict['fn_'+str(lines[1])]+1
else:
variable_dict['fp_'+str(lines[1])]=variable_dict['fp_'+str(lines[1])]+1
else:
if lines[2]!='O':
variable_dict['fp_'+str(lines[2])]=variable_dict['fp_'+str(lines[2])]+1
print ('NE counts', count)
print ("Entity TP FP FN")
tp=0
fp=0
fn=0
for ne in NE_list:
tp=tp+variable_dict['tp_'+ne]
fp=fp+variable_dict['fp_'+ne]
fn=fn+variable_dict['fn_'+ne]
print (ne, variable_dict['tp_'+ne] ,variable_dict['fp_'+ne] ,variable_dict['fn_'+ne] )
if (tp+fp)==0 or (tp+fn)==0:
print ('division by zero')
precision='denominator_zero'
recall='denominator_zero'
else:
precision=float(tp)/(tp+fp)
recall=float(tp)/(tp+fn)
print ('precision =',float(precision))
print ('recall =' ,float(recall))
return (precision,recall)
else:
return ('No NE','No NE')
pred=zip(tokens,y_test,y_pred)
pred=[x for x in pred]
len(y_pred),len(y_test),len(tokens),len(pred)
trained_model_performance(pred)
results_train=trained_model_performance(pred_training)
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_test)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
len(y_true_combined),len(y_pred_combined)
classification_report(
y_true_combined,
y_pred_combined,
labels = [class_indices[cls] for cls in tagset],
target_names = tagset
)
print(bio_classification_report(y_test, y_pred))
# +
from collections import Counter
info = tagger.info()
def print_transitions(trans_features):
for (label_from, label_to), weight in trans_features:
print("%-6s -> %-7s %0.6f" % (label_from, label_to, weight))
print("Top likely transitions:")
print_transitions(Counter(info.transitions).most_common(15))
print("\nTop unlikely transitions:")
print_transitions(Counter(info.transitions).most_common()[-15:])
# +
def print_state_features(state_features):
for (attr, label), weight in state_features:
print("%0.6f %-6s %s" % (weight, label, attr))
print("Top positive:")
print_state_features(Counter(info.state_features).most_common(20))
print("\nTop negative:")
print_state_features(Counter(info.state_features).most_common()[-20:])
# -
import nltk
groucho_grammar = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det N | Det N PP | 'I'
VP -> V NP | VP PP
Det -> 'an' | 'my'
N -> 'elephant' | 'pajamas'
V -> 'shot'
P -> 'in'
""")
sent = ['I', 'shot', 'an', 'gulshan', 'in', 'my', 'pajamas']
parser = nltk.ChartParser(groucho_grammar)
for tree in parser.parse(sent):
print(tree)
from nltk.grammar import DependencyGrammar
from nltk.parse import *
| 9,812 |
/cross_validation/cross_validation.ipynb | 10f5ca4e496e7869186c5dbbe3f6c76c46ce8b1c | [] | no_license | smart1004/tools | https://github.com/smart1004/tools | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 9,146 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://datascienceschool.net/view-notebook/266d699d748847b3a3aa7b9805b846ae/
#
# https://m.blog.naver.com/PostView.nhn?blogId=sanghan1990&logNo=221116465873&proxyReferer=https%3A%2F%2Fwww.google.com%2F
#
# conda install statsmodels
# #pip install -U statsmodels
# +
import pandas as pd
import numpy as np
from sklearn.datasets import load_boston
boston = load_boston()
dfX = pd.DataFrame(boston.data, columns=boston.feature_names)
dfy = pd.DataFrame(boston.target, columns=["MEDV"])
df = pd.concat([dfX, dfy], axis=1)
N = len(df)
ratio = 0.7
np.random.seed(0)
idx_train = np.random.choice(np.arange(N), int(ratio * N))
idx_test = list(set(np.arange(N)).difference(idx_train))
df_train = df.iloc[idx_train]
df_test = df.iloc[idx_test]
# -
import statsmodels.formula.api as sm
model = sm.OLS.from_formula("MEDV ~ " + "+".join(boston.feature_names), data=df_train)
result = model.fit()
print(result.summary())
# +
from sklearn.model_selection import KFold
scores = np.zeros(5)
cv = KFold(5, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df)):
df_train = df.iloc[idx_train]
df_test = df.iloc[idx_test]
model = sm.OLS.from_formula("MEDV ~ " + "+".join(boston.feature_names), data=df_train)
result = model.fit()
pred = result.predict(df_test)
rss = ((df_test.MEDV - pred) ** 2).sum()
tss = ((df_test.MEDV - df_test.MEDV.mean())** 2).sum()
rsquared = 1 - rss / tss
scores[i] = rsquared
    print("train R2 = {:.8f}, validation R2 = {:.8f}".format(result.rsquared, rsquared))
# +
from sklearn.metrics import r2_score
scores = np.zeros(5)
cv = KFold(5, shuffle=True, random_state=0)
for i, (idx_train, idx_test) in enumerate(cv.split(df)):
df_train = df.iloc[idx_train]
df_test = df.iloc[idx_test]
model = sm.OLS.from_formula("MEDV ~ " + "+".join(boston.feature_names), data=df_train)
result = model.fit()
pred = result.predict(df_test)
rsquared = r2_score(df_test.MEDV, pred)
scores[i] = rsquared
scores
# +
from sklearn.base import BaseEstimator, RegressorMixin
import statsmodels.formula.api as smf
import statsmodels.api as sm
class StatsmodelsOLS(BaseEstimator, RegressorMixin):
def __init__(self, formula):
self.formula = formula
self.model = None
self.data = None
self.result = None
def fit(self, dfX, dfy):
self.data = pd.concat([dfX, dfy], axis=1)
self.model = smf.ols(self.formula, data=self.data)
self.result = self.model.fit()
def predict(self, new_data):
return self.result.predict(new_data)
# +
from sklearn.model_selection import cross_val_score
model = StatsmodelsOLS("MEDV ~ " + "+".join(boston.feature_names))
cv = KFold(5, shuffle=True, random_state=0)
cross_val_score(model, dfX, dfy, scoring="r2", cv=cv)
# -
| 3,122 |
/.ipynb_checkpoints/smoother-checkpoint.ipynb | 643d85103e8d5f2a526f1b501890fa94d16141e7 | [] | no_license | ogi-iii/SparseAdditiveModels | https://github.com/ogi-iii/SparseAdditiveModels | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 22,988 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy
from scipy.interpolate import UnivariateSpline
import matplotlib.pylab as plt
# %matplotlib inline
x = np.linspace(-3, 3, 50)
y = np.sin(x**2) + np.random.randn(50)
plt.plot(x, y, 'ro', ms=5)
# +
plt.plot(x, y, 'ro', ms=5)
xs = np.linspace(-3, 3, 1000)
ss = UnivariateSpline(x, y) # if the s parameter is not given, it defaults to roughly len(y) (the number of data points)
fxs = ss(xs)
#print(fxs)
plt.plot(xs, fxs, '-')
# -
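# The comment in the cell above notes that `UnivariateSpline` falls back to a default smoothing factor when `s` is omitted (roughly the number of data points for unit weights). A small sketch of how `s` changes the fit on the same data: `s=0` forces interpolation through every point, while a larger `s` gives a smoother curve (the values below are arbitrary examples).
# +
ss_interp = UnivariateSpline(x, y, s=0)           # passes through every point
ss_smooth = UnivariateSpline(x, y, s=2 * len(x))  # smoother than the default
plt.plot(x, y, 'ro', ms=5)
plt.plot(xs, ss_interp(xs), '-', label='s=0 (interpolating)')
plt.plot(xs, ss_smooth(xs), '-', label='s=2*len(x)')
plt.legend()
# -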
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from solutions_univ import bar_chart_solution_1, bar_chart_solution_2
# -
# In this workspace, you'll be working with this dataset comprised of attributes of creatures in the video game series Pokรฉmon. The data was assembled from the database of information found in [this GitHub repository](https://github.com/veekun/pokedex/tree/master/pokedex/data/csv).
pokemon = pd.read_csv('./data/pokemon.csv')
pokemon.head()
# **Task 1**: There have been quite a few Pokรฉmon introduced over the series' history. How many were introduced in each generation? Create a _bar chart_ of these frequencies using the 'generation_id' column.
base_color = sb.color_palette()[0]
sb.countplot(data=pokemon, x='generation_id', color=base_color)
# Once you've created your chart, run the cell below to check the output from our solution. Your visualization does not need to be exactly the same as ours, but it should be able to come up with the same conclusions.
bar_chart_solution_1()
# **Task 2**: Each Pokรฉmon species has one or two 'types' that play a part in its offensive and defensive capabilities. How frequent is each type? The code below creates a new dataframe that puts all of the type counts in a single column.
pkmn_types = pokemon.melt(id_vars = ['id','species'],
value_vars = ['type_1', 'type_2'],
var_name = 'type_level', value_name = 'type').dropna()
pkmn_types.head()
# Your task is to use this dataframe to create a _relative frequency_ plot of the proportion of Pokรฉmon with each type, _sorted_ from most frequent to least. **Hint**: The sum across bars should be greater than 100%, since many Pokรฉmon have two types. Keep this in mind when considering a denominator to compute relative frequencies.
# +
n_count = pokemon.shape[0]
pkmn_counts = pkmn_types['type'].value_counts()
pkmn_index = pkmn_counts.index
pkmn_max = pkmn_counts.max() / n_count
tick = np.arange(0, pkmn_max, 0.02)
tick_label = ['{:.02f}'.format(x) for x in tick]
# -
sb.countplot(data=pkmn_types, y='type', color=base_color, order=pkmn_index)
plt.xticks(tick*n_count, tick_label)
plt.xlabel('proportion')
bar_chart_solution_2()
# If you're interested in seeing the code used to generate the solution plots, you can find it in the `solutions_univ.py` script in the workspace folder. You can navigate there by clicking on the Jupyter icon in the upper left corner of the workspace. Spoiler warning: the script contains solutions for all of the workspace exercises in this lesson, so take care not to spoil your practice!
| 3,207 |
/3 Matplotlib/32-MatplotlibExercises.ipynb | 6d45b86817621f8aaeba560001e2272875b023d6 | [] | no_license | jovanidesouza/CalculoComplementar | https://github.com/jovanidesouza/CalculoComplementar | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 104,268 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data_df = pd.read_csv(file_to_load)
purchase_data_df.head()
# -
#check for missing data
purchase_data_df.count()
# ## Player Count
# * Display the total number of players
#
# +
#Calculate the total number of players
Total_Players = len(purchase_data_df["SN"].unique())
#Create new dataframe of Total Players
Total_Players_df = pd.DataFrame({"Total Players":[Total_Players]})
Total_Players_df
# -
# ## Purchasing Analysis (Total)
# * Run basic calculations to obtain number of unique items, average price, etc.
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
#
# +
# Unique on Item_ID
Total_Unique_Items = len(purchase_data_df["Item ID"].unique())
#Take average of Price
Average_Price = purchase_data_df["Price"].mean()
#Len of Purchase ID
Total_Purchased_Items = len(purchase_data_df["Purchase ID"])
#Sum of Price
Tot_Revenue = purchase_data_df["Price"].sum()
#Create new Dataframe of summarized dat
Summary_df = pd.DataFrame({"Number of Unique Items":[Total_Unique_Items],"Average Price":Average_Price,"Number of Purchases":Total_Purchased_Items,"Total Revenue":Tot_Revenue})
Summary_df
# -
# ## Gender Demographics
# * Percentage and Count of Male Players
#
#
# * Percentage and Count of Female Players
#
#
# * Percentage and Count of Other / Non-Disclosed
#
#
#
# +
Player_Group = purchase_data_df.groupby("SN")
# one row per player, keeping each player's gender
player_genders = Player_Group["Gender"].first()
gender_counts = player_genders.value_counts()
gender_demographics_df = pd.DataFrame({"Total Count": gender_counts,
                                       "Percentage of Players": (gender_counts / Total_Players * 100).round(2)})
gender_demographics_df
# -
# +
#User_Group_df = purchase_data_df.groupby(['SN'])
#Gender_Group_df = User_Group_df["Gender"].count()
#Gender_Group_df
# +
#Gender_Group_df = purchase_data_df.groupby(['Gender'])
#Gender_Group_2_df = Gender_Group_df['SN'].unique
#Gender_Group_2_df
# +
#Get count of different genders
#Gender_Count = User_Group_df["Gender"].value_counts()
#Gender_Count
# -
#
# ## Purchasing Analysis (Gender)
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
#
#
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
# +
items_purchased = Player_Group["SN"].count()
#
# -
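# The checklist above asks for purchase count, average price, and average total per person by gender. A minimal sketch (column names taken from the CSV preview above; output left unformatted):
# +
gender_purchases = purchase_data_df.groupby("Gender").agg(
    purchase_count=("Purchase ID", "count"),
    avg_purchase_price=("Price", "mean"),
    total_purchase_value=("Price", "sum"))
# average total per person = total value / number of unique players of that gender
unique_players_by_gender = purchase_data_df.groupby("Gender")["SN"].nunique()
gender_purchases["avg_total_per_person"] = gender_purchases["total_purchase_value"] / unique_players_by_gender
gender_purchases
# -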
# ## Age Demographics
# * Establish bins for ages
#
#
# * Categorize the existing players using the age bins. Hint: use pd.cut()
#
#
# * Calculate the numbers and percentages by age group
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: round the percentage column to two decimal points
#
#
# * Display Age Demographics Table
#
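# A minimal sketch of the binning steps above (the bin edges are one reasonable choice, and an `Age` column is assumed to exist in the CSV):
# +
age_bins = [0, 9, 14, 19, 24, 29, 34, 39, 200]
age_labels = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
unique_players = purchase_data_df.drop_duplicates("SN").copy()
unique_players["Age Group"] = pd.cut(unique_players["Age"], bins=age_bins, labels=age_labels)
age_counts = unique_players["Age Group"].value_counts().sort_index()
age_demographics_df = pd.DataFrame({"Total Count": age_counts,
                                    "Percentage of Players": (age_counts / Total_Players * 100).round(2)})
age_demographics_df
# -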
# ## Purchasing Analysis (Age)
# * Bin the purchase_data data frame by age
#
#
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
#
#
# * Create a summary data frame to hold the results
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display the summary data frame
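# Reusing the bins from the sketch above, the per-age-group purchase metrics can be computed the same way (again assuming an `Age` column):
# +
purchase_data_df["Age Group"] = pd.cut(purchase_data_df["Age"], bins=age_bins, labels=age_labels)
age_purchases = purchase_data_df.groupby("Age Group").agg(
    purchase_count=("Purchase ID", "count"),
    avg_purchase_price=("Price", "mean"),
    total_purchase_value=("Price", "sum"))
age_purchases
# -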
# ## Top Spenders
# * Run basic calculations to obtain the results in the table below
#
#
# * Create a summary data frame to hold the results
#
#
# * Sort the total purchase value column in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the summary data frame
#
#
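# A sketch of the top-spenders table outlined above:
# +
top_spenders = purchase_data_df.groupby("SN").agg(
    purchase_count=("Purchase ID", "count"),
    avg_purchase_price=("Price", "mean"),
    total_purchase_value=("Price", "sum")).sort_values("total_purchase_value", ascending=False)
top_spenders.head()
# -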
# ## Most Popular Items
# * Retrieve the Item ID, Item Name, and Item Price columns
#
#
# * Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value
#
#
# * Create a summary data frame to hold the results
#
#
# * Sort the purchase count column in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the summary data frame
#
#
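# A sketch of the most-popular-items table outlined above:
# +
item_stats = purchase_data_df.groupby(["Item ID", "Item Name"]).agg(
    purchase_count=("Price", "count"),
    item_price=("Price", "mean"),
    total_purchase_value=("Price", "sum"))
most_popular_items = item_stats.sort_values("purchase_count", ascending=False)
most_popular_items.head()
# -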
# ## Most Profitable Items
# * Sort the above table by total purchase value in descending order
#
#
# * Optional: give the displayed data cleaner formatting
#
#
# * Display a preview of the data frame
#
#
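# Re-sorting the same item table by total purchase value gives the most profitable items:
# +
most_profitable_items = item_stats.sort_values("total_purchase_value", ascending=False)
most_profitable_items.head()
# -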
figura = plt.figure()
ax3 = figura.add_axes([0,0,1,1])
ax4 = figura.add_axes([0.2,0.5,.4,.4])
# + [markdown] id="A4h6_l49TLXq" colab_type="text"
# ## Exercise 4
#
# **Use plt.subplots(nrows=1, ncols=2) to create the chart below.**
# + id="4fYIyIjwTLXr" colab_type="code" outputId="84295de1-0b5d-4989-9fc8-f849af66e081" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1572972379389, "user_tz": 120, "elapsed": 922, "user": {"displayName": "jovani de souza", "photoUrl": "https://lh5.googleusercontent.com/-oK60BEkZFG4/AAAAAAAAAAI/AAAAAAAAALQ/c3xa2z4LUBE/s64/photo.jpg", "userId": "13092737875236348379"}}
fig,axes = plt.subplots(nrows=1,ncols=2)
# + [markdown] id="Mx7saP-GTLXs" colab_type="text"
# **Now plot (x, y) and (x, z) on those axes. Play with the line width and style.**
# + id="W9ilP-1cTLXt" colab_type="code" outputId="e6d088db-039b-452b-99a6-420f12fb9c72" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1572972430035, "user_tz": 120, "elapsed": 1154, "user": {"displayName": "jovani de souza", "photoUrl": "https://lh5.googleusercontent.com/-oK60BEkZFG4/AAAAAAAAAAI/AAAAAAAAALQ/c3xa2z4LUBE/s64/photo.jpg", "userId": "13092737875236348379"}}
fig,axes = plt.subplots(nrows=1,ncols=2)
axes[0].plot(x,y,color='blue',lw="5",ls="--")
axes[1].plot(x,z,color='red',lw="3")
# + [markdown] id="5VI9EqrITLXu" colab_type="text"
#
# **See if you can resize the plot by adding the figsize() argument to plt.subplots(), copying and pasting the previous code.**
# + id="Hxyevp9YTLXv" colab_type="code" outputId="7a342ce1-c857-491f-f9df-c2f9f3317aa5" colab={"base_uri": "https://localhost:8080/", "height": 228} executionInfo={"status": "ok", "timestamp": 1572972555608, "user_tz": 120, "elapsed": 1144, "user": {"displayName": "jovani de souza", "photoUrl": "https://lh5.googleusercontent.com/-oK60BEkZFG4/AAAAAAAAAAI/AAAAAAAAALQ/c3xa2z4LUBE/s64/photo.jpg", "userId": "13092737875236348379"}}
fig,axes = plt.subplots(nrows=1,ncols=2, figsize=(7,3))
axes[0].plot(x,y,color='blue',lw="5",ls="--")
axes[1].plot(x,z,color='red',lw="3")
| 6,877 |
/aa/10/code/ไธ็งๅๅฝๆจกๅๆฏ่พ.ipynb | 74cf3f58736da8890a2468d35adefe0d8cd42e6c | [] | no_license | shuaixiaohao/ML | https://github.com/shuaixiaohao/ML | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 108,286 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Question :** Load the iris.csv dataset using pandas
#
# **Level:** Easy
# **Input format :**
#
# Load the iris dataset
# **Output format :**
#
# csv values
# **Sample Input :**
#
# import packages
# **Sample Output :**
#
# values
# +
import pandas as pd
# write your code here
# -
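# One possible answer for the task above (the file name `iris.csv` comes from the prompt; it is assumed to sit in the working directory):
# +
iris_df = pd.read_csv("iris.csv")
iris_df.head()
# -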
# NOTE: the first lines of this cell are truncated in the source; the imports,
# the 50x200 random design matrix X, and the coefficient vector are restored
# here under that assumption.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
X = np.random.random(size=(50, 200))
coefs = np.random.random(size=200)
# Assume that only 10 of the 200 features actually influence the target
index = np.arange(0,200,1)
# Shuffle the indices so the 10 informative features are chosen at random
np.random.shuffle(index)
# -
coefs[index[10:]] = 0
# Multiplying the features by the coefficients gives the sample labels
y = np.dot(X,coefs)
y.shape
y.max(),y.min()
# Add noise to the sample labels
noise = np.random.random(size=50)*2-1
noise.max(),noise.min()
# Add the noise to the label vector y
y += noise
plt.plot(coefs)
plt.xlabel('features')
plt.ylabel('coefs')
# +
linear = LinearRegression()
ridge = Ridge(alpha=100000)
lasso = Lasso(alpha=0.05)
linear.fit(X,y)
ridge.fit(X,y)
lasso.fit(X,y)
# Keep the coefficients learned by each of the three algorithms
coefs1 = linear.coef_
coefs2 = ridge.coef_
coefs3 = lasso.coef_
plt.figure(figsize=(12,8))
axes1 = plt.subplot(2,2,1)
axes1.plot(coefs)
axes1.set_title('True',color='blue')
axes2 = plt.subplot(2,2,2)
axes2.plot(coefs1,color='green')
axes2.set_title('LinearRegression')
axes3 = plt.subplot(2,2,3)
axes3.plot(coefs2,color='orange')
axes3.set_title('Ridge')
axes4 = plt.subplot(2,2,4)
axes4.plot(coefs3,color='cyan')
axes4.set_title('Lasso')
# -
a = np.array([1,2,3,4,5])
b = [0,3,4]
a[b]
# +
x = np.array([[1,2,3],[2,3,4],[3,4,5]])
w = np.array([0,1,2])
np.dot(x,w)
| 1,649 |
/PPE/ENSEMBLE2/scripts/Analyse/Choix_param_reduce.ipynb | d2828a790a648d7e6d77b68cb2d989c47091adc5 | [] | no_license | speatier/CNRMppe_save | https://github.com/speatier/CNRMppe_save | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 634,474 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
prokka=pd.read_csv("/home/tsweet/Going_through_Phages/data/vgasGeneFunctionData_Prokka.csv")
prokka.head(10)
data = np.array(prokka)
covMatrix = np.cov(data,bias=True)
print (covMatrix)
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import pandas.plotting
import matplotlib.ticker as ticker
# scatter plot matrix of the quantitative variables
from pandas.plotting import scatter_matrix
import seaborn as sns; sns.set()
# Scikit-learn
from sklearn import linear_model
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
from sklearn import preprocessing
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.neural_network import MLPRegressor
# -
# ## Import functions
import sys
sys.path.append('/data/home/globc/peatier/CNRMppe')
import Fonctions
from Fonctions import get_wavg_budget_df
from Fonctions import wavg
from Fonctions import plotlines_Xdf
from Fonctions import plotlines_1df
from Fonctions import Deltas_Lambda
from Fonctions import get_3D_budget_xarr
from Fonctions import get_3D_xarr
from Fonctions import get_3D_SW_xarr
from Fonctions import get_3D_LW_xarr
# # Import data
# +
param_values = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/X_EmulateurFeedbacksN.npy")
feedbacks = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/Net_feedbacks.npy")
pc1_SW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF1pc_SW.npy")
pc2_SW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF2pc_SW.npy")
pc3_SW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF3pc_SW.npy")
pc1_LW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF1pc_LW.npy")
pc2_LW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF2pc_LW.npy")
pc3_LW = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/PPE2_EOF3pc_LW.npy")
LW_feedbacks = np.load(file = "/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/LW_feedbacks.npy")
SW_feedbacks = np.load(file = "/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/SW_feedbacks.npy")
param_names = np.load(file="/data/home/globc/peatier/CNRMppe/PPE/ENSEMBLE2/files/npy/LHS_paramNames.npy")
# +
feedbacks_classes = (feedbacks*10).astype('int')/10
pc1_SW_classes = (pc1_SW*10).astype('int')/10
pc2_SW_classes = (pc2_SW*10).astype('int')/10
pc3_SW_classes = (pc3_SW*10).astype('int')/10
pc1_LW_classes = (pc1_LW*10).astype('int')/10
pc2_LW_classes = (pc2_LW*10).astype('int')/10
pc3_LW_classes = (pc3_LW*10).astype('int')/10
LW_feedbacks_classes = (LW_feedbacks*10).astype('int')/10
SW_feedbacks_classes = (SW_feedbacks*10).astype('int')/10
# +
df = pd.DataFrame(param_values, columns = param_names)
df['Net_Feedbacks'] = feedbacks_classes
df['LW_feedbacks'] = LW_feedbacks_classes
df['SW_feedbacks'] = SW_feedbacks_classes
df['pc1_SW'] = pc1_SW_classes
df['pc2_SW'] = pc2_SW_classes
df['pc3_SW'] = pc3_SW_classes
df['pc1_LW'] = pc1_LW_classes
df['pc2_LW'] = pc2_LW_classes
df['pc3_LW'] = pc3_LW_classes
df
# -
# # Pair plot
data = ['RKDX', 'AGRE2', 'RAUTEFR', 'VVN', 'RQLCR', 'Net_Feedbacks']
df_short = df[data]
data_short = ['RKDX', 'AGRE2', 'RAUTEFR', 'VVN', 'RQLCR']
df_short
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Pair plot of parameters with the highest Sobol index
sm = plt.cm.ScalarMappable(cmap='RdBu_r', norm=norm)
ax = sns.pairplot(df_short,hue='Net_Feedbacks',markers="o",
palette=('coolwarm'),vars=data_short, diag_kind='hist', diag_kws = {'alpha': 1.0, 'edgecolor' : None},
plot_kws = {'alpha': 1.0, 'edgecolor' : None})
#ax._legend.remove()
#ax.fig.legend(labels=range(0,10,1), title = 'Net Feedbacks')
ax.fig.subplots_adjust(top=0.92, bottom=0.08)
# Title
plt.suptitle('5 dominant parameters for Net Feedbacks',
size = 28)
# Save the figures ...............................................................
#g.savefig("/data/home/globc/peatier/figures/Pairplot_HighSoboIndices.png", dpi=None,
# orientation='portrait', bbox_inches='tight', pad_inches=0.1,
# frameon=None, metadata=None)
# Show the graph ..................................
plt.show()
# -
data = ['TFVL', 'RSWINHF_ICE', 'AGRE2', 'VVN', 'RLWINHF_LIQ', 'SW_feedbacks']
df_short = df[data]
data_short = ['TFVL', 'RSWINHF_ICE', 'AGRE2', 'VVN', 'RLWINHF_LIQ']
df_short
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Pair plot of parameters with the highest Sobol index
sm = plt.cm.ScalarMappable(cmap='coolwarm', norm=norm)
ax = sns.pairplot(df_short,hue='SW_feedbacks',markers="o",
palette=('coolwarm'),vars=data_short, diag_kind='hist', diag_kws = {'alpha': 1.0, 'edgecolor' : None},
plot_kws = {'alpha': 1.0, 'edgecolor' : None})
#ax._legend.remove()
#ax.fig.legend(labels=range(0,10,1), title = 'Net Feedbacks')
ax.fig.subplots_adjust(top=0.92, bottom=0.08)
# Title
plt.suptitle('5 dominant parameters for SW feedbacks',
size = 28)
# Save the figures ...............................................................
#g.savefig("/data/home/globc/peatier/figures/Pairplot_HighSoboIndices.png", dpi=None,
# orientation='portrait', bbox_inches='tight', pad_inches=0.1,
# frameon=None, metadata=None)
# Show the graph ..................................
plt.show()
# -
data = ['VVN', 'AGRE2', 'AGRE1', 'RLWINHF_ICE', 'TENTRX', 'LW_feedbacks']
df_short = df[data]
data_short = ['VVN', 'AGRE2', 'AGRE1', 'RLWINHF_ICE', 'TENTRX']
df_short
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Pair plot of parameters with the highest Sobol index
sm = plt.cm.ScalarMappable(cmap='coolwarm', norm=norm)
ax = sns.pairplot(df_short,hue='LW_feedbacks',markers="o",
palette=('coolwarm'),vars=data_short, diag_kind='hist', diag_kws = {'alpha': 1.0, 'edgecolor' : None},
plot_kws = {'alpha': 1.0, 'edgecolor' : None})
#ax._legend.remove()
#ax.fig.legend(labels=range(0,10,1), title = 'Net Feedbacks')
ax.fig.subplots_adjust(top=0.92, bottom=0.08)
# Title
plt.suptitle('5 dominant parameters for LW feedbacks',
size = 28)
# Enregistrer les figures ...............................................................
#g.savefig("/data/home/globc/peatier/figures/Pairplot_HighSoboIndices.png", dpi=None,
# orientation='portrait', bbox_inches='tight', pad_inches=0.1,
# frameon=None, metadata=None)
# Show the graph ..................................
plt.show()
# -
# # Final choice of the 5 parameters kept for ENSEMBLE 3
# +
param_ENSEMBLE3 = ['ALMAVE', 'VVX', 'RSWINHF_ICE', 'FNEBC', 'RQLCR']
# Save the parameter names in a file for the LHS_generate
np.save('/data/home/globc/peatier/CNRMppe/PPE/files/npy/ENSEMBLE3_param_names.npy', param_ENSEMBLE3)
# -
| 7,555 |
/src/tutorials/Tutorial with stdout.ipynb | ededc908066b0536627d8de3999ec115a8785f28 | [] | no_license | Sylhare/Project-P | https://github.com/Sylhare/Project-P | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,125 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Working with stdout
import sys # Import system functionality
sys.stdout.write("A test")
print("?")
sys.stdout = sys.__stdout__ # Redirect stdout to the default system one
print("Something...")
sys.stdout.write("A test")
# It will be printed on the console, the default `stdout`
file = open('output.txt', 'w')
sys.stdout = file
print("I am in the file")
file.close()
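# Note that after `file.close()` above, `sys.stdout` still points at the closed file, so any further print would fail until it is reset. A safer pattern is to scope the redirection, for example with `contextlib.redirect_stdout`:
import contextlib
sys.stdout = sys.__stdout__  # restore the default stream first
with open('output.txt', 'a') as f, contextlib.redirect_stdout(f):
    print("I am appended to the file")  # goes to output.txt
print("Back on the console")  # stdout is restored automatically after the block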
| 644 |
/Real_Time_Dog_Breed_Classification_Kafka_Service.ipynb | 20c5c9519127cbd27ef087f533fe3fdb283c1e72 | [] | no_license | KroneckerDelta/realtime-image-classification-kafka-service | https://github.com/KroneckerDelta/realtime-image-classification-kafka-service | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,962 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers import Input, Dense
from keras.layers.core import Dropout, Activation
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import img_to_array
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.applications import inception_v3
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pickle
import os.path
from keras.applications.inception_v3 import InceptionV3, preprocess_input
import base64
from PIL import Image
from kafka import KafkaConsumer, KafkaProducer
from io import BytesIO
import json
try:
assert os.path.isfile('dogbreed_model.hdf5') and \
os.path.isfile('dogbreed_labels.pickle')
except:
print("Run the Train_Dog_Breed_Model Script first to train the Dog Breed Classification Model")
raise
inception_model = InceptionV3(weights='imagenet', include_top=False)
# +
net_input = Input(shape=(8, 8, 2048))
net = GlobalAveragePooling2D()(net_input)
net = Dense(512, use_bias=False, kernel_initializer='uniform')(net)
net = BatchNormalization()(net)
net = Activation("relu")(net)
net = Dropout(0.5)(net)
net = Dense(256, use_bias=False, kernel_initializer='uniform')(net)
net = BatchNormalization()(net)
net = Activation("relu")(net)
net = Dropout(0.5)(net)
net = Dense(133, kernel_initializer='uniform', activation="softmax")(net)
dog_breed_model = Model(inputs=[net_input], outputs=[net])
dog_breed_model.summary()
dog_breed_model.load_weights('dogbreed_model.hdf5')
# +
with open("dogbreed_labels.pickle", "rb") as f:
dogbreed_labels = np.array(pickle.load(f))
def format_percentage(raw_probability):
return "{0:.2f}%".format(raw_probability * 100)
class LabelRecord(object):
def __init__(self, predictions):
probabilities = np.array(predictions[0])
top_five_breed_index = np.argsort(probabilities)[::-1][:5]
dog_breed_names = dogbreed_labels[top_five_breed_index]
self.label1 = dog_breed_names[0].upper()
self.probability1 = format_percentage(probabilities[top_five_breed_index[0]])
self.label2 = dog_breed_names[1].upper()
self.probability2 = format_percentage(probabilities[top_five_breed_index[1]])
self.label3 = dog_breed_names[2].upper()
self.probability3 = format_percentage(probabilities[top_five_breed_index[2]])
self.label4 = dog_breed_names[3].upper()
self.probability4 = format_percentage(probabilities[top_five_breed_index[3]])
self.label5 = dog_breed_names[4].upper()
self.probability5 = format_percentage(probabilities[top_five_breed_index[4]])
def toJSON(self):
return json.dumps(self, default=lambda obj: obj.__dict__, sort_keys=True, indent=4)
# -
# Kafka Service
consumer = KafkaConsumer('classificationimage', group_id='group1')
producer = KafkaProducer(bootstrap_servers='localhost:9092')
for message in consumer:
# transform image
image_data = base64.b64decode(message.value.decode())
pil_image = Image.open(BytesIO(image_data))
image_array = img_to_array(pil_image)
image_batch = np.expand_dims(image_array, axis=0)
processed_image = preprocess_input(image_batch.copy())
# make predictions
inception_v3_predictions = inception_model.predict(processed_image)
predictions = dog_breed_model.predict(inception_v3_predictions)
# transform predictions to json
label = LabelRecord(predictions)
label_json = label.toJSON()
# send encoded label
producer.send('classificationlabel', label_json.encode())
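# The consumer loop above blocks indefinitely, so a quick end-to-end test is easiest from a separate notebook or process: publish a base64-encoded JPEG to the topic the service listens on. A minimal sketch (the image path 'test_dog.jpg' is only a placeholder; broker and topic names mirror the code above):
test_producer = KafkaProducer(bootstrap_servers='localhost:9092')
with open('test_dog.jpg', 'rb') as image_file:
    encoded_image = base64.b64encode(image_file.read())
test_producer.send('classificationimage', encoded_image)
test_producer.flush()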
| 3,984 |
/miyamoto/FK_IK_furukawa-Copy1.ipynb | 072cabfb18d65d8c395047d141041b76fcbd4120 | [] | no_license | maeda-lab/Scaledown | https://github.com/maeda-lab/Scaledown | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,932,043 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inverse kinematics of a differential-joint manipulator
#
# Created by Masahiro Furukawa, Aug 18, 2020
#
# ![image.png](attachment:image.png)
# +
#Reference URL -> https://qiita.com/tibigame/items/61cecf86fc978628bfee
#Reference book -> Paul, "Robot Manipulators"
import numpy as np
import sympy as sym
sym.init_printing()
Pi = sym.S.Pi # the constant pi
# sympy's pi is preferable here: it keeps expressions exact, so quantization error does not grow (numpy's pi is just as inexact as math.pi)
import math
pi = math.pi
# Joint angle variables
(J_1,J_2,J_3,J_4,J_5,J_6) = sym.symbols('J_1,J_2,J_3,J_4,J_5,J_6')
# Link parameters
(a_1,a_2,a_3,d_4) = sym.symbols('a_1,a_2,a_3,d_4')
# Link parameters (generic symbols)
(j,a,d,alpha) = sym.symbols('j,a,d,alpha')
# T6
(n_x, n_y, n_z, o_x, o_y, o_z, a_x, a_y, a_z, p_x, p_y, p_z) = sym.symbols('n_x, n_y, n_z, o_x, o_y, o_z, a_x, a_y, a_z, p_x, p_y, p_z')
# +
#Shorthand helpers for sin and cos
def S(a):
return sym.sin(a)
def C(a):
return sym.cos(a)
# +
#Rotation and translation matrices
def rotx(a):
return sym.Matrix([[1, 0, 0, 0], [0, C(a), -S(a), 0], [0, S(a), C(a), 0], [0, 0, 0, 1]])
def roty(a):
return sym.Matrix([[C(a), 0, S(a), 0], [0, 1, 0, 0], [-S(a), 0, C(a), 0], [0, 0, 0, 1]])
def rotz(a):
return sym.Matrix([[C(a), -S(a), 0, 0], [S(a), C(a), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
def trans(x, y, z):
return sym.Matrix([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]])
# DH matrix
def DH(j, alpha, a, d):
return rotz(j)*trans(a,0,d)*rotx(alpha)
# inverse DH matrix
def DHi(j, alpha, a, d):
return rotx(-alpha)*trans(-a,0,-d)*rotz(-j)
# -
# Function that solves equation eq for the given target and displays every solution
def obs(eq, target):
sol = sym.solve(eq, target)
for i in range(len(sol)):
display( sym.Eq(target, sol[i]))
print( str(len(sol)) +' equation(s) in total')
return sol
# ### Frame-to-frame relations based on the DH convention
#
# |Frame i|Rotate by angle θ_i about Z_i-1|Rotate by twist α_i about X_i|Translate by a_i along the rotated X_i-1 (=X_i)|Translate by d_i along Z_i-1|
# |-|-|-|-|-|
# |1|$J_1$|$\pi/2$|$a_1$|0|
# |2|$J_2+\pi/2$|0|$a_2$|0|
# |3|$J_3-J_2$|$\pi/2$|$a_3$|0|
# |4|$J_4$|$-\pi/2$|0|$d_4$|
# |5|$J_5$|$\pi/2$|0|0|
# |6|$J_6$|0|0|0|
# ### Transformation matrices A
# +
easy=False
if(easy):
A1=sym.trigsimp( DH (J_1, Pi/2, 0, 0))
A3=sym.trigsimp( DH (J_3 - J_2, Pi/2, 0, 0))
A1i=sym.trigsimp( DHi (J_1, Pi/2, 0, 0))
A3i=sym.trigsimp( DHi (J_3 - J_2, Pi/2, 0, 0))
else:
A1=sym.trigsimp( DH (J_1, Pi/2, a_1, 0))
A3=sym.trigsimp( DH (J_3 - J_2, Pi/2, a_3, 0))
# inverse matrix
A1i=sym.trigsimp( DHi (J_1, Pi/2, a_1, 0))
A3i=sym.trigsimp( DHi (J_3 - J_2, Pi/2, a_3, 0))
A2=sym.trigsimp( DH (J_2+ Pi/2, 0, a_2, 0))
A4=sym.trigsimp( DH (J_4, -Pi/2, 0, d_4))
A5=sym.trigsimp( DH (J_5, Pi/2, 0, 0))
A6=sym.trigsimp( DH (J_6, 0, 0, 0))
# inverse matrix
A2i=sym.trigsimp( DHi (J_2+ Pi/2, 0, a_2, 0))
A4i=sym.trigsimp( DHi (J_4, -Pi/2, 0, d_4))
A5i=sym.trigsimp( DHi (J_5, Pi/2, 0, 0))
A6i=sym.trigsimp( DHi (J_6, 0, 0, 0))
# -
A3
# Check that multiplying a matrix by its inverse gives the identity matrix
ret = A1i*A1
sym.trigsimp(ret)
# # Inverse kinematics
# ![image.png](attachment:image.png)
# ![image.png](attachment:image.png)
T6=sym.Matrix([[n_x, o_x, a_x, p_x], [n_y, o_y, a_y, p_y], [n_z, o_z, a_z, p_z], [0, 0, 0, 1]])
T6
# forward kinematics
A56 = sym.trigsimp( A5*A6 )
A456 = sym.trigsimp( A4*A5*A6 )
A3456 = sym.trigsimp( A3*A4*A5*A6 )
A23456 = sym.trigsimp( A2*A3*A4*A5*A6 )
T = sym.trigsimp( A1*A2*A3*A4*A5*A6 )
T
# ### Generate C source code for the forward kinematics
# +
# Masahiro Furukawa
# Aug, 17, 2020
#
# reference : https://qiita.com/JmpM/items/4bea4997aaf406cca3b4
# Generate the C source
for ii in range(4):
for jj in range(4):
idx = jj*4+ii
code = sym.ccode(T[idx],assign_to=('Trans['+str(jj)+']['+str(ii)+']'), standard='C89')
print(code)
print()
# -
# # inverse kinematics
# $$
# % reference : https://qiita.com/namoshika/items/63db972bfd1030f8264a
# % whitespace does not affect rendering; comments start with "%"
# % subscripts are written "_a", superscripts "^a"
# % line breaks are written "\\"
# {\boldsymbol{A}_{1}}^{-1} \boldsymbol{T}_6 =
# \boldsymbol{A}_2
# \boldsymbol{A}_3
# \boldsymbol{A}_4
# \boldsymbol{A}_5
# \boldsymbol{A}_6 \quad (3.75)\\
# {\boldsymbol{A}_{1}}^{-1} \boldsymbol{T}_6 =
# ^{1}\boldsymbol{T}_6 \quad (3.76)
# % wrap multi-character tokens in {...} to treat them as one element
# % spaces are written "\quad"
# $$
T16 = sym.trigsimp( A1i*T6 ) # eq(3.70)
T26 = sym.trigsimp( A2i*A1i*T6 ) # eq(3.71)
T36 = sym.trigsimp( A3i*A2i*A1i*T6 ) # eq(3.72)
T46 = sym.trigsimp( A4i*A3i*A2i*A1i*T6 ) # eq(3.73)
T56 = sym.trigsimp( A5i*A4i*A3i*A2i*A1i*T6 ) # eq(3.74)
# Left hand of (3.76)
A1iT6 = T16
A1iT6
# Right hand of (3.76)
A23456
# $$
# \displaystyle A_{1i}T_{6} = A_{23456} \\
# $$
# from which the following set of equations is obtained:
for idx in range(12):
display(sym.simplify ( sym.expand( sym.Eq( A1iT6[idx], A23456[idx])) ) )
# # J1
# Extracting only the equations that involve the position:
for idx in [3,7,11]:
    display(sym.simplify ( sym.expand( sym.Eq( A1iT6[idx], A23456[idx])) ) )
# gives a system of three simultaneous equations. The last of these is:
sym.Eq(A1iT6[11] , A23456[11])
# ## If p_x = 0, then immediately
# +
sol = sym.solve(p_y * C(J_1), J_1)
for i in range(len(sol)):
display( sym.simplify(sym.Eq(J_1, sol[i])))
print( str(len(sol)) +' equation(s) in total')
# +
# Restricting to the joint range (-PI < J1 < PI), if p_x = 0 then immediately
sol = sym.solve(p_y * C(J_1), J_1)
for i in range(len(sol)):
if -Pi < sol[0] and sol[i] < Pi:
display( sym.simplify(sym.Eq(J_1, sol[i])), 'where p_x = 0')
# -
# ## If p_y = 0, then immediately
# +
sol = sym.solve(p_x * S(J_1), J_1)
for i in range(len(sol)):
display( sym.simplify(sym.Eq(J_1, sol[i])))
print( str(len(sol)) +' equation(s) in total')
# +
# is obtained. Similarly, restricting to the joint range (-PI < J1 < PI), if p_y = 0 then immediately
sol = sym.solve(p_x * S(J_1), J_1)
for i in range(len(sol)):
if -Pi < sol[0] and sol[i] < Pi:
display( sym.simplify(sym.Eq(J_1, sol[i])) , 'where p_y = 0')
# -
# ## If p_x != 0 is guaranteed,
# both sides can be divided by p_x.
# Moreover, if p_x != 0 then, as noted above, C(J1) != 0 necessarily holds, so both sides may also be divided by C(J1):
eq =sym.simplify((A1iT6[11] - A23456[11])/p_x/C(J_1))
display(eq)
# Solving this equation for J1:
sol = sym.solve(eq, J_1)
for i in range(len(sol)):
display( sym.simplify(sym.Eq(J_1, sol[i])) , 'where p_x != 0')
sol_J1 = sol
# which is the analytic solution for J1.
# # J2
# Next, focusing on the second of the position equations above:
sym.Eq(A1iT6[7] , A23456[7])
# Solving this equation for C(J2), so that S(J2) can be substituted in later:
eq =A1iT6[7] - A23456[7]
target = C(J_2)
CJ2 = obs(eq, target)[0]
# Then, to obtain S(J2) via C(J2)**2 + S(J2)**2 = 1, restate the target equation:
eq = sym.Eq(A1iT6[3] , A23456[3])
display(eq)
# Since J1 is already known, denote the left-hand side of the equation above by L.
sub = A1iT6[3]
L = sym.symbols('L')
eq = eq.subs(sub, L)
display(sym.trigsimp(eq))
# Rearranging for S(J2):
target = S(J_2)
SJ2 = obs(eq, target)[0]
# Now, using S(J2)**2 + C(J2)**2 = 1,
eq = sym.trigsimp(sym.Eq((SJ2**2+CJ2**2),1))
display(eq)
display(sym.collect(sym.trigsimp(eq.expand()), [C(J_3),S(J_3)]))
# Solving this equation for J3:
target = J_3
sol_J3 = obs(eq, target)
sol_L = L
# +
# Masahiro Furukawa
# Aug, 21, 2020
#
# reference : https://qiita.com/JmpM/items/4bea4997aaf406cca3b4
sol_L = sub
# Generate C source for J3
code = sym.ccode(sub, assign_to=('L'), standard='C89')
print("// constant L \n" + code + "\n")
for ii in range(len(sol_J3)):
code = sym.ccode(sol_J3[ii], assign_to=('J3'), standard='C89')
print("// Solusion #"+ str(ii) +"\n" + code + "\n")
# -
# # J2
# Next, solve for J2. Since C(J2) has already been obtained as CJ2, that equation gives:
eq = sym.Eq(C(J_2), CJ2)
display(eq)
sol_J2 = obs(eq, J_2)
# Generate C source for J2
for ii in range(len(sol_J3)):
code = sym.ccode(sol_J2[ii], assign_to=('J2'), standard='C89')
print("// Solusion #"+ str(ii) +"\n" + code + "\n")
# The derivation above yields J1, J2, and J3.
# # J5
A3iA2iA1iT6 = A3i*A2i*A1i*T6
for idx in range(12):
display(sym.simplify ( sym.expand( sym.Eq( A3iA2iA1iT6[idx], A456[idx])) ) )
# The 11th of the equations above is:
idx=10
eq = sym.simplify ( sym.Eq( A3iA2iA1iT6[idx], A456[idx]))
display(eq)
sol_J5 = obs(eq, J_5)
# Generate C source for J5
for ii in range(len(sol_J5)):
code = sym.ccode(sol_J5[ii], assign_to=('J5'), standard='C89')
print("// Solusion #"+ str(ii) +"\n" + code + "\n")
# # J4
# The 7th of the equations above is:
idx=6
eq = sym.simplify ( sym.Eq( A3iA2iA1iT6[idx], A456[idx]))
display(eq)
sol_J4 = obs(eq, J_4)
# Generate C source for J4
for ii in range(len(sol_J4)):
code = sym.ccode(sol_J4[ii], assign_to=('J4'), standard='C89')
print("// Solusion #"+ str(ii) +"\n" + code + "\n")
# # J6
A5iA4iA3iA2iA1iT6 = A5i*A4i*A3i*A2i*A1i*T6
for idx in range(12):
display(sym.simplify ( sym.expand( sym.Eq( A5iA4iA3iA2iA1iT6[idx], A6[idx])) ) )
# The 6th of the equations above is:
idx=5
eq = sym.simplify ( sym.Eq( A5iA4iA3iA2iA1iT6 [idx], A6[idx]))
display(eq)
sol_J6 = obs(eq, J_6)
# Generate C source for J6
for ii in range(len(sol_J6)):
code = sym.ccode(sol_J6[ii], assign_to=('J6'), standard='C89')
print("// Solusion #"+ str(ii) +"\n" + code + "\n")
# From the above, J4, J5, and J6 are obtained.
# # Simulation experiment
# +
def substitute(f, j1,j2,j3,j4,j5,j6):
# Link Length in [mm]
a_1_ = 30.0
a_2_ = 120.0
a_3_ = 20.0
d_4_ = 129.0
# substitute real values for the symbolic variables
f =f.subs([
(J_1, j1), (J_2, j2), (J_3, j3), (J_4, j4), (J_5, j5), (J_6, j6),
(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_) ])
f = sym.N(f)
x.append(f[3])
y.append(f[7])
z.append(f[11])
return f
def cal_FK(j1, j2, j3, j4, j5, j6):
substitute(A1, j1, j2, j3, j4, j5, j6)
substitute(A1*A2, j1, j2, j3, j4, j5, j6)
substitute(A1*A2*A3, j1, j2, j3, j4, j5, j6)
substitute(A1*A2*A3*A4, j1, j2, j3, j4, j5, j6)
substitute(A1*A2*A3*A4*A5, j1, j2, j3, j4, j5, j6)
# display(substitute(A1*A2*A3*A4*A5*A6, j1, j2, j3, j4, j5, j6) )
# test for FK
x=[]
y=[]
z=[]
# cal_FK(Pi/5,-Pi/4,-Pi/4,Pi/5,Pi/5,Pi/3)
cal_FK(0,0,0,0,0,0)
print(x)
print(y)
print(z)
# # test for IK
# N=[0, 0, 1]
# O=[0,-1, 0]
# A=[1, 0, 0]
# P=[159,0,140]
# # IK
# (ij1, ij2, ij3, ij4, ij5, ij6) = cal_IK(N,O,A,P)
# -
# # Forward kinematics animation
# +
# Masahiro Furukawa
# Aug 23, 2020
# # %matplotlib inline  (inline display does not animate)
# %matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(9,9))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224) #, projection='3d'
ax1.set_title("Top View (X-Y)")
ax2.set_title("Front View (X-Z)")
ax3.set_title("Right View (Y-Z)")
def draw_(ax, x_, y_, a_=0.2):
for i, X in enumerate(x_[:-1]):
ax.plot([x_[i],x_[i+1]],[y_[i],y_[i+1]], "o-", color=plt.cm.tab10.colors[i], linewidth=2, markersize=3,alpha=a_)
for i, X in enumerate(x_):
s = ' ' + str(i)
# ax.text(x_[i],y_[i], s, color="k")
def draw(x,y,z,a_):
# Orthographic three-view drawing (right-handed frame)
draw_(ax1,x,y,a_)
draw_(ax2,x,z,a_)
draw_(ax3,y,z,a_)
def set_lim_(ax,lx,ly):
ax.set_xlabel(lx)
ax.set_ylabel(ly)
ax.set_aspect('equal')
ax.grid(True)
def set_lim():
set_lim_(ax1,'X','Y')
ax1.set_xlim([-400,400])
ax1.set_ylim([-400,400])
set_lim_(ax2,'X','Z')
ax2.set_xlim([-400,400])
ax2.set_ylim([ -10,400])
set_lim_(ax3,'Y','Z')
ax3.set_xlim([-400,400])
ax3.set_ylim([ -10,400])
set_lim()
for i in range(10):
# origin point
x=[0]
y=[0]
z=[0]
cal_FK(Pi/15*0, -Pi/20*i, -Pi/20, Pi/5,Pi/5,Pi/3)
draw(x,y,z,float(i)/10)
plt.pause(.5)
# +
# Masahiro Furukawa
# Aug 23, 2020
# # %matplotlib inline  # inline display does not animate
# %matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(9,9))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224) #, projection='3d'
ax1.set_title("Top View (X-Y)")
ax2.set_title("Front View (X-Z)")
ax3.set_title("Right View (Y-Z)")
def cal_IK(n,o,a,p):
sj1 = sym.N( sol_J1[0].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_)]))
ll = sym.N( sol_L.subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1)]))
sj3 = sym.N( sol_J3[1].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (L,ll),(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1), (L,ll)]))
sj2 = sym.N( sol_J2[0].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (L,ll),(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1), (J_3, sj3)]))
sj4 = sym.N( sol_J4[0].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (L,ll),(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1), (J_2, sj2), (J_3, sj3)]))
sj5 = sym.N( sol_J5[0].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (L,ll),(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1), (J_2, sj2), (J_3, sj3), (J_4, sj4)]))
sj6 = sym.N( sol_J5[0].subs([ (n_x,n[0]), (n_y,n[1]), (n_z,n[2]), (o_x,o[0]), (o_y,o[1]), (o_z,o[2]), (a_x,a[0]), (a_y,a[1]), (a_z,a[2]), (p_x,p[0]), (p_y,p[1]), (p_z,p[2]) , (L,ll),(a_1, a_1_ ), (a_2 , a_2_), (a_3, a_3_), (d_4, d_4_), (J_1, sj1), (J_2, sj2), (J_3, sj3), (J_4, sj4), (J_5, sj5)]))
return (sj1, sj2, sj3, sj4, sj5, sj6)
set_lim()
for i in range(10):
# origin point
x=[0]
y=[0]
z=[0]
N=[0, 0, 1]
O=[0,-1, 0]
A=[1, 0, 0]
P=[200 ,0,110-11*i] # p_x = 159, P_z = 140
# IK
(ij1, ij2, ij3, ij4, ij5, ij6) = cal_IK(N,O,A,P)
# FK
cal_FK(ij1, ij2, ij3, ij4, ij5, ij6)
draw(x,y,z,float(i)/10)
# plt.pause(.5)
plt.show()
| 14,398 |
/.ipynb_checkpoints/Lesson10 File IO-checkpoint.ipynb | 6300fa0b312aea569ecf58d205c00beb664a91d6 | [
"Apache-2.0"
] | permissive | fzhcary/TCEF_Python_2021 | https://github.com/fzhcary/TCEF_Python_2021 | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 339,316 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gabselbach/TCC-implementacoes/blob/master/AcentuaF%C3%A1cil.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="h1S30k1xK4FS"
# # import
# + id="9sIveSgeK0Xx"
# !pip3 install Dicio
import os
import re
import pandas as pd
import time
import requests
import spacy
import nltk
from spacy.tokenizer import Tokenizer
from bs4 import BeautifulSoup
from unicodedata import normalize
from re import match as re_match
from re import compile as re_compile
import spacy
import spacy.cli
import ast
from dicio import Dicio
dicio = Dicio()
nltk.download('stopwords')
spacy.cli.download('pt_core_news_sm')
nlp = spacy.load("pt_core_news_sm")
from openpyxl.workbook import Workbook
nltk.download('stopwords')
# !pip install python-Levenshtein
from Levenshtein import distance
# + [markdown] id="7s1OGAnDLCjI"
# # Lookup in DICIO
# + id="OOmuh1ZRLE--"
def isdigit(s):
comp = re_compile("^\d+?\.\d+?$")
if comp.match(s) is None:
return s.isdigit()
return True
def faz_busca2(token):
acento = re.compile('ร |[รก-รบ]|รช|รด|รฃ|รต|รญ')
dataNova = []
for k in token:
t = 0
busca = k.lower()
if(len(busca) > 2 and (not isdigit(busca))):
busca = re.sub("[?|;|,*|.*]", "", busca)
page = requests.get('https://www.dicio.com.br/'+busca+'/')
soup = BeautifulSoup(page.text, 'html.parser')
pDicio = soup.find('h1').text
if(not acento.search(pDicio) or pDicio==busca or pDicio.lower()=="nรฃo encontrada"):
temp = {
'PALAVRAANT': k.lower(),
'PALAVRADICIO': pDicio.lower(),
'SILABA':'',
'CLASSE': '',
'FORTE': '',
'MONOSSILABA': 0,
'ACENTO': 0,
'CORREC':0,
'TIPOCORREC':0,
'EXCEรAO':0,
'REGRAVERB':0,
'REGRANaoVERB': 0
}
dataNova.append(temp)
else:
texto = soup.find_all('p', {'class': 'adicional'})
try:
n = re.search(r"silรกbica: .+</b>", str(texto))
if(n == None):
temp = {
'PALAVRAANT': k.lower(),
'PALAVRADICIO': pDicio.lower(),
'SILABA': 'FALTA',
'CLASSE': '',
'FORTE': '',
'MONOSSILABA': 0,
'ACENTO': 1,
'CORREC': 0,
'TIPOCORREC': 'DICIO+SEPARADOR',
'EXCEรAO': 0,
'REGRAVERB': 0,
'REGRANaoVERB': 0
}
dataNova.append(temp)
else:
novo = str(re.sub('<[^>]+?>', '', n.group(0))).split(':')
silaba = novo[1]
silsepara = silaba.split('-')
if(len(silsepara) == 1):
mono = 1
else:
mono = 0
oxi = ''
paro = ''
propa = ''
classe = ''
forte = ''
aux = 0
for j in range(len(silsepara)-1, -1, -1):
aux += 1
if(acento.search(silsepara[j]) and aux == 1):
classe = 'oxรญtona'
forte = silsepara[j]
elif(acento.search(silsepara[j]) and aux == 2):
classe = 'paroxรญtona'
forte = silsepara[j]
elif(acento.search(silsepara[j]) and aux == 3):
classe = 'proparoxรญtona'
forte = silsepara[j]
temp = {
'PALAVRAANT': k.lower(),
'PALAVRADICIO': pDicio.lower(),
'SILABA': silaba,
'CLASSE': classe,
'FORTE': forte,
'MONOSSILABA': mono,
'ACENTO': 1,
'CORREC': 1,
'TIPOCORREC': 'dicionรกrio',
'EXCEรAO': 0,
'REGRAVERB': 0,
'REGRANaoVERB': 0
}
dataNova.append(temp)
except:
print("")
else:
temp = {
'PALAVRAANT': k.lower(),
'PALAVRADICIO': 'NAN',
'SILABA': '',
'CLASSE': '',
'FORTE': '',
'MONOSSILABA': '',
'ACENTO': 0,
'CORREC': 0,
'TIPOCORREC': 'NAN',
'EXCEรAO': 0,
'REGRAVERB': 0,
'REGRANaoVERB': 0
}
dataNova.append(temp)
return dataNova
# + [markdown] id="rUS4rdtcLF9z"
# # Lookup in VOP
# + id="P5zRJc3nLKeo"
def faz_busca(token):
dataNova = []
valorTime = 0
count = 0
acento = re.compile('ร |[รก-รบ]|รช|รด|รฃ|รต|รญ')
for k in token:
t = 0
busca = normalize('NFKD', str(k).lower()).encode(
'ASCII', 'ignore').decode('ASCII')
if(len(busca) > 2 and (not isdigit(busca))):
page = requests.get(
'http://www.portaldalinguaportuguesa.org/index.php?action=syllables&act=list&search='+busca)
soup = BeautifulSoup(page.text, 'html.parser')
palavras = soup.find_all('td', {'title': 'Palavra'})
classe = ''
if(not palavras or len(k) == 1):
temp = {
'PALAVRAANT': k.lower(),
'PALAVRAVOP': 'NAN',
'SILABA': 'NAN',
'CLASSE': 'NAN',
'FORTE': 'NAN',
'MONOSSILABA': 0,
'ACENTO': 1,
'CORREC':0,
'TIPOCORREC':''
}
dataNova.append(temp)
else:
aux = 0
for i in palavras:
link = re.sub('<[^>]+?>', '', str(i.find('a')))
pala = i.text.replace(" ", '').split('\n')[0]
palavraNormalizada = normalize('NFKD', link).encode(
'ASCII', 'ignore').decode('ASCII').lower()
if(palavraNormalizada == busca):
if(not acento.search(str(link)) or k.lower()==str(link).lower()):
t = 1
temp = {
'PALAVRAANT': k.lower(),
'PALAVRAVOP': 'NAN',
'SILABA': 'NAN',
'CLASSE': 'NAN',
'FORTE': 'NAN',
'MONOSSILABA': 0,
'ACENTO': 0,
'CORREC':0,
'TIPOCORREC':''
}
dataNova.append(temp)
break
forte = re.sub('<[^>]+?>', '', str(i.find('u')))
silaba = pala.split(')')[1]
silsepara = silaba.split('ยท')
if(len(silsepara) == 1):
mono = 1
else:
mono = 0
oxi = ''
paro = ''
propa = ''
for j in range(len(silsepara)-1, -1, -1):
if(aux == 0):
oxi = silsepara[j] + oxi
elif(aux == 1):
paro = silsepara[j] + paro
elif(aux == 2):
propa = silsepara[j] + propa
aux += 1
if(oxi == forte):
classe = 'oxรญtona'
elif(paro == forte):
classe = 'paroxรญtona'
elif(propa == forte):
classe = 'proparoxรญtona'
temp = {
'PALAVRAANT': k.lower(),
'PALAVRAVOP': link.lower(),
'SILABA': silaba,
'CLASSE': classe,
'FORTE': normalize('NFKD', str(forte)).encode('ASCII', 'ignore').decode('ASCII'),
'MONOSSILABA': mono,
'ACENTO': 1,
'CORREC':1,
'TIPOCORREC':'dicionรกrio'
}
dataNova.append(temp)
t = 1
break
if(t == 1):
break
if(t != 1):
temp = {
'PALAVRAANT': k.lower(),
'PALAVRAVOP': 'NAN',
'SILABA': 'NAN',
'CLASSE': 'NAN',
'FORTE': 'NAN',
'MONOSSILABA': 0,
'ACENTO': 1,
'CORREC':0,
'TIPOCORREC':''
}
dataNova.append(temp)
else:
if(isdigit(busca)):
x = 1
else:
temp = {
'PALAVRAANT': k.lower(),
'PALAVRAVOP': 'NAN',
'SILABA': 'NAN',
'CLASSE': 'NAN',
'FORTE': 'NAN',
'MONOSSILABA': 0,
'ACENTO': 0,
'CORREC':0,
'TIPOCORREC':''
}
dataNova.append(temp)
return dataNova
# + [markdown] id="a4lex5EVLRDV"
# # Crawler for the syllable separator and a function to get the stressed syllable
# + id="4CfMS14vLe1-"
def separador(dicionario):
for (l,row) in dicionario.iterrows():
if(row['SILABA']=='FALTA' and (row['PALAVRADICIO']!='NAN' and row['PALAVRADICIO']!='nรฃo encontrada') ):
page = requests.get("https://www.separaremsilabas.com/index.php?lang=index.php&p="+row['PALAVRADICIO']+"&button="+"Separa%C3%A7%C3%A3o+das+s%C3%ADlabas")
soup = BeautifulSoup(page.text, 'html.parser')
texto = str(soup.find('font', {'color': '#0018BF'}))
texto = re.sub('<[^>]+?>', '',texto)
dicionario.at[l,'SILABA']=texto
dicionario.at[l,'ACENTO']=1
dicionario.at[l,'CORREC']=1
dicionario.at[l,'TIPOCORREC']='dicionario+separador'
temp = pegaForte(texto)
dicionario.at[l,'CLASSE']=temp['classe']
dicionario.at[l,'FORTE']=temp['forte']
return dicionario
# + id="yFIaAZ1ZLgFr"
def pegaForte(silaba):
acento = re.compile('ร |[รก-รบ]|รช|รด|รฃ|รต|รญ')
silsepara = silaba.split('-')
oxi = ''
paro = ''
propa = ''
forte=''
classe=''
aux=0
for j in range(len(silsepara)-1, -1, -1):
aux += 1
if(acento.search(silsepara[j]) and aux==1):
classe = 'oxรญtona'
forte = silsepara[j]
elif(acento.search(silsepara[j]) and aux==2):
classe = 'paroxรญtona'
forte = silsepara[j]
elif(acento.search(silsepara[j]) and aux==3):
classe = 'proparoxรญtona'
forte = silsepara[j]
t = {
'forte':forte,
'classe':classe
}
return t
| 12,248 |
/lab16thorello/analisi.ipynb | 52f97c4889ef840720b369430987919d66798efd | [
"MIT"
] | permissive | grigolet/laboratorio-plasmi-I | https://github.com/grigolet/laboratorio-plasmi-I | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 364,903 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
from utilities import *
import pprint
pp = pprint.PrettyPrinter(indent=4)
# %matplotlib inline
# ## Filament characteristic
#
# As can be seen from the plot below, the filament does not show a linear characteristic. A single resistance cannot really be estimated, because it varies with the supplied current.
# Possible causes:
#
# * heating of the filament?
plot_data('data/R_Filamento_15122016.txt', x_label='I(A)', y_label='V(V)', title='Caratteristica filamento')
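# Since a single resistance value is not meaningful, one can instead look at the differential resistance dV/dI along the curve. A rough sketch, assuming the data file simply holds two columns (I in A, V in V) readable with numpy:
# +
I_fil, V_fil = np.loadtxt('data/R_Filamento_15122016.txt', unpack=True)
R_diff = np.gradient(V_fil, I_fil)  # local slope dV/dI
plt.plot(I_fil, R_diff, 'o-')
plt.xlabel('I (A)')
plt.ylabel('dV/dI (Ohm)')
plt.title('Differential resistance of the filament')
# -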
# ## Plasma characteristic and hysteresis plot
#
# We show the various curves obtained in the laboratory
# +
annotation_1 = """
$I_{bobina}$ = 10 A
$V_{bobina}$ = 3 V
$I_{filamento}$ = 60 A
$V_{filamento}$ = 15.35 V
P = 3.2e-4 mbar
"""
annotation_2 = """
$I_{bobina}$ = 253 A
$V_{bobina}$ = 61.2 V
$I_{filamento}$ = 60 A
$V_{filamento}$ = 15.10 V
P = 3.1e-4 mbar
"""
annotation_3 = """
$I_{bobina}$ = 550 A
$V_{bobina}$ = 137.7 V
$I_{filamento}$ = 60 A
$V_{filamento}$ = 15.10 V
P = 3.6e-4 mbar
"""
annotation_4 = """
$I_{scarica}$ = 0.94 A
$V_{scarica}$ = 100 V
$I_{filamento}$ = ? A
$V_{filamento}$ = ? V
P = 3.2e-4 mbar
"""
caratteristica_1 = plot_caratteristica_plasma('data/00115122016_discesa.txt', 'data/00115122016_salita.txt',
title='Caratteristica di plasma', notes=annotation_1)
caratteristica_2 = plot_caratteristica_plasma('data/00215122016_discesa.txt', 'data/00215122016_salita.txt',
title='Caratteristica di plasma', notes=annotation_2)
caratteristica_3 = plot_caratteristica_plasma('data/00315122016_discesa.txt', 'data/00315122016_salita.txt',
title='Caratteristica di plasma', notes=annotation_3)
caratteristica_4 = plot_caratteristica_plasma('data/00415122016_discesa.txt', 'data/00415122016_salita.txt',
title='Caratteristica di campo', x_label='$B_{campo}(A)$',
y_label='$I_{plasma} (A)$', notes=annotation_4)
# -
| 2,382 |
/Part2.ipynb | 75e51e33dcc26fd170a90477c636eafeee48239e | [] | no_license | fairfield-university-ba505-fall2018/healthstats-project-parts-1-to-4-Kerry-Clarke | https://github.com/fairfield-university-ba505-fall2018/healthstats-project-parts-1-to-4-Kerry-Clarke | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 12,195 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Health Stats Part 2: Dictionaries
# <!--- Paste in your explanation of Waist-to-Hip ratios from Part 1. --->
# EDIT THIS MARKDOWN CELL
# ## Source Data
# <!--- Paste in your data definitions from Part 1. Then Try to organize them into a table in Markdown. --->
# EDIT THIS MARKDOWN CELL
# ## Data Import
# +
# Goal: Extract the data from the file
# opens the w2h_data.csv for reading
f = open("w2h_data.csv", "r")
# loads the file into a list of strings, one string per line
raw_lines = list(f)
# closes the file
f.close()
# +
# Goal: Scrub and convert the data, loading it into a new list called rows
# Strips out newline '\n' characters and converts to a list
raw_rows = [r.rstrip('\n').split(',') for r in raw_lines] # <--- Whoa. Why does this work?
# Creates a new list, starting with just the column names
rows = list()
rows.append(raw_rows[0])
# Convert each raw_row, starting with the second
columns = ["ID", "Waist", "Hip", "Gender"]
for raw_row in raw_rows[1:]:
# Note: the values in the raw_row list are all strings.
# Create a new list called row that converts each item in raw_row to the right data type
row = [int(raw_row[0]),int(raw_row[1]),int(raw_row[2]),raw_row[3]]
record = dict(zip(columns, row))
# Append the new row to the rows list
rows.append(record)
# from here on out use the rows list instead of raw_rows or raw_lines
rows
# -
# ## Calculations
# +
# Goal: For each row of data calculate and store the w2h_ratio and shape.
# For each row in the rows list, calculate the waist to hips ratio and shape
for row in rows[1:]:
# Calculate the w2h_ratio
w2h_ratio = float(row["Waist"])/float(row["Hip"])
# Based on the ratio and the gender, set the variable shape to either 'apple' or 'pear'
if ((row["Gender"]=='M' and w2h_ratio > 0.9) or (row["Gender"] == 'F' and w2h_ratio > 0.8)) :
shape = "Apple"
else:
shape = "Pear"
# Add the new data to the end of the row
    row["w2h_ratio"] = w2h_ratio
    row["shape"] = shape  # store the two computed values as new keys in this person's record
rows
# -
# ## Output
# +
# Goal: pretty print the rows as an HTML table
# Note: this works, but we can do this much better with pandas
html_table = '<table><tr><th>'
html_table += "</th><th>".join(rows[0])
html_table += '</th></tr>'
for row in rows[1:]:
html_table += "<tr><td>"
html_table += "</td><td>".join(str(col) for col in row.values())
html_table += "</td></tr>"
html_table += "</table>"
from IPython.display import HTML, display
display(HTML(html_table))
# -
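# As the comment above notes, the same table can be produced much more simply with pandas. A minimal sketch (pandas is assumed to be available; it is not used elsewhere in this notebook):

# +
import pandas as pd

# rows[0] holds the column names, rows[1:] are dict records
df = pd.DataFrame(rows[1:])
display(HTML(df.to_html(index=False)))
# -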
# Note: the top of this notebook was truncated in the source; the imports and the signature of
# lowercase_cleaned_data() below are reconstructed from how they are used further down, and
# helpers such as rename_columns, getSenses, getNewSenses and newDictionary are defined in the
# missing portion. The name `punctuations` is an assumption for the truncated first assignment.
import string

import nltk
import pandas as pd
from lxml import objectify
from nltk.corpus import stopwords, wordnet


# function to lowercase a text column and strip punctuation and digits
def lowercase_cleaned_data(dataset, colname):
    punctuations = string.punctuation.replace('%', '')
    dataset["lowercase_cleaned"] = dataset[colname].apply(lambda words: ' '.join(word.lower().translate(str.maketrans(string.punctuation, ' '*len(string.punctuation))) for word in words.split()))
    dataset["lowercase_cleaned"] = dataset["lowercase_cleaned"].str.replace('\d+', '')
    return dataset
#retrieving pos for the words and lemmatisation
def retreive_pos_wordnet(sentence):
lemmatizer = nltk.stem.WordNetLemmatizer()
sentence = ' '.join(word.lower().translate(str.maketrans(string.punctuation, ' '*len(string.punctuation))) for word in sentence.split())
list_words = sentence.split()
final_list = []
for i in range (len(list_words)):
tag = nltk.pos_tag(list_words)[i][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
final_tag = tag_dict.get(tag, wordnet.NOUN)
lemmatized_word = lemmatizer.lemmatize(list_words[i],final_tag)
final_list.append([list_words[i],final_tag,lemmatized_word])
return final_list
# function to remove stop words and words with length < 3
def remove_stop_words_from_pos(pos_input_list):
return_list = []
stop = stopwords.words('english')
for pos in pos_input_list:
if (pos[2] not in stop and (len(pos[2])>2 or pos[2]=="%%")):
return_list.append(pos)
return return_list
# function to clean dictionary
def lemmatize_sentences(sentence):
lemmatizer = nltk.stem.WordNetLemmatizer()
sentence = ' '.join(word.lower().translate(str.maketrans(string.punctuation, ' '*len(string.punctuation))) for word in sentence.split())
list_words = sentence.split()
lemmatize_words = ''
for i in range (len(list_words)):
tag = nltk.pos_tag(list_words)[i][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
final_tag = tag_dict.get(tag, wordnet.NOUN)
lemmatize_words += " " + lemmatizer.lemmatize(list_words[i],final_tag)
return lemmatize_words.strip()
def getLemmaExamplesFromSenseDict(word_pos, sense):
word_pos = word_pos.strip()
lemmaSenseKey = word_pos+ "_"+sense.get('id')
sense_examples = ""
if (lemmaSenseKey in SenseLemmaDictionary):
sense_examples = SenseLemmaDictionary.get(lemmaSenseKey)
else:
sense_examples = (
lemmatize_sentences(sense.get('gloss').lower())
+ " | "
+ ('.'.join(lemmatize_sentences(sentence.lower()) for sentence in sense.get('examples').split(".")))
)
SenseLemmaDictionary[lemmaSenseKey] = sense_examples
return sense_examples
def getLemmaExamplesFromCorpusSenseDict(word_pos, sense):
word_pos = word_pos.strip()
lemmaSenseKey = word_pos+ "_"+sense.get('id')
sense_examples = ""
if (lemmaSenseKey in SenseLemmaCorpusDictionary):
sense_examples = SenseLemmaCorpusDictionary.get(lemmaSenseKey)
else:
sense_examples = (
lemmatize_sentences(sense.get('gloss').lower())
+ " | "
+ ('.'.join(lemmatize_sentences(sentence.lower()) for sentence in sense.get('examples').split(".")))
)
SenseLemmaCorpusDictionary[lemmaSenseKey] = sense_examples
return sense_examples
# -
# Model 1 : Simple Lesk algorithm
def calculate_sense_model_one(target_word, pos_data):
print(target_word)
target_data = target_word.split(".")
senses = getSenses(target_data[0].strip(), target_data[1].strip())
score_map = {}
pos_sentence = []
for pos_word in pos_data:
pos_sentence.append(pos_word[2])
for sense in senses:
sense_score = 0
sense_examples = getLemmaExamplesFromSenseDict(target_word, sense)
sense_example_words = sense_examples.split()
common = set(sense_example_words).intersection( set(pos_sentence) )
score_map[sense.get('id')] = len(common)
key_max = max(score_map, key=score_map.get)
return key_max
# +
# Model 2 : Original Lesk algorithm
def limitizeContextMapModelTwo(context_sense):
dictionary_examples = ""
for context_data in context_sense:
for sense_data in context_data[2]:
#dictionary_examples += lemmatize_sentences(sense_data.get('gloss').lower())+ " | " + lemmatize_sentences(sense_data.get('examples').lower())
dictionary_examples += getLemmaExamplesFromSenseDict(context_data[1], sense_data)
return dictionary_examples
def getContextDictModelTwo(target_data, pos_data, corpus=False):
context_sense = []
target_sense = []
sentence = pos_data
sentence_length = len(sentence)
target_word = target_data.split(".")[0]
target_pos = target_data.split(".")[1]
for k in range(len(sentence)):
if sentence[k][0] == "%%":
target_index = k-1
targetWord = sentence[target_index][0]
break
i = target_index-2
j = target_index+2
k = 0
while((i>=0 or j<len(sentence)) and k<30):
if(i>=0 and len(sentence[i][2].strip())>= 3 and sentence[i][2].strip() != target_word):
context_word = sentence[i][2].strip()
context_pos = sentence[i][1].strip()
if(corpus):
sense = getNewSenses(context_word,context_pos)
else:
sense = getSenses(context_word,context_pos)
if len(sense) >= 1:
context_sense.append([targetWord,context_word+"."+context_pos,sense, target_index-i])
if(j<len(sentence) and len(sentence[j][2].strip())>= 3 and sentence[j][2].strip() != target_word):
context_word = sentence[j][2].strip()
context_pos = sentence[j][1].strip()
if(corpus):
sense = getNewSenses(context_word,context_pos)
else:
sense = getSenses(context_word,context_pos)
if len(sense) >= 1:
context_sense.append([target_word,context_word+"."+context_pos,sense, j-target_index])
i = i-1
j = j+1
k = k+1
return context_sense
def calculateSenseIdModelTwo(target_word_pos, pos_without_stopwords):
print(target_word_pos)
target_word_details = target_word_pos.split(".")
target_senses = getSenses(target_word_details[0].strip(), target_word_details[1].strip())
score_map = {}
context_sentence = limitizeContextMapModelTwo(getContextDictModelTwo(target_word_pos, pos_without_stopwords))
for sense in target_senses:
#sense_examples = lemmatize_sentences(sense.get('gloss').lower())+ " | " + lemmatize_sentences(sense.get('examples').lower())
sense_examples = getLemmaExamplesFromSenseDict(target_word_pos.strip(), sense)
sense_example_words = sense_examples.split()
context_example_words = context_sentence.split()
common = set(sense_example_words).intersection( set(context_example_words) )
context_score = len(common)
score_map[sense.get('id')] = context_score
key_max = max(score_map, key=score_map.get)
return key_max
# +
# Model 3 + 5 : Advanced original Lesk algorithm, with and without corpus Lesk
def getContextClassificationModel3(context_sense, corpus=False):
context_classification = {}
for context_data in context_sense:
dictionary_examples = ""
context_interval = int(context_data[3]/5)
for sense_data in context_data[2]:
if (corpus):
dictionary_examples += getLemmaExamplesFromCorpusSenseDict(context_data[1], sense_data)
else:
dictionary_examples += getLemmaExamplesFromSenseDict(context_data[1], sense_data)
if(context_interval in context_classification):
dictionary_examples = context_classification.get(context_interval) + dictionary_examples
context_classification[context_interval] = dictionary_examples
return context_classification
def calculateSenseIdModel3(target_word_pos, pos_without_stopwords, corpus=False):
print(target_word_pos)
target_word_details = target_word_pos.split(".")
if(corpus):
target_senses = getNewSenses(target_word_details[0].strip(), target_word_details[1].strip())
else:
target_senses = getSenses(target_word_details[0].strip(), target_word_details[1].strip())
score_map = {}
context_classification = getContextClassificationModel3(getContextDictModelTwo(target_word_pos, pos_without_stopwords, corpus), corpus)
for sense in target_senses:
sense_score = 0
if (corpus):
sense_examples = getLemmaExamplesFromCorpusSenseDict(target_word_pos.strip(), sense)
else:
sense_examples = getLemmaExamplesFromSenseDict(target_word_pos.strip(), sense)
for context_level in context_classification:
sense_example_words = sense_examples.split()
context_example_words = context_classification.get(context_level).split()
common = set(sense_example_words).intersection( set(context_example_words) )
context_score = len(common)*(6-int(context_level)+1)
sense_score += context_score
score_map[sense.get('id')] = sense_score
key_max = max(score_map, key=score_map.get)
return key_max
# -
# Model 4: Corpus Lesk using the simple algorithm
def calculate_sense_corpus_model_one(target_word, pos_data):
print(target_word)
target_data = target_word.split(".")
senses = getNewSenses(target_data[0].strip(), target_data[1].strip())
score_map = {}
pos_sentence = []
for pos_word in pos_data:
pos_sentence.append(pos_word[2])
for sense in senses:
sense_score = 0
sense_examples = getLemmaExamplesFromCorpusSenseDict(target_word, sense)
sense_example_words = sense_examples.split()
common = set(sense_example_words).intersection( set(pos_sentence) )
score_map[sense.get('id')] = len(common)
key_max = max(score_map, key=score_map.get)
return key_max
# +
# Calculating accuracies of all models
def calculate_accuracy(dataframe, column_name):
accuracy_number = 0
i=0
for index, row in dataframe.iterrows():
if(int(row['Sense_ID'])==int(row[column_name])):
accuracy_number += 1
i += 1
return ((accuracy_number/i)*100)
# Exporting to CSV
def exportToCSV(input_data_frame, csv_path):
tmp_df = input_data_frame.drop(['Sentence', 'lowercase_cleaned', 'pos_data'], axis=1)
tmp_df.to_csv(csv_path, index = False)
# -
# Main function
if __name__ == "__main__":
global Tree
global TreeNew
global SenseLemmaDictionary
global SenseLemmaCorpusDictionary
SenseLemmaDictionary = {}
SenseLemmaCorpusDictionary = {}
# Read the dictionary file - original
Parser = objectify.makeparser(recover=True)
Tree = objectify.fromstring(''.join(open('dictionary.xml').readlines()), Parser)
#read test data
train_data = pd.read_csv (r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\train.data',header=None,delimiter = "|")
test_data = pd.read_csv (r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\test.data',header=None,delimiter = "|")
validation_data = pd.read_csv (r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\validate.data',header=None,delimiter = "|")
#rename columns for all the datasets
train_data_new = rename_columns(train_data)
test_data_new = rename_columns(test_data)
validation_data_new = rename_columns(validation_data)
#create new dictionary
newDictionary()
ParserNew = objectify.makeparser(recover=True)
TreeNew = objectify.fromstring(''.join(open('new_dictionary.xml').readlines()), ParserNew)
################################# Validation data ###################################
# validation set cleaning process
method_one_validation_df = validation_data_new
method_one_validation_df = lowercase_cleaned_data(method_one_validation_df, 'Sentence')
method_one_validation_df["pos_data"] = method_one_validation_df['lowercase_cleaned'].apply(lambda sentence: retreive_pos_wordnet(sentence))
method_one_validation_df["pos_data"] = method_one_validation_df["pos_data"].apply(lambda pos_data_list: remove_stop_words_from_pos(pos_data_list))
# Model 1 - Simple lesk
method_one_validation_df['simple_lesk_sense_id'] = method_one_validation_df.apply(lambda x: calculate_sense_model_one(x['Target_Word'], x['pos_data']), axis=1)
# Model 2 - Original Lesk
method_two_validation_df = method_one_validation_df
method_two_validation_df['original_lesk_sense_id'] = method_two_validation_df.apply(lambda x: calculateSenseIdModelTwo(x['Target_Word'], x['pos_data']), axis=1)
# Model 3 - Advance original lesk
method_three_validation_df = method_two_validation_df
method_three_validation_df['adv_original_lesk_sense_id'] = method_three_validation_df.apply(lambda x: calculateSenseIdModel3(x['Target_Word'], x['pos_data']), axis=1)
# Model 4 - Corpus lesk
method_four_validation_df = method_three_validation_df
method_four_validation_df['corpus_lesk_sense_id'] = method_four_validation_df.apply(lambda x: calculate_sense_corpus_model_one(x['Target_Word'], x['pos_data']), axis=1)
# Model 5 - Adv Corpus lesk
method_five_validation_df = method_four_validation_df
method_five_validation_df['adv_corpus_lesk_sense_id'] = method_five_validation_df.apply(lambda x: calculateSenseIdModel3(x['Target_Word'], x['pos_data'], True), axis=1)
print("Accuracy of validation data for simple_lesk: " + str(calculate_accuracy(method_one_validation_df, "simple_lesk_sense_id")))
print("Accuracy of validation data for original_lesk: " + str(calculate_accuracy(method_two_validation_df, "original_lesk_sense_id")))
print("Accuracy of validation data for adv_original_lesk: " + str(calculate_accuracy(method_three_validation_df, "adv_original_lesk_sense_id")))
print("Accuracy of validation data for corpus_lesk: " + str(calculate_accuracy(method_four_validation_df, "corpus_lesk_sense_id")))
print("Accuracy of validation data for adv_corpus_lesk: " + str(calculate_accuracy(method_five_validation_df, "adv_corpus_lesk_sense_id")))
# Export validation results to CSV
exportToCSV(method_five_validation_df, r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\validation_results.csv')
################################### Test data ######################################
# test set cleaning process
method_one_test_df = test_data_new
method_one_test_df = lowercase_cleaned_data(method_one_test_df, 'Sentence')
method_one_test_df["pos_data"] = method_one_test_df['lowercase_cleaned'].apply(lambda sentence: retreive_pos_wordnet(sentence))
method_one_test_df["pos_data"] = method_one_test_df["pos_data"].apply(lambda pos_data_list: remove_stop_words_from_pos(pos_data_list))
# Model 1 - Simple Lesk
method_one_test_df['simple_lesk_sense_id'] = method_one_test_df.apply(lambda x: calculate_sense_model_one(x['Target_Word'], x['pos_data']), axis=1)
# Model 2 - Original Lesk
method_two_test_df = method_one_test_df
method_two_test_df['original_lesk_sense_id'] = method_two_test_df.apply(lambda x: calculateSenseIdModelTwo(x['Target_Word'], x['pos_data']), axis=1)
# Model 3 - Advance original lesk
method_three_test_df = method_two_test_df
method_three_test_df['adv_original_lesk_sense_id'] = method_three_test_df.apply(lambda x: calculateSenseIdModel3(x['Target_Word'], x['pos_data']), axis=1)
# Model 4 - Corpus lesk
method_four_test_df = method_three_test_df
method_four_test_df['corpus_lesk_sense_id'] = method_four_test_df.apply(lambda x: calculate_sense_corpus_model_one(x['Target_Word'], x['pos_data']), axis=1)
# Model 5 - Adv Corpus lesk
method_five_test_df = method_four_test_df
method_five_test_df['adv_corpus_lesk_sense_id'] = method_five_test_df.apply(lambda x: calculateSenseIdModel3(x['Target_Word'], x['pos_data'], True), axis=1)
# Export validation results to CSV
exportToCSV(method_five_test_df, r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\test_data_results.csv')
################################### Training data ######################################
# train data cleaning process
method_one_train_df = train_data_new
method_one_train_df = lowercase_cleaned_data(method_one_train_df, 'Sentence')
method_one_train_df["pos_data"] = method_one_train_df['lowercase_cleaned'].apply(lambda sentence: retreive_pos_wordnet(sentence))
method_one_train_df["pos_data"] = method_one_train_df["pos_data"].apply(lambda pos_data_list: remove_stop_words_from_pos(pos_data_list))
# Model 1 - Simple Lesk
method_one_train_df['simple_lesk_sense_id'] = method_one_train_df.apply(lambda x: calculate_sense_model_one(x['Target_Word'], x['pos_data']), axis=1)
# Model 2 - Original Lesk
method_two_train_df = method_one_train_df
method_two_train_df['original_lesk_sense_id'] = method_two_train_df.apply(lambda x: calculateSenseIdModelTwo(x['Target_Word'], x['pos_data']), axis=1)
# Model 3 - Advance original lesk
method_three_train_df = method_two_train_df
method_three_train_df['adv_original_lesk_sense_id'] = method_three_train_df.apply(lambda x: calculateSenseIdModel3(x['Target_Word'], x['pos_data']), axis=1)
# Calculating accuracies of training data
print("Accuracy of training data for simple_lesk: " + str(calculate_accuracy(method_one_train_df, "simple_lesk_sense_id")))
print("Accuracy of training data for original_lesk: " + str(calculate_accuracy(method_two_train_df, "original_lesk_sense_id")))
print("Accuracy of training data for adv_original_lesk: " + str(calculate_accuracy(method_three_train_df, "adv_original_lesk_sense_id")))
# Export validation results to CSV
exportToCSV(method_three_train_df, r'C:\Users\ritu2\Desktop\UIC MSBA\Sem 2\Text Analytics\Assignments\Assignment 2\training_data_results.csv')
| 21,493 |
/ะะปะณะพัะธัะผั ะฐะฝะฐะปะธะทะฐ ะดะฐะฝะฝัั
/Lesson5.ipynb | 3b5f265369cff5253b28eb436d2e8c94fb7ae22e | [] | no_license | TataMoskovkina/GeekUniversity | https://github.com/TataMoskovkina/GeekUniversity | 0 | 0 | null | 2020-03-13T22:14:00 | 2020-03-10T21:28:23 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 290,185 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="q0Z7pb2vbIWg"
# # Lesson 5. Random Forest
# -
# ### 1) Building the samples - bootstrap
# ![img/bootstrap.jpg](img/bootstrap.jpg)
#
#
# ### 2) Building a composition of algorithms - bagging
# ![img/bagging.png](img/bagging.png)
#
#
# ### Random Forest == bagging over decision trees
# + colab={} colab_type="code" id="ZNR-FOeobIWs"
import matplotlib.pyplot as plt
import random
from matplotlib.colors import ListedColormap
from sklearn import datasets
import numpy as np
# + colab={} colab_type="code" id="m4Mb7omZbIWw"
# generate a dataset of 500 objects with 5 features
classification_data, classification_labels = datasets.make_classification(n_samples=500,
n_features = 5, n_informative = 5,
n_classes = 2, n_redundant=0,
n_clusters_per_class=1, random_state=23)
# + colab={} colab_type="code" id="2R53TJClbIWz" outputId="ff9cd4bc-207b-4b32-8efd-772e9af6868d"
# visualize the generated data
colors = ListedColormap(['red', 'blue'])
light_colors = ListedColormap(['lightcoral', 'lightblue'])
plt.figure(figsize=(8,8))
plt.scatter(list(map(lambda x: x[0], classification_data)), list(map(lambda x: x[1], classification_data)),
c=classification_labels, cmap=colors);
# + [markdown] colab_type="text" id="L9ZdDJGvbIW8"
# Let's repeat the decision tree implementation from the previous lesson
# + colab={} colab_type="code" id="AGdBq1lbbIW9"
# Implement the node class
class Node:
def __init__(self, index, t, true_branch, false_branch):
        self.index = index  # index of the feature compared against the threshold in this node
        self.t = t  # threshold value
        self.true_branch = true_branch  # subtree satisfying the condition in the node
        self.false_branch = false_branch  # subtree not satisfying the condition in the node
# + colab={} colab_type="code" id="QGT-Wsx6bIW_"
# And the terminal node (leaf) class
class Leaf:
def __init__(self, data, labels):
self.data = data
self.labels = labels
self.prediction = self.predict()
def predict(self):
        # count the number of objects of each class
        classes = {}  # build a dictionary "class: number of objects"
for label in self.labels:
if label not in classes:
classes[label] = 0
classes[label] += 1
        # find the class with the largest number of objects in this leaf and return it
prediction = max(classes, key=classes.get)
return prediction
# + [markdown] colab_type="text" id="JvjWiryZbIW2"
# Implement the generation of $N$ bootstrap samples and of the feature subsample used to find the split in a node.
# + colab={} colab_type="code" id="d7if4ogqbIW3"
random.seed(42)
def get_bootstrap(data, labels, N):
n_samples = data.shape[0]
bootstrap = []
for i in range(N):
b_data = np.zeros(data.shape)
b_labels = np.zeros(labels.shape)
# TODO: random.choice()
for j in range(n_samples):
sample_index = random.randint(0, n_samples-1)
b_data[j] = data[sample_index]
b_labels[j] = labels[sample_index]
bootstrap.append((b_data, b_labels))
return bootstrap
# -
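# The TODO inside get_bootstrap hints at `random.choice`; a possible vectorized variant of the same bootstrap, using `np.random.choice`, is sketched below (the function name is illustrative and is not used elsewhere in the lesson).

# +
def get_bootstrap_np(data, labels, N):
    # same idea as get_bootstrap, but sampling all row indexes for a tree in one call
    n_samples = data.shape[0]
    bootstrap = []
    for _ in range(N):
        sample_indexes = np.random.choice(n_samples, size=n_samples, replace=True)
        bootstrap.append((data[sample_indexes], labels[sample_indexes]))
    return bootstrap
# -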
def get_subsample(len_sample):
sample_indexes = [i for i in range(len_sample)]
len_subsample = int(np.sqrt(len_sample))
subsample = []
random.shuffle(sample_indexes)
for i in range(len_subsample):
subsample.append(sample_indexes.pop())
return subsample
# + colab={} colab_type="code" id="DRTe458CbIXE"
# Computing the Gini criterion
def gini(labels):
    # count the number of objects of each class
classes = {}
for label in labels:
if label not in classes:
classes[label] = 0
classes[label] += 1
    # compute the criterion
impurity = 1
for label in classes:
p = classes[label] / len(labels)
impurity -= p ** 2
return impurity
# + colab={} colab_type="code" id="YT7T4h3WbIXH"
# Computing the split quality
def quality(left_labels, right_labels, current_gini):
    # share of the sample that goes into the left subtree
p = float(left_labels.shape[0]) / (left_labels.shape[0] + right_labels.shape[0])
return current_gini - p * gini(left_labels) - (1 - p) * gini(right_labels)
# + colab={} colab_type="code" id="rqbAx1cXbIXK"
# Splitting the dataset in a node
def split(data, labels, index, t):
left = np.where(data[:, index] <= t)
right = np.where(data[:, index] > t)
true_data = data[left]
false_data = data[right]
true_labels = labels[left]
false_labels = labels[right]
return true_data, false_data, true_labels, false_labels
# + colab={} colab_type="code" id="zP2pg3HUbIXP"
# Finding the best split
def find_best_split(data, labels, min_leaf=1):
    # minimum number of objects allowed in a node
# min_leaf = 1
current_gini = gini(labels)
best_quality = 0
best_t = None
best_index = None
n_features = data.shape[1]
    # choose feature indexes from a subsample of length sqrt(n_features)
subsample = get_subsample(n_features)
for index in subsample:
t_values = [row[index] for row in data]
for t in t_values:
true_data, false_data, true_labels, false_labels = split(data, labels, index, t)
            # skip splits that leave fewer than min_leaf objects in a node
if len(true_data) < min_leaf or len(false_data) < min_leaf:
continue
current_quality = quality(true_labels, false_labels, current_gini)
            # pick the threshold that gives the largest quality gain
if current_quality > best_quality:
best_quality, best_t, best_index = current_quality, t, index
return best_quality, best_t, best_index
# + colab={} colab_type="code" id="dQ4ZPJRUbIXR"
# Building the tree with a recursive function
def build_tree(data, labels):
    quality, t, index = find_best_split(data, labels, min_leaf=1)
    # Base case - stop the recursion when there is no quality gain
    if quality == 0:
        return Leaf(data, labels)
    true_data, false_data, true_labels, false_labels = split(data, labels, index, t)
    # Recursively build the two subtrees
    true_branch = build_tree(true_data, true_labels)
    false_branch = build_tree(false_data, false_labels)
    # Return the node class with all its subtrees, i.e. the whole tree
    return Node(index, t, true_branch, false_branch)
# + [markdown] colab_type="text" id="T_YX8fnmbIXU"
# Now let's add the function that builds the random forest.
# + colab={} colab_type="code" id="PZMieMMrbIXV"
def random_forest(data, labels, n_trees):
forest = []
bootstrap = get_bootstrap(data, labels, n_trees)
for b_data, b_labels in bootstrap:
forest.append(build_tree(b_data, b_labels))
return forest
# + colab={} colab_type="code" id="tWNbZTz4bIXX"
# Function classifying a single object
def classify_object(obj, node):
    # Stop the recursion once a leaf is reached
if isinstance(node, Leaf):
answer = node.prediction
return answer
if obj[node.index] <= node.t:
return classify_object(obj, node.true_branch)
else:
return classify_object(obj, node.false_branch)
# + colab={} colab_type="code" id="rWOM8g_YbIXZ"
# function producing predictions for a sample using a single tree
def predict(data, tree):
classes = []
for obj in data:
prediction = classify_object(obj, tree)
classes.append(prediction)
return classes
# -
# ### Prediction by voting of the trees
# + colab={} colab_type="code" id="ZtIgR7R-bIXc"
def tree_vote(forest, data):
predictions = []
for tree in forest:
predictions.append(predict(data, tree))
predictions_per_object = list(zip(*predictions))
voted_predictions = []
for obj in predictions_per_object:
voted_predictions.append(max(set(obj), key=obj.count))
return voted_predictions
# + [markdown] colab_type="text" id="fkMTjBewbIXf"
# Next we do the usual split of the sample into train and test parts, as was done before.
# + colab={} colab_type="code" id="Ie9t9IyAbIXh"
# Split the sample into train and test parts
from sklearn import model_selection
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(classification_data,
classification_labels,
test_size = 0.3,
random_state = 1)
# + colab={} colab_type="code" id="z4apOFB9bIXk"
# Introduce an accuracy function defined as the share of correct answers
def accuracy_metric(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
# + colab={} colab_type="code" id="5hXVQEyJJYyY"
# -
# ### Testing the hand-written random forest
# %%time
n_trees = 1
my_forest_1 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_1, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_1, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# -
# %%time
n_trees = 3
my_forest_3 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_3, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_3, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# -
# %%time
n_trees = 10
my_forest_10 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_10, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_10, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# -
# %%time
n_trees = 30
my_forest_30 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_30, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_30, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# -
# %%time
n_trees = 50
my_forest_50 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_50, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_50, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# -
# ### Homework
# 1. Using sklearn.make_classification, build a dataset of 100 objects with two features, train random forests of 1, 3, 10 and 50 trees and visualize their separating hyperplanes on plots (similar to the tree visualization from the previous lesson; you only need to replace the call to predict with tree_vote). Draw conclusions about the resulting complexity of the decision surface and about under- or overfitting of the random forest depending on the number of trees in it (*).
# 2. Replace the hold-out validation in the implemented algorithm with an Out-of-Bag check.
# 3. (Review) Rewrite the calc_gini function from the decision tree lesson so that Shannon entropy is used as the criterion. Rename the function to calc_entropy (*).
# #### 1. Using sklearn.make_classification, build a dataset of 100 objects with two features, train random forests of 1, 3, 10 and 50 trees and visualize their separating hyperplanes on plots (similar to the tree visualization from the previous lesson; you only need to replace the call to predict with tree_vote). Draw conclusions about the resulting complexity of the decision surface and about under- or overfitting of the random forest depending on the number of trees in it (*).
classification_data, classification_labels = datasets.make_classification(n_samples=100,
n_features = 2, n_classes = 2, n_redundant=0,
n_clusters_per_class=1, random_state=23)
# +
# visualize the generated data
colors = ListedColormap(['red', 'blue'])
light_colors = ListedColormap(['lightcoral', 'lightblue'])
plt.figure(figsize=(8,8))
plt.scatter(list(map(lambda x: x[0], classification_data)), list(map(lambda x: x[1], classification_data)),
c=classification_labels, cmap=colors);
# -
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(classification_data,
classification_labels,
test_size = 0.3,
random_state = 67)
# %%time
n_trees = 1
my_forest_1 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_1, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_1, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# +
def get_meshgrid(data, step=.05, border=1.2):
x_min, x_max = data[:, 0].min() - border, data[:, 0].max() + border
y_min, y_max = data[:, 1].min() - border, data[:, 1].max() + border
return np.meshgrid(np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
plt.figure(figsize = (16, 7))
# training set plot
plt.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(tree_vote(my_forest_1, np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, cmap = colors)
plt.title(f'Train accuracy={train_acc:.2f}')
# test set plot
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, cmap = colors)
plt.title(f'Test accuracy={test_acc:.2f}')
# -
# %%time
n_trees = 3
my_forest_3 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_3, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_3, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# +
plt.figure(figsize = (16, 7))
# training set plot
plt.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(tree_vote(my_forest_3, np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, cmap = colors)
plt.title(f'Train accuracy={train_acc:.2f}')
# test set plot
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, cmap = colors)
plt.title(f'Test accuracy={test_acc:.2f}')
# -
# %%time
n_trees = 10
my_forest_10 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_10, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_10, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# +
plt.figure(figsize = (16, 7))
# training set plot
plt.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(tree_vote(my_forest_10, np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, cmap = colors)
plt.title(f'Train accuracy={train_acc:.2f}')
# test set plot
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, cmap = colors)
plt.title(f'Test accuracy={test_acc:.2f}')
# -
# %%time
n_trees = 50
my_forest_50 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_50, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_50, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# +
plt.figure(figsize = (16, 7))
# training set plot
plt.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(tree_vote(my_forest_50, np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, cmap = colors)
plt.title(f'Train accuracy={train_acc:.2f}')
# test set plot
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, cmap = colors)
plt.title(f'Test accuracy={test_acc:.2f}')
# -
# The resulting decision surface is, in my view, of medium complexity. It is not primitive and has some distinctive features. Test accuracy here is the same for a forest of 10 trees and a forest of 50 trees and does not grow any further, so 10 trees are enough for this case. With 50 trees no overfitting occurred, since the metric did not drop but stayed the same. Accuracy = 96.67 is quite a decent result.
# Out of curiosity, let's see what happens with a forest of 100 trees.
# %%time
n_trees = 100
my_forest_100 = random_forest(train_data, train_labels, n_trees)
# +
train_answs = tree_vote(my_forest_100, train_data)
train_acc = accuracy_metric(train_labels, train_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ train: {train_acc:.3f}')
# +
test_answs = tree_vote(my_forest_100, test_data)
test_acc = accuracy_metric(test_labels, test_answs)
print(f'ะขะพัะฝะพััั ัะปััะฐะนะฝะพะณะพ ะปะตัะฐ ะธะท {n_trees} ะดะตัะตะฒัะตะฒ ะฝะฐ test: {test_acc:.3f}')
# +
plt.figure(figsize = (16, 7))
# training set plot
plt.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(tree_vote(my_forest_100, np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, cmap = colors)
plt.title(f'Train accuracy={train_acc:.2f}')
# test set plot
plt.subplot(1,2,2)
plt.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
plt.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, cmap = colors)
plt.title(f'Test accuracy={test_acc:.2f}')
# -
# As you can see, nothing changes here, so it would have been perfectly fine to stop at 10 trees.
# #### 2. Replace the hold-out validation in the implemented algorithm with an Out-of-Bag check.
def out_of_bag():
    # TODO: this homework task was left unfinished in the notebook; a possible sketch follows below
    pass
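# A minimal sketch of what the Out-of-Bag check could look like (illustrative only: it re-implements the bootstrap so that the indexes each tree never saw are kept, and all names below are assumptions rather than part of the lesson):

# +
def random_forest_oob(data, labels, n_trees):
    n_samples = data.shape[0]
    forest = []
    oob_votes = {i: [] for i in range(n_samples)}
    for _ in range(n_trees):
        idx = np.random.choice(n_samples, size=n_samples, replace=True)
        oob_idx = np.setdiff1d(np.arange(n_samples), idx)
        tree = build_tree(data[idx], labels[idx])
        forest.append(tree)
        # collect this tree's predictions for the objects it has never seen
        for i in oob_idx:
            oob_votes[i].append(classify_object(data[i], tree))
    # majority vote over the out-of-bag predictions only
    voted = {i: max(set(v), key=v.count) for i, v in oob_votes.items() if v}
    oob_accuracy = np.mean([voted[i] == labels[i] for i in voted]) * 100
    return forest, oob_accuracy
# -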
# #### 3. (Review) Rewrite the calc_gini function from the decision tree lesson so that Shannon entropy is used as the criterion. Rename the function to calc_entropy (*)
# +
import math
def calc_entropy(labels):
    # count the number of objects of each class
classes = {}
for label in labels:
if label not in classes:
classes[label] = 0
classes[label] += 1
    # compute the criterion
entropy = 0
for label in classes:
p = classes[label] / len(labels)
entropy -= p * math.log2(p)
return entropy
# -
calc_entropy(classification_labels)
# The classes in our sample are balanced, which is why the entropy is at its maximum.
| 21,662 |
/NLPTrainingCamp/seq2seq/seq2seq-translation-attention-pretrain-embedding.ipynb | ce2343e7faae9cd88aadb15bf06749ec6a90ff38 | [
"MIT"
] | permissive | hotbaby/ml | https://github.com/hotbaby/ml | 3 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 38,936 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # seq2seq model: machine translation
#
# Improvement in this version:
#
# use Tencent pre-trained word embeddings
# ## Environment dependencies
# +
import unicodedata
import string
import re
import random
import time
import math
import jieba
import pandas as pd
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
# -
# ## Data preprocessing
USE_CUDA = torch.cuda.is_available()
print('USE_CUDA: %s' % USE_CUDA)
SEGMENTATION = True  # whether to segment Chinese sentences into words
# ### Text preprocessing
#
# Discard all symbols except Chinese characters, letters and common punctuation.
# + jupyter={"outputs_hidden": false}
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalize_string(s):
s = unicode_to_ascii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z\u4e00-\u9fa5.!?๏ผใ๏ผ]+", r" ", s)
return s
# -
# ### Building the vocabulary
# Introduce three special tokens:
#
# 1. `SOS`, "Start of sentence", marks the beginning of a sentence
# 2. `EOS`, "End of sentence", marks the end of a sentence
# 3. `UNK`, "Unknown token", marks out-of-vocabulary words
# +
SOS_token = 0
EOS_token = 1
UNK_token = 2
class Lang(object):
"""
    Vocabulary.
"""
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS", '2': 'UNK'}
self.n_words = 3 # Count SOS and EOS
def index_words(self, sentence):
if self.name == 'cn':
words = list(jieba.cut(sentence)) if SEGMENTATION else sentence
for word in words:
self.index_word(word)
else:
words = sentence.split(' ')
for word in words:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# -
# Read the parallel corpus and clean it.
def read_langs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('%s-%s.txt' % (lang1, lang2)).read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
# ### Preparing the dataset
#
# To speed up training, this example only keeps sentence pairs that are no longer than 10 words; in a real experiment, using more of the data might give better results.
# + jupyter={"outputs_hidden": false}
MAX_LENGTH = 10
def filter_pair(p):
return len(p[1].split(' ')) < MAX_LENGTH
def filter_pairs(pairs):
return [pair for pair in pairs if filter_pair(pair)]
# -
# The full data processing pipeline:
#
# - read the data, process it line by line and turn each line into a sentence pair
# - normalize the text and filter out useless symbols
# - index the words of the existing text and build the token-to-index mapping
#
# + jupyter={"outputs_hidden": false}
def prepare_data(lang1_name, lang2_name, reverse=False):
input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filter_pairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Indexing words...")
for pair in pairs:
input_lang.index_words(pair[0])
output_lang.index_words(pair[1])
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepare_data('cn', 'eng', False)
# Print an example pair
print(random.choice(pairs))
# -
# Sample 200 records from the dataset as a validation set
def sample_test_dataset(size=100):
with open('cn-eng-test.txt', 'w+') as f:
f.write('\n'.join(['\t'.join(pair) for pair in random.sample(pairs, k=size)]))
# +
# sample_test_dataset()
# -
# ### Converting text data into tensors
#
# For training, we need to turn the sentences into something the neural network can understand (numbers). Each sentence is split into words and then turned into a tensor in which every word is replaced by its index (from the Lang vocabulary built earlier). While creating these tensors we also append the EOS token to mark the end of the sentence.
#
# ![](https://i.imgur.com/LzocpGH.png)
# + jupyter={"outputs_hidden": false}
# Return a list of indexes, one for each word in the sentence
def indexes_from_sentence(lang, sentence):
"""
    Convert a sentence into a list of word indexes according to the vocabulary.
    :return list, e.g. [1, 2, 3, 4]
"""
if lang.name == 'cn':
words = list(jieba.cut(sentence)) if SEGMENTATION else sentence
return [lang.word2index[word] if word in lang.word2index else UNK_token for word in words ]
else:
words = sentence.split(' ')
return [lang.word2index[word] if word in lang.word2index else UNK_token for word in words]
def variable_from_sentence(lang, sentence):
"""
    Convert a sentence into a Tensor.
:return Tensor, shape(n, 1)
"""
indexes = indexes_from_sentence(lang, sentence)
indexes.append(EOS_token)
var = torch.LongTensor(indexes).view(-1, 1)
if USE_CUDA: var = var.cuda()
return var
def variables_from_pair(pair):
"""
    Convert a parallel sentence pair into Tensors.
:return (input_tensor, output_tensor)
"""
input_variable = variable_from_sentence(input_lang, pair[0])
target_variable = variable_from_sentence(output_lang, pair[1])
return (input_variable, target_variable)
# +
pair = random.choice(pairs)
print('pair: %s' % pair)
input_tensor, target_tensor = variables_from_pair(pair)
print('input_tensor shape: %s, output_tensor shap: %s' % (input_tensor.shape, target_tensor.shape))
print('input_tensor: %s' % input_tensor)
# -
# # Model
# ## Encoder
# + jupyter={"outputs_hidden": false}
class EncoderGRU(nn.Module):
    """GRU encoder"""
def __init__(self, input_size, hidden_size, n_layers=1, bidirectional=False):
"""
ๅๅงๅ
:param input_size, ่พๅ
ฅ่ฏ่กจๅคง
:param hidden_size, Embedding็ปดๅบฆๅคงๅฐ๏ผRNN hiddenๅคงๅฐ
:param n_layers, RNNๅฑๆฐ
"""
super(EncoderGRU, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.bidirectional = bidirectional
self.embedding = nn.Embedding(input_size, hidden_size)
        # use a GRU instead of a plain RNN
# self.rnn = nn.RNN(hidden_size, hidden_size, n_layers)
self.rnn = nn.GRU(hidden_size, hidden_size, n_layers)
def forward(self, word_inputs, hidden):
"""
        Forward pass.
        :param word_inputs, input sequence, shape (n, 1)
        :param hidden, hidden state, shape (seq_len*n_layers, batch_size, hidden_size)
:return output(seq_len, batch, num_directions*hidden_size),
hidden(num_layers*num_directions, hidden_size)
"""
# Note: we run this all at once (over the whole input sequence)
seq_len = len(word_inputs)
embedded = self.embedding(word_inputs).view(seq_len, 1, -1)
output, hidden = self.rnn(embedded, hidden)
return output, hidden
def init_hidden(self):
num_directions = 2 if self.bidirectional else 1
hidden = torch.zeros(self.n_layers*num_directions, 1, self.hidden_size)
if USE_CUDA: hidden = hidden.cuda()
return hidden
# +
# encoder = EncoderGRU(input_lang.n_words, 100)
# encoder.to(device)
# pair = random.choice(pairs)
# print('pair: %s' % pair)
# encoder_hidden = encoder.init_hidden()
# encoder_outputs, encoder_hidden = encoder(variable_from_sentence(input_lang, pair[0]), encoder_hidden)
# print('seq_len: %s, encoder_outputs shape: %s, encoder_hidden shape: %s' % (
# len(pair[0]), encoder_outputs.shape, encoder_hidden.shape))
# -
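# The improvement announced at the top of this notebook (Tencent pre-trained word vectors) does not appear in the cells below, so here is one possible way to plug pre-trained vectors into the encoder's nn.Embedding. This is only a sketch: the gensim loader, the file name and the 200-dimensional size are assumptions, and hidden_size would have to match the vector dimension (or a projection layer added).

# +
def build_pretrained_embedding(lang, vec_path='Tencent_AILab_ChineseEmbedding.txt', dim=200):
    """Sketch: build an nn.Embedding aligned with lang.word2index from a word2vec-format file."""
    from gensim.models import KeyedVectors  # assumed dependency
    kv = KeyedVectors.load_word2vec_format(vec_path, binary=False)
    weights = torch.randn(lang.n_words, dim) * 0.1  # random init for special / OOV tokens
    for word, idx in lang.word2index.items():
        if word in kv:
            weights[idx] = torch.tensor(kv[word])
    return nn.Embedding.from_pretrained(weights, freeze=False)
# -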
# ## Attention mechanism
# +
class Attention(nn.Module):
    """Attention mechanism"""
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.hidden_size = hidden_size
def forward(self, decoder_hidden, encoder_outputs):
"""
        Forward pass.
:param decoder_hidden: shape(num_layers*num_directions, batch, hidden_size)
:param encoder_outputs: shape(seq_len, batch, num_directions*hidden_size)
:return attention_weighted_encoder_output shape(num_layers, batch, hidden_size)
"""
attn_weights = F.softmax(torch.matmul(torch.squeeze(encoder_outputs),
torch.squeeze(decoder_hidden).view(-1, 1)))
attn_weights = attn_weights.expand(encoder_outputs.shape[0], -1)
attn_output = torch.sum(attn_weights * torch.squeeze(encoder_outputs), dim=0)
return attn_output.view(1, 1, -1)
# +
# attention = Attention(100)
# decoder_hidden = torch.randn(1, 1, 100)
# encoder_outputs = torch.randn(6, 1, 100)
# attention(decoder_hidden, encoder_outputs).shape
# -
# ## Decoder
# + jupyter={"outputs_hidden": false}
class DecoderGRU(nn.Module):
    """Decoder with an attention mechanism"""
def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1):
super(DecoderGRU, self).__init__()
# Keep parameters for reference
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.attention = Attention(hidden_size)
# Define layers
self.embedding = nn.Embedding(output_size, hidden_size)
        # use a GRU instead of a plain RNN
# self.rnn = nn.RNN(hidden_size, hidden_size, n_layers, dropout=dropout_p)
self.rnn = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)
self.out = nn.Linear(hidden_size*2, output_size)
def forward(self, word_input, last_hidden, encoder_outputs):
# Note: we run this one step at a time
word_embedded = self.embedding(word_input).view(1, 1, -1) # S=1 x B x N
rnn_output, hidden = self.rnn(word_embedded, last_hidden)
rnn_output = rnn_output.squeeze(0)
# attention weighted encoder output
attn_weighted_encoder_output = self.attention(hidden, encoder_outputs)
attn_weighted_encoder_output = attn_weighted_encoder_output.squeeze(0)
concat_output = torch.cat([rnn_output, attn_weighted_encoder_output], dim=1)
output = F.log_softmax(self.out(concat_output))
return output, hidden
# +
# decoder = DecoderGRU(100, output_lang.n_words)
# decoder.to(device)
# decoder_hidden = encoder_hidden.view(1, 1, -1)
# decoer_output, decoder_hidden = decoder(variable_from_sentence(output_lang, pair[1])[0],
# decoder_hidden,
# encoder_outputs)
# print('decoder_output shape: %s, decoder_hidden shape: %s' % (decoer_output.shape, decoder_hidden.shape))
# -
# # Training
#
# For training, we first feed the input sentence through the encoder word by word, keeping track of every output and of the latest hidden state. Next, the decoder is given the encoder's last hidden state as its first hidden state, and `<SOS>` as its first input. From there we iteratively predict the next word from the decoder.
#
# **Teacher Forcing and Scheduled Sampling**
#
# "Teacher Forcing" means decoding at every step from the fully correct context. The model trains very quickly this way, but it creates a sizeable gap between the inference setting and the training setting, because at inference time the context consists of the model's own (possibly inaccurate) predictions; see the [paper](http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf) for details.
#
# Looking at the output of a teacher-forced network, we can see that its grammar is fluent but it drifts away from the correct translation. One can think of it as having learned how to follow the teacher's hints without learning how to explore on its own.
#
# The approach that addresses the teacher-forcing problem is called "Scheduled Sampling" ([paper](https://arxiv.org/abs/1506.03099)): during training it switches between using the target value and using the predicted value. We choose randomly during training - sometimes the ground-truth target is used as the next input (ignoring the decoder's output), and sometimes the decoder's own output is used.
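# The train() function below uses a fixed teacher_forcing_ratio of 0.5. A truly "scheduled" variant would decay that probability as training progresses; a minimal sketch (the linear schedule and its parameters are an assumption, not something used later in this notebook):

# +
def scheduled_teacher_forcing_ratio(epoch, n_epochs, start=1.0, end=0.0):
    """Linearly decay the probability of teacher forcing from start to end."""
    return max(end, start - (start - end) * epoch / n_epochs)

# inside the training loop one could then write:
# use_teacher_forcing = random.random() < scheduled_teacher_forcing_ratio(epoch, n_epochs)
# -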
# + jupyter={"outputs_hidden": false}
teacher_forcing_ratio = 0.5
clip = 5.0
def train(input_variable, target_variable,
encoder, decoder,
encoder_optimizer, decoder_optimizer,
criterion, max_length=MAX_LENGTH):
# Zero gradients of both optimizers
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
loss = 0 # Added onto for each word
# Get size of input and target sentences
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
# Run words through encoder
encoder_hidden = encoder.init_hidden()
encoder_outputs, encoder_hidden = encoder(input_variable, encoder_hidden)
# Prepare input and output variables
decoder_input = torch.LongTensor([[SOS_token]])
# Use last hidden state from encoder to start decoder
decoder_hidden = encoder_hidden
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Choose whether to use teacher forcing
use_teacher_forcing = random.random() < teacher_forcing_ratio
if use_teacher_forcing:
# Teacher forcing: Use the ground-truth target as the next input
for di in range(target_length):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Next target is next input
else:
# Without teacher forcing: use network's own prediction as the next input
for di in range(target_length):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
# Get most likely word index (highest value) from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = torch.LongTensor([[ni]]) # Chosen word is next input
if USE_CUDA: decoder_input = decoder_input.cuda()
# Stop at end of sentence (not necessary when using known targets)
if ni == EOS_token: break
# Backpropagation
loss.backward()
torch.nn.utils.clip_grad_norm(encoder.parameters(), clip)
torch.nn.utils.clip_grad_norm(decoder.parameters(), clip)
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
# -
# ไธ้ขๆฏ็จไบ่พ
ๅฉ่พๅบ่ฎญ็ปๆ
ๅต็ๅฝๆฐ
# + jupyter={"outputs_hidden": false}
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
# -
# ## Running the GRU training
# + jupyter={"outputs_hidden": false}
hidden_size = 500
n_layers = 1
dropout_p = 0.05
n_epochs = 150000
# Initialize models
encoder = EncoderGRU(input_lang.n_words, hidden_size, n_layers)
decoder = DecoderGRU(hidden_size, output_lang.n_words, n_layers, dropout_p=dropout_p)
# Move models to GPU
if USE_CUDA:
encoder.cuda()
decoder.cuda()
# Initialize optimizers and criterion
learning_rate = 0.0001
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()
# Configuring training
plot_every = 200
print_every = 1000
# Keep track of time elapsed and running averages
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
# Begin!
for epoch in range(1, n_epochs + 1):
# Get training data for this cycle
training_pair = variables_from_pair(random.choice(pairs))
input_variable = training_pair[0]
target_variable = training_pair[1]
# Run the train function
loss = train(input_variable, target_variable, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion)
# Keep track of loss
print_loss_total += loss
plot_loss_total += loss
if epoch == 0: continue
if epoch % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print_summary = 'Epoch %d/%d, %s, %.4f' % (epoch, n_epochs, time_since(start, epoch / n_epochs),
print_loss_avg)
print(print_summary)
if epoch % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
        plot_loss_total = 0
# -
# **Plotting the training loss**
# + jupyter={"outputs_hidden": false}
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
# %matplotlib inline
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals
ax.yaxis.set_major_locator(loc)
plt.plot(points)
show_plot(plot_losses)
# -
# # Model evaluation
# + jupyter={"outputs_hidden": false}
def evaluate(sentence, max_length=MAX_LENGTH):
input_variable = variable_from_sentence(input_lang, sentence)
input_length = input_variable.size()[0]
# Run through encoder
encoder_hidden = encoder.init_hidden()
encoder_outputs, encoder_hidden = encoder(input_variable, encoder_hidden)
# Create starting vectors for decoder
decoder_input = torch.LongTensor([[SOS_token]]) # SOS
if USE_CUDA:
decoder_input = decoder_input.cuda()
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni.item()])
# Next input is chosen word
decoder_input = torch.LongTensor([[ni]])
if USE_CUDA: decoder_input = decoder_input.cuda()
return decoded_words
# -
# Randomly pick a sentence pair and check the model's translation.
def evaluate_randomly():
pair = random.choice(pairs)
output_words = evaluate(pair[0])
output_sentence = ' '.join(output_words)
print('>', pair[0])
print('=', pair[1])
print('<', output_sentence)
print('')
# + jupyter={"outputs_hidden": false}
evaluate_randomly()
# -
' '.join(evaluate('ไบบ็ๆฏๆ่ถฃ็ใ'))
# Random spot checks are only a simple example; to translate the test set systematically we need a new function, implemented below.
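# A toy illustration (hypothetical sentences, not from the corpus) of the input format that torchtext's `bleu_score` expects: candidates are lists of tokens, while references are lists of *lists* of token lists, because each candidate may have several acceptable reference translations.
# +
from torchtext.data.metrics import bleu_score
toy_candidates = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
toy_references = [[['the', 'cat', 'sat', 'on', 'the', 'mat'],
                   ['a', 'cat', 'was', 'sitting', 'on', 'the', 'mat']]]
print(bleu_score(toy_candidates, toy_references))  # 1.0 here, since the candidate exactly matches the first reference
# -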
# +
import collections
from torchtext.data.metrics import bleu_score
# Read the test dataset
with open('cn-eng-test.txt') as f:
lines = f.read().strip().split('\n')
test_pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]
test_pairs_dict = collections.defaultdict(lambda : [])
for pair in test_pairs:
test_pairs_dict[pair[0]].append(pair[1].split(' '))
def evaluate_bleu_score():
candicates = []
references = []
for i, pair in enumerate(test_pairs_dict.items(), start=1):
candicate = evaluate(pair[0])
if candicate[-1] == '<EOS>':
candicate.pop(-1)
candicates.append(candicate)
references.append(pair[1])
score = bleu_score(candicates, references)
return score
# -
print('test dataset bleu score: %s' % evaluate_bleu_score())
| 19,028 |
/data_gathering/get_links_and_pics.ipynb | fe2a42eeb82e4afb6c3e388e5382f9ea846c053b | [] | no_license | leem99/found_in_time | https://github.com/leem99/found_in_time | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,804 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Get Links and Images
#
# * Script goes through the watch listing pages such as [here](https://www.prestigetime.com/luxury-watches-for-men.html).
# * Download all of the watch pictures
# * Get links for all individual watch pages (so that I can go back later and get more watch attributes).
# * Save basic watch attributes to a csv.
#
# Note: Due to the structure of the PrestigeTime website, the code is run separately for men's and women's watches.
# +
# General Libraries
import os
import re
import time
import csv
# Analysis Libraries
import numpy as np
import pandas as pd
# Scraping Libraries
from bs4 import BeautifulSoup
import requests
from fake_useragent import UserAgent
# -
# Randomize Me to Prevent Getting Blocked
def random_soup(url):
us = UserAgent()
user_agent = {'User-Agent':us.random}
response = requests.get(url,headers = user_agent)
page = response.text
soup = BeautifulSoup(page,"lxml")
return soup
# __Loop through all of the pages__
#Get all Watch Links (Not Previously Saved)
#num_pages = 158 #mens
num_pages = 127 #womens
watch_list = []
for ix in range(1,num_pages+1):
time.sleep(1+np.random.uniform(0,2))
#listing_soup = random_soup('https://www.prestigetime.com/luxury-watches-for-men.html&page='+str(ix))
listing_soup = random_soup('https://www.prestigetime.com/luxury-watches-for-women.html&page='+str(ix))
listings = listing_soup.find_all('div',class_='thumbnail thumbnail-center')
for listing in listings:
watch_dict = dict()
#URL
watch_dict['url'] = listing.find('a')['href']
#Image URL
watch_dict['image_url'] = listing.find('img')['src']
#Brand
watch_dict['brand'] = listing.find('strong').text.strip()
# Model Name
watch_dict['model_name'] = listing.find('span',id=re.compile("series-")).text.strip()
# Model Number
watch_dict['model_num'] = listing.find('span',id=re.compile("model_no")).text.strip()
#Price
price = listing.find('div',class_="caption-bottom").text
price = price.split(':')[1]
price = price.replace(',','')
price = re.findall(r"(\d+)\.(\d+)", price)
try:
price = float(price[0][0] +'.'+ price[0][1])
except:
IndexError
price = np.nan
watch_dict['price'] = price
# Image Name
image_name = watch_dict['brand'] + watch_dict['model_name'] + watch_dict['model_num']
image_name = re.sub('[^0-9a-zA-Z]+', '', image_name)
watch_dict['image_name'] = image_name
watch_list.append(watch_dict)
if image_name+'.jpg' not in os.listdir('prestige_time_pics/'):
# Download Image
us = UserAgent()
user_agent = {'User-Agent':us.random}
time.sleep(1+np.random.uniform(0,2))
response = requests.get(watch_dict['image_url'],headers = user_agent)
#f = open('../prestige_time_pics_mens/'+image_name+'.jpg','wb')
f = open('../prestige_time_pics_womens/'+image_name+'.jpg','wb')
        f.write(response.content)  # reuse the response fetched above so the randomized user agent is kept
f.close()
print(ix)
# __Save Summaries to CSV__
watch_DF = pd.DataFrame(watch_list)
watch_DF.to_csv('watch_page_list_womens.csv',index=False)
#watch_DF.to_csv('watch_page_list_mens.csv',index=False)
# ... and launch JupyterLab. You should now see "U4-S1-NLP (Python3)" in the list of available kernels on the launch screen.
# +
# Dependencies for the week (instead of conda)
# Run if you're using colab, otherwise you should have a local copy of the data
# #!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-4-Sprint-1-NLP/main/requirements.txt
# # !pip install -r requirements.txt
# + jupyter={"outputs_hidden": true}
# You'll use en_core_web_sm for the sprint challenge due to memory constraints on Codegrader
# #!python -m spacy download en_core_web_sm
# Locally (or on colab) let's use en_core_web_lg
# !python -m spacy download en_core_web_lg # Can do lg, takes awhile
# Also on Colab, need to restart runtime after this step!
# + [markdown] id="I0ssyXeiGEqc" toc-hr-collapsed=false
# # Tokenize Text (Learn)
# <a id="p1"></a>
# + [markdown] id="sd6cxaNTGEqc" toc-hr-collapsed=true
# ## Overview
#
# > **token**: an instance of a sequence of characters in some particular document that are grouped together as a useful semantic unit for processing
#
# > [_*Introduction to Information Retrival*_](https://nlp.stanford.edu/IR-book/)
#
#
# ### The attributes of good tokens
#
# * Should be stored in an iterable data structure
# - Allows analysis of the "semantic unit"
# * Should be all the same case
# - Reduces the complexity of our data
# * Should be free of non-alphanumeric characters (ie punctuation, whitespace)
# - Removes information that is probably not relevant to the analysis
# + [markdown] id="dK-EKGVNGEqd"
# Let's pretend we are trying to analyze the random sequence here. Question: what is the most common character in this sequence?
# + id="NODbGehhGEqe"
random_seq = "AABAAFBBBBCGCDDEEEFCFFDFFAFFZFGGGGHEAFJAAZBBFCZ"
# + [markdown] id="Uj0FHiJEGEqh"
# A useful unit of analysis for us is going to be a letter or character
# + colab={"base_uri": "https://localhost:8080/", "height": 54} id="OFWePC6XGEqh" outputId="41945a1a-6a50-4419-fcad-a08b4fe536cc"
tokens = list(random_seq)
print(tokens)
# + [markdown] id="8tbp-hyDGEql"
# Our tokens are already "good": in an iterable datastructure, all the same case, and free of noise characters (punctuation, whitespace), so we can jump straight into analysis.
# + colab={"base_uri": "https://localhost:8080/", "height": 319} id="mFQcACruGEql" outputId="5f37008f-1886-498a-b4f6-6356a65059a3"
import seaborn as sns
sns.countplot(tokens);
# + [markdown] id="o3TbbxfHGEqo"
# The most common character in our sequence is "F". We can't just glance at the sequence to know which character is the most common. We (humans) struggle to subitize complex data (like random text sequences).
#
# > __Subitize__ is the ability to tell the number of objects in a set, quickly, without counting.
#
# We need to chunk the data into countable pieces, "tokens", before we can analyze it. This inability to subitize text data is the motivation for our discussion today.
# + [markdown] id="UMa8NJjlGEqo" toc-hr-collapsed=true
# ### Tokenizing with Pure Python
# + id="im96HX4XGEqp"
sample = "Friends, Romans, countrymen, lend me your ears;"
# + [markdown] id="Q8ACUekrGEqr"
# ##### Iterable Tokens
#
# A string object in Python is already iterable. However, the item you iterate over is a character not a token:
#
# ```
# from time import sleep
# for num, character in enumerate(sample):
# sleep(.5)
# print(f"Char {num} - {character}", end="\r")
# ```
#
# If we instead care about the words in our sample (our semantic unit), we can use the string method `.split()` to separate the whitespace and create iterable units. :)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Q5Vh69V5GEqr" outputId="c16dd32b-fb89-4598-ef93-8a7b8c17122d"
sample.split(" ")
# + [markdown] id="3h3fMFY0GEqu"
# ##### Case Normalization
# A common data cleaning task with tokens is to standardize, or normalize, the case. Normalizing case reduces the chance that you have duplicate records for things which have practically the same semantic meaning. You can use either the `.lower()` or `.upper()` string methods to normalize case.
#
# Consider the following example:
# + id="i2K43cyJGEqu"
import pandas as pd
df = pd.read_csv('./data/Datafiniti_Amazon_Consumer_Reviews_of_Amazon_Products_May19.csv/Datafiniti_Amazon_Consumer_Reviews_of_Amazon_Products_May19.csv')
# -
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 85} deletable=false id="vbsvd0VvGEqx" nbgrader={"cell_type": "code", "checksum": "e986ee4afc96df48ac674f9b99732272", "grade": false, "grade_id": "cell-a170e7dda094d54e", "locked": false, "schema_version": 3, "solution": true, "task": false} outputId="ee9a8342-c843-449e-ac69-b52d6668bb05"
# Get the count of how many times each unique brand occurs
# YOUR CODE HERE
df['brand'] = df['brand'].apply(lambda txt: txt.lower())
# -
df['brand'].value_counts()
# + [markdown] id="YkhFYsNXGEq1"
# ##### Keep Only Alphanumeric Characters
# Yes, we only want letters and numbers. Everything else is probably noise: punctuation, whitespace, and other notation. This one is a little bit more complicated than our previous examples. Here we will have to import the base package `re` (regular expressions).
#
# The only regex expression pattern you need for this is `'[^a-zA-Z 0-9]'` which keeps lower case letters, upper case letters, spaces, and numbers.
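# A quick throwaway illustration of that pattern on a made-up string (hypothetical example, separate from `sample`):
# +
import re
print(re.sub('[^a-zA-Z 0-9]', '', "Hello, world! It's 5 o'clock #NLP"))  # punctuation disappears; letters, digits and spaces survive
# -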
# + colab={"base_uri": "https://localhost:8080/", "height": 37} id="YRY7kKSAGEq4" outputId="050ddca4-0c50-40bb-95be-c06c4e3d1055"
sample = sample +" 911"
print(sample)
# -
sample.split(' ')
# + colab={"base_uri": "https://localhost:8080/", "height": 37} id="YRY7kKSAGEq4" outputId="050ddca4-0c50-40bb-95be-c06c4e3d1055"
import re
clean_text = re.sub('[^a-zA-Z 0-9]', '', sample)
# -
# Clean token example
clean_token = clean_text.rstrip(' ').lower().split(' ')
clean_token
# + [markdown] id="obloLh7rGEq7"
# #### Two Minute Challenge
# - Complete the function `tokenize` below
# - Combine the methods which we discussed above to clean text before we analyze it
# - You can put the methods in any order you want
# + deletable=false id="0zgbOnoIGEq7" nbgrader={"cell_type": "code", "checksum": "0f09207520f5a3a425756889e9cf78aa", "grade": false, "grade_id": "cell-42630c1891924a1a", "locked": false, "schema_version": 3, "solution": true, "task": false}
def tokenize(text):
"""Parses a string into a list of semantic units (words)
Args:
text (str): The string that the function will tokenize.
Returns:
list: tokens parsed out by the mechanics of your choice
"""
# YOUR CODE HERE
# Use regex to remove non-alphabetical chars
remove_non_alpha = "[^a-zA-Z ]"
clean_text = re.sub(remove_non_alpha,'', text)
# Case normalization
norm_text = clean_text.lower()
return norm_text.split()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="qWsYy-LqGEq9" outputId="22ff3fdc-2185-4224-b42e-42ccaeb9663e"
# this should be your output
tokenize(sample)
# + [markdown] id="erSMd4diGEq_" toc-hr-collapsed=true
# ## Follow Along
#
# Our inability to analyze text data becomes quickly amplified in a business context. Consider the following:
#
# A business which sells widgets also collects customer reviews of those widgets. When the business first started out, they had a human read the reviews to look for patterns. Now, the business sells thousands of widgets a month. The human readers can't keep up with the pace of reviews to synthesize an accurate analysis. They need some science to help them analyze their data.
#
# Now, let's pretend that business is Amazon, and the widgets are Amazon products such as the Alexa, Echo, or other AmazonBasics products. Let's analyze their reviews with some counts. This dataset is available on [Kaggle](https://www.kaggle.com/datafiniti/consumer-reviews-of-amazon-products/).
# -
# !python -m spacy download en_core_web_lg
# + id="8Ap1zL81GErA"
"""
Import Statements
"""
# Base
from collections import Counter
import re
import pandas as pd
# Plotting
import squarify
import matplotlib.pyplot as plt
import seaborn as sns
# NLP Libraries
import spacy
from spacy.tokenizer import Tokenizer
from nltk.stem import PorterStemmer
# Load our spacy english language model
nlp = spacy.load('en_core_web_lg')
# + colab={"base_uri": "https://localhost:8080/", "height": 267} id="ydhFysF-GErC" outputId="a6ba71f2-a186-441c-a00f-0bb9f0b6dc76"
df.head(2)
# + colab={"base_uri": "https://localhost:8080/", "height": 884} deletable=false id="Lkl_l_3KGErH" nbgrader={"cell_type": "code", "checksum": "c9af223b0b92d4f38bf65a8a59b77553", "grade": false, "grade_id": "cell-afe39f461a4852ac", "locked": false, "schema_version": 3, "solution": true, "task": false} outputId="23b1f11e-5349-46d7-8823-e1a46a93747f"
# View counts of product review categories
df['primaryCategories'].value_counts()
# -
#
electronic_mask = df['primaryCategories'] == 'Electronics'
df = df[electronic_mask]
df['primaryCategories'].value_counts()
# ### Create Tokens
df['tokens'] = df["reviews.text"].apply(tokenize)
df['tokens']
# + [markdown] id="8hOBAw2yGErU"
# #### Analyzing Tokens
# + colab={"base_uri": "https://localhost:8080/", "height": 187} deletable=false id="6jVvZAvJGErU" nbgrader={"cell_type": "code", "checksum": "4fcf360b68c204e6742580d513c4239e", "grade": false, "grade_id": "cell-1df54ac52c426166", "locked": false, "schema_version": 3, "solution": true, "task": false} outputId="79b4c6c6-5dda-4903-e472-94b4530cd049"
# Object from Base Python
from collections import Counter
# YOUR CODE HERE
word_counter = Counter()
df['tokens'].apply(lambda token: word_counter.update(token))
# -
word_counter.most_common(10)
# + [markdown] id="TiVHbw6xGErW"
# Let's create a function which takes a corpus of documents and returns a dataframe of word counts for us to analyze.
# + id="ypyH-_x1GErX"
def count(tokens):
"""
    Calculates some basic statistics about tokens in our corpus (i.e. a corpus is a collection of text documents)
"""
# stores the count of each token
word_counts = Counter()
# stores the number of docs that each token appears in
appears_in = Counter()
total_docs = len(tokens)
for token in tokens:
# stores count of every appearance of a token
word_counts.update(token)
# use set() in order to not count duplicates, thereby count the num of docs that each token appears in
appears_in.update(set(token))
# build word count dataframe
temp = zip(word_counts.keys(), word_counts.values())
wc = pd.DataFrame(temp, columns = ['word', 'count'])
    # rank the word counts
wc['rank'] = wc['count'].rank(method='first', ascending=False)
total = wc['count'].sum()
# calculate the percent total of each token
wc['pct_total'] = wc['count'].apply(lambda token_count: token_count / total * 100)
# calculate the cumulative percent total of word counts
wc = wc.sort_values(by='rank')
wc['cul_pct_total'] = wc['pct_total'].cumsum()
# create dataframe for document stats
t2 = zip(appears_in.keys(), appears_in.values())
ac = pd.DataFrame(t2, columns=['word', 'appears_in'])
# merge word count stats with doc stats
wc = ac.merge(wc, on='word')
wc['appears_in_pct'] = wc['appears_in'].apply(lambda x: x / total_docs * 100)
return wc.sort_values(by='rank')
# + id="GqqwygrUGErZ"
# Use the Function
wc = count(df['tokens'])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="_0EkCpReGEra" outputId="9a6a6a11-b9ea-49ab-f07e-d8bdadd781c1"
wc.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="u9kI5BjnGErc" outputId="8622477c-d7c7-4d6c-c58e-994ae23b65ee"
import seaborn as sns
# Cumulative Distribution Plot
plt.figure(figsize=(15,6))
plt.grid()
sns.lineplot(x='rank', y='cul_pct_total', data=wc);
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="GKpixh5DGEre" outputId="0611cd25-46ce-4e16-edd3-d1ae2ca09e92"
wc[wc['rank'] <= 350]['cul_pct_total'].max()
# + colab={"base_uri": "https://localhost:8080/", "height": 248} id="-yuCq8nuGErg" outputId="a4d60c06-06a5-47cc-84ad-cd1faa8d30de"
import squarify
import matplotlib.pyplot as plt
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8)
plt.axis('off')
plt.show()
/sklearn_logistic_regression.ipynb | 7eabf259546e0cdfbbef3dee0af9fc33d2664bda | [] | no_license | yash2798/Alexa_Project | https://github.com/yash2798/Alexa_Project | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,243 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yash2798/Alexa_Project/blob/master/sklearn_logistic_regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="PDf_Tjerhjov"
import os
import tarfile
from six.moves import urllib
# + [markdown] id="eH88SbYTiPNA"
# # New Section
l(scale=eps, size=(n,1))
Y = Y_true + eps
return X,Y,Y_true
def visualize(X, Y, Y_true, mu, sigma, title=None):
sns.scatterplot(X[:,0], Y.flatten(), label='Noisy measurement')
sns.lineplot(X[:,0], Y_true.flatten(), label='True function')
plt.xlabel('X')
plt.ylabel('Y')
if not title is None:
plt.title(title)
# estimation
Y_pred = np.matmul(X,mu)
sns.lineplot(X[:,0].flatten(), Y_pred.flatten(), color='lightgreen', label='Predicted Mean')
Y0 = np.matmul(X,mu-np.sqrt(np.array([[sigma[0,0]],[0]])))
Y1 = np.matmul(X,mu+np.sqrt(np.array([[sigma[0,0]],[0]])))
plt.fill_between(X[:,0].flatten(),Y0[:,0],Y1[:,0], color='lightgreen', alpha=0.3, label='Predicted std')
Y0 = np.matmul(X,mu-np.sqrt(np.array([[0],[sigma[1,1]]])))
Y1 = np.matmul(X,mu+np.sqrt(np.array([[0],[sigma[1,1]]])))
plt.fill_between(X[:,0].flatten(),Y0[:,0],Y1[:,0], color='lightgreen', alpha=0.3)
plt.legend()
for n in [3,20,100]:
plt.figure(figsize=(10,5))
X,Y,Y_true = gen_data(n=n, eps=0.1)
# prior
mu = np.array([[0],[0]])
b = 5
sigma = np.eye(2)*b**2
plt.subplot(1,2,1)
visualize(X,Y,Y_true,mu,sigma,title='n={}, Prior'.format(n))
print(mu,sigma)
# likelihood
a = 1
# posterior
ATA = np.matmul(X.T, X)
Lam = ATA/a**2 + np.eye(2)/b**2
sigma_post = np.linalg.inv(Lam)
    mu_post = np.matmul(sigma_post, (np.matmul(X.T, Y)/a**2 + mu/b**2))
plt.subplot(1,2,2)
visualize(X,Y,Y_true,mu_post,sigma_post,title='n={}, Posterior'.format(n))
print(mu_post, sigma_post)
| 2,256 |
/week13/day1/theory/SimpleRNN-time-series.ipynb | a7dee094e3de987d5b0a432de9a40c410112cc17 | [] | no_license | RMolleda/Data_science_RM | https://github.com/RMolleda/Data_science_RM | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,424,255 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time-series prediction with Keras `SimpleRNN` class
# ### Dr. Tirthajyoti Sarkar, Fremont, CA 94536 ([LinkedIn](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/), [Github](https://tirthajyoti.github.io))
#
# For more tutorial-style notebooks on deep learning, **[here is my Github repo](https://github.com/tirthajyoti/Deep-learning-with-Python)**.
#
# For more tutorial-style notebooks on general machine learning, **[here is my Github repo](https://github.com/tirthajyoti/Machine-Learning-with-Python)**.
#
# ---
# ### What is this Notebook about?
# In this notebook, we show a building simple recurrent neural network (RNN) using Keras.
#
# We will generate some synthetic time-series data by multiplying two periodic/ sinusoidal signals and adding some stochasticity (Gaussian noise). Then, we will take a small fraction of the data and train a simple RNN model with it and try to predict the rest of the data and see how the predictions match up with the ground truth.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import Callback
# +
# Total time points
N = 3000
# Time point to partition train/test splits
Tp = 750
t = np.arange(0,N)
x = (2*np.sin(0.02*t)*np.sin(0.003*t))+0.5*np.random.normal(size=N)
df = pd.DataFrame(x, columns=['Data'])
len(df)
# -
plt.figure(figsize=(15,4))
plt.plot(df, c='blue')
plt.grid(True)
plt.show()
# ### Split the values in train and test
#
# So, we took only 25% of the data as training samples and set aside the rest of the data for testing.
#
# Looking at the time-series plot, we think **it is not easy for a standard model to come up with correct trend predictions.**
values = df.values
train, test = values[0:Tp ,:], values[Tp:N,:]
print("Train data length:", train.shape)
print("Test data length:", test.shape)
index = df.index.values
plt.figure(figsize=(15,4))
plt.plot(index[0:Tp],train,c='blue')
plt.plot(index[Tp:N],test,c='orange',alpha=0.7)
plt.legend(['Train','Test'])
plt.axvline(df.index[Tp], c="r")
plt.grid(True)
plt.show()
# ### Step (or _embedding_)
# RNN model requires a step value that contains n number of elements as an input sequence.
#
# Suppose x = {1,2,3,4,5,6,7,8,9,10}
#
# for step=1, x input and its y prediction become:
#
# | x | y |
# |---|---|
# | 1 | 2 |
# | 2 | 3 |
# | 3 | 4 |
# | ... | ... |
# | 9 | 10 |
#
# for step=3, x and y contain:
#
# | x | y |
# |---|---|
# | 1,2,3 | 4 |
# | 2,3,4 | 5 |
# | 3,4,5 | 6 |
# | ... | ... |
# | 7,8,9 | 10 |
#
# Here, we choose `step=4`. In more complex RNN and in particular for text processing, this is also called _embedding size_.
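# The short sketch below (using a hypothetical `demo` sequence, not the dataset) shows the step=3 windowing from the table above: each input row holds `step` consecutive values and the target is the value that follows.
# +
demo = np.arange(1, 11)  # the sequence 1..10
demo_step = 3
X_demo = np.array([demo[i:i + demo_step] for i in range(len(demo) - demo_step)])
y_demo = np.array([demo[i + demo_step] for i in range(len(demo) - demo_step)])
print(X_demo[:3])  # [[1 2 3] [2 3 4] [3 4 5]]
print(y_demo[:3])  # [4 5 6]
# -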
train.shape
step = 4
np.append(train,np.repeat(train[-1,],step)).shape
train[-1,]
np.repeat(train[-1],10)
train[-8:]
# +
step = 4
# add step elements into train and test
test = np.append(test, np.repeat(test[-1,],step))
train = np.append(train, np.repeat(train[-1,],step))
# -
train[-8:]
print("Train data length:", train.shape)
print("Test data length:", test.shape)
# ### Converting to a multi-dimensional array
# Next, we'll convert test and train data into the matrix with step value as it has shown above example.
def convert_to_matrix(data, step):
X, Y = [], []
for i in range(len(data)-step):
d = i+step
X.append(data[i:d,])
Y.append(data[d,])
return np.array(X), np.array(Y)
# +
trainX, trainY = convert_to_matrix(train,step)
testX, testY = convert_to_matrix(test,step)
trainX.shape
# -
# 750 chunks of 4 values each
trainX[0]
trainX[1]
trainX[2]
trainX[3]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
trainX.shape
# (9, 3, 1, 2, 1, 4)
# ### Example of array dimensions
#
# [1,4,2,3] --> shape (4,)
#
# [[1,4,2,3], [1,4,2,3]] --> shape (2, 4)
#
# [[[1,4,2,3], [1,4,2,3]]] --> shape (1, 2, 4)
#
# [[[[1,4,2,3], [1,4,2,3]]], [[[1,4,2,3], [1,4,2,3]]]] --> shape (2, 1, 2, 4)
# To train the model, the data needs to have the following shape:
#
# (750, 1, 4)
#
# - 750: the total number of chunks
# - 1: one row of data per chunk
# - 4: each chunk holds four values
#
# For comparison, in the case of images the shape would be, for example:
#
# (750, 28, 28)
#
# 750 images of 28x28 resolution
print("Training data shape:", trainX.shape,', ',trainY.shape)
print("Test data shape:", testX.shape,', ',testY.shape)
# ### Keras model with `SimpleRNN` layer
#
# - 128 neurons in the RNN layer (the function's default below)
# - 32 neurons in the densely connected layer
# - a single neuron for the output layer
# - ReLu activation
# - learning rate: 0.001
def build_simple_rnn(num_units=128, embedding=4, num_dense=32, lr=0.001):
"""
Builds and compiles a simple RNN model
Arguments:
    num_units: Number of units of the simple RNN layer
    embedding: Embedding length (the step, i.e. window size)
num_dense: Number of neurons in the dense layer followed by the RNN layer
lr: Learning rate (uses RMSprop optimizer)
Returns:
A compiled Keras model.
"""
model = Sequential()
model.add(SimpleRNN(units=num_units, input_shape=(1, embedding), activation="relu"))
model.add(Dense(num_dense, activation="relu"))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer=RMSprop(lr=lr),metrics=['mse'])
return model
model = build_simple_rnn() # Taking the defaults
model.summary()
# ### A simple callback class to show a message every 50 epochs
class MyCallback(Callback):
def on_epoch_end(self, epoch, logs=None):
if (epoch+1) % 50 == 0 and epoch>0:
print("Epoch number {} done".format(epoch+1))
# ### Fit the model
# With batch_size = 16, each training step takes the data in batches of shape:
#
# - (16, 1, 4)
#
# i.e. 16 chunks, each of 1 row with 4 values.
batch_size=16
num_epochs = 1000
model.fit(trainX,trainY,
epochs=num_epochs,
batch_size=batch_size,
callbacks=[MyCallback()],verbose=0)
# ### Plot loss
plt.figure(figsize=(7,5))
plt.title("RMSE loss over epochs",fontsize=16)
plt.plot(np.sqrt(model.history.history['loss']),c='k',lw=2)
plt.grid(True)
plt.xlabel("Epochs",fontsize=14)
plt.ylabel("Root-mean-squared error",fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
# ### Predictions
# Note that the model was fitted only with the `trainX` and `trainY` data.
plt.figure(figsize=(5,4))
plt.title("This is what the model saw",fontsize=18)
plt.plot(trainX[:,0][:,0],c='blue')
plt.grid(True)
plt.show()
plt.figure(figsize=(5,4))
plt.title("This is what the model saw",fontsize=18)
plt.plot(testX[:,0][:,0],c='blue')
plt.grid(True)
plt.show()
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# predicted contiene todo el conjunto de datos predicho por nuestro modelo
predicted = np.concatenate((trainPredict,testPredict),axis=0)
plt.figure(figsize=(10,4))
plt.title("This is what the model predicted",fontsize=18)
plt.plot(testPredict,c='orange')
plt.grid(True)
plt.show()
# ### Comparing it with the ground truth (test set)
index = df.index.values
plt.figure(figsize=(15,4))
plt.title("Ground truth and prediction together",fontsize=18)
plt.plot(index,df,c='blue')
plt.plot(index,predicted,c='orange',alpha=0.75)
plt.legend(['True data','Predicted'],fontsize=15)
plt.axvline(df.index[Tp], c="r")
plt.grid(True)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
# ### How are the errors distributed?
# The errors, or residuals, as they are called in a regression problem, can be plotted to see if they follow any specific distribution. In the generation process, we injected Gaussian noise, so we expect the error to follow the same pattern, _if the model has been able to fit to the real data correctly_.
error = predicted[Tp:N]-df[Tp:N]
error = np.array(error).ravel()
plt.figure(figsize=(7,5))
plt.hist(error,bins=25,edgecolor='k',color='orange')
plt.show()
plt.figure(figsize=(15,4))
plt.plot(error,c='blue',alpha=0.75)
plt.hlines(y=0,xmin=-50,xmax=2400,color='k',lw=3)
plt.xlim(-50,2350)
plt.grid(True)
plt.show()
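# Beyond eyeballing the histogram, a quick statistical check of the residuals computed above (a sketch that assumes SciPy is available; it was not part of the original analysis):
# +
from scipy import stats
stat, p_value = stats.normaltest(error)  # D'Agostino-Pearson test of normality
print("Residual mean: %.3f, std: %.3f, normality-test p-value: %.4f" % (error.mean(), error.std(), p_value))
# -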
# ## Make the model better
#
# Note, for running these experiments reasonably fast, we will fix the model size to be smaller than the model above. We will use a RNN layer with 32 neurons followed by a densely connected layer of 8 neurons.
# ### Varying the embedding/step size
def predictions(model,trainX,testX):
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
predicted = np.concatenate((trainPredict,testPredict),axis=0)
return predicted
def plot_compare(predicted):
index = df.index.values
plt.figure(figsize=(15,4))
plt.title("Ground truth and prediction together",fontsize=18)
plt.plot(index,df,c='blue')
plt.plot(index,predicted,c='orange',alpha=0.75)
plt.legend(['True data','Predicted'],fontsize=15)
plt.axvline(df.index[Tp], c="r")
plt.grid(True)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def prepare_data(step=4):
values = df.values
train, test = values[0:Tp,:], values[Tp:N,:]
test = np.append(test,np.repeat(test[-1,],step))
train = np.append(train,np.repeat(train[-1,],step))
    trainX, trainY = convert_to_matrix(train,step)
    testX, testY = convert_to_matrix(test,step)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
return trainX,testX,trainY,testY
from sklearn.metrics import mean_squared_error, mean_absolute_error
def errors(testX, df):
y_true = df[Tp:N].values
y_pred = model.predict(testX)
error = y_pred - y_true
return [mean_absolute_error(y_true=y_true, y_pred=y_pred)]
for s in [2,4,6,8,10,12]: # s = step (window size)
trainX,testX,trainY,testY = prepare_data(s)
model = build_simple_rnn(num_units=32,num_dense=8, embedding=s)
batch_size=16
num_epochs = 100
model.fit(trainX,trainY,
epochs=num_epochs,
batch_size=batch_size,
verbose=0)
preds = predictions(model,trainX,testX)
print("Embedding size: {}".format(s))
print("Error (mae):", errors(testX, df))
print("-"*100)
plot_compare(preds)
print()
# ### Number of epochs
for e in [100,200,300,400,500]:
trainX, testX, trainY, testY = prepare_data(2)
model = build_simple_rnn(num_units=32,num_dense=8,embedding=2)
batch_size=8
num_epochs = e
model.fit(trainX,trainY,
epochs=num_epochs,
batch_size=batch_size,
verbose=0)
preds = predictions(model,trainX,testX)
print("Ran for {} epochs".format(e))
print("Error:", errors(testX, df))
print("-"*100)
plot_compare(preds)
print()
# ### Batch size
# +
best_step = 2
for b in [2,4,8,16,32,64]:
trainX,testX,trainY,testY = prepare_data(best_step)
model = build_simple_rnn(num_units=32,num_dense=8,embedding=best_step)
batch_size=b
num_epochs = 250
model.fit(trainX,trainY,
epochs=num_epochs,
batch_size=batch_size,
verbose=0)
preds = predictions(model,trainX,testX)
print("Ran with batch size: {}".format(b))
print("Error:", errors(testX, df))
print("-"*100)
plot_compare(preds)
print()
# -
# Ultimately, an exhaustive hyperparameter tuning is needed for the best overall performance. Also, the predictive power is not well-defined as we are judging the quality of the prediction mostly visually here but a neumerical metric (or a few of them) would be a better approach.
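# A sketch of a more systematic sweep over the three knobs explored above (an illustrative grid only, not an exhaustive search, and it retrains several models):
# +
import itertools
sweep_results = {}
for s, e, b in itertools.product([2, 4], [100, 250], [8, 16]):
    trainX, testX, trainY, testY = prepare_data(s)
    m = build_simple_rnn(num_units=32, num_dense=8, embedding=s)
    m.fit(trainX, trainY, epochs=e, batch_size=b, verbose=0)
    # test MAE for this (step, epochs, batch_size) combination
    sweep_results[(s, e, b)] = mean_absolute_error(df[Tp:N].values, m.predict(testX))
print(sorted(sweep_results.items(), key=lambda kv: kv[1])[:3])  # three best combinations by MAE
# -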
| 12,095 |
/graph_Weights/.ipynb_checkpoints/Untitled-checkpoint.ipynb | 0b46b6a769eeb6011f7fbad4f76e127d4fa61dc8 | [] | no_license | coderXmachina2/Deep-learning-Meerkat-RFI-removal | https://github.com/coderXmachina2/Deep-learning-Meerkat-RFI-removal | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 10,174 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
#from requests_html import HTML
from string import punctuation
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
# +
stop_words = ['the','a','an','and','but','if','or','because','as','what','which','this','that','these','those','then',
'just','so','than','such','both','through','about','for','is','of','while','during','to','What','Which',
'Is','If','While','This']
def clean_doc2tokens(text, remove_stop_words=True, stem_words=False):
# Clean the text
text = re.sub(r"[^A-Za-z0-9]", " ", text)
text = re.sub(r"what's", "", text)
text = re.sub(r"What's", "", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"I'm", "I am", text)
text = re.sub(r" m ", " am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"([0-9])[Kk] ",r"\1 000 ",text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e-mail", "email", text)
text = re.sub(r"\s{2,}", " ", text)
text = re.sub(r"quikly", "quickly", text)
text = re.sub(r" usa ", " America ", text)
text = re.sub(r" USA ", " America ", text)
text = re.sub(r" u s ", " America ", text)
text = re.sub(r" uk ", " England ", text)
text = re.sub(r" UK ", " England ", text)
#text = re.sub(r"india", "India", text)
#text = re.sub(r"switzerland", "Switzerland", text)
#text = re.sub(r"china", "China", text)
text = re.sub(r"chinese", "Chinese", text)
text = re.sub(r"imrovement", "improvement", text)
text = re.sub(r"intially", "initially", text)
#text = re.sub(r"quora", "Quora", text)
text = re.sub(r" dms ", "direct messages ", text)
text = re.sub(r"demonitization", "demonetization", text)
text = re.sub(r"actived", "active", text)
text = re.sub(r"kms", " kilometers ", text)
text = re.sub(r"KMs", " kilometers ", text)
text = re.sub(r" cs ", " computer science ", text)
text = re.sub(r" upvotes ", " up votes ", text)
text = re.sub(r" iPhone ", " phone ", text)
text = re.sub(r"\0rs ", " rs ", text)
text = re.sub(r"calender", "calendar", text)
text = re.sub(r"ios", "operating system", text)
text = re.sub(r"gps", "GPS", text)
text = re.sub(r"gst", "GST", text)
text = re.sub(r"programing", "programming", text)
text = re.sub(r"bestfriend", "best friend", text)
text = re.sub(r"dna", "DNA", text)
text = re.sub(r"III", "3", text)
text = re.sub(r"the US", "America", text)
text = re.sub(r"Astrology", "astrology", text)
text = re.sub(r"Method", "method", text)
text = re.sub(r"Find", "find", text)
text = re.sub(r"banglore", "Banglore", text)
text = re.sub(r" J K ", " JK ", text)
text = text.split()
# Remove punctuation from text
text = [c for c in text if c not in punctuation]
text = [c.lower() for c in text]
# Optionally, remove stop words
if remove_stop_words:
text = [w for w in text if not w in stop_words]
# Optionally, shorten words to their stems
if stem_words:
stemmer = SnowballStemmer('english')
text = [stemmer.stem(word) for word in text]
# Return a list of words
return text
def tokens2doc(tokens, vocab_list):
tokens = ['UNK' if w not in vocab_list else w for w in tokens]
return ' '.join(tokens)
def update_vocab(text, vocab):
    tokens = clean_doc2tokens(text)
    vocab.update(tokens)
# create vocab (clean data, split tokens)
# create clean corpus (clean data again, replace UNK)
# train word vectors
# +
def create_vocab(corpus):
corpus_nounk = list()
vocab = Counter()
loop = 1
for data in corpus:
if loop % 10000 == 0:
print(loop)
tokens = clean_doc2tokens(data)
corpus_nounk.append(tokens)
vocab.update(tokens)
loop = loop + 1
return corpus_nounk,vocab
def create_vocab_list(vocab, min_occurrence):
vocab_freq_list = [[k,c] for k,c in vocab.most_common() if c >= min_occurrence]
return vocab_freq_list
def create_clean_corpus(corpus_nounk, vocab_list):
corpus_withunk = list()
loop = 1
for data in corpus_nounk:
if loop%10000 == 0:
print(loop)
loop = loop + 1
tokens = ['UNK' if w not in vocab_list else w for w in data]
corpus_withunk.append(tokens)
return corpus_withunk
# -
train_pairs = pd.read_csv('/Users/zhang/MscProject_tweak2vec/QuoraQuestionPairs/train.csv',encoding='ISO-8859-1')
pd.options.display.max_colwidth=200
train_pairs[:10]
question1 = train_pairs['question1']
question2 = train_pairs['question2']
is_duplicate = train_pairs['is_duplicate']
questions = []
labels = []
line = 0
for q in zip(question1,question2):
if type(q[0])==str and type(q[1])==str:
questions.append(q[0])
questions.append(q[1])
labels.append(is_duplicate[line])
line = line+1
np.save('quora_labels',np.array(labels))
corpus_clean, vocab = create_vocab(questions)
len(vocab)
np.save('quora_vocaball.npy', vocab)
# +
# >5 30299
# >10 20900
# >20 14468
min_occurrence = 5
vocab_freq_list = create_vocab_list(vocab, min_occurrence)
vocab_list = [w[0] for w in vocab_freq_list]
len(vocab_list)
# -
np.save('quora_vocab5.npy',np.array(vocab_freq_list))
corpus_withunk = create_clean_corpus(corpus_clean, vocab_list)
np.save('quora_corpus_withunk5.npy',np.array(corpus_withunk))
quora_tokens = np.load('/Users/zhang/MscProject_tweak2vec/corpus/quora_corpus_withunk5.npy')
vocab = Counter()
for line in quora_tokens:
vocab.update(line)
len(vocab)
txtName = "/Users/zhang/MscProject_tweak2vec/corpus/quora_train5.txt"
f=open(txtName, "a+")
for line in quora_tokens:
new_context = ' '.join(line)
new_context = new_context + '\n'
f.write(new_context)
f.close()
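# The outline near the top of this notebook ends with "train word vectors"; below is a minimal sketch of that step. It assumes gensim >= 4.0 is installed, and the hyperparameters are purely illustrative, not taken from the original project.
# +
from gensim.models import Word2Vec
w2v_sketch = Word2Vec(sentences=list(quora_tokens), vector_size=100, window=5,
                      min_count=1, workers=4, epochs=5)
print(len(w2v_sketch.wv.index_to_key), 'words in the embedding vocabulary')
# -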
| 6,668 |
/Lung csv/Untitled1.ipynb | 358aa4f77eb6efbaf6afea4b665f2f049d2a99cb | [] | no_license | varunsharma92/MajorProject | https://github.com/varunsharma92/MajorProject | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 10,369 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: tf
# language: python
# name: tf
# ---
from sklearn import datasets
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
iris = datasets.load_iris()
x = iris.data[:, :2]
y = iris.target
x_min, x_max = x[:, 0].min() - .5, x[:, 0].max() + .5
y_min, y_max = x[:, 1].min() - .5, x[:, 1].max() + .5
plt.figure(2, figsize=(8,6))
plt.clf()
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.Set1, edgecolor='k')
plt.xlabel('Sepal Length')
plt.ylabel('Sepal Width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# +
fig = plt.figure(1, figsize=(8,6))
ax = Axes3D(fig, elev=-150, azim=110)
x_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(x_reduced[:, 0], x_reduced[:, 1], x_reduced[:, 2], c=y, cmap=plt.cm.Set1, edgecolor='k', s=40)
ax.set_title("First three PCA Directions")
ax.set_xlabel('First eigenvector')
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel('2nd Eigenvector')
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel('3rd Eigenvector')
ax.w_zaxis.set_ticklabels([])
plt.show()
# -
| 1,305 |
/259. 3Sum Smaller.ipynb | 0638ae4103e3a3ab8edaeb35de1dfa1e5041a6c2 | [] | no_license | EvaXue/Leet_code | https://github.com/EvaXue/Leet_code | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,074 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://leetcode.com/problems/3sum-smaller/
#
# https://www.youtube.com/watch?v=F4UKF07-tvo
class Solution(object):
    def threeSumSmaller(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        if len(nums) < 3:
            return 0
        # sort first so the two-pointer scan in twoSumSmaller is valid
        newnums = sorted(nums)
        counter = 0
        # fix the smallest element of each triplet, then count pairs in the
        # remaining (sorted) suffix whose sum stays below the reduced target
        for i in range(len(newnums) - 2):
            counter += self.twoSumSmaller(newnums[i+1:], target - newnums[i])
        return counter
    def twoSumSmaller(self, nums, target):
        # two-pointer count of pairs (l, r) with nums[l] + nums[r] < target in a sorted list
        counter = 0
        l, r = 0, len(nums) - 1
        while l < r:
            if nums[l] + nums[r] < target:
                # every index between l and r also works as the right partner
                # (the list is sorted), so that is r - l valid pairs at once
                counter += r - l
                l += 1
            else:
                r -= 1
        return counter
test =Solution()
nums=[3,2,6,1,7,-1,-3]
target =9
test.threeSumSmaller(nums,target)
| 1,132 |
/Assignment_2.ipynb | 08634df0cb456acc22cd503d4855348bd0bf051e | [] | no_license | saadhzubairi/Essential-Software | https://github.com/saadhzubairi/Essential-Software | 0 | 1 | null | 2021-02-25T09:29:26 | 2021-02-25T05:19:27 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 3,391 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.8.3 32-bit
# metadata:
# interpreter:
# hash: e6858818f08132fef2d8e9cc72590e325ddf64885e96d19b2831d8fd2b733b18
# name: python3
# ---
# Q1) Your first task is to print all the multiples of 7 in the range 0 to 100 using a for loop. Keep in mind that 0 is also a multiple of 7.
# Q2) Write a code that prints the first 30 cube numbers (x**3), starting with x=0 and ending with x=30.
# Q3) Write code that prints the factorial of a given number. See if you can do it using both for and while loops.
#
# (Remember that the factorial of a number is defined as the product of an integer and all integers before it. For example, the factorial of five (5!) is equal to 1\*2\*3\*4\*5=120. Also recall that the factorial of zero (0!) is equal to 1.)
# Q4) The following code contains an error that will leave it in an infinite loop. Fix the code so that it works for all numbers.
#
# For example:
# When n = 0, we get False;
# When n = 1, we get True;
# When n = 8, we get True;
# When n = 9, we get False;
#
# (The code takes in an integer n and returns True if n is a power of 2 or False if n is not a power of 2)
#
# +
n = 0
# Check if the number can be divided by two without a remainder
while n % 2 == 0:
n = n / 2
# If after dividing by two the number is 1, it's a power of two
if n == 1:
return True
return False
# -
# Q5) If we have a string variable named Weather = "Rainfall", what code will print the substring of all characters before the "f"?
# Q6) When animal = "Hippopotamus", what commands will return (i) "pop" (ii) "t" (iii) "us"?
#
| 1,799 |
/PIL/PIL_module/PIL_2.0.ipynb | ae4065bb524328660cf38404e294d381640fa151 | [] | no_license | husun0822/BlueSky_Project | https://github.com/husun0822/BlueSky_Project | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 227,586 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#By Emmanuel Cocom
import pandas as pd
import numpy as np
# ### READ RAW DATA
# +
#link to problem on kaggle
#https://www.kaggle.com/c/bike-sharing-demand/data
#get untouched dataset
bycle_df = pd.read_csv('train.csv')
#columns
bycle_df.columns
# -
#shape
bycle_df.shape
#df printed out to see raw data
bycle_df[:350:50]
# ### MUST DROP BOTH CASUAL AND REGISTERED COLUMNS. LEAKAGE VARIABLES
#
# They are simply the label we are trying to predict, 'count', split across two columns:
#
# registered + casual = count (label)
#
# we will never know the registered or casual values in any real-life testing, because together they are the label 'count' we are trying to predict!
#
# Keeping them jeopardizes the integrity and usefulness of our model, as it would rely on those columns to predict the label, but they are the label.
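# A quick sanity check of that identity before dropping the two columns (a small sketch, not in the original notebook):
# +
# every row should satisfy casual + registered == count
print((bycle_df['casual'] + bycle_df['registered'] == bycle_df['count']).all())
# -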
# +
#Drop casual
bycle_df.drop('casual', axis=1, inplace=True)
#DROP registered
bycle_df.drop('registered', axis=1, inplace=True)
bycle_df.columns
#both casual and registered are gone
# -
# ## Unsupported Formats: the datetime column cannot be used in its current format: date hour:00.
# #### Data will be extracted from the datetime column to form new columns:
#
# hour : 0-23
#
# day_of_year: 1 - 366 (but the raw data only provides values up to 364)
#
# weekday: 0-6 (0-Sun, 6-Sat)
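# A quick illustration (on a hypothetical timestamp) of the strftime codes used in the next cell:
# +
from datetime import datetime
ts = datetime.strptime('1/1/11 5:00', '%m/%d/%y %H:00')
# %-j = day of year, %w = weekday (0 = Sunday), %Y = year, %-m = month number
print(ts.strftime('%-j'), ts.strftime('%w'), ts.strftime('%Y'), ts.strftime('%-m'))
# -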
# +
from datetime import datetime
#extract hour from datetime
bycle_df['hour'] = bycle_df.datetime.apply(lambda x : x.split()[1].split(':')[0])
#extract day_of_year from datetime
bycle_df['day_of_year'] = bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%-j'))
#extract weekday from datetime
bycle_df['weekday']= bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%w'))
#bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%w'))
#extract weekday from datetime
bycle_df['year']= bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%Y')).to_frame('year')
#extract weekday from datetime
bycle_df['month']= bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%-m')).to_frame('month')
bycle_df.head()
#adding month year features to see if model improves if we can predict by month or year
#year_df = bycle_df.datetime.apply(lambda x : datetime.strptime(x, '%m/%d/%y %H:00').strftime('%Y')).to_frame('year')
#datetime_df = bycle_df.datetime.apply(lambda x: datetime.strptime(x, '%m/%d/%y %H:00').strftime('%-m')).to_frame('month')
#month_year_df = pd.concat([year_df ,datetime_df ], axis=1)
#month_year_df.head()
# -
# ### MISSING VALUES and Corrupt data values
# +
#HELPER FUNCTIONS, TO TEST FOR MISSING OR CORRUPT DATA IN EACH COLUMN/FEATURE
#helper for string data
def check_if_missing_strings(value):
if value == None or value.strip() == '' or value.lower() == 'nan' or value == np.nan:
print(value, j, ' is the missing value')
return np.nan
else:
return value
#Helper for numerical data
def check_if_empty_ints(value):
if value == '' or value == None or value == 'nan' or value == np.nan:
print('missing value')
else:
pass
def check_if_less_one(value):
if value <1:
print('less than one, check it out!')
def check_if_greater_than(value, limit):
    if value > limit:
        print(value, 'is greater than', limit, ', check it out!')
def check_if_less_than(value, limit):
    if value < limit:
        print(value, 'is less than', limit, ', check it out!')
def check_if_less_than_x_greater_than_y(value, x_limit, y_limit):
if value < x_limit:
print(value)
print(' is dangerous data and less than ', x_limit)
if value > y_limit:
print(value)
print(' is dangerous data and greater than ', y_limit)
#checking for both missing and corrupt data( data that does not make sense )
#datetime stamp :: clear
bycle_df.datetime.apply(lambda x: check_if_missing_strings(x)) #results show NO missing values
#season :: clear
bycle_df.season.apply(lambda x: check_if_empty_ints(x))#results show NO missing values
bycle_df.season.apply(lambda x: check_if_less_one(x))#results show NO corrupt values
#holiday :: clear
bycle_df.holiday.apply(lambda x: check_if_empty_ints(x))#results show NO missing values
bycle_df.holiday.apply(lambda x: check_if_less_than_x_greater_than_y(x, 0, 1))#results show NO corrupt values
bycle_df.holiday.unique()
#workingday:: clear
bycle_df.workingday.apply(lambda x: check_if_empty_ints(x))#results show NO missing values
bycle_df.workingday.apply(lambda x: check_if_less_than_x_greater_than_y(x, 0, 1))#results show NO corrupt values
bycle_df.workingday.unique()
#weather:: clear
bycle_df.weather.apply(lambda x: check_if_empty_ints(x))#results show NO missing values
bycle_df.weather.apply(lambda x: check_if_less_than_x_greater_than_y(x, 1, 4))#results show NO corrupt values
bycle_df.weather.unique()
#atemp :: clear
bycle_df.atemp.apply(lambda x: check_if_empty_ints(x))#results show NO missing values
bycle_df.atemp.apply(lambda x: check_if_less_than(x, 0.1))#results show NO corrupt values
bycle_df.atemp.unique()
#-humidity :: NOT CLEAR! --> NO MISSING DATA: HOWEVER ---> CORRUPT DATA: YES
bycle_df.humidity.apply(lambda x: check_if_empty_ints(x)) #results show NO missing values
bycle_df.humidity.unique() #results show YES Corrupt Data, as it's impossible to have 0 humidity on earth
#windspeed :: clear
bycle_df.windspeed.apply(lambda x: check_if_empty_ints(x)) #results show NO missing values
bycle_df.windspeed.unique() #results show No Missing Data, as it is possible to have 0 windspeed
#hour :: clear
bycle_df.hour.apply(lambda x: check_if_empty_ints(x)) #results show NO missing values
bycle_df.hour.apply(lambda x: check_if_less_than_x_greater_than_y(int(x), 0, 23))#results show NO corrupt data values
bycle_df.hour.unique() #results show No Missing Data,
#day_of_year :: clear
bycle_df.day_of_year.apply(lambda x: check_if_empty_ints(x)) #results show NO missing values
bycle_df.day_of_year.apply(lambda x: check_if_less_than_x_greater_than_y(int(x), int(0), int(365)))#results show NO corrupt data values
bycle_df.day_of_year.unique() #results show No Missing Data,
#Weekday :: clear
bycle_df.weekday.apply(lambda x: check_if_empty_ints(x)) #results show NO missing values
bycle_df.weekday.apply(lambda x: check_if_less_than_x_greater_than_y(int(x), int(0), int(6)))#results show NO corrupt data values
bycle_df.weekday.unique() #results show No Missing Data,
print('testing done')
# -
print(bycle_df.shape)
bycle_df.head()
# ### Fixing Corrupt Data - By replacing it with column average
# +
#fixing humidity by adding average of the column to all zero values (as zero is impossible value it means the data is just missing)
import numpy as np
def nan_if_zero(value):
if value == 0:
return np.nan
return value
def mean_if_nan(value, mean):
if value == 0:
return mean
else:
return value
#verify corrupt data
bycle_df['humidity'].unique()
#changes zero corrupt data to numpy.nan value
random_var = bycle_df.humidity.apply(lambda x: nan_if_zero(x))
#finds mean of column, ignores nan values by default
mean_humid = random_var.mean()
#apply mean to all zero corrupt data values in original df column humidity
bycle_df['humidity'] = bycle_df.humidity.apply(lambda x: mean_if_nan(int(x), int(mean_humid)))
#DATA IS CLEARED NOW --> NO CORRUPTION IN HUMIDITY COLUMN
bycle_df['humidity'].unique()
# +
#Numerical features - scaled
# -
# ### One Hot Encode categorical features that are non-binary
# ### Looking at all possible values for all columns that need one Hot Encoding
# +
#categorical features
cat_features = ['season', 'holiday','workingday', 'weather', 'hour', 'month']
#looking for non-binary categorical features to OneHotEncode
non_binary_cat_features = {}
for x in cat_features:
if len(bycle_df[x].unique()) > 2: #only if they have 3 or more possible values
non_binary_cat_features[x] = bycle_df[x].unique()
print('The following need to go through One Hot Encoded Transformation:\n')
for x,y in non_binary_cat_features.items():
print(x, 'has the features ', y)
#features that do no not need to be one hot encoded
non_encoded_features = ['holiday','workingday', 'temp','atemp', 'humidity', 'windspeed', 'day_of_year', 'weekday', 'year']
# -
# #### Column 'Year needs to be changed to binary form'
# +
def year_to_bin(value):
if value == '2011':
return 0
return 1
bycle_df['year']= bycle_df.year.apply(lambda x : year_to_bin(x))
# -
bycle_df.head()
# ### Column 'hour' will be put into 4 bins of 6 hours each. This is to reduce the number of features created by OneHotEncoding: 4 features will be created instead of 24.
# +
#HELPER FUNCTION TO PUT HOUR VALUES INTO BINS
def four_hour_bins(hour):
hour = int(hour)
if hour <=5:
return 1
elif hour <=11:
return 2
elif hour <=17:
return 3
else:
return 4
bycle_df['hour'] = bycle_df.hour.apply(lambda x : four_hour_bins(x))
print('The possible choices of column hour after putting into bins')
print(bycle_df['hour'].unique())
# -
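# A quick check (sketch) of the bin boundaries: hours 0 and 5 fall in bin 1, 6 and 11 in bin 2, 12 and 17 in bin 3, 18 and 23 in bin 4.
# +
print([four_hour_bins(h) for h in [0, 5, 6, 11, 12, 17, 18, 23]])  # [1, 1, 2, 2, 3, 3, 4, 4]
# -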
bycle_df.head()
# #### Actual OHE Proces
# +
#One hot encoding
from sklearn.preprocessing import OneHotEncoder
#create OneHotEncoder object for each one
one_hot_encod_season = OneHotEncoder()
one_hot_encod_weather = OneHotEncoder()
one_hot_encod_hour = OneHotEncoder()
one_hot_encod_month = OneHotEncoder()
#transform values to OneHotEncoding values with new columns for each feature
x_season = one_hot_encod_season.fit_transform(bycle_df.season.values.reshape(-1,1)).toarray()
x_weather = one_hot_encod_weather.fit_transform(bycle_df.weather.values.reshape(-1,1)).toarray()
x_hour = one_hot_encod_hour.fit_transform(bycle_df.hour.values.reshape(-1,1)).toarray()
x_month = one_hot_encod_month.fit_transform(bycle_df.month.values.reshape(-1,1)).toarray()
print(x_hour)
# +
#Make a data data frame for each categorical feature that was one hot encoded using numpy results and proper column names
df_bycle_ohe_season = pd.DataFrame(x_season, columns = ['spring', 'summer', 'fall', 'winter'])
df_bycle_ohe_weather = pd.DataFrame(x_weather, columns = ['clear', 'mist', 'light', 'heavy_rain'])
df_bycle_ohe_hour = pd.DataFrame(x_hour, columns = ['EarlyMorning', 'Morning', 'Evening', 'Night'])
df_bycle_ohe_month = pd.DataFrame(x_month, columns = ['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July', 'August', 'Sept','Oct','Nov','Dec'])
print(df_bycle_ohe_season[:300:50])
print('\n\n\n')
print(df_bycle_ohe_weather[:300:50])
print('\n\n\n')
print(df_bycle_ohe_hour[:300:50])
print('\n\n\n')
print(df_bycle_ohe_month[:300:50])
#concatenate all the individual one hot encoded dataframes into one dataframe
df_bycle_ohe_feature_matrix = pd.concat([df_bycle_ohe_season,df_bycle_ohe_weather ], axis=1)
df_bycle_ohe_feature_matrix = pd.concat([df_bycle_ohe_feature_matrix, df_bycle_ohe_hour ], axis=1)
df_bycle_ohe_feature_matrix = pd.concat([df_bycle_ohe_feature_matrix, df_bycle_ohe_month ], axis=1)
#print out new df containing onehotencoding columns and values
print('\n\n\nOneHotEncoded DF:\n\n')
print(df_bycle_ohe_feature_matrix[:300:50])
non_encoded_feature_matrix = bycle_df[non_encoded_features]
# -
# ### Features & Labels
# #### New Feature Matrix-- Combining numerical and OneHotEncoded Featuress
# +
#Labels
label = bycle_df['count']
print(label.shape)
print('label is count: \n')
print(label[0:300:50])
#combine non encoded and encoded feature matrices
bycle_feature_matrix = pd.concat([non_encoded_feature_matrix, df_bycle_ohe_feature_matrix], axis = 1)
#columns of new feature matrix
print('\n\nfeature matrix columns')
print('column names are \n', bycle_feature_matrix.columns)
#print(bycle_feature_matrix.head())
#df_to_be_used_later = bycle_feature_matrix.copy()
#df_to_be_used_later = pd.concat([df_to_be_used_later ,df_bycle_ohe_weather ], axis=1)
#df_to_be_used_later.head()
# -
# #### We will build a separate model for each month instead of a single model for the whole year, so a df split will be created for each month
#first bring it back together, so labels are paired off correctly with f matrix as data is filtered and split
df_with_label = pd.concat([bycle_feature_matrix, label], axis = 1)
df_with_label.head()
# +
df_splits = []
months = ['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July', 'August', 'Sept', 'Oct', 'Nov', 'Dec']
#0- feature_matrix 1-label
for month in months:
df_month_fmatrix = df_with_label[df_with_label[month] == 1.0]#filter out rows for only that month
label_month = df_month_fmatrix['count'] #labels are extracted for filtered rows
del df_month_fmatrix['count'] #label is dropped from feature matrix
df_splits.append([df_month_fmatrix, label_month]) #feature matrix and labels are put into a list
print('\n\n\n\n')
for x in range(len(df_splits)):
print('Month: ', months[x])
print('shape fmatrix', df_splits[x][0].shape)
print('shape label is', df_splits[x][1].shape)
print('\n')
# -
# ### NORMALIZING DATA
# #### Normalize data for each month
# +
from sklearn import preprocessing
#normalize data
for x in range(len(df_splits)):
#scale it -> d type changes to numpy array
scaled_feature_matrix_month_numpyarray = preprocessing.scale(df_splits[x][0])
#change back to df
df_month_scaled = pd.DataFrame(scaled_feature_matrix_month_numpyarray, columns = df_splits[x][0].columns)
#store back the scaled data back into list.
df_splits[x][0] = df_month_scaled
print('sample of list of stored monthly dataframes and label\n\n')
print(df_splits[0][0].head())
print(df_splits[0][1][:5:])
# -
# ## ALGORITHM 1: RANDOM FOREST
bycle_feature_matrix.head()
print(bycle_feature_matrix.columns)
# ### X_train, X_test, y_train, y_test for each month of year - going to be used from here on out, for any individual runs
from sklearn.model_selection import train_test_split
monthly_train_test_splits = []
for month in df_splits:
X_train, X_test, y_train, y_test = train_test_split(month[0], month[1], test_size=0.25,
random_state=4)
monthly_train_test_splits.append([X_train, X_test, y_train, y_test])
# +
print('sample of monthly split for first month')
print('X_train shape is', monthly_train_test_splits[0][0].shape)
print('y_train shape is',monthly_train_test_splits[0][2].shape )
print('X_test shape is', monthly_train_test_splits[0][1].shape)
print('y_test shape is', monthly_train_test_splits[0][3].shape)
# -
# ### INDIVIDUAL RUN
from sklearn.ensemble import RandomForestRegressor
# +
from sklearn import metrics
import numpy as np
#kaggle requested metric
def rmsle(y, y_):
log1 = np.nan_to_num(np.array([np.log(v + 1) for v in y]))
log2 = np.nan_to_num(np.array([np.log(v + 1) for v in y_]))
calc = (log1 - log2) ** 2
return np.sqrt(np.mean(calc))
#rmse, feature importance, predictions below
monthly_forests_rmse = {}
monthly_f_importance = {}
monthly_predictions = {}
monthly_forests_rmsle = {}
for x in range(len(monthly_train_test_splits)):
rf = RandomForestRegressor(n_estimators = 200, random_state = 1)
rf.fit(monthly_train_test_splits[x][0], monthly_train_test_splits[x][2]);
predictions = rf.predict(monthly_train_test_splits[x][1])
mse = metrics.mean_squared_error(monthly_train_test_splits[x][3], predictions)
rmse = np.sqrt(mse)
monthly_forests_rmse[months[x]] = rmse
#save feature importance for each month
monthly_f_importance[months[x]] = pd.Series(rf.feature_importances_,index=bycle_feature_matrix.columns).sort_values(ascending=False)
monthly_predictions[months[x]] = predictions
monthly_forests_rmsle[months[x]] = rmsle(monthly_train_test_splits[x][3], predictions)
#save predictions
# -
# #### Feature Importance
#print('\n\n\n')
print('The following are the feature importances generated by the random forest model for each month\n\n')
for x,y in monthly_f_importance.items():
print('month: ', x, '\n\nfeature_importance:\n\n', y)
print('\n\n')
# ### Evaluating our results
# #### Using kaggle's requested metric for evaluation
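# The `rmsle` helper defined above implements the root mean squared logarithmic error,
# $$\mathrm{RMSLE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\big(\log(p_i + 1) - \log(a_i + 1)\big)^2},$$
# where $p_i$ are the predicted counts and $a_i$ the actual counts.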
for x,y in monthly_forests_rmsle.items():
print('month: ', x, 'rmsle: ', y)
# #### Using RMSE Metric for evaluation
for x,y in monthly_forests_rmse.items():
print('month: ', x, ' rmse: ', y)
# ### CROSS VALIDATION RUN
# +
from sklearn.model_selection import cross_val_score
crossv_montly_forests_rmse_list = {}
crossv_montly_forests_rmse = {}
for month in range(len(df_splits)):
rf_cv = RandomForestRegressor(random_state = 42)
mse_list= cross_val_score(rf_cv, df_splits[month][0], df_splits[month][1], cv=10, scoring='neg_mean_squared_error')
mse_list_positive = -mse_list
rmse_list = np.sqrt(mse_list_positive)
rmse_mean = rmse_list.mean()
#save monthly rmse list
crossv_montly_forests_rmse_list[months[month]] = rmse_list
#save monthly rmse mean
crossv_montly_forests_rmse[months[month]] = rmse_mean
for x,y in crossv_montly_forests_rmse_list.items():
print('\nmonth: ', x, '\nrmse_list: ', y)
for x,y in crossv_montly_forests_rmse.items():
print('month: ', x, 'rmse: ', y)
# -
# ### IMPROVING ACCURACY ATTEMPT - FEATURE REDUCTION -FAILED TO IMPROVE ACCURACY
# #### Manual Feature Reduction... Checking RMSE with the 5 best features for each month.
#
# #FEATURE IMPORTANCE- Best features below
#
# #best 5 features for each month model
# #### ['humidity','atemp','temp','windspeed','year' ] #1 - Jan
# #### ['humidity','temp','windspeed','atemp','day_of_year']#Feb
# #### ['temp','atemp','humidity','windspeed','year']#3 March
# #### ['humidity','windspeed','temp','atemp','day_of_year']#April
# #### ['atemp','humidity','windspeed','day_of_year','temp'],#May
# #### ['humidity','windspeed','year', 'day_of_year', 'temp',],#6 June
# #### ['temp','humidity','day_of_year','windspeed','atemp'],#July
# #### ['temp','humidity','windspeed','day_of_year','year'],#August
# #### ['humidity','atemp','windspeed','temp','day_of_year'],#9 Sept
# #### ['humidity','windspeed','atemp','temp','day_of_year'],#10 - Oct
# #### ['humidity','temp','windspeed','day_of_year','atemp'],#11 - Nov
# #### ['humidity','temp','windspeed','day_of_year','atemp',],#12 - December
# #### cross validation and feature reduction
#best 5 features for each month model
best_features = [
['humidity','atemp','temp','windspeed','year' ],#1 - Jan
['humidity','temp','windspeed','atemp','day_of_year'],#Feb
['temp','atemp','humidity','windspeed','year'],#3 March
['humidity','windspeed','temp','atemp','day_of_year'],#April
    ['atemp','humidity','windspeed','day_of_year','temp'],#May
['humidity','windspeed','year', 'day_of_year', 'temp',],#6 June
['temp','humidity','day_of_year','windspeed','atemp'],#July
['temp','humidity','windspeed','day_of_year','year'],#August
['humidity','atemp','windspeed','temp','day_of_year'],#9 Sept
['humidity','windspeed','atemp','temp','day_of_year'],#10 - Oct
    ['humidity','temp','windspeed','day_of_year','atemp'],#11 - Nov
['humidity','temp','windspeed','day_of_year','atemp',],#12 - December
]
# +
from sklearn.model_selection import cross_val_score
crossv_montly_forests_feature_reduction_rmse_list = {}
crossv_montly_forests_feature_reduction_rmse = {}
for month in range(len(df_splits)):
    rf_cv = RandomForestRegressor(random_state = 42)
    #line below keeps only the 5 best columns for this month's model
    mse_list= cross_val_score(rf_cv, df_splits[month][0][best_features[month]], df_splits[month][1], cv=10, scoring='neg_mean_squared_error')
    mse_list_positive = -mse_list
    rmse_list = np.sqrt(mse_list_positive)
    rmse_mean = rmse_list.mean()
    #save monthly rmse list
    crossv_montly_forests_feature_reduction_rmse_list[months[month]] = rmse_list
    #save monthly rmse mean
    crossv_montly_forests_feature_reduction_rmse[months[month]] = rmse_mean
#for x,y in crossv_montly_forests_feature_reduction_rmse_list.items():
# print('\nmonth: ', x, '\nrmse_list: ', y)
for x,y in crossv_montly_forests_feature_reduction_rmse.items():
print('month: ', x, 'rmse: ', y)
# -
# ### IMPROVING ACCURACY ATTEMPT -ADA BOOST - SUCCESS
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
from math import sqrt
# +
ada_boost_montly_rmse = {}
for x in range(len(monthly_train_test_splits)):
#rf_ab= RandomForestRegressor(n_estimators = 100, random_state = 3)
ada_boost_reg = AdaBoostRegressor(RandomForestRegressor(n_estimators = 100, random_state = 3), n_estimators=100, random_state=3)
#rf_ab.fit(X_train, y_train)
ada_boost_reg.fit(monthly_train_test_splits[x][0], monthly_train_test_splits[x][2])
#rf_predictions = rf_ab.predict(X_test)
abr_predictions = ada_boost_reg.predict(monthly_train_test_splits[x][1])
#rf_rmse= sqrt(mean_squared_error(y_test, rf_predictions))
# abr_predictions_rmse = sqrt(mean_squared_error(monthly_train_test_splits[x][3], abr_predictions))
mse = metrics.mean_squared_error(monthly_train_test_splits[x][3], abr_predictions)
rmse = np.sqrt(mse)
ada_boost_montly_rmse[months[x]] = rmse
for x,y in ada_boost_montly_rmse.items():
print('\nmonth: \n', x , ' rmse: ', y, '\n')
# -
from statistics import mean
mean(ada_boost_montly_rmse.values())
print('averaging the rmse of all 12 models we get', mean(ada_boost_montly_rmse.values()))
# ### IMPROVING ACCURACY ATTEMPT -ADA BOOST CROSS VALIDATION- SUCCESS
# +
from sklearn.model_selection import cross_val_score
crossv_adab_montly_forests_rmse_list = {}
crossv_adab_montly_forests_rmse = {}
for month in range(len(df_splits)):
ada_boost = AdaBoostRegressor(RandomForestRegressor(n_estimators = 100, random_state = 3), n_estimators = 100, random_state = 6)
mse_ada = cross_val_score(ada_boost, df_splits[month][0], df_splits[month][1], cv=10, scoring='neg_mean_squared_error')
mse_ada_positive = - mse_ada
rmse_ada_list = np.sqrt(mse_ada_positive)
#print(rmse_ada)
rmse_cv_ada= rmse_ada_list.mean()
#print(accuracy_cv_ada)
#save monthly rmse list
crossv_adab_montly_forests_rmse_list[months[month]] = rmse_ada_list
#save monthly rmse mean
crossv_adab_montly_forests_rmse[months[month]] = rmse_cv_ada
#for x,y in crossv_adab_montly_forests_rmse_list.items():
# print('month: ', x, '\nrmsle_list: ', y)
for x,y in crossv_adab_montly_forests_rmse.items():
print('month: ', x, 'rmsle: ', y)
# -
mean(crossv_adab_montly_forests_rmse.values())
# ### IMPROVING ACCURACY ATTEMPT - PCA - DIMENSIONALITY REDUCTION - SUCCESS
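# Note that the scaler and PCA should be fit on the training split only; the same fitted objects are then reused to transform the test split, so that training and test data live in the same component space before predicting.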
# +
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from math import sqrt
import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
pca_montly_rmse = {}
for x in range(len(monthly_train_test_splits)):
    #grabbing data from split done early on
    X_train = monthly_train_test_splits[x][0]
    y_train = monthly_train_test_splits[x][2]
    X_test = monthly_train_test_splits[x][1]
    y_test = monthly_train_test_splits[x][3]
    scaler = StandardScaler().fit(X_train)
    X_train_scaled = pd.DataFrame(scaler.transform(X_train), index=X_train.index.values, columns=X_train.columns.values)
    X_test_scaled = pd.DataFrame(scaler.transform(X_test), index=X_test.index.values, columns=X_test.columns.values)
    pca = PCA() #create pca object
    pca.fit(X_train)
    cpts = pd.DataFrame(pca.transform(X_train))
    #print(cpts) #still same amount of columns as training
    x_axis = np.arange(1, pca.n_components_+1)
    #print(x_axis) # still same amount of columns as training
    pca_scaled = PCA()
    pca_scaled.fit(X_train_scaled)
    #print(pca_scaled)
    cpts_scaled = pd.DataFrame(pca_scaled.transform(X_train_scaled))
    #from sklearn.ensemble import RandomForestRegressor
    rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=6)
    #print(X_train.head())
    #fit on the scaled principal components; the test data must be transformed by the same scaler and PCA before predicting
    rf.fit(cpts_scaled, y_train)
    predicted_train = rf.predict(cpts_scaled)
    predicted_test = rf.predict(pd.DataFrame(pca_scaled.transform(X_test_scaled)))
    rf_rmse= sqrt(mean_squared_error(y_test, predicted_test))
    pca_montly_rmse[months[x]] = rf_rmse
# +
for x,y in pca_montly_rmse.items():
print('month is: ', x, ' rmse is ', y)
# -
# #### ADA PCA RF
# +
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from math import sqrt
import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
ab_pca_montly_rmse = {}
for x in range(len(monthly_train_test_splits)):
#grabbing data from split done early on
X_train = monthly_train_test_splits[x][0]
y_train = monthly_train_test_splits[x][2]
X_test = monthly_train_test_splits[x][1]
y_test = monthly_train_test_splits[x][3]
#create the scaler and fit it using training data
scaler = StandardScaler().fit(X_train)
#create df with trained scaled data
X_train_scaled = pd.DataFrame(scaler.transform(X_train), index=X_train.index.values, columns=X_train.columns.values)
X_test_scaled = pd.DataFrame(scaler.transform(X_test), index=X_test.index.values, columns=X_test.columns.values)
pca_rf = PCA() #create pca object
pca_rf.fit(X_train)#pass x_train data
cpts = pd.DataFrame(pca_rf.transform(X_train))
#print(cpts) #still same amount of columns as training
x_axis = np.arange(1, pca_rf.n_components_+1)
#print(x_axis) # still same amount of columns as training
pca_scaled = PCA()
pca_scaled.fit(X_train_scaled)
#print(pca_scaled)
cpts_scaled = pd.DataFrame(pca.transform(X_train_scaled))
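    # note: the principal components computed above (cpts_scaled) are not used in this variant; the forest and AdaBoost below are fit on the original feature matrix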
rf = RandomForestRegressor(n_estimators=100, oob_score=True, random_state=6)
rf.fit(X_train, y_train)
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
ada_boost_reg2 = AdaBoostRegressor(rf, n_estimators=100, random_state=3)
ada_boost_reg2.fit(X_train, y_train)
abr_predictions = ada_boost_reg2.predict(X_test)
ab_pca_rmse= sqrt(mean_squared_error(y_test, abr_predictions))
#print('x',x, 'rmse is', ab_pca_rmse)
ab_pca_montly_rmse[months[x]] = ab_pca_rmse
print('\n\n')
for x,y in ab_pca_montly_rmse.items():
print('month is: ', x, ' rmse is ', y)
# -
| 27,253 |
/Day-3/exercises/Remote Iteration.ipynb | c97ce146502c1b8338b884c156fc5e03bf8d5bbd | [] | no_license | Dr-RIZWANAHMED/ngcm-tutorial-python | https://github.com/Dr-RIZWANAHMED/ngcm-tutorial-python | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 8,960 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python (myenv)
# language: python
# name: myenv
# ---
# +
#imports
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats as stats
matplotlib.style.use('ggplot') #style of plots
#to plot in this notebook
# %matplotlib inline
champs = ['Brazil - Sรฉrie A','Brazil - Sรฉrie B','Spain - La Liga 1',
'Germany - Bundesliga','Italy - Serie A', 'England - Premier League',
'France - Ligue 1', 'Portugal - Primeira Liga', 'Netherlands - Eredivise']
PATH_PROJECT = "/home/igormago/git/doutorado/"
PATH_NOTEBOOKS_DATA = PATH_PROJECT + 'notebooks/data/'
# +
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features3.csv')
resume = pd.DataFrame()
rows = ['books','rf1','rf2']
resume['matches_num'] = pd.Series(len(df))
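# hits_*: number of matches where each predictor's pick equals the observed result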
resume['hits_books'] = pd.Series(len(df[df.m_favorite == df.m_column_result]))
resume['hits_rf1'] = pd.Series(len(df[df.m_column_result == df.rf1000]))
resume['hits_rf2'] = pd.Series(len(df[df.m_column_result == df.rf1000_fs1]))
hits=[resume.hits_books, resume.hits_rf1, resume.hits_rf2]
resume['errors_books'] = resume['matches_num'] - resume['hits_books']
resume['errors_rf1'] = resume['matches_num'] - resume['hits_rf1']
resume['errors_rf2'] = resume['matches_num'] - resume['hits_rf2']
errors=[resume.errors_books, resume.errors_rf1, resume.errors_rf2]
resume['p_hits_books'] = resume['hits_books'] / resume['matches_num']
resume['p_hits_rf1'] = resume['hits_rf1'] / resume['matches_num']
resume['p_hits_rf2'] = resume['hits_rf2'] / resume['matches_num']
resume['p_errors_books'] = resume['errors_books'] / resume['matches_num']
resume['p_errors_rf1'] = resume['errors_rf1'] / resume['matches_num']
resume['p_errors_rf2'] = resume['errors_rf2'] / resume['matches_num']
resume['pl_books'] = df['m_odd_favorite'][df.m_favorite == df.m_column_result].sum() - resume['matches_num']
resume['pl_rf1'] = df['m_odd_favorite'][df.m_column_result == df.rf1000].sum() - resume['matches_num']
resume['pl_rf2'] = df['m_odd_favorite'][df.m_column_result == df.rf1000_fs1].sum() - resume['matches_num']
resume.to_csv(PATH_NOTEBOOKS_DATA + 'eval1.csv',index=False);
print(resume)
# +
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features3.csv')
resume = pd.DataFrame()
x1 = df.groupby(['m_favorite','m_medium','m_underdog'])['m_match_id'].count()
x1 = pd.Series.to_frame(x1)
x2 = df[df.m_column_result == df.m_favorite].groupby(['m_favorite','m_medium','m_underdog'])['m_match_id'].count()
x3 = df[df.m_column_result == df.rf1000].groupby(['m_favorite','m_medium','m_underdog'])['m_match_id'].count()
x4 = df[df.m_column_result == df.rf1000_fs1].groupby(['m_favorite','m_medium','m_underdog'])['m_match_id'].count()
resume = pd.concat([x1,x2],axis=1, join='inner')
resume = pd.concat([resume,x3],axis=1, join='inner')
resume = pd.concat([resume,x4],axis=1, join='inner')
print(resume)
resume.columns = ['matches_num','hits_books','hits_rf1','hits_rf2']
resume['errors_books'] = resume['matches_num'] - resume['hits_books']
resume['errors_rf1'] = resume['matches_num'] - resume['hits_rf1']
resume['errors_rf2'] = resume['matches_num'] - resume['hits_rf2']
resume['p_hits_books'] = resume['hits_books'] / resume['matches_num']
resume['p_hits_rf1'] = resume['hits_rf1'] / resume['matches_num']
resume['p_hits_rf2'] = resume['hits_rf2'] / resume['matches_num']
resume['p_errors_books'] = resume['errors_books'] / resume['matches_num']
resume['p_errors_rf1'] = resume['errors_rf1'] / resume['matches_num']
resume['p_errors_rf2'] = resume['errors_rf2'] / resume['matches_num']
print(resume)
# +
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features3.csv')
resume = pd.DataFrame()
x1 = df.groupby(['m_favorite'])['m_match_id'].count()
x1 = pd.Series.to_frame(x1)
x2 = df.groupby(['rf1000'])['m_match_id'].count()
x2 = pd.Series.to_frame(x2)
x3 = df.groupby(['rf1000_fs1'])['m_match_id'].count()
x3 = pd.Series.to_frame(x3)
book_A = len(df[df.m_favorite == 'A'][df.m_column_result == df.m_favorite])
book_D = len(df[df.m_favorite == 'D'][df.m_column_result == df.m_favorite])
book_H = len(df[df.m_favorite == 'H'][df.m_column_result == df.m_favorite])
rf1_A = len(df[df.rf1000 == 'A'][df.m_column_result == df.rf1000])
rf1_D = len(df[df.rf1000 == 'D'][df.m_column_result == df.rf1000])
rf1_H = len(df[df.rf1000 == 'H'][df.m_column_result == df.rf1000])
rf2_A = len(df[df.rf1000_fs1 == 'A'][df.m_column_result == df.rf1000_fs1])
rf2_D = len(df[df.rf1000_fs1 == 'D'][df.m_column_result == df.rf1000_fs1])
rf2_H = len(df[df.rf1000_fs1 == 'H'][df.m_column_result == df.rf1000_fs1])
book_PL = df['m_odd_favorite'][df.m_column_result == df.m_favorite].groupby(df.m_favorite).sum()
rf1_PL = df['m_odd_favorite'][df.m_column_result == df.rf1000].groupby(df.rf1000).sum()
rf2_PL = df['m_odd_favorite'][df.m_column_result == df.rf1000_fs1].groupby(df.rf1000_fs1).sum()
hits_books = [book_A, book_D, book_H]
hits_rf1 = [rf1_A, rf1_D, rf1_H]
hits_rf2 = [rf2_A, rf2_D, rf2_H]
x1['hits_books'] = hits_books
x2['hits_books'] = hits_rf1
x3['hits_books'] = hits_rf2
x1['p'] = x1.hits_books / x1.m_match_id
x2['p'] = x2.hits_books / x2.m_match_id
x3['p'] = x3.hits_books / x3.m_match_id
x1 = pd.concat([x1,book_PL],axis=1, join='inner')
x2 = pd.concat([x2,rf1_PL],axis=1, join='inner')
x3 = pd.concat([x3,rf2_PL],axis=1, join='inner')
print(x1)
print(x2)
print(x3)
# +
def teste(row):
if (row.rf1000 == 'H'):
return row.m_odd_home
elif (row.rf1000 == 'D'):
return row.m_odd_draw
else:
return row.m_odd_away
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features3.csv')
df['rf1000_odd'] = df.apply(teste, axis=1)
resume = pd.DataFrame()
xBook = df.groupby(['m_favorite'])['m_match_id'].count()
xBook = pd.Series.to_frame(xBook)
x1 = df.groupby(['rf1000'])['m_match_id'].count()
x1 = pd.Series.to_frame(x1)
x2 = df.groupby(['rf1000_fs1'])['m_match_id'].count()
x2 = pd.Series.to_frame(x2)
x3 = df.groupby(['rf1000_fs3'])['m_match_id'].count()
x3 = pd.Series.to_frame(x3)
x4 = df.groupby(['rf1000_fs4'])['m_match_id'].count()
x4 = pd.Series.to_frame(x4)
book_A = len(df[df.m_favorite == 'A'][df.m_column_result == df.m_favorite])
book_D = len(df[df.m_favorite == 'D'][df.m_column_result == df.m_favorite])
book_H = len(df[df.m_favorite == 'H'][df.m_column_result == df.m_favorite])
rf1_A = len(df[df.rf1000 == 'A'][df.m_column_result == df.rf1000])
rf1_D = len(df[df.rf1000 == 'D'][df.m_column_result == df.rf1000])
rf1_H = len(df[df.rf1000 == 'H'][df.m_column_result == df.rf1000])
rf2_A = len(df[df.rf1000_fs1 == 'A'][df.m_column_result == df.rf1000_fs1])
rf2_D = len(df[df.rf1000_fs1 == 'D'][df.m_column_result == df.rf1000_fs1])
rf2_H = len(df[df.rf1000_fs1 == 'H'][df.m_column_result == df.rf1000_fs1])
rf3_A = len(df[df.rf1000_fs3 == 'A'][df.m_column_result == df.rf1000_fs3])
rf3_D = len(df[df.rf1000_fs3 == 'D'][df.m_column_result == df.rf1000_fs3])
rf3_H = len(df[df.rf1000_fs3 == 'H'][df.m_column_result == df.rf1000_fs3])
rf4_A = len(df[df.rf1000_fs4 == 'A'][df.m_column_result == df.rf1000_fs4])
rf4_D = len(df[df.rf1000_fs4 == 'D'][df.m_column_result == df.rf1000_fs4])
rf4_H = len(df[df.rf1000_fs4 == 'H'][df.m_column_result == df.rf1000_fs4])
book_PL = df['m_odd_favorite'][df.m_column_result == df.m_favorite].groupby(df.m_favorite).sum()
rf1_PL = df['rf1000_odd'][df.m_column_result == df.rf1000].groupby(df.rf1000).sum()
rf2_PL = df['m_odd_favorite'][df.m_column_result == df.rf1000_fs1].groupby(df.rf1000_fs1).sum()
rf3_PL = df['m_odd_favorite'][df.m_column_result == df.rf1000_fs3].groupby(df.rf1000_fs3).sum()
rf4_PL = df['m_odd_favorite'][df.m_column_result == df.rf1000_fs4].groupby(df.rf1000_fs4).sum()
hits_books = [book_A, book_D, book_H]
hits_rf1 = [rf1_A, rf1_D, rf1_H]
hits_rf2 = [rf2_A, rf2_D, rf2_H]
hits_rf3 = [rf3_A, rf3_D, rf3_H]
hits_rf4 = [rf4_A, rf4_D, rf4_H]
xBook['hits'] = hits_books
x1['hits'] = hits_rf1
x2['hits'] = hits_rf2
x3['hits'] = hits_rf3
x4['hits'] = hits_rf4
xBook['p'] = xBook.hits / xBook.m_match_id
x1['p'] = x1.hits / x1.m_match_id
x2['p'] = x2.hits / x2.m_match_id
x3['p'] = x3.hits / x3.m_match_id
x4['p'] = x4.hits / x4.m_match_id
# xBook = pd.concat([xBook,book_PL],axis=1, join='inner')
# x1 = pd.concat([x1,rf1_PL],axis=1, join='inner')
# x2 = pd.concat([x2,rf2_PL],axis=1, join='inner')
# x3 = pd.concat([x3,rf3_PL],axis=1, join='inner')
# x4 = pd.concat([x4,rf4_PL],axis=1, join='inner')
print(xBook)
print(x1)
print(x2)
print(x3)
print(x4)
# -
# +
def setRFOddFavorite(row):
if (row.rf1000 == 'H'):
return row.m_odd_home
elif (row.rf1000 == 'D'):
return row.m_odd_draw
else:
return row.m_odd_away
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features3.csv')
df['rf1000_odd'] = df.apply(setRFOddFavorite, axis=1)
resume = pd.DataFrame()
xBook = df.groupby(['m_favorite'])['m_match_id'].count()
xBook = pd.Series.to_frame(xBook)
x1 = df[df.m_match_num > 180].groupby(['rf1000'])['m_match_id'].count()
x1 = pd.Series.to_frame(x1)
x2 = df[df.m_match_num > 180].groupby(['rf1000_fs1'])['m_match_id'].count()
x2 = pd.Series.to_frame(x2)
x3 = df[df.m_match_num > 180].groupby(['rf1000_fs3'])['m_match_id'].count()
x3 = pd.Series.to_frame(x3)
x4 = df[df.m_match_num > 180].groupby(['rf1000_fs4'])['m_match_id'].count()
x4 = pd.Series.to_frame(x4)
book_A = len(df[(df.m_match_num > 180) & (df.m_favorite == 'A') & (df.m_column_result == df.m_favorite)])
book_D = len(df[(df.m_match_num > 180) & (df.m_favorite == 'D') & (df.m_column_result == df.m_favorite)])
book_H = len(df[(df.m_match_num > 180) & (df.m_favorite == 'H') & (df.m_column_result == df.m_favorite)])
rf1_A = len(df[(df.m_match_num > 180) & (df.rf1000 == 'A') & (df.m_column_result == df.rf1000)])
rf1_D = len(df[(df.m_match_num > 180) & (df.rf1000 == 'D') & (df.m_column_result == df.rf1000)])
rf1_H = len(df[(df.m_match_num > 180) & (df.rf1000 == 'H') & (df.m_column_result == df.rf1000)])
rf2_A = len(df[(df.m_match_num > 180) & (df.rf1000_fs1 == 'A') & (df.m_column_result == df.rf1000_fs1)])
rf2_D = len(df[(df.m_match_num > 180) & (df.rf1000_fs1 == 'D') & (df.m_column_result == df.rf1000_fs1)])
rf2_H = len(df[(df.m_match_num > 180) & (df.rf1000_fs1 == 'H') & (df.m_column_result == df.rf1000_fs1)])
rf3_A = len(df[(df.m_match_num > 180) & (df.rf1000_fs3 == 'A') & (df.m_column_result == df.rf1000_fs3)])
rf3_D = len(df[(df.m_match_num > 180) & (df.rf1000_fs3 == 'D') & (df.m_column_result == df.rf1000_fs3)])
rf3_H = len(df[(df.m_match_num > 180) & (df.rf1000_fs3 == 'H') & (df.m_column_result == df.rf1000_fs3)])
rf4_A = len(df[(df.m_match_num > 180) & (df.rf1000_fs4 == 'A') & (df.m_column_result == df.rf1000_fs4)])
rf4_D = len(df[(df.m_match_num > 180) & (df.rf1000_fs4 == 'D') & (df.m_column_result == df.rf1000_fs4)])
rf4_H = len(df[(df.m_match_num > 180) & (df.rf1000_fs4 == 'H') & (df.m_column_result == df.rf1000_fs4)])
book_PL = df['m_odd_favorite'][(df.m_match_num > 180) & (df.m_column_result == df.m_favorite)].groupby(df.m_favorite).sum()
rf1_PL = df['rf1000_odd'][(df.m_match_num > 180) & (df.m_column_result == df.rf1000)].groupby(df.rf1000).sum()
rf2_PL = df['m_odd_favorite'][(df.m_match_num > 180) & (df.m_column_result == df.rf1000_fs1)].groupby(df.rf1000_fs1).sum()
rf3_PL = df['m_odd_favorite'][(df.m_match_num > 180) & (df.m_column_result == df.rf1000_fs3)].groupby(df.rf1000_fs3).sum()
rf4_PL = df['m_odd_favorite'][(df.m_match_num > 180) & (df.m_column_result == df.rf1000_fs4)].groupby(df.rf1000_fs4).sum()
hits_books = [book_A, book_D, book_H]
hits_rf1 = [rf1_A, rf1_D, rf1_H]
hits_rf2 = [rf2_A, rf2_D, rf2_H]
hits_rf3 = [rf3_A, rf3_D, rf3_H]
hits_rf4 = [rf4_A, rf4_D, rf4_H]
xBook['hits'] = hits_books
x1['hits'] = hits_rf1
x2['hits'] = hits_rf2
x3['hits'] = hits_rf3
x4['hits'] = hits_rf4
xBook['p'] = xBook.hits / xBook.m_match_id
x1['p'] = x1.hits / x1.m_match_id
x2['p'] = x2.hits / x2.m_match_id
x3['p'] = x3.hits / x3.m_match_id
x4['p'] = x4.hits / x4.m_match_id
# xBook = pd.concat([xBook,book_PL],axis=1, join='inner')
# x1 = pd.concat([x1,rf1_PL],axis=1, join='inner')
# x2 = pd.concat([x2,rf2_PL],axis=1, join='inner')
# x3 = pd.concat([x3,rf3_PL],axis=1, join='inner')
# x4 = pd.concat([x4,rf4_PL],axis=1, join='inner')
print(xBook)
print(x1)
print(x2)
print(x3)
print(x4)
# +
def setRFOddFavorite(row):
if (row.pred == 'H'):
return row.m_odd_home
elif (row.pred == 'D'):
return row.m_odd_draw
else:
return row.m_odd_away
df = pd.read_csv(PATH_NOTEBOOKS_DATA + 'features2.csv')
#df = df[df.m_match_group_num > 8]
df['pred_books'] = df['m_favorite']
df['favorite_odd_books'] = df['m_odd_favorite']
methods = []
for x in range (1,15):
methods.append('rf'+ str(x))
for m in methods:
temp = pd.read_csv(PATH_NOTEBOOKS_DATA + 'pred_' + m + '.csv')
temp['favorite_odd'] = temp.apply(setRFOddFavorite, axis=1)
df['pred_' + m] = temp['pred']
df['favorite_odd_' + m] = temp['favorite_odd']
df = df[df.pred_rf1.isnull() == False]
methods.append('books')
# +
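# createRow collects the number of predictions, hits and accumulated winning odds (profit) per predicted outcome (H/D/A) into a single summary row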
def createRow (name, gp1, hits, pl):
row = {}
row['method'] = name
print(gp1['H'])
row['num_preds'] = gp1['H'] + gp1['D'] + gp1['A']
row['H'] = gp1['H']
row['D'] = gp1['D']
row['A'] = gp1['A']
row['hits_H'] = hits['H']
row['hits_D'] = hits['D']
row['hits_A'] = hits['A']
row['hits'] = hits['H'] + hits['D'] + hits['A']
row['profit_H'] = pl['H']
row['profit_D'] = pl['D']
row['profit_A'] = pl['A']
row['profit'] = pl['H'] + pl['D'] + pl['A']
return row
results = []
for m in methods:
res = {}
res['name'] = m
res['g'] = df.groupby(['pred_' + m])['m_match_id'].count()
res['hits'] = df[df.m_column_result == df['pred_' + m]].groupby(['pred_' + m])['m_match_id'].count()
res['profit'] = df[df.m_column_result == df['pred_' + m]].groupby(['pred_' + m])['favorite_odd_' + m].sum()
results.append(res)
resume = pd.DataFrame()
for r in results:
resume = resume.append(pd.Series(createRow(r['name'],r['g'], r['hits'], r['profit'])), ignore_index=True)
resume = resume[['method','num_preds','H','D','A','hits','hits_H','hits_D','hits_A','profit_H','profit_D','profit_A']]
resume['p_hits'] = resume['hits'] / resume ['num_preds']
resume['p_hits_H'] = resume['hits_H'] / resume ['H']
resume['p_hits_D'] = resume['hits_D'] / resume ['D']
resume['p_hits_A'] = resume['hits_A'] / resume ['A']
resume['pl_H'] = resume ['profit_H'] - resume['H']
resume['pl_D'] = resume ['profit_D'] - resume['D']
resume['pl_A'] = resume ['profit_A'] - resume['A']
print(resume)
# +
# book_A = len(df[df.pred_books == 'A'])
# book_D = len(df[df.pred_books == 'D'])
# book_H = len(df[df.pred_books == 'H'])
# rf1_A = len(df[df.pred_rf1 == 'A'])
# rf1_D = len(df[df.pred_rf1 == 'D'])
# rf1_H = len(df[df.pred_rf1 == 'H'])
# rf2_A = len(df[df.pred_rf2 == 'A'])
# rf2_D = len(df[df.pred_rf2 == 'D'])
# rf2_H = len(df[df.pred_rf2 == 'H'])
# books_hits = len(df[df.m_column_result == df.pred_books])
# rf1_hits = len(df[df.m_column_result == df.pred_rf1])
# rf2_hits = len(df[df.m_column_result == df.pred_rf2])
# h_book_A = len(df[df.pred_books == 'A'][df.m_column_result == df.pred_books])
# h_book_D = len(df[df.pred_books == 'D'][df.m_column_result == df.pred_books])
# h_book_H = len(df[df.pred_books == 'H'][df.m_column_result == df.pred_books])
# h_rf1_A = len(df[df.pred_rf1 == 'A'][df.m_column_result == df.pred_rf1])
# h_rf1_D = len(df[df.pred_rf1 == 'D'][df.m_column_result == df.pred_rf1])
# h_rf1_H = len(df[df.pred_rf1 == 'H'][df.m_column_result == df.pred_rf1])
# h_rf2_A = len(df[df.pred_rf2 == 'A'][df.m_column_result == df.pred_rf2])
# h_rf2_D = len(df[df.pred_rf2 == 'D'][df.m_column_result == df.pred_rf2])
# h_rf2_H = len(df[df.pred_rf2 == 'H'][df.m_column_result == df.pred_rf2])
# print(books_hits/len(df))
# print(book_A,h_book_A, h_book_A/book_A)
# print(book_D,h_book_D, h_book_D/book_D)
# print(book_H,h_book_H, h_book_H/book_H)
# print("")
# print(rf1_hits/len(df))
# print(rf1_A,h_rf1_A, h_rf1_A/rf1_A)
# print(rf1_D,h_rf1_D, h_rf1_D/rf1_D)
# print(rf1_H,h_rf1_H, h_rf1_H/rf1_H)
# print("")
# print(rf2_hits/len(df))
# print(rf2_A,h_rf1_A, h_rf2_A/rf2_A)
# print(rf2_D,h_rf1_D, h_rf2_D/rf2_D)
# print(rf2_H,h_rf1_H, h_rf2_H/rf2_H)
# value = len(df[df.pred_rf2 == 'D'][df.m_column_result == "H"])
# print((h_rf1_H+value)/(rf2_D+rf2_H))
# print(value/rf2_D)
#group by result prediction
# +
rounds = pd.DataFrame()
rounds['matches'] = df.groupby(['m_match_group_num'])['m_match_id'].count()
for m in methods:
#rounds['hits_' + m] = df[df.m_column_result == df['pred_' + m]].groupby(['m_match_group_num'])['m_match_id'].count()
rounds['p_hits' + m] = df[df.m_column_result == df['pred_' + m]].groupby(['m_match_group_num'])['m_match_id'].count() / rounds['matches']
print(rounds)
# +
champs = pd.DataFrame()
champs['matches'] = df.groupby(['c_championship_name'])['m_match_id'].count()
for m in methods:
#rounds['hits_' + m] = df[df.m_column_result == df['pred_' + m]].groupby(['m_match_group_num'])['m_match_id'].count()
champs['p_hits' + m] = df[df.m_column_result == df['pred_' + m]].groupby(['c_championship_name'])['m_match_id'].count() / champs['matches']
print(champs)
# -
# ## ALL CONTRASTS - TASK and RT EXCLUDED
#
# ### Only excluding Task fails the KMO test
info_dict, BIC_df = pipeline(ALL_MAPS_NO_TASK_NO_RT)
# # RT MAPS
info_dict, BIC_df = pipeline(RT_MAPS, factor_options=np.arange(1,15,1), nfac_plot=[7])
| 17,970 |
/.ipynb_checkpoints/Curso Introduรงรฃo ร Ciรชncia da Computaรงรฃo com Python - Parte 2-checkpoint.ipynb | cc2cd069df033a7fb9a24a16ef8b8c4b39b0c4f7 | [
"MIT"
] | permissive | marcelomiky/PythonCodes | https://github.com/marcelomiky/PythonCodes | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 14,664 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ECE 457B - Tutorial 3
#
# 1. Introduction to [Sklearn](https://scikit-learn.org/stable/index.html)
# 2. The [IRIS dataset](https://archive.ics.uci.edu/ml/datasets/iris)
# 3. Comparing Models
# +
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, classification_report, ConfusionMatrixDisplay
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# -
# ## Load and Explore Dataset
#
# Features
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
#
#
# Classes:
# 1. Iris Setosa
# 2. Iris Versicolour
# 3. Iris Virginica
#
# "One class is linearly separable from the other 2; the latter are NOT linearly separable from each other."
# +
iris = load_iris()
x = iris.data
y_ = iris.target
print("Size of features: {}".format(x.shape))
print("Size of labels: {}".format(y_.shape))
print("Sample data: {}".format(x[:3]))
print("Sample labels: {}".format(y_[:3]))
class_names = ['setosa', 'versicolor', 'virginica']
# -
# ## Data Preprocessing and Preparation
#
# Let's explore how we can use sklearn for full data proprocessing
#
# 1. Normalize the feature space
# 2. For the purpose of using a neural network and since this is a classification problem, we will be using softmax activation in the output layer. For that, we will change the labels to be one-hot-encoded (sklearn)
# 3. Lets use a 80-20 train-test split. For that, we'll use the train_test_split function from sklearn
# +
# Normalize the data
X_norm = (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))
# One-hot encode the labels
encoder = OneHotEncoder(sparse=False)
# Split the data into training and testing
train_x, test_x, train_y, test_y = train_test_split(X_norm, y_, test_size=0.20)
train_y_enc = encoder.fit_transform(train_y.reshape(-1,1))
test_y_enc = encoder.fit_transform(test_y.reshape(-1,1))
print("Sample train data: {}".format(train_x[:3]))
print("Sample train labels: {}".format(train_y_enc[:3]))
# -
# ## The Model
# +
# Build the model
model = Sequential()
model.add(Dense(10, input_shape=(4,), activation='relu', name='input'))
model.add(Dense(10, activation='relu', name='hidden1'))
model.add(Dense(3, activation='softmax', name='output'))
print(model.summary())
# Compile the model
# Adam optimizer with learning rate of 0.001
optimizer = Adam(lr=0.001)
model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# -
# Train the model
model.fit(train_x, train_y_enc, verbose=1, batch_size=5, epochs=50)
# +
# Test on unseen data
results = model.evaluate(test_x, test_y_enc)
print('Final test set loss: {:4f}'.format(results[0]))
print('Final test set accuracy: {:4f}'.format(results[1]))
# -
# ## Use Support Vector Machines
# +
# Using SVM (Also from sklearn)
svm_ = SVC(gamma='auto')
svm_.fit(train_x, train_y)
y_svm = svm_.predict(test_x)
acc_svm = sum([1 for i in range(0,len(test_y)) if test_y[i] == y_svm[i] ])/len(test_y)
print("SVM Accuracy: {}%".format(acc_svm*100))
# -
# ## Confusion Matrix
# +
y_mlp = model.predict_classes(test_x)
cm_mlp = confusion_matrix(test_y, y_mlp)
print(cm_mlp)
print(classification_report(test_y, y_mlp, target_names=class_names))
disp1 = ConfusionMatrixDisplay(confusion_matrix=cm_mlp,display_labels=class_names)
disp1.plot()
# +
cm_svm = confusion_matrix(test_y, y_svm)
print(cm_svm)
print(classification_report(test_y, y_svm, target_names=class_names))
disp2 = ConfusionMatrixDisplay(confusion_matrix=cm_svm,display_labels=class_names)
disp2.plot()
# -
print("shape of the second mini_batch_Y: " + str(mini_batches[1][1].shape))
print("shape of the third mini_batch_Y: " + str(mini_batches[2][1].shape))
# -
# # ๆณจๆ
mini_batch็ๅคงๅฐไธ่ฌ้ๆฉ2็ๆฌกๆน
def soma_matrizes(m1, m2):
    if len(m1) != len(m2) or len(m1[0]) != len(m2[0]):
        return False
    else:
        matriz = []
        for i in range(len(m1)):
            linha = []
            for j in range(len(m1[0])):
                linha.append(m1[i][j] + m2[i][j])
            matriz.append(linha)
        return matriz
# + deletable=true editable=true
m1 = [[1, 2, 3], [4, 5, 6]]
m2 = [[2, 3, 4], [5, 6, 7]]
soma_matrizes(m1, m2)
# + deletable=true editable=true
m1 = [[1], [2], [3]]
m2 = [[2, 3, 4], [5, 6, 7]]
soma_matrizes(m1, m2)
# + [markdown] deletable=true editable=true
# ### Programming practice task: Additional exercises (optional)
#
# Exercise 1: Printing matrices
#
# As proposed in the first video lecture of the week, write a function imprime_matriz(matriz) that receives a matrix as a parameter and prints it, row by row. Note that you must NOT print spaces after the last element of each row!
#
# Examples:
#
#
# minha_matriz = [[1], [2], [3]]
#
# imprime_matriz(minha_matriz)
#
# 1
#
# 2
#
# 3
#
# minha_matriz = [[1, 2, 3], [4, 5, 6]]
#
# imprime_matriz(minha_matriz)
#
# 1 2 3
#
# 4 5 6
#
# + deletable=true editable=true
def imprime_matriz(A):
    # print each row on its own line, separating elements with single spaces (no trailing space)
    for linha in A:
        print(' '.join(str(elemento) for elemento in linha))
# + deletable=true editable=true
minha_matriz = [[1], [2], [3]]
imprime_matriz(minha_matriz)
# + deletable=true editable=true
minha_matriz = [[1, 2, 3], [4, 5, 6]]
imprime_matriz(minha_matriz)
# + [markdown] deletable=true editable=true
# ### Exercise 2: Multipliable matrices
#
# Two matrices are multipliable if the number of columns of the first equals the number of rows of the second. Write a function sao_multiplicaveis(m1, m2) that receives two matrices as parameters and returns True if the matrices are multipliable (in the given order) and False otherwise.
#
# Examples:
#
# m1 = [[1, 2, 3], [4, 5, 6]]
#
# m2 = [[2, 3, 4], [5, 6, 7]]
#
# sao_multiplicaveis(m1, m2) => False
#
#
# m1 = [[1], [2], [3]]
#
# m2 = [[1, 2, 3]]
#
# sao_multiplicaveis(m1, m2) => True
# + deletable=true editable=true
def sao_multiplicaveis(m1, m2):
    '''Receives two matrices as parameters and returns True if they are multipliable
    (the number of columns of the first equals the number of rows of the second), False otherwise.
    '''
    return len(m1[0]) == len(m2)
# -
m1 = [[1, 2, 3], [4, 5, 6]]
m2 = [[2, 3, 4], [5, 6, 7]]
sao_multiplicaveis(m1, m2)
m1 = [[1], [2], [3]]
m2 = [[1, 2, 3]]
sao_multiplicaveis(m1, m2)
| 6,673 |
/tensorflow_/Tensorflow_Basic3_variable.ipynb | ba1986a28c75e4f5b20964bb55c5a62c1c303c09 | [] | no_license | yennanliu/analysis | https://github.com/yennanliu/analysis | 11 | 9 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,913 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
# - A TensorFlow variable is the best way to represent shared, persistent state manipulated by your program.
#
# - Variables are manipulated via the `tf.Variable` class. A tf.Variable represents a tensor whose value can be changed by running ops on it. Unlike tf.Tensor objects, a tf.Variable exists outside the context of a single session.run call.
#
# - Internally, a `tf.Variable` stores a persistent tensor. Specific ops allow you to read and modify the values of this tensor. These modifications are visible across multiple tf.Sessions, so multiple workers can see the same values for a tf.Variable
#
# - https://www.tensorflow.org/programmers_guide/variables
# Create two variables.
weights = tf.Variable(tf.random_normal([784, 200], stddev=0.35),
name="weights")
biases = tf.Variable(tf.zeros([200]), name="biases")
weights
# +
# https://www.tensorflow.org/versions/r1.0/programmers_guide/variables
# Create two variables.
weights = tf.Variable(tf.random_normal([784, 200], stddev=0.35),
name="weights")
biases = tf.Variable(tf.zeros([200]), name="biases")
...
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Later, when launching the model
with tf.Session() as sess:
# Run the init operation.
sess.run(init_op)
...
# Use the model
...
# +
# dev
# -
# +
# example
# https://github.com/MorvanZhou/Tensorflow-Tutorial/blob/master/tutorial-contents/203_variable.py
var = tf.Variable(0) # our first variable in the "global_variable" set
add_operation = tf.add(var, 1)
update_operation = tf.assign(var, add_operation)
with tf.Session() as sess:
# once define variables, you have to initialize them by doing this
sess.run(tf.global_variables_initializer())
for _ in range(3):
sess.run(update_operation)
print(sess.run(var))
# -
add_operation
type(add_operation)
| 2,219 |
/LR.ipynb | 9e7d7ceca62de45913458a97d9091c541470f800 | [] | no_license | MuhamadElBeheiry/Optical_Character_Recognizer | https://github.com/MuhamadElBeheiry/Optical_Character_Recognizer | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 10,747 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mohamedahmedsaadahmed77/Research-Project-Selected-2/blob/master/LR.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Fam5ytVcjiLl" colab_type="code" outputId="dc801a1f-e734-4296-c970-957c26701870" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %tensorflow_version 1.x
import tensorflow as tf
print(tf.__version__)
# + id="ZsjEBty7h9pG" colab_type="code" outputId="e2bf8830-4fdd-4cc3-8526-7b9db9de5a46" colab={"base_uri": "https://localhost:8080/", "height": 153}
# !git clone https://github.com/mohamedahmedsaadahmed77/OCR-Selected-2.git
# + id="en2JPYgZh9lt" colab_type="code" colab={}
import numpy as np
import os
from imutils import paths
import cv2
import pandas as pd
def load_images(path):
print("[INFO] loading images...")
imagePaths = list(paths.list_images(path))
imagePaths.sort()
data = []
labels = []
for imagePath in imagePaths:
label = imagePath.split(os.path.sep)[-2]
image = cv2.imread(imagePath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = np.reshape(image, (28*28))/255.0
data.append(image)
labels.append(label)
data = np.array(data)
labels = pd.get_dummies(labels)
labels = np.array(labels)
print("[INFO] done")
return data,labels
# + id="CZDZv0I_h9h0" colab_type="code" outputId="b99a4ce4-f046-4279-e742-87b5080d2d00" colab={"base_uri": "https://localhost:8080/", "height": 85}
x_train, y_train = load_images('/content/OCR-Selected-2/Dataset/Training')
x_test, y_test = load_images('/content/OCR-Selected-2/Dataset/Test')
# + id="XarFVDTfbjUf" colab_type="code" outputId="aa783076-7517-42ff-aef8-21f510f5cd9d" colab={"base_uri": "https://localhost:8080/", "height": 51}
from sklearn.decomposition import PCA
pca = PCA(.95)
pca.fit(x_train)
# + id="dsDAMqBUcEas" colab_type="code" outputId="6175aebb-5170-4a96-f381-1db54ef5faf0" colab={"base_uri": "https://localhost:8080/", "height": 51}
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
print(x_train.shape)
print(x_test.shape)
# + id="DzkBQD7bhqbK" colab_type="code" colab={}
# Hyper parameters
learning_rate = 0.0001
epochs = 32
batch_size = 16
batches = int(x_train.shape[0] / batch_size)
# Placeholders are used so the graph can accept any number of records, which keeps the code flexible
X = tf.placeholder(tf.float32, [None, 116])
Y = tf.placeholder(tf.float32, [None, 26])
# Variables are used because their values will be updated during training; they are initialized with random values
W = tf.Variable(.1 * np.random.randn(116, 26).astype(np.float32))
B = tf.Variable(.1 * np.random.randn(26).astype(np.float32))
# + id="KS_H36hKhxfF" colab_type="code" colab={}
# Softmax (multinomial logistic) regression: pred = softmax(X * W + B)
pred = tf.nn.softmax(tf.add(tf.matmul(X,W), B))
# Cost function
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(pred), axis=1))
# Optimiser that will find better values for the bias and weights
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
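# In equations, the model above is $\hat{y} = \mathrm{softmax}(XW + B)$, trained with the categorical cross-entropy loss $L = -\frac{1}{N}\sum_{n}\sum_{k=1}^{26} y_{nk}\log\hat{y}_{nk}$, minimized here by plain gradient descent.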
# + id="birGFpOmhxdW" colab_type="code" outputId="a34fe446-aade-48b5-8a79-e2c9346d9255" colab={"base_uri": "https://localhost:8080/", "height": 578}
# Session to determine the flow of the computational graph (TensorFlow)
with tf.Session() as sesh:
    # Set initial values of tensor variables (Mandatory)
    sesh.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for i in range(batches):
            # slide the offset by one batch per iteration
            offset = i * batch_size
            x = x_train[offset: offset + batch_size]
            y = y_train[offset: offset + batch_size]
            sesh.run(optimizer, feed_dict={X: x, Y:y})
        # Get cost function value (Optional)
        costVal = sesh.run(cost, feed_dict={X:x, Y:y})
        print(f'epoch: {epoch:2d} cost_val= {costVal:.4f}')
    # Calculate the accuracy
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    acc = accuracy.eval({X: x_test, Y: y_test})
    print(f'Accuracy: {acc * 100:.2f}%')
| 4,406 |
/04-fisher-example.ipynb | 7f8d1e768cb8d8e2371c894b06bd90baabacc8fe | [
"MIT"
] | permissive | romilly/machine-learning-1 | https://github.com/romilly/machine-learning-1 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 95,398 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dimensionality reduction and classification with Fisher's linear discriminant
#
# In this notebook we will deal with two interesting applications of Fisher's linear discriminant: dimensionality reduction, and classification. This discriminant is formulated so that an appropriate projection of the data is found, so that the distance between points of different classes is **maximized** and the distance between points of the same class is **minimized**. The fact that it needs label information makes this a supervised learning method, in contrast to other dimensionality reduction techniques that work without labels, such as [PCA](https://dfdazac.github.io/pca_ex.html).
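#
# Concretely, for two classes the discriminant chooses the projection direction $w$ that maximizes the Fisher criterion
#
# $$J(w) = \frac{w^T S_B\, w}{w^T S_W\, w},$$
#
# where $S_B$ is the between-class scatter matrix and $S_W$ the within-class scatter matrix; the maximizer is $w \propto S_W^{-1}(m_2 - m_1)$, with $m_1$ and $m_2$ the class means.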
#
# ## The data
#
# For illustration purposes, we will use a synthetic dataset, containing samples from two Gaussian distributions.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
# Mean and covariance matrix of the distributions
mu1 = np.array([-1.5, -0.0])
cov1 = np.array([[1, -0.2], [-0.2, 0.5]])
mu2 = np.array([2, 1])
cov2 = np.array([[1, -0.2], [-0.2, 0.5]])
# Get samples and plot
data1 = np.random.multivariate_normal(mu1, cov1, 100)
data2 = np.random.multivariate_normal(mu2, cov2, 100)
plt.figure(figsize=(5, 5))
plt.scatter(data1[:,0], data1[:,1])
plt.scatter(data2[:,0], data2[:,1]);
# -
# ## Projecting the data
#
# As we introduced it, Fisher's linear discriminant is calculated so that the optimal projection that maximizes the between-class variance and minimizes the within-class variance is found. The projection is one-dimensional, which might be too extreme for some applications, but for a classification task it is useful, since we can find a threshold in the projected one-dimensional space that separates between the two classes.
# +
from discriminant_analysis.fisher import FisherDiscriminant
# Collect the features and labels in arrays
X = np.vstack((data1, data2))
Y = np.concatenate((np.zeros(len(data1), dtype=np.int), np.ones(len(data2), dtype=np.int)))
# Find the optimal projection
model = FisherDiscriminant()
model.fit(X, Y)
X_proj = model.transform(X)
# -
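# As a quick sanity check (a sketch; the `FisherDiscriminant` class may scale or sign its solution differently), the projection direction can also be computed directly with numpy as $w \propto S_W^{-1}(m_2 - m_1)$:

# +
m_a, m_b = X[Y == 0].mean(axis=0), X[Y == 1].mean(axis=0)
# within-class scatter: sum of the per-class scatter matrices
S_w = (np.cov(X[Y == 0], rowvar=False) * (np.sum(Y == 0) - 1)
       + np.cov(X[Y == 1], rowvar=False) * (np.sum(Y == 1) - 1))
w = np.linalg.solve(S_w, m_b - m_a)
w /= np.linalg.norm(w)
print("Fisher direction (up to sign and scale):", w)
# -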
# We have projected the data, which originally lay in a two-dimensional space, to a one-dimensional space, which we stored in the `X_proj` array. We can plot a histogram of this data to observe how well the classes can be discriminated in the projected space.
plt.hist(X_proj[Y == 0], label='Class 0')
plt.hist(X_proj[Y == 1], label='Class 1')
plt.legend()
plt.title('Data in the projected space');
# Great! It looks as if we reduced the dimensionality of the data, and now we are able to discriminate between two classes by defining a single appropriate threshold. For this example, this threshold seems to lie between 0 and 1. Now we might ask, how do we choose the correct threshold?
#
# ## Grid-searching for the best threshold
#
# A quick idea that comes to my mind is to split the data into training and test splits, and use the training split to find the best threshold between 0 and 1, using 4-fold cross-validation. Let's try!
# +
from data.utils import crossval_indices, split_train_test
from metrics.classification import accuracy
# Shuffle the data and split into training and test
rand_idx = np.random.permutation(len(X))
X = X[rand_idx]
Y = Y[rand_idx]
X_train, Y_train, X_test, Y_test = split_train_test(X, Y)
# Find the best threshold in the interval [0, 1)
threshold_values = np.linspace(0, 1, 20)
accuracies = np.zeros(len(threshold_values))
n_folds = 4
for i, threshold in enumerate(threshold_values):
# Get cross-validation indices
train_folds, valid_folds = crossval_indices(len(X_train), n_folds)
acc = 0
for train_i, valid_i in zip(train_folds, valid_folds):
# Fit the model
model.fit(X_train[train_i], Y_train[train_i])
# Project validation data
X_proj = model.transform(X_train[valid_i])
# Predict using the threshold
Y_pred = np.zeros(len(Y_train[valid_i]), dtype=np.int)
Y_pred[X_proj > threshold] = 1
# Get accuracy
acc += accuracy(Y_train[valid_i], Y_pred)
# Calculate average accuracy
acc /= n_folds
accuracies[i] = acc
# Plot accuracy as a function of the threshold
plt.plot(threshold_values, accuracies)
max_threshold_idx = np.argmax(accuracies)
best_threshold = threshold_values[max_threshold_idx]
plt.title('Accuracy, \n maximum of {:.3f} with threshold = {:.3f}'.format(accuracies[max_threshold_idx],
best_threshold))
plt.xlabel('Threshold');
# -
# We have obtained the best threshold that separates the data in the one-dimensional space using cross-validation. What is the final accuracy on the test set?
# Project test data
X_proj = model.transform(X_test)
# Predict using the threshold
Y_pred = np.zeros(len(Y_test), dtype=np.int)
Y_pred[X_proj > best_threshold] = 1
# Get accuracy
print('Accuracy: {:.4f}'.format(accuracy(Y_test, Y_pred)))
# Not bad for our toy example.
#
# There is a second idea that we can use to solve the problem of classification with Fisher's discriminant, which is more formal, as we will now see.
#
# ## Maximum likelihood and some decision theory
#
# If we take a look again at the histograms obtained for the projected data, we can see that the classes are normally distributed. This is the case because they come from two-dimensional Gaussian distributions. This means that instead of searching manually for a threshold, we can let the data speak to us, by finding maximum likelihood estimates of the parameters (the mean and standard deviation) of the projected distributions. It turns out that the `fit()` method of the `FisherDiscriminant` class does exactly this, so we can visualize the distributions after fitting the model.
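#
# For one-dimensional Gaussians the maximum likelihood estimates have the usual closed form,
#
# $$\hat{\mu}_k = \frac{1}{N_k}\sum_{n \in \mathcal{C}_k} y_n, \qquad \hat{\sigma}_k^2 = \frac{1}{N_k}\sum_{n \in \mathcal{C}_k} \left(y_n - \hat{\mu}_k\right)^2,$$
#
# computed over the projected points $y_n$ of each class $\mathcal{C}_k$; these are the values exposed as `model.mean1`, `model.std1` and so on below.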
# +
from scipy.stats import norm
# Fitting the model finds the optimal projection
# as well as the maximum likelihood estimates
model.fit(X_train, Y_train)
X_proj = model.transform(X_train)
# Plot histograms of projected data
fig, ax1 = plt.subplots()
ax1.hist(X_proj[Y_train == 0], label='Class 0', alpha=0.4)
ax1.hist(X_proj[Y_train == 1], label='Class 1', alpha=0.4)
ax1.set_ylabel('Counts')
# Plot estimated densities
ax2 = ax1.twinx()
x = np.linspace(-5, 5, 100)
ax2.plot(x, norm.pdf(x, loc=model.mean1, scale=model.std1))
ax2.plot(x, norm.pdf(x, loc=model.mean2, scale=model.std2))
ax2.set_ylim([0, 1])
ax2.set_ylabel('Probability density');
# -
# We can now find the best threshold using the training data by using a handy result from decision theory (see [1] for more details): the minimum misclassification rate is obtained at the intersection of the class-conditional densities, which we just found. This intersection can be found analytically and is also computed when calling the `fit()` method. Let's see what this value is.
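#
# Setting the two densities equal, $\mathcal{N}(y \mid \mu_1, \sigma_1^2) = \mathcal{N}(y \mid \mu_2, \sigma_2^2)$, and taking logarithms yields a quadratic equation in $y$,
#
# $$\left(\frac{1}{2\sigma_1^2} - \frac{1}{2\sigma_2^2}\right)y^2 - \left(\frac{\mu_1}{\sigma_1^2} - \frac{\mu_2}{\sigma_2^2}\right)y + \left(\frac{\mu_1^2}{2\sigma_1^2} - \frac{\mu_2^2}{2\sigma_2^2} + \ln\frac{\sigma_1}{\sigma_2}\right) = 0,$$
#
# whose root lying between the two class means gives the threshold (assuming equal class priors).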
model.threshold
# This is the threshold used by the `predict()` method, so we can readily make predictions for the test data and obtain the accuracy.
Y_pred = model.predict(X_test)
print('Accuracy: {:.4f}'.format(accuracy(Y_test, Y_pred)))
# We obtain the same accuracy than with the cross-validation method, even though the threshold found with both methods is different. However, the estimation approach is preferable since the solution is found analitically instead of iterating, which saves computational resources, and also it doesn't involve setting hyperparameters.
#
# ---
# ## Nonlinear data
#
# The example data we have used so far is easy because it's already linearly separable in the original space. What if we have more complicated data, like the moons dataset?
# +
from sklearn.datasets import make_moons
X, Y = make_moons(100, noise=0.1)
plt.scatter(X[Y == 0, 0], X[Y == 0, 1])
plt.scatter(X[Y == 1, 0], X[Y == 1, 1]);
# -
# Clearly there is not a line that can separate the two classes. Let's try, however, just to satisfy our curiosity.
# Split into training and test
X_train, Y_train, X_test, Y_test = split_train_test(X, Y)
# Train and evaluate
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
print('Accuracy: {:.4f}'.format(accuracy(Y_test, Y_pred)))
# We can do better than that!
#
# We will now help the discriminant by extracting features out of the data. We will use a cubic polynomial basis to map the data to a higher dimensional space (from two dimensions up to 9). In this space, ideally the two classes will be linearly separable, so that when we project it down to a one-dimensional space using Fisher's discriminant the threshold will be more effective.
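#
# For two inputs $(x_1, x_2)$, a full cubic polynomial basis consists of the monomials $1, x_1, x_2, x_1^2, x_1 x_2, x_2^2, x_1^3, x_1^2 x_2, x_1 x_2^2, x_2^3$ (assuming `polynomial_basis` enumerates them in this standard way); dropping the constant term, as done below to avoid singular scatter matrices, leaves the 9 features mentioned above.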
# +
from features.basis_functions import polynomial_basis
# Map data to a higher dimensional space
# (The constant is dropped to avoid singular matrices)
degree = 3
X_feat = polynomial_basis(X, degree)[:, 1:]
# Split into training and test
X_train, Y_train, X_test, Y_test = split_train_test(X_feat, Y)
# Train and evaluate
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
print('Accuracy: {:.4f}'.format(accuracy(Y_test, Y_pred)))
# -
# Yes! We could make this number bigger by mapping to a space of higher dimension, although we have to keep in mind that by doing so the number of features will increase, which adds to the computational cost. For now we will keep this degree and move to one last cool visualization: the decision boundary created by the polynomial basis and Fisher's discriminant.
# Create a grid
N = 200
x1 = np.linspace(-2, 3, N)
x2 = np.linspace(-1, 2, N)
X1, X2 = np.meshgrid(x1, x2)
X_flat = np.column_stack((X1.flatten(), X2.flatten()))
# Get features
X_feat = polynomial_basis(X_flat, degree)[:, 1:]
# Evaluate model on grid
Y_pred = model.predict(X_feat).reshape(X1.shape)
plt.contourf(X1, X2, Y_pred, cmap='bone', alpha=0.1)
plt.scatter(X[Y == 0, 0], X[Y == 0, 1])
plt.scatter(X[Y == 1, 0], X[Y == 1, 1]);
# Whoa.
#
# As we have seen, the mapping to a higher dimension gives us more flexibility on the kind of problems that we can tackle with Fisher's discriminant. There are also extensions of the discriminant for multi-class problems, which might be worth examining considering what we have seen for the binary case.
#
# ### References
# [1] Bishop, Christopher M. "Pattern recognition and machine learning (information science and statistics)." (2006).
# -
# +
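# 10-fold stratified cross-validation for the LSTM: out-of-fold predictions give a validation AUC estimate, and the test-set predictions are averaged across folds for the submission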
folds = StratifiedKFold(n_splits= 10, shuffle=True)
oof_preds = np.zeros(X.shape[0])
sub_preds = np.zeros(rx.shape[0])
feature_importance_df = pd.DataFrame()
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):
train_x, train_y = X[train_idx,:], Y[train_idx]
valid_x, valid_y = X[valid_idx,:], Y[valid_idx]
train_wx = wX[train_idx,:]
valid_wx = wX[valid_idx,:]
train_id, valid_id = Xid[train_idx], Xid[valid_idx]
print("Train Index:",train_idx,",Val Index:",valid_idx)
if n_fold >= 0:
lstmmodel=train_lstm(n_symbols, embedding_weights,train_x, train_y, valid_x, valid_y)
# feats = Model(inputs=lstmmodel.input, outputs=lstmmodel.get_layer('dense1').output)
lstmmodel.save('LSTM_fold_%d.h5'%(n_fold))
tmp_valid = lstmmodel.predict(valid_x)
tmp_valid= np.reshape(tmp_valid, [-1])
oof_preds[valid_idx] = tmp_valid
res1 = np.reshape(lstmmodel.predict(rx), [-1])
sub_preds += (res1) / folds.n_splits
print('Fold %2d AUC-LSTM : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del train_x, train_y, valid_x, valid_y
app_test = pd.read_csv('testing-set.csv', usecols=['order_id'])
preds = pd.DataFrame({"order_id":app_test["order_id"], "deal_or_not":sub_preds})
# create output sub-folder
preds.to_csv("output/LSTM_" + str(roc_auc_score(Y, oof_preds)) + ".csv", index=False)
# -
# +
len1 = len(Y)
tind = np.zeros(len1, np.int)
for i in range(len1):
tind[i]=i
import random as rn
rn.shuffle(tind)
train_x, train_y = X[tind[1000:],:], Y[tind[1000:]]
valid_x, valid_y = X[tind[:1000],:], Y[tind[:1000]]
print("Train Index:",tind[1000:],",Val Index:",tind[:1000])
lstmmodel=train_lstm(n_symbols, embedding_weights,train_x, train_y, valid_x, valid_y)
lstmmodel.save('LSTM_%d.h5'%(n_fold))
tmp_valid = lstmmodel.predict(valid_x)
tmp_valid= np.reshape(tmp_valid, [-1])
res1 = np.reshape(lstmmodel.predict(rx), [-1])
app_test = pd.read_csv('testing-set.csv', usecols=['order_id'])
preds = pd.DataFrame({"order_id":app_test["order_id"], "deal_or_not":res1})
# create output sub-folder
preds.to_csv("output/LSTM_all.csv", index=False)
# -
app_test = pd.read_csv('testing-set.csv', usecols=['order_id'])
preds = pd.DataFrame({"order_id":app_test["order_id"], "deal_or_not":res1})
# create output sub-folder
preds.to_csv("output/LSTM_all.csv", index=False)
| 13,106 |
/iris/notebook.ipynb | 8ef6515c4ce7cda6cc86ae46c4a3b627242e83b1 | [] | no_license | jkrukowski/strata | https://github.com/jkrukowski/strata | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,916 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [conda env:py35]
# language: python
# name: conda-env-py35-py
# ---
import tensorflow as tf
import numpy as np
# +
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename='iris_training.csv',
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename='iris_test.csv',
target_dtype=np.int,
features_dtype=np.float32)
# -
feature_columns = [tf.feature_column.numeric_column("x", shape=[4])]
classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="./model")
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(training_set.data)},
y=np.array(training_set.target),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=2000)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(test_set.data)},
y=np.array(test_set.target),
num_epochs=1,
shuffle=False)
# +
accuracy_score = classifier.evaluate(input_fn=test_input_fn)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
# -
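# +
# A minimal prediction sketch (not in the original notebook): two hypothetical flower
# measurements, assuming the same feature order as the CSV (sepal length, sepal width,
# petal length, petal width) and that the prediction dicts expose 'class_ids' as in the
# standard estimator examples.
new_samples = np.array(
    [[6.4, 3.2, 4.5, 1.5],
     [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": new_samples},
    num_epochs=1,
    shuffle=False)
predictions = list(classifier.predict(input_fn=predict_input_fn))
predicted_classes = [p["class_ids"][0] for p in predictions]
print("Predicted classes: {}".format(predicted_classes))
# -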
| 1,506 |
/nbs/09b_vision.utils.ipynb | f875258edef1e22445598692f0fc3c835a6871b9 | [
"Apache-2.0"
] | permissive | ROCmSoftwarePlatform/fastai2 | https://github.com/ROCmSoftwarePlatform/fastai2 | 0 | 0 | Apache-2.0 | 2020-09-15T17:44:03 | 2020-06-30T21:21:22 | null | Jupyter Notebook | false | false | .py | 11,235 | # ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp vision.utils
# -
#export
from fastai2.torch_basics import *
from fastai2.data.all import *
from fastai2.vision.core import *
#hide
from nbdev.showdoc import *
# +
# path = untar_data(URLs.IMAGENETTE)
# +
# path
# -
# # Vision utils
#
# > Some utility functions to quickly download a bunch of images, check them and pre-resize them
#export
def _download_image_inner(dest, inp, timeout=4):
i,url = inp
suffix = re.findall(r'\.\w+?(?=(?:\?|$))', url)
suffix = suffix[0] if len(suffix)>0 else '.jpg'
try: download_url(url, dest/f"{i:08d}{suffix}", overwrite=True, show_progress=False, timeout=timeout)
    except Exception as e: print(f"Couldn't download {url}.")
with tempfile.TemporaryDirectory() as d:
d = Path(d)
url = "https://www.fast.ai/images/jh-head"
_download_image_inner(d, (125,url))
assert (d/'00000125.jpg').is_file()
#export
def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4):
"Download images listed in text file `url_file` to path `dest`, at most `max_pics`"
if urls is None: urls = url_file.read().strip().split("\n")[:max_pics]
dest = Path(dest)
dest.mkdir(exist_ok=True)
parallel(partial(_download_image_inner, dest, timeout=timeout), list(enumerate(urls)), n_workers=n_workers)
with tempfile.TemporaryDirectory() as d:
d = Path(d)
url_file = d/'urls.txt'
url_file.write("\n".join([f"https://www.fast.ai/images/{n}" for n in "jh-head thomas.JPG sg-head".split()]))
download_images(d, url_file)
for i in [0,2]: assert (d/f'0000000{i}.jpg').is_file()
assert (d/f'00000001.JPG').is_file()
#export
def resize_to(img, targ_sz, use_min=False):
"Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
w,h = img.size
min_sz = (min if use_min else max)(w,h)
ratio = targ_sz/min_sz
return int(w*ratio),int(h*ratio)
# +
class _FakeImg():
def __init__(self, size): self.size=size
img = _FakeImg((200,500))
test_eq(resize_to(img, 400), [160,400])
test_eq(resize_to(img, 400, use_min=True), [400,1000])
# -
#export
def verify_image(fn):
"Confirm that `fn` can be opened"
try:
im = Image.open(fn)
im.draft(im.mode, (32,32))
im.load()
return True
except: return False
#export
def verify_images(fns):
"Find images in `fns` that can't be opened"
return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o)
#export
def resize_image(file, dest, max_size=None, n_channels=3, ext=None,
img_format=None, resample=Image.BILINEAR, resume=False, **kwargs ):
"Resize file to dest to max_size"
dest = Path(dest)
dest_fname = dest/file.name
if resume and dest_fname.exists(): return
if verify_image(file):
img = Image.open(file)
imgarr = np.array(img)
img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
if ext is not None: dest_fname=dest_fname.with_suffix(ext)
if max_size is not None:
new_sz = resize_to(img, max_size)
img = img.resize(new_sz, resample=resample)
if n_channels == 3: img = img.convert("RGB")
img.save(dest_fname, img_format, **kwargs)
file = Path('images/puppy.jpg')
dest = Path('.')
resize_image(file, max_size=400, dest=dest)
im = Image.open(dest/file.name)
test_eq(im.shape[1],400)
(dest/file.name).unlink()
#export
def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False,
dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=Image.BILINEAR,
resume=None, **kwargs):
"Resize files on path recursevely to dest to max_size"
path = Path(path)
if resume is None and dest != Path('.'): resume=False
os.makedirs(dest, exist_ok=True)
files = get_image_files(path, recurse=recurse)
parallel(resize_image, files, max_workers=max_workers, max_size=max_size, dest=dest, n_channels=n_channels, ext=ext,
img_format=img_format, resample=resample, resume=resume, **kwargs)
with tempfile.TemporaryDirectory() as d:
dest = Path(d)/'resized_images'
resize_images('images', max_size=100, dest=dest)
# # Export -
#hide
from nbdev.export import notebook2script
notebook2script()
| 4,736 |
/data_preprocessing/Finishing alignment pipeline.ipynb | be5207b5e97395f8f39508ea328472c04afd9cac | [
"BSD-3-Clause"
] | permissive | portugueslab/Prat_et_al | https://github.com/portugueslab/Prat_et_al | 0 | 0 | null | 2022-09-12T13:49:46 | 2022-09-12T13:33:57 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 4,473 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
import deepdish as dd
import numpy as np
import pandas as pd
import json
from skimage import io
from notebook_utilities.display import stack_browser, display_array
from matplotlib import pyplot as plt
from fimpy.core.split_dataset import H5SplitDataset
from pathlib import Path
# -
# %autoreload
from fimpy.registration.plane import align_single_planes_sobel, displacement_between_planes, shift_planes
from fimpy.registration.volume import sobel_stack
from fimpy.registration.reg_from_skimage import register_translation
path = Path(r"J:\_Shared\exp22_2p\EC\imported\180526 f1\src")
ds = H5SplitDataset(path)
ref = np.mean(ds[:10,:,:,:], 0)
prefilter_sigma = 3.3
upsampling=10
ds.shape
whole_stack = np.zeros((ds.shape[0]//50+1,) + ds.shape[1:])
whole_stack.shape
# %%time
whole_stack[:,:,:,:] = ds[:,:,:,:][::50, :,:,:]
ref = whole_stack[:2,:,:,:].mean(0)
sob_ref = sobel_stack(ref, prefilter_sigma)
# Find between-planes shifts
# +
shifts_planes = np.zeros((ref.shape[0], 2))
num_planes = ref.shape[0]
centre_plane = int(num_planes // 2)
for i in range(centre_plane, ref.shape[0]-1):
s, error, diffphase = register_translation(ref[i,:,:], ref[i+1,:,:], 10)
shifts_planes[i+1,:] = shifts_planes[i,:] + s
for i in range(centre_plane, 0, -1):
s, error, diffphase = register_translation(ref[i,:,:], ref[i-1,:,:], 10)
shifts_planes[i-1,:] = shifts_planes[i,:] + s
# -
display_array(shift_planes(ref[np.newaxis, :,:,:], dict(shifts=shifts_planes)))
shifts_planes[30,:]
i = 30
shifted, shifts = align_single_planes_sobel(whole_stack[:,i:i+1,:,:], np.fft.fftn(sob_ref[i:i+1, :, :]),
prefilter_sigma=prefilter_sigma, upsample_factor=10, maxshift=15)
display_array(whole_stack[0,i:i+1,:,:] - whole_stack[-1,i:i+1,:,:])
display_array(shifted[0,:,:,:] - shifted[-1,:,:,:])
| 2,125 |
/Classificacao/Naive_Bayes/Naiva_Bayes_Countries.ipynb | 36723b940870ee2174e5fca0cdf7057a1aca9ef4 | [] | no_license | BAssis777/CursoDS_ProfDanilo | https://github.com/BAssis777/CursoDS_ProfDanilo | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 90,070 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Baseline simulations and deterministic sensitivity analyses
# In this notebook computations for a health state transition model, commonly referred to as Markov model, are implemented. In the healthcare setting a patient can be in a predefined set of Markov states (health states) per unit of time. Each health state is related with a health reward (Quality-Adjusted Life Years) and costs per unit of time. Furthermore, transitions between health states are possible based on defined transition probabilities. For further explanation of Markov models read Markov_model_explanation.docx in the repository.
#
# Baseline simulations refer to the use of a set of predefined parameters to perform health state transition simulations. Baseline parameters are reported in table 1 in: <br>
#
# "Cost and health effects of case management compared to outpatient clinic follow-up in a Dutch heart failure cohort" <br> by H. van Voorst and A.E.R. Arnold <br>
# DOI: 10.1002/ehf2.12692
#
# Next to the baseline simulation results the code below directly computes deterministic one-way sensitivity analyses based on percentages change. In one-way deterministic sensitivity analyses no correlation between parameters was included, thus each parameter is changed while all the other parameters are set to their baseline value. Given a percentage change for every model parameter the simulated Quality-Adjusted Life Years (QALYs), costs (โฌ), and Net Monetary Benefit (NMB in โฌ) cumulative over a 5 year simulated follow-up are computed. An explanation of the background of the computations is available in the Markov_model_explanation.docx file in this repository.
#
# This notebook is the first in an series of three:
# 1. Baseline simulations and one-way deterministic sensitivity analyses.
# 2. Probabilistic sensitivity analysis: uniform distributed parameters
# 3. Probabilistic sensitivity analysis: most probable distributed parameters
import pandas as pd
import numpy as np
import math
import time
import os
import pickle
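# As a quick illustration of the core mechanic used throughout this notebook (a sketch with made-up numbers, not the study parameters): one simulated period moves a cohort between the four health states by multiplying the current state distribution with a 4x4 transition matrix.
# +
example_transitions = np.array([[0.90, 0.00, 0.06, 0.04],
                                [0.00, 0.85, 0.10, 0.05],
                                [0.80, 0.10, 0.00, 0.10],
                                [0.00, 0.00, 0.00, 1.00]])
example_cohort = np.array([0, 0, 1000, 0])  # all patients start in the 'Hospital' state
print(np.matmul(example_cohort, example_transitions))
# -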
# ## Probability-time adjustment functions
# Since the baseline input values of the parameters were not all estimated over the same time span, conversion computations were required. Furthermore, based on the probability of an event in the control arm, the probability of an event in the intervention arm was computed with the Relative Risk (RR). The functions below assume a constant distribution of probabilities through time.
# +
def monthly_prob(totmonths, events, total):
"""
Function computes the monthly probability of an
event if measurement of events is over multiple months
totmonths: amount of months for measurement
events: number of events in totmonths
total: total amount of patients at risk in totmonths
"""
prob_event = (events/total)
prob_surv = 1 - prob_event
monthly_event_free_prob = prob_surv**(1/totmonths)
mothly_event_prob = 1 - monthly_event_free_prob
return mothly_event_prob
def RR_intervention(p_control, RR, rr_months, pc_months):
"""
Computes the probability of an event
in the intervention arm based on
the Relative Risk (RR) and probability
in the control arm (p_control).
p_control: Probability of event in control arm
RR: Relative Risk of event in intervention
arm relative to control arm
rr_months: months used to compute RR
pc_months: months over which p_control is measured
"""
if rr_months==pc_months:
p_intervention = p_control*RR
else:
# first convert the control probability to the
# same follow up time of the RR probabilities
pc_adj = 1-(1-p_control)**(rr_months/pc_months)
# compute the intervention probability event free
p_int_eventfree = 1-pc_adj*RR # probability of no event in intervention group
# go back to the followup time of the control probability
p_intervention = 1-p_int_eventfree**(pc_months/rr_months)
return p_intervention
# -
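# A quick usage sketch of these helpers, using the hospital-readmission numbers that appear further below (185 events among 948 patients over 12 months) and the readmission RR of 0.64 used in the sensitivity analyses:
# +
p_monthly = monthly_prob(12, 185, 948)
print('Monthly readmission probability (control):', round(p_monthly, 4))
print('Monthly readmission probability (intervention):', round(RR_intervention(p_monthly, 0.64, 12, 1), 4))
# -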
# ## Defining Costs and QALYs per health state
# The functions below define the cost and QALY parameters per health state. The function infl_adjustment computes a correction factor for the increase of costs through time.
# +
def infl_adjustment(months, yearly_CPI=1.029): #months
"""
Compute a inflation adjustment factor for the amount
of months (months) that have passed since the
start year (reference year; 2020). Use a predefined
inflation factor (yearly_CPI).
Output: The inflation adjustment factor.
"""
CPI_adj_factor = yearly_CPI**(months/12)
return CPI_adj_factor
def define_Costs(ic, CPI, refyear):
"""
- ic: Either the 'Intervention' or 'Control' arm
as follow-up costs can differ.
- CPI: Consumer price index adjustment factor,
used to compute the current
costs indexed from the year in which costs were computed.
In the study either 2014 or 2016 were
used for different costs.
- refyear: The refernce case year in which the simulations start,
in the study 2020 was used.
Output: Cost per month for each of the 4 Markov States
"""
FU_cost = 36*(CPI**(refyear-2016))
if ic=='Intervention':
FU_cost = 36*(CPI**(refyear-2014))
Costs_N12 = round(FU_cost,2)
Costs_N34 = round(FU_cost,2)
Costs_H = round(3800*(CPI**(refyear-2016)),2)
Costs_D = 0
return Costs_N12, Costs_N34, Costs_H, Costs_D
def define_QALYs():
"""
Define monthly QALYs for the 4 Markov
states used in this model.
Output: QALYs per month for each of the 4 Markov states
"""
QALY_N12 = 0.76/12
QALY_N34 = 0.54/12
QALY_H = 0.54/12
QALY_D = 0
return QALY_N12, QALY_N34, QALY_H, QALY_D
# -
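# A quick usage sketch: monthly costs per state for both arms (2.9% yearly CPI and the 2020 reference year, matching the defaults used later) and the monthly QALY weights.
# +
print('Control costs (N12, N34, H, D):', define_Costs('Control', 1.029, 2020))
print('Intervention costs (N12, N34, H, D):', define_Costs('Intervention', 1.029, 2020))
print('Monthly QALYs (N12, N34, H, D):', define_QALYs())
# -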
# ## Model input definition
# The function model_input receives a dictionary with all the parameters as control settings (including RR, costs and QALYs) and returns the probability transition matrices and the cost and QALY matrices for both the control and the intervention arm. As the model contains 4 Markov states, 4 transitions are possible from each state in every simulation period (month), and thus a 4x4 transition matrix was defined.
def model_input(dct):
"""
A dictionary with all the below defined
parameters was used as input for this function.
Output: control (fullc) and intervention (fulli)
transition matrices. Cost (control;intervention:
C_mat_c;C_mat_i) and QALY (Q_mat) matrices.
"""
#control arm
bc = dct['b']
cc = dct['c']
dc = dct['d']
ac = 1-bc-cc-dc
ec = dct['e'] #0
gc = dct['g']
hc = dct['h']
fc = 1-ec-gc-hc
jc = dct['j']
kc = dct['k'] #0
lc = dct['l']
ic = 1-jc-kc-lc
fullc = np.array([[ac, bc, cc, dc],
[ec, fc, gc, hc],
[ic, jc, kc, lc],
[0,0,0,1]], dtype = 'float64')
#intervention arm
# RR was computed over 12 months
# Probabilities over 1 month
bi = dct['b']
ci = RR_intervention(dct['c'], dct['RR_read'], 12, 1)
di = RR_intervention(dct['d'], dct['RR_mort'], 12, 1)
ai = 1-bi-ci-di
ei = dct['e']
gi = RR_intervention(dct['g'], dct['RR_read'], 12, 1)
hi = RR_intervention(dct['h'], dct['RR_mort'], 12, 1)
fi = 1-ei-gi-hi
ji = dct['j']
ki = dct['k']
li = dct['l']
ii = 1-ji-ki-li
fulli = np.array([[ai, bi, ci, di],
[ei, fi, gi, hi],
[ii, ji, ki, li],
[0,0,0,1]],dtype = 'float64')
Q_mat = np.array([dct['Q_N12'],dct['Q_N34'],dct['Q_H'],0])
# define the cost matrices, assume equal costs for hospitalization
C_mat_c = np.array([dct['C_N12_c'],dct['C_N34_c'],dct['C_H_c'],0])
C_mat_i = np.array([dct['C_N12_i'],dct['C_N34_i'],dct['C_H_c'],0])
return fullc, fulli, C_mat_c, C_mat_i, Q_mat
# ## Simulate a month
# The function below simulates a single period (month) based on input transition probabilities in a matrix and then calculates the QALYs and Costs with discounting.
def simulate_month(df, # pd DataFrame where all results are stored
r, # A name to add to each row of new results in df
month, # the period (month) since start of simulation
patient_dist, # Markov state distribution before new period
transition_mat, # Matrix with transition probabilities
Q_mat, # Matrix with QALYs per Markov state
C_mat, # Matrix with Costs per Markov state
discount_rate_C, # Discounting % for costs
CPI, # Inflation rate
discount_rate_Q): # Discounting % for QALYs
"""
Simulates a single month given input parameters
Output: df with results (df) and new Markov state
distribution of patients
"""
# Compute inflation adjustment factor (CPI_adj)
# for the amount of months that have passed since
# the begin of simulations
CPI_adj = infl_adjustment(month, CPI)
# compute the patient Markov state distribution after 1 period (month)
new_patient_dist = np.matmul(patient_dist,transition_mat)
# Use the patient Markov state distribution
#to compute costs and QALYs for the specified period (month)
QALYs = new_patient_dist*Q_mat
Costs = new_patient_dist*C_mat
T_Q = QALYs.sum()
T_C = Costs.sum()
# Compute discounted costs and QALYs
disc_factor_C = discount_rate_C**(month/12)
disc_factor_Q = discount_rate_Q**(month/12)
disc_Q = T_Q/(disc_factor_Q )
disc_C = round((T_C*CPI_adj)/(disc_factor_C), 2)
# put everything in a new row in the dataframe
nr = [r, month, *new_patient_dist,
*QALYs, T_Q, disc_Q,
*Costs, T_C, disc_C]
df.loc[len(df)]=nr
return df, new_patient_dist
# ## Perform the simulation
# The function below implements the simulation of a single cohort for multiple periods (months) given a specified transition matrix.
def simulate_cohort(transition_mat, # the transition matrix per period (month)
C_mat, # Matrix with Costs per Markov state
Q_mat, # Matrix with QALYs per Markov state
r='Control', #Name to add to each row in the output df
sim_months = 60, # Amount of total motnhs to simulate
cohort_size = 1e5, # Amount of patients in the cohort
CPI = 1.029, # Yearly inflation rate (2.9%)
discount_rate_Q = 1.015,# Yearly discounting rate of QALYs
discount_rate_C = 1.04):# Yearly discounting rate of Costs
"""
Simulates the cohort for multiple periods (months)
Output: Dataframe with outcome per period (result_df),
cumulative costs (Cost_tot_disc) and QALYs (QALY_tot_disc)
over the simulated period (sim_months)
"""
t1 = time.time()
# Define columns of the output file
result_df = \
pd.DataFrame(columns = ['Cohort_type', 'Month',
'NYHA_12', 'NYHA_34', 'Hospital', 'Dead',
'Q_N12','Q_N34','Q_H', 'Q_D',
'QALY_tot', 'QALY_disc',
'C_N12', 'C_N34','C_H', 'C_D',
'Cost_tot', 'Cost_disc'])
# Define the start patient distribution across Markov states
patient_dist = np.array([0,0,cohort_size,0])
# Perform computations per month
for month in range(1,sim_months+1):
result_df, patient_dist = \
simulate_month(result_df,r,month,
patient_dist,transition_mat, Q_mat,
C_mat,discount_rate_C, CPI, discount_rate_Q)
Cost_tot_disc = result_df['Cost_disc'].sum()
QALY_tot_disc = result_df['QALY_disc'].sum()
t2 = time.time()
print('Total simulation time '+r+':', round((t2-t1),2), 'seconds')
return result_df, Cost_tot_disc, QALY_tot_disc
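# A quick usage sketch of simulate_cohort, reusing the illustrative transition matrix from the top of the notebook together with the baseline control-arm cost and QALY definitions (12 simulated months only; not a study result).
# +
example_C = np.array(define_Costs('Control', 1.029, 2020))
example_Q = np.array(define_QALYs())
example_df, example_cost, example_qaly = simulate_cohort(
    example_transitions, example_C, example_Q, r='Illustration', sim_months=12)
print('Discounted totals over 12 months (costs, QALYs):', round(example_cost, 2), round(example_qaly, 2))
# -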
# ## General functions implemented in final function
# Two general helper functions were used to store and combine the results in a useful format.
# +
def excel_multtabs(df_list, tabname_list, loc, fname):
"""
Based on a list of pandas dataframes (df_list),
defined tabnames (tabname_list where len(df_list)),
a location and filename (loc, fname) a excel file
with multiple tabs is created and saved.
"""
f = loc+'\\'+fname
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(f, engine='xlsxwriter')
#loop over the list and write the tabs
for df,tabname in zip(df_list,tabname_list):
df.to_excel(writer, sheet_name=tabname)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
return
def merge_dicts(d1, d2):
"""
Function to merge two dictionaries to one
"""
for k,v in d1.items():
d1[k] = {**d1[k],**d2[k]}
return d1
# -
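# A tiny usage sketch for merge_dicts: two nested result dictionaries, keyed by parameter name and percentage change, are combined into one (both inputs must share the same outer keys).
# +
d_a = {'c': {0.1: {'dcosts': -10.0}}}
d_b = {'c': {-0.1: {'dcosts': -12.0}}}
print(merge_dicts(d_a, d_b))
# -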
# ## Deterministic sensitivity analysis probabilities
# In order to evaluate independent changes of the model parameters (except for a, e, f, i and k, which are either defined as zero or act as residuals), the analysis changes one variable at a time by a given percentage up and down while all other parameters stay at their baseline values. The residual parameters a, f and i absorb any change in the other transition probabilities so that each row of the transition matrix still sums to 1.
# +
def one_way_sens(fullc,# monthly probabilities of the control arm
perc_change, # percentage change to use for sensitivity analysis
RR_readmission=0.64, # RR of hospital readmission for intervention arm
RR_mortality=0.78, #RR of mortality for intervention arm
cohort_size = 1e5,# cohort size
sloc=None): # if sloc is specified results are saved
"""
Given a set of input parameters one-way sensitivity
analysis with change of each parameter with
a defined percentage (perc_change) was performed
Output: Dictionary with {parameter:{change_percentage:
{dCosts:cost values, dQALYs: QALY values}}}
"""
# define baseline parameters
a,b,c,d,e,f,g,h,i,j,k,l = list(fullc)
dt = np.array([a,b,c,d,e,f,g,h,i,j,k,l])
costs_control = list(define_Costs('Control', 1.029, 2020)[:-1])
costs_intervention = list(define_Costs('Intervention', 1.029, 2020)[:-1])
QALYs = list(define_QALYs()[:-1])
# create a dict of baseline control arm parameters
cols = ['changed_parameter',
'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'RR_read', 'RR_mort',
'C_N12_c', 'C_N34_c','C_H_c',
'C_N12_i', 'C_N34_i','C_H_i',
'Q_N12','Q_N34','Q_H']
data_row1 = ['original', *dt,RR_readmission,RR_mortality,
*costs_control, *costs_intervention, *QALYs]
dct = {}
for col,dr in zip(cols,data_row1):
dct[col]=dr
# Generate baseline model results
c_mat, i_mat, Cm_c, Cm_i, Qm = model_input(dct)
dfc, costc, qc = simulate_cohort(c_mat, Cm_c, Qm ,
r='Control_baseline',
cohort_size = cohort_size)
dfi, costi, qi = simulate_cohort(i_mat, Cm_i, Qm ,
r='Intervention_baseline',
cohort_size = cohort_size)
dcost_base = (costi-costc)/cohort_size
dqaly_base = (qi-qc)/cohort_size
df_list = [dfc,dfi]
tabname_list = ['C_base', 'I_base']
res = pd.DataFrame(columns = ['changed_parameter',
'C_costs', 'I_costs',
'C_QALY', 'I_QALY',
'dcosts', 'dQALYs',
'dcosts_base', 'dQALY_base'])
res.loc[len(res)] = \
['baseline',costc/cohort_size, costi/cohort_size,
qc/cohort_size, qi/cohort_size, dcost_base, dqaly_base, 0,0]
# define variables that are not used for
# deterministic sensitivity (by default left over parameters)
resultants = ['a', 'f', 'i']
zeros = ['e','k']
dct_restabl = {}
# loop over all parameters defined
for k,v in dct.items():
# exclude paramters that are resultants or defined as zero
if (k not in resultants)&\
(k not in zeros)&\
(k!='changed_parameter'):
tdict = {} # data is stored in this dictionary
updict = {**dct} # copy original parameters
downdict = {**dct} # copy original parameters
#change values +/- a perc_change
updict[k] = v*(1+perc_change)
downdict[k] = v*(1-perc_change)
#construct transition matrices for perc_change up and down
upc_mat, upi_mat, upCost_c, upCost_i, upQ = model_input(updict)
downc_mat, downi_mat, downCost_c, downCost_i, downQ = model_input(downdict)
#compute results for up and down perc_change of parameter k
addname = k+'__'+str(1+perc_change)
dfc, costc, qc = simulate_cohort(upc_mat, \
upCost_c, upQ, r='Control_'+addname, cohort_size = cohort_size)
dfi, costi, qi = simulate_cohort(upi_mat, \
upCost_i, upQ, r='Intervention_'+addname, cohort_size = cohort_size)
#compute difference between intervention and
#cohort and difference of difference compared to baseline
dcost = (costi-costc)/cohort_size # per patient difference in costs (intervention-control)
dqaly = (qi-qc)/cohort_size # per patient difference in QALYs (intervention-control)
dCb = abs(dcost)-abs(dcost_base) # difference in costs relative to baseline simulation
dQb = abs(dqaly)-abs(dqaly_base)# difference in QALYs relative to baseline simulation
df_list.extend([dfc,dfi])
tabname_list.extend(['C_'+addname, 'I_'+addname])
res.loc[len(res)] = [addname,costc/cohort_size, costi/cohort_size,
qc/cohort_size, qi/cohort_size, dcost, dqaly, dCb, dQb]
tdict[perc_change] = {'dcosts': dcost, 'dQALYs':dqaly}
addname = k+'__'+str(1-perc_change)
dfc, costc, qc = simulate_cohort(downc_mat, downCost_c, downQ, r='Control_'+addname,cohort_size = cohort_size)
dfi, costi, qi = simulate_cohort(downi_mat, downCost_i, downQ, r='Intervention_'+addname,cohort_size = cohort_size)
dcost = (costi-costc)/cohort_size
dqaly = (qi-qc)/cohort_size
dCb = abs(dcost)-abs(dcost_base)
dQb = abs(dqaly)-abs(dqaly_base)
df_list.extend([dfc,dfi])
tabname_list.extend(['C_'+addname, 'I_'+addname])
res.loc[len(res)] = [addname,costc/cohort_size, costi/cohort_size,
qc/cohort_size, qi/cohort_size, dcost, dqaly,dCb, dQb]
tdict[-perc_change] = {'dcosts': dcost, 'dQALYs':dqaly}
dct_restabl[k] = tdict
df_list.append(res)
tabname_list.append('differences')
# store all dataframes with per simulation results if required
if sloc!=None:
fname = 'one-way-sens_'+str(perc_change)+'.xlsx'
excel_multtabs(df_list, tabname_list, sloc, fname)
return dct_restabl
# -
# ## Implementation of study data
# +
# Control arm probabilities per month
#B = N12 -> N34 (NYHA decay from NYHA 1/2 to
#NYHA 3/4; net effect assumed zero)
bc = 0
#C = N12 -> H (Hospital readmission from NYHA 1/2)
c_tot = 948
c_event = 185
c_months = 12
cc = monthly_prob(c_months, c_event, c_tot)
#D = N12 -> D (Mortality from NYHA 1/2)
d_tot = 948
d_event = 217
d_months = 12
dc = monthly_prob(d_months, d_event, d_tot)
#A = N12 -> N12 (residual; No change from NYHA 1/2)
ac = 1 - bc - cc - dc
#E = N34 -> N12 (No recovery from NYHA 3/4 to
#NYHA 1/2 was assumed 0)
ec = 0
#G = N34 -> H (Hospital readmission rate from NYHA 3/4)
g_tot = 78 # Value observed in the cohort was not used
gc = 0.0227 # Literature estimate that was used (monthly)
#H = N34 -> D (Mortality rate from NYHA 3/4)
h_tot = 78
h_event = 31
h_months = 12
hc = monthly_prob(h_months, h_event, h_tot)
#F = N34 -> N34 (residual; No change from NYHA 3/4)
fc = 1 - gc - hc
#fi = 1 - gi - hi
#I = H -> N12 (Discharge to NYHA 1/2)
i_tot = 1114
i_event = 948
ic = i_event/i_tot
#J = H -> N34 (Discharge to NYHA 3/4)
j_tot = 1114
j_event = 78
jc = j_event/j_tot
#K = H -> H (Hospital admission were
# not longer than 1 month; defined 0)
kc = 0
#L = H -> D (In hospital mortality)
l_tot = 1114
l_event = 88
lc = l_event/l_tot
#M,N,O define as zero, P defined as 1 (dead = dead)
fullc = np.array([ac, bc, cc, dc,
ec, fc, gc, hc,
ic, jc, kc, lc])
# -
# ## Implementation of multiple one-way deterministic sensitivity analyses
# Below, multiple one-way deterministic sensitivity analyses are run for different percentage changes (pc).
sloc = r'C:\Users\henkvanvoorst\Documents\publicaties\HF simulatie\rebuttle\final_results\rebuttle_codecorrect'
dct_restabl = None
for pc in [.1,.2,.3,.4,.5]:
dct_restabl2 = one_way_sens(fullc,pc,RR_readmission=0.64, RR_mortality=0.78, sloc=sloc)
if dct_restabl!=None:
dct_restabl = merge_dicts(dct_restabl, dct_restabl2)
else:
dct_restabl = dct_restabl2
| 21,769 |
/.ipynb_checkpoints/Project1_MAKI-checkpoint.ipynb | 4a4a5d45c7c087f7fa68ed235634a632f77904ab | [] | no_license | Daskalovski13/Homework | https://github.com/Daskalovski13/Homework | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 217,860 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cv2
from matplotlib import pyplot as plt
import pandas as pd
img=cv2.imread('Downloads/Kitti_traning_dataset_5/000000.png')
dst0 = cv2.resize(img, dsize=(640, 360), interpolation=cv2.INTER_AREA)
# ## Bounding boxes after resizing to 640*360
#
# +
img=cv2.imread('Downloads/Kitti_traning_dataset_5/000000.png')
dst0 = cv2.resize(img, dsize=(640, 360), interpolation=cv2.INTER_AREA)
h_orig, w_orig, c_orig = img.shape
h, w, c = dst0.shape
print('Resized width:', w, '\nOriginal width:', w_orig, '\nRatio:', w/w_orig)
print('\nResized height:', h, '\nOriginal height:', h_orig, '\nRatio:', h/h_orig)
w_ratio = w/w_orig
h_ratio = h/h_orig
# -
def boundingbox(img,df):
for X in range (0,len(df)):
label=df[0][X]
        x1=round(float(df[4][X])*w_ratio)
        y1=round(float(df[5][X])*h_ratio)
        x2=round(float(df[6][X])*w_ratio)
        y2=round(float(df[7][X])*h_ratio)
img2=cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),1)
img2=cv2.putText(img, label, (x1-10,y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)
return img2
img=cv2.imread('Downloads/Kitti_traning_dataset_5/000000.png')
df=pd.read_csv('Downloads/Kitti_traning_dataset_5/000000.txt',sep=' ',header=None)
img=boundingbox(dst0,df)
cv2.imshow('img',img)
cv2.waitKey(0)
plt.imshow(dst0)
# # Bounding box after rotation
#
# x = r cos θ
#
# y = r sin θ
#
# Use these conversion formulas to convert polar coordinates to rectangular (Cartesian) coordinates.
from sympy import *
import math
# +
i=60
sinx = math.sin(math.pi * (i/180))
cosx = math.cos(math.pi * (i/180))
print('sin',i ,':',sinx,'cos',i,':', cosx)
# -
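# The sketch below (not part of the original notebook) shows how the sine/cosine values above can be applied: rotating a bounding-box corner (x, y) around the resized image centre by the angle i, using the standard 2D rotation matrix. The corner coordinates are made up for illustration.
# +
def rotate_point(x, y, cx, cy, cos_t, sin_t):
    # translate to the centre, apply the rotation matrix, translate back
    dx, dy = x - cx, y - cy
    return cx + dx*cos_t - dy*sin_t, cy + dx*sin_t + dy*cos_t

# example: an assumed corner (100, 150) rotated around the 640x360 image centre (320, 180)
print(rotate_point(100, 150, 320, 180, cosx, sinx))
# -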
# # Polynomial regression
# Ordinary least squares:
# Setting up the design matrices for 3rd, 4th and 5th order Polynomial:
poly3 = PolynomialFeatures(degree=3)
Xdes3 = poly3.fit_transform(np.c_[xx.ravel(), yy.ravel()])
poly4 = PolynomialFeatures(degree=4)
Xdes4 = poly4.fit_transform(np.c_[xx.ravel(), yy.ravel()])
poly5 = PolynomialFeatures(degree=5)
Xdes5 = poly5.fit_transform(np.c_[xx.ravel(), yy.ravel()])
# Reshaping the zz to fit the data
z = zz.reshape(-1,1)
# Setting up the fitting functions
ols3 = LinearRegression()
ols3.fit(Xdes3,z)
ols4 = LinearRegression()
ols4.fit(Xdes4,z)
ols5 = LinearRegression()
ols5.fit(Xdes5,z)
# New data for testing and plotting the model:
n_row = 100
n_col = 100
ax_row = np.random.rand(n_row)
ax_col = np.random.rand(n_col)
sort_inds_row = np.argsort(ax_row)
sort_inds_col = np.argsort(ax_col)
ROW = ax_row[sort_inds_row]
COL = ax_col[sort_inds_col]
ROWp, COLp = np.meshgrid(ROW, COL)
X3plot = poly3.fit_transform(np.c_[ROWp.ravel(), COLp.ravel()])
X4plot = poly4.fit_transform(np.c_[ROWp.ravel(), COLp.ravel()])
X5plot = poly5.fit_transform(np.c_[ROWp.ravel(), COLp.ravel()])
# This evaluates the height associated for each pair of coordinate made from np.meshgrid
Zpredict3 = ols3.predict(X3plot)
Zpredict4 = ols4.predict(X4plot)
Zpredict5 = ols5.predict(X5plot)
# +
# Plot the generated surfaces for OLS models.
fig3 = plt.figure()
ax3 = fig3.gca(projection='3d')
surf = ax3.plot_surface(ROWp, COLp, Zpredict3.reshape(*ROWp.shape), linewidth = 0, antialiased = False, cmap=cm.plasma)
ax3.set_title('3rd order Polynomial model')
ax3.set_xlabel("x")
ax3.set_ylabel("y")
ax3.set_zlabel("z")
fig3.colorbar(surf)
fig4 = plt.figure()
ax4 = fig4.gca(projection='3d')
surf = ax4.plot_surface(ROWp, COLp, Zpredict4.reshape(*ROWp.shape), linewidth = 0, antialiased = False, cmap=cm.rainbow)
ax4.set_title('4th order Polynomial model')
ax4.set_xlabel("x")
ax4.set_ylabel("y")
ax4.set_zlabel("z")
fig4.colorbar(surf)
fig5 = plt.figure()
ax5 = fig5.gca(projection='3d')
surf = ax5.plot_surface(ROWp, COLp, Zpredict5.reshape(*ROWp.shape), linewidth = 0, antialiased = False, cmap=cm.terrain)
ax5.set_title('5th order Polynomial model')
ax5.set_xlabel("x")
ax5.set_ylabel("y")
ax5.set_zlabel("z")
fig5.colorbar(surf)
plt.show()
# +
# Evaluating the OLS models: variances, MSE and R2 scores:
from sklearn.metrics import mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error
v_beta3 = np.diag(np.linalg.inv(Xdes3.T.dot(Xdes3)))
sigma3 = 1/(n-3-1) * np.sum((z-Zpredict3)**2)
var3 = v_beta3*sigma3
print('Ordinary least squares results: \n')
print(' Variance of betas, 3rd order:', var3)
v_beta4 = np.diag(np.linalg.inv(Xdes4.T.dot(Xdes4)))
sigma4 = 1/(n-4-1) * np.sum((z-Zpredict4)**2)
var4 = v_beta4*sigma4
print('\n Variance of betas, 4th order:', var4)
v_beta5 = np.diag(np.linalg.inv(Xdes5.T.dot(Xdes5)))
sigma5 = 1/(n-5-1) * np.sum((z-Zpredict5)**2)
var5 = v_beta5*sigma5
print('\n Variance of betas, 5th order:', var5)
print("\n Mean squared error, 3rd order:", mean_squared_error(z,Zpredict3))
print(" Mean squared error, 4th order:", mean_squared_error(z,Zpredict4))
print(" Mean squared error, 5th order:", mean_squared_error(z,Zpredict5))
print('\n R2 score, 3rd order:', r2_score(z,Zpredict3))
print(' R2 score, 4th order:', r2_score(z,Zpredict4))
print(' R2 score, 5th order:', r2_score(z,Zpredict5))
# -
# +
# Ridge method:
# Define various lambda values to be tested
lmb_values = [1e-4, 1e-3, 1e-2, 10, 1e2, 1e4]
num_values = len(lmb_values)
## Ridge-regression for 3rd, 4th and 5th order Polynomial
beta_ridge3 = np.zeros((np.size(Xdes3,1),num_values))
beta_ridge4 = np.zeros((np.size(Xdes4,1),num_values))
beta_ridge5 = np.zeros((np.size(Xdes5,1),num_values))
I3 = np.eye(np.size(Xdes3,1))
I4 = np.eye(np.size(Xdes4,1))
I5 = np.eye(np.size(Xdes5,1))
for i,lmb in enumerate(lmb_values):
beta_ridge3[:,i] = (np.linalg.inv( Xdes3.T @ Xdes3 + lmb*I3) @ Xdes3.T @ z).flatten()
for i,lmb in enumerate(lmb_values):
beta_ridge4[:,i] = (np.linalg.inv( Xdes4.T @ Xdes4 + lmb*I4) @ Xdes4.T @ z).flatten()
for i,lmb in enumerate(lmb_values):
beta_ridge5[:,i] = (np.linalg.inv( Xdes5.T @ Xdes5 + lmb*I5) @ Xdes5.T @ z).flatten()
pred_ridge3 = X3plot @ beta_ridge3
pred_ridge4 = X4plot @ beta_ridge4
pred_ridge5 = X5plot @ beta_ridge5
### R2-score of the results
print('Ridge method R2 scores: \n')
for i in range(num_values):
print('lambda = %g'%lmb_values[i])
print('R2 3rd order Polynomial: %g'%r2_score(z,pred_ridge3[:,i]))
print('R2 4th order Polynomial: %g'%r2_score(z,pred_ridge4[:,i]))
print('R2 5th order Polynomial: %g\n'%r2_score(z,pred_ridge5[:,i]))
# -
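# A minimal cross-check sketch (not part of the original analysis): the closed-form ridge coefficients above can be compared against scikit-learn's Ridge; fit_intercept=False is assumed because the PolynomialFeatures design matrix already contains the bias column.
# +
from sklearn.linear_model import Ridge

lmb_check = lmb_values[0]
ridge_sk = Ridge(alpha=lmb_check, fit_intercept=False)
ridge_sk.fit(Xdes3, z)
print('Max |beta difference| vs closed form (lambda=%g): %g'
      % (lmb_check, np.max(np.abs(ridge_sk.coef_.flatten() - beta_ridge3[:, 0]))))
# -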
| 6,476 |
/Model/brain-tumor-classification-using-cnn.ipynb | 302423eaf9d294246f24f119cc949ed1f5136317 | [] | no_license | AM1CODES/HackOff---Brain-Tumor-Detection-Web-app | https://github.com/AM1CODES/HackOff---Brain-Tumor-Detection-Web-app | 4 | 2 | null | 2021-06-02T13:07:46 | 2021-02-10T15:14:46 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 128,856 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-output=false
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop,Adam
import cv2
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
DATA = r"/kaggle/input/brain-tumor-classification-mri/Training/" #reading the data
CATEGORIES = ["glioma_tumor","meningioma_tumor","no_tumor","pituitary_tumor"] #defining the 4 categories that we have
for category in CATEGORIES:
path = os.path.join(DATA,category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path,img))
plt.imshow(img_array)
plt.show()
plt.axis("off")
break
break
# -
IMG_SIZE = 150 #defining our image size
new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))#scaling down our images
plt.imshow(new_array,cmap = "gray")
plt.axis("off")
# +
training_data = [] #manipulating our training data
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATA,category)
class_num = CATEGORIES.index(category) #defining the different categories of the images in our data
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE) #loading the images in grayscale
new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))
training_data.append([new_array,class_num]) #adding our data in to the training_data list which we will use to define our X and y for train-tets split
except Exception as e:
pass
create_training_data()
# -
X = [] #used for storing the features
y = [] #used for storing the labels
for features,label in training_data:
X.append(features)
y.append(label)
X = np.array(X).reshape(-1,IMG_SIZE,IMG_SIZE)
#print(X.shape)
X = X/255.0
X = X.reshape(-1,150,150,1)
print(X.shape)
from keras.utils.np_utils import to_categorical #one-hot encoding our labels
y = to_categorical(y, num_classes = 4)
from sklearn.model_selection import train_test_split #splitting the data into training and validation sets
X_train, X_val, Y_train, Y_val = train_test_split(X, y, test_size = 0.2, random_state=42)
print("x_train shape",X_train.shape)
print("x_test shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_test shape",Y_val.shape)
# +
#defining our model
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3,3), activation='relu',padding = 'Same', input_shape=(150, 150, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(128, (3,3), activation='relu',padding = 'Same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(128, (3,3), activation='relu',padding = 'Same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(128, (3,3), activation='relu',padding = 'Same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(256, (3,3), activation='relu',padding = 'Same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(4, activation='softmax')
])
optimizer = Adam(lr=0.001)
model.compile(loss='categorical_crossentropy',
optimizer = optimizer,
metrics=['accuracy'])
epochs = 50
batch_size = 40
datagen = ImageDataGenerator(
rotation_range=0,
zoom_range = 0,
width_shift_range=0,
height_shift_range=0,
horizontal_flip=True,
vertical_flip=False)
# -
model.summary() #checking what our final model would look like
datagen.fit(X_train)
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_val,Y_val))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.title('Loss')
plt.xlabel('epoch')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['accuracy', 'val_accuracy'])
plt.title('Accuracy')
plt.xlabel('epoch')
model.save('BrainTumor.h5')
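# A minimal inference sketch: load the saved model back and classify a single scan. The image path and helper name are illustrative assumptions, but the preprocessing mirrors the training pipeline above (grayscale, 150x150, scaled to [0, 1]).
# +
loaded_model = tf.keras.models.load_model('BrainTumor.h5')

def predict_single_image(img_path, model, img_size=150):
    arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    arr = cv2.resize(arr, (img_size, img_size)) / 255.0
    arr = arr.reshape(1, img_size, img_size, 1)
    probs = model.predict(arr)[0]
    return CATEGORIES[np.argmax(probs)], probs

# Hypothetical usage (path is illustrative only):
# label, probs = predict_single_image('/kaggle/input/brain-tumor-classification-mri/Testing/no_tumor/image(1).jpg', loaded_model)
# print(label, probs)
# -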
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
Waihee = session.query((Measurement.tobs)).\
filter(and_((Measurement.station=="USC00519281"),(Measurement.date>yearago))).all()
Waihee = pd.DataFrame(Waihee, columns = ['Temperature'])
Waihee.head()
# +
ax = Waihee.hist(column='Temperature', bins=12, grid=False, figsize=(12,8), color='blue', zorder=2, rwidth=0.9, label='Temperature')
ax = ax[0]
for x in ax:
x.set_title("Waihee Temps 8-24-16 to 8-23-17")
x.set_xlabel("Temperature (Degrees Fahrenheit)", labelpad=20, weight='bold', size=12)
x.set_ylabel("Frequency", labelpad=20, weight='bold', size=12)
plt.grid(True)
# -
# ![precipitation](Images/station-histogram.png)
# +
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# function usage example
print(calc_temps('2012-2-28', '2012-3-05'))
# -
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# --------------------------------------------------------------------------
# No trip dates were given, so I made them up. I also had to grab older dates that would be within the range of the data.
# This didn't take long but the instructions were incomplete and misleading.
print(calc_temps('2016-10-7','2016-11-1'))
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
result = calc_temps('2016-10-7','2016-11-1')
result
result[0][0]
# fig, ax = plt.subplots()
# plt.bar(x='Trip Duration', y='Temperature',yerr=(result[0][2]-result[0][0]), ax=result[0][1], capsize=4)
# errorbar(result[0][1],result[0][1], yerr=(result[0][2]-result[0][0]), marker='s', mfc='red',
# mec='green', ms=20, mew=4)
trips = ["Oct 7th to Nov 1st"]
Avg_Temps = [result[0][1]]
x_axis = np.arange(len(Avg_Temps))
plt.bar(trips, Avg_Temps, yerr=(result[0][2]-result[0][0]), color="b", align="center",capsize=10)
plt.title("Trip Avg Temp")
plt.ylabel("Temp (F)")
# +
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
# order_by(desc(total_rain)).all()
sel6 = [Measurement.station, Station.name, Station.latitude,Station.longitude,Station.elevation, func.sum(Measurement.prcp).label('total_rain')]
trip_rain = session.query(*sel6).\
    filter(Measurement.station == Station.station).\
    filter(Measurement.date >= '2016-10-7').filter(Measurement.date <= '2016-11-1').\
    group_by(Measurement.station)
trip_rain
trip_rain_df=pd.DataFrame(trip_rain, columns= ['station', 'name', 'latitude','longitude','elevation','precipitation'])
trip_rain_df.sort_values(by='precipitation', ascending=False).reset_index()
# -
trip_rain.all()
# ## Optional Challenge Assignment
# +
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# +
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
# -
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
# Plot the daily normals as an area plot with `stacked=False`
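# A minimal sketch of those steps (assuming the same hypothetical trip dates, 2016-10-07 to 2016-11-01, used earlier; only the month-day part matters for the normals).
# +
trip_dates = pd.date_range('2016-10-07', '2016-11-01')
month_days = trip_dates.strftime('%m-%d')

# daily_normals returns a list with a single (tmin, tavg, tmax) tuple per date
normals = [daily_normals(md)[0] for md in month_days]

normals_df = pd.DataFrame(normals, columns=['tmin', 'tavg', 'tmax'])
normals_df['date'] = trip_dates
normals_df = normals_df.set_index('date')

normals_df.plot.area(stacked=False, alpha=0.3)
plt.tight_layout()
# -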
| 9,601 |
/3. Data Visualisations/.ipynb_checkpoints/Data Visualisation-checkpoint.ipynb | f4eff6a89106d4dcddb265e1def3a5cf7b792a82 | [] | no_license | spencerldixon/intro-to-data-science | https://github.com/spencerldixon/intro-to-data-science | 2 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 601,411 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Visualisation with Matplotlib and Seaborn
#
# In this lesson, we'll look at two popular data visualisation libraries; Matplotlib, and Seaborn (built on top of matplotlib)
#
# Let's get started by importing our dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
# Magic helper for inline graphs in jupyter notebook
# %matplotlib inline
# -
x = np.linspace(0,5,11)
y = x ** 2
x
y
# ## Matplotlib Approach #1 - Functional
#
# Matplotlib can be used in two ways, the functional approach, and the object oriented approach. You'll probably come across both, so it's worth being familiar with both approaches.
plt.plot(x, y)
plt.show()
plt.plot(x, y)
plt.xlabel('X Label')
plt.ylabel('Y Label')
plt.title('My Graph')
plt.show()
# +
plt.subplot(1,2,1) # Number of rows, number of columns, plot we're referring to
plt.plot(x,y,'r')
plt.subplot(1,2,2)
plt.plot(y,x,'b')
plt.show()
# -
# ## Approach #2 - Object Oriented
#
# The object oriented approach is a little more tricky but can be used to generate more complex plots. The idea is to create a blank canvas first (called a figure) and then add the charts we want (axes) and plot to those axes.
# +
fig = plt.figure() # Create a blank figure
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # Add a graph/set of axes (Left start, bottom start, width, height)
axes.plot(x, y)
axes.set_xlabel('X Label')
axes.set_ylabel('Y Label')
axes.set_title("My Title")
# +
fig = plt.figure(figsize=(5,5))
axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3])
axes1.plot(x,y,'r')
axes2.plot(y,x)
# -
fig, axes = plt.subplots(nrows=1, ncols=2)
axes
# +
fig, axes = plt.subplots(nrows=1, ncols=2)
axes[0].plot(x, y)
axes[0].set_title("first plot")
axes[1].plot(y, x)
axes[1].set_title("second plot")
plt.tight_layout()
# -
# ## Legends
#
# To add legends to our charts, we need to do two things: first, ensure our plots are labelled; second, call the legend method and optionally specify a location for the legend.
# +
fig = plt.figure(figsize=(3,2))
ax = fig.add_axes([0,0,1,1])
ax.plot(x, y, label="Things") # Must use labels for legends
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title("My Chart")
ax.legend(loc="best")
# -
# ## Saving Charts
#
# I recommend setting dpi to 300 if the figure will be used for print or publication
fig.savefig('my_chart.png', dpi=300)
# ## Styling
# +
z = np.random.rand(11)
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.plot(x, y, color="red", linewidth=1) # Can use strings, or hex
ax.plot(y, x, color="#0000FF", linewidth=3, alpha=0.5, linestyle="--")
ax.plot(y, z, color="green", linestyle=":", marker="o", markersize="5")
# -
# # Pandas Built In Plotting
df = pd.DataFrame({"name": ["Alice", "Bob", "Charles", "David", "Emma", "Fred"],
"age": [27, 28, 40, 27, 30, 41]
})
df
df['age'].plot.hist(bins=5)
df.plot.bar()
# # Seaborn
#
# Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
import seaborn as sns
tips = sns.load_dataset('tips')
tips.head()
sns.countplot(x='sex', data=tips)
# +
plt.figure(figsize=((12,3)))
sns.countplot(x='sex', data=tips)
# -
sns.distplot(tips['total_bill'], kde=False, bins=30) # kde=False hides the kernel density estimate curve
sns.jointplot(x='total_bill', y='tip', data=tips)
sns.pairplot(tips)
sns.pairplot(tips, hue='sex')
sns.rugplot(tips['total_bill'])
# +
# x is categorical, y is numerical
# Uses mean by default
sns.barplot(x='sex', y='total_bill', data=tips)
# -
sns.barplot(x='sex', y='total_bill', data=tips, estimator=np.std)
sns.boxplot(x='day', y='total_bill', data=tips)
sns.boxplot(x='day', y='total_bill', data=tips, hue='smoker')
sns.violinplot(x='day', y='total_bill', data=tips, hue='sex')
sns.violinplot(x='day', y='total_bill', data=tips, hue='sex', split=True)
sns.stripplot(x='day', y='total_bill', data=tips)
sns.stripplot(x='day', y='total_bill', data=tips, jitter=True)
sns.violinplot(x='day', y='total_bill', data=tips)
sns.stripplot(x='day', y='total_bill', data=tips, jitter=True, color="black")
# ## Matrix plots
# +
flights = sns.load_dataset('flights')
flights.head()
# -
tc = tips.corr()
sns.heatmap(data=tc, annot=True, cmap='coolwarm')
fp = flights.pivot_table(index='month', columns='year', values='passengers')
fp
sns.heatmap(fp, cmap='coolwarm')
sns.clustermap(fp, cmap='coolwarm')
sns.clustermap(fp, cmap='coolwarm', standard_scale=1)
# ## Grids
iris = sns.load_dataset('iris')
iris.head()
iris['species'].unique()
g = sns.FacetGrid(data=tips, col='time', row='smoker')
g.map(sns.distplot, 'total_bill')
sns.lmplot(x='total_bill', y='tip', data=tips)
predict([1,0,0])))
# +
#Demonstrate NAND logic
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1],
])
#only false when all 1's
y = np.array([
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[0],
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 500000
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
print("Should be 1: {}".format(predict([1,0,0])))
print("Should be 1: {}".format(predict([1,0,1])))
print("Should be 1: {}".format(predict([0,0,0])))
# +
#Demonstrate an OR logic
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1],
])
#only false when all 0's
y = np.array([
[0],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 1: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
print("Should be 1: {}".format(predict([1,0,0])))
print("Should be 1: {}".format(predict([0,0,1])))
print("Should be 0: {}".format(predict([0,0,0])))
# +
#Demonstrate AND on the second two
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1],
])
#only true when last two are true
y = np.array([
[0],
[0],
[0],
[1],
[0],
[0],
[0],
[1],
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([1,0,1])))
print("Should be 0: {}".format(predict([1,1,0])))
print("Should be 0: {}".format(predict([0,0,0])))
print("Should be 1: {}".format(predict([0,1,1])))
print("Should be 1: {}".format(predict([1,1,1])))
# +
#Demonstrate OR on the second two
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1],
])
#only true when either of the last two are true
y = np.array([
[0],
[1],
[1],
[1],
[0],
[1],
[1],
[1],
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([0,0,0])))
print("Should be 0: {}".format(predict([1,0,0])))
print("Should be 1: {}".format(predict([0,0,1])))
print("Should be 1: {}".format(predict([0,1,1])))
print("Should be 1: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
# +
#The prior tests have given the perceptron a very straightforward truth table,
#Now let's throw in a curve ball, where there is at least one ambiguous case
#So based on the OR on the second two test from before, we will throw in an extra record
#that contradicts the prior case.
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0], #this one
[1,0,1],
[1,1,0],
[1,1,1],
[1,0,0] #and this one
])
#
y = np.array([
[0],
[1],
[1],
[1],
[0], #are
[1],
[1],
[1],
[1] #ambiguous
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([0,0,0])))
print("Should be 1: {}".format(predict([0,0,1])))
print("Should be 1: {}".format(predict([0,1,1])))
print("Should be 1: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
print("This is the ambiguous case: {}".format(predict([1,0,0])))
# +
#So the prior one with the ambiguous case came down on the side of zero,
#let's skew that with a larger number of ambiguous records leaning toward zero
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0], #this one
[1,0,1],
[1,1,0],
[1,1,1],
[1,0,0], #and this one
[1,0,0] #but so is this this one
])
#
y = np.array([
[0],
[1],
[1],
[1],
[0], #are
[1],
[1],
[1],
[1], #ambiguous
[0] #but now we lean zero
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([0,0,0])))
print("Should be 1: {}".format(predict([0,0,1])))
print("Should be 1: {}".format(predict([0,1,1])))
print("Should be 1: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
print("This is the ambiguous case, hoping for zero: {}".format(predict([1,0,0])))
# +
#So the prior one with the ambiguous case came down on the side of zero,
#let's skew it back to one with more data
X = np.array([
[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0], #this one
[1,0,1],
[1,1,0],
[1,1,1],
[1,0,0], #and this one
[1,0,0], #but so is this this one
[1,0,0], #and this
[1,0,0], #and this
])
#
y = np.array([
[0],
[1],
[1],
[1],
[0], #are
[1],
[1],
[1],
[1], #ambiguous
[0], #but now we lean zero
[1], #and now back to one
[1],
])
#set up the learning rate
lr = 0.1
#set up the number of epochs
e = 700
train(X, y, lr, e)
print("Final Weights: ")
print_vars()
print("--------------------Predictions")
print("Should be 0: {}".format(predict([0,0,0])))
print("Should be 1: {}".format(predict([0,0,1])))
print("Should be 1: {}".format(predict([0,1,1])))
print("Should be 1: {}".format(predict([1,1,1])))
print("Should be 1: {}".format(predict([1,1,0])))
print("This is the ambiguous case, hoping for one: {}".format(predict([1,0,0])))
# -
| 11,103 |
/list/frequency_of_elements_solution.ipynb | fca48ec6e626acf31c8224c72f5488b6709743b5 | [
"Apache-2.0"
] | permissive | cntfk2017/Udemy_Python_Hand_On | https://github.com/cntfk2017/Udemy_Python_Hand_On | 1 | 0 | Apache-2.0 | 2020-05-25T07:09:26 | 2020-05-25T07:09:25 | null | Jupyter Notebook | false | false | .py | 1,299 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Write a Python program to get the frequency of the elements in a list.
# input
# my_list = [10,10,10,10,20,20,20,20,40,40,50,50,30]
# output
# {10: 4, 20: 4, 40: 2, 50: 2, 30: 1}
import collections
my_list = [10,10,10,10,20,20,20,20,40,40,50,50,30]
print("Original List : ",my_list)
ctr = collections.Counter(my_list)
print("Frequency of the elements in the List : ",ctr)
| 645 |
/code2.ipynb | e91b508123b83b0736eacf1ca9688ad304afed8a | [] | no_license | acachila/RealEstateETL | https://github.com/acachila/RealEstateETL | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 50,703 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sqlalchemy import create_engine
from datetime import datetime
import datetime as dt
rds_connection_string = "postgres:Dpspfmrlvk*9@localhost:5432/House_sales"
engine = create_engine(f'postgresql://{rds_connection_string}')
engine.table_names()
data = pd.read_sql_table(con = engine, table_name = "list_per_sq_ft")
data.head()
# start = input("start: ")
# end = input("end: ")
start = "2019-1-1"
end = "2019-5-1"
start = datetime.strptime(start,'%Y-%m-%d')
end = datetime.strptime(end,'%Y-%m-%d')
start = datetime.strftime(start,'%Y-%m')
end = datetime.strftime(end,'%Y-%m')
df = data[["regionname","city","state","metro","countyname",start,end]]
df.rename(columns={"regionname":"zip"},inplace=True)
df["diff"] = df[end].astype(float)/df[start].astype(float)
df1 = df.sort_values("diff",ascending=True).head()
df1 = df1.reset_index(drop=True)
df1
zipcodes= df1["zip"]
cities = df1["city"]
states = df1["state"]
zipcode = zipcodes[0]
city = cities[0]
state = states[0]
# +
# Use the Google Geocoding and Places APIs to gather local data around the selected zip codes.
# Dependencies
import requests
import json
# from config import gkey
gkey = "AIzaSyA-Rjp6nOeJp6815Xt1Kkuxc5XKMiKl_yA"
# Retrieve Google API key from config.py
# +
# Geocode the selected zip code to get its coordinates
target = zipcode
# Build the endpoint URL
target_url = (f'https://maps.googleapis.com/maps/api/geocode/json?address={target}&key={gkey}')
geo_data = requests.get(target_url).json()
# +
# Extract the latitude and longitude from the geocoding response
lat = geo_data["results"][0]["geometry"]["location"]["lat"]
lng = geo_data["results"][0]["geometry"]["location"]["lng"]
# Print the latitude and longitude
print('''
Place: {0}
Latitude: {1}
Longitude: {2}
'''.format(target, lat, lng))
# -
# create dataframe/clear datafrmae
import pandas as pd
records = pd.DataFrame()
# +
target_coordinates = str(lat) + "," + str(lng)
target_search = input("search: ")
target_radius = 8000
target_type = input("type: ")
# set up a parameters dictionary
params = {
"location": target_coordinates,
"keyword": target_search,
"radius": target_radius,
"type": target_type,
"key": gkey
}
# base url
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# run a request using our params dictionary
response = requests.get(base_url, params=params)
places_data = response.json()
n=0
# while int(n) > len(places_data):
while int(n) < len(places_data["results"]):
try:
price=places_data["results"][int(n)]["price_level"]
except KeyError:
price = "NA"
content = pd.DataFrame ({"type":target_search,
"name":[places_data["results"][int(n)]["name"]],
"score":[places_data["results"][int(n)]["rating"]],
"reviews":[places_data["results"][int(n)]["user_ratings_total"]],
"price":price,
"address":[places_data["results"][int(n)]["vicinity"]]})
records = records.append(content)
n+=1
records
# -
#write the collected places data to the database
records.to_sql("local_place1",con=engine,index=False,if_exists='replace')
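# A small exploratory sketch (not part of the original flow): average rating and review
# count per place type collected above.
print(records.groupby("type")[["score", "reviews"]].mean())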
#crime rate data gathering
# Dependencies
import os
from bs4 import BeautifulSoup as bs
import requests
import re
crime_rate = pd.DataFrame()
for n in range(0,len(df1)):
state = states[n]
city = cities[n]
zipcode = zipcodes[n]
# URL of page to be scraped
url = f'https://www.bestplaces.net/crime/zip-code/{state}/{city}/{zipcode}'
# Retrieve page with the requests module
response = requests.get(url)
# Create a Beautiful Soup object
soup = bs(response.text, 'html.parser')
# Print all divs with col-md-12
divs = soup.find_all("div", {"class": "col-md-12"})
# n=0
# for div in divs:
# print(f'dqyun:{n}')
# n+=1
# print(div.text)
s = str(divs[2]).split("violent crime is ")[1]
result = re.findall(r"[-+]?\d*\.\d+|\d+", s)
vcr = result[0]
s = str(divs[2]).split("property crime is ")[1]
result = re.findall(r"[-+]?\d*\.\d+|\d+", s)
pcr = result[0]
apnd = pd.DataFrame({"zip":[zipcode],
"violent crime": [vcr],
"property crime": [pcr]})
print("df")
crime_rate = crime_rate.append(apnd)
divs[2]
crime_rate
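# +
# A quick, self-contained check of the number-extracting regex used in the loop above.
# The sample sentence is an assumption about the site's wording, purely for illustration.
sample = "violent crime is 22.7 (The US average is 22.7)"
print(re.findall(r"[-+]?\d*\.\d+|\d+", sample)[0])  # -> '22.7'
# -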
crime_save = pd.merge(df1,crime_rate,on="zip")
crime_save.to_sql("crime1",con=engine,index=False,if_exists='replace')
engine.table_names()
recalled_crime_data = pd.read_sql_table(con = engine, table_name = "crime1")
recalled_crime_data.head()
recalled_local_data = pd.read_sql_table(con = engine, table_name = "local_place1")
recalled_local_data.head()
| 5,094 |
/examples/FinRL_PaperTrading_Demo.ipynb | 7a0913277a3f1d1e05e0f6b45b8ad18f3f1c1237 | [
"MIT"
] | permissive | AI4Finance-LLC/FinRL-Library | https://github.com/AI4Finance-LLC/FinRL-Library | 1,949 | 478 | null | null | null | null | Jupyter Notebook | false | false | .py | 72,036 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scraping data about US congress members from Wikipedia
# In this notebook we start a project whose aim is a machine learning model that can determine which party a US congressional district is most likely to vote for. The first part of every project is to acquire the required data. This can be done by receiving it from a third party, by downloading data files or, as in this case, by scraping a webpage for it.
from bs4 import BeautifulSoup as bs #For inspecting html webpage in notebook
import pandas as pd #To put data into frames for joining into a final result, also used for printing to csv
import lxml #For parsing html
import requests #For requesting the webpages which we will srape
import time #To have a wait timer when scraping, for politeness sake
import os #For saving in folder
import multiprocessing as mp
# For this project we will start with the wikipedia page detailing the current (2020-06-21) list of US congress members. From this base page we can get the representative from each congressional district together with data about their party affiliation, previous experience, education, when they assumed their current office, residence, and which year they were born.
url = "https://en.wikipedia.org/wiki/List_of_current_members_of_the_United_States_House_of_Representatives" #Url to wikipedia page
response = requests.get(url) #The received page when requesting the specified url
soup = bs(response.content, 'lxml') #creating a BeautifulSoup object which we can display in the notebook and inspect
print(soup.prettify()) #Print the parsed html page
tables = soup.find_all('table') #Returns all tables on the webpage
tables #Print all tables in jupyter
#Returns the tables where you can sort the data on the webpage.
members_table = soup.find_all("table", class_ ="wikitable sortable")[2] #The table which we are interested in
print(members_table.prettify()) #Print the table of interest
# Pandas has a built-in function to instantly scrape the wikipedia table and put the information into a pandas frame.
congress_members_frame = pd.read_html("https://en.wikipedia.org/wiki/List_of_current_members_of_the_United_States_House_of_Representatives")[6] #Index specifies which table to put into a frame
congress_members_frame
# Now we will go through the table of congress members and scrape the links to their wikipedia pages
links_to_members = [] #list to store links
for row in members_table.findAll('tr'): #find all rows
cells = row.findAll('td') #find all columns
if len(cells) == 9: #the number of columns in the table of interest is 9
        links = cells[1].findAll('a') #By inspecting the parsed html page we can see that links start with an 'a' tag, hence we want to find all links in the second column
if links != []: #Make sure that there is a link, vacancies have no links for example
            link = links[1].get('href') #Since the table has a link to an image of the congress member before the link to their page, we need to choose the second link
links_to_members.append('https://en.wikipedia.org' + link) #Add the unique link to the list
else:
continue #If no link is found continue to next row
# Use the list created above to visit each member's page and extract the name of their spouse, if any, and number of children, if any. Names are scraped to get a unique key for later joining.
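# As a quick illustration of the parsing used below: raw wikitext stores infobox fields as `| key = value` lines, so taking everything after the first `=` recovers the value. The sample line here is made up purely for illustration.
# +
sample_line = '| spouse = Jane Doe'  # hypothetical infobox line
print(sample_line.partition('=')[-1].strip())  # -> 'Jane Doe'
# -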
# +
names = [] #List to keep the names used as keys.
spouses = [] #List to keep name of spouses
childrens = [] #List to keep number of childrens
for member in links_to_members:
    #Set the items of interest to default values, so that if we don't find the data we want we don't carry over
    #values from the previous member.
cname = " "
bname = " "
spouse = "none"
children = " "
url = member #link to specific member
    resp = requests.get(url, params={'action': 'raw'}) #request the page as raw wikitext for ease of scraping the infobox
page = resp.text
for line in page.splitlines(): #go through each line
        #Names are most likely under birth_name, name, or Name, with either a whitespace after the '|' or none.
if line.startswith('| birth_name'):
bname = line.partition('=')[-1].strip()
elif line.startswith('|birth_name'):
bname = line.partition('=')[-1].strip()
elif line.startswith('|name'):
cname = line.partition('=')[-1].strip()
elif line.startswith('| name'):
cname = line.partition('=')[-1].strip()
elif line.startswith('|Name'):
cname = line.partition('=')[-1].strip()
elif line.startswith('| Name'):
cname = line.partition('=')[-1].strip()
        #Spouses are most likely found under spouse or Spouse
elif line.startswith('|spouse'):
spouse = line.partition('=')[-1].strip()
elif line.startswith('|Spouse'):
spouse = line.partition('=')[-1].strip()
elif line.startswith('| Spouse'):
spouse = line.partition('=')[-1].strip()
elif line.startswith('| spouse'):
spouse = line.partition('=')[-1].strip()
        #the number of children might be under children, Children, childrens, or Childrens
elif line.startswith('| children'):
children = line.partition('=')[-1].strip()
elif line.startswith('| Children'):
children = line.partition('=')[-1].strip()
elif line.startswith('|children'):
children = line.partition('=')[-1].strip()
elif line.startswith('|Children'):
children = line.partition('=')[-1].strip()
elif line.startswith('|Childrens'):
children = line.partition('=')[-1].strip()
elif line.startswith('| Childrens'):
children = line.partition('=')[-1].strip()
elif line.startswith('| childrens'):
children = line.partition('=')[-1].strip()
        elif line.startswith('|childrens'):
children = line.partition('=')[-1].strip()
        #Website appears to be the last part of the infobox, so when we reach it we stop scanning the page.
elif line.startswith('|website'):
break
elif line.startswith('| website'):
break
if cname != " ": #We will prefere their called name which should correspond better between tables
name = cname
elif bname != " ": #If we only find their birth name we will use that instead to make manual pairing easier when cleaning data
name = bname
    else: #If we do not find any name we will fill it in as blank
name = " "
names.append(name) #Add the name to the list
spouses.append(spouse) #Add the name of the spouse to the list
childrens.append(children) #Add the number of childrens to the list
time.sleep(0.5) #Wait this time to be polite
# -
member_personal_data = pd.DataFrame(names,columns=['Member']) #Put the new data into a frame with first column being member.
member_personal_data['Spouse'] = spouses
member_personal_data['Childrens'] = childrens
# Join the two tables using the member name as the key. In this case a full outer join is used in order to keep rows for which we fail to find matching keys, e.g. one frame might have the name Joe while the other has Joseph. An alternative would be to join on position in the frames; however, the vacancies would mess up this ordering, so we would need to place them last, or first.
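# As a toy illustration of that choice (the names below are made up): an inner join would silently drop members whose names differ between the two frames, while an outer join keeps them with NaNs that can be fixed up during cleaning. The real merge on the scraped frames follows right after.
# +
left = pd.DataFrame({'Member': ['Joe', 'Ann'], 'District': ['TX-1', 'CA-2']})
right = pd.DataFrame({'Member': ['Joseph', 'Ann'], 'Spouse': ['Jill', 'none']})
print(pd.merge(left, right, how='inner', on='Member'))  # only 'Ann' survives
print(pd.merge(left, right, how='outer', on='Member'))  # 'Joe' and 'Joseph' are kept, with NaNs
# -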
result = pd.merge(congress_members_frame, member_personal_data,how = 'outer', on = 'Member')
result.to_csv(os.getcwd()+'/data/resultingData/congress_members.csv') #Print the results to a csv file.
# This concludes the first part of trying to create a model for which party a US congressional district will vote; the code here is also written as a runnable program in the file "web_scrapeing_us_congress.py". In the next part we will go through and clean the data which we just scraped.
class AgentBase:
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = args.gamma
self.batch_size = args.batch_size
self.repeat_times = args.repeat_times
self.reward_scale = args.reward_scale
self.soft_update_tau = args.soft_update_tau
self.states = None # assert self.states == (1, state_dim)
self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
act_class = getattr(self, "act_class", None)
cri_class = getattr(self, "cri_class", None)
self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
if cri_class else self.act
self.act_optimizer = torch.optim.Adam(self.act.parameters(), args.learning_rate)
self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), args.learning_rate) \
if cri_class else self.act_optimizer
self.criterion = torch.nn.SmoothL1Loss()
@staticmethod
def optimizer_update(optimizer, objective: Tensor):
optimizer.zero_grad()
objective.backward()
optimizer.step()
@staticmethod
def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
for tar, cur in zip(target_net.parameters(), current_net.parameters()):
tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
class AgentPPO(AgentBase):
def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
self.if_off_policy = False
self.act_class = getattr(self, "act_class", ActorPPO)
self.cri_class = getattr(self, "cri_class", CriticPPO)
AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
self.ratio_clip = getattr(args, "ratio_clip", 0.25) # `ratio.clamp(1 - clip, 1 + clip)`
self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.95) # could be 0.80~0.99
self.lambda_entropy = getattr(args, "lambda_entropy", 0.01) # could be 0.00~0.10
self.lambda_entropy = torch.tensor(self.lambda_entropy, dtype=torch.float32, device=self.device)
def explore_env(self, env, horizon_len: int) -> [Tensor]:
states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
logprobs = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)
ary_state = self.states[0]
get_action = self.act.get_action
convert = self.act.convert_action_for_env
for i in range(horizon_len):
state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
action, logprob = [t.squeeze(0) for t in get_action(state.unsqueeze(0))[:2]]
ary_action = convert(action).detach().cpu().numpy()
ary_state, reward, done, _ = env.step(ary_action)
if done:
ary_state = env.reset()
states[i] = state
actions[i] = action
logprobs[i] = logprob
rewards[i] = reward
dones[i] = done
self.states[0] = ary_state
rewards = (rewards * self.reward_scale).unsqueeze(1)
undones = (1 - dones.type(torch.float32)).unsqueeze(1)
return states, actions, logprobs, rewards, undones
def update_net(self, buffer) -> [float]:
with torch.no_grad():
states, actions, logprobs, rewards, undones = buffer
buffer_size = states.shape[0]
'''get advantages reward_sums'''
bs = 2 ** 10 # set a smaller 'batch_size' when out of GPU memory.
values = [self.cri(states[i:i + bs]) for i in range(0, buffer_size, bs)]
values = torch.cat(values, dim=0).squeeze(1) # values.shape == (buffer_size, )
advantages = self.get_advantages(rewards, undones, values) # advantages.shape == (buffer_size, )
reward_sums = advantages + values # reward_sums.shape == (buffer_size, )
del rewards, undones, values
advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-5)
assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size,)
'''update network'''
obj_critics = 0.0
obj_actors = 0.0
update_times = int(buffer_size * self.repeat_times / self.batch_size)
assert update_times >= 1
for _ in range(update_times):
indices = torch.randint(buffer_size, size=(self.batch_size,), requires_grad=False)
state = states[indices]
action = actions[indices]
logprob = logprobs[indices]
advantage = advantages[indices]
reward_sum = reward_sums[indices]
value = self.cri(state).squeeze(1) # critic network predicts the reward_sum (Q value) of state
obj_critic = self.criterion(value, reward_sum)
self.optimizer_update(self.cri_optimizer, obj_critic)
new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
ratio = (new_logprob - logprob.detach()).exp()
surrogate1 = advantage * ratio
surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
obj_surrogate = torch.min(surrogate1, surrogate2).mean()
obj_actor = obj_surrogate + obj_entropy.mean() * self.lambda_entropy
self.optimizer_update(self.act_optimizer, -obj_actor)
obj_critics += obj_critic.item()
obj_actors += obj_actor.item()
a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean()
return obj_critics / update_times, obj_actors / update_times, a_std_log.item()
def get_advantages(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
advantages = torch.empty_like(values) # advantage value
masks = undones * self.gamma
horizon_len = rewards.shape[0]
next_state = torch.tensor(self.states, dtype=torch.float32).to(self.device)
next_value = self.cri(next_state).detach()[0, 0]
advantage = 0 # last_gae_lambda
for t in range(horizon_len - 1, -1, -1):
delta = rewards[t] + masks[t] * next_value - values[t]
advantages[t] = advantage = delta + masks[t] * self.lambda_gae_adv * advantage
next_value = values[t]
return advantages
class PendulumEnv(gym.Wrapper): # a demo of custom gym env
def __init__(self):
gym.logger.set_level(40) # Block warning
gym_env_name = "Pendulum-v0" if gym.__version__ < '0.18.0' else "Pendulum-v1"
super().__init__(env=gym.make(gym_env_name))
'''the necessary env information when you design a custom env'''
self.env_name = gym_env_name # the name of this env.
self.state_dim = self.observation_space.shape[0] # feature number of state
self.action_dim = self.action_space.shape[0] # feature number of action
self.if_discrete = False # discrete action or continuous action
def reset(self) -> np.ndarray: # reset the agent in env
return self.env.reset()
def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict): # agent interacts in env
# We suggest that adjust action space to (-1, +1) when designing a custom env.
state, reward, done, info_dict = self.env.step(action * 2)
return state.reshape(self.state_dim), float(reward), done, info_dict
def train_agent(args: Config):
args.init_before_training()
env = build_env(args.env_class, args.env_args)
agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
agent.states = env.reset()[np.newaxis, :]
evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
eval_per_step=args.eval_per_step,
eval_times=args.eval_times,
cwd=args.cwd)
torch.set_grad_enabled(False)
while True: # start training
buffer_items = agent.explore_env(env, args.horizon_len)
torch.set_grad_enabled(True)
logging_tuple = agent.update_net(buffer_items)
torch.set_grad_enabled(False)
evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
if (evaluator.total_step > args.break_step) or os.path.exists(f"{args.cwd}/stop"):
torch.save(agent.act.state_dict(), args.cwd + '/actor.pth')
break # stop training when reach `break_step` or `mkdir cwd/stop`
def render_agent(env_class, env_args: dict, net_dims: [int], agent_class, actor_path: str, render_times: int = 8):
env = build_env(env_class, env_args)
state_dim = env_args['state_dim']
action_dim = env_args['action_dim']
agent = agent_class(net_dims, state_dim, action_dim, gpu_id=-1)
actor = agent.act
print(f"| render and load actor from: {actor_path}")
actor.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
for i in range(render_times):
cumulative_reward, episode_step = get_rewards_and_steps(env, actor, if_render=True)
print(f"|{i:4} cumulative_reward {cumulative_reward:9.3f} episode_step {episode_step:5.0f}")
class Evaluator:
def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
self.cwd = cwd
self.env_eval = eval_env
self.eval_step = 0
self.total_step = 0
self.start_time = time.time()
self.eval_times = eval_times # number of times that get episodic cumulative return
self.eval_per_step = eval_per_step # evaluate the agent per training steps
self.recorder = []
print(f"\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
f"\n| `time`: Time spent from the start of training to this moment."
f"\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
f"\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
f"\n| `avgS`: Average of steps in an episode."
f"\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
f"\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")
def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
self.total_step += horizon_len
if self.eval_step + self.eval_per_step > self.total_step:
return
self.eval_step = self.total_step
rewards_steps_ary = [get_rewards_and_steps(self.env_eval, actor) for _ in range(self.eval_times)]
rewards_steps_ary = np.array(rewards_steps_ary, dtype=np.float32)
avg_r = rewards_steps_ary[:, 0].mean() # average of cumulative rewards
std_r = rewards_steps_ary[:, 0].std() # std of cumulative rewards
avg_s = rewards_steps_ary[:, 1].mean() # average of steps in an episode
used_time = time.time() - self.start_time
self.recorder.append((self.total_step, used_time, avg_r))
print(f"| {self.total_step:8.2e} {used_time:8.0f} "
f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int): # cumulative_rewards and episode_steps
device = next(actor.parameters()).device # net.parameters() is a Python generator.
state = env.reset()
episode_steps = 0
cumulative_returns = 0.0 # sum of rewards in an episode
for episode_steps in range(12345):
tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
tensor_action = actor(tensor_state)
action = tensor_action.detach().cpu().numpy()[0] # not need detach(), because using torch.no_grad() outside
state, reward, done, _ = env.step(action)
cumulative_returns += reward
if if_render:
env.render()
if done:
break
return cumulative_returns, episode_steps + 1
# + [markdown] id="9tzAw9k26nAC"
# ##DRL Agent Class
# + id="pwCbbocm6PHM"
from __future__ import annotations
import torch
# from elegantrl.agents import AgentA2C
MODELS = {"ppo": AgentPPO}
OFF_POLICY_MODELS = ["ddpg", "td3", "sac"]
ON_POLICY_MODELS = ["ppo"]
# MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()}
#
# NOISE = {
# "normal": NormalActionNoise,
# "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise,
# }
class DRLAgent:
"""Implementations of DRL algorithms
Attributes
----------
env: gym environment class
user-defined class
Methods
-------
get_model()
setup DRL algorithms
train_model()
train DRL algorithms in a train dataset
and output the trained model
DRL_prediction()
make a prediction in a test dataset and get results
"""
def __init__(self, env, price_array, tech_array, turbulence_array):
self.env = env
self.price_array = price_array
self.tech_array = tech_array
self.turbulence_array = turbulence_array
def get_model(self, model_name, model_kwargs):
env_config = {
"price_array": self.price_array,
"tech_array": self.tech_array,
"turbulence_array": self.turbulence_array,
"if_train": True,
}
environment = self.env(config=env_config)
env_args = {'config': env_config,
'env_name': environment.env_name,
'state_dim': environment.state_dim,
'action_dim': environment.action_dim,
'if_discrete': False}
        if model_name not in MODELS:
            raise NotImplementedError("NotImplementedError")
        agent = MODELS[model_name]
model = Config(agent_class=agent, env_class=self.env, env_args=env_args)
model.if_off_policy = model_name in OFF_POLICY_MODELS
if model_kwargs is not None:
try:
model.learning_rate = model_kwargs["learning_rate"]
model.batch_size = model_kwargs["batch_size"]
model.gamma = model_kwargs["gamma"]
model.seed = model_kwargs["seed"]
model.net_dims = model_kwargs["net_dimension"]
model.target_step = model_kwargs["target_step"]
model.eval_gap = model_kwargs["eval_gap"]
model.eval_times = model_kwargs["eval_times"]
except BaseException:
raise ValueError(
"Fail to read arguments, please check 'model_kwargs' input."
)
return model
def train_model(self, model, cwd, total_timesteps=5000):
model.cwd = cwd
model.break_step = total_timesteps
train_agent(model)
@staticmethod
def DRL_prediction(model_name, cwd, net_dimension, environment):
if model_name not in MODELS:
raise NotImplementedError("NotImplementedError")
agent_class = MODELS[model_name]
environment.env_num = 1
agent = agent_class(net_dimension, environment.state_dim, environment.action_dim)
actor = agent.act
# load agent
try:
cwd = cwd + '/actor.pth'
print(f"| load actor from: {cwd}")
actor.load_state_dict(torch.load(cwd, map_location=lambda storage, loc: storage))
act = actor
device = agent.device
except BaseException:
raise ValueError("Fail to load agent!")
# test on the testing env
_torch = torch
state = environment.reset()
episode_returns = [] # the cumulative_return / initial_account
episode_total_assets = [environment.initial_total_asset]
with _torch.no_grad():
for i in range(environment.max_step):
s_tensor = _torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor) # action_tanh = act.forward()
action = (
a_tensor.detach().cpu().numpy()[0]
) # not need detach(), because with torch.no_grad() outside
state, reward, done, _ = environment.step(action)
total_asset = (
environment.amount
+ (
environment.price_ary[environment.day] * environment.stocks
).sum()
)
episode_total_assets.append(total_asset)
episode_return = total_asset / environment.initial_total_asset
episode_returns.append(episode_return)
if done:
break
print("Test Finished!")
# return episode total_assets on testing data
print("episode_return", episode_return)
return episode_total_assets
# + [markdown] id="zjLda8No6pvI"
# ## Train & Test Functions
# + id="j8-e03ev32oz"
from __future__ import annotations
from finrl.config import ERL_PARAMS
from finrl.config import INDICATORS
from finrl.config import RLlib_PARAMS
from finrl.config import SAC_PARAMS
from finrl.config import TRAIN_END_DATE
from finrl.config import TRAIN_START_DATE
from finrl.config_tickers import DOW_30_TICKER
from finrl.meta.data_processor import DataProcessor
# construct environment
def train(
start_date,
end_date,
ticker_list,
data_source,
time_interval,
technical_indicator_list,
drl_lib,
env,
model_name,
if_vix=True,
**kwargs,
):
# download data
dp = DataProcessor(data_source, **kwargs)
data = dp.download_data(ticker_list, start_date, end_date, time_interval)
data = dp.clean_data(data)
data = dp.add_technical_indicator(data, technical_indicator_list)
if if_vix:
data = dp.add_vix(data)
else:
data = dp.add_turbulence(data)
price_array, tech_array, turbulence_array = dp.df_to_array(data, if_vix)
env_config = {
"price_array": price_array,
"tech_array": tech_array,
"turbulence_array": turbulence_array,
"if_train": True,
}
env_instance = env(config=env_config)
# read parameters
cwd = kwargs.get("cwd", "./" + str(model_name))
if drl_lib == "elegantrl":
DRLAgent_erl = DRLAgent
break_step = kwargs.get("break_step", 1e6)
erl_params = kwargs.get("erl_params")
agent = DRLAgent_erl(
env=env,
price_array=price_array,
tech_array=tech_array,
turbulence_array=turbulence_array,
)
model = agent.get_model(model_name, model_kwargs=erl_params)
trained_model = agent.train_model(
model=model, cwd=cwd, total_timesteps=break_step
)
# + id="Evsg8QtEDHDO"
from __future__ import annotations
from finrl.config import INDICATORS
from finrl.config import RLlib_PARAMS
from finrl.config import TEST_END_DATE
from finrl.config import TEST_START_DATE
from finrl.config_tickers import DOW_30_TICKER
def test(
start_date,
end_date,
ticker_list,
data_source,
time_interval,
technical_indicator_list,
drl_lib,
env,
model_name,
if_vix=True,
**kwargs,
):
# import data processor
from finrl.meta.data_processor import DataProcessor
# fetch data
dp = DataProcessor(data_source, **kwargs)
data = dp.download_data(ticker_list, start_date, end_date, time_interval)
data = dp.clean_data(data)
data = dp.add_technical_indicator(data, technical_indicator_list)
if if_vix:
data = dp.add_vix(data)
else:
data = dp.add_turbulence(data)
price_array, tech_array, turbulence_array = dp.df_to_array(data, if_vix)
env_config = {
"price_array": price_array,
"tech_array": tech_array,
"turbulence_array": turbulence_array,
"if_train": False,
}
env_instance = env(config=env_config)
# load elegantrl needs state dim, action dim and net dim
net_dimension = kwargs.get("net_dimension", 2**7)
cwd = kwargs.get("cwd", "./" + str(model_name))
print("price_array: ", len(price_array))
if drl_lib == "elegantrl":
DRLAgent_erl = DRLAgent
episode_total_assets = DRLAgent_erl.DRL_prediction(
model_name=model_name,
cwd=cwd,
net_dimension=net_dimension,
environment=env_instance,
)
return episode_total_assets
# + [markdown] id="pf5aVHAU-xF6"
# ## Import Dow Jones 30 Symbols
# + id="jx25TA_X87F-"
ticker_list = DOW_30_TICKER
action_dim = len(DOW_30_TICKER)
# + colab={"base_uri": "https://localhost:8080/"} id="UIV0kO_y-inG" outputId="bd7b3c21-641e-4eb7-a4af-ae7d156042a6"
print(ticker_list)
# + colab={"base_uri": "https://localhost:8080/"} id="CnqQ-cC5-rfO" outputId="29b248c9-ec98-44cd-befb-65192af72ea4"
print(INDICATORS)
# + [markdown] id="rZMkcyjZ-25l"
# ## Calculate the DRL state dimension manually for paper trading
# + id="GLfkTsXK-e90"
# amount + (turbulence, turbulence_bool) + (price, shares, cd (holding time)) * stock_dim + tech_dim
state_dim = 1 + 2 + 3 * action_dim + len(INDICATORS) * action_dim
# + colab={"base_uri": "https://localhost:8080/"} id="QqUkvImG-n66" outputId="9cb4a3d8-5064-4971-d095-65d3ab12f11a"
state_dim
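# For reference, with the 30 Dow tickers and assuming INDICATORS keeps its default 8 entries, the formula above works out to 1 + 2 + 3*30 + 8*30 = 333.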
# + id="8Z6qlLXY-fA2"
env = StockTradingEnv
# + [markdown] id="J25MuZLiGqCP"
# ## Show the data
# + [markdown] id="puJZWm8NHtSN"
# ### Step 1. Pick a data source
# + colab={"base_uri": "https://localhost:8080/"} id="3ZCru8f7GqgL" outputId="010e6a83-1280-410a-e240-4bc8ec124774"
#DP = DataProcessor(data_source = 'alpaca',
# API_KEY = API_KEY,
# API_SECRET = API_SECRET,
# API_BASE_URL = API_BASE_URL
# )
# + [markdown] id="nvPEW2mYHvkR"
# ### Step 2. Get ticker list, Set start date and end date, specify the data frequency
# + id="NPNxj6c8HIiE"
#data = DP.download_data(start_date = '2021-10-04',
# end_date = '2021-10-08',
# ticker_list = ticker_list,
# time_interval= '1Min')
# + colab={"base_uri": "https://localhost:8080/"} id="pPcazCq1d5ec" outputId="39d61284-7b51-46c2-cc2d-424f0f569e25"
#data['timestamp'].nunique()
# + [markdown] id="i46jGdE0IAel"
# ### Step 3. Data Cleaning & Feature Engineering
# + colab={"base_uri": "https://localhost:8080/"} id="x9euUsEPHWFK" outputId="2ae7fae7-d9ae-4f34-f32a-13e1476debea"
#data = DP.clean_data(data)
#data = DP.add_technical_indicator(data, INDICATORS)
#data = DP.add_vix(data)
# + colab={"base_uri": "https://localhost:8080/"} id="GOcPTaAgHdxa" outputId="4da334de-fbf6-49ca-ed22-bf1a99469457"
#data.shape
# + [markdown] id="bbu03L_UIMWt"
# ### Step 4. Transform to numpy array
# + colab={"base_uri": "https://localhost:8080/"} id="Rzj0vjZZHdGM" outputId="d0ec43a2-b78e-4c09-c048-b88e7eba6c81"
#price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix=True)
# + [markdown] id="eW0UDAXI1nEa"
# # Part 2: Train the agent
# + [markdown] id="lArLOFcJ7VMO"
# ## Train
# + id="g1F84mebj4gu"
ERL_PARAMS = {"learning_rate": 3e-6,"batch_size": 2048,"gamma": 0.985,
"seed":312,"net_dimension":[128,64], "target_step":5000, "eval_gap":30,
"eval_times":1}
env = StockTradingEnv
#if you want to use larger datasets (change to longer period), and it raises error,
#please try to increase "target_step". It should be larger than the episode steps.
# + colab={"base_uri": "https://localhost:8080/"} id="BxcNI2fdNjip" outputId="8db09736-a3a1-48a2-9e61-f9d8828ee327"
train(start_date = '2022-08-25',
end_date = '2022-08-31',
ticker_list = ticker_list,
data_source = 'alpaca',
time_interval= '1Min',
technical_indicator_list= INDICATORS,
drl_lib='elegantrl',
env=env,
model_name='ppo',
if_vix=True,
API_KEY = API_KEY,
API_SECRET = API_SECRET,
API_BASE_URL = API_BASE_URL,
erl_params=ERL_PARAMS,
cwd='./papertrading_erl', #current_working_dir
break_step=1e5)
# + [markdown] id="g37WugV_1pAS"
# ## Test
# + id="SxYoWCDa02TW"
account_value_erl=test(start_date = '2022-09-01',
end_date = '2022-09-02',
ticker_list = ticker_list,
data_source = 'alpaca',
time_interval= '1Min',
technical_indicator_list= INDICATORS,
drl_lib='elegantrl',
env=env,
model_name='ppo',
if_vix=True,
API_KEY = API_KEY,
API_SECRET = API_SECRET,
API_BASE_URL = API_BASE_URL,
cwd='./papertrading_erl',
net_dimension = ERL_PARAMS['net_dimension'])
# + [markdown] id="e8aNQ58X7avM"
# ## Use full data to train
# + [markdown] id="3CQ9_Yv41r88"
# After tuning well, retrain on the training and testing sets
# + colab={"base_uri": "https://localhost:8080/"} id="cUSgbwt_10V3" outputId="50f3d8c6-b333-480e-b2fb-25e566797806"
train(start_date = '2022-08-25',
end_date = '2022-09-02',
ticker_list = ticker_list,
data_source = 'alpaca',
time_interval= '1Min',
technical_indicator_list= INDICATORS,
drl_lib='elegantrl',
env=env,
model_name='ppo',
if_vix=True,
API_KEY = API_KEY,
API_SECRET = API_SECRET,
API_BASE_URL = API_BASE_URL,
erl_params=ERL_PARAMS,
cwd='./papertrading_erl_retrain',
break_step=2e5)
# + [markdown] id="sIQN6Ggt7gXY"
# # Part 3: Deploy the agent
# + [markdown] id="UFoxkigg1zXa"
# ## Setup Alpaca Paper trading environment
# + id="LpkoZpYzfneS"
import datetime
import threading
from finrl.meta.data_processors.processor_alpaca import AlpacaProcessor
import alpaca_trade_api as tradeapi
import time
import pandas as pd
import numpy as np
import torch
import gym
class AlpacaPaperTrading():
def __init__(self,ticker_list, time_interval, drl_lib, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
API_BASE_URL, tech_indicator_list, turbulence_thresh=30,
max_stock=1e2, latency = None):
#load agent
self.drl_lib = drl_lib
if agent =='ppo':
if drl_lib == 'elegantrl':
agent_class = AgentPPO
agent = agent_class(net_dim, state_dim, action_dim)
actor = agent.act
# load agent
try:
cwd = cwd + '/actor.pth'
print(f"| load actor from: {cwd}")
actor.load_state_dict(torch.load(cwd, map_location=lambda storage, loc: storage))
self.act = actor
self.device = agent.device
except BaseException:
raise ValueError("Fail to load agent!")
elif drl_lib == 'rllib':
from ray.rllib.agents import ppo
from ray.rllib.agents.ppo.ppo import PPOTrainer
config = ppo.DEFAULT_CONFIG.copy()
config['env'] = StockEnvEmpty
config["log_level"] = "WARN"
config['env_config'] = {'state_dim':state_dim,
'action_dim':action_dim,}
                trainer = PPOTrainer(env=StockEnvEmpty, config=config)
try:
trainer.restore(cwd)
self.agent = trainer
print("Restoring from checkpoint path", cwd)
except:
raise ValueError('Fail to load agent!')
elif drl_lib == 'stable_baselines3':
from stable_baselines3 import PPO
try:
#load agent
self.model = PPO.load(cwd)
print("Successfully load model", cwd)
except:
raise ValueError('Fail to load agent!')
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
else:
raise ValueError('Agent input is NOT supported yet.')
#connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY,API_SECRET,API_BASE_URL, 'v2')
except:
raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')
#read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
#read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
#initialize account
self.stocks = np.asarray([0] * len(ticker_list)) #stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None #cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def test_latency(self, test_times = 10):
total_time = 0
for i in range(0, test_times):
time0 = time.time()
self.get_state()
time1 = time.time()
temp_time = time1 - time0
total_time += temp_time
latency = total_time/test_times
print('latency for data processing: ', latency)
return latency
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
self.awaitMarketOpen()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if(self.timeToClose < (60)):
# Close all positions when 1 minutes til market close.
print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
threads = []
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
threads.append(tSubmitOrder) # record thread for joining later
for x in threads: # wait for all threads to complete
x.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
self.trade()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time,last_equity])
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while(not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
if self.drl_lib == 'elegantrl':
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0]
action = (action * self.max_stock).astype(int)
elif self.drl_lib == 'rllib':
action = self.agent.compute_single_action(state)
elif self.drl_lib == 'stable_baselines3':
action = self.model.predict(state)[0]
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
threads = []
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
threads.append(tSubmitOrder) # record thread for joining later
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for x in threads: # wait for all threads to complete
x.join()
threads = []
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
                if buy_num_shares != buy_num_shares:  # NaN is the only value not equal to itself
                    qty = 0  # set quantity to 0 when the computed share count is NaN
                else:
                    qty = abs(int(buy_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
threads.append(tSubmitOrder) # record thread for joining later
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for x in threads: # wait for all threads to complete
x.join()
else: # sell all when turbulence
threads = []
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
threads.append(tSubmitOrder) # record thread for joining later
for x in threads: # wait for all threads to complete
x.join()
self.stocks_cd[:] = 0
def get_state(self):
alpaca = AlpacaProcessor(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list = self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = ( abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype = float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(self.cash * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
state[np.isnan(state)] = 0.0
state[np.isinf(state)] = 0.0
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if(qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
class StockEnvEmpty(gym.Env):
#Empty Env used for loading rllib agent
def __init__(self,config):
state_dim = config['state_dim']
action_dim = config['action_dim']
self.env_num = 1
self.max_step = 10000
self.env_name = 'StockEnvEmpty'
self.state_dim = state_dim
self.action_dim = action_dim
self.if_discrete = False
self.target_return = 9999
self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)
def reset(self):
return
def step(self, actions):
return
# + [markdown] id="os4C4-4H7ns7"
# ## Run Paper trading
# + colab={"base_uri": "https://localhost:8080/"} id="7nw0i-0UN3-7" outputId="25729df7-4775-49af-bf5a-38e3970d0056"
print(DOW_30_TICKER)
# + colab={"base_uri": "https://localhost:8080/"} id="YsSBK9ION1t6" outputId="49a69655-850f-436b-a21c-fffe48528e71"
state_dim
# + colab={"base_uri": "https://localhost:8080/"} id="xYtSv6P1N247" outputId="174550ce-664a-41fc-bd89-9d3726960c5b"
action_dim
# + id="Kl9nulnAJtiI"
paper_trading_erl = AlpacaPaperTrading(ticker_list = DOW_30_TICKER,
time_interval = '1Min',
drl_lib = 'elegantrl',
agent = 'ppo',
cwd = './papertrading_erl_retrain',
net_dim = ERL_PARAMS['net_dimension'],
state_dim = state_dim,
action_dim= action_dim,
API_KEY = API_KEY,
API_SECRET = API_SECRET,
API_BASE_URL = API_BASE_URL,
tech_indicator_list = INDICATORS,
turbulence_thresh=30,
max_stock=1e2)
paper_trading_erl.run()
# + [markdown] id="srzBZfYEUI1O"
# # Part 4: Check Portfolio Performance
# + id="chovN1UhTAht"
import alpaca_trade_api as tradeapi
import exchange_calendars as tc
import numpy as np
import pandas as pd
import pytz
import yfinance as yf
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from datetime import datetime as dt
from finrl.plot import backtest_stats
import matplotlib.pyplot as plt
# + id="CaofxMNCfAR1"
def get_trading_days(start, end):
nyse = tc.get_calendar('NYSE')
df = nyse.sessions_in_range(pd.Timestamp(start,tz=pytz.UTC),
pd.Timestamp(end,tz=pytz.UTC))
trading_days = []
for day in df:
trading_days.append(str(day)[:10])
return trading_days
def alpaca_history(key, secret, url, start, end):
api = tradeapi.REST(key, secret, url, 'v2')
trading_days = get_trading_days(start, end)
df = pd.DataFrame()
for day in trading_days:
#df = df.append(api.get_portfolio_history(date_start = day,timeframe='5Min').df.iloc[:78])
df= pd.concat([df,api.get_portfolio_history(date_start = day,timeframe='5Min').df.iloc[:78]],ignore_index=True)
equities = df.equity.values
cumu_returns = equities/equities[0]
cumu_returns = cumu_returns[~np.isnan(cumu_returns)]
return df, cumu_returns
def DIA_history(start):
data_df = yf.download(['^DJI'],start=start, interval="5m")
data_df = data_df.iloc[:]
baseline_returns = data_df['Adj Close'].values/data_df['Adj Close'].values[0]
return data_df, baseline_returns
# + [markdown] id="5CHiZRVpURpx"
# ## Get cumulative return
# + id="O_YT7v-LSdfV"
df_erl, cumu_erl = alpaca_history(key=API_KEY,
secret=API_SECRET,
url=API_BASE_URL,
start='2022-09-01', #must be within 1 month
end='2022-09-12') #change the date if error occurs
# + colab={"base_uri": "https://localhost:8080/"} id="IMcQjwHOS6Zb" outputId="1fb21460-1da9-4998-f0c0-fcbf5b056e66"
df_djia, cumu_djia = DIA_history(start='2022-09-01')
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="PJXPwmx9Ts5o" outputId="c59014eb-c2f9-4be2-8a87-7892cc0b1094"
df_erl.tail()
# + colab={"base_uri": "https://localhost:8080/"} id="o1Iaw90FTNfU" outputId="0629dca2-d9dd-4c2a-e363-dc0f01daba41"
returns_erl = cumu_erl -1
returns_dia = cumu_djia - 1
returns_dia = returns_dia[:returns_erl.shape[0]]
print('len of erl return: ', returns_erl.shape[0])
print('len of dia return: ', returns_dia.shape[0])
# + id="2IawaMsDwZni"
returns_erl
# + [markdown] id="5Z0LEm7KUZ5W"
# ## plot and save
# + id="Foqk1wIQTQJ3"
import matplotlib.pyplot as plt
plt.figure(dpi=1000)
plt.grid()
plt.grid(which='minor', axis='y')
plt.title('Stock Trading (Paper trading)', fontsize=20)
plt.plot(returns_erl, label = 'ElegantRL Agent', color = 'red')
#plt.plot(returns_sb3, label = 'Stable-Baselines3 Agent', color = 'blue' )
#plt.plot(returns_rllib, label = 'RLlib Agent', color = 'green')
plt.plot(returns_dia, label = 'DJIA', color = 'grey')
plt.ylabel('Return', fontsize=16)
plt.xlabel('Year 2021', fontsize=16)
plt.xticks(size = 14)
plt.yticks(size = 14)
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(78))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(6))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.005))
ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=2))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(['','10-19','','10-20',
'','10-21','','10-22']))
plt.legend(fontsize=10.5)
plt.savefig('papertrading_stock.png')
# + id="O_LsHVj_TZGL"
| 53,478 |
/Assignment_scarpingdata_from _Poineer.ipynb | a66db9fe6375f07eba6ff28e7f401859bff0e5a1 | [] | no_license | leenachatterjee/FliprobointernshipLeena | https://github.com/leenachatterjee/FliprobointernshipLeena | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 179,015 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import requests
import selenium
from selenium import webdriver
from bs4 import BeautifulSoup
#importing the Web driver
dr=webdriver.Chrome("chromedriver.exe")
import time
# Importing required exceptions which need to be handled
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException
import re
# -
url="https://www.dailypioneer.com/archive/"
dr.get(url)
April=dr.find_element_by_xpath('//div[@class="frst-timeline archiveTimeline frst-date-opposite frst-left-align"]/div[6]/div/div/ul/li[4]/a')
April
dr.get(April.get_attribute('href'))
url=[]
# +
for j in dr.find_elements_by_xpath('//div[@class="pagingList"]/ul/li/a'):
url.append(j.get_attribute('href'))
url
# -
url
A=[]
for page_url in url:
    dr.get(page_url)  # open each archive page before collecting its article links
    time.sleep(2)
    for link in dr.find_elements_by_xpath('//div[@class="innerNewsList"]/div/div/h2/a'):
        A.append(link.get_attribute('href'))
A
# +
date_att=[]
date=dr.find_elements_by_xpath('//div[@class="innerNewsList"]/div/div/h2/a')
# -
date_att
HeadS_Line=[]
for i in A:
dr.get(i)
time.sleep(3)
try:
HeadS_Line_tag=dr.find_element_by_xpath('//h2[@itemprop="headline"]')
HeadS_Line.append(HeadS_Line_tag.text)
except NoSuchElementException:
HeadS_Line.append(" ")
HeadS_Line
Head_Line=[]
# +
for i in date_att:
dr.get(i)
time.sleep(3)
try:
Head_Line_tag=dr.find_element_by_xpath('//h2[@itemprop="headline"]')
Head_Line.append(Head_Line_tag.text)
except NoSuchElementException:
Head_Line.append(" ")
# -
Head_Line
A
# +
T2=[]
# -
for i in A:
dr.get(i)
time.sleep(3)
try:
A1_tag=dr.find_element_by_xpath('//span[@itemprop="datePublished"]')
T2.append(A1_tag.text)
except NoSuchElementException:
T2.append(" ")
T2
# +
Author=[]
for i in A:
dr.get(i)
time.sleep(3)
try:
A_tag=dr.find_element_by_xpath('//span[@itemprop="author"]')
Author.append(A_tag.text)
except NoSuchElementException:
Author.append(" ")
# -
Author
# +
P=[]
for i in A:
dr.get(i)
time.sleep(3)
try:
P_tag=dr.find_element_by_xpath('//div[@class="newsDetailedContent"]')
P.append(P_tag.text)
except NoSuchElementException:
P.append(" ")
# -
P
data=pd.DataFrame({})
data['HeadLine']=HeadS_Line[:500]
data['Time']=T2[:500]
data['Author']=Author[:500]
data['Paragraph']=P[:500]
data
df=data.copy()
df.head()
df.to_csv('A.csv', index=False)
df.to_excel('B.xlsx')
| 2,833 |
/rf_model.ipynb | 1f74aaaac5c042ca79fdefee891b4e6c8f1ccf4b | [
"MIT"
] | permissive | ENEmyr/autotrading | https://github.com/ENEmyr/autotrading | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 29,921 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import math, os, sys
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
dataset = np.load('dataset/all_data-preprocessed.npz')
features, labels = dataset['features'].astype('float32'), dataset['labels'].astype('float32')
train_test_split_factor = .8
validation_split_factor = .2
train_x, train_y, test_x, test_y = features[:math.floor(len(features)*train_test_split_factor)], labels[:math.floor(len(labels)*train_test_split_factor)], features[math.floor(len(features)*train_test_split_factor):], labels[math.floor(len(labels)*train_test_split_factor):]
train_x, test_x = np.expand_dims(train_x, axis=-1), np.expand_dims(test_x, axis=-1) # for use with TimeDistributed
input_shape = train_x.shape
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_x = train_x.reshape(train_x.shape[0], 7).astype('float32')
test_x = test_x.reshape(test_x.shape[0], 7).astype('float32')
print(train_x.shape, test_x.shape)
model = RandomForestRegressor(n_estimators=200 ,max_depth=10,random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x[:64])
close_pred = np.reshape(pred, (-1, 1))
test_y_reshape = np.reshape(test_y[:64], (-1, 1))
days = np.arange(1, len(test_y_reshape)+1)
plt.plot(days, test_y_reshape, 'b', label='Actual line')
plt.plot(days, close_pred, 'r', label='Predicted line')
plt.title('RFRegressor')
plt.xlabel('Days')
plt.ylabel('Close Prices')
plt.legend()
plt.show()
from sklearn.metrics import mean_squared_error as MSE
def evaluate(model, test_features, test_labels):
predictions = model.predict(test_features)
errors = abs(predictions - test_labels)
mape = 100 * np.mean(errors / test_labels)
accuracy = 100 - mape
print('Model Performance')
    print('Average Error: {:0.4f}.'.format(np.mean(errors)))
    print('RMSE: {:0.4f}'.format(math.sqrt(MSE(test_labels, predictions))))
print('Accuracy = {:0.2f}%.'.format(accuracy))
return accuracy
accuracy = evaluate(model, test_x, test_y)
# save model
import joblib
joblib.dump(model, 'weights/rf.sav')
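# A minimal sketch of loading the saved model back with joblib and scoring a few rows of the same test split:
loaded_model = joblib.load('weights/rf.sav')
print(loaded_model.predict(test_x[:5]))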
| 2,352 |
/.ipynb_checkpoints/EDA_ratings_vod-checkpoint.ipynb | 2c8d40952984b817786deb11786ecc49461f15c9 | [] | no_license | stephanerappeneau/scienceofmovies | https://github.com/stephanerappeneau/scienceofmovies | 4 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 109,336 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. Data Updates
# ## 3.1. Rearrange Dataframe
import pandas as pd
import xlwings as xw

oneday_desktop = pd.read_excel("oneday.xlsx", "desktop")
oneday_mobile = pd.read_excel("oneday.xlsx", "mobile")
oneday_tablet = pd.read_excel("oneday.xlsx", "tablet")
period_desktop = pd.read_excel("period.xlsx", "desktop")
period_mobile = pd.read_excel("period.xlsx", "mobile")
period_tablet = pd.read_excel("period.xlsx", "tablet")
def df_toint(df, date_type):
df = df.drop(["deviceCategory"], axis=0)
df = df.apply(pd.to_numeric)
cols = []
for col in df.columns:
col = col.replace("X", "")
col = col.split(".")
if date_type == "oneday":
col = col[0] + "/" + col[1] + "/" + col[2]
elif date_type == "period":
col = col[0] + "/" + col[1] + "/" + col[2] + "-" + col[3] + "/" + col[4] + "/" + col[5]
cols.append(col)
df.columns = cols
return df
oneday_dfs = [oneday_desktop, oneday_mobile, oneday_tablet]
oneday_desktop, oneday_mobile, oneday_tablet = [df_toint(oneday_df, "oneday") for oneday_df in oneday_dfs]
period_dfs = [period_desktop, period_mobile, period_tablet]
period_desktop, period_mobile, period_tablet = [df_toint(period_df, "period") for period_df in period_dfs]
# ## 3.2. Update Excel with Extracted Data
# Run excel in the background.
xw.App().visible = False
# Determine which workbook to activate.
wb = xw.Book("performance.xlsx")
# Determine which worksheet to activate.
sheet_ga = wb.sheets["ga"]
# Find the last column of dataframe which is to be written to Excel.
oneday_col = sheet_ga.api.UsedRange.Find(oneday_tablet.columns[-1]).address
oneday_col = oneday_col.split('$')[1]
period_col = sheet_ga.api.UsedRange.Find(period_tablet.columns[-1]).address
period_col = period_col.split('$')[1]
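# For reference, Excel reports absolute addresses such as "$AB$3" (a hypothetical cell), so splitting on "$" and taking index 1 isolates the column letters:
print("$AB$3".split('$'))  # -> ['', 'AB', '3']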
# Specify where to insert the extracted data.
def insert_value(sheet_ga, df, col, ga_dict):
for key in ga_dict:
sheet_ga.range(col + str(key)).value = df.iloc[:,-1][ga_dict[key]]
return sheet_ga
ga_dict_desktop = {17:"users", 18:"sessions", 19:"pageviews", 20:"totalEvents", 21:"searchSessions", 22:"campaignPageviews"}
ga_dict_mobile = {27:"users", 28:"sessions", 29:"pageviews", 30:"totalEvents", 31:"searchSessions", 32:"campaignPageviews"}
ga_dict_tablet = {37:"users", 38:"sessions", 39:"pageviews", 40:"totalEvents", 41:"searchSessions", 42:"campaignPageviews"}
sheet_ga = insert_value(sheet_ga, oneday_desktop, oneday_col, ga_dict_desktop)
sheet_ga = insert_value(sheet_ga, oneday_mobile, oneday_col, ga_dict_mobile)
sheet_ga = insert_value(sheet_ga, oneday_tablet, oneday_col, ga_dict_tablet)
sheet_ga = insert_value(sheet_ga, period_desktop, period_col, ga_dict_desktop)
sheet_ga = insert_value(sheet_ga, period_mobile, period_col, ga_dict_mobile)
sheet_ga = insert_value(sheet_ga, period_tablet, period_col, ga_dict_tablet)
# Save and close the workbook.
wb.save()
wb.close()
r = pd.DataFrame()
df_other = dft.loc[:,{'value','user_id'}]
df_other = df_other.rename(columns = {'value':'titre_ref_score'})
df_other = df_other.merge(dff,how='inner',on='user_id')
df_other = df_other.rename(columns = {'value':'titre_other_score'})
df_other['score_ref_moins_other'] = df_other.titre_ref_score-df_other.titre_other_score
#compute the average rating, variance & number of ratings for the film {titre}
meta.loc[meta[meta["titre"]==titre].index,"note_moyenne"] = float(round(dft.value.mean(),2))
meta.loc[meta[meta["titre"]==titre].index,"nb_noteurs"] = dft.value.count()
meta.loc[meta[meta["titre"]==titre].index,"ecart_type"] = float(round(dft.value.var(),2))
meta.loc[meta[meta["titre"]==titre].index,"annรฉe"] = dft.year.unique()[0]
meta.loc[meta[meta["titre"]==titre].index,"nb_films_moyens_vus_par_noteur"] = int(len(df_other)/dft.value.count())
meta.loc[meta[meta["titre"]==titre].index,"ecart_moyen_other_movies"] = float(round(df_other.score_ref_moins_other.mean(),2))
meta.sort_values(by="nb_noteurs", ascending=False)
# +
#Let's zoom in on Eros + Massacre (a highly appreciated film) and Le Sang séché (a poorly appreciated film)
dft = df[df["title"]=="Eros + Massacre"] #"Le Sang séché"]
if len(dft)>0:
    #Retrieve the other films seen by the users who rated the film {titre}, and the rating differential
df_other = pd.DataFrame()
df_other = dft.loc[:,{'value','user_id'}]
df_other = df_other.rename(columns = {'value':'titre_ref_score'})
df_other = df_other.merge(dff,how='inner',on='user_id')
df_other = df_other.rename(columns = {'value':'titre_other_score'})
df_other['score_ref_moins_other'] = df_other.titre_ref_score-df_other.titre_other_score
    # Distribution of the number of ratings per user
rg = df_other.groupby('user_id').count()['year']
plt.hist(rg,bins=range(500,9000,500))
plt.xlabel("# of rating")
plt.ylabel("# of users")
plt.title("Distribution of vodkaster number of ratings by user")
    # Display the plot
plt.show()
    # Conclusion: for 'Eros + Massacre', the film was seen by heavy power users (3k films per user on average)
# -
#which other films were watched the most by those who watched 'Eros + Massacre'?
df_other.groupby('title').count()['user_id'].sort_values(ascending=False)[:10]
#logically the most popular films show up first... we would need to drop the films that are 'seen too widely'
#titles that appear more times than the number of users correspond to several films sharing the same title
#to eliminate the problem it suffices to group by a unique film identifier (see the sketch after this cell)
df_other[:10]
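# A minimal sketch of that fix on made-up toy data (the column names below, including movie_id, are illustrative and not taken from the real ratings frame):
# +
toy = pd.DataFrame({'movie_id': [1, 1, 2, 3],
                    'title': ['Remake', 'Remake', 'Remake', 'Other'],
                    'user_id': [10, 11, 12, 10]})
print(toy.groupby('title')['user_id'].count())     # 'Remake' is over-counted: two distinct films share the title
print(toy.groupby('movie_id')['user_id'].count())  # each film is counted separately
# -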
# +
#How did users rate this film compared to the rest of their collection?
plt.hist(df_other.score_ref_moins_other,bins=np.arange(-5,5,0.5))
plt.xlabel("ecart de note avec "+titre)
plt.ylabel("# of movies")
plt.title("Distribution des ecarts de note avec {}, moyenne รฉcart {} (diff>0 signifie film en moyenne plus apprรฉciรฉ)".format(titre, df_other.score_ref_moins_other.mean()))
plt.plot([0,0], [0,10000], color='red', linestyle='-', linewidth=1)
#Display the plot
plt.show()
#For Eros + Massacre, the film is on average rated about one notch higher than the other films seen by the users who rated it
# -
#Which films are the closest and the furthest away in terms of rating?
dft = df_other.groupby('title')['score_ref_moins_other'].agg(['mean','count']).reset_index().sort_values(ascending=False, by ='mean')
#only show films that have been seen by at least 5 users
dft[dft['count']>5]
#interpretation: this is interesting because it clearly starts to "polarize" tastes:
#the films furthest away in the negative direction are duds, and the furthest away in the positive direction
#are "artsy" films seen only by an audience of connoisseurs
#and if we remove the "seen by at least 5 users" filter, we more specifically see films watched by
#connoisseurs of Japanese cinema
# Ideas
# - make a scatterplot, for each film, of the number of raters vs the mean rating difference
# - create N subplots for the full list of Yoshida films to get a better idea of where they sit in a cinephile's journey
# - plot a "criticogram", i.e. when in time a user rated the Yoshida films within their cinephile "journey" => requires the rating date.
# - question: how do we select films "at the antipodes"? how do we handle popularity?
# - upcoming idea: a recommendation engine (collaborative filtering)
#
# Otherwise
# - difficulty of joining with IMDb
# - feedback on D3.js
# - feedback on neo4j
#
# To do
# - appearance of Yoshida/Oshima ratings over time, per film
# - appearance of Yoshida/Oshima ratings over time, per user, for those who have seen more than N films
#   vs the user's activity level
# - regression / prediction
# - show the films that have rating spikes (trolls)
# - evolution of ratings over the course of a cinephile's journey
# - show the most active users
# - evolution of the frequency of gay characters over the years
| 8,221 |
/Arrays.ipynb | 15d55110b75484f4f07759b64f71fe317ae94ecc | [] | no_license | Chandrakanth10/Pythontest- | https://github.com/Chandrakanth10/Pythontest- | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 20,180 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
iris = sns.load_dataset("iris")
iris.head()
group = iris.groupby("species")
group.head()
group.first()
iris.describe()
iris = sns.load_dataset("iris")
sns.stripplot(x="sepal_length", y="petal_length", data=iris);
sns.boxplot(x="sepal_length", y="petal_length", hue="species", data=iris);
sns.regplot(x = 'sepal_length', y = 'petal_length', data = iris)
sns.factorplot(x="sepal_length", y="petal_length", hue="petal_width",
col="species", data=iris, kind="swarm");
sns.jointplot(x = 'petal_length', y = 'petal_width', data = iris)
sns.pairplot(hue = 'species', data = iris)
gapminder = pd.read_csv("https://raw.githubusercontent.com/swcarpentry/python-novice-gapminder/gh-pages/data/asia_gdp_per_capita.csv")
gapminder.head()
gapminder.columns
iris.columns
gapminder.to_csv('gapminder.csv')
gap2 = pd.read_csv('gapminder.csv')  # read the saved file back in under the new name gap2
gap2.head()
# To work with arrays in Python, we first need to import the array module
# import array
# import array as arr {the most commonly used way to import the array module}
# from array import * {imports all the components present in the array module}
# +
#Method -1
# import array
# a=array.array('i',[1,2,3,4,5,6])
#The first 'array' is the name of the module
#The second 'array' is the array constructor
#The 'i' refers to the type code; the type code specifies the data type that can be stored in the array
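# +
#For reference, a few other type codes besides 'i' (a small sketch; 'f' = single-precision float, 'd' = double-precision float):
import array
print(array.typecodes)             #string of all available type codes
array.array('d', [1.0, 2.5, 3.75]) #an array of double-precision floats
# -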
# +
#Method -2
# import array as arr
# Here, the arr is the alias name
# a=arr.array('i',[1,2,3,4,5,6])
# a
# +
#Method -3
#from array import *
# a=array('i',[1,2,3,4,5,6])   #with "from array import *" we call array() directly
# a
# +
# Insertion and deletion in arrays can be a bit difficult
# Insertion of an element at the end of the array is easy
# Insertion is difficult if we need to add an element in the middle of the array
# Inserting an element in the middle of the array requires moving the following elements one step backward
# The insertion operation in the middle is not efficient because we need to move all of those elements
# In the worst case, the insertion operation can take linear time, i.e. O(N)
# Deletion causes a similar problem to the insertion operation
# When we delete an element, we need to move the remaining elements one step forward
# Each individual byte of memory can be stored or retrieved in O(1) time
# Time Complexity
# Add a new item at the end :- O(1)
# Insert an item at a given index :- O(N)
# Removing the last item :- O(1)
# Removing a middle item :- O(N)
# -
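# +
#To make the O(1) vs O(N) distinction above concrete, here is a small optional timing sketch (absolute numbers will vary by machine):
import array as arr
import timeit
big = arr.array('i', range(100000))
print("append at end   :", timeit.timeit(lambda: big.append(1), number=1000))
print("insert at front :", timeit.timeit(lambda: big.insert(0, 1), number=1000))
# -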
# # Accessing array Elements
# +
#To access array elements, we use the index of the element
#Indexing always starts from 0
#Negative indexing can also be used; with negative indexing, elements are counted from the end of the array
#By using negative indexing, we can access the array elements from the end
# -
import array as arr
a=arr.array('i',[1,2,3,4,5,6])
a
a[2]
a[-2]
# # Basic array Operations
# +
#Arrays are mutable, which means they can be changed
#Some of the Basic operations in the Arrays are
#Finding the length of an array
#Adding/ Changing element of an array
#Removing/ Deleting elements of an array
#Array Concatenation
#Slicing
#Looping through an array
# -
# # Finding length of an array
# +
#To find the length of an array, we can use the len() function.
#The len() function returns an integer equal to the number of elements present in that array
#The len() function takes only one input parameter, i.e. the name of the array
len(a)
# -
# # Adding elements to the array
# +
#There are three methods to add elements to an array:
#1.append():- used when we want to add a single element at the end of the array
#2.extend():- used when we want to add more than one element at the end of the array
#3.insert():- used when we want to add an element at a specific position in the array
#When we use the extend function, we need to specify the values inside square brackets
# -
a
a.append(8)
a
a.extend([9,34,53])
a
# +
a.insert(1,6)
a
#in the insert function, the first argument is the index at which the value should be added
#The second argument is the value that should be added to the array
# -
# # Removing elements of an Array
# +
# To remove elements from the array, we can use two methods:
#1. pop() :- used when we want to remove an element and return it
#2. remove() :- used when we want to remove an element with a specific value, without returning it
#The pop() function can take no parameters or only one parameter.
#The parameter the pop() function can take is the index of the element
#If we don't specify any parameter, the pop() function removes and returns the last element in the array
#The remove() function takes only one parameter: the value of the element to be removed
# -
a
a.pop()
a
a.pop(-2)
a
a.remove(6) #Here, the first occurrence of '6' will be removed
a
# # Array Concatenation
# +
#concatenation means joining
#Array concatenation can be done with the "+" symbol
#One rule is that both arrays should have the same type code
# When we try to concatenate arrays of different type codes, we get a TypeError
# -
b=arr.array('i',[1,2,3,4,5,6,7])
c=arr.array('i',[23,25,26,27])
d=arr.array('i')
d=b+c
d
# # Slicing an Array
# +
#Slicing means fetching a particular range of values from the array
#An array can be sliced using the : symbol
#This returns the range of elements that we have specified by the index numbers
#Slicing an array only returns the values; it doesn't remove them from the array
# -
d
d[0:5]
#Here the slice starts at index 0 and goes up to index 5; it doesn't include the value at index position 5
d
d[0:-2]
d[0:-3]
d[::-1]
d
# +
#[::-1] - returns a reversed copy of the array; it doesn't reverse the array in place
#This method is not preferred for reversing, because it creates an extra copy in memory
# -
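# +
#If an actual in-place reversal is needed (rather than a reversed copy), the array type provides a reverse() method - a small illustration:
e = arr.array('i', [1, 2, 3, 4])
e.reverse() #reverses the array in place, without creating an extra copy
e
# -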
# # Looping through an array
# +
#There are two types of loops:
# 1. for :- the for loop iterates over the items of an array a specified number of times
# 2. while :- the while loop iterates over the elements until a certain condition is met
# 3 conditions to use a while loop:
# 1:- initialise the iterator
# 2:- specify a condition
# 3:- increment the iterator
#If we don't increment the iterator, the while loop will go on forever
# -
d
for x in d:
print(x)
for x in d[0:5]:
print(x)
d
# +
#Here the iterator is named as temp; we can use any name we want
temp = 0
while temp<d[2]:
print(d[temp])
temp = temp+1
# -
a
temp=0
while temp<len(a):
print(a[temp])
temp +=1
        biased.append(sampleKTimes(iters, lambda: torch.std(dataFunc(b), unbiased=False)))
unbiased.append(sampleKTimes(iters, lambda: torch.std(dataFunc(b), unbiased=True)))
biasedFixed.append(sampleKTimes(iters, lambda: torch.std(dataFunc(b)*getCorrectionBiased(b), unbiased=False)))
unbiasedFixed.append(sampleKTimes(iters, lambda: torch.std(dataFunc(b)*getCorrectionUnbiased(b), unbiased=True)))
if not returnResults:
fig = plt.figure(figsize=(10,10))
ax1 = fig.add_subplot(211)
ax1.plot(batchSizes,biased, label="Biased " + str(iters) + " iters")
ax1.plot(batchSizes,unbiased, label="Unbiased " + str(iters) + " iters")
ax1.plot(batchSizes,biasedFixed, label="Biased fixed " + str(iters) + " iters")
ax1.plot(batchSizes,unbiasedFixed, label="Unbiased fixed " + str(iters) + " iters")
ax1.set_ylabel('Estimated Standard Deviation')
ax1.set_xlabel('Batch Size')
ax1.legend()
else: return batchSizes, biased, unbiased, biasedFixed, unbiasedFixed
runComparisonVarianceExperiment(iters=10000, maxBatchSize=40, dataFunc=dataFunc)
runComparisonVarianceExperiment(iters=10000, maxBatchSize=40, dataFunc=lambda batchSize: torch.normal(10, 0.1, [batchSize]))
# So that's all great, but how does this relate to actual neural networks? Let's see if these estimates still hold up. To do that, we will make a single layer, then add a "fixup layer" that tries to make the outputs of that layer zero mean and unit standard deviation. If our estimates are too far off, we will end up not getting the correct output statistics.
class FeedforwardLayer(torch.nn.Module):
def __init__(self, inSize, outSize):
super().__init__()
self.inSize = inSize
self.outSize = outSize
self.weights = nn.Parameter(torch.normal(0, 1, [inSize, outSize]))
self.bias = nn.Parameter(torch.normal(0, 1, [outSize]))
def forward(self, x):
res = [email protected]+self.bias
return res
def generateInputData(self, batchSize):
return torch.normal(0, 1, [batchSize, self.inSize])
class FixupLayer(torch.nn.Module):
def __init__(self, layer, fixupIters, fixupBatchSize, applyCorrection=True, eps=0.01):
super().__init__()
assert fixupBatchSize>1, "Fixup batch size needs to be greater than one to compute std"
self.fixupIters, self.fixupBatchSize = fixupIters, fixupBatchSize
self.layer = layer
x = layer.generateInputData(fixupBatchSize)
layerOutput = layer(x)
layerOutputShape = list(layerOutput.shape)[1:]
self.avgStd = torch.ones(layerOutputShape)
self.avgMean = torch.zeros(layerOutputShape)
for i in range(fixupIters):
x = layer.generateInputData(fixupBatchSize)
y = layer(x)
std = y.std(axis=0)
if applyCorrection: std = std*getCorrectionUnbiased(fixupBatchSize)
self.avgStd += std
self.avgMean += y.mean(axis=0)
self.avgStd /= float(fixupIters)
self.avgMean /= float(fixupIters)
# Make sure it's not too small so we don't get nans
self.avgStd = torch.clamp(self.avgStd, min=eps)
def forward(self, x):
return (self.layer(x)-self.avgMean)/self.avgStd
def runFixupExperiment(layer, fixupIters, maxBatchSize, sampleIters, layerInputs, layerOutputs, ax=None, **kwargs):
batchSizes = []
uncorrected = []
corrected = []
for b in range(2, maxBatchSize):
fixupUncorrected = FixupLayer(layer, fixupIters=fixupIters, fixupBatchSize=b, applyCorrection=False)
fixupCorrected = FixupLayer(layer, fixupIters=fixupIters, fixupBatchSize=b, applyCorrection=True)
# Use a large enough batch size that we can get the correct estimate for comparing
uncorrected.append(sampleKTimes(sampleIters, lambda: torch.std(fixupUncorrected(torch.normal(0, 1, [2000, layerInputs])))))
corrected.append(sampleKTimes(sampleIters, lambda: torch.std(fixupCorrected(torch.normal(0, 1, [2000, layerInputs])))))
batchSizes.append(b)
if ax is None:
fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(211)
    ax.plot(batchSizes,uncorrected, '--', label="Uncorrected " + str(fixupIters) + " iters (" + str(layerInputs) + "," + str(layerOutputs) + ")" + str(kwargs))
ax.plot(batchSizes,corrected, label="Corrected " + str(fixupIters) + " iters (" + str(layerInputs) + "," + str(layerOutputs) + ")" + str(kwargs))
ax.set_ylabel('Standard Deviation')
ax.set_xlabel('Fixup Batch Size')
ax.legend()
return ax
# +
def runExperiement(layerInputs, layerOutputs, ax=None):
layer = FeedforwardLayer(layerInputs, layerOutputs)
return runFixupExperiment(layer=layer, fixupIters=1000, maxBatchSize=20, sampleIters=10, layerInputs=layerInputs, layerOutputs=layerOutputs, ax=ax)
ax = runExperiement(100, 10)
ax = runExperiement(10, 10, ax=ax)
ax = runExperiement(10, 100, ax=ax)
# -
# Great, so we see that the adjustment seems to help, and also that the ratio between inputs and outputs doesn't seem to matter much. What if we add an activation function?
# +
class SoftRELULayer(torch.nn.Module):
def __init__(self, weightLess, offset):
super().__init__()
self.weightLess = weightLess
self.offset = offset
def forward(self, x):
biggerThan = torch.max(torch.tensor([0.0]), x)
lessThan = torch.min(torch.tensor([0.0]), x)
return biggerThan + lessThan*self.weightLess - self.offset
class DenseLayer(torch.nn.Module):
def __init__(self, inSize, outSize, act):
super().__init__()
self.inSize, self.outSize, self.act = inSize, outSize, act
self.feedforward = FeedforwardLayer(inSize, outSize)
def forward(self, x):
return self.act(self.feedforward(x))
def generateInputData(self, bs):
return torch.normal(0, 1, [bs, self.inSize])
# +
def runExperiement(layerInputs, layerOutputs, ax=None, **kwargs):
layer = DenseLayer(layerInputs, layerOutputs, act=SoftRELULayer(**kwargs))
print(layerInputs, layerOutputs, kwargs)
return runFixupExperiment(layer=layer, fixupIters=1000, maxBatchSize=20, sampleIters=20, layerInputs=layerInputs, layerOutputs=layerOutputs, ax=ax, **kwargs)
ax = runExperiement(10, 10, weightLess=0.0, offset=0.0)
ax = runExperiement(10, 10, weightLess=1.0, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=2.0, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=0.5, offset=0.0, ax=ax)
# +
ax = runExperiement(10, 10, weightLess=0.0, offset=0.5)
ax = runExperiement(10, 10, weightLess=1.0, offset=0.5, ax=ax)
ax = runExperiement(10, 10, weightLess=2.0, offset=0.5, ax=ax)
ax = runExperiement(10, 10, weightLess=0.5, offset=0.5, ax=ax)
# -
# That's interesting. Fixup seems to work as long as we don't use "pure" RELU. Let's try something very close to pure RELU to verify this:
ax = runExperiement(10, 10, weightLess=0.001, offset=0.0)
ax = runExperiement(10, 10, weightLess=0.01, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=0.1, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=0.3, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=1.5, offset=0.0)
ax = runExperiement(10, 10, weightLess=3.0, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=10.0, offset=0.0, ax=ax)
ax = runExperiement(10, 10, weightLess=100.0, offset=0.0, ax=ax)
layerInputs = 100
layerOutputs = 10
layer = FeedforwardLayer(layerInputs, layerOutputs)
runFixupExperiment(layer=layer, fixupIters=1000, maxBatchSize=20, sampleIters=1000, layerInputs=layerInputs, layerOutputs=layerOutputs)
def adjustedStd(x, batchSize, unbiased=True, **kwargs):
if unbiased: return torch.std(x, unbiased=unbiased, **kwargs)*getCorrectionUnbiased(batchSize)
else: return torch.std(x, unbiased=unbiased, **kwargs)*getCorrectionBiased(batchSize)
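# A quick sanity check of the helper above (a sketch only; it assumes torch is already imported earlier in this notebook, as the rest of the code here does). Averaged over many draws from a unit-variance normal, the adjusted estimate should sit close to 1.0 even for a small batch.
# +
smallBatch = 8
estimates = [adjustedStd(torch.normal(0, 1, [smallBatch]), smallBatch).item() for _ in range(2000)]
print("mean adjusted std estimate:", sum(estimates) / len(estimates))
# -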
    x_test_pr = pr.fit_transform(x_test[['horsepower']])
lr.fit(x_train_pr, y_train)
Rsqu_test.append(lr.score(x_test_pr, y_test))
plt.plot(order, Rsqu_test)
plt.xlabel('order')
plt.ylabel('R^2')
plt.title('R^2 Using Test Data')
plt.text(3, 0.75, 'Maximum R^2 ')
# -
# We see the R^2 gradually increases until an order three polynomial is used. Then the R^2 dramatically decreases at four.
# The following function will be used in the next section; please run the cell.
def f(order, test_data):
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=test_data, random_state=0)
pr = PolynomialFeatures(degree=order)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
poly = LinearRegression()
poly.fit(x_train_pr,y_train)
PollyPlot(x_train[['horsepower']], x_test[['horsepower']], y_train,y_test, poly, pr)
# The following interface allows you to experiment with different polynomial orders and different amounts of data.
# + jupyter={"outputs_hidden": false}
interact(f, order=(0, 6, 1), test_data=(0.05, 0.95, 0.05))
# -
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4a):</h1>
#
# <b>We can perform polynomial transformations with more than one feature. Create a "PolynomialFeatures" object "pr1" of degree two?</b>
# </div>
# +
pr1=PolynomialFeatures(degree=2)
x_train_pr1=pr1.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
x_test_pr1=pr1.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
print("number of test samples :", x_test_pr1.shape[0])
print("number of training samples:",x_train_pr1.shape[0])
poly1=linear_model.LinearRegression().fit(x_train_pr1,y_train)
yhat_test1=poly1.predict(x_test_pr1)
Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)
# -
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4b): </h1>
#
# <b>
# Transform the training and testing samples for the features 'horsepower', 'curb-weight', 'engine-size' and 'highway-mpg'. Hint: use the method "fit_transform"
# ?</b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# x_train_pr1=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
#
# x_test_pr1=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4c): </h1>
# <b>
# How many dimensions does the new feature have? Hint: use the attribute "shape"
# </b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# There are now 15 features: x_train_pr1.shape
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4d): </h1>
#
# <b>
# Create a linear regression model "poly1" and train the object using the method "fit" using the polynomial features?</b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# poly1=linear_model.LinearRegression().fit(x_train_pr1,y_train)
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4e): </h1>
# <b>Use the method "predict" to predict an output on the polynomial features, then use the function "DistributionPlot" to display the distribution of the predicted output vs the test data?</b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# yhat_test1=poly1.predict(x_test_pr1)
# Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
# DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4f): </h1>
#
# <b>Use the distribution plot to determine the two regions were the predicted prices are less accurate than the actual prices.</b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# The predicted value is lower than actual value for cars where the price $ 10,000 range, conversely the predicted price is larger than the price cost in the $30, 000 to $40,000 range. As such the model is not as accurate in these ranges .
#
# -->
#
# <img src = "https://ibm.box.com/shared/static/c35ipv9zeanu7ynsnppb8gjo2re5ugeg.png" width = 700, align = "center">
#
# <h2 id="ref3">Part 3: Ridge regression</h2>
# In this section, we will review Ridge regression and see how the parameter alpha changes the model. Just a note: here our test data will be used as validation data.
# Let's perform a degree two polynomial transformation on our data.
pr=PolynomialFeatures(degree=2)
x_train_pr=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
x_test_pr=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
# Let's import <b>Ridge</b> from the module <b>linear models</b>.
from sklearn.linear_model import Ridge
# Let's create a Ridge regression object, setting the regularization parameter to 0.1
RigeModel=Ridge(alpha=0.1)
# Like regular regression, you can fit the model using the method <b>fit</b>.
# + jupyter={"outputs_hidden": false}
RigeModel.fit(x_train_pr, y_train)
# -
# Similarly, you can obtain a prediction:
# + jupyter={"outputs_hidden": false}
yhat = RigeModel.predict(x_test_pr)
# -
# Let's compare the first five predicted samples to our test set
# + jupyter={"outputs_hidden": false}
print('predicted:', yhat[0:4])
print('test set :', y_test[0:4].values)
# -
# We select the value of alpha that minimizes the test error; for example, we can use a for loop.
# + jupyter={"outputs_hidden": false}
Rsqu_test = []
Rsqu_train = []
dummy1 = []
ALFA = 10 * np.array(range(0,1000))
for alfa in ALFA:
RigeModel = Ridge(alpha=alfa)
RigeModel.fit(x_train_pr, y_train)
Rsqu_test.append(RigeModel.score(x_test_pr, y_test))
Rsqu_train.append(RigeModel.score(x_train_pr, y_train))
# -
# We can plot out the value of R^2 for different Alphas
# + jupyter={"outputs_hidden": false}
width = 12
height = 10
plt.figure(figsize=(width, height))
plt.plot(ALFA,Rsqu_test, label='validation data ')
plt.plot(ALFA,Rsqu_train, 'r', label='training Data ')
plt.xlabel('alpha')
plt.ylabel('R^2')
plt.legend()
# -
# Figure 6: The blue line represents the R^2 on the validation (test) data, and the red line represents the R^2 on the training data. The x-axis represents the different values of alpha.
# The red line in figure 6 represents the R^2 on the training data: as alpha increases, the R^2 decreases, so the model fits the training data less closely. The blue line represents the R^2 on the validation data: as alpha increases beyond a certain point, the R^2 decreases as well, meaning the model starts to perform worse on unseen data.
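# As a small follow-up sketch (using the ALFA array and Rsqu_test list computed above), we can read off the alpha value that gives the highest R^2 on the validation data:
# + jupyter={"outputs_hidden": false}
best_index = np.argmax(Rsqu_test)
print('Best alpha:', ALFA[best_index], ' validation R^2:', Rsqu_test[best_index])
# -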
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #5): </h1>
#
# Perform Ridge regression and calculate the R^2 using the polynomial features, use the training data to train the model and test data to test the model. The parameter alpha should be set to 10.
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
RigeModel = Ridge(alpha=10)
RigeModel.fit(x_train_pr, y_train)
RigeModel.score(x_test_pr, y_test)
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# RigeModel = Ridge(alpha=10)
# RigeModel.fit(x_train_pr, y_train)
# RigeModel.score(x_test_pr, y_test)
#
# -->
# <h2 id="ref4">Part 4: Grid Search</h2>
# The term alpha is a hyperparameter; sklearn has the class <b>GridSearchCV</b> to make the process of finding the best hyperparameter simpler.
# Let's import <b>GridSearchCV</b> from the module <b>model_selection</b>.
# + jupyter={"outputs_hidden": false}
from sklearn.model_selection import GridSearchCV
# -
# We create a dictionary of parameter values:
# + jupyter={"outputs_hidden": false}
parameters1= [{'alpha': [0.001,0.1,1, 10, 100, 1000, 10000, 100000, 100000]}]
parameters1
# -
# Create a Ridge regression object:
# + jupyter={"outputs_hidden": false}
RR=Ridge()
RR
# -
# Create a ridge grid search object
# + jupyter={"outputs_hidden": false}
Grid1 = GridSearchCV(RR, parameters1,cv=4)
# -
# Fit the model
# + jupyter={"outputs_hidden": false}
Grid1.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_data)
# -
# The object finds the best parameter values on the validation data. We can obtain the estimator with the best parameters and assign it to the variable BestRR as follows:
# + jupyter={"outputs_hidden": false}
BestRR=Grid1.best_estimator_
BestRR
# -
# We now test our model on the test data
# + jupyter={"outputs_hidden": false}
BestRR.score(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_test)
# -
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #6): </h1>
# Perform a grid search for the alpha parameter and the normalization parameter, then find the best values of the parameters
# </div>
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
parameters2= [{'alpha': [0.001,0.1,1, 10, 100, 1000,10000,100000,100000],'normalize':[True,False]} ]
Grid2 = GridSearchCV(Ridge(), parameters2,cv=4)
Grid2.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']],y_data)
Grid2.best_estimator_
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# parameters2= [{'alpha': [0.001,0.1,1, 10, 100, 1000,10000,100000,100000],'normalize':[True,False]} ]
# Grid2 = GridSearchCV(Ridge(), parameters2,cv=4)
# Grid2.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']],y_data)
# Grid2.best_estimator_
#
# -->
# <h1>Thank you for completing this notebook!</h1>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# <p><a href="https://cocl.us/corsera_da0101en_notebook_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p>
# </div>
#
# <h3>About the Authors:</h3>
#
# This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank">Mahdi Noorian PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a>, Bahare Talayian, Eric Xiao, Steven Dong, Parizad, Hima Vsudevan and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank">Fiorella Wenver</a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>.
#
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| 26,645 |
/.ipynb_checkpoints/Untitled1-checkpoint.ipynb | c1e5289da4a5ec638fa77716b6bff30f441d181f | [] | no_license | XianyiCheng/videogame_music_generation | https://github.com/XianyiCheng/videogame_music_generation | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 80,980 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import CRBM
import input_manipulation
tf.reset_default_graph()
"""
This file stores the code for initializing the weights of the RNN-RBM. We initialize the parameters of the RBMs by
training them directly on the data with CD-k. We initialize the parameters of the RNN with small weights.
"""
num_epochs = 20 #The number of epochs to train the CRBM
lr = 0.0001 #The learning rate for the CRBM
num_conv_filters = 8
conv_strides = 2
span = 123
num_timesteps = 32
size_conv_filters = 4
hidden_width = np.floor((num_timesteps-size_conv_filters)/conv_strides) + 1
n_hidden_recurrent = 100
#Load the Songs
songs = input_manipulation.get_songs('Game_Music_Midi')
x = tf.placeholder(tf.float32, [None, num_timesteps, span], name="x") #The placeholder variable that holds our data
#Testing
batch_size = tf.placeholder(tf.int64, [1], name="batch_size")
#parameters of CRBM
W = tf.Variable(tf.truncated_normal([size_conv_filters, span, 1, num_conv_filters], 0.001), name="W") #The weight matrix of the RBM
bh = tf.Variable(tf.zeros([hidden_width,num_conv_filters], tf.float32), name="bh") #The RNN -> RBM hidden bias vector
bv = tf.Variable(tf.zeros([num_timesteps, span], tf.float32), name="bv")#The RNN -> RBM visible bias vector
#parameters related to RNN
Wuh = tf.Variable(tf.random_normal([n_hidden_recurrent, int(hidden_width*num_conv_filters)], 0.0001), name="Wuh") #The RNN -> RBM hidden weight matrix
Wuv = tf.Variable(tf.random_normal([n_hidden_recurrent, int(num_timesteps*span)], 0.0001), name="Wuv") #The RNN -> RBM visible weight matrix
Wvu = tf.Variable(tf.random_normal([int(num_timesteps*span), n_hidden_recurrent], 0.0001), name="Wvu") #The data -> RNN weight matrix
Wuu = tf.Variable(tf.random_normal([n_hidden_recurrent, n_hidden_recurrent], 0.0001), name="Wuu") #The RNN hidden unit weight matrix
bu = tf.Variable(tf.zeros([1, n_hidden_recurrent], tf.float32), name="bu") #The RNN hidden unit bias vector
u0 = tf.Variable(tf.zeros([1, n_hidden_recurrent], tf.float32), name="u0") #The initial state of the RNN
#The RBM bias vectors. These matrices will get populated during rnn-rbm training and generation
BH_t = tf.Variable(tf.ones([hidden_width,num_conv_filters], tf.float32), name="BH_t")
BV_t = tf.Variable(tf.ones([num_timesteps, span], tf.float32), name="BV_t")
#Build the RBM optimization
saver = tf.train.Saver()
#Note that we initialize the RNN->RBM bias vectors with the bias vectors of the trained RBM. These vectors will form the templates for the bv_t and
#bh_t of each RBM that we create when we run the RNN-RBM
# -
updt = CRBM.get_cd_update_batch(x, W, bv, bh, 1, lr)
# +
sess = tf.Session()
#Initialize the variables of the model
init = tf.global_variables_initializer()
sess.run(init)
#Run over each song num_epoch times
for epoch in tqdm(range(20)):
for song in songs:
sess.run(updt, feed_dict={x: song})
"""
for i in range(song.shape[0]):
[W_c,bv_c, bh_c]=sess.run(updt, feed_dict={x: song[i,:,:]})
#W = tf.add(W,W_c)
#bv = tf.add(bv,bv_c)
#bh = tf.add(bh,bh_c)
"""
# -
s = songs[0][0,:,:]
x_sample = CRBM.gibbs_sample(s, W, bv, bh, 1)
#The sample of the hidden nodes, starting from the visible state of x
h = CRBM.crbm_inference(s,W,bh)
#The sample of the hidden nodes, starting from the visible state of x_sample
h_sample = CRBM.crbm_inference(x_sample, W, bh)
#fc = CRBM.free_energy(x_sample, h_sample, W, bv, bh)
fc = tf.exp(h_sample)
#fc = h_sample
g = tf.gradients(fc,h_sample,stop_gradients = h_sample)
s = song[0,:,:]
x_sample = CRBM.gibbs_sample(s, W, bv, bh, 1)
#The sample of the hidden nodes, starting from the visible state of x
h = CRBM.crbm_inference(s,W,bh)
#The sample of the hidden nodes, starting from the visible state of x_sample
h_sample = CRBM.crbm_inference(x_sample, W, bh)
#fc = CRBM.free_energy(x_sample, h_sample, W, bv, bh)
fc = tf.exp(h_sample)
#fc = h_sample
g = tf.gradients(fc,h_sample,stop_gradients = h_sample)
h_sample.eval(session=sess)
h_sample
hhh = sess.run(h_sample)
print(hhh)
print('111')
print(sess.run(tf.exp(hhh)))
ck</td>
# <td>32.2</td>
# <td>42</td>
# <td>17-Feb-76</td>
# <td></td>
# <td>9.5</td>
# </tr>
# <tr>
# <td>Bee Gees</td>
# <td>Saturday Night Fever</td>
# <td>1977</td>
# <td>1:15:54</td>
# <td>Disco</td>
# <td>20.6</td>
# <td>40</td>
# <td>15-Nov-77</td>
# <td>Y</td>
# <td>9.0</td>
# </tr>
# <tr>
# <td>Fleetwood Mac</td>
# <td>Rumours</td>
# <td>1977</td>
# <td>00:40:01</td>
# <td>Soft rock</td>
# <td>27.9</td>
# <td>40</td>
# <td>04-Feb-77</td>
# <td></td>
# <td>9.5</td>
# </tr>
# </table></font>
# <hr>
# <h2 id="tuple">Tuples</h2>
# In Python, there are different data types: string, integer and float. These data types can all be contained in a tuple as follows:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesType.png" width="750" align="center" />
# Now, let us create your first tuple with string, integer and float.
# +
# Create your first tuple
tuple1 = ("disco",10,1.2 )
tuple1
# -
# The type of variable is a **tuple**.
# +
# Print the type of the tuple you created
type(tuple1)
# -
# <h3 id="index">Indexing</h3>
# Each element of a tuple can be accessed via an index. The following table represents the relationship between the index and the items in the tuple. Each element can be obtained by the name of the tuple followed by a square bracket with the index number:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesIndex.gif" width="750" align="center">
# We can print out each value in the tuple:
# +
# Print the variable on each index
print(tuple1[0])
print(tuple1[1])
print(tuple1[2])
# -
# We can print out the **type** of each value in the tuple:
#
# +
# Print the type of value on each index
print(type(tuple1[0]))
print(type(tuple1[1]))
print(type(tuple1[2]))
# -
# We can also use negative indexing. We use the same table above with corresponding negative values:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNeg.png" width="750" align="center">
# We can obtain the last element as follows (this time we will not use the print statement to display the values):
# +
# Use negative index to get the value of the last element
tuple1[-1]
# -
# We can display the next two elements as follows:
# +
# Use negative index to get the value of the second last element
tuple1[-2]
# +
# Use negative index to get the value of the third last element
tuple1[-3]
# -
# <h3 id="concate">Concatenate Tuples</h3>
# We can concatenate or combine tuples by using the **+** sign:
# +
# Concatenate two tuples
tuple2 = tuple1 + ("hard rock", 10)
tuple2
# -
# We can slice tuples obtaining multiple values as demonstrated by the figure below:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesSlice.gif" width="750" align="center">
# <h3 id="slice">Slicing</h3>
# We can slice tuples, obtaining new tuples with the corresponding elements:
# +
# Slice from index 0 to index 2
tuple2[0:3]
# -
# We can obtain the last two elements of the tuple:
# +
# Slice from index 3 to index 4
tuple2[3:5]
# -
# We can obtain the length of a tuple using the len() function:
# +
# Get the length of tuple
len(tuple2)
# -
# This figure shows the number of elements:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesElement.png" width="750" align="center">
# <h3 id="sort">Sorting</h3>
# Consider the following tuple:
# +
# A sample tuple
Ratings = (0, 9, 6, 5, 10, 8, 9, 6, 2)
# -
# We can sort the values in a tuple; note that the <code>sorted</code> function returns a new sorted list:
# +
# Sort the tuple
RatingsSorted = sorted(Ratings)
RatingsSorted
# -
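# If a tuple (rather than a list) is needed, the sorted result can be converted back - a small illustration:
# +
# Convert the sorted list back into a tuple
RatingsSortedTuple = tuple(sorted(Ratings))
RatingsSortedTuple
# -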
# <h3 id="nest">Nested Tuple</h3>
# A tuple can contain another tuple as well as other more complex data types. This process is called 'nesting'. Consider the following tuple with several elements:
# +
# Create a nest tuple
NestedT =(1, 2, ("pop", "rock") ,(3,4),("disco",(1,2)))
# -
# Each element in the tuple including other tuples can be obtained via an index as shown in the figure:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestOne.png" width="750" align="center">
# +
# Print element on each index
print("Element 0 of Tuple: ", NestedT[0])
print("Element 1 of Tuple: ", NestedT[1])
print("Element 2 of Tuple: ", NestedT[2])
print("Element 3 of Tuple: ", NestedT[3])
print("Element 4 of Tuple: ", NestedT[4])
# -
# We can use the second index to access other tuples as demonstrated in the figure:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestTwo.png" width="750" align="center">
# We can access the nested tuples :
# +
# Print element on each index, including nest indexes
print("Element 2, 0 of Tuple: ", NestedT[2][0])
print("Element 2, 1 of Tuple: ", NestedT[2][1])
print("Element 3, 0 of Tuple: ", NestedT[3][0])
print("Element 3, 1 of Tuple: ", NestedT[3][1])
print("Element 4, 0 of Tuple: ", NestedT[4][0])
print("Element 4, 1 of Tuple: ", NestedT[4][1])
# -
# We can access strings in the second nested tuples using a third index:
# +
# Print the first element in the second nested tuples
NestedT[2][1][0]
# +
# Print the second element in the second nested tuples
NestedT[2][1][1]
# -
# We can use a tree to visualise the process. Each new index corresponds to a deeper level in the tree:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestThree.gif" width="750" align="center">
# Similarly, we can access elements nested deeper in the tree with a fourth index:
# +
# Print the first element in the second nested tuples
NestedT[4][1][0]
# +
# Print the second element in the second nested tuples
NestedT[4][1][1]
# -
# The following figure shows the relationship of the tree and the element <code>NestedT[4][1][1]</code>:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestFour.gif" width="750" align="center">
# <h2 id="quiz">Quiz on Tuples</h2>
# Consider the following tuple:
# +
# sample tuple
genres_tuple = ("pop", "rock", "soul", "hard rock", "soft rock", \
"R&B", "progressive rock", "disco")
genres_tuple
# -
# Find the length of the tuple, <code>genres_tuple</code>:
# Write your code below and press Shift+Enter to execute
len(genres_tuple)
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesQuiz.png" width="1100" align="center">
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# len(genres_tuple)
# -->
# Access the element, with respect to index 3:
# Write your code below and press Shift+Enter to execute
genres_tuple[3]
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# genres_tuple[3]
# -->
# Use slicing to obtain indexes 3, 4 and 5:
# Write your code below and press Shift+Enter to execute
genres_tuple[3:6]
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# genres_tuple[3:6]
# -->
# Find the first two elements of the tuple <code>genres_tuple</code>:
# Write your code below and press Shift+Enter to execute
genres_tuple[0:2]
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# genres_tuple[0:2]
# -->
# Find the first index of <code>"disco"</code>:
# Write your code below and press Shift+Enter to execute
genres_tuple.index("disco")
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# genres_tuple.index("disco")
# -->
# Generate a sorted List from the Tuple <code>C_tuple=(-5, 1, -3)</code>:
# Write your code below and press Shift+Enter to execute
C_tuple=(-5, 1, -3)
sorted(C_tuple)
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
# C_tuple = (-5, 1, -3)
# C_list = sorted(C_tuple)
# C_list
# -->
# <hr>
# <h2>The last exercise!</h2>
# <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
# <hr>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <h2>Get IBM Watson Studio free of charge!</h2>
# <p><a href="https://cocl.us/NotebooksPython101bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p>
# </div>
# <h3>About the Authors:</h3>
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| 14,659 |
/DecisionTree-Implementation.ipynb | fec68bd786db1aa25fccc1c92240d63f638b4814 | [] | no_license | sakshi2199/MachineLearning | https://github.com/sakshi2199/MachineLearning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,243 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install numpy pandas keras scikit-learn
#Make necessary imports
import numpy as np
import itertools
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
#Now let's read the data into a dataframe and take a look at the first few rows
df = pd.read_csv("news.csv")
df.shape
df.head()
#Let's get the labels from the dataframe
labels = df.label
labels.head()
#Split the dataset into testing and training datasets
X_train, X_test, Y_train, Y_test = train_test_split(df['text'],labels,test_size=0.2,random_state=7)
#Initialize a TfidfVectorizer with English stop words and a maximum document frequency of 0.7; terms that appear in more than 70% of the documents are discarded.
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
#Fit and transform the training data, then transform the test data
tfidf_train=tfidf_vectorizer.fit_transform(X_train)
tfidf_test=tfidf_vectorizer.transform(X_test)
pac = PassiveAggressiveClassifier(max_iter=50)
pac.fit(tfidf_train,Y_train)
Y_pred = pac.predict(tfidf_test)
score = accuracy_score(Y_test,Y_pred)
score
confusion_matrix(Y_test,Y_pred, labels=['FAKE','REAL'])
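#For readability, the same confusion matrix can also be viewed as a labeled DataFrame (a small sketch; pandas is already imported above)
cm = confusion_matrix(Y_test, Y_pred, labels=['FAKE', 'REAL'])
pd.DataFrame(cm, index=['actual FAKE', 'actual REAL'], columns=['predicted FAKE', 'predicted REAL'])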
# + colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="b258f11a-2a9d-4a58-bf0d-4443aed37d70"
predicted_output
# + id="IZK5lztSByJu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="cec7bf46-8e0b-4e45-b7e9-c2602eac4ebc"
test_ans
# + id="qsGfhRpAB0RB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ab1e0099-02e3-43e2-b08c-e9fcb61aed42"
score=accuracy_score(test_ans, predicted_output)
score*100
# + id="HgCqCpUFCCRn" colab_type="code" colab={}
plt.show()
plt.close()
# +
plt.axis("equal")
plt.pie([df['Total Deaths'].max(),df['Total Recoveries'].max()],labels=["Deaths","Recoveries"], shadow=True,colors=["red","green"],
autopct='%1.1f%%',radius=1.5,explode=[0,0.1],counterclock=True, startangle=45)
plt.savefig("piechart2.png", bbox_inches="tight", pad_inches=1, transparent=False)
plt.show()
plt.close()
# -
# Deaths Vs. Recoveries
| 2,511 |
/Tutorials/DeepLearningForAudio/Deep Learning for Audio Part 2b - Train and Predict on UrbanSound dataset.ipynb | 41beb7c0380573e6e9805b2ef680b233e62a31bb | [
"MIT"
] | permissive | Azure/DataScienceVM | https://github.com/Azure/DataScienceVM | 177 | 112 | MIT | 2022-12-13T19:55:55 | 2022-11-22T19:35:11 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 140,379 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning for Audio Part 2b - Train and Predict on UrbanSound dataset
# Following the pre-processing code in part 2a, we will train a neural network to achieve state-of-the-art performance using a CNN. There are a few published benchmarks, and the paper [Learning from Between-class Examples for Deep Sound Recognition](https://arxiv.org/abs/1711.10282) by Tokozume et al. achieves the state-of-the-art result with an error rate of 21.7%.
# +
# change the seed before anything else
import numpy as np
np.random.seed(1)
import tensorflow as tf
tf.set_random_seed(1)
import os
import time
import keras
keras.backend.clear_session()
import matplotlib.pyplot as plt
import sklearn
from keras.models import Sequential
from keras.layers import Activation
from keras.layers import Convolution2D, MaxPooling2D, Dropout
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import Adamax
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.regularizers import l2
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
from keras.layers.normalization import BatchNormalization
# +
frames = 150
bands = 150
feature_size = bands * frames
num_channels = 3
data_dir = "/mnt/us8k-" + str(bands) + "bands-" + str(frames) + "frames-"+str(num_channels)+"channel"
num_labels = 10
# -
# If you're going to run this code with the full data set, this notebook assumes you've already parsed all the files and saved the numpy array to disk. We will load all the training examples (a set of 43722 examples, the first 8 folds), then use fold9 as the validation fold, and use fold 10 as the test fold.
#
# this will aggregate all the training data
def load_all_folds(test_fold):
assert (type(test_fold) == int)
assert (test_fold > 0 and test_fold < 11)
subsequent_fold = False
train_set_range = list(range(1, 11))
train_set_range.remove(test_fold)
valid_fold = train_set_range.pop()
for k in train_set_range:
fold_name = 'fold' + str(k)
feature_file = os.path.join(data_dir, fold_name + '_x.npy')
labels_file = os.path.join(data_dir, fold_name + '_y.npy')
loaded_features = np.load(feature_file)
# flip the spectrogram for each channel
loaded_features = np.transpose(loaded_features, (0, 2, 1, 3))
loaded_labels = np.load(labels_file)
print("Adding ", fold_name, "New Features: ", loaded_features.shape)
if subsequent_fold:
train_x_loaded = np.concatenate((train_x_loaded, loaded_features))
train_y_loaded = np.concatenate((train_y_loaded, loaded_labels))
else:
train_x_loaded = loaded_features
train_y_loaded = loaded_labels
subsequent_fold = True
# use the penultimate fold for validation
valid_fold_name = 'fold' + str(valid_fold)
feature_file = os.path.join(data_dir, valid_fold_name + '_x.npy')
labels_file = os.path.join(data_dir, valid_fold_name + '_y.npy')
valid_x = np.load(feature_file)
# flip the spectrogram for each channel
valid_x = np.transpose(valid_x, (0, 2, 1, 3))
valid_y = np.load(labels_file)
# and use the last fold for testing
test_fold_name = 'fold' + str(test_fold)
feature_file = os.path.join(data_dir, test_fold_name + '_x.npy')
labels_file = os.path.join(data_dir, test_fold_name + '_y.npy')
test_x = np.load(feature_file)
test_x = np.transpose(test_x, (0, 2, 1, 3))
test_y = np.load(labels_file)
return train_x_loaded, train_y_loaded, valid_x, valid_y, test_x, test_y
# # Training a Convolutional Neural Network with Keras and TensorFlow
# This method defines a few evaluation metrics that will be used to evaluate the performance of a trained model.
def evaluate(model, test_x, test_y):
y_prob = model.predict(test_x, verbose=0)
y_pred = np.argmax(y_prob, axis=-1)
y_true = np.argmax(test_y, 1)
# evaluate the model
score, accuracy = model.evaluate(test_x, test_y, batch_size=32)
print("\nAccuracy = {:.4f}".format(accuracy))
print("\nError Rate = {:.4f}".format(1. - accuracy))
return accuracy
# We use a DNN architecture on the featurized data similar to the winning solution of [DCASE 2016 Task 4](http://www.cs.tut.fi/sgn/arg/dcase2016/task-audio-tagging). DCASE is an annual challenge on sound and acoustic scene classification. The architecture is shown below:
#
# ![CNN Architecture](https://msdnshared.blob.core.windows.net/media/2018/01/013018_0211_HearingAIGe10.png)
def build_model():
model = Sequential()
# section 1
model.add(Convolution2D(filters=32, kernel_size=5,
strides=2,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal",
input_shape=(frames, bands, num_channels)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(filters=32, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
# section 2
model.add(Convolution2D(filters=64, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(filters=64, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
# section 3
model.add(Convolution2D(filters=128, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(filters=128, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(filters=128, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(filters=128, kernel_size=3,
strides=1,
padding="same",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
# section 4
model.add(Convolution2D(filters=512, kernel_size=3,
strides=1,
padding="valid",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(filters=512, kernel_size=1,
strides=1,
padding="valid",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
# section 5
model.add(Convolution2D(filters=10, kernel_size=1,
strides=1,
padding="valid",
kernel_regularizer=l2(0.0001),
kernel_initializer="normal"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))
# compile and fit model, reduce epochs if you want a result faster
# the validation set is used to identify parameter settings (epoch) that achieves
# the highest classification accuracy
return model
# apply scaling factor to a dataset - train, validation or test
def do_scale(x4d, verbose = True):
"""Do scale on the input sequence data.
Args:
      x4d: ndarray, input sequence data, shape: (n_clips, n_time, n_freq, channel)
verbose: boolean
Returns:
Scaled input sequence data.
"""
t1 = time.time()
(n_clips, n_time, n_freq, n_channel) = x4d.shape
x4d_scaled = np.zeros(x4d.shape)
for channel in range(n_channel):
x2d = x4d[:,:,:,channel].reshape((n_clips * n_time, n_freq))
x2d_scaled = scaler_list[channel].transform(x2d)
x3d_scaled = x2d_scaled.reshape((n_clips, n_time, n_freq))
x4d_scaled[:,:,:,channel] = x3d_scaled
if verbose == 1:
print("Scaling time: %s" % (time.time() - t1,))
return x4d_scaled
# +
# earlystopping ends training when the validation loss stops improving
model_checkpoint = ModelCheckpoint(
'./sound_classification_epoch_{epoch:03d}_val_loss_{val_loss:.4f}.hdf5',
monitor='val_loss', save_best_only=True)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, min_lr=1e-7)
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
callbacks = [reduce_lr_on_plateau, early_stopping]
acc_list = []
# preliminary estimation of performance
# use this if you want to test on 10 folds and obtain standard deviation estimate
# for test_fold in range(1, 11):
# use this if you just want to test performance on one fold
for test_fold in [10]:
keras.backend.clear_session()
model = build_model()
# compile the model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=Adamax(0.01))
train_x, train_y, valid_x, valid_y, test_x, test_y = load_all_folds(test_fold)
# for each channel, compute scaling factor
scaler_list = []
(n_clips, n_time, n_freq, n_channel) = train_x.shape
for channel in range(n_channel):
t1 = time.time()
xtrain_2d = train_x[:, :, :, channel].reshape((n_clips * n_time, n_freq))
scaler = sklearn.preprocessing.StandardScaler().fit(xtrain_2d)
# print("Channel %d Mean: %s" % (channel, scaler.mean_,))
# print("Channel %d Std: %s" % (channel, scaler.scale_,))
# print("Calculating scaler time: %s" % (time.time() - t1,))
scaler_list += [scaler]
train_x = do_scale(train_x)
valid_x = do_scale(valid_x)
test_x = do_scale(test_x)
print(train_x.shape)
# use a batch size to fully utilize GPU power
history = model.fit(train_x, train_y, validation_data=(valid_x, valid_y), callbacks=callbacks,
batch_size=256,
epochs=100)
acc = evaluate(model, test_x, test_y)
acc_list += [acc]
# -
acc_array = np.array(acc_list)
print("acc mean %.4f acc std %.4f" % (acc_array.mean(), acc_array.std()))
# +
# %matplotlib inline
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
#model.fit(train_x, train_y, validation_data=(valid_x, valid_y), callbacks=[earlystop], batch_size=32, nb_epoch=50)
acc = evaluate(model, test_x, test_y) #evaluate(model)
labels = ["air conditioner", "horn", "children", "dog", "drill", "engine", "gun", "hammer", "siren", "music"]
print("Showing Confusion Matrix")
y_prob = model.predict(test_x, verbose=0)
y_pred = np.argmax(y_prob, axis=-1)
y_true = np.argmax(test_y, 1)
cm = confusion_matrix(y_true, y_pred)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=' ')
for label in labels:
print("%{0}s".format(columnwidth) % label, end=' ')
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=' ')
for j in range(len(labels)):
cell = "%{0}s".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=' ')
print()
print_cm(cm, labels)
df_cm = pd.DataFrame(cm, labels, labels)
plt.figure(figsize=(16, 8))
sn.heatmap(df_cm, annot=True, annot_kws={"size": 14}, fmt='g', linewidths=.5)
# +
fig = plt.figure(figsize=(16,8))
print("History keys:", (history.history.keys()))
# summarise history for training and validation set accuracy
plt.subplot(1,2,1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# summarise history for training and validation set loss
plt.subplot(1,2,2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss', fontsize = 'large')
plt.xlabel('epoch', fontsize = 'large' )
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# Review the two sets of summary statistics that you generated in this section and answer the following question.
#
# * How much do the outliers change the mean value of the California average total payments for diabetes?
#
#
# #### Step 1: Use the `describe` function to find the mean value of the average total payments.
# Use the describe function to calculate the mean value of California's average total payments.
# YOUR CODE HERE
# #### Step 2: Use the interactive bar plot (for the sorted values of the California average total payments) to estimate a payment value that you can use to filter out the highest three data spikes.
# Using the ca_average_total_payments DataFrame, create a conditional statement
# that can be used to filter out the three largest payments
# YOUR CODE HERE
# #### Step 3: Use `loc` to filter out the three outlier payments from the California average total payments. Then recalculate the summary statistics by using the `describe` function.
# +
# Create a DataFrame that filters out the 3 largest payments from the California data
filtered_california_payments = # YOUR CODE HERE
# View the filtered DataFrame in a plot
# YOUR CODE HERE
# -
# Use the describe function to calculate summary statistics for the filtered data.
# YOUR CODE HERE
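# One possible way to fill in the three steps above is sketched below; it assumes `ca_average_total_payments` from the hint is a Series of the California average total payments (if it is a DataFrame, compare against its payment column instead), and the 150000 cutoff is only a made-up placeholder for the value you would read off the interactive bar plot.
# +
# summary statistics (including the mean) of the unfiltered payments
print(ca_average_total_payments.describe())
# hypothetical cutoff that sits just below the three largest spikes
outlier_cutoff = 150000
# use loc with a conditional statement to drop the three outliers, then re-describe
filtered_california_payments = ca_average_total_payments.loc[ca_average_total_payments < outlier_cutoff]
filtered_california_payments.plot()
print(filtered_california_payments.describe())
# -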
# #### Step 4: Review the two sets of summary statistics that you generated in this section and answer the following question.
# **Question** How much do the outliers change the mean value of the California average total payments for diabetes?
#
# **Answer** # YOUR ANSWER HERE
| 16,010 |
/Notebook/RF/RF_NoFE_ori | e47e42a279aa9891cf28ff5dd2b02d7eebda3c36 | [] | no_license | ngonhi/TrafficSignRecognition | https://github.com/ngonhi/TrafficSignRecognition | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 19,311 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # K-Means Clustering Example
# -
# ## Goal
# - implementation of the k-means algorithm
# + [markdown] deletable=true editable=true
# Let's randomly create some dummy data covering people grouped by income and age.
# + deletable=true editable=true
from numpy import random, array
# Create fake income/age clusters for N people around k cluster centres
def createClusteredData(N, k):
random.seed(10)
pointsPerCluster = float(N)/k
X = []
for i in range (k):
        incomeCentroid = random.uniform(20000.0, 200000.0) # pick a random income centroid between 20,000 and 200,000
        ageCentroid = random.uniform(20.0, 70.0) # pick a random age centroid between 20 and 70
for j in range(int(pointsPerCluster)):
X.append([random.normal(incomeCentroid, 10000.0), random.normal(ageCentroid, 2.0)])
X = array(X)
return X
# + [markdown] deletable=true editable=true
# We will use k-means to rediscover these clusters with unsupervised learning:
# + deletable=true editable=true
# %matplotlib inline
from sklearn.cluster import KMeans # the KMeans clustering estimator
import matplotlib.pyplot as plt # plotting functions
from sklearn.preprocessing import scale # function for scaling the data
from numpy import random, float # helpers for random data and float conversion
data = createClusteredData(100, 5) # create clustered data for 100 random people in 5 clusters
model = KMeans(n_clusters=5)
# We scale the data so that the income and age values are on comparable scales
# This is done because K-Means works best when the data has been scaled
model = model.fit(scale(data))
# We can look at the cluster assigned to each data point
print(model.labels_)
# Visualize the results
plt.figure(figsize=(8, 6))
plt.scatter(data[:,0], data[:,1], c=model.labels_.astype(float))
plt.show()
# -
# ## Summary
# - Before running K-Means, we need to scale each feature so that the comparison between features is fair.
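# - If you also want the cluster centres back in the original income/age units, one option (a sketch, not used above) is to fit a `StandardScaler` object instead of calling `scale`, because the fitted scaler can invert the transformation:
# +
# same clustering as above, but keeping the fitted scaler so that the
# centroids can be mapped back to the original units
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
scaled_data = std_scaler.fit_transform(data)
model2 = KMeans(n_clusters=5).fit(scaled_data)
print(std_scaler.inverse_transform(model2.cluster_centers_)) # approximate income/age of each centre
# -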
| 2,473 |
/BERT Summarizer.ipynb | 63bbe6d959464ebf15ff067d0eaa0821110b63f8 | [] | no_license | KabyleAI/ner | https://github.com/KabyleAI/ner | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 30,107 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import bs4
with open('data/bleak.htm', encoding='ISO-8859-1') as f:
content = f.read()
print(content[:100])
bs = bs4.BeautifulSoup(content)
paragraphs = bs.find_all('p')
# -
for i, p in enumerate(paragraphs[:50]):
print(i, p)
ARTICLE = paragraphs[37].text.replace('\n', ' ')
print(len(ARTICLE.split()))
ARTICLE
# +
from transformers import pipeline
summarizer = pipeline("summarization")
summary_text = summarizer(ARTICLE , max_length=200, min_length=30, do_sample=False)
# -
summary_text
summary_text[0]
from itertools import zip_longest
for a, b in zip_longest(ARTICLE.split(), [' ']+summary_text[0]['summary_text'].split() ):
print(a, b.upper())
# param1 : state the datatype(s) for the parameter
# param2 : state the datatype(s) for the parameter
# param3 : state the datatype(s) for the parameter and
# continue with more details if necessary on a new
# set of indented lines.
#
# Output:
# A desription of the output of the function including
# the datatype(s) of the output. Also describe special
# behaviour.
#
# Example:
# >>> function_name(1,2,3)
# 1.2345
# '''
#
# ```
#
# See these [examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) and these [examples](https://google.github.io/styleguide/pyguide.html?showone=Comments#Comments).
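# As a concrete (illustrative) instance of the template above, a small helper function might be documented like this:
def average(samples):
    '''Compute the average of a list of numbers.
    Input:
        samples : a non-empty list (or tuple) of int or float values
    Output:
        A float: the sum of the values divided by how many values there are.
    Example:
        >>> average([1,2,3])
        2.0
    '''
    return sum(samples) / len(samples)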
# ## 2. Keyword arguments
#
# When we define functions, we list the input parameters. These are called positional parameters (or positional arguments) because the position in the `def` statement determines which parameter is which.
def poly(x,y):
"Compute x + y**2."
return x + y**2
poly(1,2)
poly(2,1)
# A [keyword argument](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments) allows us to insert default values for some parameters and we call them by name and the order doesn't matter.
def greeting(first_name,last_name,salutation='Hello, '):
return "{0}{1} {2}!".format(salutation, first_name, last_name)
greeting('Patrick','Walls')
greeting('Walls','Patrick')
greeting('LeBron','James',salutation='I love you ')
# In this function, `first_name` and `last_name` are positional arguments and `saluation` is a keyword argument.
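# Positional parameters can also be passed by name; in that case their order no longer matters (an extra illustration using the same function):
greeting(last_name='James', first_name='LeBron')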
# For example, the function `pandas.read_csv` in the `pandas` package has *many* keyword arguments:
import pandas as pd
# +
# pd.read_csv?
# -
# So *many* keyword arguments! The keyword arguments I use most often are `encoding`, `skiprows` and `usecols`.
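# For instance, the cell below builds a tiny made-up CSV in memory just to illustrate `skiprows` and `usecols`; `encoding` matters when reading an actual file from disk (e.g. `encoding='latin-1'`), so it is only mentioned in a comment here.
# +
from io import StringIO
# two junk header lines followed by the real header row -- skiprows=2 jumps over them,
# and usecols keeps only the two columns we care about
csv_text = StringIO("# source: made-up example\n# downloaded: sometime\ndate,price,volume\n2020-01-01,10.5,100\n2020-01-02,11.0,150\n")
pd.read_csv(csv_text, skiprows=2, usecols=['date', 'price'])
# -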
# ## 3. Numerical integration
#
# We've already seen left, right and midpoint [Riemann sums](https://en.wikipedia.org/wiki/Riemann_sum). For example, the left Riemann sum of $f(x)$ over the interval $[a,b]$ using a partition of size $N$ is:
#
# $$
# \int_a^b f(x) \, dx \approx \sum_{k=1}^{N} f(x_{k-1}) \Delta x_k
# $$
#
# where $x_0 = a, x_1, \dots, x_N = b$ and $\Delta x_k = x_k - x_{k-1}$.
# A better approximation is the [trapezoid rule](https://en.wikipedia.org/wiki/Trapezoidal_rule):
#
# $$
# \int_a^b f(x) \, dx \approx \frac{1}{2} \sum_{k=1}^{N} (f(x_k) + f(x_{k-1})) \Delta x_k
# $$
#
# where $x_0 = a, x_1, \dots, x_N = b$ and $\Delta x_k = x_k - x_{k-1}$.
# Notice that the trapezoid rule is the average of the left and right Riemann sums!
# Let's write a function called `trapz` which takes input parameters $f$, $a$, $b$ and $N$ and returns an approximation of $\int_a^b f(x) dx$ using the trapezoid rule with a partition of length $N$ (evenly spaced points). Set default values $a=0$, $b=1$ and $N=50$.
def trapz(f,a=0,b=1,N=50):
'''Approximate integral f(x) from a to b using trapezoid rule.
The trapezoid rule used below approximates the integral \int_a^b f(x) dx
    using the sum: (1/2) \sum_{k=1}^N (f(x_k) + f(x_{k-1}))(x_k - x_{k-1})
    where x_0 = a, x_1, ... , x_N = b are evenly spaced with x_k - x_{k-1} = (b-a)/N.
Parameters
----------
f : vectorized function of a single variable
a,b : numbers defining the interval of integration [a,b]
N : integer setting the length of the partition
Returns
-------
Approximate value of integral of f(x) from a to b using the trapezoid rule
with partition of length N.
Examples
--------
>>> trapz(np.sin,a=0,b=np.pi/2,N=1000)
    0.9999997943832332
'''
    x = np.linspace(a,b,N+1) # N+1 evenly spaced points give N subintervals of width (b-a)/N
y = f(x)
Delta_x = (b - a)/N
integral = 0.5 * Delta_x * (y[1:] + y[:-1]).sum()
return integral
trapz(np.sin,a=0,b=np.pi/2,N=1000)
# Notice that we have used the NumPy style of docstring here.
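# As an informal check of the earlier remark that the trapezoid rule is the average of the left and right Riemann sums, the sketch below computes both sums on the same partition of N subintervals and compares their average with `trapz`:
# +
N = 1000
a, b = 0, np.pi/2
x = np.linspace(a,b,N+1)
y = np.sin(x)
dx = (b - a)/N
left_riemann = dx*y[:-1].sum()   # uses f(x_0),...,f(x_{N-1})
right_riemann = dx*y[1:].sum()   # uses f(x_1),...,f(x_N)
print((left_riemann + right_riemann)/2)
print(trapz(np.sin,a=a,b=b,N=N))
# -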
| 4,854 |
/code/cluster_model_poorDF.ipynb | 7d7cd31ce275df8e6450500255d3cbcc8f348bb2 | [
"CC0-1.0"
] | permissive | rwright914/Project_5 | https://github.com/rwright914/Project_5 | 0 | 0 | CC0-1.0 | 2021-02-05T20:28:19 | 2021-02-04T21:44:52 | null | Jupyter Notebook | false | false | .py | 23,476 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="NYnQhQFNI8tg"
# # How can Machine Learning help in the discovery of new drugs?
#
#
# ![dna.jpg](dna.jpg)
ZAUhAijAkiSklASQISAKa4AhJAtSCkywIkJJCglDoM08fpZZVEqHlURkkURkltUHMvq+Bo8T3OTM+jYs1spbPPcq/rdnzvqrc/NRuiw7H9bx/RFOx5rpOW9Tw4aFihk6MgilzX3tjK3urzNPzjv8Azrk6Xuhdy98phLiUxIiVREiVMRjzGYJHROZZVG6xXuEtyMM7rq4HzBkfoZYC9De4+RF62HnJWrv0prlleNL0ek2Tlusu2M/MvHJzKKOkpcdj6On22bxNWvsdtBxjc23sW8cBZ2C44Q9m3jQD2a4wmdieMLTe2OJmsktfsOMJcGNyWyKNKXy15tA0crTySHhizPI6IwP28He31tw9vBDPTRzrHJihkDFI8NQjk1AuTVI5NQD56uzateloZpD01Us5NQDghIUEIS0yFBCEJCJJCEISEIsUkFCSAKCQZAtWJJQhpCkJIMEkIglDuQQ6voMOKWrk6HJqgeGmQpqk6WlnbfdxZrSxLtXDvpVztDOXM0PagsOvj3HTu+Z6HnvU8aCheqYN8LzNTbb3M7b6/JzOT2cXg9NzmHHoempg8sLBxaWDk0yOTUQ8tLLI+F1iTz0jcmtcwX6s3TW+UnuydZZ5SwuLqbPK2KsnVS8qaqOtfxlcTuYeCqNf3mfxMB3dJlZbJ0rVVjaNjmBuewtTK3IAViE0M5NULk0GPUZMeYy0v9ByGvrpyhuc/Ta8MkqZXtZ/QoGTcv8AQTkxuYXH1IsGVtDqed6LtY83mrFbmaimLK7wFIU1LHAKEpokcmkQppUmWAkWI4yQ8sIDk0iEEQEBAksMjgEsKSEIIAQQhKBEQShQLRCEgQElKBAKaQCElGCIWJBQ271GTrZtDI6DDZYkVz7kipAHohu5i3uhSRLFtrOjnbDJy5nh4e0CRAMswaGivoaUtP1PIgqzxc3cLte/dXob+bsX8zzalOzznfanqtmkkgElw0uTAJyIaSYGuKIRTnCci6JydYoTiyhJGIpQFBEOAUKBQjWvClocAzA5tTRtlZU7GvajtDgrMThC1OUjU4mMEikYnpjGpBJv0qPW9PPlXsrJBnqOPN0RX6qadOub6LvZec1NLBost4zxzLo1IszxqQwRp6kjUiWRqQCRp6BYJACwSCRjiRGkmBpeoGOTiAioGpwUhFCJFQNTkpanAQJIwFIRNIhSQUpJCNRAKBAIDgCwpRgiFgSMNuKzF06J7mVpaKsl1ynzb0kliRc0lZ0HN7atynbp9PPHYrOz2Xc/efK+WEreVrZuY/TdPKqlin2MdUFc3ZLeqaG/LvKfATHygefOd5hemDSXEMLiQ1PRDU9MGJxhCcSrSSQiS6hxNihOJADk4CcBAipAnAQBwEaioQ1wUta9qM1r2ozARU4a4AgOAYImQByIYJWgxqQAsEiksdjxO326Mildrcy+NPGdmlwEb1fLWtqb/Kb+DerS5cu5qcpAiYGJ6haiRGB6kjUiBjErVkakaS0uQjXFABwJBIJCSMCBAgIIiQRhSQgRQgSUKSEhaQCkkpCCESQhSSEQLQWooMEQIElD/8QAMhAAAQQCAgEDAgUEAwEBAQEAAQACAxEEEgUhEwYQIhQgFSMwMVAWMjNAJDRBYCUmQv/aAAgBAQABBQL+bpUqVKlS1VKlSoLVUqC1Wq41m+LkmRztVqtVqtUGWfTeIXTchgNLiwhaIRpmNajxEzFCGOEYgFJQUrgFK5O7VWhE5yZxubIvwTk0309yZX9N8kpuA5GNHis5h5HHkb6XMTlotVqtUGqkEEKWoK0CMQRiXiWi0Wq1Wq1C1Wvfp/4LNmt07k5ytN/dnahjtMb8ng+F4sObvxlKlSpUqVLCNOc0glfsoHUpGFj/AGHvX3D2P8YP1e137d+wXf2fso5HRuZPBlty8WSFdrtWVGHOdHjx47WcvNG6KpE+OLJM+PJE/DgLjHh/GaMtFkJzypZqUkxT5HFYWDmZiwvTDdW8PxMTYIsaGNz3BfVsC8hKc56BeWukmD3OBxsiDCKn4XAlWR6aOuRxGXEX4s7EQR7dqygSg4rYqyrK7Xa7Xa7Xa7XaF3hNMeNkONSG3H2Yo1AU94aQ8vll/txh5AeirPtfvC4slyCHexUZo57b/mh9/S6XXv17Wul0rCBCsKwrCsKwrCsBYuaWCbFila7o4uK6YvyMbEbJMZHbNTJdHQ5MGSJdmMxIMWISmKFZM8Ti+aMKXIjuWZpXHYE/IPxfT+Bjoyw40Em4Xmj3/MZJLkY4TpeQenAhZEmE5seThBDMxzLPJF+FnIidFFklpizXRukyw131ETllcZgZceTwLmqbAnjT | 20,478 |
/L1_RNN.ipynb | 937843dc7ff61340d8a9fe51b4b10ad1a36553b7 | [
"MIT"
] | permissive | CISC-372/Notebook | https://github.com/CISC-372/Notebook | 4 | 13 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,213 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="NizlsIoFLzP5" colab_type="text"
# ## Setup
# + id="5LciWxRrHCm4" colab_type="code" colab={}
import tensorflow as tf
from tensorflow import keras
import numpy as np
print('Tensorflow version is', tf.__version__)
# + [markdown] id="FPdKoy1yL52r" colab_type="text"
# ## Load Dataset
# + id="BmXRq9_ZHQwf" colab_type="code" colab={}
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
print('Training data', len(train_data))
print('Testing data', len(test_data))
# + [markdown] id="7SD10H3AL9Id" colab_type="text"
# ## Data Exploration
# + id="8KiAi4qJHWvP" colab_type="code" colab={}
print('The first training sample:', train_data[0])
print('The first training sample\'s label:', train_labels[0])
# + id="eIMxoPXONiAJ" colab_type="code" colab={}
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
decode_review(train_data[0])
# + [markdown] id="rQCCePDuMGO_" colab_type="text"
# ## Padding
# + id="NpBJOZHvHneg" colab_type="code" colab={}
train_data_pd = keras.preprocessing.sequence.pad_sequences(
train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data_pd = keras.preprocessing.sequence.pad_sequences(
test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
print('Shape of padded training set', train_data_pd.shape)
print('Shape of padded testing set', test_data_pd.shape)
print(train_data_pd[0])
print(decode_review(train_data_pd[0]))
# + [markdown] id="YQ2pJaCzOq9s" colab_type="text"
# ## Experimental Protocol
# + id="jtJLJ9ysIG0j" colab_type="code" colab={}
x_validation = train_data_pd[:10000]
x_train = train_data_pd[10000:]
x_test = test_data_pd
y_validation = train_labels[:10000]
y_train = train_labels[10000:]
y_test = test_labels
# + [markdown] id="Mk9mMx-XMoSv" colab_type="text"
# ## Training
# + id="OrdCPYL_IwHp" colab_type="code" colab={}
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 25))
model.add(keras.layers.CuDNNGRU(100))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
epochs=15,
batch_size=512,
validation_data=(x_validation, y_validation),
verbose=1)
results = model.evaluate(x_test, y_test)
print(results)
| 3,144 |
/MissionToMars.ipynb | cf78a4a55e96be346cd0c3e43e4dbcbf0fc7802d | [] | no_license | ivyfong/Mission-to-Mars | https://github.com/ivyfong/Mission-to-Mars | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 20,866 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import time
# Save urls
news_url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
image_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
weather_url = 'https://twitter.com/marswxreport?lang=en'
facts_url = 'https://space-facts.com/mars/'
hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
# ## NASA Mars News
# #### Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text. Assign the text to variables called news_title and news_p that you can reference later.
# +
# Open chrome browser
executable_path = {'executable_path':'chromedriver.exe'}
browser = Browser('chrome',**executable_path,headless=False, incognito=True)
# Visit specified url
browser.visit(news_url)
# Save html from browser in object
html = browser.html
# Pass HTML string to bs
news_html = bs(html,'html.parser')
# Close chrome browser
browser.quit()
# -
# Collect information for the latest news article
latest_article = news_html.find('li',class_="slide")
latest_article
# Collect the lastest news title
news_title = latest_article.find('div',class_="content_title").text
news_title
# Collect the lastest news paragraph text
news_p = latest_article.find('div',class_="article_teaser_body").text
news_p
# ## JPL Mars Space Images - Featured Image
# #### Visit the url for JPL Featured Space Image. Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called featured_image_url. Make sure to find the image url to the full size .jpg image. Make sure to save a complete url string for this image.
# +
# Open chrome browser
executable_path = {'executable_path':'chromedriver.exe'}
browser = Browser('chrome',**executable_path,headless=False, incognito=True)
# Visit specified url
browser.visit(image_url)
# Save html from browser in object
html = browser.html
# Pass HTML string to bs
image_html = bs(html,'html.parser')
# Close chrome browser
browser.quit()
# -
# Collect the featured image href
featured_image_href = image_html.find('a',id="full_image")['data-fancybox-href']
featured_image_href
# Save the complete featured image url
featured_image_url = f"https://www.jpl.nasa.gov{featured_image_href}"
featured_image_url
# ## Mars Weather
# #### Visit the Mars Weather twitter account here and scrape the latest Mars weather tweet from the page. Save the tweet text for the weather report as a variable called mars_weather.
# +
# Open chrome browser
executable_path = {'executable_path':'chromedriver.exe'}
browser = Browser('chrome',**executable_path,headless=False, incognito=True)
# Visit specified url
browser.visit(weather_url)
# Save html from browser in object
html = browser.html
# Pass HTML string to bs
weather_html = bs(html,'html.parser')
# Close chrome browser
browser.quit()
# -
# Collect the latest Mars weather tweet
mars_weather = weather_html.find('p',class_="tweet-text").contents[0]
mars_weather
# ## Mars Facts
# #### Visit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc. Use Pandas to convert the data to a HTML table string.
# +
# Scrape the Mars facts table and save as a df
mars_facts_df = pd.read_html(facts_url)[0]
# Specify column names
mars_facts_df.columns =['Description','Value']
# Print df
mars_facts_df
# -
# Convert and save Pandas df to HTML table
mars_facts = mars_facts_df.to_html(index=False,justify='left',classes='table table-striped table-bordered')
mars_facts
# ## Mars Hemispheres
# #### Visit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres. You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image. Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title. Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.
# +
# Create list of hemisphere names
hemispheres_list = ['Cerberus Hemisphere Enhanced',
'Schiaparelli Hemisphere Enhanced',
'Syrtis Major Hemisphere Enhanced',
'Valles Marineris Hemisphere Enhanced']
# Create empty list for hemisphere names and urls
hemispheres_name_url = []
# +
# Open chrome browser
executable_path = {'executable_path':'chromedriver.exe'}
browser = Browser('chrome',**executable_path,headless=False, incognito=True)
# Visit specified url
browser.visit(hemispheres_url)
# Loop to save the hemisphere image urls
for hemisphere in hemispheres_list:
# Navigate to hemisphere image
browser.click_link_by_partial_text(hemisphere)
# Save html from browser in object
html = browser.html
# Pass HTML string to bs
hemisphere_html = bs(html,'html.parser')
# Collect and save hemisphere name
hemisphere_name = hemisphere_html.find('h2',class_="title").text
# Collect and save image url
hemisphere_image_src = hemisphere_html.find('img',class_="wide-image")['src']
hemisphere_image_url = f'https://astrogeology.usgs.gov{hemisphere_image_src}'
# Save url and name in dictionary
hemisphere_dict = {"title":hemisphere_name, "img_url":hemisphere_image_url}
# Add dictionary to list created above
hemispheres_name_url.append(dict(hemisphere_dict))
# Move back through browsing history to return to main page
browser.back()
# Close chrome browser
browser.quit()
# Print list
hemispheres_name_url
# -
# ## Save output in a dictionary
scrape_data = {"news_title":news_title,
"news_p":news_p,
"featured_image_url":featured_image_url,
"mars_weather":mars_weather,
"mars_facts":mars_facts,
"hemispheres_name_url":hemispheres_name_url}
scrape_data
| 6,529 |
/Pandas_and_CSV/Andres-Project_2_Work.ipynb | 86e30847897d5cf94cfb59b50de6744277989950 | [] | no_license | MichFig/Data-Science-Job-Search | https://github.com/MichFig/Data-Science-Job-Search | 0 | 0 | null | 2021-03-16T12:41:39 | 2021-03-16T12:37:49 | null | Jupyter Notebook | false | false | .py | 71,027 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Modules
import pandas as pd
csv_file = "cleaned_data_2.0.csv"
df = pd.read_csv(csv_file)
df.head()
# +
df["relevent_experience_bool"] = df["relevent_experience"] == "Has relevent experience"
df.head()
# +
##Introduce "Training Hours" bins
TH_bins = [0, 19, 39, 59, 79, 10000]
TH_group_names = ["<20","20-40","41-60","61-80",">80"]
##Introduce "Years of Experience" bins
YE_bins = [0, 0.99, 4, 9, 19, 10000]
YE_group_names = ["<1","1-5","6-10","11-20",">20"]
#Categorize the existing players using bins
df["Training Hours"] = pd.cut(df["training_hours"], TH_bins, labels=TH_group_names,include_lowest=True)
df["Years of Experience"] = pd.cut(df["experience"], YE_bins, labels=YE_group_names,include_lowest=True)
df.head(100)
# +
major_multiplier = {
"STEM":4,
"Business":2,
"Other":1,
}
RE_Y_training_hours_score = {
"<20":0,
"20-40":5,
"41-60":10,
"61-80":15,
">80":20
}
RE_N_training_hours_score = {
"<20":0,
"20-40":20,
"41-60":40,
"61-80":60,
">80":80
}
RE_Y_years_exp_score = {
"<1":2,
"1-5":4,
"6-10":8,
"11-20":16,
">20":20
}
RE_N_years_exp_score = {
"<1":0.5,
"1-5":1,
"6-10":2,
"11-20":4,
">20":5
}
Relevant_Experience = 0
df["Relevant Experience Score"] = Relevant_Experience
df['major_multiplier']= df['major_discipline'].map(major_multiplier)
df['RE_Y_training_hours_score']= df['Training Hours'].map(RE_Y_training_hours_score)
df['RE_N_training_hours_score']= df['Training Hours'].map(RE_N_training_hours_score)
df['RE_Y_years_exp_score']= df["Years of Experience"].map(RE_Y_years_exp_score)
df['RE_N_years_exp_score']= df["Years of Experience"].map(RE_N_years_exp_score)
df['RE_Y_training_hours_score']= df['RE_Y_training_hours_score'].astype("float")
df['RE_N_training_hours_score']= df['RE_N_training_hours_score'].astype("float")
df['RE_Y_years_exp_score']= df['RE_Y_years_exp_score'].astype("float")
df['RE_N_years_exp_score']= df['RE_N_years_exp_score'].astype("float")
#df['Relevant Experience']= df['Relevant Experience'].astype("float")
df.head(100)
#"Relevant Experience" formula calculation SCORE ELEMENTS
# +
#df.loc[df["relevent_experience_bool"] == "True", "Relevant Experience Score"] = (df["major_multiplier"] * df["RE_Y_years_exp"] + df["RE_Y_training_hours"])
#df.loc[df["relevent_experience_bool"] == "False", "Relevant Experience Score"] = (df["major_multiplier"] * df["RE_N_years_exp"] + df["RE_N_training_hours"])
#df["Relevant Experience Score"] = df["relevent_experience_bool"].apply(lambda x: (df["major_multiplier"] * df["RE_Y_years_exp"] + df["RE_Y_training_hours"]) if x=="True" else (df["major_multiplier"] * df["RE_N_years_exp"] + df["RE_N_training_hours"], axis=1))
####TEST
# df = df.assign(Relevant Experience Score=lambda x: (x["major_multiplier"] * x["RE_Y_years_exp"]) + (x["RE_Y_training_hours"]))
# def RE_Score(relevent_experience_bool,RE_Y_training_hours,RE_N_training_hours,RE_Y_years_exp,RE_N_years_exp):
# if 'True' in relevent_experience_bool:
# return (df["major_multiplier"] * df["RE_Y_years_exp"] + df["RE_Y_training_hours"])
# else 'False' in relevent_experience_bool:
# return (df["major_multiplier"] * df["RE_N_years_exp"] + df["RE_N_training_hours"])
# df["Relevant Experience Score"] = df.apply(lambda x: RE_Score(x["major_multiplier"], x["RE_Y_years_exp"], x["RE_Y_training_hours"], x["RE_N_years_exp"], x["RE_N_training_hours"], axis=1)
###TEST
# df["Relevant Experience Score"] = df.relevent_experience_bool.apply(
# lambda x: ((df["major_multiplier"] * df["RE_Y_years_exp"] + df["RE_Y_training_hours"]) if x == 'True' else (df["major_multiplier"] * df["RE_N_years_exp"] + df["RE_N_training_hours"])))
Rel_Exp_Score_Y = (df["major_multiplier"] * df["RE_Y_years_exp_score"]) + df["RE_Y_training_hours_score"]
Rel_Exp_Score_N = (df["major_multiplier"] * df["RE_N_years_exp_score"]) + df["RE_N_training_hours_score"]
df["Rel_Exp_Score_Y"] = Rel_Exp_Score_Y
df["Rel_Exp_Score_N"] = Rel_Exp_Score_N
df["Relevant Experience Score"] = Relevant_Experience
df.loc[df["relevent_experience_bool"] == True, "Relevant Experience Score"] = df["Rel_Exp_Score_Y"]
df.loc[df["relevent_experience_bool"] != True, "Relevant Experience Score"] = df["Rel_Exp_Score_N"]
df.head(100)
# -
clean_df = df[["id",
"enrollee_id",
"gender",
"relevent_experience",
"education_level",
"major_discipline",
"experience",
"Training Hours",
"Years of Experience",
"Relevant Experience Score"
]]
clean_df.head()
# Push the remade DataFrame to a new CSV file
clean_df.to_csv("DataScienceScores.csv",
encoding="utf-8", index=False, header=True)
| 5,092 |
/Clustering.ipynb | bd78dd1ad0ff7d5f773f79ca5fdef99394da18fe | [] | no_license | Tkpro/CheungGarrett | https://github.com/Tkpro/CheungGarrett | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 69,161 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment - Linear Regression
# +
# import required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
sns.set_style('whitegrid')
import warnings
warnings.filterwarnings('ignore')
# -
# ## Contents
# ***
# <a href='#I'>**I. Data Understanding**</a>
#
# <a href='#II'>**II. Data Preparation and EDA**</a>
#
# <a href='#III'>**III. Splitting dataset and Scaling**</a>
#
# <a href='#IV'>**IV. Model Building**</a>
#
# <a href='#V'>**V. Residual Analysis**</a>
#
# <a href='#VI'>**VI. Making Predictions and Model Evaluation**</a>
#
# <a href='#VII'>**VII. Interpretation**</a>
# *****
# ### I. Data Understanding<a id='I'></a>
# +
# import data into a dataframe
data = pd.read_csv('CarPrice_Assignment.csv')
# let's have a preliminary look at the dataset
data.head()
# -
data.info(verbose = True, null_counts = True)
# There are 16 numerical and 10 non-numerical variables in the raw form of this dataset. Also, as we can notice, each variable has 205 non-null entries, which is the total number of rows. This suggests that there are no null values in the dataset.
#lets confirm the number of null values in the dataset
print('Number of null values in the dataset = ' + str(data.isnull().sum().sum()))
data.describe()
# The variables have very different ranges across the dataset, so scaling will be required.
# Lets have a look at the number of unique values that each column has.
data.nunique().sort_values(ascending=False)
# 1. There are 205 unique car_IDs. This suggests that there are no duplicate entries in our dataset.
# 2. Some of the car names are repeated. This suggests that the same car with multiple generations may exist in the dataset.
# ### II. Data Preparation and EDA<a id='II'></a>
# Lets understand each variable individually and prepare the dataset. First we'll take care of all the categorical variables.
# ### 1. car_ID
# As there are no duplicates in the dataset, we can drop car_ID.
data = data.drop(['car_ID'], axis=1)
data.head()
# ***Create function to plot countplot***
def make_countplot(dataframe, variable):
fig, ax1 = plt.subplots(1,1,figsize = (6,6))
sns.countplot(x=variable, data = dataframe, ax =ax1, order=dataframe[variable].value_counts().index)
for nr, p in enumerate(ax1.patches):
ax1.text(p.get_x() + p.get_width()*0.5, p.get_y() + p.get_height(), str(p.get_height()), fontsize=10,\
color='black', ha='center', va='bottom')
plt.xticks(rotation=60)
plt.show()
# ### 2. symboling
# lets first have a look at the unique values of this variable
make_countplot(data, 'symboling')
# Because the variable ranges from negative to positive values, the result will be difficult to interpret. To make all values positive, we'll shift the variable by 3, i.e. add 3.
data['symboling'] = data['symboling'] + 3
data['symboling'].unique()
# By doing this we have applied a constant shift to the variable, which does not affect the regression but improves interpretability.
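# A quick illustration of that point (an extra sketch, not part of the assignment): shifting a predictor by a constant changes only the fitted intercept, never the slope.
# +
# least-squares fit on made-up data, before and after adding 3 to the predictor
rng = np.random.RandomState(0)
x_demo = rng.rand(50)
y_demo = 3*x_demo + rng.rand(50)
print(np.polyfit(x_demo, y_demo, 1))     # [slope, intercept]
print(np.polyfit(x_demo + 3, y_demo, 1)) # same slope, different intercept
# -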
# ***Before proceeding lets make a function to create dummy variables, concatinating with main dataset, and dropping original variables***
# +
# the dummy variable for the class with the lowest count will be dropped
def make_dummies(dataframe, variable):
dummy_to_drop = data[variable].value_counts().index[-1]
dummy_data = pd.get_dummies(dataframe[variable], prefix=variable)
dataframe = pd.concat([data, dummy_data], axis = 1)
dataframe = dataframe.drop([variable], axis = 1)
dataframe = dataframe.drop([variable + '_' + dummy_to_drop], axis = 1)
return dataframe
# -
# ### 3. CarName
# lets first have a look at the values of this variable
data['CarName'].head()
# The variable consists of 2 parts, the car's company and the car's model. We will consider only the car's company for regression.
# lets create a new variable CarCompany in the dataset
data['CarCompany'] = data['CarName'].str.split(' ', n=1, expand=True)[0]
data['CarCompany'].head()
# lets have a look at the unique values of this variable
data['CarCompany'].unique()
# As we can notice, there are multiple errors in the name of car company.
# +
# lets correct the names by replacing the errors with standard names
carcompany_correction_map = {'alfa-romero': 'alfa-romeo', 'maxda': 'mazda', 'Nissan': 'nissan',\
'porcshce': 'porsche','toyouta': 'toyota', 'vokswagen': 'volkswagen',\
'vw': 'volkswagen'}
data = data.replace({'CarCompany': carcompany_correction_map})
# lets have a look at the unique values of this variable
data['CarCompany'].unique()
# -
# lets have a look at the count of each company the data
carcompany_value_count = data['CarCompany'].value_counts()
make_countplot(data, 'CarCompany')
# Even after correcting the car company names, there are still 20+ companies. These, if converted directly to dummy variables, will create a large number of columns. Let's combine these companies based on the median car price that each company offers.
carCompany_price = data.groupby(by='CarCompany')['price'].median().sort_values(ascending=False)
carCompany_price
# dividing and creating intervals using pd.cut
cut = pd.cut(carCompany_price, 3)
cut
# getting the intervals
carCompany_price_bins = cut.dtypes.categories
carCompany_price_bins
# Now, let's apply the derived intervals to the price column to create a new column called `CarClass` that represents whether the car is from a high-range company (S), a medium-range company (A) or a low-range company (B).
bins = [0, carCompany_price_bins[0].right, carCompany_price_bins[1].right, np.inf]
data['CarClass'] = pd.cut(data['price'], bins=bins, labels=['B','A','S'])
# Lets create a countplot for our new CarClass column
# lets have a look at the counts again
make_countplot(data, 'CarClass')
# +
# drop CarName and CarCompany as now they are redundant variables
data = data.drop(['CarName','CarCompany'], axis=1)
# make dummy variables out of CarClass column
data = make_dummies(data, 'CarClass')
# lets have a look at the data again
data.head()
# -
# ### 4. fueltype
# lets have a look at the unique values of this variable
make_countplot(data, 'fueltype')
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'fueltype')
# lets have a look at the data again
data.head()
# -
# ### 5. aspiration
# lets have a look at the unique values of this variable
make_countplot(data, 'aspiration')
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'aspiration')
# lets have a look at the data again
data.head()
# -
# ### 6. doornumber
# lets have a look at the unique values of this variable
make_countplot(data, 'doornumber')
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'doornumber')
# lets have a look at the data again
data.head()
# -
# ### 7. carbody
# let's have a look at the count of each car body type in the data
carbody_value_count = data['carbody'].value_counts()
make_countplot(data, 'carbody')
# We consider a category significant only if it accounts for at least 5% of the dataset: `5% of 205 ≈ 10`
# +
# combining categories with less than threshold value
data['carbody'] = pd.Series(np.where(data['carbody'].isin(carbody_value_count.index[carbody_value_count <= 10]),\
'other', data['carbody']))
# lets have a look at the counts again
make_countplot(data, 'carbody')
# -
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'carbody')
# lets have a look at the data again
data.head()
# -
# ### 8. drivewheel
# lets have a look at the unique values of this variable
make_countplot(data, 'drivewheel')
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'drivewheel')
# lets have a look at the data again
data.head()
# -
# ### 9. enginelocation
# lets have a look at the unique values of this variable
make_countplot(data, 'enginelocation')
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'enginelocation')
# lets have a look at the data again
data.head()
# -
# ### 10. enginetype
# lets have a look at the unique values of this variable
enginetype_value_count = data['enginetype'].value_counts()
make_countplot(data, 'enginetype')
# +
# combining categories with less than threshold value
data['enginetype'] = pd.Series(np.where(data['enginetype'].isin(enginetype_value_count.index[enginetype_value_count <= 10]),\
'other', data['enginetype']))
# lets have a look at the counts again
make_countplot(data, 'enginetype')
# -
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'enginetype')
# lets have a look at the data again
data.head()
# -
# ### 11. cylindernumber
# lets have a look at the unique values of this variable
make_countplot(data, 'cylindernumber')
# cylindernumber is an ordinal categorical variable that represents numbers, so it can be encoded numerically. This encoding should preserve the ordinal nature and order of the variable.
# +
cylindernumber_map = {'two': 0, 'three': 1, 'four': 2, 'five': 3, 'six': 4, 'eight': 5, 'twelve': 6}
data = data.replace({'cylindernumber': cylindernumber_map})
# lets have a look at count plot again
make_countplot(data, 'cylindernumber')
# -
data['cylindernumber'].describe()
# ### 12. fuelsystem
# lets have a look at the unique values of this variable
fuelsystem_value_count = data['fuelsystem'].value_counts()
make_countplot(data, 'fuelsystem')
# +
# combining categories with less than threshold value
data['fuelsystem'] = pd.Series(np.where(data['fuelsystem'].isin(fuelsystem_value_count.index[fuelsystem_value_count <= 10]),\
'other', data['fuelsystem']))
# lets have a look at the counts again
make_countplot(data, 'fuelsystem')
# -
# Replace this variable with its dummy values.
# +
data = make_dummies(data, 'fuelsystem')
# lets have a look at the data again
data.head()
# -
# Lets have a look at the data info after treating all the categorical variables
data.info()
# Now lets have a look at numeric variables
# +
# ploting distribution plot for each variable
numerical_data = data.select_dtypes(include=['int64', 'float64'])
for col in numerical_data.columns:
sns.distplot(numerical_data[col])
plt.show()
# -
# All the numeric variables are approximately normally distributed. Also, none of them have outliers that need to be treated.
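# As a quick numerical cross-check of this visual claim (an extra sketch, not part of the original workflow), an IQR screen counts how many points per column fall outside the usual 1.5*IQR whiskers; small counts back up the decision to leave the variables untreated.
# +
# count values lying outside the 1.5*IQR whiskers for each numeric column
q1 = numerical_data.quantile(0.25)
q3 = numerical_data.quantile(0.75)
iqr = q3 - q1
outlier_mask = (numerical_data < (q1 - 1.5*iqr)) | (numerical_data > (q3 + 1.5*iqr))
outlier_mask.sum().sort_values(ascending=False)
# -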
# ### III. Splitting dataset and Scaling<a id='III'></a>
# Splitting into training and testing sets
# +
# import required libraries
from sklearn.model_selection import train_test_split
# splitting data into train and test sets with train size as 75% of the original data
np.random.seed(0)
data_train, data_test = train_test_split(data, train_size = 0.75, random_state = 100)
print('Shape of training data: ' + str(data_train.shape))
print('Shape of testing data: ' + str(data_test.shape))
# -
# Rescaling numeric features. We'll use MinMaxScaler.
# import required libraries and create a scaler object
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# +
# Apply scaler() to all the columns except the 'dummy' variables
num_vars = data.select_dtypes(include=['int64', 'float64']).columns
data_train[num_vars] = scaler.fit_transform(data_train[num_vars])
# lets have a look at the data
data_train.head()
# -
# lets have a look at data description after scaling
data_train.describe()
# ### IV. Model Building<a id='IV'></a>
# To start with, lets have a look at correlation of price with other variables.
price_corr = pd.DataFrame(data.corr()['price'].sort_values(ascending=False))
plt.figure(figsize = (5, 20))
sns.heatmap(price_corr, cmap='YlGnBu', annot=True)
plt.show()
# Many variables such as eniginesize, curbweight, horsepower, carwidth etc. are highly correlated with price. Lets have a look at the visual representation of the same.
# +
# take all independent variables in decreasing order of collinearity
independents = price_corr.index[price_corr.index != 'price']
# plot independent variables against target
for i in range(independents.size//5):
sns.pairplot(data_train, y_vars='price', x_vars=independents[i*5:i*5+5], kind='reg')
plt.show()
# -
# Again, it is evident from these graphs that there is a clear linear relationship between price and at least the top 10 independent variables.
# Let's also explore the correlation between each pair of variables and check if there are any clusters present based on that correlation.
sns.clustermap(data.corr(), cmap='YlGnBu', figsize=(13, 13))
# There are clearly multiple clusters present in the correlation matrix. The major ones are as follows -
# 1. Cluster 1
# - CarClass_A
# - boreratio
# - drivewheel_rwd
# - wheelbase
# - carlength
# - enginesize
# - price
# - carwidth
# - curbweight
# - cylindernumber
# - horsepower
# 2. Cluster 2
# - enginetype_ohc
# - fuelsystem_2bbl
# - citympg
# - highwaympg
# - CarClass_B
# - drivewheel_fwd
#
# Both clusters have high positive intra-correlation and high negative inter-correlation. This can be understood from the price perspective: Cluster 1 has a positive correlation with price and Cluster 2 a negative one.
#
# Also, a few variable pairs are highly correlated, such as compressionratio and fuelsystem_idi, citympg and highwaympg, etc.
# These correlations are expected based on domain knowledge.
# ***Recursive Feature Elimination (RFE)***
# We use RFE here because the number of features is high and RFE removes features automatically based on the fitted model's coefficient rankings; we then refine the selection manually using significance (p-values) and the Variance Inflation Factor (VIF). We will be using the **LinearRegression function from SciKit Learn** for its compatibility with RFE (which is a utility from sklearn).
# Importing RFE and LinearRegression
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
# separating independent variables (X) and the target variable (y)
y_train = data_train.pop('price')
X_train = data_train
# +
# Running RFE with the output number of the variable equal to 20
lmRFE = LinearRegression()
lmRFE.fit(X_train, y_train)
# running RFE
rfe = RFE(lmRFE, 20)
rfe = rfe.fit(X_train, y_train)
# -
rfe_support = pd.DataFrame({'Variable': X_train.columns,'RFE_Support': rfe.support_,'RFE_ranking': rfe.ranking_})
rfe_support = rfe_support.sort_values('RFE_ranking').reset_index(drop=True)
rfe_support
# Lets have a look at the variables that were eliminated by RFE.
# variables that are not supported by RFE
print('Non supported variables: ' + str(list(X_train.columns[~rfe.support_])))
# RFE eliminated variables that were not significant for our regression model. Some, such as symboling, fueltype_gas, carheight etc., are not significantly correlated with price (as evident from the graph earlier); others, such as horsepower, fuelsystem_idi etc., are simply redundant due to multicollinearity.
# Now we have the top 20 variables supported by RFE. Let's do manual feature elimination based on significance and VIF.
col = X_train.columns[rfe.support_]
# ### Building model using statsmodel, for the detailed statistics
# ***Model 1***
# +
# Creating X_train dataframe with RFE selected variables
X_train_rfe = X_train[col]
# Adding a constant variable
import statsmodels.api as sm
X_train_rfe = sm.add_constant(X_train_rfe)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# The `R-squared` of the first model came out to be `0.961`, but many of the variables are not significant and there may be multicollinearity among them. We'll eliminate variables until we get a stable model. Initially we'll consider the p-value for feature elimination, and later we'll consider VIF too.
# The variable with the highest p-value is `wheelbase`. As we can notice from the pairplot made earlier, wheelbase does not have a strong linear relation with price; it is rather random. Let's remove that variable and run the model again.
# ***Model 2***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop('wheelbase', axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# R-squared didn't change after dropping wheelbase, and the significance of the other variables has also increased. The next variable with the highest p-value is `compressionratio`, and like wheelbase its relation with price is weak. Let's drop it and observe the difference.
# ***Model 3***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['compressionratio'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# R-squared didn't change after dropping compressionratio. The next variable with the highest p-value is `peakrpm`, and like compressionratio its relation with price is weak. Let's drop it and observe the difference.
# ***Model 4***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['peakrpm'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# Next variable with highest p-value is `fuelsystem_other`. Lets drop it and observe the difference.
# ***Model 5***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['fuelsystem_other'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# After this step, before moving forward, lets also check the VIF for each variable.
# +
# Calculate the VIFs for the new model
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
X = X_train_rfe
vif['Features'] = X.columns
vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False).reset_index(drop=True)[1:]
vif
# -
# Again, the R-squared value of the model is not affected much. Also, citympg and highwaympg are insignificant: high-performance, costly cars have low mileage, and price decreases with increasing mileage up to a certain point but then levels off, and family cars are also designed to have high mileage. citympg and highwaympg may have a higher-order relationship with price. It is also evident from their VIF that they are causing multicollinearity. Let's drop them and re-run the model.
# ***Model 6***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['citympg','highwaympg'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# +
# Calculate the VIFs for the new model
vif = pd.DataFrame()
# leaving const out of this calculation
X = X_train_rfe
vif['Features'] = X.columns
vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False).reset_index(drop=True)[1:]
vif
# -
# `carlength` has both a high p-value and a high VIF. Let's drop it and run the model again.
# ***Model 7***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['carlength'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# +
# Calculate the VIFs for the new model
vif = pd.DataFrame()
# leaving const out of this calculation
X = X_train_rfe
vif['Features'] = X.columns
vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False).reset_index(drop=True)[1:]
vif
# -
# `stroke` is borderline as far as the p-value is concerned and its VIF is also in an acceptable range, but as per the pairplot made earlier the relation between price and stroke is not significant. Let's drop it and re-run the model.
# ***Model 8***
# +
# Drop the variable with highest p-value
X_train_rfe = X_train_rfe.drop(['stroke'], axis=1)
# Running the linear model
lm = sm.OLS(y_train,X_train_rfe).fit()
#Let's see the summary of our linear model
print(lm.summary())
# -
# Calculate the VIFs for the new model
vif = pd.DataFrame()
X = X_train_rfe
vif['Features'] = X.columns
vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False).reset_index(drop=True)[1:]
vif
# Now as we can notice, both p-values and VIF of independent variables are in acceptable ranges. Also, statistics such as R-squared, Adjusted R-squared, F-statistic, AIC and BIC are in acceptable ranges. Lets move on to residual analysis to understand our model better.
# ### V. Residual Analysis<a id='V'></a>
# So, now, to check whether the error terms are also normally distributed (which is, in fact, one of the major assumptions of linear regression), let us plot the histogram of the error terms and see what it looks like.
# predicting target variable based on our latest model(lm) and our latest independent variables(X_train_rfe)
y_train_pred = lm.predict(X_train_rfe)
# Plot the histogram of the error terms
fig = plt.figure()
res = (y_train - y_train_pred)
sns.distplot(res, bins = 20) # Plot heading
plt.xlabel('Error') # X-label
# We can observe that the error terms are ***normally distributed*** with a slight outlier near the right tail. It is also ***centered around zero***. This means that we can safely derive interpretations from our model.
# Lets also plot the relation between the predicted variable and residuals.
plt.figure()
sns.regplot(x=y_train_pred,y=res)
plt.xlabel('Predicted value', fontsize=16)
plt.ylabel('Residuals', fontsize=16)
plt.title('Predicted value vs Residual Plot', fontsize=20)
plt.show()
# We can observe the following from the above plot -
# 1. Mean of Residuals is zero
# 2. Scatter of residuals is completely random and also Regression line coincides with y = 0 and hence there is no relation/pattern in the residuals i.e. most of it is explained by our model. ***Error terms are independent of each other.***
# 3. Spread of residuals is mostly contained between 0.10 and -0.10 with a couple of exceptions. This suggests that the ***variance of error terms is constant i.e. homoscedastic***
# ### VI. Making Predictions and Model Evaluation<a id='VI'></a>
# Scale the test data using the scaler fitted on train data
# applying only transform on test dataset
data_test[num_vars] = scaler.transform(data_test[num_vars])
# Dividing test data into X_test and y_test
y_test = data_test.pop('price')
X_test = data_test
# Add constants and keep only those variables in test data that were chosen during the model building step
# +
# add constant
X_test = sm.add_constant(X_test)
# filtering columns
X_test = X_test[X_train_rfe.columns]
# -
# Make prediction using the above built X_test
y_test_pred = lm.predict(X_test)
# Let's evaluate the model's accuracy by plotting the actual y_test against the predicted y_test_pred
# +
# Plotting y_test and y_pred to understand the spread.
fig = plt.figure()
ax = sns.regplot(y_test,y_test_pred)
# baseline of x = y
x = np.arange(0,1.1,0.01)
y = x
plt.plot(x,y,'r-')
plt.title('y_test vs y_test_pred', fontsize=20) # Plot heading
plt.xlabel('y_test', fontsize=16) # X-label
plt.ylabel('y_test_pred', fontsize=16) # Y-label
plt.show()
# -
# As we can observe, the regression line for y_test_pred vs y_test is very close to our baseline of x = y. All the data points are also close to the baseline, hence the difference between y_test_pred and y_test is small.
# ### Model metrics on test data
# +
# import required libraries
from sklearn.metrics import r2_score, explained_variance_score, max_error, mean_absolute_error, \
mean_squared_error, mean_squared_log_error, median_absolute_error
metrics = [explained_variance_score(y_test, y_test_pred), max_error(y_test, y_test_pred), \
mean_absolute_error(y_test, y_test_pred), mean_squared_error(y_test, y_test_pred), \
mean_squared_log_error(y_test, y_test_pred), median_absolute_error(y_test, y_test_pred), \
r2_score(y_test, y_test_pred)]
index = ['Explained Variance Score', 'Max Error', 'Mean Absolute Error', 'Mean Squared Error', \
'Mean Squared Log Error', 'Median Absolute Error', 'r2 Score']
metricsdf = pd.DataFrame({'Metrics': metrics}, index=index)
metricsdf
# -
# ### VII. Interpretation<a id='VII'></a>
# With r2 score of 0.95 on train data and 0.91 on test data, our model is:
#
# <hr>
# $ price = 0.6266 + 0.3583 \times curbweight + 0.1631 \times enginesize - 0.4078 \times CarClass\_B - 0.2483 \times CarClass\_A - 0.0356 \times carbody\_wagon - 0.1767 \times enginelocation\_front - 0.1144 \times enginetype\_dohc - 0.1404 \times enginetype\_l - 0.0764 \times enginetype\_ohc - 0.0931 \times enginetype\_ohcf - 0.1322 \times enginetype\_ohcv + 0.0455 \times fuelsystem\_mpfi $
# <hr>
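# As a quick illustration of how to read this equation, the sketch below plugs in made-up, already MinMax-scaled feature values for a hypothetical class-A car (all other dummies set to 0); it is only a reading aid, not a prediction from the fitted statsmodels object.
# +
curbweight, enginesize = 0.5, 0.4
predicted_scaled_price = (0.6266 + 0.3583 * curbweight + 0.1631 * enginesize
                          - 0.2483 * 1    # CarClass_A
                          - 0.1767 * 1    # enginelocation_front
                          - 0.0764 * 1    # enginetype_ohc
                          + 0.0455 * 1)   # fuelsystem_mpfi
print(predicted_scaled_price)             # about 0.415 on the scaled [0, 1] price axis
# -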
# After creating dummy variables, coefficients of variables derived from the same column can be interpreted relative to each other and considering the eliminated dummy variable as the base.
# Following is the interpretation of model:
# 1. `curbweight` has a positive coefficient and with increase in curbweight, price increases. curbweight is defined as the weight of a car without occupants or baggage. curbweight can also indirectly be influenced by:
# - wheelbase(distance between the centers of the front and rear wheels) which has high correlation with curbweight.
# - carlength which has high correlation with curbweight.
# - carwidth which has high correlation with curbweight.
# - carbody
# <hr>
# 2. `enginesize` has a positive coefficient and with increase in enginesize, price increases. enginesize can also indirectly be influenced by:
# - boreratio(the ratio between cylinder bore diameter and piston stroke) which has high correlation with enginesize.
# - stroke(the length that piston travels when moving from bottom position to the top position).
# - compressionratio(the ratio of the maximum to minimum volume in the cylinder).
# - horsepower which has high correlation with enginesize.
# - cylindernumber which has high positive correlation with enginesize.
# <hr>
# 3. `CarClass_B` has a negative coefficient. It means that it attracts a lower price compared to cars of class S, which was dropped. This inference is aligned with the fact that cars of class S are from companies like Jaguar, Buick and Porsche, which produce costly cars, whereas cars of class B are from companies like Honda, Nissan and Toyota, which produce everyday family cars.
# <hr>
# 4. `CarClass_A` has a negative coefficient, but its absolute value is lower than that of CarClass_B. It means cars of class A (BMW, Audi, Peugeot) are costlier than cars of class B but still attract a lower price than cars of class S.
# <hr>
# 5. `carbody_wagon` has a negative coefficient. It means that it attracts a lower price compared to cars with a hardtop or convertible body. This is aligned with the fact that high-end sports/luxury cars are hardtops or convertibles. carbody_wagon being significant also indicates that wagons are not preferred in the market, whereas carbody types such as sedan and hatchback do not influence price.
# <hr>
# 6. `enginelocation_front` has a negative coefficient. It means that it attracts a lower price compared to cars with a rear engine location. Although there are very few cars with a rear engine location in the dataset, it plays a large role and is aligned with the fact that high-performance cars have their engine at the rear end to provide higher stability; these cars are also costlier than usual.
# <hr>
# 7. `enginetype` influence the price in the following order
# - (rotor, dohcv) > l > ohcv > dohc > ohcf > ohc
# <hr>
# 8. `fuelsystem_mpfi` has a positive coefficient, and in the presence of fuelsystem_mpfi the price increases. This is aligned with the fact that it is the latest and most sophisticated fuel system, designed to inject the most appropriate amount of fuel.
# ***Other important characteristics observed while creating the model***
#
# 1. **Manufacturer** of the car i.e. company plays an important role in determining the price of the car as it represents the complete build style starting from engine, carbody, fuelsystem, horsepower etc.
# 2. **cylindernumber and horsepower** also have a high positive correlation with price, which is aligned with the fact that these variables directly affect performance, and as performance increases, price increases. These parameters, however, were largely encapsulated by enginesize.
# 3. **citympg and highwaympg** have a strong relation with price, but it is not linear, as is evident from their scatterplots: price decreases with mileage up to a certain point, and then family cars with high mileage get cheaper. A higher-degree polynomial can fit their relation better, as sketched below.
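# As a hedged illustration of that last point, here is a minimal degree-2 fit of price against citympg (the degree is an arbitrary choice, and `data` is the unscaled frame used earlier):
# +
poly_coeffs = np.polyfit(data['citympg'], data['price'], deg=2)
poly = np.poly1d(poly_coeffs)
mpg_grid = np.linspace(data['citympg'].min(), data['citympg'].max(), 100)
sns.scatterplot(x=data['citympg'], y=data['price'])
plt.plot(mpg_grid, poly(mpg_grid), 'r-')
plt.show()
# -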
| 29,662 |
/Heaps.ipynb | 030d850ab0ec65e20bc22cb853b69b6e99dc3eff | [] | no_license | sagarviveksahu/test | https://github.com/sagarviveksahu/test | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 25,573 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Heaps
# ## Overview
#
# For this assignment you will start by modifying the heap data structure implemented in class to allow it to keep its elements sorted by an arbitrary priority (identified by a `key` function), then use the augmented heap to efficiently compute the running median of a set of numbers.
# ## 1. Augmenting the Heap with a `key` function
#
# The heap implementation covered in class is for a so-called "max-heap", i.e., one where elements are organized such that the one with the maximum value can be efficiently extracted.
#
# This limits our usage of the data structure, however. Our heap can currently only accommodate elements that have a natural ordering (i.e., they can be compared using the '`>`' and '`<`' operators as used in the implementation), and there's no way to order elements based on some partial or computed property.
#
# To make our heap more flexible, you'll update it to allow a `key` function to be passed to its initializer. This function will be used to extract a value from each element added to the heap; these values, in turn, will be used to order the elements.
#
# We can now easily create heaps with different semantics, e.g.,
#
# - `Heap(len)` will prioritize elements based on their length (e.g., applicable to strings, sequences, etc.)
# - `Heap(lambda x: -x)` can function as a *min-heap* for numbers
# - `Heap(lambda x: x.prop)` will prioritize elements based on their `prop` attribute
#
# If no `key` function is provided, the default max-heap behavior should be used โ the "`lambda x:x`" default value for the `__init__` method does just that.
#
# You will, at the very least, need to update the `_heapify` and `add` methods, below, to complete this assignment. (Note, also, that `pop_max` has been renamed `pop`, while `max` has been renamed `peek`, to reflect their more general nature.)
# + nbgrader={"grade": false, "grade_id": "heap", "locked": false, "schema_version": 1, "solution": true}
#<GRADED>
class Heap:
def __init__(self, key=lambda x:x):
self.data = []
self.key = key
@staticmethod
def _parent(idx):
return (idx-1)//2
@staticmethod
def _left(idx):
return idx*2+1
@staticmethod
def _right(idx):
return idx*2+2
def heapify(self, idx=0):
while True:
l = Heap._left(idx)
r = Heap._right(idx)
maxidx = idx
            if l < len(self) and self.key(self.data[l]) > self.key(self.data[idx]):
maxidx = l
            if r < len(self) and self.key(self.data[r]) > self.key(self.data[maxidx]):
maxidx = r
if maxidx != idx:
self.data[idx], self.data[maxidx] = self.data[maxidx], self.data[idx]
idx = maxidx
else:
break
def add(self, x):
self.data.append(x)
i = len(self.data) - 1
p = Heap._parent(i)
        while i > 0 and self.key(self.data[p]) < self.key(self.data[i]):
self.data[p], self.data[i] = self.data[i], self.data[p]
i = p
p = Heap._parent(i)
def peek(self):
return self.data[0]
def pop(self):
ret = self.data[0]
self.data[0] = self.data[len(self.data)-1]
del self.data[len(self.data)-1]
self.heapify()
return ret
def __bool__(self):
return len(self.data) > 0
def __len__(self):
return len(self.data)
def __repr__(self):
return repr(self.data)
#</GRADED>
# + nbgrader={"grade": true, "grade_id": "heap_test_1", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# (1 point)
from unittest import TestCase
import random
tc = TestCase()
h = Heap()
random.seed(0)
for _ in range(10):
h.add(random.randrange(100))
tc.assertEqual(h.data, [97, 61, 65, 49, 51, 53, 62, 5, 38, 33])
# + nbgrader={"grade": true, "grade_id": "heap_test_2", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# (1 point)
from unittest import TestCase
import random
tc = TestCase()
h = Heap(lambda x:-x)
random.seed(0)
for _ in range(10):
h.add(random.randrange(100))
tc.assertEqual(h.data, [5, 33, 53, 38, 49, 65, 62, 97, 51, 61])
# + nbgrader={"grade": true, "grade_id": "heap_test_3", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# (2 points)
from unittest import TestCase
import random
tc = TestCase()
h = Heap(lambda s:len(s))
h.add('hello')
h.add('hi')
h.add('abracadabra')
h.add('supercalifragilisticexpialidocious')
h.add('0')
tc.assertEqual(h.data,
['supercalifragilisticexpialidocious', 'abracadabra', 'hello', 'hi', '0'])
# + nbgrader={"grade": true, "grade_id": "heap_test_4", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# (2 points)
from unittest import TestCase
import random
tc = TestCase()
h = Heap()
random.seed(0)
lst = list(range(-1000, 1000))
random.shuffle(lst)
for x in lst:
h.add(x)
for x in range(999, -1000, -1):
tc.assertEqual(x, h.pop())
# + nbgrader={"grade": true, "grade_id": "heap_test_5", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# (2 points)
from unittest import TestCase
import random
tc = TestCase()
h = Heap(key=lambda x:abs(x))
random.seed(0)
lst = list(range(-1000, 1000, 3))
random.shuffle(lst)
for x in lst:
h.add(x)
for x in reversed(sorted(range(-1000, 1000, 3), key=lambda x:abs(x))):
tc.assertEqual(x, h.pop())
# -
# ## 2. Computing the Running Median
#
# The median of a series of numbers is simply the middle term if ordered by magnitude, or, if there is no middle term, the average of the two middle terms. E.g., the median of the series [3, 1, 9, 25, 12] is **9**, and the median of the series [8, 4, 11, 18] is **9.5**.
#
# If we are in the process of accumulating numerical data, it is useful to be able to compute the *running median*, where, as each new data point is encountered, an updated median is computed. This should be done, of course, as efficiently as possible.
#
# The following function demonstrates a naive way of computing the running medians based on the series passed in as an iterable.
#<GRADED>
def running_medians_naive(iterable):
values = []
medians = []
for i, x in enumerate(iterable):
values.append(x)
values.sort()
if i%2 == 0:
medians.append(values[i//2])
else:
medians.append((values[i//2] + values[i//2+1]) / 2)
return medians
#</GRADED>
running_medians_naive([3, 1, 9, 25, 12])
running_medians_naive([8, 4, 11, 18])
# Note that the function keeps track of all the values encountered during the iteration and uses them to compute the running medians, which are returned at the end as a list. The final running median, naturally, is simply the median of the entire series.
#
# Unfortunately, because the function sorts the list of values during every iteration it is incredibly inefficient. Your job is to implement a version that computes each running median in O(log N) time using, of course, the heap data structure!
#
# ### Hints
#
# - You will need to use two heaps for your solution: one min-heap, and one max-heap.
# - The min-heap should be used to keep track of all values *greater than* the most recent running median, and the max-heap for all values *less than* the most recent running median; this way, the median will lie between the minimum value on the min-heap and the maximum value on the max-heap (both of which can be efficiently extracted)
# - In addition, the difference between the number of values stored in the min-heap and max-heap must never exceed 1 (to ensure the median is being computed). This can be taken care of by intelligently `pop`-ping/`add`-ing elements between the two heaps. One way to put these hints together is sketched below.
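# A minimal sketch of how these hints fit together (illustrative only; it is not the graded solution below, and it relies on the augmented `Heap` defined above):
# +
def running_medians_sketch(iterable):
    lo = Heap()               # max-heap: values at or below the current median
    hi = Heap(lambda x: -x)   # min-heap via a negated key: values above the current median
    medians = []
    for x in iterable:
        if lo and x > lo.peek():
            hi.add(x)
        else:
            lo.add(x)
        if len(lo) > len(hi) + 1:      # rebalance so the sizes differ by at most 1
            hi.add(lo.pop())
        elif len(hi) > len(lo) + 1:
            lo.add(hi.pop())
        if len(lo) > len(hi):
            medians.append(lo.peek())
        elif len(hi) > len(lo):
            medians.append(hi.peek())
        else:
            medians.append((lo.peek() + hi.peek()) / 2)
    return medians
# -
# Each element triggers at most a few `add`/`pop` calls, so every running median costs O(log N).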
# + nbgrader={"grade": false, "grade_id": "running_median", "locked": false, "schema_version": 1, "solution": true}
#<GRADED>
def running_medians(iterable):
return
#</GRADED>
# + nbgrader={"grade": true, "grade_id": "running_median_1", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# (2 points)
from unittest import TestCase
tc = TestCase()
tc.assertEqual([3, 2.0, 3, 6.0, 9], running_medians([3, 1, 9, 25, 12]))
# + nbgrader={"grade": true, "grade_id": "running_median_2", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# (2 points)
import random
from unittest import TestCase
tc = TestCase()
vals = [random.randrange(10000) for _ in range(1000)]
tc.assertEqual(running_medians_naive(vals), running_medians(vals))
# + nbgrader={"grade": true, "grade_id": "running_median_3", "locked": true, "points": 4, "schema_version": 1, "solution": false}
# (4 points) MUST COMPLETE IN UNDER 10 seconds!
import random
from unittest import TestCase
tc = TestCase()
vals = [random.randrange(100000) for _ in range(100001)]
m_mid = sorted(vals[:50001])[50001//2]
m_final = sorted(vals)[len(vals)//2]
running = running_medians(vals)
tc.assertEqual(m_mid, running[50000])
tc.assertEqual(m_final, running[-1])
| 9,297 |
/notebooks/learning_supervised/lib_lightgbm/notebook-lightgbm-classification.ipynb | 58111fb2249eaf41b35aa78796d05a0fad9f2f25 | [
"MIT"
] | permissive | jmquintana79/utilsDS | https://github.com/jmquintana79/utilsDS | 0 | 1 | MIT | 2023-07-06T23:03:58 | 2022-01-07T16:16:24 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 5,837 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: prediction
# language: python
# name: prediction
# ---
# # Supervised Learning with Light Gradient Boosting - Classification
#
# The aim here is to lay the groundwork for a strong, general-purpose winning algorithm, so that more effort can be focused on:
# - feature engineering
# - interpretability (SHAP)
# - model evaluation (a simple train-test split is not enough)
#
# Light Gradient Boosting is the chosen model for the following reasons:
# - Gradient Boosting usually performs well on many types of problems. On Kaggle, the ranking of winning models is: 1-Keras, 2-LightGBM, 3-GBoost.
# - Since it is based on decision trees:
#     - It is **immune to missing values**, so imputation is not a concern (just remember NOT to allow missing values in the test set if there are none in the training set).
#     - **Simple categorical encoding**: categorical variables can be encoded as ordinals. One-hot encoding does not usually cope well with high cardinality.
# - The lightgbm library also supports very useful features (see the short sketch below):
#     - Label weighting for class imbalance (no under-/over-sampling needed).
#     - A choice among multiple loss functions depending on the problem type; custom losses are also supported.
#     - The most relevant hyperparameters are the number of trees and the learning rate. The remaining ones exist to keep the individual trees from overfitting, and their default values are usually sufficient. NOTE: do not use random search for hyperparameter optimization.
#     - Spark support.
#     - Monotone constraints.
# - If most of the features are categorical, CatBoost can be used instead.
#
# #### References:
# - [GitHub - Light Gradient Boosting Machine](https://github.com/microsoft/LightGBM)
# - [lightgbm - ReadDocs](https://lightgbm.readthedocs.io/en/latest/index.html)
# - [MachineLearningMastery - Gradient Boosting with Scikit-Learn, XGBoost, LightGBM, and CatBoost](https://machinelearningmastery.com/gradient-boosting-with-scikit-learn-xgboost-lightgbm-and-catboost/)
# - [Paper - LightGBM: A Highly Efficient Gradient Boosting Decision Tree](https://papers.nips.cc/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html)
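# A small, hedged illustration of two of the options listed above (the dataset, parameter values and variable names here are illustrative, not tuned): weighting the positive class on an imbalanced problem and constraining the first feature to have a monotonically increasing effect.
# +
from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
Xd, yd = make_classification(n_samples=1000, n_features=5, weights=[0.9, 0.1], random_state=1)
clf_demo = LGBMClassifier(
    n_estimators=200,
    learning_rate=0.05,
    scale_pos_weight=9,                    # roughly the ratio of negative to positive samples
    monotone_constraints=[1, 0, 0, 0, 0],  # first feature constrained to an increasing effect
)
clf_demo.fit(Xd, yd)
print(clf_demo.score(Xd, yd))
# -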
# %pip freeze > requirements.txt
# check lightgbm version
import lightgbm
print(lightgbm.__version__)
# ### Test LightGBM Sklearn API (example by MachineLearningMastery)
# lightgbm for classification
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from matplotlib import pyplot
# define dataset
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
# evaluate the model
model = LGBMClassifier()
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(model, X, y, scoring='f1', cv=cv, n_jobs=-1, error_score='raise')
print('F1: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
# fit the model on the whole dataset
clf = LGBMClassifier()
clf.fit(X, y)
# make a single prediction
row = [[2.56999479, -0.13019997, 3.16075093, -4.35936352, -1.61271951, -1.39352057, -2.48924933, -1.93094078, 3.26130366, 2.05692145]]
yhat = clf.predict(row)
print('Prediction: %d' % yhat[0])
| 3,613 |
/Basic_Python_Assignment_17.ipynb | c58d9f76f242e1e5326d928206e3b21edaa08803 | [] | no_license | eng-nikhil/In-Assignments | https://github.com/eng-nikhil/In-Assignments | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,430 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 1. Assign the value 7 to the variable guess_me.
# Then, write the conditional tests (if, else, and elif)
# to print the string 'too low' if guess_me is less than 7, 'too high' if greater than 7, and 'just right' if equal to 7.
guess_me = 7
if guess_me < 7:
print('too low')
elif guess_me ==7:
print('just right')
else:
print('too high')
# +
# 2. Assign the value 7 to the variable guess_me and the value 1 to the variable start.
#Write a while loop that compares start with guess_me.
#Print too low if start is less than guess me.
#If start equals guess_me, print 'found it!' and exit the loop.
#If start is greater than guess_me, print 'oops' and exit the loop.
#Increment start at the end of the loop.
guess_me=7
start =1
while True:
if start < guess_me:
print('too low')
elif start == guess_me:
print('found it!')
break
elif start > guess_me:
print('oops')
break
start+=1
# +
# 3. Print the following values of the list [3, 2, 1, 0] using a for loop.
for i in [3, 2, 1, 0]:
print(i)
# -
# 4. Use a list comprehension to make a list of the even numbers in range(10)
lst = [x for x in range(10) if x%2==0]
print(lst)
# 5. Use a dictionary comprehension to create the dictionary squares.
#Use range(10) to return the keys, and use the square of each key as its value.
squares = {x: x**2 for x in range(10)}
print(squares)
# 6. Use a set comprehension to construct the set odd from the odd numbers in range(10).
odd = {x for x in range(10) if x % 2 != 0}
print(odd)
# 7. Use a generator comprehension
# to return the string 'Got ' and a number for the numbers in range(10).
# Iterate through this by using a for loop.
gen_comprehension=('Got ' + str(x) for x in range(10))
for i in gen_comprehension:
print(i)
# +
# 8. Define a function called good that returns the list ['Harry', 'Ron', 'Hermione'].
def good():
return ['Harry', 'Ron', 'Hermione']
good()
# +
# 9. Define a generator function called get_odds that returns the odd numbers from range(10).
# Use a for loop to find and print the third value returned.
def get_odds():
    yield from (x for x in range(10) if x % 2 != 0)
for position, value in enumerate(get_odds(), 1):
    if position == 3:
        print(value)  # the third odd number, 5
        break
# +
# 10. Define an exception called OopsException.
#Raise this exception to see what happens.
#Then write the code to catch this exception and print 'Caught an oops'.
class OopsException(Exception):
pass
def with_exception(a):
if a < 0:
raise OopsException(a)
try:
with_exception(-1)
except OopsException as err:
print('Caught an oops')
# +
# 11. Use zip() to make a dictionary called movies that pairs these lists: titles = ['Creature of Habit', 'Crewel Fate'] and plots = ['A nun turns into a monster', 'A haunted yarn shop'].
titles = ['Creature of Habit', 'Crewel Fate']
plots = ['A nun turns into a monster', 'A haunted yarn shop']
movies = {}
for title, plot in zip(titles, plots):
movies[title] = plot
print(movies)
| 3,248 |
/chapter_natural-language-processing/beam-search.ipynb | 6bd82e1a3d0ef3206ace0d921198179cd44469e0 | [] | no_license | middleprince/d2l-zh | https://github.com/middleprince/d2l-zh | 0 | 1 | null | 2023-03-01T20:35:15 | 2020-12-04T09:06:43 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 5,070 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beam Search
#
# The previous section described how to train an encoder-decoder whose input and output are both variable-length sequences. In this section we describe how to use the encoder-decoder to predict sequences of variable length.
#
# As mentioned in the previous section, when preparing the training data set we usually attach a special symbol "<eos>" to the end of both the input sequence and the output sequence of every sample, marking the end of the sequence. We will keep using the notation of the previous section in the discussion below. For ease of discussion, assume that the decoder's output is a text sequence. Let the output-text vocabulary $\mathcal{Y}$ (which contains the special symbol "<eos>") have size $\left|\mathcal{Y}\right|$, and let the maximum length of the output sequence be $T'$. There are then $\mathcal{O}(\left|\mathcal{Y}\right|^{T'})$ possible output sequences in total. In these output sequences, every subsequence that follows the special symbol "<eos>" is discarded.
#
#
# ## Greedy Search
#
# Let us first look at a simple solution: greedy search. For any time step $t'$ of the output sequence, we search among the $|\mathcal{Y}|$ words for the one with the largest conditional probability,
#
# $$y_{t'} = \operatorname*{argmax}_{y \in \mathcal{Y}} P(y \mid y_1, \ldots, y_{t'-1}, \boldsymbol{c})$$
#
# and emit it as the output. Once the "<eos>" symbol is emitted, or the output sequence has reached its maximum length $T'$, the output is complete.
#
# When describing the decoder we mentioned that the conditional probability of generating an output sequence from an input sequence is $\prod_{t'=1}^{T'} P(y_{t'} \mid y_1, \ldots, y_{t'-1}, \boldsymbol{c})$. We call the output sequence with the largest conditional probability the optimal output sequence. The main problem with greedy search is that it cannot guarantee that it finds the optimal output sequence.
#
# Let us look at an example. Suppose the output vocabulary contains the four words "A", "B", "C", and "<eos>". In Figure 10.9, the four numbers at each time step are the conditional probabilities of generating "A", "B", "C", and "<eos>" at that time step. At each time step, greedy search selects the word with the largest conditional probability, so Figure 10.9 generates the output sequence "A", "B", "C", "<eos>". The conditional probability of this output sequence is $0.5\times0.4\times0.4\times0.6 = 0.048$.
#
#
# ![At each time step, greedy search selects the word with the largest conditional probability](../img/s2s_prob1.svg)
#
#
# Next, look at the example illustrated in Figure 10.10. Unlike in Figure 10.9, at time step 2 Figure 10.10 selects "C", the word with the second-largest conditional probability. Since the output subsequence of time steps 1 and 2, on which time step 3 is based, has changed from "A", "B" in Figure 10.9 to "A", "C" in Figure 10.10, the conditional probabilities of the individual words at time step 3 also change. We select "B", the word with the largest conditional probability. The output subsequence of the first three time steps is now "A", "C", "B", which differs from "A", "B", "C" in Figure 10.9. Therefore the conditional probabilities of the words at time step 4 in Figure 10.10 also differ from those in Figure 10.9. We find that the conditional probability of the output sequence "A", "C", "B", "<eos>" is $0.5\times0.3\times0.6\times0.6=0.054$, which is larger than the conditional probability of the sequence obtained by greedy search. Therefore the output sequence "A", "B", "C", "<eos>" obtained by greedy search is not the optimal output sequence.
#
# ![At time step 2, the word "C", which has the second-largest conditional probability, is selected](../img/s2s_prob2.svg)
#
# ## Exhaustive Search
#
# If our goal is to obtain the optimal output sequence, we can consider exhaustive search: exhaustively enumerate all possible output sequences and output the one with the largest conditional probability.
#
# Although exhaustive search is guaranteed to find the optimal output sequence, its computational cost of $\mathcal{O}(\left|\mathcal{Y}\right|^{T'})$ easily becomes prohibitively large. For example, when $|\mathcal{Y}|=10000$ and $T'=10$, we would have to evaluate $10000^{10} = 10^{40}$ sequences, which is essentially impossible. The computational cost of greedy search is $\mathcal{O}(\left|\mathcal{Y}\right|T')$, usually far smaller than that of exhaustive search. For example, when $|\mathcal{Y}|=10000$ and $T'=10$, we only need to evaluate $10000\times10=10^5$ sequences.
#
#
# ## Beam Search
#
# Beam search is an improved algorithm over greedy search. It has a hyperparameter called the beam size, which we denote by $k$. At time step 1, we select the $k$ words with the largest conditional probabilities at the current time step, and each of them becomes the first word of one of $k$ candidate output sequences. At every subsequent time step, based on the $k$ candidate output sequences of the previous time step, we select the $k$ output sequences with the largest conditional probabilities among the $k\left|\mathcal{Y}\right|$ possible output sequences, and keep them as the candidate output sequences of that time step. Finally, from the candidate output sequences of all time steps we keep those that contain the special symbol "<eos>", discard all subsequences after "<eos>", and obtain the final set of candidate output sequences.
#
#
# ![The process of beam search. The beam size is 2 and the maximum length of the output sequence is 3. The candidate output sequences are $A$, $C$, $AB$, $CE$, $ABD$, and $CED$](../img/beam_search.svg)
#
# Figure 10.11 demonstrates the process of beam search with an example. Suppose the vocabulary of the output sequence contains only five elements, i.e. $\mathcal{Y} = \{A, B, C, D, E\}$, one of which is the special symbol "<eos>". Let the beam size be 2 and the maximum length of the output sequence be 3. At time step 1 of the output sequence, suppose the two words with the largest conditional probabilities $P(y_1 \mid \boldsymbol{c})$ are $A$ and $C$. At time step 2, for every $y_2 \in \mathcal{Y}$ we compute $P(A, y_2 \mid \boldsymbol{c}) = P(A \mid \boldsymbol{c})P(y_2 \mid A, \boldsymbol{c})$ and $P(C, y_2 \mid \boldsymbol{c}) = P(C \mid \boldsymbol{c})P(y_2 \mid C, \boldsymbol{c})$, and from these 10 computed conditional probabilities we keep the largest two, say $P(A, B \mid \boldsymbol{c})$ and $P(C, E \mid \boldsymbol{c})$. Then, at time step 3, for every $y_3 \in \mathcal{Y}$ we compute $P(A, B, y_3 \mid \boldsymbol{c}) = P(A, B \mid \boldsymbol{c})P(y_3 \mid A, B, \boldsymbol{c})$ and $P(C, E, y_3 \mid \boldsymbol{c}) = P(C, E \mid \boldsymbol{c})P(y_3 \mid C, E, \boldsymbol{c})$, and from these 10 computed conditional probabilities we keep the largest two, say $P(A, B, D \mid \boldsymbol{c})$ and $P(C, E, D \mid \boldsymbol{c})$. In this way we obtain 6 candidate output sequences: (1) $A$; (2) $C$; (3) $A$, $B$; (4) $C$, $E$; (5) $A$, $B$, $D$; and (6) $C$, $E$, $D$. The final set of candidate output sequences is then derived from these 6 sequences.
#
#
#
# From the final set of candidate output sequences, we take the sequence with the highest value of the following score as the output sequence:
#
# $$ \frac{1}{L^\alpha} \log P(y_1, \ldots, y_{L}) = \frac{1}{L^\alpha} \sum_{t'=1}^L \log P(y_{t'} \mid y_1, \ldots, y_{t'-1}, \boldsymbol{c}),$$
#
# where $L$ is the length of the final candidate sequence and $\alpha$ is usually set to 0.75. The $L^\alpha$ in the denominator penalizes the larger number of summed log terms that longer sequences have in the score above. From this analysis, the computational cost of beam search is $\mathcal{O}(k\left|\mathcal{Y}\right|T')$, which lies between the computational costs of greedy search and exhaustive search. In addition, greedy search can be viewed as beam search with a beam size of 1. Through its flexible beam size $k$, beam search trades off computational cost against search quality.
#
#
# ## Summary
#
# * Methods for predicting variable-length sequences include greedy search, exhaustive search, and beam search.
# * Beam search trades off computational cost against search quality through its flexible beam size.
#
#
# ## Exercises
#
# * Can exhaustive search be viewed as beam search with a special beam size? Why?
# * In the ["Implementation of Recurrent Neural Networks from Scratch"](../chapter_recurrent-neural-networks/rnn-scratch.ipynb) section, we used a language model to compose lyrics. Which kind of search does its output use? Can you improve it?
#
#
#
#
# ## Scan the QR code to access the [discussion forum](https://discuss.gluon.ai/t/topic/6817)
#
# ![](../img/qr_beam-search.svg)
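# To make the procedure above concrete, here is a minimal sketch of beam search over a made-up next-word distribution (greedy search is the special case $k=1$). The toy distribution and function names are illustrative additions, not part of the original chapter.
# +
import math

def toy_step_probs(prefix):
    # a made-up conditional distribution P(next word | prefix) over a tiny vocabulary
    vocab = ('A', 'B', 'C', '<eos>')
    return {w: 1.0 / len(vocab) for w in vocab}

def beam_search(step_probs, k=2, max_len=3, alpha=0.75):
    beams = [((), 0.0)]                       # (prefix, sum of log conditional probabilities)
    finished = []
    for _ in range(max_len):
        candidates = []
        for prefix, logp in beams:
            for word, p in step_probs(prefix).items():
                candidates.append((prefix + (word,), logp + math.log(p)))
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = []
        for prefix, logp in candidates[:k]:   # keep the k best expansions
            (finished if prefix[-1] == '<eos>' else beams).append((prefix, logp))
        if not beams:
            break
    finished.extend(beams)                    # sequences that never emitted <eos>
    # pick the candidate with the largest length-normalized score (1 / L^alpha) * log P
    return max(finished, key=lambda c: c[1] / len(c[0]) ** alpha)

print(beam_search(toy_step_probs, k=2, max_len=3))
# -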
| 4,202 |
/exercise-machine-learning-competitions.ipynb | cf57b42ddf9c61ea515f816cffafd8ecb580c0e6 | [] | no_license | meghhhna/INTRO-TO-MACHINE-LEARNING | https://github.com/meghhhna/INTRO-TO-MACHINE-LEARNING | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 11,304 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b><font size="5">Face Detection and Recognition with CNN</font><br></b>
# <font size="4">Introduction:</font><br><br>
# <p><font size="3">With the development of deep learning, face recognition technology based on CNN (Convolutional Neural Network) has become the main method adopted in the field of face recognition. A Convolutional Neural Network consists of an input and an output layer, as well as multiple hidden layers. The hidden layers of a CNN typically consist of a series of convolutional layers that convolve with a multiplication. The activation function is most commonly a ReLU layer, and is subsequently followed by additional convolutions such as pooling layers, fully connected layers and normalization layers. CNN can be efficiently used in the field of Computer Vision such as image and video recognition, recommender systems and image classification.</font></p>
# <p><font size="3">In this project, we are going to develope and examine the workflow of a face recognition system with CNN. The data used for this project is an open source dataset which can be downloaded from the link below:</font></p>
# <a href="https://gitlab.com/knork/data">Click here</a></font></p>
# <p><font size="3">The ORL_faces.npz dataset contains 400 images of 20 different persons' faces, which means there are 20 images belonging to every individual.</font></p>
# +
# Import libraries
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# +
# Load dataset
data = np.load('ORL_faces.npz')
# Load the "Train Images"
x_train = data['trainX']
x_train = np.array(x_train, dtype='float32')/255
x_test = data['testX']
x_test = np.array(x_test, dtype='float32')/255
# Load the Label of Images
y_train = data['trainY']
y_test = data['testY']
# +
# Let's inspect images from 4 different persons (in grayscale)
plt.figure(figsize=(12,10))
for count, index in enumerate(range(0, 40, 10)):
# Plot images
plt.subplot(221+count)
plt.imshow(x_train[index].reshape(112, 92), cmap='gray')
# -
# Let's create a validation set which will be used for validation during the training process
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2,
random_state=42)
# +
height = 112
width = 92
shape = (height, width, 1)
# Change shape of images
x_train = x_train.reshape(x_train.shape[0], *shape)
x_test = x_test.reshape(x_test.shape[0], *shape)
x_val = x_val.reshape(x_val.shape[0], *shape)
print('X_train shape: {}'.format(x_train.shape))
print('X_test shape: {}'.format(x_test.shape))
print('X_val shape: {}'.format(x_val.shape))
print('------------------------------------')
print('Y_train shape: {}'.format(y_train.shape))
print('Y_test shape: {}'.format(y_test.shape))
print('Y_val shape: {}'.format(y_val.shape))
# +
# Create model
model = Sequential([
Conv2D(filters=36, kernel_size=7, activation='relu', input_shape=shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=54, kernel_size=5, activation='relu', input_shape=shape),
MaxPooling2D(pool_size=2),
Flatten(),
Dense(2024, activation='relu'),
Dropout(0.4),
Dense(1024, activation='relu'),
Dropout(0.4),
Dense(512, activation='relu'),
Dropout(0.4),
Dense(20, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -
# <p><font size="3">The model has 9 distinct layers. The first one is a 2-dimensional convolutional layer with a Rectified Linear Unit (ReLU) activation function. It is fed into a 2-dimensional MaxPooling layer, which reduces the computational cost by reducing the number of parameters to learn and provides basic translation invariance to the internal representation. The same convolution-pooling process is repeated one more time. The next step is to flatten the previous layer's output into a vector whose length equals the number of elements in the tensor, so that it can be fed into fully connected Dense layers with ReLU activation functions. We use Dropout layers to avoid overfitting, which means 40% of the nodes are set to 0 at each update during the training phase. The last layer is our output layer with a Softmax activation function for multi-class classification.</font></p>
# <p><font size="3">Finally, we compile our model using the Adam optimizer and sparse categorical cross-entropy as the cost function. Sparse categorical cross-entropy is appropriate here because this is a multi-class classification problem with integer labels.</font></p>
# Display model
model.summary()
# Train our model monitoring validation accuracy in the meantime.
training = model.fit(x_train, y_train, batch_size=256, epochs=75, verbose=1,
validation_data=(x_val, y_val))
# +
# Evaluate on test set
scores = model.evaluate(x_test, y_test)
print('Test loss {:.4f}'.format(scores[0]))
print('Test accuracy {:.4f}'.format(scores[1]))
# +
plt.figure(figsize=(15,8))
plt.subplot(121)
plt.plot(training.history['accuracy'])
plt.plot(training.history['val_accuracy'])
plt.title('Model accuracy', size=15)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='best')
plt.subplot(122)
plt.plot(training.history['loss'])
plt.plot(training.history['val_loss'])
plt.title('Loss function', size=15)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='best')
# -
# <p><font size="3">The accuracy graph shows that the more epochs the model was trained for, the more accurate it becomes. There is no sign of overfitting, since the validation and training accuracies follow the same trend. If the validation accuracy were noticeably lower, we would be talking about overfitting, which means the model was trained on the training set too much (it learned irrelevant features). On the other hand, if the validation accuracy were higher than the training accuracy, we would be talking about underfitting, which means the model was not trained enough.</font></p>
# <p><font size="3">It seems 75 epochs was enough to train, since the curves flatten out around epochs 35-40.</font></p>
# +
# Make predictions on testing set
prediction = model.predict_classes(x_test)
accuracy = accuracy_score(y_test, prediction)
print("Accuracy:", accuracy)
# -
print('Confusin matrix:\n', confusion_matrix(y_test, prediction))
print('')
print('Classification report:\n', classification_report(y_test, prediction))
# Create a heatmap of the confusion matrix for a better understanding with visualization
cm = confusion_matrix(y_test, prediction)
plt.figure(figsize=(12,12))
sns.heatmap(cm, cmap='viridis', annot=True)
plt.title('Confusion Matrix', size=16)
plt.xlabel('Predicted Label', size=13)
plt.ylabel('True Label', size=13)
# <font size="3"><p>We can clearly see that most of the images were correctly classified (the diagonal entries, most of them equal to 8). The most frequently misclassified person was the one with ID 4, who was correctly classified only twice; the system mixed up his face with the person with ID 8. Let's have a look at them.</font></p>
plt.figure(figsize=(10,8))
plt.subplot(121)
plt.imshow(x_test[34].reshape(112, 92), cmap='gray')
plt.subplot(122)
plt.imshow(x_test[66].reshape(112, 92), cmap='gray')
# <font size="3"><p>At first glance, no similarities can be noticed. The model probably found similarities between the shapes of the mouths, the eye distances, or the shapes of the noses, which might confuse the face recognition process.</font></p>
# <font size="4">Conclusion:</font><br><br>
# <p><font size="3">Our face recognition model using deep learning, CNN could reach a 87.5 % accuracy after training the model for 75 epochs. Even if the model's time complexity is pretty high (took roughly half an hour to run) we got a very similar, slighly lower accuracy compared what we got with the LBPH algorithm (88.75 %). Despite of the LBPH algorithm there are many parameters to tune in the developement of a CNN model such as choosing the number of layers, finding appropriate dropout ratio, specifying the batch size etc. Playing around with these parameters might leed a slighly higher accuracy ratio than we got with the LBPH algorithm.</font></p>
# <p><font size="3">Our face recognition model using a deep learning CNN reached 87.5% accuracy after training for 75 epochs. Even though the model's training time is fairly high (roughly half an hour), we got a very similar, slightly lower accuracy compared to what we got with the LBPH algorithm (88.75%). Unlike the LBPH algorithm, there are many parameters to tune when developing a CNN model, such as the number of layers, an appropriate dropout ratio, the batch size, etc. Playing around with these parameters might lead to a slightly higher accuracy than we got with the LBPH algorithm.</font></p>
# <p><font size="3">Overall, CNN face recognition tools are among the most robust and accurate systems available, and they are still under development to this day.</font></p>
/assignment 20 (python basic).ipynb | b725ca64c2db7af9025f60452ecd8518ed6a9479 | [] | no_license | coderita/Data-Science-Assignment | https://github.com/coderita/Data-Science-Assignment | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,741 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.7.4 32-bit
# name: python374jvsc74a57bd00573fab6c61a770748bfcdda5a22a6f0994280481be73221145b8e05796edb27
# ---
# 1. Set the variable test1 to the string 'This is a test of the emergency text system,' and save test1 to a file named test.txt.
test1 = 'This is a test of the emergency text system'
outfile = open('test.txt', 'wt')
outfile.write(test1)
outfile.close()
# 2. Read the contents of the file test.txt into the variable test2. Is there a difference between test 1 and test 2?
# +
with open('test.txt', 'rt') as infile:
test2 = infile.read()
test1==test2
# -
# 3. Create a CSV file called books.csv by using these lines:
# title,author,year
# The Weirdstone of Brisingamen,Alan Garner,1960
# Perdido Street Station,China Miรฉville,2000
# Thud!,Terry Pratchett,2005
# The Spellman Files,Lisa Lutz,2007
# Small Gods,Terry Pratchett,1992
#
text = '''title,author,year
The Weirdstone of Brisingamen,Alan Garner,1960
Perdido Street Station,China Miรฉville,2000
Thud!,Terry Pratchett,2005
The Spellman Files,Lisa Lutz,2007
Small Gods,Terry Pratchett,1992'''
with open('books.csv', 'w') as outfile:
outfile.write(text)
# 4. Use the sqlite3 module to create a SQLite database called books.db, and a table called books with these fields: title (text), author (text), and year (integer).
# +
import sqlite3
db = sqlite3.connect('books.db')
curs = db.cursor()
curs.execute("DROP TABLE IF EXISTS book")
curs.execute('''create table book (title CHAR(20), author CHAR(20), year INT)''')
db.commit()
# -
# 5. Read books.csv and insert its data into the book table.
import csv
ins_str = 'insert into book values(?, ?, ?)'
with open('books.csv', 'rt') as infile:
books = csv.DictReader(infile)
for book in books:
curs.execute(ins_str, (book['title'], book['author'], book['year']))
db.commit()
# 6. Select and print the title column from the book table in alphabetical order.
sql = 'select title from book order by title asc'
for row in db.execute(sql):
print(row)
# 7. From the book table, select and print all columns in the order of publication.
#
for row in db.execute('select * from book order by year'):
print(row)
# 8. Use the sqlalchemy module to connect to the sqlite3 database books.db that you just made in exercise 6.
import sqlalchemy
conn = sqlalchemy.create_engine('sqlite:///books.db')
sql = 'select title from book order by title asc'
rows = conn.execute(sql)
for row in rows:
print(row)
# 9. Install the Redis server and the Python redis library (pip install redis) on your computer. Create a Redis hash called test with the fields count (1) and name ('Fester Bestertester'). Print all the fields for test.
import redis
conn = redis.Redis()
conn.delete('test')
conn.hmset('test', {'count': 1, 'name': 'Fester Bestertester'})
conn.hgetall('test')
# 10. Increment the count field of test and print it.
conn.hincrby('test', 'count', 3)
conn.hget('test', 'count')
| 3,155 |
/Untitled.ipynb | 986d0045002d1a5fe74ae6f5cb10d3602fc6d52f | [] | no_license | nitinpathania007/Math-for-Data-Scientists | https://github.com/nitinpathania007/Math-for-Data-Scientists | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 39,892 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Matrix-vector product as a linear combination of columns, built from three helpers:
# dot(v, w)      - dot product of two vectors, returns a scalar
# vadd(v, w)     - adds two vectors, returns a vector
# smul(alpha, v) - multiplies a vector by a scalar, returns a vector
def dot(v, w):
    return sum(vi * wi for vi, wi in zip(v, w))
def vadd(v, w):
    return [vi + wi for vi, wi in zip(v, w)]
def smul(alpha, v):
    return [alpha * vi for vi in v]
def mvmul(a, x):
    # a is a list of columns; the result is x[0]*a[0] + x[1]*a[1] + ...
    b = smul(x[0], a[0])
    for col in range(1, len(x)):
        b = vadd(b, smul(x[col], a[col]))
    return b
# -
# +
# pairing row i of a with column i of b and taking dot products
a = [[1, 2, 3], [4, 5, 6]]   # rows of one matrix
b = [[1, 0, 2], [0, 1, 3]]   # columns of another matrix
def funct(a, b):
    return [dot(z[0], z[1]) for z in zip(a, b)]
# -
import numpy
a=[1,2,3]
b=[3,4,5]
numpy.dot(a,b)
m=5
n=10
[[i*j for i in range(0,n)] for j in range(0,m)]
m=5
n=10
[[j*i for i in range(0,n)] for j in range(0,m)]
m=5
n=10
s=[]
p=[]
for i in range(0,m):
row=[]
for j in range(0,n):
row.append(0)
p.append(row)
for i in p:
print(i)
import numpy as np
b=np.random.randn(5,10)
b
D=np.random.randn(5,10)
D
d=b.transpose()
d
b
for i in range(0, 5):
    for j in range(0, 10):
        out = b[i][j] * d[j][i]   # b is 5x10 and d = b.T is 10x5
m = 5
n = 10
c = [[0 for i in range(0, m)] for j in range(0, n)]   # 10x5 container for the transpose of b
for i in range(0, n):
    for j in range(0, m):
        c[i][j] = b[j][i]
D
b=np.round(10*np.random.randn(5,10)/10)
b
# +
w = np.array([[2,-6],[-1, 4]])
v = np.array([12,46])
w*v
# -
b
1.29824913*2.89823632
d
mvmul(b,d)
len(b[0])
len(b)
s=[]
for i in range(0,5):
t=[]
for j in range(0,10):
t.append(b[i][j]*d[j][i])
s.append(t)
s
b
d
# +
c = []
for i in range(0, len(b)):
    e = []
    for j in range(0, len(d[0])):
        e.append(sum(b[i][k] * d[k][j] for k in range(0, len(d))))   # (b @ d)[i][j]
    c.append(e)
c
# +
def myzeroes(m,n):
D=[[0 for i in range(0,n)] for i in range(0,m)]
return D
def mytranspose(B):
m=len(B)
n=len(B[0])
D=myzeroes(n,m)
for i in range(0,n):
for j in range(0,m):
D[i][j]=B[j][i]
return D
# -
b
mytranspose(b)
# +
A=np.round(10*np.random.randn(10,5))/10
B=np.round(10*np.random.randn(5,10))/10   # 5x10 so that the product A (10x5) @ B is defined
BT=mytranspose(b)
# +
C=[[0 for i in range(0,len(B[0]))] for j in range(0,len(A))]
for i in range(0,len(A)):
for j in range(0,len(B[0])):
for k in range(0,len(A[0])):
C[i][j] += A[i][k]*B[k][j]
len(C[0])
# +
import time
t=time.perf_counter()
for i in range(0,len(A)):
for j in range(0,len(B[0])):
for k in range(0,len(A[0])):
C[i][j] += A[i][k]*B[k][j]
np.round(np.array(C)*10)/10
print(time.perf_counter()-t)
# +
t=time.perf_counter()
np.matmul(A,B)
print(time.perf_counter()-t)
mvmul
# -
whos
who
L=[0]
[L*5]*7
np.random.randint(1,10)
# +
# fill k random positions of an m-by-n zero matrix (dense storage, i.e. not the sparse approach)
m=5
n=8
k=6
A=[[0 for i in range(0,n)] for j in range(0,m)]
for ik in range(0,k):
    row=np.random.randint(0,m)
    column=np.random.randint(0,n)
    r=np.random.randn()
    A[row][column] = r
A
# -
from scipy.sparse import csr_matrix
As=csr_matrix(A)
A
from scipy.sparse import coo_matrix
As1=coo_matrix(A)
As1
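# For contrast with the dense fill above, a sketch of building a random sparse matrix directly in COO form from (row, column, value) triplets, reusing the m, n, k defined earlier:
# +
rows = [np.random.randint(0, m) for _ in range(k)]
cols = [np.random.randint(0, n) for _ in range(k)]
vals = [np.random.randn() for _ in range(k)]
As2 = coo_matrix((vals, (rows, cols)), shape=(m, n))
As2.toarray()
# -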
| 3,084 |
/feature-engineering/exercise-mutual-information.ipynb | bab000d6ead61e726e5fde9e6eae05d203085337 | [] | no_license | MiesnerJacob/kaggle-courses | https://github.com/MiesnerJacob/kaggle-courses | 0 | 0 | null | 2021-02-14T19:25:22 | 2021-02-14T19:24:28 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 446,730 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="BOetPxvylfeb"
# # Amazon Fine Food Reviews Analysis
#
#
# Data Source: https://www.kaggle.com/snap/amazon-fine-food-reviews <br>
#
# EDA: https://nycdatascience.com/blog/student-works/amazon-fine-foods-visualization/
#
#
# The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon.<br>
#
# Number of reviews: 568,454<br>
# Number of users: 256,059<br>
# Number of products: 74,258<br>
# Timespan: Oct 1999 - Oct 2012<br>
# Number of Attributes/Columns in data: 10
#
# Attribute Information:
#
# 1. Id
# 2. ProductId - unique identifier for the product
# 3. UserId - unqiue identifier for the user
# 4. ProfileName
# 5. HelpfulnessNumerator - number of users who found the review helpful
# 6. HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not
# 7. Score - rating between 1 and 5
# 8. Time - timestamp for the review
# 9. Summary - brief summary of the review
# 10. Text - text of the review
#
#
# #### Objective:
# Given a review, determine whether the review is positive (rating of 4 or 5) or negative (rating of 1 or 2).
#
# <br>
# [Q] How to determine if a review is positive or negative?<br>
# <br>
# [Ans] We could use the Score/Rating. A rating of 4 or 5 can be considered a positive review, and a rating of 1 or 2 a negative one. A review with a rating of 3 is considered neutral, and such reviews are excluded from our analysis. This is an approximate, proxy way of determining the polarity (positivity/negativity) of a review.
#
#
#
# + [markdown] colab_type="text" id="CSLdiilDlfec"
# # [1]. Reading Data
# + [markdown] colab_type="text" id="l2TPdoDflfed"
# ## [1.1] Loading the data
#
# The dataset is available in two forms
# 1. .csv file
# 2. SQLite Database
#
# In order to load the data, we have used the SQLite database, as it makes it easier to query and visualise the data efficiently.
# <br>
#
# Here, as we only want to get the global sentiment of the recommendations (positive or negative), we will purposefully ignore all Scores equal to 3. If the score is above 3, then the recommendation will be set to "positive". Otherwise, it will be set to "negative".
# + id="QpPFNzD7UJx-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="43a8e0a0-2b59-4687-a583-63e38f3376e7"
# Code to read csv file into Colaboratory:
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + id="nDD6P0UdUZFS" colab_type="code" colab={}
link = 'https://drive.google.com/open?id=1cpwGHmONMCohLX-EQu9ubkB58ZoVc9pI' # The shareable link
# + id="Z2doRsE2UcTv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="32832d9e-04b8-4a61-81ca-f6d4556da594"
fluff, id = link.split('=')
print (id) # Verify that you have everything after '='
# + id="gOmU7dPxUecH" colab_type="code" colab={}
import pandas as pd
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('opendata.csv')
df3 = pd.read_csv('opendata.csv')
# + colab_type="code" id="JfreTkMblfee" colab={}
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import sqlite3
import pandas as pd
import numpy as np
import nltk
import string
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import os
# + id="ju92kfH8UnRU" colab_type="code" colab={}
filtered_data=df3
# + colab_type="code" id="StXOCb9Glfej" outputId="6e22ef7d-dc9b-44cd-c240-ab609e851500" colab={"base_uri": "https://localhost:8080/", "height": 996}
# using SQLite Table to read data.
# NOTE: the SQLite query below only works when 'database.sqlite' (containing a Reviews table)
# is available; in this Colab run the CSV loaded into df3 above is used instead, so the query
# is kept only for reference.
con = sqlite3.connect('database.sqlite')
# filtering only positive and negative reviews i.e.
# not taking into consideration those reviews with Score=3
# SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000, will give top 500000 data points
# you can change the number to any other number based on your computing power
# filtered_data = pd.read_sql_query(""" SELECT * FROM Reviews WHERE Score != 3 LIMIT 500000""", con)
# for tsne assignment you can take 5k data points
filtered_data = df3
# Give reviews with Score>3 a positive rating(1), and reviews with a score<3 a negative rating(0).
def partition(x):
    if x < 3:
        return 0
    return 1
# NOTE: the raw 1-5 Score column is kept unchanged here; the positive/negative mapping is
# applied once, later, via func() in section [3.2] (applying partition() here as well would
# binarize the labels twice and break that later step).
# actualScore = filtered_data['Score']
# positiveNegative = actualScore.map(partition)
# filtered_data['Score'] = positiveNegative
print("Number of data points in our data", filtered_data.shape)
filtered_data.head(3)
# + colab_type="code" id="9rOGfYsAlfe0" colab={}
# NOTE: 'display' is only created by the SQL query cell in section [2.1] below, and the
# COUNT(*) column comes from a grouped query in the original tutorial that is not included
# here, so these two cells require database.sqlite and those queries to have been run first.
display[display['UserId']=='AZY10LLTJ71NX']
# + colab_type="code" id="CmLfx_WElfe6" colab={}
display['COUNT(*)'].sum()
# + [markdown] colab_type="text" id="r0kfpBuilfe-"
# # [2] Exploratory Data Analysis
# + [markdown] colab_type="text" id="gaKEsV7Vlfe_"
# ## [2.1] Data Cleaning: Deduplication
#
# It is observed (as shown in the table below) that the reviews data had many duplicate entries. Hence it was necessary to remove duplicates in order to get unbiased results for the analysis of the data. Following is an example:
# + colab_type="code" id="yY3iRtAAlffA" colab={}
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND UserId="AR5J8UI46CURR"
ORDER BY ProductID
""", con)
display.head()
# + [markdown] colab_type="text" id="Zn4BzyPFlffE"
# As can be seen above, the same user has multiple reviews with the same values for HelpfulnessNumerator, HelpfulnessDenominator, Score, Time, Summary and Text, and on doing analysis it was found that <br>
# <br>
# ProductId=B000HDOPZG was Loacker Quadratini Vanilla Wafer Cookies, 8.82-Ounce Packages (Pack of 8)<br>
# <br>
# ProductId=B000HDL1RQ was Loacker Quadratini Lemon Wafer Cookies, 8.82-Ounce Packages (Pack of 8) and so on<br>
#
# It was inferred after analysis that reviews with the same parameters other than ProductId belonged to the same product, just with a different flavour or quantity. Hence, in order to reduce redundancy, it was decided to eliminate the rows having the same parameters.<br>
#
# The method used was to first sort the data according to ProductId and then keep only the first of the similar product reviews and delete the others. E.g., in the above, just the review for ProductId=B000HDL1RQ remains. This method ensures that there is only one representative for each product; deduplication without sorting would leave the possibility of different representatives still existing for the same product.
# + colab_type="code" id="_QwRW3RFlffF" colab={}
#Sorting data according to ProductId in ascending order
sorted_data=filtered_data.sort_values('ProductId', axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
# + colab_type="code" id="loiXzYmzlffK" outputId="aee6eb29-8762-4aa3-dcc4-1d8975bc982b" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Deduplication of entries
final=sorted_data.drop_duplicates(subset={"UserId","ProfileName","Time","Text"}, keep='first', inplace=False)
final.shape
# + colab_type="code" id="zJXHGtLqlffP" outputId="e94e71d2-d576-42ad-f143-aa8ebe346e40" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Checking to see how much % of data still remains
(final['Id'].size*1.0)/(filtered_data['Id'].size*1.0)*100
# + [markdown] colab_type="text" id="6BmtZ8u8lffT"
# <b>Observation:-</b> It was also seen that in the two rows given below the value of HelpfulnessNumerator is greater than HelpfulnessDenominator, which is not practically possible, hence these two rows are also removed from the calculations
# + colab_type="code" id="yqjWBOUilffa" colab={}
final=final[final.HelpfulnessNumerator<=final.HelpfulnessDenominator]
# + colab_type="code" id="3NgUYSqklfff" outputId="ccc51371-dece-4911-cb5b-caaf70427d64" colab={"base_uri": "https://localhost:8080/", "height": 119}
#Before starting the next phase of preprocessing lets see the number of entries left
print(final.shape)
#How many positive and negative reviews are present in our dataset?
final['Score'].value_counts()
# + [markdown] colab_type="text" id="tEJo2qovlffk"
# # [3] Preprocessing
# + [markdown] colab_type="text" id="98ogqQNvlffm"
# ## [3.1]. Preprocessing Review Text
#
# Now that we have finished deduplication, our data requires some preprocessing before we go further with the analysis and build the prediction model.
#
# Hence in the Preprocessing phase we do the following in the order below:-
#
# 1. Begin by removing the html tags
# 2. Remove any punctuations or limited set of special characters like , or . or # etc.
# 3. Check if the word is made up of english letters and is not alpha-numeric
# 4. Check to see if the length of the word is greater than 2 (since there are no meaningful adjectives of fewer than 3 letters)
# 5. Convert the word to lowercase
# 6. Remove Stopwords
# 7. Finally Snowball Stemming the word (it was observed to work better than Porter Stemming); a stemming sketch follows below<br>
#
# After which we collect the words used to describe positive and negative reviews
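#
# A note on step 7: the cleaning loop below covers steps 1-6 but does not apply stemming.
# A minimal, optional sketch of how Snowball stemming could be added on top of an
# already-cleaned review (assuming NLTK is available; names here are illustrative):

# +
from nltk.stem.snowball import SnowballStemmer

snowball = SnowballStemmer('english')

def stem_review(review):
    # stem each whitespace-separated token of an already-cleaned review
    return ' '.join(snowball.stem(token) for token in review.split())

print(stem_review('tasted really amazing cookies'))
# -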
# + colab_type="code" id="toJT1pm7lffo" outputId="77671bd0-4c09-4e14-8921-f8330c62bcf5" colab={"base_uri": "https://localhost:8080/", "height": 173}
# printing some random reviews
sent_0 = final['Text'].values[0]
print(sent_0)
print("="*50)
sent_1000 = final['Text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = final['Text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = final['Text'].values[4900]
print(sent_4900)
print("="*50)
# + colab_type="code" id="veaXSSGSlffu" outputId="27d8d17d-ca45-47b6-f93d-01d9842ea39b" colab={"base_uri": "https://localhost:8080/", "height": 54}
# remove urls from text python: https://stackoverflow.com/a/40823105/4084039
sent_0 = re.sub(r"http\S+", "", sent_0)
sent_1000 = re.sub(r"http\S+", "", sent_1000)
sent_1500 = re.sub(r"http\S+", "", sent_1500)
sent_4900 = re.sub(r"http\S+", "", sent_4900)
print(sent_0)
# + colab_type="code" id="PSDTpeZElffx" outputId="627e9701-702e-4c05-cc08-a744b1cfce9d" colab={"base_uri": "https://localhost:8080/", "height": 156}
# https://stackoverflow.com/questions/16206380/python-beautifulsoup-how-to-remove-all-tags-from-an-element
from bs4 import BeautifulSoup
soup = BeautifulSoup(sent_0, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1000, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1500, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_4900, 'lxml')
text = soup.get_text()
print(text)
# + colab_type="code" id="P2fiflxxlff1" colab={}
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
# + colab_type="code" id="YFFhQsI5lff3" outputId="86acde5c-4196-41bd-ef81-f493388ea3a3" colab={"base_uri": "https://localhost:8080/", "height": 71}
sent_1500 = decontracted(sent_1500)
print(sent_1500)
print("="*50)
# + colab_type="code" id="tOXUuH2Llff9" outputId="3b8f556b-f5a0-4cd7-cb88-9892b0211d25" colab={"base_uri": "https://localhost:8080/", "height": 54}
#remove words with numbers python: https://stackoverflow.com/a/18082370/4084039
sent_0 = re.sub("\S*\d\S*", "", sent_0).strip()
print(sent_0)
# + colab_type="code" id="Rjbj4y72lfgB" outputId="5f7f99ed-f85b-4da5-c2b8-1c0ece135ca2" colab={"base_uri": "https://localhost:8080/", "height": 54}
#remove special characters: https://stackoverflow.com/a/5843547/4084039
sent_1500 = re.sub('[^A-Za-z0-9]+', ' ', sent_1500)
print(sent_1500)
# + colab_type="code" id="uvvaKYT0lfgF" colab={}
# https://gist.github.com/sebleier/554280
# we are removing the words from the stop words list: 'no', 'nor', 'not'
# <br /><br /> ==> after the above steps, we are getting "br br"
# we are including them into stop words list
# instead of <br /> if we had <br/> these tags would have been removed in the 1st step
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"])
# + colab_type="code" id="Z9rdZXeFlfgH" outputId="cc71b921-d7b0-4325-ee86-41133bec35da" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Combining all the above steps
from tqdm import tqdm
preprocessed_reviews = []
# tqdm is for printing the status bar
for sentance in tqdm(final['Text'].values):
sentance = re.sub(r"http\S+", "", sentance)
sentance = BeautifulSoup(sentance, 'lxml').get_text()
sentance = decontracted(sentance)
sentance = re.sub("\S*\d\S*", "", sentance).strip()
sentance = re.sub('[^A-Za-z]+', ' ', sentance)
# https://gist.github.com/sebleier/554280
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
preprocessed_reviews.append(sentance.strip())
# + colab_type="code" id="eFr9XTF5lfgK" outputId="59dfc26f-d1d7-4e02-d598-2c82d74b8bc3" colab={"base_uri": "https://localhost:8080/", "height": 54}
preprocessed_reviews[1500]
# + [markdown] colab_type="text" id="zbdUHU_wlfgP"
# <h2><font color='red'>[3.2] Preprocessing Review Summary</font></h2>
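#
# The cells below only construct the label vector; the Summary column itself is not cleaned
# here. A minimal sketch of how the same cleaning pipeline could be applied to Summary,
# reusing decontracted() and the stopwords set defined above (illustrative only):

# +
preprocessed_summaries = []
for summary in tqdm(final['Summary'].values):
    summary = re.sub(r"http\S+", "", str(summary))
    summary = BeautifulSoup(summary, 'lxml').get_text()
    summary = decontracted(summary)
    summary = re.sub("\S*\d\S*", "", summary).strip()
    summary = re.sub('[^A-Za-z]+', ' ', summary)
    summary = ' '.join(e.lower() for e in summary.split() if e.lower() not in stopwords)
    preprocessed_summaries.append(summary.strip())
# -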
# + colab_type="code" id="bpuHpiSvlfgP" colab={}
# Binarize the raw 1-5 Score: ratings above 3 are positive (1), everything else negative (0).
def func(x):
    if x>3:
        return 1
    else:
        return 0
# + colab_type="code" id="UnpnKucKlfgU" colab={}
x=preprocessed_reviews
y=final['Score'].apply(func)
# + colab_type="code" id="LuKLpvllnY23" colab={}
from sklearn.model_selection import train_test_split
x1,xtest,y1,ytest=train_test_split(x,y,test_size=0.3,random_state=1)
# + colab_type="code" id="oNgO3I-fnhll" colab={}
xtrain,xcv,ytrain,ycv=train_test_split(x1,y1,test_size=0.2,random_state=1)
# + colab_type="code" id="7cXBcvSPncRe" outputId="7be314df-e28a-4bd2-abab-11fb943eadfe" colab={"base_uri": "https://localhost:8080/", "height": 119}
print(len(xtrain))
print(ytrain.shape)
print(len(xtest))
print(ytest.shape)
print(len(xcv))
print(ycv.shape)
# + [markdown] colab_type="text" id="sMGUs5illfgT"
# # [4] Featurization
# + [markdown] colab_type="text" id="gFcnNu9TlfgT"
# ## [4.1] BAG OF WORDS
# + colab_type="code" id="RYdnb55hnTp7" outputId="f885a3d5-2d3f-4171-e722-61c337902549" colab={"base_uri": "https://localhost:8080/", "height": 68}
from sklearn.feature_extraction.text import CountVectorizer
count_vect=CountVectorizer()
xtrainonehotencoding=count_vect.fit_transform(xtrain)
xtestonehotencoding=count_vect.transform(xtest)
xcvonehotencoding=count_vect.transform(xcv)
print(xtrainonehotencoding.shape)
print(xtestonehotencoding.shape)
print(xcvonehotencoding.shape)
# + id="Eb2ThiAVUEBw" colab_type="code" colab={}
vect=CountVectorizer(min_df=10,max_features=50)
xtrainonehotencoding1=vect.fit_transform(xtrain)
xtestonehotencoding1=vect.transform(xtest)
xcvonehotencoding1=vect.transform(xcv)
# + colab_type="code" id="Ay2Vha5SHoY7" colab={}
xtrainonehotencoding11=xtrainonehotencoding1.toarray()
xtestonehotencoding12=xtestonehotencoding1.toarray()
xcvonehotencoding13=xcvonehotencoding1.toarray()
# + colab_type="code" id="BBpNepNmIKIe" outputId="a2a59e0b-5d31-4271-f939-c0114541fcc3" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(type(xtrainonehotencoding11))
# + [markdown] colab_type="text" id="DKgP5yfilfgc"
# ## [4.2] Bi-Grams and n-Grams.
# + colab_type="code" id="PkxOOKhzlfgc" outputId="7bbf87e2-2174-4b71-859a-93f4d0da201f" colab={"base_uri": "https://localhost:8080/", "height": 68}
#bi-gram, tri-gram and n-gram
#removing stop words like "not" should be avoided before building n-grams
# count_vect = CountVectorizer(ngram_range=(1,2))
# please do read the CountVectorizer documentation http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# you can choose these numbers min_df=10, max_features=5000, of your choice
count_vect = CountVectorizer(ngram_range=(1,2), min_df=10, max_features=5000)
final_bigram_counts = count_vect.fit_transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_bigram_counts))
print("the shape of out text BOW vectorizer ",final_bigram_counts.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_bigram_counts.get_shape()[1])
# + [markdown] colab_type="text" id="nF4vm-sRlfgf"
# ## [4.3] TF-IDF
# + colab_type="code" id="lqhf01CWlfgg" outputId="6063e4b4-4f51-48db-fc84-65fdaf94a9e8" colab={"base_uri": "https://localhost:8080/", "height": 68}
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf= TfidfVectorizer()
xtraintfidfencoding=tfidf.fit_transform(xtrain)
xtesttfidfencoding=tfidf.transform(xtest)
xcvtfidfencoding=tfidf.transform(xcv)
print(xtraintfidfencoding.shape)
print(xtesttfidfencoding.shape)
print(xcvtfidfencoding.shape)
# + id="fqlIX_Z_UECG" colab_type="code" colab={}
# use a TfidfVectorizer (not a CountVectorizer) for the reduced TF-IDF feature set
vect=TfidfVectorizer(min_df=10,max_features=50)
xtraintfidfencoding1=vect.fit_transform(xtrain)
xtesttfidfencoding1=vect.transform(xtest)
xcvtfidfencoding1=vect.transform(xcv)
# + colab_type="code" id="WpkJanYsV7KN" colab={}
xtraintfidfencoding11=xtraintfidfencoding1.toarray()
xtesttfidfencoding12=xtesttfidfencoding1.toarray()
xcvtfidfencoding13=xcvtfidfencoding1.toarray()
# + [markdown] colab_type="text" id="a-3iTpLylfgj"
# ## [4.4] Word2Vec
# + colab_type="code" id="lCj148PMlfgk" colab={}
# Train your own Word2Vec model using your own text corpus
i=0
list_of_sentance=[]
for sentance in xtrain:
list_of_sentance.append(sentance.split())
# + colab_type="code" id="aIhn-P8Tlfgm" outputId="fbde3c6a-420c-4de2-cb62-2623643d684f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Using Google News Word2Vectors
# in this project we are using a pretrained model by google
# it's a 3.3GB file; once you load this into your memory
# it occupies ~9GB, so please do this step only if you have >12GB of RAM
# we will provide a pickle file which contains a dict,
# and it contains all our corpus words as keys and model[word] as values
# To use this code-snippet, download "GoogleNews-vectors-negative300.bin"
# from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# it's 1.9GB in size.
# http://kavita-ganesan.com/gensim-word2vec-tutorial-starter-code/#.W17SRFAzZPY
# you can comment this whole cell
# or change these variables according to your need
is_your_ram_gt_16g=True
want_to_use_google_w2v =True
want_to_train_w2v = False
if want_to_train_w2v:
# min_count = 5 considers only words that occured atleast 5 times
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
elif want_to_use_google_w2v and is_your_ram_gt_16g:
if os.path.isfile('GoogleNews-vectors-negative300.bin'):
w2v_model=KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
print(w2v_model.wv.most_similar('great'))
print(w2v_model.wv.most_similar('worst'))
else:
print("you don't have gogole's word2vec file, keep want_to_train_w2v = True, to train your own w2v ")
# + colab_type="code" id="JFyMseLi4un8" outputId="e6745542-fec8-436f-dd48-aaa8df8bb56d" colab={"base_uri": "https://localhost:8080/", "height": 88}
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
# + colab_type="code" id="Xu-f9IAllfgp" outputId="0b67259b-9ecd-45f1-dde6-b275d0f65b53" colab={"base_uri": "https://localhost:8080/", "height": 71}
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:50])
# + [markdown] colab_type="text" id="iODluLuXlfgt"
# ## [4.4.1] Converting text into vectors using Avg W2V, TFIDF-W2V
# + [markdown] colab_type="text" id="hipow2XSlfgu"
# #### [4.4.1.1] Avg W2v
# + colab_type="code" id="5HLCdpHwlfgu" outputId="8ff3e1e3-c04a-4a6b-b20e-dd89ef5c797b" colab={"base_uri": "https://localhost:8080/", "height": 68}
# average Word2Vec
# compute average word2vec for each review.
sent_vectors = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(xtrain): # for each review/sentence
    sent_vec = np.zeros(50) # as word vectors are of length 50, you might need to change this to 300 if you use google's w2v
    cnt_words =0; # num of words with a valid vector in the sentence/review
    for word in sent.split(): # for each word in a review/sentence (split the string, otherwise we would iterate over characters)
        if word in w2v_words:
            vec = w2v_model.wv[word]
            sent_vec += vec
            cnt_words += 1
    if cnt_words != 0:
        sent_vec /= cnt_words
    sent_vectors.append(sent_vec)
print(len(sent_vectors))
print(len(sent_vectors[0]))
# + colab_type="code" id="e9voeToooI3O" outputId="fa33a9e9-c023-44f7-eb5e-17f251c10cf5" colab={"base_uri": "https://localhost:8080/", "height": 68}
# average Word2Vec
# compute average word2vec for each review.
sent_vectorstest = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(xtest): # for each review/sentence
    sent_vec = np.zeros(50) # as word vectors are of length 50, you might need to change this to 300 if you use google's w2v
    cnt_words =0; # num of words with a valid vector in the sentence/review
    for word in sent.split(): # for each word in a review/sentence (split the string, otherwise we would iterate over characters)
        if word in w2v_words:
            vec = w2v_model.wv[word]
            sent_vec += vec
            cnt_words += 1
    if cnt_words != 0:
        sent_vec /= cnt_words
    sent_vectorstest.append(sent_vec)
print(len(sent_vectorstest))
print(len(sent_vectorstest[0]))
# + colab_type="code" id="onnS8vnboJge" outputId="a0ad67d2-9a75-45c3-b1d9-1df766a33a93" colab={"base_uri": "https://localhost:8080/", "height": 68}
# average Word2Vec
# compute average word2vec for each review.
sent_vectorscv = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(xcv): # for each review/sentence
    sent_vec = np.zeros(50) # as word vectors are of length 50, you might need to change this to 300 if you use google's w2v
    cnt_words =0; # num of words with a valid vector in the sentence/review
    for word in sent.split(): # for each word in a review/sentence (split the string, otherwise we would iterate over characters)
        if word in w2v_words:
            vec = w2v_model.wv[word]
            sent_vec += vec
            cnt_words += 1
    if cnt_words != 0:
        sent_vec /= cnt_words
    sent_vectorscv.append(sent_vec)
print(len(sent_vectorscv))
print(len(sent_vectorscv[0]))
# + colab_type="code" id="4CINjhxDoWAH" colab={}
xtrainy=sent_vectors
xtesty=sent_vectorstest
# + [markdown] colab_type="text" id="wDPAYMGvlfg2"
# #### [4.4.1.2] TFIDF weighted W2v
# + colab_type="code" id="266lGFcilfg3" colab={}
model = TfidfVectorizer()
# fit the TF-IDF model on the train split only; fitting on the full corpus would leak
# IDF statistics from the test/cv reviews (see the Data Leakage note in section [5])
xtraintfidfw2v = model.fit_transform(xtrain)
#xtesttfidfw2v=model.transform(xtest)
#xcvtfidfw2v=model.transform(xcv)
tfidf_feat = model.get_feature_names()
dictionary = dict(zip(model.get_feature_names(), list(model.idf_)))
# + colab_type="code" id="ZCYBu_-Blfg7" outputId="7a6fd4c2-e573-44b9-d1b0-4e6266039ab8" colab={"base_uri": "https://localhost:8080/", "height": 34}
xcvtfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(xcv): # for each review/sentence
sent_vec = np.zeros(50)
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent.split(' '): # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
xcvtfidf_sent_vectors.append(sent_vec)
row += 1
# + colab_type="code" id="oXsYnPSfoHGn" outputId="c780ee9f-88de-424d-c087-8f762065f9f9" colab={"base_uri": "https://localhost:8080/", "height": 34}
xtraintfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(xtrain): # for each review/sentence
sent_vec = np.zeros(50)
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent.split(' '): # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
xtraintfidf_sent_vectors.append(sent_vec)
row += 1
# + colab_type="code" id="4pyQUVvvoiZl" outputId="0fb96722-7659-4273-f6fe-8e9db31dfa51" colab={"base_uri": "https://localhost:8080/", "height": 34}
xtesttfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(xtest): # for each review/sentence
sent_vec = np.zeros(50)
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent.split(' '): # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
xtesttfidf_sent_vectors.append(sent_vec)
row += 1
# + [markdown] colab_type="text" id="2dPesZXslfhD"
# # [5] Assignment 3: KNN
# + [markdown] colab_type="text" id="jpkZsO0ZlfhF"
# <ol>
# <li><strong>Apply Knn(brute force version) on these feature sets</strong>
# <ul>
# <li><font color='red'>SET 1:</font>Review text, preprocessed one converted into vectors using (BOW)</li>
# <li><font color='red'>SET 2:</font>Review text, preprocessed one converted into vectors using (TFIDF)</li>
# <li><font color='red'>SET 3:</font>Review text, preprocessed one converted into vectors using (AVG W2v)</li>
# <li><font color='red'>SET 4:</font>Review text, preprocessed one converted into vectors using (TFIDF W2v)</li>
# </ul>
# </li>
# <br>
# <li><strong>Apply Knn(kd tree version) on these feature sets</strong>
# <br><font color='red'>NOTE: </font>sklearn implementation of kd-tree accepts only dense matrices, you need to convert the sparse matrices of CountVectorizer/TfidfVectorizer into dense matrices. You can convert sparse matrices to dense using the .toarray() method. For more information please visit this <a href='https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.sparse.csr_matrix.toarray.html'>link</a>
# <ul>
# <li><font color='red'>SET 5:</font>Review text, preprocessed one converted into vectors using (BOW) but with restriction on maximum features generated.
# <pre>
# count_vect = CountVectorizer(min_df=10, max_features=500)
# count_vect.fit(preprocessed_reviews)
# </pre>
# </li>
# <li><font color='red'>SET 6:</font>Review text, preprocessed one converted into vectors using (TFIDF) but with restriction on maximum features generated.
# <pre>
# tf_idf_vect = TfidfVectorizer(min_df=10, max_features=500)
# tf_idf_vect.fit(preprocessed_reviews)
# </pre>
# </li>
# <li><font color='red'>SET 3:</font>Review text, preprocessed one converted into vectors using (AVG W2v)</li>
# <li><font color='red'>SET 4:</font>Review text, preprocessed one converted into vectors using (TFIDF W2v)</li>
# </ul>
# </li>
# <br>
# <li><strong>The hyper parameter tuning (find best K)</strong>
# <ul>
# <li>Find the best hyper parameter which will give the maximum <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/receiver-operating-characteristic-curve-roc-curve-and-auc-1/'>AUC</a> value</li>
# <li>Find the best hyper parameter using k-fold cross validation or simple cross validation data</li>
# <li>Use gridsearch cv or randomsearch cv or you can also write your own for loops to do this task of hyperparameter tuning (a GridSearchCV sketch follows after this list)</li>
# </ul>
# </li>
# <br>
# <li>
# <strong>Representation of results</strong>
# <ul>
# <li>You need to plot the performance of model both on train data and cross validation data for each hyper parameter, like shown in the figure
# <img src='train_cv_auc.JPG' width=300px></li>
# <li>Once after you found the best hyper parameter, you need to train your model with it, and find the AUC on test data and plot the ROC curve on both train and test.
# <img src='train_test_auc.JPG' width=300px></li>
# <li>Along with plotting ROC curve, you need to print the <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/confusion-matrix-tpr-fpr-fnr-tnr-1/'>confusion matrix</a> with predicted and original labels of test data points
# <img src='confusion_matrix.png' width=300px></li>
# </ul>
# </li>
# <br>
# <li><strong>Conclusion</strong>
# <ul>
# <li>You need to summarize the results at the end of the notebook, summarize it in the table format. To print out a table please refer to this prettytable library<a href='http://zetcode.com/python/prettytable/'> link</a>
# <img src='summary.JPG' width=400px>
# </li>
# </ul>
# </ol>
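#
# As an alternative to the manual for-loops used in the sections below, a minimal GridSearchCV
# sketch for the hyperparameter search (shown for the BOW features built in section [4.1];
# the parameter range and cv value are illustrative):

# +
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = {'n_neighbors': list(range(1, 20, 2))}
grid = GridSearchCV(KNeighborsClassifier(algorithm='brute'),
                    param_grid, scoring='roc_auc', cv=3, n_jobs=-1)
# example usage (uncomment to run the full search, which can be slow on the BOW matrix):
# grid.fit(xtrainonehotencoding, ytrain)
# print(grid.best_params_, grid.best_score_)
# -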
# + [markdown] colab_type="text" id="h_KKyBkolfhG"
# <h4><font color='red'>Note: Data Leakage</font></h4>
#
# 1. There will be an issue of data-leakage if you vectorize the entire data and then split it into train/cv/test.
# 2. To avoid the issue of data-leakage, make sure to split your data first and then vectorize it.
# 3. While vectorizing your data, apply the method fit_transform() on your train data, and apply the method transform() on cv/test data, as shown in the sketch below.
# 4. For more details please go through this <a href='https://soundcloud.com/applied-ai-course/leakage-bow-and-tfidf'>link.</a>
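#
# A minimal sketch of the leakage-free pattern described above: split first, then fit the
# vectorizer on the train split only and merely transform the test split (variable names
# here are illustrative):

# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split

x_tr, x_te = train_test_split(preprocessed_reviews, test_size=0.3, random_state=1)
vectorizer = TfidfVectorizer(min_df=10)
x_tr_vec = vectorizer.fit_transform(x_tr)   # vocabulary and IDF learned from train only
x_te_vec = vectorizer.transform(x_te)       # the same statistics reused on the test split
print(x_tr_vec.shape, x_te_vec.shape)
# -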
# + [markdown] colab_type="text" id="yn4mn23HlfhH"
# ## [5.1] Applying KNN brute force
# + [markdown] colab_type="text" id="8TvNRg20lfhH"
# ### [5.1.1] Applying KNN brute force on BOW,<font color='red'> SET 1</font>
# + colab_type="code" id="bxwvDPY6lfhI" outputId="f3c8c38a-b71a-43a2-f17c-6884ca4e18c0" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='brute')
knnx.fit(xtrainonehotencoding,ytrain)
predict1=knnx.predict_proba(xtrainonehotencoding)[:,1]
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(xcvonehotencoding)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="jDhDN1YHzpZe" outputId="cd6fee0e-6023-4836-940b-e537d8fe908d" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=alpha[optimal_k],algorithm='brute')
knne.fit(xtrainonehotencoding,ytrain)
predictrain=knne.predict(xtrainonehotencoding)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(xtestonehotencoding)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="IC6JQT5kzyMk" outputId="262a512a-8d9d-4288-c6f1-6055eb86a6d2" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN brute force on BOW features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="_aMjw0adnc3l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="f8ce5e16-45ee-4e86-94a8-b545b0805f21"
print('train confusion matrix')
rest=confusion_matrix(ytrain,predictrain)
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="ybe1bqMIlfhK"
# ### [5.1.2] Applying KNN brute force on TFIDF,<font color='red'> SET 2</font>
# + colab_type="code" id="_ysRnbUhlfhL" outputId="aa32275f-f8eb-4510-ab30-d8520ed42376" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,30,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='brute')
knnx.fit(xtraintfidfencoding,ytrain)
predict1=knnx.predict_proba(xtraintfidfencoding)[:,1]
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(xcvtfidfencoding)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="eZ6I8gsCzoWA" outputId="5039a3f4-ae68-440c-d658-c0a1db4071f4" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=9,algorithm='brute')
knne.fit(xtraintfidfencoding,ytrain)
predictrain=knne.predict(xtraintfidfencoding)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(xtesttfidfencoding)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="-Gi2JmEj0t9b" outputId="717785d7-bf17-4f5e-aabd-4ba9fcaa6a4a" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN brute force on TFIDF features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="t0daZDD1xZum" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="07295e56-63ad-4106-9c69-341760aa3190"
print('train confusion matrix')
rest=confusion_matrix(ytrain,predictrain)
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="tsj9D0SSlfhO"
# ### [5.1.3] Applying KNN brute force on AVG W2V,<font color='red'> SET 3</font>
# + colab_type="code" id="LNsBNTnplfhO" outputId="819dea95-8e00-4a8a-9946-d5d18c28e42d" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='brute')
knnx.fit(sent_vectors,ytrain)
predict1=knnx.predict_proba(sent_vectors)[:,1]
predictz=knnx.predict(sent_vectors)
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(sent_vectorscv)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="7yZzlwzz2CsO" outputId="2af72320-f201-49f4-cd72-6c193a6fff49" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=15,algorithm='brute')
knne.fit(sent_vectors,ytrain)
predictrain=knne.predict(sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(sent_vectorstest)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="JxL4GVay2Eyq" outputId="91506045-0ac6-4495-bc58-bc6d0273b5f8" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN brute force on AVG W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="gZ8A7I849dAz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="5cfb3e75-d37b-4bf8-ae86-b0d6daca8071"
print('train confusion matrix')
rest=confusion_matrix(ytrain,predictrain)
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="weCo7MhNlfhT"
# ### [5.1.4] Applying KNN brute force on TFIDF W2V,<font color='red'> SET 4</font>
# + colab_type="code" id="9cyy1zkqlfhW" outputId="0931d76e-db60-41c6-ccd1-8557791f012c" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='brute')
knnx.fit( xtraintfidf_sent_vectors,ytrain)
predict1=knnx.predict_proba( xtraintfidf_sent_vectors)[:,1]
predictz1=knnx.predict(xtraintfidf_sent_vectors)
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba( xcvtfidf_sent_vectors)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="ykHBmJdU2tPB" outputId="7c9b91f2-5ba8-48e5-9c91-a86665daacde" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=19,algorithm='brute')
knne.fit( xtraintfidf_sent_vectors,ytrain)
predictrain=knne.predict( xtraintfidf_sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict( xtesttfidf_sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="UW0mKd_a2vLa" outputId="8e96de66-8aad-4591-ebb4-37553b158482" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN brute force on TFIDF W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="mRJuZPIIPyma" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="76eb61ca-f97e-407d-b1d7-ff377f938352"
#plot the confusion matrix for the train predictions (KNN brute force on TFIDF W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytrain,predictrain)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="3CeCeGjZlfhe"
# ## [5.2] Applying KNN kd-tree
# + [markdown] colab_type="text" id="iXXNWxPDlfhg"
# ### [5.2.1] Applying KNN kd-tree on BOW,<font color='red'> SET 5</font>
# + colab_type="code" id="kIYpnH0blfhk" outputId="f909ee62-e15c-46a9-99ca-d20911dc045d" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(ytrain.shape)
# + colab_type="code" id="CLmr0vK7I2Sk" outputId="22d45451-3390-43a7-c101-1bee106886d6" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(xtrainonehotencoding11.shape)
# + cellView="code" colab_type="code" id="u82l3xRc5H-m" outputId="a0b7ce31-f7d7-4368-a88d-d49a62f0ead9" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='kd_tree')
knnx.fit(xtrainonehotencoding11,ytrain)
predict1=knnx.predict_proba(xtrainonehotencoding11)[:,1]
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(xcvonehotencoding13)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="KSoc7QWz5PRR" outputId="33d97817-39ab-4c87-977a-087e1b3a9d36" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=alpha[optimal_k],algorithm='kd_tree')
knne.fit(xtrainonehotencoding11,ytrain)
predictrain=knne.predict(xtrainonehotencoding11)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(xtestonehotencoding12)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="2mz3kF7I5VK1" outputId="42774c44-917a-4bb4-c42a-c066f21d310b" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN kd-tree on BOW features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="K1Xx5rt_oTwX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="8bb81081-4336-4995-9ae8-371a11d96ebe"
print('train confusion matrix')
rest=confusion_matrix(ytrain,predictrain)
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="jKwI9xo5lfhn"
# ### [5.2.2] Applying KNN kd-tree on TFIDF,<font color='red'> SET 6</font>
# + colab_type="code" id="L-aY3alalfho" outputId="182c75b4-4ea2-4249-f065-9984827e89f7" colab={"base_uri": "https://localhost:8080/", "height": 551}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='kd_tree')
knnx.fit(xtraintfidfencoding11,ytrain)
predict1=knnx.predict_proba(xtraintfidfencoding11)[:,1]
predix=knnx.predict(xtraintfidfencoding11)
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(xcvtfidfencoding13)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="NJgzvC2cWayu" outputId="96c6e0dc-a073-412d-be4c-c89921d66f28" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=19,algorithm='kd_tree')
knne.fit(xtraintfidfencoding11,ytrain)
predictrain=knne.predict(xtraintfidfencoding11)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(xtesttfidfencoding12)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="I30dUCmAWbPY" outputId="c0379747-88e5-48eb-fb89-f347f2fc3223" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN kd-tree on TFIDF features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="SM_Uo2hNHWzF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="4fb56935-fabf-4dee-eb8f-c6a6768972ac"
#plot the confusion matrix for the train predictions (KNN kd-tree on TFIDF features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytrain,predictrain)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="EO2QgFPtlfhr"
# ### [5.2.3] Applying KNN kd-tree on AVG W2V,<font color='red'> SET 7</font>
# + colab_type="code" id="3XHxNtYflfhu" outputId="305591ff-ef9d-47c4-ad15-9725aae63eaa" colab={"base_uri": "https://localhost:8080/", "height": 531}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,10,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='kd_tree')
knnx.fit(sent_vectors,ytrain)
predict1=knnx.predict_proba(sent_vectors)[:,1]
predicq=knnx.predict(sent_vectors)
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba(sent_vectorscv)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots(figsize=(10,8))
ax.plot(alpha,cvscores,label='training')
for i,txt in enumerate(np.round(cvscores,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores[i]))
plt.grid()
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
for i,txt in enumerate(np.round(cvscores1,3)):
ax.annotate((alpha[i],str(txt)), (alpha[i],cvscores1[i]))
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
optimal_k=np.argmax(cvscores1)
print(alpha[optimal_k])
print(cvscores1)
# + colab_type="code" id="cF5Dz2x3Otpo" outputId="8c671467-4b66-44b8-b22e-513f782804df" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=9,algorithm='kd_tree')
knne.fit(sent_vectors,ytrain)
predictrain=knne.predict(sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict(sent_vectorstest)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="4ES_cz-tO2fe" outputId="52fbac19-26d9-40d0-9068-f6db3f58479f" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the train predictions (KNN kd-tree on AVG W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytrain,predictrain)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="RMs8sBydKGlC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="eaab9107-0e42-4b9e-fdba-38393650ce68"
#plot the confusion matrix for the test predictions (KNN kd-tree on AVG W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="Dv1cWk-Xlfhx"
# ### [5.2.4] Applying KNN kd-tree on TFIDF W2V,<font color='red'> SET 8</font>
# + colab_type="code" id="qDN7PvNelfhz" outputId="55f80839-8a9c-44de-baa8-03cb35fb09d7" colab={"base_uri": "https://localhost:8080/", "height": 350}
#with hyper parameter tuning
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
cvscores=[]
cvscores1=[]
alpha=[i for i in range(1,20,2)]
for i in alpha:
knnx=KNeighborsClassifier(n_neighbors=i,algorithm='kd_tree')
knnx.fit( xtraintfidf_sent_vectors,ytrain)
predict1=knnx.predict_proba( xtraintfidf_sent_vectors)[:,1]
predictm=knnx.predict(sent_vectors)
cvscores.append(roc_auc_score(ytrain,predict1))
predict2=knnx.predict_proba( xcvtfidf_sent_vectors)[:,1]
cvscores1.append(roc_auc_score(ycv,predict2))
optimal_k=np.argmax(cvscores1)
fig,ax=plt.subplots()
ax.plot(alpha,cvscores,label='training')
ax.legend()
ax.plot(alpha,cvscores1,label='cross validation')
ax.legend()
plt.xlabel('hyper parameter')
plt.ylabel('auc')
plt.show()
print(cvscores1)
optimal_k=np.argmax(cvscores1)
optimal_k=alpha[optimal_k]
print(optimal_k)
print(cvscores1)
# + colab_type="code" id="LWCXs5ldPHIU" outputId="8bed928a-e4ad-4c91-ca49-ba940e256b72" colab={"base_uri": "https://localhost:8080/", "height": 311}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
knne=KNeighborsClassifier(n_neighbors=19,algorithm='kd_tree')
knne.fit( xtraintfidf_sent_vectors,ytrain)
predictrain=knne.predict( xtraintfidf_sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytrain, predictrain)
auc = metrics.roc_auc_score(ytrain, predictrain)
plt.plot(fpr,tpr,label="roc of train")
plt.legend()
predic=knne.predict( xtesttfidf_sent_vectors)
fpr, tpr, thresh = metrics.roc_curve(ytest, predic)
auc = metrics.roc_auc_score(ytest, predic)
plt.plot(fpr,tpr,label="roc of test)")
plt.legend()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print(roc_auc_score(ytest, predic))
# + colab_type="code" id="NtiuV9KqPP-W" outputId="e3387df7-b159-498c-98f9-d3f0a537aaf7" colab={"base_uri": "https://localhost:8080/", "height": 294}
#plot the confusion matrix for the test predictions (KNN kd-tree on TFIDF W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytest,predic)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + id="0P98B-0yQAYJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="c864b1de-2a30-43e4-aefd-2bb8a7972109"
#plot the confusion matrix for the train predictions (KNN kd-tree on TFIDF W2V features)
from sklearn.metrics import confusion_matrix
rest=confusion_matrix(ytrain,predictrain)
import seaborn as sns
classlabel=['negative','positive']
frame=pd.DataFrame(rest,index=classlabel,columns=classlabel)
sns.heatmap(frame,annot=True,fmt="d")
plt.title("confusion matrix")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.show()
# + [markdown] colab_type="text" id="IsVgh2Wulfh2"
# # [6] Conclusions
# + colab_type="code" id="m1TxPE7xlfh3" outputId="02f7bddf-7eb2-45e1-c172-bab36149d3a5" colab={"base_uri": "https://localhost:8080/", "height": 297}
# Summary of the best hyperparameter (k) and test AUC for each feature set / KNN variant
data = [['brute',13, 0.555], ['brute',9, 0.5],['brute',15, 0.502],['brute',19, 0.65],['kd_tree',19, 0.55],['kd_tree',19, 0.55],['kd_tree',9, 0.549],['kd_tree',19, 0.65]]
pd.DataFrame(data, columns=["model", "hyperparameter",'auc'],index=['bow','tfidf','avg_w2v','tfidf_w2v','bow','tfidf','avg_w2v','tfidf_w2v'])
# + colab_type="code" id="tDFyyBiEqIFD" colab={}
| 61,294 |
/lecture_1/1. SPA RSA Introduction.ipynb | 7d8f85e0ef562b8dd37ff542f40de1afdec288f6 | [] | no_license | hackenbergstefan/securec_ws2021 | https://github.com/hackenbergstefan/securec_ws2021 | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,662 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 1: Breaking RSA with SPA - Introduction
# ## RSA Cryptosystem
#
# RSA (RivestโShamirโAdleman) is a public-key cryptosystem that is widely used for secure data transmission. It is also one of the oldest. The acronym RSA comes from the surnames of Ron Rivest, Adi Shamir, and Leonard Adleman, who publicly described the algorithm in 1977. An equivalent system was developed secretly, in 1973 at GCHQ (the British signals intelligence agency), by the English mathematician Clifford Cocks. That system was declassified in 1997. (from https://en.wikipedia.org/wiki/RSA_(cryptosystem))
#
# ### How it works
#
# #### Parameters
#
# The parameters defining a RSA cryptosystem are given by a few integers:
# * a prime $p$, a prime $q$, the so called _modulus_ $n = p \cdot q$,
# * a _public exponent_ $e$ with $\mathrm{gcd}(e, \phi(n)) = 1$,
# * a _private exponent_ $d$ with $d \cdot e \equiv 1 \mod \phi(n)$.
#
# In real applications $n$ has a bit-length of 2k to 4k and $e$ is often set to the 4th Fermat number $F_4 = 2^{2^4} + 1 = 65537 = 0x10001$.
#
# #### Encryption
#
# Given a message $m \in [0..n]$ the _encryption_ function is given by:
# $$ c := m^e \mod n,$$
# where $c$ is the resulting ciphertext.
#
# #### Decryption
# Given a ciphertext $c \in [0..n]$ the _decryption_ function is given by:
#
# $$ m := c^d \mod n.$$
#
# #### Proof
# Fermat's little theorem ;-)
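# As a quick sanity check, here is a minimal toy example in Python (the parameters are illustrative and far too small for real use; `pow(e, -1, phi)` needs Python 3.8+):
#
# ```python
# p, q = 61, 53
# n = p * q                   # modulus
# phi = (p - 1) * (q - 1)     # Euler's totient of n
# e = 17                      # public exponent, gcd(e, phi) == 1
# d = pow(e, -1, phi)         # private exponent, d * e = 1 mod phi(n)
#
# m = 42                      # message
# c = pow(m, e, n)            # encryption: c = m^e mod n
# assert pow(c, d, n) == m    # decryption recovers the message
# ```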
# ## How to do in C?
#
# In C it's not obvious how to do modular exponentiation. But a well-known algorithm called _Square-and-Multiply_ helps:
#
# ```
# // Calculate x^k
# // b: Binary representation of k
# // res: Result
#
# function bin_exp(x,b)
# res = 1
# for i = n..0
# res = res^2
# if b_i == 1
# res = res * x
# end-if
# end-for
# return res
# end-function
# ```
# (Pseudocode from https://de.wikipedia.org/wiki/Bin%C3%A4re_Exponentiation)
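# For reference, a direct Python transcription of this pseudocode (a sketch; it walks the bits of the exponent from most to least significant) could look like this:
#
# ```python
# def bin_exp(x, k, mod=None):
#     res = 1
#     for bit in bin(k)[2:]:    # binary digits of k, most significant first
#         res = res * res       # square
#         if bit == '1':
#             res = res * x     # multiply
#         if mod is not None:
#             res %= mod
#     return res
#
# assert bin_exp(5, 117, 19) == pow(5, 117, 19)
# ```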
# ## Capture and Attack!
#
# ### Implementation
#
# This leads us to the following concrete implementation in C where we used only integers of size `uint8_t`:
# ```c
# uint8_t exponent = private_exponent;
# uint8_t message = 0xA0;
#
# uint16_t tmp;
# uint8_t result = 1;
# while (exponent)
# {
# if (exponent & 1)
# {
# tmp = result * message;
# result = tmp % modulus;
# }
#
# tmp = message * message;
# message = tmp % modulus;
# exponent >>= 1;
# }
# ```
# <div style="background: #f0ffe0; padding: 15px; border: 1px solid slategray;">
# <div class="h2" style="font-variant: small-caps;">Exercise 1</div>
#
# 1. Explain why the above code works.
# 2. Explain why `tmp` is of type `uint16_t`.
#
# </div>
# ### Record a trace
import securec
import securec.util as util
scope, target = util.init()
securec.util.compile_and_flash('./1_rsa_uint8_fixed.c')
# +
import struct
import time
import warnings
scope.default_setup()
scope.adc.samples = 5000
def capture():
scope.arm()
target.simpleserial_write('r', b'')
return util.capture()
# -
trace = capture()
# +
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
from bokeh.models import CrosshairTool
from bokeh.palettes import Category10_10
output_notebook()
# -
p = figure(width=900, height=800)
p.add_tools(CrosshairTool())
p.line(range(0, len(trace)), trace)
show(p)
# <div style="background: #f0ffe0; padding: 15px; border: 1px solid slategray;">
# <div class="h2" style="font-variant: small-caps;">Exercise 2</div>
#
# 1. Try to explain the picture above! What do you see? Can you tell the exponent? If not, have a look into the code. Do you "see" the exponent now?
# 2. Familiarize yourself with the capture code. You'll need it often...
#
# </div>
util.exit()
| 3,933 |
/News Classification.ipynb | 46612070db7c98c79977f35785f8d05d473cbeef | [] | no_license | gowshi1412/project7th-sem | https://github.com/gowshi1412/project7th-sem | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,532,126 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PIAIC ISLAMABAD CLASS 1
#
# My first Python program in the PIAIC class.
print("hello world")
print("pakistan zinda bad")
Name = "QUART-UL-AIN"
Name = "simba"
Father = "GULZEB AKHTAR"
University = "BAHRIA UNIVERSITY"
message="""
PIAIC ISLAMABAD BTACH3
Name:{}
Father:{}
University:{}
""".format(Name,Father,University)
print(message)
UNIVERSITY = "UNIVERSITY INSTITUTE OF INFORMATION TECHNOLOGY , ISLAMABAD"
Name = input("enter your name: ")
Father = (input("enter your father name: "))
Class = int(input("enter your class: "))
Age = int(input("enter your age: "))
x="""
UNIVERSITY:{}
Name:{}
Class:{}
Father:{}
Age:{}
""".format(UNIVERSITY,Name,Father,Class,Age)
print(x)
| 968 |
/coursera/ai/AssignmentAnomalyDetection.ipynb | c0d4b7797a184be4f3fe4e0196d9ad5fef89df5c | [] | no_license | jyuan0128/developerWorks | https://github.com/jyuan0128/developerWorks | 0 | 0 | null | 2018-02-02T10:07:15 | 2018-01-25T11:11:59 | null | Jupyter Notebook | false | false | .py | 27,136 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 9: RNN, CNN
# >Reference: PyTorch tutorials https://yutaroogawa.github.io/pytorch_tutorials_jp/
# # 80. Conversion to ID numbers
# >We want to assign a unique ID number to each word in the training data built in problem 51. Assign ID 1 to the most frequent word in the training data, ID 2 to the second most frequent, and so on, giving IDs to every word that appears two or more times. Then implement a function that returns the sequence of ID numbers for a given word sequence. The ID number of any word that appears fewer than two times should be 0.
# !head -3 ../Chap8/train.txt
# +
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import collections
import nltk
import torch.optim as optim
import time
nltk.download('punkt')
# +
first_line = True # flag used to skip the 'TITLE CATEGORY' header row
with open('../Chap8/train.txt') as f:
word_count = collections.Counter()
for line in f:
if first_line:
first_line = False
continue
        # some rows do not split cleanly into TITLE and CATEGORY, so branch on the number of fields to avoid errors
if len(line.split('\t')) != 3:
# print('Line excluded.')
continue
_, title, category = line.strip('\n').split('\t')
words = nltk.word_tokenize(title)
word_count.update(words)
dict_word_to_id = {}
c = 1
for word, count in word_count.most_common():
if count >= 2:
dict_word_to_id[word] = c
c+=1
else:
dict_word_to_id[word] = 0
def text_to_ids(text: str) -> list:
return [dict_word_to_id[word] if (word in dict_word_to_id.keys()) else 0 for word in nltk.word_tokenize(text)]
def list_to_tensor(input_data: list):
return torch.tensor([input_data])
# -
input_line = 'Gurlitt Wants to Return Nazi-Looted Art'
print(text_to_ids(input_line))
print(list_to_tensor(text_to_ids(input_line)).topk(1))
# # 81. RNNใซใใไบๆธฌ
# >ID็ชๅทใง่กจ็พใใใๅ่ชๅx=(x1,x2,โฆ,xT)
# ใใ ใ๏ผTใฏๅ่ชๅใฎ้ทใ๏ผxtโโVใฏๅ่ชใฎID็ชๅทใฎone-hot่กจ่จใงใใ๏ผVใฏๅ่ชใฎ็ทๆฐใงใใ๏ผ
# ๅๅธฐๅใใฅใผใฉใซใใใใฏใผใฏ๏ผRNN: Recurrent Neural Network๏ผใ็จใ๏ผๅ่ชๅxใใใซใใดใชyใไบๆธฌใใใขใใซใๅฎ่ฃ
ใใ
# +
def one_hot_vectorizer(text: str):
token_ids = text_to_ids(text)
    vocab_len = max(dict_word_to_id.values()) + 1
tensor = torch.zeros(len(token_ids), vocab_len)
for i, idx in enumerate(token_ids):
tensor[i][idx] = 1
return tensor
class RNN(nn.Module):
def __init__(self, vocab_size, emb_dim, hidden_size, output_size):
super(RNN, self).__init__()
        # set up model attributes
        self.num_layers = 1
        self.hidden_size = hidden_size
        # vocab_size is the vocabulary size and emb_dim the embedding dimension
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        # RNN with emb_dim-dimensional input, a hidden_size-dimensional hidden state and num_layers layers;
        # bias toggles the bias term and nonlinearity selects the activation function (tanh or ReLU)
        self.rnn = nn.RNN(input_size=emb_dim, hidden_size=hidden_size,
                          num_layers=self.num_layers, bias=True, nonlinearity='tanh', batch_first=True)
        # fully connected output layer
self.fc = nn.Linear(hidden_size, output_size, bias=True)
def forward(self, x):
embed = self.embedding(x)
        init_hidden = self.initHidden() # create the h0 vector
output, final_hidden = self.rnn(embed, init_hidden)
        model_output = self.fc(final_hidden[0]) # as instructed in the problem, the final hidden state vector is used
model_output = F.softmax(model_output, dim=-1)
return model_output
def initHidden(self):
return torch.zeros(1, self.num_layers, self.hidden_size)
# +
# map the model output to a category label
def category_from_output(output):
idx = torch.argmax(output).item()
all_categories = {0:'b', 1:'t', 2:'e', 3:'m'}
return all_categories[idx]
params = {
'vocab_size': max(dict_word_to_id.values()) + 1,
'emb_dim': 300,
'hidden_size': 50,
'output_size': 4
}
my_rnn = RNN(**params)
input_x = 'Gurlitt Wants to Return Nazi-Looted Art, Sueddeutsche Reports'
input_x = text_to_ids(input_x)
input_x = list_to_tensor(input_x)
pred_y = my_rnn(input_x)
print(f'Model prediction: {category_from_output(pred_y)}') # note: the model is still untrained, so this prediction is essentially random
print(pred_y)
print('Ground truth: e')
# print(f'topk: {y.dim.topk(1)}')
# -
# # 82. Training with stochastic gradient descent
# >Train the model built in problem 81 using stochastic gradient descent (SGD: Stochastic Gradient Descent). Train the model while displaying the loss and accuracy on the training data and on the evaluation data, and stop at a suitable point (for example after 10 epochs).
# !head -3 ../Chap8/valid.txt
# +
def get_data(file):
titles = []
categories = []
first_line = True
with open(file) as f:
for line in f:
if first_line:
first_line=False
continue
if len(line.split('\t')) != 3:
continue
contents = line.strip('\n').split('\t')
titles.append(contents[1])
categories.append(contents[2])
return titles, categories
def get_accuracy(net, x_list, y_list):
acc = 0.0
for x, y in zip(x_list, y_list):
output = category_from_output(net(list_to_tensor(text_to_ids(x))))
if output == y:
acc += 1.0
return acc / len(x_list)
def get_dataset_acc_loss(net, criterion, x_data, y_data):
y_pred = []
running_loss = 0.0
for x, y in zip(x_data, y_data):
pred = net(list_to_tensor(text_to_ids(x)))
y_pred.append(pred)
running_loss += criterion(pred, category_to_num(y)).item()
acc = 0.0
for pred, true in zip(y_pred, y_data):
if category_from_output(pred) == true:
acc += 1.0
return acc/len(y_data), running_loss/len(y_data)
def category_to_num(category:str):
all_categories = {'b':0, 't':1, 'e':2, 'm':3}
return torch.tensor([all_categories[category]])
def category_to_vec(category:str):
all_categories = {'b':0, 't':1, 'e':2, 'm':3}
vec = torch.zeros(4)
vec[all_categories[category]] = 1.0
return vec
# +
# load the data
train_x, train_y = get_data('../Chap8/train.txt')
valid_x, valid_y = get_data('../Chap8/valid.txt')
# specify the optimizer
optimizer = optim.SGD(my_rnn.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
epoch_size = 10
train_loss = list()
train_acc = list()
valid_loss = list()
valid_acc = list()
for epoch in range(epoch_size): # loop over the epochs
    print(f'epoch{epoch}')
    running_loss = 0.0
    for input_x, label_y in zip(train_x, train_y):
        # print(input_x, label_y)
        # reset the parameter gradients
        optimizer.zero_grad()
        # forward pass
        output = my_rnn(list_to_tensor(text_to_ids(input_x)))
        loss = criterion(output, category_to_num(label_y))
        # backward pass
        loss.backward()
        # update the parameters
        optimizer.step()
        running_loss += loss.item()
    # record training metrics
train_loss.append(running_loss/len(train_x))
train_acc.append(get_accuracy(my_rnn, train_x, train_y))
acc, loss = get_dataset_acc_loss(my_rnn, criterion, valid_x, valid_y)
valid_loss.append(loss)
valid_acc.append(acc)
print('Done.')
# -
from pprint import pprint
pprint(valid_acc)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.figure(figsize=(16, 8))
epoch_size = range(1, 11)
plt.subplot(1,2,1)
plt.plot(epoch_size, train_loss, label='Train Loss')
plt.plot(epoch_size, valid_loss, label='Valid Loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.title('Loss')
plt.legend()
plt.subplot(1,2,2)
plt.plot(epoch_size, train_acc, label='Train Acc')
plt.plot(epoch_size, valid_acc, label='Valid Acc')
plt.xlabel('epoch')
plt.ylabel('Acc')
plt.title('Acc')
plt.legend()
plt.tight_layout()
plt.show();
# -
# # 83. Mini-batch training and training on a GPU
# >Modify the code from problem 82 so that the loss and gradients are computed for every B examples at a time (choose a suitable value for B). Also, run the training on a GPU.
# Running `torch.tensor(train_x_vec)` raises
# `ValueError: expected sequence of length 12 at dim 1 (got 9)`.
# This happens because, given something like `torch.tensor([[1], [2, 0, 2], [8, 8]])`,
# every element first has to be padded to the same length, as in `torch.tensor([[1, 0, 0], [2, 0, 2], [8, 8, 0]])`.
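# One way to do this padding (a sketch using the standard PyTorch utility `torch.nn.utils.rnn.pad_sequence`; the helper name below is made up):
#
# ```python
# from torch.nn.utils.rnn import pad_sequence
#
# def pad_id_lists(id_lists):
#     # pad variable-length ID lists with 0 (the "unknown word" ID) into one LongTensor
#     tensors = [torch.tensor(ids, dtype=torch.long) for ids in id_lists]
#     return pad_sequence(tensors, batch_first=True, padding_value=0)
#
# pad_id_lists([[1], [2, 0, 2], [8, 8]])
# # tensor([[1, 0, 0],
# #         [2, 0, 2],
# #         [8, 8, 0]])
# ```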
def batch_trainee(batch_size):
    # use a DataLoader to handle train_x and train_y together as [[x_vec, y], ... [x_vec, y]]
    train_y_vec = [category_to_num(y) for y in train_y]
    train_x_vec = [text_to_ids(x) for x in train_x]
    dataset = torch.utils.data.TensorDataset(list_to_tensor(train_x_vec), train_y_vec)
    # specify the batch size
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    # specify the optimizer
optimizer = optim.SGD(my_rnn.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
epoch_size = 10
train_loss = list()
train_acc = list()
valid_loss = list()
valid_acc = list()
start_time = time.time()
    for epoch in range(epoch_size): # loop over the epochs
        print(f'epoch{epoch}')
        running_loss = 0.0
        for input_x, label_y in data_loader:
            # print(input_x, label_y)
            # reset the parameter gradients
            optimizer.zero_grad()
            # forward pass
            output = my_rnn(input_x)
            loss = criterion(output, label_y)
            # backward pass
            loss.backward()
            # update the parameters
            optimizer.step()
            running_loss += loss.item()
        run_time = time.time() - start_time
        # record training metrics
train_loss.append(running_loss/len(train_x))
train_acc.append(get_accuracy(my_rnn, train_x, train_y))
acc, loss = get_dataset_acc_loss(my_rnn, criterion, valid_x, valid_y)
valid_loss.append(loss)
valid_acc.append(acc)
print('Done.')
print(f'Run Time : {run_time}')
    print(f'Train_Acc: {np.mean(train_acc)}')  # train_acc is a Python list, so use np.mean
    print(f'Valid_Acc: {np.mean(valid_acc)}')
batch_trainee(100)
train_x_vec = [text_to_ids(input_x) for input_x in train_x]
list_to_tensor(train_x_vec)
| 9,762 |
/topic-modeling-for-custom-data.ipynb | d2d83ac1bc611bfc4972f2c902d8b4694ad10d3f | [
"CC-BY-4.0"
] | permissive | gmorse11/intro_text_mining | https://github.com/gmorse11/intro_text_mining | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 15,843 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SPARQL Index Pipeline Dev
# +
import sys
# !{sys.executable} -m pip install SPARQLWrapper
from SPARQLWrapper import SPARQLWrapper, JSON
import pandas as pd
import numpy as np
import datetime
# import func_lib  # optional helper module referenced by applyFunction; not imported here
endpoint_url = "https://query.wikidata.org/sparql"
item = "item"
class Relation:
"""
The class returned when createRelation is called.
    It holds the SPARQL query as a string field.
    Call Relation.query() when the query needs to be executed.
"""
def __init__(self, entity_id: str, property_id: str, isSubject: bool, rowVerbose: bool,
colVerbose: bool, time_property: str, time: str, name: str, label: bool, limit=10000):
self.entity_id = entity_id
self.query_str = ""
self.dic = {}
self.result_dic = {"Entity ID": []}
self.df = pd.DataFrame()
self.count = 0
self.time_property = time_property
self.time = time
self.limit = limit
self.focus = "Entity ID"
if property_id:
self.extend(property_id, isSubject, name, rowVerbose, colVerbose, limit, time_property, time, label)
def generate_html(self, name: str):
html = (self.df).to_html()
text_file = open(name, "w", encoding='utf-8')
text_file.write(html)
text_file.close()
def query(self, require=None):
if self.query_str == "":
self.result_dic = {"Entity ID": ['http://www.wikidata.org/entity/' + str(self.entity_id)]}
return self.result_dic
results = get_results(endpoint_url, self.query_str)
result_dict = {"Entity ID": ['http://www.wikidata.org/entity/' + str(self.entity_id)]}
for i in range(1, self.count + 1):
result_dict[self.dic[i]["name"] + '_' + self.dic[i]['property_id']] = []
if self.dic[i]["colVerbose"]:
result_dict[self.dic[i]["name"] + '_rank_' + self.dic[i]['property_id'] + '_rank'] = []
for key, value in self.dic[i]["property_name_dic"].items():
result_dict[
self.dic[i]["name"] + "_" + value + '_' + self.dic[i]['property_id'] + '_' + str(key)] = []
for key, value in self.dic[i]["ref_dic"].items():
result_dict[self.dic[i]["name"] + "_ref_" + self.dic[i]['property_id'] + '_' + str(key)] = []
if self.dic[i]["label"]:
result_dict[self.dic[i]["name"] + '_' + self.dic[i]['property_id'] + 'Label'] = []
for result in results['results']['bindings']:
for key, value in result_dict.items():
if key in result.keys():
result_dict[key].append(result[key]['value'])
else:
result_dict[key].append('NA')
result_dict["Entity ID"] = ['http://www.wikidata.org/entity/' + str(self.entity_id)] * len(
result_dict[self.dic[self.count]["name"] + '_' + self.dic[self.count]["property_id"]])
self.result_dic = result_dict
self.df = pd.DataFrame.from_dict(self.result_dic)
for i in range(1, self.count + 1):
if self.dic[i]["colVerbose"] and not self.dic[i]["rowVerbose"]:
col = self.dic[i]['name'] + '_rank_' + self.dic[i]['property_id'] + '_rank'
if any(self.df[col] == 'http://wikiba.se/ontology#PreferredRank'):
self.df = self.df.loc[self.df[col] == 'http://wikiba.se/ontology#PreferredRank']
else:
self.df = self.df.loc[self.df[col] == 'http://wikiba.se/ontology#NormalRank']
# if require is not None:
# for r in require:
# self.df = self.df.loc[self.df[r] != 'NA']
self.df = pd.DataFrame(data=self.df)
# if self.df.shape[0] >= 10000:
# print("Warning: Your query leads to too many results. Only 10,000 returned.")
return self.df
def extend(self, property_id: str, isSubject: bool, name: str, rowVerbose=False, colVerbose=False, limit=None,
time_property=None, time=None, search=None, label=False):
self.count += 1
self.dic[self.count] = {}
self.dic[self.count]["name"] = name
self.dic[self.count]["focus"] = self.focus
self.dic[self.count]["property_id"] = property_id
self.dic[self.count]["isSubject"] = isSubject
self.dic[self.count]["limit"] = limit
self.dic[self.count]["rowVerbose"] = rowVerbose
self.dic[self.count]["colVerbose"] = colVerbose
self.dic[self.count]['time_property'] = time_property
self.dic[self.count]['time'] = time
self.dic[self.count]['search'] = search
self.dic[self.count]['label'] = label
if rowVerbose or colVerbose:
self.dic[self.count]["property_name_dic"], self.dic[self.count][
"ref_dic"] = self.search_property_for_verbose()
if time_property and time:
self.time_property = time_property
self.time = time
if limit:
self.limit = limit
self.query_str = self.define_query_relation()
def changeFocus(self, name="Entity ID"):
self.focus = name
def applyFunction(self, objcolumn, func, name):
if type(func) == str:
if func.startswith('F'):
try:
func_id = int(func[1:])
if func_id == 0:
self.df[name] = self.df[objcolumn]
else:
if func_id >= func_lib.func_num():
print("Not available.")
else:
self.df[name] = self.df[objcolumn].apply(func_lib.func_list[func_id])
except:
raise Exception("Not a valid function id, a valid function id should be 'Fn', n is an integer.")
else:
raise Exception("Not a valid function id, a valid function id should be 'Fn', n is an integer.")
else:
self.df[name] = self.df[objcolumn].apply(func)
def define_query_relation(self):
rdf_triple, time_filter, limit_statement = """""", """""", """"""
if self.count < 1:
return None
focusChanges = 0
for i in range(1, self.count + 1):
if self.dic[i]["rowVerbose"] or self.dic[i]["colVerbose"]:
if self.dic[i]["search"] is None and not self.dic[i]["isSubject"]:
rdf_triple += """OPTIONAL {"""
if self.dic[i]["focus"] == "Entity ID":
# if self.dic[i]["search"] is None:
# rdf_triple += """OPTIONAL {"""
rdf_triple += """wd:""" + self.entity_id + """ p:""" + self.dic[i][
'property_id'] + """ ?statement_""" + str(i) + """. """ \
+ """?statement_""" + str(i) + """ ps:""" + self.dic[i][
'property_id'] + """ ?""" + \
self.dic[i]['name'] \
+ """_""" + self.dic[i]['property_id'] + """. """
else:
rdf_triple += """?""" + self.dic[i]["focus"] + """ p:""" + self.dic[i][
'property_id'] + """ ?statement_""" + str(i) + """. """ \
+ """?statement_""" + str(i) + """ ps:""" + self.dic[i][
'property_id'] + """ ?""" + \
self.dic[i]['name'] \
+ """_""" + self.dic[i]['property_id'] + """. """
for key, value in self.dic[i]["property_name_dic"].items():
rdf_triple += """OPTIONAL { """ + """?statement_""" + str(i) + """ pq:""" + str(key) \
+ """ ?""" + self.dic[i]['name'] + """_""" + value + """_""" + self.dic[i][
'property_id'] + """_""" + str(key) + """.} """
for key, value in self.dic[i]["ref_dic"].items():
rdf_triple += """OPTIONAL { ?statement_""" + str(
i) + """ prov:wasDerivedFrom ?refnode_""" + str(
i) + """. ?refnode_""" + str(i) \
+ """ pr:""" + str(key) + """ ?""" + self.dic[i]['name'] + """_ref_""" + \
self.dic[i][
'property_id'] + """_""" + str(key) + """.} """
rdf_triple += """OPTIONAL { ?statement_""" + str(i) + """ wikibase:rank ?""" + self.dic[i][
'name'] + """_rank_""" + self.dic[i]['property_id'] + """_rank. } """
# none-verbose version
else:
if self.dic[i]["focus"] == "Entity ID":
if self.dic[i]["isSubject"]:
# if self.dic[i]["search"] is None:
# rdf_triple += """OPTIONAL {"""
rdf_triple += """?""" + self.dic[i]["name"] + """_""" + self.dic[i][
'property_id'] + """ wdt:""" + self.dic[i][
"property_id"] + """ wd:""" + self.entity_id + """. """
else:
if self.dic[i]["search"] is None:
rdf_triple += """OPTIONAL {"""
rdf_triple += """wd:""" + self.entity_id + """ wdt:""" + self.dic[i][
"property_id"] + """ ?""" + \
self.dic[i]["name"] + """_""" + self.dic[i]['property_id'] + """. """
else:
if self.dic[i]["isSubject"]:
# if self.dic[i]["search"] is None:
# rdf_triple += """OPTIONAL {"""
rdf_triple += """?""" + self.dic[i]["name"] + """_""" + self.dic[i][
'property_id'] + """ wdt:""" + self.dic[i]["property_id"] + """ ?""" + self.dic[i][
'focus'] + """. """
else:
if self.dic[i]["search"] is None:
rdf_triple += """OPTIONAL {"""
rdf_triple += """?""" + self.dic[i]['focus'] + """ wdt:""" + self.dic[i][
"property_id"] + """ ?""" + self.dic[i]["name"] + """_""" + self.dic[i][
'property_id'] + """. """
if not self.dic[i]["isSubject"]:
if i < self.count and self.dic[i]["focus"] != self.dic[i + 1]["focus"] and self.dic[i]["search"] is None:
focusChanges += 1
elif self.dic[i]["search"] is None:
rdf_triple += """} """
for i in range(focusChanges):
rdf_triple += """} """
for i in range(1, self.count + 1):
if self.dic[i]['search'] is not None and self.dic[i]["search"] != '!NA':
if isinstance(self.dic[i]['search'], tuple):
if isinstance(self.dic[i]['search'][0], str):
rdf_triple += """FILTER (YEAR(?""" + self.dic[i]['name'] + """_""" + self.dic[i][
'property_id'] + """) >= """ + \
self.dic[i]['search'][0] + """ && YEAR(?""" + self.dic[i]['name'] + \
"""_""" + self.dic[i]['property_id'] + """) <= """ + self.dic[i]['search'][
1] + """) """
else:
rdf_triple += """FILTER (?""" + self.dic[i]['name'] + """_""" + self.dic[i]['property_id'] + \
""" >= """ + str(self.dic[i]['search'][0]) + """ && ?""" + self.dic[i]['name'] + \
"""_""" + self.dic[i]['property_id'] + """ <= """ + str(
self.dic[i]['search'][1]) + """) """
else:
rdf_triple += """FILTER (?""" + self.dic[i]['name'] + """_""" + self.dic[i][
'property_id'] + """ = """ + \
"""wd:""" + self.dic[i]['search'] + """) """
if self.time_property is not None:
time_filter = """?""" + self.dic[1]["name"] + """ p:""" + self.time_property + """ ?pubdateStatement.
?pubdateStatement ps:""" + self.time_property + """ ?date
FILTER (YEAR(?date) = """ + self.time + """)"""
if self.limit is not None:
limit_statement = """LIMIT """ + str(self.limit)
label_statement = """Service wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" }"""
query = """SELECT DISTINCT"""
for i in range(1, self.count + 1):
if self.dic[i]["rowVerbose"] or self.dic[i]["colVerbose"]:
query += """ ?""" + self.dic[i]["name"] + """_""" + self.dic[i]['property_id']
if self.dic[i]["label"]:
query += """ ?""" + self.dic[i]["name"] + """_""" + self.dic[i]['property_id'] + """Label"""
for key, value in self.dic[i]["property_name_dic"].items():
query += """ ?""" + self.dic[i]["name"] + """_""" + value + """_""" + self.dic[i][
'property_id'] + """_""" + str(key)
for key, value in self.dic[i]["ref_dic"].items():
query += """ ?""" + self.dic[i]["name"] + """_ref_""" + self.dic[i]['property_id'] + """_""" + str(
key)
query += """ ?""" + self.dic[i]["name"] + """_rank_""" + self.dic[i]['property_id'] + """_rank"""
else:
query += """ ?""" + self.dic[i]["name"] + """_""" + self.dic[i]['property_id']
if self.dic[i]["label"]:
query += """ ?""" + self.dic[i]["name"] + """_""" + self.dic[i]['property_id'] + """Label"""
query += """ WHERE {""" + rdf_triple + time_filter + label_statement + """} """ + limit_statement
return query
def search_property_for_verbose(self):
property_to_name = {}
ref_to_name = {}
rdf_triple, time_filter, limit_statement = """""", """""", """"""
if self.dic[self.count]["rowVerbose"] or self.dic[self.count]["colVerbose"]:
for i in range(1, self.count):
if self.dic[i]["focus"] == "Entity ID":
if self.dic[i]["isSubject"]:
rdf_triple += """?""" + self.dic[i]["name"] + """ wdt:""" + self.dic[i][
"property_id"] + """ wd:""" + self.entity_id + """ ."""
else:
rdf_triple += """wd:""" + self.entity_id + """ wdt:""" + self.dic[i]["property_id"] + """ ?""" + \
self.dic[i]["name"] + """ ."""
else:
last = self.dic[i]["focus"].rfind('_')
focus = self.dic[i]["focus"][:last]
if self.dic[i]["isSubject"]:
rdf_triple += """?""" + self.dic[i]["name"] + """ wdt:""" + self.dic[i][
"property_id"] + """ ?""" + focus + """ ."""
else:
rdf_triple += """?""" + focus + """ wdt:""" + self.dic[i][
"property_id"] + """ ?""" + self.dic[i]["name"] + """ ."""
if self.dic[self.count]["focus"] == "Entity ID":
rdf_triple += """wd:""" + self.entity_id + """ p:""" + self.dic[self.count][
'property_id'] + """ ?statement.""" + \
"""?statement """ + """ps:""" + self.dic[self.count]['property_id'] + """ ?item.""" + \
"""?statement """ + """?pq """ + """?obj.""" + \
"""?qual wikibase:qualifier ?pq.""" + \
"""OPTIONAL{ ?statement prov:wasDerivedFrom ?refnode. ?refnode ?pr ?r.}"""
else:
last = self.dic[self.count]["focus"].rfind('_')
focus = self.dic[self.count]["focus"][:last]
rdf_triple += """?""" + focus + """ p:""" + self.dic[self.count][
'property_id'] + """ ?statement.""" + \
"""?statement """ + """ps:""" + self.dic[self.count]['property_id'] + """ ?item.""" + \
"""?statement """ + """?pq """ + """?obj.""" + \
"""?qual wikibase:qualifier ?pq.""" + \
"""OPTIONAL{ ?statement prov:wasDerivedFrom ?refnode. ?refnode ?pr ?r.}"""
if self.time_property is not None:
time_filter = """?""" + self.dic[1]["name"] + """ p:""" + self.time_property + """ ?pubdateStatement.
?pubdateStatement ps:""" + self.time_property + """ ?date
FILTER (YEAR(?date) = """ + self.time + """)"""
if self.limit is not None:
limit_statement = """LIMIT """ + str(self.limit)
label_statement = """Service wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" }"""
query = """SELECT DISTINCT """
if self.dic[self.count]["rowVerbose"] or self.dic[self.count]["colVerbose"]:
query += """?item""" + """ ?qual""" + """ ?qualLabel""" + """ ?obj """ + """?pr ?prLabel"""
query += """ WHERE {""" + rdf_triple + time_filter + label_statement + """} """ + limit_statement
query_result = get_results(endpoint_url, query)
for result in query_result['results']['bindings']:
if 'qual' in result:
property_to_name[result['qual']['value'].split('/')[-1]] = result['qualLabel']['value'].replace(' ',
'_')
if 'pr' in result:
ref_to_name[result['pr']['value'].split('/')[-1]] = result['prLabel']['value'].replace(' ', '_')
else:
query += """?""" + self.dic[self.count]["name"] + """ """
return property_to_name, ref_to_name
def __str__(self):
return str(self.df)
def __getattr__(self, col_name):
if col_name in self.df.columns:
return self.df[col_name]
else:
print(col_name + " has not been found.")
return None
def createRelation(entity_id: str, property_id=None, isSubject=None, rowVerbose=None, colVerbose=None,
time_property=None, time=None, name=None, label=False, limit=None):
if property_id and not name:
print("Please specify the name of the first column")
return None
return Relation(entity_id, property_id, isSubject, rowVerbose, colVerbose, time_property, time, name, label, limit)
def get_Firstname(name: str):
return name.split(' ')[0]
def get_Lastname(name: str):
return name.split(' ')[-1]
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def get_results(endpoint_url, query):
user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
# TODO adjust user agent; see https://w.wiki/CX6
sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
return sparql.query().convert()
def get_name(id: str):
query = """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX wd: <http://www.wikidata.org/entity/>
select *
where {
wd:""" + id + """ rdfs:label ?label .
FILTER (langMatches( lang(?label), "EN" ) )
}
LIMIT 1"""
results = get_results(endpoint_url, query)
result = ''
for res in results["results"]["bindings"]:
result = res['label']['value']
return result
# -
# # Query US Gov Buildings
# ### Find every direct subclass of `government building` in the world.
gov_building_class = 'Gov_Building'
qnum = 'Q16831714' # qnum of government building
r = createRelation(qnum, label=True)
r.extend('P279', True, gov_building_class, label=True) # extend via property P279 = is subclass of
r.query()
r.df
# ### Find every instance of each of these subclasses of government buildings.
#
# I don't actually need to do the operations to check whether lon and lat are in the US because these instances tend to have country as an attribute.
#
# ### Concatenate all these dataframes together.
#
# Maybe when this is working for real, it is likely we would publish each individual subclass's dataframe to the KNP individually and then union them as another step later.
# +
column_names = [
'Entity ID', 'gov_building_subclass_P31',
'gov_building_subclass_P31Label', 'Country_P17', 'Country_P17Label',
'State_P131', 'State_P131Label', 'Lon_Lat_P625'
]
df_total = pd.DataFrame(columns=column_names)
for ind in range(len(r.df)):
gov_building_subclass = r.df.Gov_Building_P279Label[ind]
qnum = r.df.Gov_Building_P279[ind].split('/')[-1]
r2 = createRelation(qnum, label=True)
r2.extend('P31', True, 'gov_building_subclass', label=True) # extend via property P31 = is instance of
r2.changeFocus('gov_building_subclass_P31')
r2.extend('P17', False, 'Country',label=True, search="Q30") # extend via property P17 = is in country
r2.extend('P131', False, 'State', label=True)
r2.extend('P625', False, 'Lon_Lat')
r2.query()
df2 = r2.df
df2['building_type_label'] = [gov_building_subclass for _ in range(len(df2))]
print('There are %s instances of %s in the US.' % (str(len(df2)), gov_building_subclass))
df_total = pd.concat([df_total, df2], axis=0, ignore_index=True)
# -
df_result = df_total.rename(columns={"Entity ID": "building_type",
"gov_building_subclass_P31": "building",
"gov_building_subclass_P31Label": "building_label",
"Country_P17": "country",
"Country_P17Label":"country_label",
"State_P131":"administrative_entity",
"State_P131Label": "administrative_entity_label",
"Lon_Lat_P625": "lon_lat",
})
# select the renamed columns (this list is reconstructed here; the original cell referenced an undefined `columns` variable)
columns = ['building_type', 'building', 'building_label', 'building_type_label',
           'country', 'country_label', 'administrative_entity',
           'administrative_entity_label', 'lon_lat']
df_result = df_result[columns]
df_result
| 22,834 |
/.ipynb_checkpoints/hospitals-checkpoint.ipynb | b2324e09531b44e4fe242373e8f5d6c774da350f | [] | no_license | corneliusagrippa/hospitals | https://github.com/corneliusagrippa/hospitals | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 750,133 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib as plt
# %matplotlib inline
hospitals_data = pd.read_csv('hospitals.csv')
hospitals_data
# -
hospitals_data.index = np.arange(1, len(hospitals_data)+1)
hospitals_data
hospitals_data.loc[1]  # .ix is deprecated; use label-based .loc
hospitals_data.sort_values(by = 'City', ascending = True)
hospitals_data.sort_values(by = 'State', ascending = True)
hospitals_city = hospitals_data.groupby(['City'])[['City']].count()
hospitals_city
hospitals_name = hospitals_data.groupby(['Hospital Name'])[['Hospital Name']].count()
hospitals_name
hospitals_state = hospitals_data.groupby(['State'])[['State']].count()
hospitals_state
hospitals_state.plot(kind='pie', autopct = '%.00f', subplots = True, figsize= (18, 18), fontsize = 10)
hospitals_state.plot(kind = 'bar', figsize= (12, 8), color = 'violet')
hospitals_state.sum(axis=0)
len(hospitals_data)
len(hospitals_data.columns)
hospitals_data.City.value_counts()
hospitals_data.State.value_counts()
hospitals_data.State.describe()
hospitals_data.rename(columns={'Hospital Type': 'hospital_type'}, inplace=True)
hospitals_data
hospitals_data.hospital_type.value_counts()
state = hospitals_data[hospitals_data.State == 'MA']
state
len(state)
state.loc[1966]  # .ix is deprecated; use label-based .loc
state.City.value_counts()
state.hospital_type.value_counts()
state.to_csv('state_test.csv', sep='\t', encoding='utf-8')
state.head()
| 1,644 |
/bilder/Natural Language Processing/Vector model and methods for reducing dimensionality in it. Information Search. Thematic Modeling (LSA, LDA, HDP) paraphrase.ipynb | 2f3719559d5eb09f8e3134c4dba4f26f7acf070f | [
"MIT"
] | permissive | sibalex/introduction_neural_network | https://github.com/sibalex/introduction_neural_network | 2 | 1 | MIT | 2023-07-06T21:27:28 | 2021-12-13T18:45:41 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 14,463 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Paraphrase
#
# Let's load the [data](http://paraphraser.ru/download/get?file_id=1).
# !unzip paraphraser.zip
with open('paraphrases.xml') as f:
data = f.read()
# Here is what it looks like:
# !tail paraphrases.xml
# ### Build the training dataset
import pandas as pd
from xml.etree import ElementTree
root = ElementTree.XML(data)
par_data = {'text1': [], 'text2': [], 'class': []}
for par in root[1]:
par_data['text1'].append(par[3].text)
par_data['text2'].append(par[4].text)
par_data['class'].append(int(par[6].text))
parphrase_df = pd.DataFrame(par_data)
parphrase_df.head(10)
# And now let's:
# * vectorize each document
# * compute the distance between each pair
# * train a classifier on that
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vec = TfidfVectorizer()
text1_vecs = tfidf_vec.fit_transform(parphrase_df.text1)
text2_vecs = tfidf_vec.transform(parphrase_df.text2)
from sklearn.decomposition import TruncatedSVD
from gensim import similarities
index = similarities.MatrixSimilarity(text1_vecs)
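# A minimal sketch of the remaining steps (the pairwise cosine similarity is used as a single feature and a logistic regression is trained on it; names and parameters are illustrative):
#
# ```python
# import numpy as np
# from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
#
# # TF-IDF vectors are L2-normalised by default, so the row-wise dot product is the cosine similarity
# sims = np.asarray(text1_vecs.multiply(text2_vecs).sum(axis=1)).ravel()
# X = sims.reshape(-1, 1)
# y = parphrase_df['class'].values
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
# print(accuracy_score(y_test, clf.predict(X_test)))
# ```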
| 1,331 |
/Taxi tip prediction.ipynb | 9e6c381324250a4498b0c1095dad9072d34acb9f | [] | no_license | RosaChaves/NYC-taxi-tip-predictor | https://github.com/RosaChaves/NYC-taxi-tip-predictor | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 212,955 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# @author: Rosa Chaves
# # TASK 1: DATA EXPLORATION AND CLEANING
# # 1. DOWNLOAD AND ASSESS THE DATA
# Yellow cab data from the months of March, June and November (2017) has been used. Data from these files is concatenated in the dataframe named "df". First lines of raw data are shown.
#
# +
from glob import glob
import os
import sys
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(font='sans')
all_files=glob("/Users/rosa/Desktop/CARTOTask/*.csv") #it grabs all the csv files from the directory
f1=all_files[0]
f2=all_files[1]
f3=all_files[2]
df=pd.concat(pd.read_csv(f) for f in all_files)
df.head() #it shows the first lines of the data
# -
# # 2. IDENTIFY AND DOCUMENT ANY ISSUES WITH THE DATA
# In order to make a first exploration of the data, some bar diagrams are shown.
# For example, as far as the vendor is concerned, the number of trips is similarly distributed.
ax = df.groupby(['VendorID']).size().plot(kind='bar')
ax.set_xlabel('vendor_id', fontsize=18)
ax.set_ylabel('Number of trips', fontsize=18)
ax.tick_params(labelsize=12)
# With regard to "payment_type", most records come from credit card payments, followed by cash. It is interesting to observe that the payment type "card" has a recorded tip amount, while this feature is always 0 in the case of cash. We will use this information for building and testing the model as explained in the next sections. In reality, unreported cash tips may point to fraud.
# +
ax = df.groupby(['payment_type']).size().plot(kind='bar')
ax.set_xlabel('payment_type', fontsize=18)
ax.set_ylabel('Number of trips', fontsize=18)
ax.tick_params(labelsize=12)
# -
# Below is a description of the raw data with some statistical measurements such as the mean, std, min, max, quantiles, etc.
df.describe()
# For example, the toll amount shows a variability of 6.32 dollars, which suggests a possible correlation with the pickup and dropoff regions (PULocationID, DOLocationID) that could be studied. Maybe if the toll is high, the tip decreases (things like that).
tolls = df.groupby(['tolls_amount']).size()
tolls.describe()
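# A hypothetical follow-up to explore that correlation, averaging tolls and tips per (pickup, dropoff) pair:
#
# ```python
# (df[df.tolls_amount > 0]
#    .groupby(['PULocationID', 'DOLocationID'])[['tolls_amount', 'tip_amount']]
#    .mean()
#    .sort_values('tolls_amount', ascending=False)
#    .head())
# ```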
# The improvement surcharge of 0.30 dollars is assessed on trips at the flag drop. The improvement surcharge began being levied in 2015. As the data is from 2017, every trip should have either 0.30 or 0 (possible fraud otherwise). The rest of the values must be cleaned in the following section.
isurcharge = df.groupby(['improvement_surcharge']).size()
isurcharge
# Store_and_fwd_flag is a feature that can be eliminated as it does not add any interesting information for the model (it is always "no")
# +
ax = df.groupby(['store_and_fwd_flag']).size().plot(kind='bar')
ax.set_xlabel('storefwdflag', fontsize=18)
ax.set_ylabel('Number of trips', fontsize=18)
ax.tick_params(labelsize=12)
# -
fares=df.groupby(['fare_amount']).size()
# Fare_amount contains negative values. This must be cleaned in the next section
mtax=df.groupby(['mta_tax']).size()
mtax.head()
# As far as mta_tax is concerned, 0.5 dollars is automatically triggered for the metered rate in use. In order to construct a clean model in which a correlation with the trip duration, pickup and dropoff regions, etc. can exist, only the value 0.5 will be considered. More details are given in the cleaning part.
# # Date and time obtaining
# The date and time are not easy to work with in their raw format. The function calculate_datetime_extra obtains the pickup and dropoff hour (this can be interesting for further studies of busy hours in New York). The month is also interesting (maybe in a summer month there is less traffic, or the tips are higher for some reason; these correlations could be studied in more detail, for instance).
# +
column_pickup='tpep_pickup_datetime'
column_dropoff='tpep_dropoff_datetime'
def calculate_datetime_extra(column_pickup, column_dropoff):
rng=pd.DataFrame()
rng['date']=df[column_pickup]
df['Time'] = pd.to_datetime(rng['date'])
month1=df['Time'].dt.month
day1=df['Time'].dt.day
hour1=df['Time'].dt.hour
minute1=df['Time'].dt.minute
rng['date']=df[column_dropoff]
df['Time'] = pd.to_datetime(rng['date'])
month2=df['Time'].dt.month
day2=df['Time'].dt.day
hour2=df['Time'].dt.hour
minute2=df['Time'].dt.minute
newdate = pd.concat([hour1,minute1,hour2,minute2,month1],axis=1, join='inner')
newdate.columns=['hpick','mpick','hdrop','mdrop','month']
return newdate
# -
times=calculate_datetime_extra(column_pickup, column_dropoff)
times.head(2)
# This new feature will be added to the cleaned dataset: trip duration. Maybe a busy executive appreciates arriving at their destination in a shorter time.
#this function calculates the duration of a taxi ride in minutes
def calculate_duration(times):
duration=abs(times['hdrop']-times['hpick'])*60+abs(times['mdrop']-times['mpick'])
nextday = ((times['hpick'] >12) & (times['hdrop'] < 12))
duration[nextday]=abs(times['hdrop']+24-times['hpick'])*60+abs(times['mdrop']-times['mpick'])
return duration
duration=calculate_duration(times)
durationdate=pd.concat([times,duration],axis=1, join='inner')
durationdate.columns=['hpick','mpick','hdrop','mdrop','month','duration']
durationdate.head(3)
newdf=pd.concat([durationdate,df],axis=1, join='inner')
newdf.head(2)
# # 3. DOCUMENT HOW YOU RESOLVED THESE ISSUES
# Taking into account all the analysis done above, we can clean the database with the ranges explained in the following. In order to construct a reliable model, I will only use the card payment_type because the tip is registered for it. In a test phase, I will use the cash data to look at performance results, possible causes of fraud, etc.
# +
payment_type = (newdf.payment_type == 1) #payment_type=2 (cash) has a tip of 0 always, so it will add noise to the model
fare_amount = ((newdf.fare_amount >= 5.0) & (newdf.fare_amount <= 500.0))
surcharge = ((newdf.improvement_surcharge == 0.0) | (newdf.improvement_surcharge == 0.3))
mta_tax = (newdf.mta_tax == 0.5)
tip_amount = ((newdf.tip_amount >= 0.0) & (newdf.tip_amount <= 100.0))
tolls_amount = ((newdf.tolls_amount >= 0.0) & (newdf.tolls_amount <= 30.0))
newdf.describe()
# -
# In the clean dataframe named "newdf", columns such as the pickup/dropoff minutes, store_and_fwd_flag, etc. will not be considered. The month, duration and pickup/dropoff hour will be added. The day information has not been added as I am not considering holidays, weekends or workdays in my studies; undoubtedly this could be interesting to take into account in the future.
# +
# Let's save it in another variable.
newdf.drop(newdf.columns[[1,3,7,8,12,23]],axis=1,inplace=True)
data_aux = newdf[payment_type & fare_amount & surcharge & mta_tax & tip_amount & tolls_amount]
payment_type = None
fare_amount = None
surcharge = None
mta_tax = None
tip_amount = None
tolls_amount = None
data_aux.head(3)
# -
# # TASK 2: DATA SUMMARY
# Taking into account all the comments above, we can see that the different features now show a better structured and cleaner statistical summary.
data_aux.describe()
# # Map representation
# As previously commented, location information is important to understand which busy regions concentrate most of the traffic: for this database Manhattan seems to be the borough where most pickups and dropoffs happen.
ax = df.groupby(['PULocationID']).size()
newax=(ax>=1159313) #the most repeated PULocationIDs, covering roughly 50% of the histogram
ax[newax]
ax2 = df.groupby(['DOLocationID']).size()
ax2.describe()
newax2=(ax2>=1090510)
ax2[newax2]
# Location 236 (the most repeated DOLocationID) and 237 (the most repeated PULocationID) correspond to Manhattan (as shown below in the taxi zones shapefile). We depict on a map the areas inside the Manhattan location.
import geopandas as gpd
gdf = gpd.read_file('/Users/rosa/Desktop/CARTOTask/taxi_zones.shp')
print (gdf)
# +
import matplotlib
# %matplotlib inline
gdf.plot()
# -
# Plot the areas in the Manhattan region (where most of the taxi traffic happens)
gdf = gdf[(gdf.borough=="Manhattan")]
gdf.plot(column='Shape_Area', cmap='OrRd');
# # TASK 3: MODEL BUILDING
# # SELECTION OF A CLASSIFIER
# Random Forest is a flexible, easy-to-use machine learning algorithm that produces great results. It can be used as a regressor or a classifier, and it is a supervised learning algorithm. The forest it builds is an ensemble of decision trees trained with the bagging method; combining many learning models improves the overall result.
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import cm as cmap
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc
from sklearn.preprocessing import LabelEncoder
sns.set(font='sans')
# -
# I will use classification into two classes, taking into account the tip distribution observed for credit card payments: bad tip ('0' label, 0-3 dollars) or good tip ('1' label, more than 3 dollars). In this sense there is a higher probability of building a reliable model than with many classes or by regressing the tip value directly.
# +
feature_columns = ['hpick','hdrop','month','duration','VendorID','passenger_count','trip_distance','RatecodeID','PULocationID','DOLocationID','fare_amount','extra','mta_tax','tolls_amount','improvement_surcharge','total_amount']
label_column = 'tip_amount'
# -
class1 = ((data_aux[label_column] >= 0.0) & (data_aux[label_column] < 3))
class2 = (data_aux[label_column] >= 3.0)
# +
y=(data_aux[label_column] >= 0.0)
y[class1]='0'
y[class2]='1'
X=data_aux[feature_columns]
# -
# When analysing big datasets, it is important to reduce dimensionality. Principal component analysis is a well-established mathematical technique for reducing the dimensionality of data while keeping as much variation as possible. I have used 10 components. Feature ranking with the Random Forest Gini index would have been an interesting possibility to know which features matter most for a tip recommender tool (a sketch is shown after the model evaluation below).
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
principalComponents = pca.fit_transform(X)
principalDf = pd.DataFrame(data = principalComponents,columns = ['1', '2','3','4','5','6','7','8','9','10'])
# Train (70%) and test (30%) sets have been randomly selected from the card data. 100 estimators are used for building the RF model.
from sklearn.model_selection import train_test_split
# +
train, test, train_labels, test_labels = train_test_split(principalDf,
y,
stratify = y,
test_size = 0.3,
random_state = 42)
# -
model = RandomForestClassifier(n_estimators=100,
random_state=42,
max_features = 'sqrt',
n_jobs=-1, verbose = 1)
# +
model.fit(train, train_labels)
# -
# # MODEL PERFORMANCE
# In the following, the performance of the algorithm is analysed: 98% accuracy. As the classes are well balanced, we can see a similar performance for recommending to the client whether the taxi driver deserved a good or a bad tip (the F1 score is a good indicator). We leave some flexibility to the client, as a tip is always subjective.
# + active=""
# predictions = model.predict(test)
# -
y_pred=model.predict(test)
from sklearn import metrics
metrics.accuracy_score(test_labels, y_pred)
from pandas_ml import ConfusionMatrix
confusion_matrix = ConfusionMatrix(list(map(int, test_labels)), list(map(int, y_pred)))
confusion_matrix.print_stats()
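# As mentioned in the PCA section, another option is to rank the original features with the Random Forest Gini importance. A minimal sketch (fitting on the un-reduced feature columns instead of the PCA components; illustrative only):
#
# ```python
# rf_full = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
# rf_full.fit(X, y)  # X = data_aux[feature_columns], y = good/bad tip labels
# importances = pd.Series(rf_full.feature_importances_, index=feature_columns)
# importances.sort_values(ascending=False).plot(kind='bar', figsize=(12, 4))
# ```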
# # LIMITATIONS OR CAVEATS OF THE MODEL WHICH MIGHT BE AN ISSUE
# The model built on the card sample will now be tested on the "cash" payment_type, in which the tip is always set to 0
# (this could be a possible source of fraud).
# +
payment_type = (newdf.payment_type == 2) #payment_type=2 (cash): the recorded tip is always 0, so this subset is used only for testing the model
fare_amount = ((newdf.fare_amount >= 5.0) & (newdf.fare_amount <= 500.0))
surcharge = ((newdf.improvement_surcharge == 0.0) | (newdf.improvement_surcharge == 0.3))
mta_tax = (newdf.mta_tax == 0.5)
tip_amount = ((newdf.tip_amount >= 0.0) & (newdf.tip_amount <= 100.0))
tolls_amount = ((newdf.tolls_amount >= 0.0) & (newdf.tolls_amount <= 30.0))
data_cash = newdf[payment_type & fare_amount & surcharge & mta_tax & tip_amount & tolls_amount]
payment_type = None
fare_amount = None
surcharge = None
mta_tax = None
tip_amount = None
tolls_amount = None
feature_columns = ['hpick','hdrop','month','duration','VendorID','passenger_count','trip_distance','RatecodeID','PULocationID','DOLocationID','fare_amount','extra','mta_tax','tolls_amount','improvement_surcharge','total_amount']
label_column = 'tip_amount'
class1 = ((data_cash[label_column] >= 0.0) & (data_cash[label_column] < 3))
class2 = (data_cash[label_column] >= 3.0)
ycash=(data_cash[label_column] >= 0.0)
ycash[class1]='0'
ycash[class2]='1'
testcash=data_cash[feature_columns]
# reuse the PCA fitted on the card data; refitting on the cash data would project onto different components than the model was trained on
principalComponentscash = pca.transform(testcash)
principalDfcash = pd.DataFrame(data = principalComponentscash,columns = ['1', '2','3','4','5','6','7','8','9','10'])
# +
predictionscash = model.predict(principalDfcash)
metrics.accuracy_score(ycash, predictionscash)
# -
# As expected, the accuracy on the cash data has decreased due to the fraud cases (and not just fraud: the taxi driver cannot ask for more money than he is given if the client is a bit miserly). By default the tip is "bad" (=0) for the cash dataset, but the algorithm predicts that about 35% of the tips should have been good, and that is not what was recorded. Additionally, such tips are not being declared (possible fraud cases). The confusion matrix supports the accuracy results.
confusion_matrixcash = ConfusionMatrix(list(map(int, ycash)), list(map(int, predictionscash)))
confusion_matrixcash.print_stats()
# # POSSIBLE IMPROVEMENTS OF THE ALGORITHM
# Some possible improvements have already been mentioned during this analysis. For example: use more geolocation information to find the shortest trajectories between two points; use calendar information (work days, holiday dates, weekends, etc.); add more months and, for big data volumes, use technologies such as Spark; study the available features in depth and run a correlation analysis; do some feature engineering, combining and creating new KPIs that can improve the models; use different machine learning algorithms, for example deep learning, to predict the tip amount more precisely (rather than just good/bad); improve the models with label-noise reduction and study fraud-detection techniques based on anomaly detection; use semi-supervised learning to predict the unlabeled data (cash, for example), etc.
# # HOW TURN A MODEL INTO AN API THE COMPANY CAN USE
# Training a machine learning model is a heavy task for a mobile device, and not all ML libraries have APIs built for accessing a model stored on the phone.
# The best option is a client-server architecture where the trained model is stored on the server: the web server accepts requests from the client (the taxi driver), which are the inputs to the model, and the model's prediction is sent back to the driver. The client can be a web browser or a mobile app (the latter, I think, is best from the business point of view). In this case, the GPS should be connected to the app so the features are filled in automatically by pressing one button when the trip starts and ends.
#
# -A way of building and serving the model is the following:
#
# 1. Write the machine learning code. It can be scikit-learn, TensorFlow, Keras, Theano or plain NumPy code, whatever you choose for the task.
#
# 2. Train the model on your system or on any cloud.
#
# 3. Create a web server. You can use Flask/Django/PHP or any other framework.
#
# 3.1. Load the dataset and select the best features.
#
# 3.2. Do the necessary data preprocessing.
#
# 3.3. Build the RF classifier and serialize it. Also serialize the list of columns used at training time: persisting it is the usual fix when a request arrives with fewer columns than expected. With an automatic connection to the GPS this could be handled automatically.
#
# 3.4. Write a simple API, using Flask for instance, that predicts whether a tip should be good or bad (a minimal sketch is given below).
#
# 4. Store the model on the server, extract the input parameters from incoming requests and feed them to the model.
#
# 5. The model predicts the result, which is sent back to the client.
#
#
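# A minimal sketch of such a prediction endpoint (illustrative only: the file names, route and port are assumptions, and the model/column files are the ones serialized in step 3.3):
#
# ```python
# import joblib
# import pandas as pd
# from flask import Flask, request, jsonify
#
# app = Flask(__name__)
# model = joblib.load('tip_model.pkl')            # serialized RandomForestClassifier
# model_columns = joblib.load('tip_columns.pkl')  # column order persisted at training time
#
# @app.route('/predict', methods=['POST'])
# def predict():
#     payload = pd.DataFrame(request.get_json())  # one or more trips as JSON records
#     payload = payload.reindex(columns=model_columns, fill_value=0)
#     prediction = model.predict(payload).tolist()
#     return jsonify({'good_tip': prediction})
#
# if __name__ == '__main__':
#     app.run(port=5000)
# ```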
| 16,996 |
/analyticsvidhya.ipynb | fcd02bf81be84d0688f01151c804f383a6bcc9cb | [] | no_license | vgramu/AnalyticsVidhya | https://github.com/vgramu/AnalyticsVidhya | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,184 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Ll2HI0d3aQHR" colab_type="code" colab={}
#Import Libraries
from bs4 import BeautifulSoup
import requests
import csv
# + id="R0vgrD19xlHb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d0da2cfc-0070-4f11-e736-1988d695a84c" executionInfo={"status": "ok", "timestamp": 1551261222746, "user_tz": -330, "elapsed": 1028, "user": {"displayName": "ramu vadlagattu", "photoUrl": "", "userId": "17027593844500865040"}}
## Open and write to a file
av_file = open('av.csv','w')
av_writer = csv.writer(av_file)
av_writer.writerow(['Category','Date','Article','Link','Tags','Author'])
# + id="VTH9qcSMjhdJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="6c0cdac7-4ec6-4f74-b037-9b011b2dd123" executionInfo={"status": "ok", "timestamp": 1551261321028, "user_tz": -330, "elapsed": 91520, "user": {"displayName": "ramu vadlagattu", "photoUrl": "", "userId": "17027593844500865040"}}
# Read article category, created date, article title, link, tag associated to title and Author and write to file
#Reading two categories
category =['machine-learning','deep-learning']
for cat in category:
# Articles spread to total no. of pages
  for i in range(1,26):
av_source=''
# Retrieve articles from first page of a category
if i == 1:
av_source = requests.get('https://www.analyticsvidhya.com/blog/category/'+cat+'/').text
else:
# Retrieve articles from second page onwards...
av_source = requests.get('https://www.analyticsvidhya.com/blog/category/'+cat+'/page/'+str(i)+'/').text
    av_soup = BeautifulSoup(av_source, 'html.parser')  # specify the parser explicitly
    # Only process pages that returned parseable content
    if av_soup:
av_article = av_soup.find_all('article', class_='item-medium post-box-big')
for article in av_article:
#Read article name, url, entry date and author
av_text=article.find('h3',class_='entry-title').text
av_url = article.find('a')['href']
av_author = article.find('span',class_='entry-author').text
av_entry_date =article.find('time',class_='entry-date').text
# Read tag names
av_span = article.find_all('span',class_='mh-cat-item')
span_text=[]
for span in av_span:
span_text.append(span.find('a').text)
        av_writer.writerow([cat,av_entry_date,av_text, av_url, span_text,av_author])
av_file.close()
| 2,685 |
/LS_DS_111_A_First_Look_at_Data.ipynb | 9ff2e27206803a8c239c4d081745ac34be624dcb | [
"MIT"
] | permissive | tallywiesenberg/DS-Unit-1-Sprint-1-Dealing-With-Data | https://github.com/tallywiesenberg/DS-Unit-1-Sprint-1-Dealing-With-Data | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 260,682 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/twiesenb/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/LS_DS_111_A_First_Look_at_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - A First Look at Data
#
#
# + [markdown] id="9dtJETFRhnOG" colab_type="text"
# ## Lecture - let's explore Python DS libraries and examples!
#
# The Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?
# + id="WiBkgmPJhmhE" colab_type="code" outputId="6ce2aa73-bde6-4daa-b0a4-3af3654ebb13" colab={"base_uri": "https://localhost:8080/", "height": 212}
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
#reading dataset
drinks = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/alcohol-consumption/drinks.csv')
print(drinks.shape)
drinks.head()
# + id="_OY8k4Oou6mT" colab_type="code" outputId="efe88e2f-02c8-41d0-b4d2-6356a616b547" colab={"base_uri": "https://localhost:8080/", "height": 343}
drinks.columns
drinks.sort_values('beer_servings', ascending = False).head(10)
# + id="6CxAoPIAyxdq" colab_type="code" outputId="1bc5a1cc-b5b6-4321-86aa-ea8165e891e3" colab={"base_uri": "https://localhost:8080/", "height": 195}
#creating new column
drinks['drinks_alcohol'] = np.where(drinks['total_litres_of_pure_alcohol'] > 9, 'High',
np.where(drinks['total_litres_of_pure_alcohol'] > 6, 'Medium',
np.where(drinks['total_litres_of_pure_alcohol'] == 0, 'None', 'Low')))
drinks.head()
# + id="eIveifI8LqcV" colab_type="code" outputId="4c2c09c1-f9df-4f51-9c42-e89919140639" colab={"base_uri": "https://localhost:8080/", "height": 330}
##joining new dataset
#read countries csv
countries = pd.read_csv('https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv')
print(countries.shape)
#rename usa
drinks.at[184, 'country'] = 'United States of America'
#time to merge ---use merge function, best practice
df = pd.merge(drinks, countries[['name', 'region', 'sub-region']], how='left', left_on='country', right_on='name') #merge into left df, merge name into country
df.head()
# + id="CF1wvMY-QIvh" colab_type="code" outputId="06159457-22fb-4fdb-c10d-8d3f32cd4e66" colab={"base_uri": "https://localhost:8080/", "height": 821}
#which countries did not get joined properly?
df[df.region.isna()]
# + id="sMKIIgbqQoNV" colab_type="code" outputId="8a245be3-7bd7-4d94-c268-da322d8bc2bc" colab={"base_uri": "https://localhost:8080/", "height": 596}
## plot time
#plot subregions
df.groupby('sub-region').beer_servings.mean().plot(kind='bar', figsize =(20,6))
plt.title('Average Beer Servings Per Region')
plt.ylabel('Average Beer Serving')
plt.xlabel('World Subregion')
# + id="mRaN4UjQSzRu" colab_type="code" outputId="db18c0f9-2865-40d2-9991-b1aa553d4b4b" colab={"base_uri": "https://localhost:8080/", "height": 437}
##still plotting
#box plot
df.boxplot(column='beer_servings', by='region', figsize = (10,6))
# + id="9vjP6DQ0TOjE" colab_type="code" outputId="d584fa0c-2303-4609-c18e-96a54f7869ef" colab={"base_uri": "https://localhost:8080/", "height": 458}
#sns color sorted plot
sns.pairplot(x_vars=["beer_servings"], y_vars=["wine_servings"], data=df, hue="region", height= 6)
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - now it's your turn
#
# Pick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.
# + id="TGUS79cOhPWj" colab_type="code" outputId="4b72899e-7366-420b-caa6-af0583e41889" colab={"base_uri": "https://localhost:8080/", "height": 74}
# TODO - your code here
# Use what we did live in lecture as an example
from google.colab import files
uploaded = files.upload()
# + id="LY0CmGhUYjoS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 414} outputId="2df83ca3-f606-4daf-9606-57a948629cfb"
df = pd.read_csv('Behavior of the urban traffic of the city of Sao Paulo in Brazil.csv', sep=";")
df.head(10) #when I printed the head, it appeared I might have loaded the data incorrectly.
#However, when I asked python for some statistical facts about each column,
#I discovered that many values are 0 because the columns measure rare events:
#road accidents such as a broken-down bus, an accident injury, or an electricity blackout.
df.describe()
# + id="sAopKZF0b23o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ccd8f156-6bbd-4d4f-f59e-e0e797685d86"
df.boxplot()
# + id="QbnzTsWobvkU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="fe32508c-5fc6-4906-a43d-593153489a7b"
#checking for nan values
df.isna().sum() #I looked for NaN values, but there were none in the dataset.
# + id="fFRMqswedqXW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 579} outputId="f1a1fd41-2588-4c8d-8ee3-ba62248e6489"
###plot time -- I want to make a boxplot of each column in the dataset, without the Hour or Slowness columns because their range is much larger than the ranges of the other columns, which will make the chart less readable.
##box plot
#drop the slowness and hour columns
df1 = df.drop(['Hour (Coded)', 'Slowness in traffic (%)'], axis=1) #axis=1 tells python to look at columns
#plot box plot
df1.plot(kind='box', figsize=(30,6))
plt.xticks(rotation=60) #rotating the xticks
plt.title('Frequency of Road Malfunctions in Sao Paulo')
plt.xlabel('Type of Road Malfunction')
plt.ylabel('Frequency of Malfunction')
plt.show()
# + id="UkIwvcE2lVqv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 695} outputId="a4256ae8-9e23-453b-9d7d-195d74e0d49a"
##line plot
df.plot.line(x='Hour (Coded)', y='Lack of electricity', figsize=(10,6))
df.plot.line(x='Hour (Coded)', y='Slowness in traffic (%)', figsize=(10,6))
#I tried to plot instances of "Lack of Electricity" against "Slowness in Traffic", but because the data collectors culturally use commas in decimals where we use periods, python does not think that the numbers are floats or ints. An interesting case where Python is eurocentric!
#The line graph I did make did not seem to connect the scatterpoints in a readable fashion.
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. Describe in a paragraph of text what you did and why, as if you were writing an email to somebody interested but nontechnical.
#
# In this project I sought to explore a dataset on traffic slowness in Sao Paulo, Brazil. I found this dataset to be important because traffic is a serious problem in megacities around the world. First, I looked at the shape of the dataset, and I noticed that the dataset is indexed by consecutive hours over the course of roughly a week. The first peculiarity I noticed was that many of the values in the dataset are 0. However, when I asked python for some statistical facts about each column, I discovered that many values are 0 because the columns measure rare road events such as a broken-down bus, an accident injury, or an electricity blackout. This indicates that most of the time, there was no reported accident for many of the indexed hours.
# After I looked at the shape of the data, I checked for empty (Nan) values to make sure I did not need to impute data. The data required no imputing; all the values were filled.
# After checking for missing values, I decided to make a box plot of each column in the dataset to demonstrate the rarity of each event.
# Finally, I tried to plot on a line graph the correlation over time between electricity outages and traffic slowness. However, I ran into trouble because the writers of the dataset denote decimals with a comma, which Python does not understand.
#
# 2. What was the most challenging part of what you did?
#
#   I ran into four roadblocks while I worked on this assignment: semicolons separating values in the csv, overlapping ticks on the x axis, non-American notation of decimals, and an unreadable line graph. I was able to find code online that fixed the first two problems.
#   The third problem was that the writers of the CSV denoted decimals with commas instead of periods like Americans and Python. I didn't know how to fix this problem, because it seemed I would have to replace every comma in that column with a period (a sketch of a simpler fix using read_csv's decimal argument appears after these answers).
# The fourth problem was that the dots of my scatterplot that make up my line graph were connected incorrectly. I think I had a similar problem during the Precourse, but I don't remember the solution.
#
# 3. What was the most interesting thing you learned?
#
# The most interesting thing I learned from this project was how to learn through troubleshooting. I feel like each time I complete an assignment I learn one part from online troubleshooting and two parts from the lecture.
# 4. What area would you like to explore with more time?
#
# I'd like to explore more time understanding the difference between pandas plotting and matplotlib plotting, especially the differences in syntax.
#
#
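# + [markdown]
# A possible workaround for the comma-decimal issue described above (a sketch, not part of the original assignment): pandas can parse European-style numbers directly if `decimal=','` is passed to `read_csv`, so no manual find-and-replace is needed. The file name and `;` separator are the same ones used earlier in this notebook.

# +
# Hypothetical re-read of the same CSV, telling pandas that ';' separates columns and ',' marks decimals,
# so 'Slowness in traffic (%)' is parsed as a float instead of a string.
df_fixed = pd.read_csv('Behavior of the urban traffic of the city of Sao Paulo in Brazil.csv', sep=';', decimal=',')
df_fixed.plot.line(x='Hour (Coded)', y='Slowness in traffic (%)', figsize=(10, 6))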
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub (and since this is the first assignment of the sprint, open a PR as well).
#
# - [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/)
# - [scikit-learn documentation](http://scikit-learn.org/stable/documentation.html)
# - [matplotlib documentation](https://matplotlib.org/contents.html)
# - [Awesome Data Science](https://github.com/bulutyazilim/awesome-datascience) - a list of many types of DS resources
#
# Stretch goals:
#
# - Find and read blogs, walkthroughs, and other examples of people working through cool things with data science - and share with your classmates!
# - Write a blog post (Medium is a popular place to publish) introducing yourself as somebody learning data science, and talking about what you've learned already and what you're excited to learn more about.
# + id="-J-_qpiE-qZU" colab_type="code" colab={}
joe = { 'name': 'Joe', 'is_female': False, 'age': 19 }
alice = { 'name': 'Alice', 'is_female': True, 'age': 20 }
sarah = { 'name': 'Sarah', 'is_female': True, 'age': 20 }
students = [joe, alice, sarah]
for s in students:
print(s.values())
# + id="J8NChWgaEw6h" colab_type="code" colab={}
GM = {'name': 'Grow Mart', 'founding year' : 1973, 'revenue' : 2.65e5, 'expenses' : 1.83e5}
PD = {'name': 'Plant Depot', 'founding year' : 1973,'revenue' : 3.02e5, 'expenses' : 2.4e5 }
TRU = {'name': 'Trees R Us', 'founding year': 1985, 'revenue': 1.23e5, 'expenses': 1.3e5 }
stores = [GM, PD, TRU]
for s in stores:
  print(s['revenue'] > s['expenses'])
  s.update( {'is_profitable' : (s['revenue'] > s['expenses'])} ) #use update to add new key/value pair to dictionary; profitable means revenue exceeds expenses
print(stores)
# + [markdown] id="UgDb72Ha-r1P" colab_type="text"
# ^ Following along with Training Kit
# + id="iU1qQ7WtbaKk" colab_type="code" colab={}
import random
words = [
'supplant',
'undulate',
'xenon',
    'asymptote', # <- rotates here!
'babushka',
'kart',
'other']
def rotate_point(s = words):
sorted_words = sorted(s)
first_word = sorted_words[0]
print(first_word)
  index_number = s.index(first_word)
print('The index in the unsorted list of the first word of the sorted list is', index_number)
rotate_point(words)
# random.shuffle shuffles the list in place and returns None
random.shuffle(words)
print(words)
rotate_point(s = words)
# + id="8LsckX2-bfp7" colab_type="code" colab={}
| 19,844 |
/notebooks/CoronaLSTM.ipynb | b06725f9c7fe2127bc0f23d9cf54198a11c3cb7a | [] | no_license | adzuci/task-ts | https://github.com/adzuci/task-ts | 0 | 0 | null | 2020-04-27T21:22:46 | 2020-04-27T16:34:00 | null | Jupyter Notebook | false | false | .py | 222,144 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://pl.wikipedia.org/wiki/Dyskretna_transformacja_kosinusowa
#
# http://grzegorzsokol.name/blog/dct/
#
# https://pl.wikipedia.org/wiki/JPEG
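#
# As a quick reference (a summary added here, not taken from the linked pages): with `norm='ortho'`, `scipy.fftpack.dct` computes the orthonormal DCT-II of a length-$N$ signal $x_n$,
#
# $$X_k = c_k \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k (2n+1)}{2N}\right), \qquad c_0 = \sqrt{1/N}, \quad c_k = \sqrt{2/N} \ \text{for } k > 0,$$
#
# and the 2-D transform used below simply applies this 1-D transform along the rows and then along the columns of each 8x8 block.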
import matplotlib.pyplot as plt
import scipy.fftpack as ff
import math
import numpy as np
# +
from skimage import io, color
lena = io.imread("../images/001.jpg")
plt.imshow(lena)
plt.axis('off')
plt.show()
lenagray = color.rgb2gray(lena)
print(lenagray.shape, lenagray.dtype)
plt.imshow(lenagray, cmap="gray")
plt.axis('off')
plt.show()
# +
image = lenagray
windowsize = 8
counter = 0
windows = []
for r in range(0,image.shape[0] - windowsize, windowsize):
    for c in range(0,image.shape[1] - windowsize, windowsize):
windows.append(image[r:r+windowsize,c:c+windowsize])
counter += 1
print(counter)
# -
len(list(range(0,image.shape[0] - windowsize, windowsize)))
len(windows)
# +
frag = windows[0]
plt.imshow(frag, cmap="gray")
plt.axis('off')
plt.show()
# -
res = ff.dct(ff.dct(frag,norm='ortho').T,norm='ortho').T
np.set_printoptions(suppress=True, precision=2)
print(res)
# +
res2 = np.round(res[:],2)
bias = .03
# windowsize = 8
counter = 0
for i in range(windowsize):
for j in range(windowsize):
if res2[i,j]>-bias and res2[i,j]<bias:
res2[i,j]=0
counter += 1
print('Number of modified coefficients: ', counter)
np.set_printoptions(suppress=True, precision=2)
print(res2)
print('Non-zero values: ', np.sum(res2 != 0), ' out of ', res2.size)
# -
orig = ff.idct(ff.idct(res2,norm='ortho').T,norm='ortho').T
print(orig)
plt.imshow(frag, cmap="gray")
plt.axis('off')
plt.show()
print(np.mean(frag == orig))
np.set_printoptions(suppress=True, precision=2)
print("%.2f %.2f %.2f" % (np.mean(frag - orig), np.max(frag - orig), np.sum(frag - orig)))
def show2imgs(im1, im2, title1='First image', title2='Second image', size=(10,10)):
import matplotlib.pyplot as plt
f, (ax1, ax2) = plt.subplots(1,2, figsize=size)
ax1.imshow(im1, cmap='gray')
ax1.axis('off')
ax1.set_title(title1)
ax2.imshow(im2, cmap='gray')
ax2.axis('off')
ax2.set_title(title2)
plt.show()
show2imgs(frag, orig, 'Original', 'Reconstructed image')
# + colab={"base_uri": "https://localhost:8080/", "height": 137}
# !pip install tsaug
# + id="aXNkSdTFvZ1g" colab_type="code" colab={}
from tsaug.visualization import plot
from tsaug import TimeWarp, Crop, Quantize, Drift, Reverse
my_augmenter = (TimeWarp() * 5  # random time warping 5 times in parallel
                + Crop(size=300)  # random crop subsequences with length 300
                + Quantize(n_levels=[10, 20, 30])  # random quantize to 10-, 20-, or 30- level sets
                + Drift(max_drift=(0.1, 0.5)) @ 0.8  # with 80% probability, random drift the signal up to 10% - 50%
                + Reverse() @ 0.5)  # with 50% probability, reverse the sequence
# + id="VdVWNhk2XRJw" colab_type="code" colab={}
#X_aug = my_augmenter.augment(antwerp_relevant)
print(antwerp_relevant.shape)
# TimeWarp must be instantiated and applied via .augment(); the slice keeps only the training window
X_aug = TimeWarp().augment(antwerp_relevant[:70])
# + [markdown] id="-A6ICggnYBlX" colab_type="text"
# ## Models and Forecasting
# We will now define some simple models in Keras for forecasting.
# + id="VO7ZOB41az1W" colab_type="code" colab={}
import numpy as np
from sklearn.preprocessing import RobustScaler
scaler_dict = {}
config_default = {"epochs":30, "validation_split":0.1,
"loss":"mean_squared_error", "optimizer":'adam',
"geo_segment":"antwerp", "seq_len":7, "train_steps":70,
"test_steps":27, "scaler":"RobustScaler",
"beta":0.899}
r = RobustScaler()
x_train_full = antwerp_df[['deaths', 'cases']][:config_default["train_steps"]]
x_train_full = pd.DataFrame(r.fit_transform(x_train_full))
y_train_full = x_train_full
r_test = RobustScaler()
test_orig = antwerp_df[['deaths', 'cases']][70:]
test = pd.DataFrame(r_test.fit_transform(test_orig))
# + id="eUR6eM4MZZJJ" colab_type="code" colab={}
def create_dataset(X, y, time_steps=1):
Xs, ys = [], []
for i in range(len(X) - time_steps):
v = X.iloc[i:(i + time_steps)].values
Xs.append(v)
ys.append(y.iloc[i + time_steps])
return np.array(Xs), np.array(ys)
X_train, Y_train = create_dataset(x_train_full, y_train_full, config_default["seq_len"])
X_test, y_test = create_dataset(test, test, config_default["seq_len"])
# + id="-OOG9RAp4-ap" colab_type="code" outputId="d37f1d49-1f45-45dc-ea11-d21c5e1de166" colab={"base_uri": "https://localhost:8080/", "height": 71}
sweep_config = {
"name": "Default sweep",
"method": "grid",
"parameters": {
"batch_size": {
"values": [2, 3, 4, 5]
},
"learn":{
"values":[0.001, 0.0015, 0.002, 0.003, 0.004, 0.01]
}
}
}
sweep_id = wandb.sweep(sweep_config)
# + id="nXhSxkqdYJbd" colab_type="code" colab={}
def train():
run = wandb.init(project="covid-forecast", config=config_default, magic=True)
config = wandb.config
opt = keras.optimizers.Adam(learning_rate=config["learn"], beta_1=config["beta"], beta_2=0.999, amsgrad=False)
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
keras.layers.LSTM(
units=128,
input_shape=(X_train.shape[1], X_train.shape[2])
)
)
)
model.add(keras.layers.Dropout(rate=0.2))
model.add(keras.layers.Dense(units=2))
model.compile(loss=config["loss"], optimizer=opt)
history = model.fit(
X_train, Y_train,
epochs=config["epochs"],
batch_size=config["batch_size"],
validation_split=config["validation_split"],
callbacks=[WandbCallback()],
shuffle=False
)
evaluate_single(model, X_test, y_test, r)
evaluate_plot_multi(model, test, config, X_test, r_test)
return model
def evaluate_single(model, x_test, y_test, scaler):
y_preds = model.predict(x_test)
y_preds = scaler.inverse_transform(y_preds)
y_test = scaler.inverse_transform(y_test)
complete_mse = tf.keras.losses.MSE( y_preds[:, 1], y_test[:, 1])
wandb.run.summary["test_mse"] = complete_mse
return complete_mse
def evaluate_plot_multi(model, test_df, config, x_test, scaler):
arr = predict_multi(model, len(test)-config["seq_len"], x_test[0, :, :])
test_orig['predicted_cases'] = 0
test_orig['predicted_cases'][config["seq_len"]:] = scaler.inverse_transform(arr.squeeze(0))[:, 1]
plt.plot(test_orig['predicted_cases'], label='predicted_cases')
plt.plot(test_orig['cases'], label='actual_cases')
plt.legend();
wandb.log({"test":plt})
    # Compare the actual cases against the multi-step predictions produced above
    large_mse = tf.keras.losses.MSE(
        test_orig['cases'][config["seq_len"]:].values,
        test_orig['predicted_cases'][config["seq_len"]:].values
    )
wandb.run.summary["test_mse_full"] = large_mse
return large_mse
# + id="Q0N5tLiZB_IE" colab_type="code" outputId="cbca51a1-6556-49cc-d222-a96d1d007d5a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
wandb.agent(sweep_id, function=train)
#train()
# + id="tR_vweL-bOD1" colab_type="code" outputId="f6d40e8b-d959-48ff-85d9-d828cd00779b" colab={"base_uri": "https://localhost:8080/", "height": 286}
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend();
# + [markdown] id="zPaMFelHjm-m" colab_type="text"
# ### Examining Results
# We will now predict both one step ahead and 20 steps ahead.
# + id="F5zjK1JYf52W" colab_type="code" outputId="00fa37da-16a3-428d-bb6c-e6f10371bb94" colab={"base_uri": "https://localhost:8080/", "height": 319}
res = model.predict(X_test)
res = r_test.inverse_transform(res)
res
# + id="Z_9dG21n1bJC" colab_type="code" outputId="aee9ba88-9e6e-4ec8-ad2c-a114d299538a" colab={"base_uri": "https://localhost:8080/", "height": 319}
y_true = r_test.inverse_transform(y_test)
y_true
# + id="JVKR-Hw_1lQ8" colab_type="code" outputId="7e86523f-421b-47f5-a08a-8c5aaaa057b3" colab={"base_uri": "https://localhost:8080/", "height": 368}
def predict_multi(model, time_steps, start_rows):
start_rows=np.expand_dims(start_rows, axis=0)
for i in range(0, time_steps):
out = model.predict(start_rows[:, i:, :])
out = out[np.newaxis, ...]
start_rows = np.concatenate((start_rows, out), axis=1)
return start_rows[:, config["seq_len"]:, :]
arr = predict_multi(model, len(test)-config["seq_len"], X_test[0, :, :])
test_orig['predicted_cases'] = 0
test_orig['predicted_cases'][config["seq_len"]:] = r_test.inverse_transform(arr.squeeze(0))[:, 1]
plt.plot(test_orig['predicted_cases'], label='predicted_cases')
plt.plot(test_orig['cases'], label='actual_cases')
plt.legend();
wandb.log({"test":plt})
# + id="aoIE9nyy3gVB" colab_type="code" outputId="fccf7eb0-be54-446c-f780-611b7b7d1e37" colab={"base_uri": "https://localhost:8080/", "height": 153}
r_test.inverse_transform(X_test[0, :, :])
# + id="nW4Nwm9yWPia" colab_type="code" outputId="76a383f0-a538-4098-9806-a3f3806f353b" colab={"base_uri": "https://localhost:8080/", "height": 319}
y_multi = r_test.inverse_transform(arr.squeeze(0))
y_multi
# + id="5x_QvIsutf1a" colab_type="code" colab={}
import tensorflow as tf
x_test = y_true[:, 1]
wandb.run.summary["test_mse"] = tf.keras.losses.MSE(
x_test, res[:, 1]
)
# + id="6BiFffVB3Jc1" colab_type="code" colab={}
wandb.run.summary["test_mse_full"] = tf.keras.losses.MSE(
y_multi[:, 1], x_test
)
# + [markdown] id="bkob-4bnYdeT" colab_type="text"
# ### PyTorch models
#
# + id="aGXjVazoJ9i3" colab_type="code" colab={}
import torch
import math
from torch.nn.modules import Transformer, TransformerEncoder, TransformerDecoder, TransformerDecoderLayer, TransformerEncoderLayer, LayerNorm
class CustomTransformerDecoder(torch.nn.Module):
def __init__(self, seq_length, output_seq_length, n_time_series, d_model=128, output_dim=1):
super().__init__()
self.dense_shape = torch.nn.Linear(n_time_series, d_model)
self.pe = SimplePositionalEncoding(d_model)
encoder_layer = TransformerEncoderLayer(d_model, 8)
encoder_norm = LayerNorm(d_model)
self.transformer_enc = TransformerEncoder(encoder_layer, 6, encoder_norm)
self.output_dim_layer = torch.nn.Linear(d_model, output_dim)
self.output_seq_length = output_seq_length
self.out_length_lay = torch.nn.Linear(seq_length, output_seq_length)
self.mask = generate_square_subsequent_mask(seq_length)
def forward(self, x):
""""""
x = self.dense_shape(x)
x = self.pe(x)
x = x.permute(1,0,2)
x = self.transformer_enc(x, mask=self.mask)
x = self.output_dim_layer(x)
x = x.permute(1, 2, 0)
x = self.out_length_lay(x)
return x.view(-1, self.output_seq_length)
class SimplePositionalEncoding(torch.nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(SimplePositionalEncoding, self).__init__()
self.dropout = torch.nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x:torch.Tensor)->torch.Tensor:
"""Creates a basic positional encoding"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
def generate_square_subsequent_mask(sz:int)->torch.Tensor:
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
# + id="RK-U7-QhfP5b" colab_type="code" colab={}
c = CustomTransformerDecoder(50, 1, 3)
# + id="VtZbZ55tfe0i" colab_type="code" outputId="8e42b01b-303a-4fbf-f55e-217198c364e0" colab={"base_uri": "https://localhost:8080/", "height": 71}
c(torch.rand(2, 50, 3))
# + id="7mFCWLbkktra" colab_type="code" colab={}
class LSTMForecast(torch.nn.Module):
def __init__(self, seq_length: int, n_time_series: int, output_seq_len=1, hidden_states=20, num_layers=2, bias=True):
super().__init__()
self.num_layers = num_layers
self.forecast_history = seq_length
self.n_time_series = n_time_series
self.hidden_dim = hidden_states
self.lstm = torch.nn.LSTM(n_time_series, hidden_states, num_layers, bias, batch_first=True)
self.final_layer = torch.nn.Linear(seq_length*hidden_states, output_seq_len)
def init_hidden(self, batch_size):
# even with batch_first = True this remains same as docs
hidden_state = torch.zeros(self.num_layers,batch_size,self.hidden_dim)
cell_state = torch.zeros(self.num_layers,batch_size,self.hidden_dim)
self.hidden = (hidden_state, cell_state)
def forward(self, x: torch.Tensor) -> torch.Tensor:
print(x.size()[0])
batch_size = x.size()[0]
out_x,self.hidden = self.lstm(x, self.hidden)
x = self.final_layer(out_x.contiguous().view(batch_size, -1))
return x
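# + [markdown]
# A quick shape check of `LSTMForecast`, mirroring the transformer smoke test above. The batch size and feature dimensions below are arbitrary, chosen only to illustrate the expected input/output shapes.

# +
lstm_model = LSTMForecast(seq_length=7, n_time_series=2, output_seq_len=1)
lstm_model.init_hidden(batch_size=4)      # hidden/cell states sized for this batch
lstm_model(torch.rand(4, 7, 2)).shape     # expected: torch.Size([4, 1])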
| 13,489 |
/T-Student.ipynb | 71ab6ef5fec0d14ad8acd1e44dad3984629b2449 | [] | no_license | hcpassos/Python-practice | https://github.com/hcpassos/Python-practice | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,691 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Crop Yield Prediction
# ### Md. Rubel Rana 1712661642
# ### Navid Al - Musabbir 1721853042
# +
import graphviz
import sklearn
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.io as pio
import plotly.express as px
from sklearn import metrics
import matplotlib.pyplot as plt
from scipy import stats
import autosklearn.regression
import autogluon.core as ag
from tpot import TPOTRegressor
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from hpsklearn import HyperoptEstimator
from hpsklearn import any_classifier
from hpsklearn import any_preprocessing
from hyperopt import tpe
import autosklearn
import sklearn.metrics
from lightgbm.sklearn import LGBMRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, cross_val_score
from autosklearn.classification import AutoSklearnClassifier
from supervised.automl import AutoML
from supervised.preprocessing.eda import EDA
from autogluon.tabular import TabularDataset, TabularPredictor
from sklearn.metrics import accuracy_score
# -
# Dataset
dataset = 'dataset/Aus/aus.csv'
# Load dataset into dataframe
data = pd.read_csv(dataset)
# ## Data Exploration
data.shape
data.columns
data.head()
data.tail()
data.info()
data.describe()
# ## Data Preprocessing
# #### a. Removing 0's and NaN values
# Checking 0 values
(data == 0).sum(axis=0)
# Replace 0's with NaN
data.replace(0, np.nan, inplace=True)
data.isnull().sum().sum()
# Drop all NaN values
data = data.dropna()
data = data.reset_index(drop=True)
data.isnull().values.any()
# Be ensure about 0's and NaN values
(data == 0).sum(axis=0)
data['Area'] = data.Area.astype(int)
data['Productions'] = data.Productions.astype(int)
data['PPH'] = pd.to_numeric(data['PPH'])
data.shape
data.head()
data.tail()
data.info()
# #### b. Removing Outliers
z = np.abs(stats.zscore(data))
np.where(z > 4)
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
(data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))
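# The expression above only displays the IQR outlier mask; the filtering applied below uses the z-score.
# A sketch of how the IQR mask could be used to drop outliers instead (not used further here):
data_iqr = data[~((data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))).any(axis=1)]
data_iqr.shape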
data.shape
data = data[(z < 4).all(axis=1)]
data.shape
# ## Splitting Data
X = data[['District', 'Year', 'Max_Temp', 'Min_Temp', 'Rainfall', 'Humidity', 'Wind', 'Cloud', 'Sunshine', 'ALT']]
y = data['PPH']
# #### a. Train data & Test data
X_train, X_rem, y_train, y_rem = train_test_split(X, y, train_size=0.3)
# #### b. Train data & Validation data
# Note: X_train is re-assigned here from the 70% remainder; it is the feature set paired with y_valid in the cells below
X_train, X_test, y_valid, y_test = train_test_split(X_rem, y_rem, test_size=0.3)
# ## 1. Performing Automated EDA
EDA.extensive_eda(X_train,y_valid,save_path="content/mljar-supervised/aus")
# ## Creating AutoML Models
automl = AutoML(mode='Compete',
total_time_limit=10,
results_path="AutoML_classifier/Aus")
automl.fit(X_train, y_valid)
predictions = automl.predict(X_test)
# #### a. MSE
metrics.mean_squared_error(y_test, predictions)
# #### b. MAE
metrics.mean_absolute_error(y_test, predictions)
# #### c. R2
metrics.r2_score(y_test, predictions)
y_pred = automl.predict(X_train)
plt.figure(figsize=(15,10))
plt.scatter(y_pred, y_valid, label="Train", color='#d95f02')
plt.scatter(predictions, y_test, label="Test", color='#7570b3')
plt.title('AUS EDA AutoML Scatter Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.legend()
plt.savefig("fig_content/aus_automl_scatter.png")
plt.show()
plt.figure(figsize=(15,10))
sns.regplot(x=predictions, y = y_test, data = data)
plt.title('AUS EDA AutoML Reg Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.savefig("reg_content/aus_eda_automl_reg.png")
# ## 2. AutoSklearn Regression
autosk = autosklearn.regression.AutoSklearnRegressor(
time_left_for_this_task=120,
per_run_time_limit=30,
tmp_folder='autosklearn_regression/aus',
resampling_strategy='holdout',
resampling_strategy_arguments={'folds': 5},
)
autosk.fit(X_train, y_valid, dataset_name='data')
autosk.leaderboard()
print(autosk.show_models())
predictions = autosk.predict(X_test)
# #### a. MSE
metrics.mean_squared_error(y_test, predictions)
# #### b. MAE
metrics.mean_absolute_error(y_test, predictions)
# #### c. R2
metrics.r2_score(y_test, predictions)
y_pred = autosk.predict(X_train)
plt.figure(figsize=(15,10))
plt.scatter(y_pred, y_valid, label="Train", color='#d95f02')
plt.scatter(predictions, y_test, label="Test", color='#7570b3')
plt.title('AUS AutoSk Scatter Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.legend()
plt.savefig("fig_content/aus_autosk_scatter.png")
plt.show()
plt.figure(figsize=(15,10))
sns.regplot(x=predictions, y = y_test, data = data)
plt.title('AUS AutoSk Reg Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.savefig("reg_content/aus_autosk_reg.png")
# ## 3. AutoGluon
train_data = TabularDataset('dataset/Aus/train_data.csv')
test_data = TabularDataset('dataset/Aus/test_data.csv')
# Checking 0 values
(train_data == 0).sum(axis=0)
(test_data == 0).sum(axis=0)
# Replace 0's with NaN
train_data.replace(0, np.nan, inplace=True)
test_data.replace(0, np.nan, inplace=True)
train_data.isnull().sum().sum()
test_data.isnull().sum().sum()
# Drop all NaN values
train_data = train_data.dropna()
train_data = train_data.reset_index(drop=True)
test_data = test_data.dropna()
test_data = test_data.reset_index(drop=True)
train_data.isnull().values.any()
test_data.isnull().values.any()
# Be ensure about 0's and NaN values
(train_data == 0).sum(axis=0)
(test_data == 0).sum(axis=0)
train_data['Area'] = train_data.Area.astype(int)
train_data['Productions'] = train_data.Productions.astype(int)
train_data['PPH'] = pd.to_numeric(train_data['PPH'])
test_data['Area'] = test_data.Area.astype(int)
test_data['Productions'] = test_data.Productions.astype(int)
test_data['PPH'] = pd.to_numeric(test_data['PPH'])
label = 'PPH'
data[label].describe()
save_path = 'autogluon/aus'
hyperparameters = {
'NN': {'num_epochs': 10, 'activation': 'relu', 'dropout_prob': ag.Real(0.0, 0.5)},
'GBM': {'num_boost_round': 1000, 'learning_rate': ag.Real(0.01, 0.1, log=True)},
'XGB': {'n_estimators': 1000, 'learning_rate': ag.Real(0.01, 0.1, log=True)}
}
predictor = TabularPredictor(label=label, path=save_path).fit(
train_data, hyperparameters=hyperparameters, hyperparameter_tune_kwargs='auto', time_limit=60
)
predictor.fit_summary()
perf = predictor.evaluate(test_data)
y_pred = predictor.predict(test_data)
perf = predictor.evaluate_predictions(y_true=test_data[label], y_pred=y_pred, auxiliary_metrics=True)
perf
# ## 4. Hyperopt
n_iter=10
num_folds=2
kf = KFold(n_splits=num_folds, random_state=None)
model = LGBMRegressor(random_state=42)
# #### a. MSE
abs(cross_val_score(model, X, y, scoring='neg_mean_squared_error')).mean()
# #### b. MAE
abs(cross_val_score(model, X, y, scoring='neg_mean_absolute_error')).mean()
# #### c. R2
abs(cross_val_score(model, X, y, scoring='r2')).mean()
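# The heading of this section mentions Hyperopt, but the cells above only cross-validate a plain LightGBM baseline. Below is a minimal sketch of how hpsklearn's `HyperoptEstimator` (imported at the top of this notebook) could be wired in; `any_regressor` is the regression counterpart of the imported `any_classifier`, and `max_evals`/`trial_timeout` are arbitrary illustrative values, not tuned settings.

# +
from hpsklearn import any_regressor
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=42)
hyperopt_estim = HyperoptEstimator(regressor=any_regressor('reg'),         # search over candidate regressors
                                   preprocessing=any_preprocessing('pre'),
                                   algo=tpe.suggest,                       # TPE search strategy
                                   max_evals=10,                           # illustrative search budget
                                   trial_timeout=60)                       # seconds allowed per trial
hyperopt_estim.fit(X_tr.values, y_tr.values)
print(hyperopt_estim.score(X_te.values, y_te.values))
print(hyperopt_estim.best_model())
# -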
# ## 5. TPOT
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, test_size=0.25, random_state=42)
tpot = TPOTRegressor(generations=5, population_size=50, verbosity=2, random_state=42)
tpot.fit(X_train, y_train)
predictions = tpot.predict(X_test)
# #### a. MSE
metrics.mean_squared_error(y_test, predictions)
# #### b. MAE
metrics.mean_absolute_error(y_test, predictions)
# #### c. R2
metrics.r2_score(y_test, predictions)
y_pred = tpot.predict(X_train)
plt.figure(figsize=(15,10))
plt.scatter(y_pred, y_train, label="Train", color='#d95f02')
plt.scatter(predictions, y_test, label="Test", color='#7570b3')
plt.title('AUS TPOT Scatter Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.legend()
plt.savefig("fig_content/aus_tpot_scatter.png")
plt.show()
plt.figure(figsize=(15,10))
sns.regplot(x=predictions, y = y_test, data = data)
plt.title('AUS TPOT Reg Plot')
plt.xlabel("Predicted value")
plt.ylabel("Actual value")
plt.savefig("reg_content/aus_tpot_reg.png")
# ## 6. EvalML => AutoMLSearch
import evalml
from evalml import AutoMLSearch
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='regression', test_size=0.3, random_seed=0)
automl = AutoMLSearch(X_train = X_train, y_train=y_train, problem_type = "regression",max_batches=1,optimize_thresholds=True)
automl.search()
automl.rankings
best_pipeline = automl.best_pipeline
best_pipeline
automl.describe_pipeline(automl.rankings.iloc[0]["id"])
automl.describe_pipeline(1)
automl.results
evalml.objectives.get_all_objective_names()
# ## Graphical Representation
# +
correlation_data=data.select_dtypes(include=[np.number]).corr()
mask = np.zeros_like(correlation_data)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.color_palette("vlag", as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(correlation_data, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5});
# -
plt.figure(figsize=(10, 10))
sns.pairplot(data, hue='PPH');
pio.templates.default = "seaborn"
plt.figure(figsize=(16, 16))
fig = px.line(data, x = "Year", y = "Rainfall", color = "Year")
fig.show()
pio.templates.default = "seaborn"
plt.figure(figsize=(10, 10))
fig = px.line(data, x = "Cloud", y = "Rainfall", color = "Cloud")
fig.show()
pio.templates.default = "seaborn"
plt.figure(figsize=(16, 16))
fig = px.line(data, x = "Humidity", y = "Rainfall", color = "Humidity")
fig.show()
pio.templates.default = "seaborn"
plt.figure(figsize=(16, 16))
fig = px.line(data, x = "Year", y = "PPH", color = "Year")
fig.show()
procs = []
# instantiate each process with its arguments
for name in range(1,n):
# print(name)
proc = Process(target=create_n_threads, args=(name,))
procs.append(proc)
proc.start()
# complete the processes
for proc in procs:
proc.join()
# -
# ## Merging all three tables to create the final dataset
######################merging the three tables create final dataset and normalize it
ds=pd.read_sql_query("SELECT a.* , b.size , c.Class FROM id_ngram a INNER JOIN id_size b ON a.Id=b.id INNER JOIN id_Class c ON a.Id=c.id ",conn)
data_y=ds['Class']
ds.head()
######normalize above table
def normalize(df):
result1 = df.copy()
for feature_name in df.columns:
if (str(feature_name) != str('Id') and str(feature_name)!=str('Class')):
max_value = df[feature_name].max()
min_value = df[feature_name].min()
result1[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
return result1
ds_n = normalize(ds)
# ## Normalize the dataset
# +
ds_n.head()
# -
# ## Multivariate analysis using t-sne
# +
#########Multivariate analysis of the dataset using T-SNE
xtsne=TSNE(perplexity=50)
dims=xtsne.fit_transform(ds_n.drop(['Id','Class'], axis=1))
vis_x = dims[:, 0] ####first principal component
vis_y = dims[:, 1] #####second principal component
plt.scatter(vis_x, vis_y, c=data_y, cmap=plt.cm.get_cmap("jet", 9))
plt.colorbar(ticks=range(10))
plt.clim(0.5, 9)
plt.show()
# -
#xtsne=TSNE(perplexity=20)
xtsne = TSNE(n_components=2, verbose=1, perplexity=30)
dims=xtsne.fit_transform(ds_n.drop(['Id','Class'], axis=1))
vis_x = dims[:, 0] ####first principal component
vis_y = dims[:, 1] #####second principal component
plt.scatter(vis_x, vis_y, c=data_y, cmap=plt.cm.get_cmap("jet", 9))
plt.colorbar(ticks=range(10))
plt.clim(0.5, 9)
plt.show()
xtsne=TSNE(perplexity=100)
dims=xtsne.fit_transform(ds_n.drop(['Id','Class'], axis=1))
vis_x = dims[:, 0] ####first principal component
vis_y = dims[:, 1] #####second principal component
plt.scatter(vis_x, vis_y, c=data_y, cmap=plt.cm.get_cmap("jet", 9))
plt.colorbar(ticks=range(10))
plt.clim(0.5, 9)
plt.show()
# ## Split the data into test and train datasets
###############Test Train split
data_y = ds_n['Class']
# split the data into test and train while maintaining the same distribution of the output variable 'data_y' [stratify=data_y]
X_train, X_test, y_train, y_test = train_test_split(ds_n.drop(['Id','Class'], axis=1), data_y,stratify=data_y,test_size=0.25) #stratify maintains the same proportion/class ratio across the splits
# split the train data into train and cross validation while maintaining the same distribution of the output variable 'y_train' [stratify=y_train]
X_train, X_cv, y_train, y_cv = train_test_split(X_train, y_train,stratify=y_train,test_size=0.25)
print('Number of data points in train data:', X_train.shape[0])
print('Number of data points in test data:', X_test.shape[0])
print('Number of data points in cross validation data:', X_cv.shape[0])
# ## Class distribution in test and train datasets
# +
#######distribution of data points in train dataset
df_y_train=pd.DataFrame({'id':y_train.index, 'Class':y_train.values})
sns.set(style="darkgrid")
ax = sns.countplot(x="Class", data=df_y_train)
plt.title('Class counts in Train dataset')
plt.show()
# +
Y=df_y_train
total = len(Y)*1.
ax=sns.countplot(x="Class", data=Y)
for p in ax.patches:
ax.annotate('{:.1f}%'.format(100*p.get_height()/total), (p.get_x()+0.1, p.get_height()+5))
#put 11 ticks (therefore 10 steps), from 0 to the total number of rows in the dataframe
ax.yaxis.set_ticks(np.linspace(0, total, 11))
#adjust the ticklabel to the desired format, without changing the position of the ticks.
ax.set_yticklabels(map('{:.1f}%'.format, 100*ax.yaxis.get_majorticklocs()/total))
plt.title("Class distribution in Train dataset")
plt.show()
# +
df_y_test=pd.DataFrame({'id':y_test.index, 'Class':y_test.values})
sns.set(style="darkgrid")
ax = sns.countplot(x="Class", data=df_y_test)
plt.title('Class counts in Test dataset')
plt.show()
# +
Y=df_y_test
total = len(Y)*1.
ax=sns.countplot(x="Class", data=Y)
for p in ax.patches:
ax.annotate('{:.1f}%'.format(100*p.get_height()/total), (p.get_x()+0.1, p.get_height()+5))
#put 11 ticks (therefore 10 steps), from 0 to the total number of rows in the dataframe
ax.yaxis.set_ticks(np.linspace(0, total, 11))
#adjust the ticklabel to the desired format, without changing the position of the ticks.
ax.set_yticklabels(map('{:.1f}%'.format, 100*ax.yaxis.get_majorticklocs()/total))
plt.title("Class distribution in Test dataset")
plt.show()
# +
df_y_cv=pd.DataFrame({'id':y_cv.index, 'Class':y_cv.values})
sns.set(style="darkgrid")
ax = sns.countplot(x="Class", data=df_y_train)
plt.title('Class counts in cv dataset')
plt.show()
# +
Y=df_y_cv
total = len(Y)*1.
ax=sns.countplot(x="Class", data=Y)
for p in ax.patches:
ax.annotate('{:.1f}%'.format(100*p.get_height()/total), (p.get_x()+0.1, p.get_height()+5))
#put 11 ticks (therefore 10 steps), from 0 to the total number of rows in the dataframe
ax.yaxis.set_ticks(np.linspace(0, total, 11))
#adjust the ticklabel to the desired format, without changing the position of the ticks.
ax.set_yticklabels(map('{:.1f}%'.format, 100*ax.yaxis.get_majorticklocs()/total))
plt.title("Class distribution in cv dataset")
plt.show()
# -
# ## ML Models
def plot_confusion_matrix(test_y, predict_y):
C = confusion_matrix(test_y, predict_y)
print("Number of misclassified points ",(len(test_y)-np.trace(C))/len(test_y)*100)
A =(((C.T)/(C.sum(axis=1))).T)
B =(C/C.sum(axis=0))
labels = [1,2,3,4,5,6,7,8,9]
cmap=sns.light_palette("blue")
# representing A in heatmap format
print("="*50, "Confusion matrix", "="*50)
plt.figure(figsize=(10,5))
sns.heatmap(C, annot=True, cmap=cmap, fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
print("-"*50, "Precision matrix", "-"*50)
plt.figure(figsize=(10,5))
sns.heatmap(B, annot=True, cmap=cmap, fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
print("Sum of columns in precision matrix",B.sum(axis=0))
# representing B in heatmap format
print("-"*50, "Recall matrix" , "-"*50)
plt.figure(figsize=(10,5))
sns.heatmap(A, annot=True, cmap=cmap, fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted Class')
plt.ylabel('Original Class')
plt.show()
    print("Sum of rows in recall matrix",A.sum(axis=1))
# +
###############################KNN#######################
alpha = [x for x in range(1, 15, 2)]
cv_log_error_array=[]
for i in alpha:
k_cfl=KNeighborsClassifier(n_neighbors=i)
k_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(k_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_cv)
cv_log_error_array.append(log_loss(y_cv, predict_y, labels=k_cfl.classes_, eps=1e-15))
for i in range(len(cv_log_error_array)):
print ('log_loss for k = ',alpha[i],'is',cv_log_error_array[i])
best_alpha = np.argmin(cv_log_error_array)
fig, ax = plt.subplots()
ax.plot(alpha, cv_log_error_array,c='g')
for i, txt in enumerate(np.round(cv_log_error_array,3)):
ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i]))
plt.grid()
plt.title("Cross Validation Error for each alpha")
plt.xlabel("Alpha i's")
plt.ylabel("Error measure")
plt.show()
k_cfl=KNeighborsClassifier(n_neighbors=alpha[best_alpha])
k_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(k_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_train)
print ('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y))
predict_y = sig_clf.predict_proba(X_cv)
print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y))
predict_y = sig_clf.predict_proba(X_test)
print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y))
plot_confusion_matrix(y_test, sig_clf.predict(X_test))
# +
########
alpha=[10,50,100,500,1000,2000,3000]
cv_log_error_array=[]
train_log_error_array=[]
from sklearn.ensemble import RandomForestClassifier
for i in alpha:
r_cfl=RandomForestClassifier(n_estimators=i,random_state=42,n_jobs=-1)
r_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(r_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_cv)
cv_log_error_array.append(log_loss(y_cv, predict_y, labels=r_cfl.classes_, eps=1e-15))
for i in range(len(cv_log_error_array)):
print ('log_loss for c = ',alpha[i],'is',cv_log_error_array[i])
best_alpha = np.argmin(cv_log_error_array)
fig, ax = plt.subplots()
ax.plot(alpha, cv_log_error_array,c='g')
for i, txt in enumerate(np.round(cv_log_error_array,3)):
ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i]))
plt.grid()
plt.title("Cross Validation Error for each alpha")
plt.xlabel("Alpha i's")
plt.ylabel("Error measure")
plt.show()
r_cfl=RandomForestClassifier(n_estimators=alpha[best_alpha],random_state=42,n_jobs=-1)
r_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(r_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_train)
print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y))
predict_y = sig_clf.predict_proba(X_cv)
print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y))
predict_y = sig_clf.predict_proba(X_test)
print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y))
plot_confusion_matrix(y_test, sig_clf.predict(X_test))
# +
#####################Gradient boosting(XGBOOST)
alpha=[10,50,100,500,1000,2000]
cv_log_error_array=[]
for i in alpha:
x_cfl=XGBClassifier(n_estimators=i,nthread=-1)
x_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(x_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_cv)
cv_log_error_array.append(log_loss(y_cv, predict_y, labels=x_cfl.classes_, eps=1e-15))
for i in range(len(cv_log_error_array)):
print ('log_loss for c = ',alpha[i],'is',cv_log_error_array[i])
best_alpha = np.argmin(cv_log_error_array)
fig, ax = plt.subplots()
ax.plot(alpha, cv_log_error_array,c='g')
for i, txt in enumerate(np.round(cv_log_error_array,3)):
ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i]))
plt.grid()
plt.title("Cross Validation Error for each alpha")
plt.xlabel("Alpha i's")
plt.ylabel("Error measure")
plt.show()
x_cfl=XGBClassifier(n_estimators=alpha[best_alpha],nthread=-1)
x_cfl.fit(X_train,y_train)
sig_clf = CalibratedClassifierCV(x_cfl, method="sigmoid")
sig_clf.fit(X_train, y_train)
predict_y = sig_clf.predict_proba(X_train)
print ('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y))
predict_y = sig_clf.predict_proba(X_cv)
print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y))
predict_y = sig_clf.predict_proba(X_test)
print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y))
plot_confusion_matrix(y_test, sig_clf.predict(X_test))
# -
| 21,586 |
/examples/benchmark/logreg.ipynb | 5953a6c51c3bd5da04e2465e1920f03cd846f4af | [] | no_license | skale-me/skale-ml | https://github.com/skale-me/skale-ml | 6 | 4 | null | 2016-05-20T09:51:06 | 2016-05-06T09:16:20 | JavaScript | Jupyter Notebook | false | false | .js | 5,437 | // ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.15.2
// kernelspec:
// display_name: Javascript (Node.js)
// language: javascript
// name: javascript
// ---
// # Logistic regression with Skale
// In this example we will:
// * Load a SVM data file
// * Parse the file data to produce a label/features dataset
// * Compute a logistic regression model from the cleaned-up data
// We first establish a connection to our local skale cluster.
var sc = require('skale-engine').context();
// Later we will use LogisticRegression from skale-ml package.
var LogisticRegressionWithSGD = require('skale-ml').LogisticRegressionWithSGD;
// We will process SVM data from the following file.
var file = '1MB.dat';
// Let's configure now the number of iterations of the Gradient Descent.
var nIterations = 100;
// Next step is to load the file, parse its data and make it persistent to speedup SGD computation.
// Here we have to:
// * declare a parse function to apply on each line of file
// * read, parse and make data persistent
// * Instantiate the logistic regression model
// +
function featurize(line) {
var tmp = line.split(' ').map(Number);
var label = tmp.shift(); // in the current implementatuon we use [-1,1] labels
var features = tmp;
return [label, features];
}
var points = sc.textFile(file).map(featurize).persist();
var model = new LogisticRegressionWithSGD(points);
// -
// We can now train the logistic regression model, display the corresponding weights and end the skale context session.
// +
$$async$$ = true;
console.log('Training the model')
model.train(nIterations, function() {
  $$done$$('Model weights');
console.log(model.weights);
// sc.end();
});
| 1,831 |
/04.1-modulos.ipynb | eae2a98926b6df5305607126f5870eb3b53c82ec | [] | no_license | javipena21/introduccion-a-python | https://github.com/javipena21/introduccion-a-python | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,167 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
def poly(x):
#return (3*x**2 - 1)/2.
return x**x - 100
# +
xi, xf, Npoints = 0.1,4,10
h = (xf-xi)/float(Npoints)
x = np.linspace(xi,xf,Npoints)
y = poly(x)
#print(x)
# -
plt.plot(x,y)
#plt.plot(x,np.zeros(len(x)),'--')
def Derivada(f,x,h):
d = 0.
if h!=0:
d = (f(x+h)-f(x-h))/(2*h)
return d
# +
# Let's define the method
def NewtonMethod(f,df,xn,error,it,precision=0.001,iterations=1000):
h = 1.0e-4
while error > precision and it < iterations:
try:
xn1 = xn - f(xn)/df(f,xn,h)
error = np.abs( (xn1- xn)/xn1 )
#print(error)
except ZeroDivisionError:
            print('Division by zero encountered')
            break
xn = xn1
it += 1
return xn1
# -
root = NewtonMethod(poly, Derivada, 2, 10, it = 1)
print(root)
# +
Xtest = np.linspace(1,6,10)
print(Xtest)
for i in Xtest:
print(NewtonMethod(poly, Derivada, i, 10, it = 1))
# -
list()
# ### Splitting the data into train, test and validation sets
#
# We will train RDSM on 70% of the data, use a validation set of 10% for model selection, and report performance on the remaining 20% held-out test set.
# +
n = len(x)
tr_size = int(n*0.70)
vl_size = int(n*0.10)
te_size = int(n*0.20)
x_train, x_test, x_val = np.array(x[:tr_size], dtype = object), np.array(x[-te_size:], dtype = object), np.array(x[tr_size:tr_size+vl_size], dtype = object)
t_train, t_test, t_val = np.array(t[:tr_size], dtype = object), np.array(t[-te_size:], dtype = object), np.array(t[tr_size:tr_size+vl_size], dtype = object)
e_train, e_test, e_val = np.array(e[:tr_size], dtype = object), np.array(e[-te_size:], dtype = object), np.array(e[tr_size:tr_size+vl_size], dtype = object)
# -
# ### Setting the parameter grid
#
# Let's set up the parameter grid to tune hyper-parameters. We will tune the number of underlying survival distributions
# ($K$), the distribution choices (Log-Normal or Weibull), the learning rate for the Adam optimizer between $1\times10^{-3}$ and $1\times10^{-4}$, the number of hidden nodes per layer ($50$ or $100$), the number of layers ($3$, $2$ or $1$) and the type of recurrent cell (LSTM, GRU, RNN).
from sklearn.model_selection import ParameterGrid
param_grid = {'k' : [3, 4, 6],
'distribution' : ['LogNormal', 'Weibull'],
'learning_rate' : [1e-4, 1e-3],
'hidden': [50, 100],
'layers': [3, 2, 1],
'typ': ['LSTM', 'GRU', 'RNN'],
}
params = ParameterGrid(param_grid)
# ### Model Training and Selection
from dsm import DeepRecurrentSurvivalMachines
# +
models = []
for param in params:
model = DeepRecurrentSurvivalMachines(k = param['k'],
distribution = param['distribution'],
hidden = param['hidden'],
typ = param['typ'],
layers = param['layers'])
# The fit method is called to train the model
model.fit(x_train, t_train, e_train, iters = 1, learning_rate = param['learning_rate'])
models.append([[model.compute_nll(x_val, t_val, e_val), model]])
best_model = min(models)
model = best_model[0][1]
# -
# ### Inference
out_risk = model.predict_risk(x_test, times)
out_survival = model.predict_survival(x_test, times)
# ### Evaluation
#
# We evaluate the performance of RDSM in its discriminative ability (Time Dependent Concordance Index and Cumulative Dynamic AUC) as well as Brier Score on the concatenated temporal data.
from sksurv.metrics import concordance_index_ipcw, brier_score, cumulative_dynamic_auc
# +
cis = []
brs = []
et_train = np.array([(e_train[i][j], t_train[i][j]) for i in range(len(e_train)) for j in range(len(e_train[i]))],
dtype = [('e', bool), ('t', float)])
et_test = np.array([(e_test[i][j], t_test[i][j]) for i in range(len(e_test)) for j in range(len(e_test[i]))],
dtype = [('e', bool), ('t', float)])
et_val = np.array([(e_val[i][j], t_val[i][j]) for i in range(len(e_val)) for j in range(len(e_val[i]))],
dtype = [('e', bool), ('t', float)])
for i, _ in enumerate(times):
cis.append(concordance_index_ipcw(et_train, et_test, out_risk[:, i], times[i])[0])
brs.append(brier_score(et_train, et_test, out_survival, times)[1])
roc_auc = []
for i, _ in enumerate(times):
roc_auc.append(cumulative_dynamic_auc(et_train, et_test, out_risk[:, i], times[i])[0])
for horizon in enumerate(horizons):
print(f"For {horizon[1]} quantile,")
print("TD Concordance Index:", cis[horizon[0]])
print("Brier Score:", brs[0][horizon[0]])
print("ROC AUC ", roc_auc[horizon[0]][0], "\n")
# -
| 5,105 |
/Image/find_image_by_path_row.ipynb | 26ae77e6bb90acec3e329b020cd4c3161d890d3d | [
"MIT"
] | permissive | levi-manley/earthengine-py-notebooks | https://github.com/levi-manley/earthengine-py-notebooks | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 13,828 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/find_image_by_path_row.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/find_image_by_path_row.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/find_image_by_path_row.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/find_image_by_path_row.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
# The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
# +
# # %%capture
# # !pip install earthengine-api
# # !pip install geehydro
# -
# Import libraries
import ee
import folium
import geehydro
# Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
# if you are running this notebook for the first time or if you are getting an authentication error.
# ee.Authenticate()
ee.Initialize()
# ## Create an interactive map
# This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
# The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# ## Add Earth Engine Python script
# +
# Load an image collection, filtered so it's not too much data.
collection = ee.ImageCollection('LANDSAT/LT05/C01/T1') \
.filterDate('2008-01-01', '2008-12-31') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34))
# Compute the median in each band, each pixel.
# Band names are B1_median, B2_median, etc.
median = collection.reduce(ee.Reducer.median())
# The output is an Image. Add it to the map.
vis_param = {'bands': ['B4_median', 'B3_median', 'B2_median'], 'gamma': 1.6}
Map.setCenter(-122.3355, 37.7924, 9)
Map.addLayer(median, vis_param, 'Median Image')
# -
# ## Display Earth Engine data layers
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
lor(str(al), title)
markersize=10
eporange = np.arange(len(list(acclist[al]))+1)
thelist = np.insert(acclist[al],0,0)
subplot.plot(eporange, thelist , modshape, color = colmap ,label=str(al), alpha=0.8,linewidth=linewidth,markersize=markersize)
subplot.legend(loc='lower right',fontsize=legend_size)
# +
def bar_plot_acc(labels, cen, fl1, fl2, fl3):
x = np.arange(len(labels)) # the label locations
width = 0.2
#plt.figure(figsize=(10, 8))
fig, ax = plt.subplots(figsize=(12, 8))
rects1 = ax.bar(x - width, cen, width, label='BSP',color=next_color(cm.get_cmap('Set2'), 1))
rects2 = ax.bar(x , fl1, width, label='FedAvg with 8 clients and 40% skewness',color=next_color(cm.get_cmap('Set2'), 2))
rects3 = ax.bar(x + width, fl2, width, label='FedAvg with 8 clients and 60% skewness',color=next_color(cm.get_cmap('Set2'), 3))
rects4 = ax.bar(x + 2*width, fl3, width, label='FedAvg with 8 clients and 80% skewness',color=next_color(cm.get_cmap('Set2'), 4))
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('F1 Score %',fontsize=18)
#ax.set_title('Max F1 score achieved by FedAvg for the different skewness in compare to BSP',fontsize=20)
ax.set_xticks(x)
ax.set_xticklabels(labels,fontsize=18)
ax.legend(loc='lower right')
ax.set_ylim(bottom=.6)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.annotate('{:0.3f}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
def autolabel2(rectcent, rects):
for reo, rect in zip(rectcent, rects):
height_dif = reo.get_height() - rect.get_height()
height = rect.get_height()
ax.annotate('- {:0.2f}%'.format(height_dif),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel2(rects1, rects2)
autolabel2(rects1, rects3)
autolabel2(rects1, rects4)
_=plt.show()
#cen = [list(acc_dic.values())[0][24]*100,list(acc_dic2.values())[0][24]*100,list(acc_dic3.values())[0][24]*100]
#fl1 = [list(acc_dic.values())[1][24]*100,list(acc_dic2.values())[1][24]*100,list(acc_dic3.values())[1][24]*100]
#fl2 = [list(acc_dic.values())[2][24]*100,list(acc_dic2.values())[2][24]*100,list(acc_dic3.values())[2][24]*100]
labels = ['ResNet34', 'Alexnet', 'LeNet']
#bar_plot_acc(labels, cen, fl1, fl2)
# +
def bar_plot_communication(labels, cen, fl1, fl2, bsp_ideal=None):
x = np.arange(len(labels)) # the label locations
width = 0.2
#plt.figure(figsize=(10, 8))
#collist1 = [next_color(cm.get_cmap('tab10'), 0),]
fig, ax = plt.subplots(figsize=(12, 8))
rects1 = ax.bar(x - width, cen, width, label='BSP',color=next_color(cm.get_cmap('Set1'), 1))
rects2 = ax.bar(x , fl1, width, label='FedAvg/FedProx with c=0.75',color=next_color(cm.get_cmap('Set1'), 2))
rects3 = ax.bar(x + width, fl2, width, label='FedAvg/FedProx with c=0.5',color=next_color(cm.get_cmap('Set1'), 3))
if bsp_ideal:
rects1 = ax.bar(x - 2* width, bsp_ideal, width, label='BSP - Max Communication',color=next_color(cm.get_cmap('Set1'), 0))
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('KiloBytes',fontsize=14)
#ax.set_title('model communication cost on our dataset',fontsize=20)
ax.set_xticks(x)
ax.set_xticklabels(labels,fontsize=14)
#ax.set_ylim()
ax.set_axisbelow(True)
ax.grid(axis='y',color=next_color(cm.get_cmap('gist_rainbow'),6), linestyle='dashed')
ax.legend()
#plt.ylim(top=.5e9)
_=plt.show()
ep = 1
cln = 8
btchnr = 1000
bsp_ideal = [222954*btchnr*2*80,17587*btchnr*2*50,83332*btchnr*2*80]
bsp = [222954*2*cln*80,17587*2*cln*50,83332*cln*2*80,]
#fl1 = [list(acc_dic.values())[1][24]*100,list(acc_dic2.values())[1][24]*100,list(acc_dic3.values())[1][24]*100]
frackl = 6
fl2 = [222954*frackl*ep*2*100,17587*frackl*ep*2*50,83332*frackl*ep*2*100]
frackl = 4
fl1 = [222954*frackl*ep*2*100,17587*frackl*ep*2*80, 83332*frackl*ep*2*100]
labels = [ 'Alexnet', 'LeNet','ResNet34']
bar_plot_communication(labels, bsp, fl2, fl1, bsp_ideal=bsp_ideal)
bar_plot_communication(labels, bsp, fl2, fl1)
# +
import pandas as pd
df = pd.read_excel ('./multilabels/LandUse_Multilabeled.xlsx')
df_label = np.array(df)
image_perlabel = np.sum(df_label[:, 1:], axis=0)
class_names = np.array(["airplane", "bare-soil", "buildings", "cars", "chaparral", "court", "dock",
"field", "grass", "mobile-home", "pavement", "sand", "sea", "ship", "tanks", "trees", "water"])
x = np.arange(17)
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_axisbelow(True)
ax.grid(axis='y',color='red', linestyle='dashed')
plt.bar(x, image_perlabel)
ax.set_ylabel('Image Number',fontsize=14)
plt.xticks(x, class_names, rotation=60, fontsize = 14)
plt.show()
# +
fig, ax = plt.subplots(2, 2, figsize=(25,15))
#fig.suptitle("Centralised and Federated Learning ALgorithms comparison with 8 clients and 40% skewness", fontsize=24)
bsp_results ={"AlexNet" : np.genfromtxt('cfrac_results/BSP_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_22_27.csv',delimiter=',')[2,:],
"LeNet" : np.genfromtxt('cfrac_results/BSP_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_15_49.csv',delimiter=',')[2,:],
"ResNet34" : np.genfromtxt('cfrac_results/BSP_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_18_45.csv',delimiter=',')[2,:]
}
fedavg_results = {
"AlexNet": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_07_50.csv',delimiter=',')[2,:],
"LeNet": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_18_03.csv',delimiter=',')[2,:],
"ResNet34": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_23_38.csv',delimiter=',')[2,:]
}
fedprox_results = {"AlexNet": np.genfromtxt('FedProx_runs/FedProx_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_09_18.csv',delimiter=',')[2,:],
"LeNet": np.genfromtxt('FedProx_runs/FedProx_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_03_39.csv',delimiter=',')[2,:],
"ResNet34": np.genfromtxt('FedProx_runs/FedProx_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_07_17.csv',delimiter=',')[2,:]}
centralised_results ={
"AlexNet": np.genfromtxt('centralised_runs/Centralised_CNN_alexnet_bs_4_epochs_100_01_03_17_42.csv',delimiter=',')[2,:],
"LeNet": np.genfromtxt('centralised_runs/Centralised_CNN_lenet_bs_4_epochs_100_01_03_16_11.csv',delimiter=',')[2,:],
"ResNet34": np.genfromtxt('centralised_runs/Centralised_CNN_resnet34_bs_4_epochs_100_01_03_17_06.csv',delimiter=',')[2,:]}
restart_colors()
plot_curves(bsp_results, ax[0][1], ' BSP', 0, 1, "F1 Score", axis_size = 18)
restart_colors()
plot_curves(fedavg_results, ax[1][0], 'FedAvg', 0, 1, "F1 Score", axis_size = 18)
restart_colors()
plot_curves(fedprox_results, ax[1][1], 'FedProx', 0, 1, "F1 Score", axis_size = 18)
restart_colors()
plot_curves(centralised_results, ax[0][0], 'Centralised', 0, 1, "F1 Score", axis_size = 18)
# +
fig, ax = plt.subplots(figsize=(25,15))
#fig.suptitle("Federated ALgorithms comparison with 8 clients and 40% skewness", fontsize=24)
results ={
#"AlexNetBSP" : np.genfromtxt('cfrac_results/BSP_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_22_27.csv',delimiter=',')[2,:],
#"LeNetBSP" : np.genfromtxt('cfrac_results/BSP_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_15_49.csv',delimiter=',')[2,:],
# "ResNet34BSP" : np.genfromtxt('cfrac_results/BSP_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_18_45.csv',delimiter=',')[2,:],
#"AlexNet_Centralised": np.genfromtxt('centralised_runs/Centralised_CNN_alexnet_bs_4_epochs_100_01_03_17_42.csv',delimiter=',')[2,:],
"AlexNet_FedAvg": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_07_50.csv',delimiter=',')[2,:],
"AlexNet_FedProx": np.genfromtxt('FedProx_runs/FedProx_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_09_18.csv',delimiter=',')[2,:],
#"LeNet_Centralised": np.genfromtxt('centralised_runs/Centralised_CNN_lenet_bs_4_epochs_100_01_03_16_11.csv',delimiter=',')[2,:],
"LeNet_FedAvg": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_18_03.csv',delimiter=',')[2,:],
"LeNet_FedProx": np.genfromtxt('FedProx_runs/FedProx_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_03_39.csv',delimiter=',')[2,:],
#"ResNet34_Centralised": np.genfromtxt('centralised_runs/Centralised_CNN_resnet34_bs_4_epochs_100_01_03_17_06.csv',delimiter=',')[2,:],
"ResNet34_FedAvg": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_23_38.csv',delimiter=',')[2,:],
"ResNet34_FedProx": np.genfromtxt('FedProx_runs/FedProx_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_07_17.csv',delimiter=',')[2,:],
}
restart_colors()
plot_curves(results, ax, ' ', 0, 1, "F1 Score",True,22,3,20)
# +
restart_colors()
lenet_cfrac_results = {"0.5": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_18_03.csv',delimiter=',')[2,:],
"0.75": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_19_36.csv',delimiter=',')[2,:],
"1": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_1.0_bs_4_22_02_21_35.csv',delimiter=',')[2,:]}
resnet_cfrac_results = {"0.5": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_22_02_23_38.csv',delimiter=',')[2,:],
"0.75": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_23_02_02_40.csv',delimiter=',')[2,:],
"1": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_1.0_bs_4_23_02_06_39.csv',delimiter=',')[2,:]}
alexnet_cfrac_results = {"0.5": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_07_50.csv',delimiter=',')[2,:],
"0.75": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_23_02_09_37.csv',delimiter=',')[2,:],
"1": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_1.0_bs_4_23_02_12_02.csv',delimiter=',')[2,:]}
fig, ax = plt.subplots(3, 1, figsize=(12,16))
#fig.suptitle("FedAvg with 8 clients and 40% skewness with varying C_fraction", fontsize=24)
plot_curves(lenet_cfrac_results,ax[1],'LeNet',0.0,1.0,"F1 Score", axis_size = 18)
plot_curves(resnet_cfrac_results,ax[2],'ResNet',0.0,1.0, "F1 Score", axis_size = 18)
plot_curves(alexnet_cfrac_results,ax[0],'AlexNet',0.0,1.0, "F1 Score", axis_size = 18)
# +
restart_colors()
lenet_clients_results = {"10 clients": np.genfromtxt('nclients_results/FedAvg_CNN_lenet_clients_10_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_15_38.csv',delimiter=',')[2,:],
"25 clients": np.genfromtxt('nclients_results/FedAvg_CNN_lenet_clients_25_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_16_36.csv',delimiter=',')[2,:],
"50 clients": np.genfromtxt('nclients_results/FedAvg_CNN_lenet_clients_50_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_17_35.csv',delimiter=',')[2,:],}
resnet_clients_results = {"10 clients": np.genfromtxt('nclients_results/FedAvg_CNN_resnet34_clients_10_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_19_39.csv',delimiter=',')[2,:],
"25 clients": np.genfromtxt('nclients_results/FedAvg_CNN_resnet34_clients_25_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_21_42.csv',delimiter=',')[2,:],
"50 clients": np.genfromtxt('nclients_results/FedAvg_CNN_resnet34_clients_50_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_23_02_23_48.csv',delimiter=',')[2,:],}
alexnet_clients_results = {"10 clients": np.genfromtxt('nclients_results/FedAvg_CNN_alexnet_clients_10_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_24_02_01_00.csv',delimiter=',')[2,:],
"25 clients": np.genfromtxt('nclients_results/FedAvg_CNN_alexnet_clients_25_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.5_bs_4_24_02_02_10.csv',delimiter=',')[2,:],
}
fig, ax = plt.subplots(3, 1, figsize=(12,16))
#fig.suptitle("FedAvg with 8 clients and 40% skewness with varying number of clients", fontsize=24)
plot_curves(lenet_clients_results,ax[1],'LeNet',0.0,1.0,"F1 Score", axis_size = 18)
plot_curves(resnet_clients_results,ax[2],'ResNet',0.0,1.0, "F1 Score", axis_size = 18)
plot_curves(alexnet_clients_results,ax[0],'AlexNet',0.0,1.0, "F1 Score", axis_size = 18)
# +
restart_colors()
batch_size_comparison = {"1":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_1_26_02_01_27.csv',delimiter=',')[2,:],
"4":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_26_02_15_25.csv',delimiter=',')[2,:],
"8":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_8_25_02_23_02.csv',delimiter=',')[2,:],
"16":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_16_25_02_21_49.csv',delimiter=',')[2,:],
"32":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_32_25_02_20_41.csv',delimiter=',')[2,:],
"64":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_64_25_02_19_24.csv',delimiter=',')[2,:],
"128":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_128_26_02_12_42.csv',delimiter=',')[2,:],
"256":np.genfromtxt('batchsize_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_256_26_02_13_50.csv',delimiter=',')[2,:],}
fig, ax = plt.subplots(figsize=(15,10))
#fig.suptitle("FedAvg with 4 clients and 40% skewness with varying batch size", fontsize=24)
plot_curves(batch_size_comparison,ax,'LeNet',0.0,1.0,"F1 Score", axis_size = 18)
# +
x = np.arange(8) # the label locations
width = 0.5
#plt.figure(figsize=(10, 8))
fig, ax = plt.subplots(figsize=(12,8))
rects1 = ax.bar(x, [150,100,75,56,57,71,74,78], width, label='FedAvg for LeNet',color=next_color(cm.get_cmap('gist_rainbow'), 3))
ax.set_ylabel('Runtime (in mins)',fontsize=14)
#ax.set_title('Run Time for Different Batch Sizes',fontsize=20)
ax.set_xlabel('Batch Size',fontsize=14)
ax.set_xticks(x)
ax.set_axisbelow(True)
ax.grid(axis='y',color='red', linestyle='dashed')
ax.set_xticklabels(["1","4","8","16","32","64","128","256"],fontsize=14)
ax.legend()
_=plt.show()
# +
restart_colors()
lenet_skewness_results = {"0 % (IID)": np.genfromtxt('large_skewness_results/FedAvg_CNN_lenet_clients_4_skew_0_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_26_02_22_39.csv',delimiter=',')[2,:],
"20 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_lenet_clients_4_skew_20_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_25_02_17_13.csv',delimiter=',')[2,:],
"40 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_lenet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_23_02_20_11.csv',delimiter=',')[2,:],
#"LENET_60": np.genfromtxt('skewness_runs/FedAvg_CNN_lenet_clients_4_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_28_02_03_07.csv',delimiter=',')[2,:],
#"LENET_80": np.genfromtxt('skewness_runs/FedAvg_CNN_lenet_clients_4_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_28_02_04_31.csv',delimiter=',')[2,:],
}
resnet_skewness_results = {"0 % (IID)": np.genfromtxt('large_skewness_results/FedAvg_CNN_resnet34_clients_4_skew_0_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_26_02_21_14.csv',delimiter=',')[2,:],
"20 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_resnet34_clients_4_skew_20_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_26_02_02_32.csv',delimiter=',')[2,:],
"40 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_resnet34_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_04_45.csv',delimiter=',')[2,:],
#"RESNET_60": np.genfromtxt('skewness_runs/FedAvg_CNN_resnet34_clients_4_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_27_02_22_41.csv',delimiter=',')[2,:],
#"RESNET_80": np.genfromtxt('skewness_runs/FedAvg_CNN_resnet34_clients_4_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_28_02_01_42.csv',delimiter=',')[2,:],
}
alexnet_skewness_results = {"0 % (IID)": np.genfromtxt('large_skewness_results/FedAvg_CNN_alexnet_clients_4_skew_0_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_27_02_00_25.csv',delimiter=',')[2,:],
"20 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_alexnet_clients_4_skew_20_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_27_02_02_12.csv',delimiter=',')[2,:],
"40 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_alexnet_clients_4_skew_40_smallskew_False_epochs_100_cepochs_5_cfrac_0.75_bs_4_24_02_17_52.csv',delimiter=',')[2,:],
#"ALEXNET_60": np.genfromtxt('skewness_runs/FedAvg_CNN_alexnet_clients_4_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_28_02_06_17.csv',delimiter=',')[2,:],
#"ALEXNET_80": np.genfromtxt('skewness_runs/FedAvg_CNN_alexnet_clients_4_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_28_02_08_04.csv',delimiter=',')[2,:],
}
fig, ax = plt.subplots(3, 1, figsize=(12,16))
#fig.suptitle("FedAvg with 4 clients with varying amount of data skew % on common label", fontsize=24)
plot_curves(lenet_skewness_results,ax[1],'LeNet',0,1,"F1 Score", axis_size = 18)
plot_curves(resnet_skewness_results,ax[2],'ResNet',0,1, "F1 Score", axis_size = 18)
plot_curves(alexnet_skewness_results,ax[0],'AlexNet',0,1, "F1 Score", axis_size = 18)
# +
restart_colors()
lenet_skewness_results = {
"40 %": np.genfromtxt('cfrac_results/FedAvg_CNN_lenet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_22_02_19_36.csv',delimiter=',')[2,:],
"60 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_lenet_clients_8_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_05_13.csv',delimiter=',')[2,:],
"80 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_lenet_clients_8_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_06_39.csv',delimiter=',')[2,:],
}
resnet_skewness_results = {
"40 %": np.genfromtxt('cfrac_results/FedAvg_CNN_resnet34_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_23_02_02_40.csv',delimiter=',')[2,:],
"60 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_resnet34_clients_8_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_00_39.csv',delimiter=',')[2,:],
"80 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_resnet34_clients_8_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_03_46.csv',delimiter=',')[2,:],
}
alexnet_skewness_results = {
"40 %": np.genfromtxt('cfrac_results/FedAvg_CNN_alexnet_clients_8_skew_40_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_23_02_09_37.csv',delimiter=',')[2,:],
"60 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_alexnet_clients_8_skew_60_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_08_28.csv',delimiter=',')[2,:],
"80 %": np.genfromtxt('large_skewness_results/FedAvg_CNN_alexnet_clients_8_skew_80_smallskew_True_epochs_100_cepochs_5_cfrac_0.75_bs_4_02_03_10_18.csv',delimiter=',')[2,:],
}
fig, ax = plt.subplots(3, 1, figsize=(12,16))
#fig.suptitle("FedAvg with 8 clients with varying high amount of data skew % on less common label", fontsize=24)
plot_curves(lenet_skewness_results,ax[1],'LeNet',0,1,"F1 Score", axis_size = 18)
plot_curves(resnet_skewness_results,ax[2],'ResNet',0,1, "F1 Score", axis_size = 18)
plot_curves(alexnet_skewness_results,ax[0],'AlexNet',0,1, "F1 Score", axis_size = 18)
# +
cen = np.array([0.902,0.9475,0.938])*100
fl1 = np.array([0.824,0.924,0.911])*100
fl2 = np.array([0.809,0.9217,0.887])*100
fl3 = np.array([0.804,0.903,0.891])*100
labels = ['Alexnet', 'LeNet','ResNet34' ]
bar_plot_acc(labels, cen, fl1, fl2, fl3)
# -
| 26,246 |
/Preprocess/rating.ipynb | 037b5f2f1996980d2d506f94cd9d20e2816033ac | [] | no_license | parkchanghyup/2020bigcontest | https://github.com/parkchanghyup/2020bigcontest | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 30,656 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Evaluation data preprocessing
# ---
ํ๊ฐ๋ฐ์ดํฐ = pd.read_excel ('ํ๊ฐ๋ฐ์ดํฐ.xlsx')
ํ๊ฐ๋ฐ์ดํฐ.head()
nan = list(np.where(ํ๊ฐ๋ฐ์ดํฐ['๋ธ์ถ(๋ถ)'].isna())[0])
for i in nan:
ํ๊ฐ๋ฐ์ดํฐ.iloc[i,1] = int(ํ๊ฐ๋ฐ์ดํฐ.iloc[i-1,1])
ํ๊ฐ๋ฐ์ดํฐ.head()
ํ๊ฐ๋ฐ์ดํฐ = ํ๊ฐ๋ฐ์ดํฐ[ํ๊ฐ๋ฐ์ดํฐ['์ํ๊ตฐ']!='๋ฌดํ']
ํ๊ฐ๋ฐ์ดํฐ.reset_index(drop=True,inplace=True)
# ### Add new columns to the evaluation data
# ---
# +
# column that bins the sales unit price into ranges
def get_str(num):
if num<50000:
return '5๋ง์์ดํ'
elif num < 100000:
return '10๋ง์์ดํ'
elif num <300000:
return '30๋ง์์ดํ'
elif num <500000:
return '50๋ง์์ดํ'
elif num <1000000:
return '100๋ง์์ดํ'
else :
return '100๋ง์์ด์'
ํ๋งค๋จ๊ฐ๋ฒ์ = [ get_str(x) for x in ํ๊ฐ๋ฐ์ดํฐ['ํ๋งค๋จ๊ฐ'] ]
ํ๊ฐ๋ฐ์ดํฐ['ํ๋งค๋จ๊ฐ๋ฒ์'] = ํ๋งค๋จ๊ฐ๋ฒ์
# -
ํ๊ฐ๋ฐ์ดํฐ.head()
def get_time(n):
๋ช์ = str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[11:13]
๋ฌด์จ์์ผ = datetime.date(int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[:4]),int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[5:7]),int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[8:10])).strftime('%A')
time = [๋ช์, ๋ฌด์จ์์ผ]
return time
ํ๊ฐ๋ฐ์ดํฐ.reset_index(drop=True,inplace=True)
ํ๊ฐ๋ฐ์ดํฐ.head()
# +
import datetime
def get_time(n):
๋ช์ = str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[11:13]
๋ฌด์จ์์ผ = datetime.date(int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[:4]),int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[5:7]),int(str(ํ๊ฐ๋ฐ์ดํฐ['๋ฐฉ์ก์ผ์'][n])[8:10])).strftime('%A')
time = [๋ช์, ๋ฌด์จ์์ผ]
return time
๋ช์ =[]
๋ฌด์จ์์ผ = []
for i in range(len(ํ๊ฐ๋ฐ์ดํฐ)):
time = get_time(i)
๋ช์.append(time[0])
๋ฌด์จ์์ผ.append(time[1])
ํ๊ฐ๋ฐ์ดํฐ['๋ช์']= ๋ช์
ํ๊ฐ๋ฐ์ดํฐ['๋ฌด์จ์์ผ']=๋ฌด์จ์์ผ
ํ๊ฐ๋ฐ์ดํฐ.head()
# -
ํ๊ฐ_์ ์ฒ๋ฆฌ = ํ๊ฐ๋ฐ์ดํฐ[['๋ธ์ถ(๋ถ)','์ํ๊ตฐ','ํ๋งค๋จ๊ฐ๋ฒ์','๋ช์','๋ฌด์จ์์ผ']]
ํ๊ฐ_์ ์ฒ๋ฆฌ.head()
# ### One-hot encoding
# +
ํ๊ฐ_์ ์ฒ๋ฆฌ = pd.get_dummies(ํ๊ฐ_์ ์ฒ๋ฆฌ)
ํ๊ฐ_์ ์ฒ๋ฆฌ.info()
# -
# ### Predict viewership ratings with the regression model developed earlier
# ---
์์ฒญ๋ฅ ํ๊ท = list(ridge_reg.predict(np.array(ํ๊ฐ_์ ์ฒ๋ฆฌ )))
ํ๊ฐ๋ฐ์ดํฐ['์์ฒญ๋ฅ ํ๊ท ']=์์ฒญ๋ฅ ํ๊ท
ํ๊ฐ๋ฐ์ดํฐ.to_csv('ํ๊ฐ.csv',index=False)
ํ๊ฐ๋ฐ์ดํฐ = pd.read_csv('ํ๊ฐ.csv')
| 1,994 |
/03_classification.ipynb | 55076439ba94fe886ef27dbead706f8a184a4412 | [
"Apache-2.0"
] | permissive | wangruinju/handson-ml | https://github.com/wangruinju/handson-ml | 0 | 1 | null | 2017-08-25T20:27:10 | 2017-08-25T18:00:49 | null | Jupyter Notebook | false | false | .py | 351,808 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# **Chapter 3 โ Classification**
#
# _This notebook contains all the sample code and solutions to the exercises in chapter 3._
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "classification"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
# -
# # MNIST
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
mnist
X, y = mnist["data"], mnist["target"]
X.shape
y.shape
28*28
# +
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
some_digit = X[36000]
some_digit_image = some_digit.reshape(28, 28)
plt.imshow(some_digit_image, cmap = matplotlib.cm.binary,
interpolation="nearest")
plt.axis("off")
save_fig("some_digit_plot")
plt.show()
# -
def plot_digit(data):
image = data.reshape(28, 28)
plt.imshow(image, cmap = matplotlib.cm.binary,
interpolation="nearest")
plt.axis("off")
# EXTRA
def plot_digits(instances, images_per_row=10, **options):
size = 28
images_per_row = min(len(instances), images_per_row)
images = [instance.reshape(size,size) for instance in instances]
n_rows = (len(instances) - 1) // images_per_row + 1
row_images = []
n_empty = n_rows * images_per_row - len(instances)
images.append(np.zeros((size, size * n_empty)))
for row in range(n_rows):
rimages = images[row * images_per_row : (row + 1) * images_per_row]
row_images.append(np.concatenate(rimages, axis=1))
image = np.concatenate(row_images, axis=0)
plt.imshow(image, cmap = matplotlib.cm.binary, **options)
plt.axis("off")
plt.figure(figsize=(9,9))
example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]
plot_digits(example_images, images_per_row=10)
save_fig("more_digits_plot")
plt.show()
y[36000]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# +
import numpy as np
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# -
# # Binary classifier
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
# +
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5)
# -
sgd_clf.predict([some_digit])
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
# +
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, random_state=42)
for train_index, test_index in skfolds.split(X_train, y_train_5):
clone_clf = clone(sgd_clf)
X_train_folds = X_train[train_index]
y_train_folds = (y_train_5[train_index])
X_test_fold = X_train[test_index]
y_test_fold = (y_train_5[test_index])
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
n_correct = sum(y_pred == y_test_fold)
print(n_correct / len(y_pred))
# -
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy")
# +
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
# +
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_5, y_train_pred)
# -
y_train_perfect_predictions = y_train_5
confusion_matrix(y_train_5, y_train_perfect_predictions)
# +
from sklearn.metrics import precision_score, recall_score
precision_score(y_train_5, y_train_pred)
# -
4344 / (4344 + 1307)
recall_score(y_train_5, y_train_pred)
4344 / (4344 + 1077)
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
4344 / (4344 + (1077 + 1307)/2)
y_scores = sgd_clf.decision_function([some_digit])
y_scores
threshold = 0
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
threshold = 200000
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,
method="decision_function")
y_scores = y_scores[:,1]  # workaround: some Scikit-Learn versions (e.g. 0.19.0) return an extra dimension here
# +
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
# +
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
plt.xlabel("Threshold", fontsize=16)
plt.legend(loc="upper left", fontsize=16)
plt.ylim([0, 1])
plt.figure(figsize=(8, 4))
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.xlim([-700000, 700000])
save_fig("precision_recall_vs_threshold_plot")
plt.show()
# -
(y_train_pred == (y_scores > 0)).all()
y_train_pred_90 = (y_scores > 70000)
precision_score(y_train_5, y_train_pred_90)
recall_score(y_train_5, y_train_pred_90)
# +
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
save_fig("precision_vs_recall_plot")
plt.show()
# -
# # ROC curves
# +
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
# +
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'k--')
plt.axis([0, 1, 0, 1])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
save_fig("roc_curve_plot")
plt.show()
# +
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
# -
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state=42)
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,
method="predict_proba")
y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest)
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
plt.legend(loc="lower right", fontsize=16)
save_fig("roc_curve_comparison_plot")
plt.show()
roc_auc_score(y_train_5, y_scores_forest)
y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)
precision_score(y_train_5, y_train_pred_forest)
recall_score(y_train_5, y_train_pred_forest)
# # Multiclass classification
sgd_clf.fit(X_train, y_train)
sgd_clf.predict([some_digit])
some_digit_scores = sgd_clf.decision_function([some_digit])
some_digit_scores
np.argmax(some_digit_scores)
sgd_clf.classes_
sgd_clf.classes_[5]
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))
ovo_clf.fit(X_train, y_train)
ovo_clf.predict([some_digit])
len(ovo_clf.estimators_)
forest_clf.fit(X_train, y_train)
forest_clf.predict([some_digit])
forest_clf.predict_proba([some_digit])
cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy")
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
def plot_confusion_matrix(matrix):
"""If you prefer color and a colorbar"""
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
plt.matshow(conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_plot", tight_layout=False)
plt.show()
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_errors_plot", tight_layout=False)
plt.show()
# +
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
plt.figure(figsize=(8,8))
plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5)
save_fig("error_analysis_digits_plot")
plt.show()
# -
# # Multilabel classification
# +
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
# -
knn_clf.predict([some_digit])
y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3)
f1_score(y_multilabel, y_train_knn_pred, average="macro")
# # Multioutput classification
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_train_mod = X_train
y_test_mod = X_test
some_index = 5500
plt.subplot(121); plot_digit(X_test_mod[some_index])
plt.subplot(122); plot_digit(y_test_mod[some_index])
save_fig("noisy_digit_example_plot")
plt.show()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plot_digit(clean_digit)
save_fig("cleaned_digit_example_plot")
# # Extra material
# ## Dummy (i.e. random) classifier
from sklearn.dummy import DummyClassifier
dmy_clf = DummyClassifier()
y_probas_dmy = cross_val_predict(dmy_clf, X_train, y_train_5, cv=3, method="predict_proba")
y_scores_dmy = y_probas_dmy[:, 1]
fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dmy)
plot_roc_curve(fprr, tprr)
# ## KNN classifier
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_jobs=-1, weights='distance', n_neighbors=4)
knn_clf.fit(X_train, y_train)
y_knn_pred = knn_clf.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_knn_pred)
# +
from scipy.ndimage.interpolation import shift
def shift_digit(digit_array, dx, dy, new=0):
return shift(digit_array.reshape(28, 28), [dy, dx], cval=new).reshape(784)
plot_digit(shift_digit(some_digit, 5, 1, new=100))
# +
X_train_expanded = [X_train]
y_train_expanded = [y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
shifted_images = np.apply_along_axis(shift_digit, axis=1, arr=X_train, dx=dx, dy=dy)
X_train_expanded.append(shifted_images)
y_train_expanded.append(y_train)
X_train_expanded = np.concatenate(X_train_expanded)
y_train_expanded = np.concatenate(y_train_expanded)
X_train_expanded.shape, y_train_expanded.shape
# -
knn_clf.fit(X_train_expanded, y_train_expanded)
y_knn_expanded_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_knn_expanded_pred)
ambiguous_digit = X_test[2589]
knn_clf.predict_proba([ambiguous_digit])
plot_digit(ambiguous_digit)
# # Exercise solutions
# **Coming soon**
| 12,595 |
/optional/ml_foundation/04 Training and Testing Data.ipynb | 093b5d8216e6a513eacf7780363bb95c8b4bf329 | [] | no_license | ifishlin/sprintdeeplearning | https://github.com/ifishlin/sprintdeeplearning | 2 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 9,050 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [tensorflow]
# language: python
# name: Python [tensorflow]
# ---
# %load_ext watermark
# %watermark -d -u -a 'Andreas Mueller, Kyle Kastner, Sebastian Raschka' -v -p numpy,scipy,matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# # SciPy 2016 Scikit-learn Tutorial
# Training and Testing Data
# =====================================
#
# To evaluate how well our supervised models generalize, we can split our data into a training and a test set:
#
# <img src="figures/train_test_split_matrix.svg" width="100%">
# +
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
iris = load_iris()
X, y = iris.data, iris.target
classifier = KNeighborsClassifier()
# -
# Thinking about how machine learning is normally performed, the idea of a train/test split makes sense. Real world systems train on the data they have, and as other data comes in (from customers, sensors, or other sources) the classifier that was trained must predict on fundamentally *new* data. We can simulate this during training using a train/test split - the test data is a simulation of "future data" which will come into the system during production.
#
# Specifically for iris, the 150 labels in iris are sorted, which means that if we split the data using a proportional split, this will result in fundamentally altered class distributions. For instance, if we performed a common 2/3 training data and 1/3 test data split, our training dataset would only consist of flower classes 0 and 1 (Setosa and Versicolor), and our test set would only contain samples with class label 2 (Virginica flowers).
#
# Under the assumption that all samples are independent of each other (in contrast time series data), we want to **randomly shuffle the dataset before we split the dataset** as illustrated above.
y
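# To make this concrete, a small sketch of what a naive, unshuffled 2/3 vs. 1/3 split of these sorted labels would produce (the `naive_*` names are used only for this illustration): the training portion would never see class 2 at all.
# +
naive_train_y, naive_test_y = y[:100], y[100:]
print("Training class counts:", np.bincount(naive_train_y, minlength=3))
print("Test class counts:    ", np.bincount(naive_test_y, minlength=3))
# -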
# Now we need to split the data into training and testing. Luckily, this is a common pattern in machine learning and scikit-learn has a pre-built function to split data into training and testing sets for you. Here, we use 50% of the data as training, and 50% testing. 80% and 20% is another common split, but there are no hard and fast rules. The most important thing is to fairly evaluate your system on data it *has not* seen during training!
# +
from sklearn.cross_validation import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
random_state=123)
print("Labels for training and testing data")
print(train_y)
print(test_y)
# -
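# As a quick illustration of the 80%/20% split mentioned above, only the `train_size` argument changes (the `*_80`/`*_20` variables below are just for this example and are not used further):
# +
train_X_80, test_X_20, train_y_80, test_y_20 = train_test_split(X, y,
                                                                train_size=0.8,
                                                                random_state=123)
print(len(train_X_80), "training and", len(test_X_20), "test samples")
# -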
# ---
# **Tip: Stratified Split**
#
# Especially for relatively small datasets, it's better to stratify the split. Stratification means that we maintain the original class proportion of the dataset in the test and training sets. For example, after we randomly split the dataset as shown in the previous code example, we have the following class proportions in percent:
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
# So, in order to stratify the split, we can pass the label array as an additional option to the `train_test_split` function:
# +
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
random_state=123,
stratify=y)
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
# -
# ---
# By evaluating our classifier performance on data that has been seen during training, we could get false confidence in the predictive power of our model. In the worst case, it may simply memorize the training samples but completely fail to classify new, similar samples -- we really don't want to put such a system into production!
#
# Instead of using the same dataset for training and testing (this is called "resubstitution evaluation"), it is much much better to use a train/test split in order to estimate how well your trained model is doing on new data.
# +
classifier.fit(train_X, train_y)
pred_y = classifier.predict(test_X)
print("Fraction Correct [Accuracy]:")
print(np.sum(pred_y == test_y) / float(len(test_y)))
# -
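# For contrast, a quick sketch of the "resubstitution evaluation" mentioned above next to the held-out test accuracy, reusing the variables defined in this notebook:
# +
train_pred_y = classifier.predict(train_X)
print("Resubstitution accuracy:", np.sum(train_pred_y == train_y) / float(len(train_y)))
print("Held-out test accuracy: ", np.sum(pred_y == test_y) / float(len(test_y)))
# -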
# We can also visualize the correct and failed predictions
# +
print('Samples correctly classified:')
correct_idx = np.where(pred_y == test_y)[0]
print(correct_idx)
print('\nSamples incorrectly classified:')
incorrect_idx = np.where(pred_y != test_y)[0]
print(incorrect_idx)
# -
print('Predicted label of sample 23 ->', pred_y[23])
print('True label of sample 23 ->', test_y[23])
# +
# Plot two dimensions
colors = ["darkblue", "darkgreen", "gray"]
for n, color in enumerate(colors):
idx = np.where(test_y == n)[0]
plt.scatter(test_X[idx, 0], test_X[idx, 1], color=color, label="Class %s" % str(n))
plt.scatter(test_X[incorrect_idx, 0], test_X[incorrect_idx, 1], color="darkred")
# Make xlim larger to accommodate legend
plt.xlim(3, 9)
plt.legend(loc=3)
plt.title("Iris Classification results")
plt.show()
# -
# We can see that the errors occur in the area where green (class 1) and gray (class 2) overlap. This gives us insight about what features to add - any feature which helps separate class 1 and class 2 should improve classifier performance.
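# As a rough illustration of that insight, comparing the class 1 and class 2 means of each of the four iris features on the training set hints at which measurements help separate the two overlapping classes:
# +
for i, name in enumerate(iris.feature_names):
    m1 = train_X[train_y == 1, i].mean()
    m2 = train_X[train_y == 2, i].mean()
    print("%s: class 1 mean %.2f, class 2 mean %.2f" % (name, m1, m2))
# -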
| 5,814 |
/ud120-projects-master/svm/Mini-Project2.ipynb | 59dba00d6f4365c527d2f12cc327793f6da4fd91 | [] | no_license | alitabet/Udacity-Introduction-to-Machine-Learning | https://github.com/alitabet/Udacity-Introduction-to-Machine-Learning | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 11,126 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Mini-Project 2
# %run svm_author_id.py
# %run svm_author_id.py
# +
# # %load ../tools/email_preprocess
# #!/usr/bin/python
import pickle
import cPickle
import numpy
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
"""
this function takes a pre-made list of email texts (by default word_data.pkl)
and the corresponding authors (by default email_authors.pkl) and performs
a number of preprocessing steps:
-- splits into training/testing sets (10% testing)
-- vectorizes into tfidf matrix
-- selects/keeps most helpful features
        after this, the features and labels are put into numpy arrays, which play nice with sklearn functions
4 objects are returned:
-- training/testing features
-- training/testing labels
"""
### the words (features) and authors (labels), already largely preprocessed
### this preprocessing will be repeated in the text learning mini-project
authors_file_handler = open(authors_file, "r")
authors = pickle.load(authors_file_handler)
authors_file_handler.close()
words_file_handler = open(words_file, "r")
word_data = cPickle.load(words_file_handler)
words_file_handler.close()
### test_size is the percentage of events assigned to the test set (remainder go into training)
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)
### feature selection, because text is super high dimensional and
### can be really computationally chewy as a result
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(features_train_transformed, labels_train)
features_train_transformed = selector.transform(features_train_transformed).toarray()
features_test_transformed = selector.transform(features_test_transformed).toarray()
### info on the data
print "no. of Chris training emails:", sum(labels_train)
print "no. of Sara training emails:", len(labels_train)-sum(labels_train)
return features_train_transformed, features_test_transformed, labels_train, labels_test
# +
# # %load svm_author_id.py
# #!/usr/bin/python
"""
this is the code to accompany the Lesson 2 (SVM) mini-project
use an SVM to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
#sys.path.append("../tools/")
#from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
print len(labels_train)
print len(labels_test)
print labels_train[0:10]
print labels_test[0:10]
# +
# # %load svm_author_id.py
# #!/usr/bin/python
"""
this is the code to accompany the Lesson 2 (SVM) mini-project
use an SVM to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
features_train = features_train[:len(features_train)/100]
labels_train = labels_train[:len(labels_train)/100]
#########################################################
### your code goes here ###
from sklearn import svm
clf = svm.SVC(kernel="rbf")
to = time()
clf.fit(features_train, labels_train)
print "training time: ",round(time()-to,3),"s"
t1 = time()
pred = clf.predict(features_test)
print "testing time: ",round(time()-t1,3),"s"
from sklearn.metrics import accuracy_score
print accuracy_score(pred,labels_test)
# +
# # %load svm_author_id.py
# #!/usr/bin/python
"""
this is the code to accompany the Lesson 2 (SVM) mini-project
use an SVM to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
print len(labels_train)
print len(labels_test)
#########################################################
### your code goes here ###
from sklearn import svm
C_list = [10000.0]#[10.0,100.0,1000.0,10000.0]
for C_val in C_list:
print "C = ",C_val
clf = svm.SVC(C=C_val,kernel="rbf")
to = time()
clf.fit(features_train, labels_train)
print "training time: ",round(time()-to,3),"s"
t1 = time()
pred = clf.predict(features_test)
print "testing time: ",round(time()-t1,3),"s"
from sklearn.metrics import accuracy_score
print accuracy_score(pred,labels_test)
#########################################################
# -
print pred[10]," ",pred[9]
print pred[26]," ",pred[25]
print pred[50]," ",pred[49]
#list(pred)
print len(pred)
print "Sarah: ",list(pred).count(0)
print "Chris: ",list(pred).count(1)
print len(pred)
print "Sarah: ",list(pred).count(0)
print "Chris: ",list(pred).count(1)
| 6,612 |
/rsna/colab_RSNA_pneumonia_detecion.ipynb | 1213f7e683ee63ed7686ce02d2388c184f217a14 | [] | no_license | samik-saha/kaggle | https://github.com/samik-saha/kaggle | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 274,853 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# load dataframes
properties = pd.read_csv('./properties_2016.csv')
train = pd.read_csv('./train_2016.csv')
# throw away all properties that aren't listed as sold in train_2016.csv
joined = train.merge(properties,on='parcelid', how='left')
joined.to_csv('./sold_properties_2016.csv')
# new df containing only sold properties
soldprop_df = pd.read_csv('./sold_properties_2016.csv', index_col=0)
# looking at the data
soldprop_df.head()
# defining a helper function to see the NaN ratio in our df
sold_count = soldprop_df.shape[0]
def nan_ratio(rec):
nan_count = sum(pd.isnull(rec))
return (nan_count/sold_count)*100
# +
# soldprop_df.apply(nan_ratio).sort_values().plot(kind='bar', figsize=(17, 5))
# -
soldprop_df.apply(nan_ratio)
# keep only the columns where at most 50% of the values are NaN (dropna with axis=1 drops columns)
nan_df = soldprop_df.dropna(thresh=len(soldprop_df) - soldprop_df.shape[0]/2, axis=1)
nan_df.apply(nan_ratio).sort_values()
# make a preliminary correlation map
corrmat = nan_df.corr()
f, ax = plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax = 0.8, square=True)
# ### Fill NaN values with the mean of that column
# features with < 6% of NaNs
col_mean = ['parcelid', 'logerror', 'transactiondate','propertylandusetypeid', 'longitude', 'latitude', 'roomcnt', 'fips',
'rawcensustractandblock', 'assessmentyear', 'bedroomcnt', 'bathroomcnt', 'regionidcounty','taxvaluedollarcnt',
'landtaxvaluedollarcnt', 'taxamount', 'regionidzip',
'structuretaxvaluedollarcnt', 'censustractandblock', 'calculatedfinishedsquarefeet', 'yearbuilt',
'fullbathcnt', 'calculatedbathnbr', 'regionidcity', 'finishedsquarefeet12', 'lotsizesquarefeet', 'unitcnt']
# creating new df with those features
full_df = nan_df[col_mean]
full_df
# filling the missing values with the mean of the column
full_df = full_df.fillna(full_df.mean())
full_df.isnull().sum()
full_df.to_csv('clean_data_zillow.csv')
new = pd.read_csv('./clean_data_zillow.csv',index_col=0)
new
| 2,390 |
/6.3/Activities/10-Bank_Heatmap/Scripts/Untitled.ipynb | d3ef6d8715c04366a61699b30bad2b932eee9227 | [] | no_license | MariaSorensen/APIs | https://github.com/MariaSorensen/APIs | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 569 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python (pandas_env)
# language: python
# name: pandas_env
# ---
# Natalie Olowokere
# ## Matric no.: 19100111227
# ### School Email: [email protected]
#
# # Exercise 1
# ## _Write a Python program to get the difference between a given number and 17, if the number is greater than 17, return double the absolute difference._
#
# ![image.png](attachment:image.png)
#
#
#
# +
def subtraction(b):
    if b > 17:
        return abs(b - 17) * 2
    return 17 - b

print(subtraction(56))   # 78
print(subtraction(12))   # 5
# -
# # Exercise 2
# ## _Write a Python Program to calculate the sum of three given numbers, if the values are equal then return thrice their sum._
#
a = int(input("Enter desired num 1:"))
b = int(input("Enter desired num 2:"))
c = int(input("Enter desired num 3:"))
total = a + b + c
if a == b == c:          # compare as integers, not as raw input strings
    print(total * 3)
else:
    print(total)
# # Exercise 3
# ## _Write a Python Program which return true if two given integer values are equal or their sum or difference is 5._
#
a = int(input("Enter desired num 1:"))
b = int(input("Enter desired num 2:"))
if a == b:
    print(True)
elif a + b == 5:
    print(True)
elif abs(a - b) == 5:    # difference taken as absolute value
    print(True)
else:
    print(False)
# # Exercise 4
# ## _Write a Python Program to sort three integers without using conditional statements and loops._
#
x = [2, 1, 5]
low = min(x)
high = max(x)
middle = sum(x) - high - low   # the remaining value, found without any if or for
print(low, middle, high)       # printed in ascending order
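# for comparison, a sketch using the built-in sorted() -- no explicit if/for in user code,
# though whether that satisfies the exercise's "no loops" constraint is a matter of interpretation
low2, mid2, high2 = sorted(x)
print(low2, mid2, high2)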
# # Exercise 5
# ## _Write a Python Program that takes a positive integer and returns the sum of the cube of all the positive integers smaller than the specified number._
#
n = 7          # the given positive integer
total = 0      # avoid shadowing the built-in sum
for p in range(1, n):
    total = total + p**3   # p**3 is the cube; p^3 would be bitwise XOR
print(total)
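# sanity check using the standard identity 1**3 + 2**3 + ... + m**3 = (m*(m+1)/2)**2, with m = n-1
m = n - 1
print((m * (m + 1) // 2) ** 2)   # 441, matching the loop above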
| 2,000 |
/gratbot_client/notebooks/dev_nncalib_gyrus.ipynb | badaa495ec0f4282737403d4a4ccad160f7e597a | [] | no_license | grybka/gratbot | https://github.com/grybka/gratbot | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 460,954 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import yaml
import sys, os
#path2add = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'dir1')))
#print(os.getcwd())
parent_path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), os.path.pardir)))
#print(path2add)
sys.path.append(parent_path)
sys.path.append(os.path.normpath(os.path.join(parent_path,"gyrii")))
sys.path.append(os.path.normpath(os.path.join(parent_path,"gyrii","underpinnings")))
from Gyrus import Gyrus
def dict_array_upend(mydict,key,elem):
    if key not in mydict:
        mydict[key]=[]
    mydict[key].append(elem)
def load_sensor_log_file(fname):
    #each line of the log is one JSON message; return the list of messages and the first timestamp
    response=[]
    first_timestamp=0
    with open(fname,'r') as f:
        for line in f.readlines():
            dat=json.loads(line)
            response.append(dat)
            if first_timestamp==0:
                first_timestamp=dat["timestamp"]
    return response,first_timestamp
#sensor_log1,first_timestamp1=load_sensor_log_file("../to_study/initial_chase_calib.txt")
#sensor_log2,first_timestamp2=load_sensor_log_file("../to_study/random_move_test.txt")
sensor_log,first_timestamp=load_sensor_log_file("../to_study/calibration_motion.txt")
#sensor_log=sensor_log1
#sensor_log.extend(sensor_log2)
#sensor_log,first_timestamp=load_sensor_log_file("../to_study/track_stop_sign.txt")
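# a single log line is a JSON object keyed by message type; a minimal, hypothetical example
# showing only a couple of the keys this notebook actually reads (the real logs carry more fields)
_example_line='{"timestamp": 1612345678.9, "position_sensor/gyro": [0.0, 0.0, 0.01], "position_sensor/gyro_stdev": [0.0, 0.0, 0.005]}'
print(json.loads(_example_line)["timestamp"])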
# +
import torch
from torch.utils.data import Dataset,DataLoader,TensorDataset,random_split
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
#Todo: see if I can implement policytrainer as the same as StatePredictor, but with output_state and decision swapped!
#I then need to add the ability to dream
class WeightedLeakyMemoryWithDecision(Dataset):
def __init__(self,max_len,weight_bounds=[0,1]):
self.max_len=max_len
self.weight_bounds=weight_bounds
self.experiences=[] #[ input_state, decision, target_state, weight]
self.min_weight=np.inf
def get_average_weight(self):
thesum=0
for x in self.experiences:
thesum+=x[3]
return thesum/len(self.experiences)
def get_average_inv_weight(self):
thesum=0
for x in self.experiences:
thesum+=1/x[3]
return thesum/len(self.experiences)
def update_weight(self,index,weight):
self.experiences[index][3]=weight
if weight<self.min_weight:
self.min_weight=weight
    def choose_by_weight(self):
        #sample an experience with probability proportional to its weight
        weights=[ x[3] for x in self.experiences]
        return random.choices(self.experiences, weights=weights)[0]
    def choose_by_inv_weight(self):
        #sample an experience with probability proportional to the inverse of its weight
        weights=[ 1/x[3] for x in self.experiences]
        return random.choices(self.experiences, weights=weights)[0]
    def forget(self):
        #drop experiences until we are back under max_len; low-weight experiences are the most likely to go
        while len(self)>self.max_len:
            self.experiences.remove(self.choose_by_inv_weight())
def add_experience(self,experience_input,experience_decision,experience_target,weight):
        #the chance of eventually being forgotten is proportional to the inverse of the experience weight
        #only record the experience if memory has room or its weight beats the smallest stored weight
if len(self)<self.max_len or weight>self.min_weight:
if weight<self.min_weight:
self.min_weight=weight
self.experiences.append([experience_input,experience_decision,experience_target,weight])
self.forget()
return True
return False
def get_as_batches(self):
inp=[]
dec=[]
tar=[]
for i in range(len(self)):
a,b,c=self[i]
inp.append(a)
dec.append(b)
tar.append(c)
return torch.stack(inp),torch.stack(dec),torch.stack(tar)
def __getitem__(self,index):
return torch.tensor(self.experiences[index][0]).float(),torch.tensor(self.experiences[index][1]).float(),torch.tensor(self.experiences[index][2]).float()
def __len__(self):
return len(self.experiences)
class ScatteredLeakyMemory(Dataset):
    #a replay memory that holds at most max_len experiences; when full, the experience closest to its
    #nearest neighbour is forgotten first, so the retained samples stay spread out in (state, decision, outcome) space
def __init__(self,max_len,input_unc=[],decision_unc=[],output_unc=[]):
self.max_len=max_len
self.experiences=[] #[ input_state, decision, target_state, nearest_neighbor_dist,nearest_neighbor_triple]
self.input_unc=np.array(input_unc)
self.decision_unc=np.array(decision_unc)
self.final_unc=np.array(output_unc)
    def add_experience(self,initial,decision,final,forget=True):
        #compute a normalized squared distance to every stored experience, tracking the nearest neighbour
        min_dist=np.inf
        elem=[initial,decision,final]
        for x in self.experiences:
            dist=np.sum(((np.array(initial)-np.array(x[0]))/self.input_unc)**2)+np.sum(((np.array(decision)-np.array(x[1]))/self.decision_unc)**2)+np.sum(((np.array(final)-np.array(x[2]))/self.final_unc)**2)
            if dist<x[3]: #if I'm adding something closer, update that experience's nearest-neighbour distance
                x[3]=dist
            if dist<min_dist:
                min_dist=dist
                elem=[x[0],x[1],x[2]]
self.experiences.append([ initial,decision,final,min_dist,elem])
if forget:
todel=self.forget()
if todel is not None and todel[0:3]==elem:
return False
return True
def get_as_batches(self):
inp=[]
dec=[]
tar=[]
for i in range(len(self)):
a,b,c=self[i]
inp.append(a)
dec.append(b)
tar.append(c)
return torch.stack(inp),torch.stack(dec),torch.stack(tar)
def forget(self):
if len(self)>self.max_len:
toresort=[]
dists=[ x[3] for x in self.experiences ]
min_ind=dists.index(min(dists))
todel=self.experiences.pop(min_ind)
            for x in list(self.experiences): #iterate over a copy because entries may be removed inside the loop
if x[4][0]==todel[0] and x[4][1]==todel[1] and x[4][2]==todel[2]:
toresort.append([ x[0],x[1],x[2] ])
self.experiences.remove(x)
for x in toresort:
self.add_experience(x[0],x[1],x[2],forget=False)
return todel
return None
def __getitem__(self,index):
return torch.tensor(self.experiences[index][0]).float(),torch.tensor(self.experiences[index][1]).float(),torch.tensor(self.experiences[index][2]).float()
def __len__(self):
return len(self.experiences)
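# a tiny, self-contained check of the leaky-memory behaviour above (toy 1-D states, made-up uncertainties):
# once the capacity is exceeded, the experience closest to its nearest neighbour is the one forgotten
_demo_mem=ScatteredLeakyMemory(3,input_unc=[1.0],decision_unc=[1.0],output_unc=[1.0])
for _v in [0.0, 0.1, 5.0, 10.0]:
    _demo_mem.add_experience([_v],[0.0],[0.0])
print(len(_demo_mem))  # 3 -- one of the two nearly identical samples near 0 has been forgotten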
def weighted_mse_loss(inputs, target, weight):
    #sum of squared errors with each component scaled by its expected uncertainty (a chi-square-like loss)
    return torch.sum( ( (inputs - target)/weight ) ** 2)
class StatePredictorWithPolicy:
def __init__(self,predictor,policy,loss_bounds=[1,100],decision_bounds=[ [-1,1] ],memory_size=64,input_unc=None,decision_unc=None,output_unc=None):
#predictor is a module that given in input state and action state, predicts the output state
self.predictor=predictor
self.policy=policy
self.input_unc=torch.tensor(input_unc)
self.output_unc=torch.tensor(output_unc)
self.decision_unc=torch.tensor(decision_unc)
self.experience_memory=ScatteredLeakyMemory(memory_size,input_unc=input_unc,decision_unc=decision_unc,output_unc=output_unc)
self.loss_bounds=loss_bounds
self.decision_bounds=decision_bounds #array of [min,max]
self.train_batch_size=64
self.train_epochs=64
self.fresh_memories=0
self.target_bounds=[ [-2.0,2.0],[-1.0,1.0] ]
def fantasy_train_policy(self):
        #"dreaming": reuse real starting states from memory but draw random targets from target_bounds,
        #then train the policy so that the learned forward model maps (state, policy decision) onto those targets
loader=DataLoader(self.experience_memory,batch_size=64,shuffle=True)
optimizer = optim.Adam(self.policy.parameters(), lr=0.01)
for epoch in range(self.train_epochs):
for inputs,decisions,targets in loader:
#generate new target here
new_targets=[]
for i in range(targets.shape[0]):
newtarget=[]
for d in self.target_bounds:
newtarget.append(random.uniform(d[0],d[1]))
new_targets.append(torch.tensor(newtarget))
new_targets=torch.stack(new_targets)
#the_input=torch.cat( (inputs,targets),1)
the_input=torch.cat( (inputs,new_targets),1)
policy_out=self.policy(the_input)
predicted_out=self.predictor( torch.cat( (inputs,policy_out),1))
loss=weighted_mse_loss(predicted_out,new_targets,self.output_unc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def loss_to_weight(self,loss):
return 1/np.clip(loss,self.loss_bounds[0],self.loss_bounds[1])
def predict_output(self,input_state,decision):
function_input=[ *input_state,*decision]
predicted_output=self.predictor(torch.unsqueeze(torch.tensor(function_input).float(),0))[0]
return predicted_output.detach().numpy()
def predict_decision(self,input_state,output_state):
function_input=[ *input_state,*output_state]
predicted_output=self.policy(torch.unsqueeze(torch.tensor(function_input).float(),0))[0]
return predicted_output.detach().numpy()
def observe(self,input_state,decision,output_state):
with torch.set_grad_enabled(False):
#loss_function = torch.nn.MSELoss()
motion_function_input=[ *input_state,*decision]
decision_function_input=[ *input_state, *output_state]
predicted_output=self.predictor(torch.unsqueeze(torch.tensor(motion_function_input).float(),0))[0]
predicted_decision=self.policy(torch.unsqueeze(torch.tensor(decision_function_input).float(),0))[0]
output_loss=weighted_mse_loss(predicted_output,torch.tensor(output_state).float(),self.output_unc)
decision_loss=weighted_mse_loss(predicted_decision,torch.tensor(decision).float(),self.decision_unc)
loss=(output_loss+decision_loss).item()
#if self.experience_memory.add_experience(input_state,decision,output_state,self.loss_to_weight(loss)):
if self.experience_memory.add_experience(input_state,decision,output_state):
self.fresh_memories+=1
# def decide_random(self,input_state):
# my_decision=[]
# for d in self.decision_bounds:
# my_decision.append(random.uniform(d[0],d[1]))
# return my_decision
# def decide_by_probing(self,input_state,desired_output_state,pts_per_dim):
# pts=[ input_state ]
# for i in range(len(self.decision_bounds)):
# toadd=np.linspace(self.decision_bounds[i][0],self.decision_bounds[i][1],pts_per_dim)
# newpts=[]
# for i in range(len(toadd)):
# for j in range(len(pts)):
# newpts.append( [*(pts[j]),toadd[i]] )
# pts=newpts
# inputs=torch.tensor(pts).float()
# predictions=self.predictor(inputs)
# loss_function = torch.nn.MSELoss()
# best_inv_loss=0
# best_index=0
# for i in range(predictions.shape[0]):
# loss=loss_function(predictions[i],torch.tensor(desired_output_state).float())
#print("action {} loss {}".format(inputs[i],loss))
# if 1/loss > best_inv_loss:
# best_index=i
# best_inv_loss=1/loss
# return pts[best_index][len(input_state):]
def train(self):
self.train_decision_predictor()
self.train_motion_predictor()
self.fresh_memories=0
#self.reweight_memory()
    def reweight_memory(self):
        #note: currently unused (train() leaves the call commented out); as written it assumes the
        #weighted memory variant, since ScatteredLeakyMemory does not define update_weight
loss_function = torch.nn.MSELoss()
with torch.set_grad_enabled(False):
inputs,decisions,targets=self.experience_memory.get_as_batches()
predictor_out=self.predictor( torch.cat( (inputs,decisions),1) )
policy_out=self.policy( torch.cat( (inputs,targets),1))
for i in range(len(self.experience_memory)):
output_loss=weighted_mse_loss(predictor_out,targets,self.output_unc)
decision_loss=weighted_mse_loss(policy_out,decisions,self.decision_unc)
loss=(output_loss+decision_loss).item()
self.experience_memory.update_weight(i,self.loss_to_weight(loss))
self.fresh_memories=0
def train_decision_predictor(self):
print("training")
loader=DataLoader(self.experience_memory,batch_size=64,shuffle=True)
#loss_function = torch.nn.MSELoss()
optimizer = optim.Adam(self.policy.parameters(), lr=0.01)
for epoch in range(self.train_epochs):
for inputs,decisions,targets in loader:
the_input=torch.cat( (inputs,targets),1)
out=self.policy( the_input )
#loss=loss_function(out,decisions )
loss=weighted_mse_loss(out,decisions,self.decision_unc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def train_motion_predictor(self):
loader=DataLoader(self.experience_memory,batch_size=64,shuffle=True)
#loss_function = torch.nn.MSELoss()
optimizer = optim.Adam(self.predictor.parameters(), lr=0.01)
for epoch in range(self.train_epochs):
for inputs,decisions,targets in loader:
the_input=torch.cat( (inputs,decisions),1)
out=self.predictor( the_input )
#loss=loss_function(out,targets )
loss=weighted_mse_loss(out,targets,self.output_unc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
#now recalculate everything in memory
#with torch.set_grad_enabled(False):
#inputs,targets=self.experience_memory.get_as_batches()
#out=self.predictor( inputs )
#for i in range(len(self.experience_memory)):
# self.experience_memory.update_weight(i,self.loss_to_weight(loss_function(out[i],targets[i]).item()))
# self.fresh_memories=0
# return loss_function(out,targets).item()/len(self.experience_memory)
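# a minimal wiring sketch of the class above (hypothetical 2-D state / 2-D decision and tiny nets);
# the gyrus in the next cell does the same thing with its own dimensions, bounds and uncertainties
_demo_predictor=nn.Sequential(nn.Linear(4,2,bias=False))
_demo_policy=nn.Sequential(nn.Linear(4,4),nn.Tanh(),nn.Linear(4,2),nn.Tanh())
_demo_sp=StatePredictorWithPolicy(_demo_predictor,_demo_policy,
                                  decision_bounds=[[-0.8,0.8],[-0.8,0.8]],
                                  memory_size=16,
                                  input_unc=[0.01,0.1],decision_unc=[0.02,0.02],output_unc=[0.01,0.1])
_demo_sp.observe([0.0,1.0],[0.2,-0.2],[0.05,0.0])     #(state, decision, observed outcome)
print(_demo_sp.predict_output([0.0,1.0],[0.2,-0.2]))  #untrained forward-model prediction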
# +
from Gyrus import ThreadedGyrus
from NNCalibration import StatePredictor
import torch
import torch.nn as nn
import numpy as np
class VisualMotionCalibGyrus(ThreadedGyrus):
def __init__(self,broker):
#self.motionpredictor=nn.Sequential(nn.Linear(5,5),
#nn.Tanh(),
#nn.Linear(5,2))
self.policy=nn.Sequential(nn.Linear(4,4),
nn.Tanh(),
nn.Linear(4,2),
nn.Tanh())
self.motionpredictor=nn.Sequential(nn.Linear(4,2,bias=False))
#self.policy=nn.Sequential(nn.Linear(4,3,bias=False))
decision_bounds=[ [-0.8,0.8], [-0.8,0.8], [0.1,0.5]]
#self.state_predictor=StatePredictor(self.motionpredictor,decision_bounds=decision_bounds,memory_size=128)
self.state_predictor=StatePredictorWithPolicy(self.motionpredictor,self.policy,decision_bounds=decision_bounds,memory_size=128,input_unc=[0.01,0.1],output_unc=[0.01,0.1],decision_unc=[0.02,0.02])
#constants
self.min_update_interval=0.1 #don't record experiences more than this often
self.objects_to_watch=["sports ball","stop sign","chair"]
self.object_heights={ "stop sign": [0.081,0.005], "sports ball": [0.115,0.01], "chair": [1.0,0.5]}
self.fresh_memories_to_train=16
self.camera_focal_length_pixels=630 #V1 raspicam
self.camera_x_pixels=640
self.camera_y_pixels=480
#scales of things
#self.position_scale=640
#temporary storage
self.last_unique_objects={}
self.last_gyro_z=[0,0] #value, stdev
self.last_motors_active=[1,1] #left, right
self.last_frame_motion_command=[0,0,0] #left,right, duration
self.next_update_time=0
self.last_motor_command={}
#debugging storage
self.all_input_vectors=[]
self.all_target_vectors=[]
self.all_decision_vectors=[]
super().__init__(broker)
def get_keys(self):
return [ "drive/motors_active","position_sensor/gyro", "tagged_objects", "motor_command", "clock_pulse" ]
def get_name(self):
return "VisualMotionCalibGyrus"
def holding_still(self,timestamp): #return true if I think I'm holding still
        if abs(self.last_gyro_z[0])>0.05: #the original repeated this test; the duplicate was probably meant for the gyro stdev, self.last_gyro_z[1]
return False
if self.last_motors_active[0]!=0 or self.last_motors_active[1]!=0:
return False
if timestamp<self.next_update_time:
return False
return True
def get_unique_tagged_objects(self,tagged_objects):
ret={}
label_list=[ x["label"] for x in tagged_objects]
for key in self.objects_to_watch:
if label_list.count(key)>1 or label_list.count(key)==0:
continue
elem=next(filter(lambda x: x["label"]==key,tagged_objects))
ret[key]=elem
return ret
def read_message(self,message):
if "clock_pulse" in message:
#don't think about training if not holding still
if not self.holding_still(message["timestamp"]):
return
if self.state_predictor.fresh_memories>self.fresh_memories_to_train:
self.state_predictor.train()
#print("training")
if "motion_request" in message:
m=message["motion_request"]
if m["motion_type"]=="turn":
input_vector=[m["from_angle"]]
output_vector=[m["to_angle"]]
decision=self.state_predictor.decide_by_probing(input_vector,output_vector,10)
self.last_motor_command={"timestamp": time.time(),"motor_command": {"lr_throttle": [decision[0],decision[1]], "duration":decision[2] } }
#broker.publish({"timestamp": time.time(),"motor_command": {"lr_throttle": [decision[0],decision[1]], "duration":decision[2] } },"motor_command")
if "position_sensor/gyro" in message:
gyro_z=message["position_sensor/gyro"][2]
gyro_z_stdev=message["position_sensor/gyro_stdev"][2]
self.last_gyro_z=[gyro_z,gyro_z_stdev]
if "drive/motors_active" in message:
left_motor=message["drive/motors_active"][0]
right_motor=message["drive/motors_active"][1]
self.last_motors_active=[left_motor,right_motor]
if "motor_command" in message:
m=message["motor_command"]
            if m["lr_throttle"][0]!=0 or m["lr_throttle"][1]!=0:
#self.last_motion_time=message["timestamp"]
self.last_frame_motion_command=[ m["lr_throttle"][0],m["lr_throttle"][1],m["duration"]]
self.next_update_time=message["timestamp"]+m["duration"]
if "tagged_objects" in message:
#only check if holding still
if not self.holding_still(message["timestamp"]):
return
unique_objects=self.get_unique_tagged_objects(message["tagged_objects"])
for key in unique_objects:
if key in self.last_unique_objects:
#exists in both this and last set
obj_height=self.object_heights[key][0]
                    #get the 'center of mass' x for each bounding box
prev_startx=self.last_unique_objects[key]["startx"]
prev_endx=self.last_unique_objects[key]["endx"]
prev_starty=self.last_unique_objects[key]["starty"]
prev_endy=self.last_unique_objects[key]["endy"]
#prev_x=0.5*(prev_startx+prev_endx)
next_startx=unique_objects[key]["startx"]
next_endx=unique_objects[key]["endx"]
next_starty=unique_objects[key]["starty"]
next_endy=unique_objects[key]["endy"]
#next_x=0.5*(next_startx+next_endx)
startx_cut=5
endx_cut=self.camera_x_pixels-startx_cut
starty_cut=5
endy_cut=self.camera_y_pixels-starty_cut
#I have to have some notion of distance from object
if prev_starty>starty_cut and next_starty>starty_cut and prev_endy<endy_cut and next_endy<endy_cut:
#hpx / focal = height/dist
# dist = focal * (height) / hpx
                        #TODO far future, I could make the object height per class a free parameter that is fit. Ha!
#print("height {}".format(obj_height))
prev_objdist=obj_height*self.camera_focal_length_pixels/(prev_endy-prev_starty)
next_objdist=obj_height*self.camera_focal_length_pixels/(next_endy-next_starty)
delta_dist=next_objdist-prev_objdist
startx_good=prev_startx>startx_cut and next_startx>startx_cut
endx_good=prev_endx<endx_cut and next_endx<endx_cut
if startx_good and endx_good:
prev_heading=np.arctan( 0.5*(prev_startx+prev_endx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
next_heading=np.arctan( 0.5*(next_startx+next_endx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
elif startx_good and not endx_good:
prev_heading=np.arctan( (prev_startx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
next_heading=np.arctan( (next_startx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
elif endx_good and not startx_good:
prev_heading=np.arctan( (prev_endx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
next_heading=np.arctan( (next_endx-self.camera_x_pixels/2)/self.camera_focal_length_pixels )
else: #neither are good
continue
delta_heading=next_heading-prev_heading
last_input_vector=[ prev_heading,prev_objdist ]
#decision_vector=self.last_frame_motion_command
decision_vector=[self.last_frame_motion_command[0],self.last_frame_motion_command[1]]
target_vector=[ delta_heading,delta_dist ]
self.state_predictor.observe(last_input_vector,decision_vector,target_vector)
self.all_input_vectors.append(last_input_vector)
self.all_decision_vectors.append(decision_vector)
self.all_target_vectors.append(target_vector)
self.last_unique_objects=unique_objects
self.last_frame_motion_command=[0,0,0]
self.next_update_time=message["timestamp"]+self.min_update_interval
from tqdm import tqdm
import time
vmgyrus=VisualMotionCalibGyrus(None)
for message in tqdm(sensor_log):
vmgyrus.read_message(message)
vmgyrus.read_message({"timestamp": time.time(),"clock_pulse": 0.1})
# -
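# A quick numerical check of the pinhole-camera relations used inside the gyrus above:
# an object of known physical height h that appears hpx pixels tall is at distance f*h/hpx,
# and a horizontal pixel offset dx from the image centre corresponds to a heading of roughly arctan(dx/f).
# The pixel values below are made up; the focal length and ball height match the constants defined above.
# +
_f=630.0    # focal length in pixels (V1 raspicam)
_h=0.115    # sports ball height in metres
_hpx=72.0   # hypothetical bounding-box height in pixels
_dx=100.0   # hypothetical offset of the box centre from the image centre, in pixels
print("estimated distance (m): ",_f*_h/_hpx)
print("estimated heading (rad): ",np.arctan(_dx/_f))
# -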
print(len(vmgyrus.state_predictor.experience_memory))
# +
from matplotlib import pyplot as plt
import numpy as np
all_input_vectors=np.array(vmgyrus.all_input_vectors)
all_decision_vectors=np.array(vmgyrus.all_decision_vectors)
all_target_vectors=np.array(vmgyrus.all_target_vectors)
fig, ax = plt.subplots()
plt.title("decision")
ax.plot( all_decision_vectors,'*')
plt.show()
fig, ax = plt.subplots()
plt.title("input_vectors")
ax.plot( all_input_vectors,'*')
plt.show()
fig, ax = plt.subplots()
plt.title("input_vectors -heading")
ax.plot( all_input_vectors[:,0],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("input_vectors -distance")
ax.plot( all_input_vectors[:,1],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("target_vectors - heading")
ax.plot( all_target_vectors[:,0],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("target_vectors - distance")
ax.plot( all_target_vectors[:,1],'*')
plt.show()
inputs,decisions,targets=vmgyrus.state_predictor.experience_memory.get_as_batches()
motion_out=vmgyrus.motionpredictor( torch.cat( (inputs,decisions),1 ))
decision_out=vmgyrus.policy( torch.cat( (inputs,targets),1))
motion_out=motion_out.detach().numpy()
decision_out=decision_out.detach().numpy()
inputs=inputs.detach().numpy()
decisions=decisions.detach().numpy()
targets=targets.detach().numpy()
fig, ax=plt.subplots()
plt.title("nearest-neighbor distances in memory")
plt.plot( [x[3] for x in vmgyrus.state_predictor.experience_memory.experiences],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("inputs in memory")
ax.plot(inputs,'*')
plt.show()
fig, ax = plt.subplots()
plt.title("decisions in memory")
ax.plot(decisions,'*')
plt.show()
fig, ax = plt.subplots()
plt.title("targets in memory")
ax.plot(targets,'*')
plt.show()
fig, ax = plt.subplots()
plt.title("Motion Predictor Targets and Predictions")
ax.plot( targets[:],'*')
ax.plot( motion_out[:],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("Decision Predictor Targets and Predictions")
ax.plot( decisions[:],'*')
ax.plot( decision_out[:],'*')
plt.show()
fig, ax = plt.subplots()
plt.title("Err")
ax.plot( targets[:,0]-motion_out[:,0],'*',label="heading")
ax.plot( targets[:,1]-motion_out[:,1],'*',label="distance")
plt.legend()
plt.show()
fig, ax = plt.subplots()
plt.title("Heading Decision Time*Mag vs delta p")
ax.plot( (decisions[:,0]-decisions[:,1]),targets[:,0],'*',label='truth')
ax.plot( (decisions[:,0]-decisions[:,1]),motion_out[:,0],'*',label='predictions')
plt.legend()
plt.show()
fig, ax = plt.subplots()
plt.title("Distance Decision Time*Mag vs delta p")
ax.plot( (decisions[:,0]+decisions[:,1]),targets[:,1],'*',label='truth')
ax.plot( (decisions[:,0]+decisions[:,1]),motion_out[:,1],'*',label='predictions')
plt.legend()
plt.show()
fig, ax = plt.subplots()
plt.title("Decision Prediction")
ax.plot( targets[:,0],decisions[:,0],'*',label='l truth')
ax.plot( targets[:,0],decision_out[:,0],'*',label='l pred')
#ax.plot( targets[:,0],decisions[:,0]-decisions[:,1],'*',label='lminusr truth')
#ax.plot( targets[:,0],decision_out[:,0]-decision_out[:,1],'*',label='lminusr pred')
plt.legend()
plt.show()
fig, ax = plt.subplots()
plt.title("Decision Prediction")
ax.plot( targets[:,0],decisions[:,1],'*',label='r truth')
ax.plot( targets[:,0],decision_out[:,1],'*',label='r pred')
#ax.plot( targets[:,0],decisions[:,0]-decisions[:,1],'*',label='lminusr truth')
#ax.plot( targets[:,0],decision_out[:,0]-decision_out[:,1],'*',label='lminusr pred')
plt.legend()
plt.show()
# +
#let's make a plot of just heading changes and just distance changes presumably
#turns first
turn_inputs=[]
turn_outputs=[]
for t in np.linspace(-0.6,0.6,100):
turn_inputs.append(t)
input_vector=[0,1.0]
decision=[ t,-t]
out=vmgyrus.state_predictor.predict_output(input_vector,decision)
turn_outputs.append(out)
turn_outputs=np.array(turn_outputs)
fig, ax = plt.subplots()
plt.title("Turn Results")
ax.plot( turn_inputs,turn_outputs[:,0],'*',label="dHeading")
ax.plot( turn_inputs,turn_outputs[:,1],'*',label="dDistance")
plt.legend()
plt.show()
# -
# +
#now forward backward
turn_inputs=[]
turn_outputs=[]
for t in np.linspace(-0.6,0.6,100):
turn_inputs.append(t)
input_vector=[0,1.0]
decision=[ t,t]
out=vmgyrus.state_predictor.predict_output(input_vector,decision)
turn_outputs.append(out)
turn_outputs=np.array(turn_outputs)
fig, ax = plt.subplots()
plt.title("Forward Backward Results")
ax.plot( turn_inputs,turn_outputs[:,0],'*',label="dHeading")
ax.plot( turn_inputs,turn_outputs[:,1],'*',label="dDistance")
plt.legend()
plt.show()
# +
#Driving prediction
decisions=[]
turn_inputs=[]
for deltat in np.linspace(-1.0,1.0,100):
input_state=[0,1.0]
output_state=[deltat,0.0]
decision=vmgyrus.state_predictor.predict_decision(input_state,output_state)
decisions.append(decision)
turn_inputs.append(deltat)
decisions=np.array(decisions)
fig, ax = plt.subplots()
plt.title("Turn Decision")
plt.xlabel("desired turn")
ax.plot( turn_inputs,decisions[:,0],'*',label="Left")
ax.plot( turn_inputs,decisions[:,1],'*',label="Right")
plt.legend()
plt.show()
# -
vmgyrus.state_predictor.fantasy_train_policy()
vmgyrus.state_predictor.train()
# +
#Driving prediction
decisions=[]
turn_inputs=[]
for deltat in np.linspace(-1.0,1.0,100):
input_state=[0,1.0]
output_state=[deltat,0.0]
decision=vmgyrus.state_predictor.predict_decision(input_state,output_state)
decisions.append(decision)
turn_inputs.append(deltat)
decisions=np.array(decisions)
fig, ax = plt.subplots()
plt.title("Turn Decision")
plt.xlabel("desired turn")
ax.plot( turn_inputs,decisions[:,0],'*',label="Left")
ax.plot( turn_inputs,decisions[:,1],'*',label="Right")
plt.legend()
plt.show()
# +
#Driving prediction
decisions=[]
turn_inputs=[]
for deltat in np.linspace(-1.0,1.0,100):
input_state=[0,1.0]
output_state=[0,deltat]
decision=vmgyrus.state_predictor.predict_decision(input_state,output_state)
decisions.append(decision)
turn_inputs.append(deltat)
decisions=np.array(decisions)
fig, ax = plt.subplots()
plt.title("FB Decision")
plt.xlabel("desired motor")
ax.plot( turn_inputs,decisions[:,0],'*',label="Left")
ax.plot( turn_inputs,decisions[:,1],'*',label="Right")
plt.legend()
plt.show()
# -
| 31,217 |