hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7f11eee6e5998c68cd1f2746b37244b40244200 | 2,836 | ipynb | Jupyter Notebook | test/1_get_infos.ipynb | gaemapiracicaba/norma_pl_251-21 | 6ed86277b225cff4f467fae5edd3a479cdc222a4 | [
"MIT"
] | null | null | null | test/1_get_infos.ipynb | gaemapiracicaba/norma_pl_251-21 | 6ed86277b225cff4f467fae5edd3a479cdc222a4 | [
"MIT"
] | null | null | null | test/1_get_infos.ipynb | gaemapiracicaba/norma_pl_251-21 | 6ed86277b225cff4f467fae5edd3a479cdc222a4 | [
"MIT"
] | null | null | null | 20.113475 | 103 | 0.513399 | [
[
[
"<br>\n\n# Introdução",
"_____no_output_____"
]
],
[
[
"import os\nimport requests\nimport pandas as pd",
"_____no_output_____"
],
[
"from paths import *",
"_____no_output_____"
]
],
[
[
"<br>\n\n# Função",
"_____no_output_____"
]
],
[
[
"# Lê o arquivo csv com o nome dos municípios\ndf = pd.read_csv( \n os.path.join(input_path, 'tab_pl251.csv'),\n)\n\n# Deleta Coluna\ndf.drop(['municipio_nome'], axis=1, inplace=True)\nprint(list(set(df['unidade'])))\ndf",
"_____no_output_____"
],
[
"# Lê o arquivo csv com o nome dos municípios\ndf_mun = pd.read_csv(\n 'https://raw.githubusercontent.com/michelmetran/sp/main/data/tabs/tab_municipio_nome.csv',\n usecols=['id_municipio', 'municipio_nome']\n)\n\n# Merge\ndf = pd.merge(\n df_mun,\n df,\n how='left',\n left_on='id_municipio',\n right_on='id_municipio'\n)\n\n# Results\ndf.head()",
"_____no_output_____"
],
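[
"# Added check (illustrative, not in the original notebook):\n# after the left merge, municipalities missing from df show up as rows with NaNs\nprint(df.isna().any(axis=1).sum(), 'municipalities without PL 251 data')",
"_____no_output_____"
],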
[
"# Escreve Tabela\ndf.to_csv(\n os.path.join(tabs_path, 'tab_municipio_pl251.csv'),\n index=False,\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f136f8dff7683c40f22c8f69ec96b144ee0999 | 10,920 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Supervised_Learning-checkpoint.ipynb | ZLTM/Study_Machine_Learning | c37d38ca7aae005112d23f82d88a925beef4bd89 | [
"Apache-1.1"
] | null | null | null | .ipynb_checkpoints/Supervised_Learning-checkpoint.ipynb | ZLTM/Study_Machine_Learning | c37d38ca7aae005112d23f82d88a925beef4bd89 | [
"Apache-1.1"
] | null | null | null | .ipynb_checkpoints/Supervised_Learning-checkpoint.ipynb | ZLTM/Study_Machine_Learning | c37d38ca7aae005112d23f82d88a925beef4bd89 | [
"Apache-1.1"
] | null | null | null | 45.5 | 1,328 | 0.592949 | [
[
[
"from sklearn import datasets\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Loading IRIS dataset from scikit-learn object into iris variable.\niris = datasets.load_iris()\n\n# Prints the type/type object of iris\nprint(type(iris))\n# <class 'sklearn.datasets.base.Bunch'>\n\n# prints the dictionary keys of iris data\nprint(iris.keys())\n\n# prints the type/type object of given attributes\nprint(type(iris.data), type(iris.target))\n\n# prints the no of rows and columns in the dataset\nprint(iris.data.shape)\n\n# prints the target set of the data\nprint(iris.target_names)\n\n# Load iris training dataset\nX = iris.data\n\n# Load iris target set\nY = iris.target\n\n# Convert datasets' type into dataframe\ndf = pd.DataFrame(X, columns=iris.feature_names)\n\n# Print the first five tuples of dataframe.\nprint(df.head())",
"<class 'sklearn.utils.Bunch'>\ndict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names'])\n<class 'numpy.ndarray'> <class 'numpy.ndarray'>\n(150, 4)\n['setosa' 'versicolor' 'virginica']\n sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)\n0 5.1 3.5 1.4 0.2\n1 4.9 3.0 1.4 0.2\n2 4.7 3.2 1.3 0.2\n3 4.6 3.1 1.5 0.2\n4 5.0 3.6 1.4 0.2\n"
],
[
"from sklearn import datasets\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Load iris dataset from sklearn\niris = datasets.load_iris()\n\n# Declare an of the KNN classifier class with the value with neighbors.\nknn = KNeighborsClassifier(n_neighbors=6)\n\n# Fit the model with training data and target values\nknn.fit(iris['data'], iris['target'])\n\n# Provide data whose class labels are to be predicted\nX = [\n [5.9, 1.0, 5.1, 1.8],\n [3.4, 2.0, 1.1, 4.8],\n]\n\n# Prints the data provided\nprint(X)\n\n# Store predicted class labels of X\nprediction = knn.predict(X)\n\n# Prints the predicted class labels of X\nprint(prediction)",
"[[5.9, 1.0, 5.1, 1.8], [3.4, 2.0, 1.1, 4.8]]\n[1 1]\n"
],
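[
"# Added sketch (illustrative, not in the original notebook): score the classifier\n# on a held-out split instead of predicting on hand-picked points\nfrom sklearn.model_selection import train_test_split\nX_tr, X_te, y_tr, y_te = train_test_split(iris['data'], iris['target'], random_state=0)\nknn.fit(X_tr, y_tr)\nprint(knn.score(X_te, y_te))",
"_____no_output_____"
],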
[
"from sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Load dataset\ndiabetes = datasets.load_diabetes()\n\n\n# Select 1 training feature\ndiabetes_X = diabetes.data[:, np.newaxis, 2]\n\n# split x and y in test and training\ndiabetes_X_train = diabetes_X[:-20]\ndiabetes_X_test = diabetes_X[-20:]\n\ndiabetes_y_train = diabetes.target[:-20]\ndiabetes_y_test = diabetes.target[-20:]\n\n# linear reg object\nregr = linear_model.LinearRegression()\n\n# fits trains x and y train WE NEED TO FIT SO WE CAN PREDICT\nregr.fit(diabetes_X_train, diabetes_y_train)\n\n# Input data\nprint('Input Values')\nprint(diabetes_X_test)\n\n# linear regresion to predict y according to x\ndiabetes_y_pred = regr.predict(diabetes_X_test)\n\n# Predicted Data\nprint(\"Predicted Output Values\")\nprint(diabetes_y_pred)\n\n# Plot outputs\nplt.scatter(diabetes_X_test, diabetes_y_test, color='black')\nplt.plot(diabetes_X_test, diabetes_y_pred, color='red', linewidth=1)\n\nplt.show()",
"Input Values\n[[ 0.07786339]\n [-0.03961813]\n [ 0.01103904]\n [-0.04069594]\n [-0.03422907]\n [ 0.00564998]\n [ 0.08864151]\n [-0.03315126]\n [-0.05686312]\n [-0.03099563]\n [ 0.05522933]\n [-0.06009656]\n [ 0.00133873]\n [-0.02345095]\n [-0.07410811]\n [ 0.01966154]\n [-0.01590626]\n [-0.01590626]\n [ 0.03906215]\n [-0.0730303 ]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7f13ce08fab66f5b202d7d95188b43b48bbb8da | 262,981 | ipynb | Jupyter Notebook | mini_project.ipynb | prathusb/TensorFlow_NNs | 19dd769015b108d3cdd09e5eb08d8f1471b86d88 | [
"MIT"
] | null | null | null | mini_project.ipynb | prathusb/TensorFlow_NNs | 19dd769015b108d3cdd09e5eb08d8f1471b86d88 | [
"MIT"
] | null | null | null | mini_project.ipynb | prathusb/TensorFlow_NNs | 19dd769015b108d3cdd09e5eb08d8f1471b86d88 | [
"MIT"
] | null | null | null | 617.326291 | 246,789 | 0.580133 | [
[
[
"# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"mini_project\"\n\ndef save_fig(fig_id, tight_layout=True):\n path = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id + \".png\")\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format='png', dpi=300)",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom sklearn.datasets import fetch_lfw_pairs\n# import fetch_lfw_people\n\nlfw_pairs_train = fetch_lfw_pairs(subset='train')\nlfw_pairs_test = fetch_lfw_pairs(subset='test')\n#lfw_people_test = fetch_lfw_people(min_faces_per_person=70, resize=0.4)\n#lfw_people.images.shape\nX_train = lfw_pairs_train.data\ny_train = lfw_pairs_train.target\nX_test = lfw_pairs_test.data\ny_test = lfw_pairs_test.target\nX_train = X_train.astype(np.float32).reshape(-1, 5828) / 255.0\nX_test = X_test.astype(np.float32).reshape(-1, 5828) / 255.0\ny_train = y_train.astype(np.int32)\ny_test = y_test.astype(np.int32)\nX_valid, X_train = X_train[:500], X_train[500:]\ny_valid, y_train = y_train[:500], y_train[500:]",
"_____no_output_____"
],
[
"reset_graph()\n\nhe_init = tf.variance_scaling_initializer()\n\ndef dnn(inputs,n_hidden_layers,n_per_h_layer,n_outputs,activation=tf.nn.elu,initializer=he_init,seed=42,learning_rate=0.01):\n with tf.name_scope(\"dnn\"):\n training = tf.placeholder_with_default(False, shape=(), name='training')\n for layer_num in range(n_hidden_layers):\n # Dropout\n #inputs = tf.layers.dropout(inputs, 0.5, training)\n # Dense layer\n inputs = tf.layers.dense(inputs, n_per_h_layer, \n activation=activation, kernel_initializer=initializer, name=\"hidden%d\" % (layer_num+1))\n # Batch Normalization hidden layers\n bn = tf.layers.batch_normalization(inputs,training=training,momentum=0.9,name=\"hidden%d\" % (layer_num+1))\n #outputs = inputs\n outputs = bn\n logits = tf.layers.dense(outputs, n_outputs, kernel_initializer=he_init, name=\"logits\")\n y_prob = tf.nn.softmax(logits, name=\"y_prob\")\n with tf.name_scope(\"loss\"):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n loss = tf.reduce_mean(xentropy, name=\"loss\")\n with tf.name_scope(\"train\"):\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(loss, name=\"training_op\")\n loss_summary = tf.summary.scalar('log_loss', loss)\n with tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(logits, y, 1)\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n with tf.name_scope(\"init\"):\n init = tf.global_variables_initializer()\n with tf.name_scope(\"save\"):\n saver = tf.train.Saver()\n return y_prob, logits, loss, training_op, loss_summary, accuracy, init, saver",
"_____no_output_____"
],
[
"# For TensorBoard\nfrom datetime import datetime\n\ndef log_dir(prefix=\"\"):\n now = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n root_logdir = \"tf_logs\"\n if prefix:\n prefix += \"-\"\n name = prefix + \"run-\" + now\n return \"{}/{}/\".format(root_logdir, name)",
"_____no_output_____"
],
[
"# Graph for TensorBoard\n# (not using an enhanced X, using X from before)\nn_inputs = 5828 # lfw\nn_hidden_layers = 3\nn_per_h_layer = 100\nn_outputs = 2\nlogdir = log_dir(\"mini_project\")\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\ny = tf.placeholder(tf.int32, shape=(None), name=\"y\")\n# inputs = X ???\ny_prob, logits, loss, training_op, loss_summary, accuracy, init, saver = dnn(X,n_hidden_layers,n_per_h_layer,n_outputs)\n\nfile_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())",
"_____no_output_____"
],
[
"n_epochs = 1000\nbatch_size = 5\n\nmax_checks_without_progress = 20\nchecks_without_progress = 0\nbest_loss = np.infty\n\ncheckpoint_path = \"/tmp/mini_project.ckpt\"\ncheckpoint_epoch_path = checkpoint_path + \".epoch\"\nfinal_model_path = \"./mini_project\"",
"_____no_output_____"
],
[
"def shuffle_batch(X, y, batch_size):\n rnd_idx = np.random.permutation(len(X))\n n_batches = len(X) // batch_size\n for batch_idx in np.array_split(rnd_idx, n_batches):\n X_batch, y_batch = X[batch_idx], y[batch_idx]\n yield X_batch, y_batch",
"_____no_output_____"
],
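[
"# Added check (illustrative, not in the original notebook):\n# pull one mini-batch from the generator to confirm the shapes\nX_batch, y_batch = next(shuffle_batch(X_train, y_train, batch_size))\nprint(X_batch.shape, y_batch.shape)",
"_____no_output_____"
],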
[
"with tf.Session() as sess:\n if os.path.isfile(checkpoint_epoch_path):\n # if the checkpoint file exists, restore the model and load the epoch number\n with open(checkpoint_epoch_path, \"rb\") as f:\n start_epoch = int(f.read())\n print(\"Training was interrupted. Continuing at epoch\", start_epoch)\n saver.restore(sess, checkpoint_path)\n else:\n start_epoch = 0\n sess.run(init)\n \n for epoch in range(n_epochs):\n for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n #acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n #acc_val = accuracy.eval(feed_dict={X: X_valid1, y: y_valid1})\n loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid, y: y_valid})\n if loss_val < best_loss:\n save_path = saver.save(sess, \"./mini_project.ckpt\")\n best_loss = loss_val\n checks_without_progress = 0\n else:\n checks_without_progress += 1\n if checks_without_progress > max_checks_without_progress:\n print(\"Early stopping!\")\n break\n save_path = saver.save(sess, \"./mini_project.epoch\")\n print(\"{}\\tValidation loss: {:.6f}\\tBest loss: {:.6f}\\tAccuracy: {:.2f}%\".format(\n epoch, loss_val, best_loss, acc_val * 100))\n\nwith tf.Session() as sess:\n saver.restore(sess, \"./mini_project.ckpt\")\n acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n print(\"Final test accuracy: {:.2f}%\".format(acc_test * 100))\n\n save_path = saver.save(sess, \"./mini_project_final.ckpt\")",
"0\tValidation loss: 1.217947\tBest loss: 1.217947\tAccuracy: 0.00%\n1\tValidation loss: 1.165395\tBest loss: 1.165395\tAccuracy: 0.00%\n2\tValidation loss: 1.150668\tBest loss: 1.150668\tAccuracy: 0.00%\n3\tValidation loss: 1.106409\tBest loss: 1.106409\tAccuracy: 0.00%\n4\tValidation loss: 1.170681\tBest loss: 1.106409\tAccuracy: 0.00%\n5\tValidation loss: 1.091462\tBest loss: 1.091462\tAccuracy: 0.00%\n6\tValidation loss: 1.069508\tBest loss: 1.069508\tAccuracy: 0.00%\n7\tValidation loss: 1.090690\tBest loss: 1.069508\tAccuracy: 0.00%\n8\tValidation loss: 1.331298\tBest loss: 1.069508\tAccuracy: 0.00%\n9\tValidation loss: 0.879847\tBest loss: 0.879847\tAccuracy: 5.60%\n10\tValidation loss: 0.921522\tBest loss: 0.879847\tAccuracy: 5.60%\n11\tValidation loss: 0.872936\tBest loss: 0.872936\tAccuracy: 14.00%\n12\tValidation loss: 1.307646\tBest loss: 0.872936\tAccuracy: 0.00%\n13\tValidation loss: 0.749941\tBest loss: 0.749941\tAccuracy: 54.60%\n14\tValidation loss: 0.823845\tBest loss: 0.749941\tAccuracy: 43.20%\n15\tValidation loss: 0.926627\tBest loss: 0.749941\tAccuracy: 26.20%\n16\tValidation loss: 1.049506\tBest loss: 0.749941\tAccuracy: 16.00%\n17\tValidation loss: 0.850352\tBest loss: 0.749941\tAccuracy: 36.20%\n18\tValidation loss: 0.887080\tBest loss: 0.749941\tAccuracy: 35.40%\n19\tValidation loss: 1.152944\tBest loss: 0.749941\tAccuracy: 11.40%\n20\tValidation loss: 1.423164\tBest loss: 0.749941\tAccuracy: 3.20%\n21\tValidation loss: 1.396204\tBest loss: 0.749941\tAccuracy: 7.40%\n22\tValidation loss: 1.172634\tBest loss: 0.749941\tAccuracy: 17.20%\n23\tValidation loss: 0.705237\tBest loss: 0.705237\tAccuracy: 63.80%\n24\tValidation loss: 1.200282\tBest loss: 0.705237\tAccuracy: 21.60%\n25\tValidation loss: 0.556571\tBest loss: 0.556571\tAccuracy: 77.80%\n26\tValidation loss: 1.426366\tBest loss: 0.556571\tAccuracy: 13.20%\n27\tValidation loss: 1.327885\tBest loss: 0.556571\tAccuracy: 16.00%\n28\tValidation loss: 0.920612\tBest loss: 0.556571\tAccuracy: 47.00%\n29\tValidation loss: 0.807322\tBest loss: 0.556571\tAccuracy: 55.80%\n30\tValidation loss: 0.897187\tBest loss: 0.556571\tAccuracy: 48.40%\n31\tValidation loss: 1.068796\tBest loss: 0.556571\tAccuracy: 36.60%\n32\tValidation loss: 0.958421\tBest loss: 0.556571\tAccuracy: 40.20%\n33\tValidation loss: 0.848137\tBest loss: 0.556571\tAccuracy: 54.00%\n34\tValidation loss: 1.276650\tBest loss: 0.556571\tAccuracy: 25.00%\n35\tValidation loss: 1.305983\tBest loss: 0.556571\tAccuracy: 25.60%\n36\tValidation loss: 0.774679\tBest loss: 0.556571\tAccuracy: 57.40%\n37\tValidation loss: 1.130938\tBest loss: 0.556571\tAccuracy: 37.20%\n38\tValidation loss: 1.451930\tBest loss: 0.556571\tAccuracy: 28.80%\n39\tValidation loss: 1.325116\tBest loss: 0.556571\tAccuracy: 26.60%\n40\tValidation loss: 1.080248\tBest loss: 0.556571\tAccuracy: 48.20%\n41\tValidation loss: 1.720881\tBest loss: 0.556571\tAccuracy: 11.60%\n42\tValidation loss: 1.156627\tBest loss: 0.556571\tAccuracy: 41.20%\n43\tValidation loss: 1.194438\tBest loss: 0.556571\tAccuracy: 43.40%\n44\tValidation loss: 0.950688\tBest loss: 0.556571\tAccuracy: 55.80%\n45\tValidation loss: 1.754581\tBest loss: 0.556571\tAccuracy: 26.00%\nEarly stopping!\nINFO:tensorflow:Restoring parameters from ./mini_project.ckpt\nFinal test accuracy: 66.20%\n"
]
],
[
[
"Tests:\n\n(Chapt 11 conditions: seed 42, elu, learning rate = 0.01, he init, RGB normalization, BN, momentum = 0.9, AdamOpt, 5 layers, 100 neurons per layer, 1000 epochs, batch size 20)\n\nWith Chapt 11 conditions & 2 outputs:\n49.70%\n\nWithout BN:\n49.80%\n\nWithout BN or RGB normalization:\n50.00%\n\nWithout normalization and with Glorot Normal init (instead of He init):\n50.00%\n\nWith He init and learning rate = 0.05:\n50.00%\n\nWith He init, RGB normalization, and learning rate = 0.05:\n54.40%\n\nWith BN again:\n50.00%\n\nWithout BN and with 1140 outputs:\n50.20%\n\nSame as Chapt 11 with GradientDescent instead of AdamOpt and without BN:\n58.50%\n\nWith learning rate = 0.05:\n59.20%\n\nSame as Chapt 11 with GradientDescent and momentum = 0.8:\n58.90%\n\nWith batch size 5:\n64.20%\n\nChapt 11 + GD + batch size 5 + 3 layers instead of 5:\n66.20%\n\n",
"_____no_output_____"
]
],
[
[
"with tf.Session() as sess:\n saver.restore(sess, \"./mini_project_final.ckpt\") # or better, use save_path\n X_new_scaled = X_test[:20]\n Z = logits.eval(feed_dict={X: X_new_scaled})\n y_pred = np.argmax(Z, axis=1)",
"INFO:tensorflow:Restoring parameters from ./mini_project_final.ckpt\n"
],
[
"from tensorflow_graph_in_jupyter import show_graph\n\nshow_graph(tf.get_default_graph())",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f141a6d21e39c27e764ffeafd8ed83f38fb744 | 984 | ipynb | Jupyter Notebook | Untitled.ipynb | BmMn01111110001/bmmn | b2bd2efefaecf6118650ce96a18288eba8d94673 | [
"MIT"
] | null | null | null | Untitled.ipynb | BmMn01111110001/bmmn | b2bd2efefaecf6118650ce96a18288eba8d94673 | [
"MIT"
] | null | null | null | Untitled.ipynb | BmMn01111110001/bmmn | b2bd2efefaecf6118650ce96a18288eba8d94673 | [
"MIT"
] | null | null | null | 19.294118 | 61 | 0.537602 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
],
[
"dataset = pd.read_csv('Salary_Data.csv')\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,:1].values\n\nfrom sklearn.model_selection import train_test_split\nX_train, X",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7f14ace82cf64d511eb4cc8094502cf83ad8248 | 61,193 | ipynb | Jupyter Notebook | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio | 268dba88355689d7134494ecfe6f71fa075a62a6 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio | 268dba88355689d7134494ecfe6f71fa075a62a6 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio | 268dba88355689d7134494ecfe6f71fa075a62a6 | [
"Apache-2.0"
] | null | null | null | 151.843672 | 15,496 | 0.890543 | [
[
[
"# \"Working with NumPy\"\n> \"Looking at Bangor preciptiation data using only NumPy and matplotlib.\"\n\n- toc: false\n- badges: true\n- comments: true\n- author: Antonio Jurlina\n- categories: [learning, python]",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os",
"_____no_output_____"
],
[
"os.chdir('/Users/antoniojurlina/Projects/learning_python/data/')\n\ncsv = \"BangorPrecip.csv\"\nbangorprecip = pd.read_csv(csv, index_col=0)\n\nmonths = bangorprecip.index.to_numpy()\nyears = bangorprecip.columns.to_numpy()\nbangorprecip = bangorprecip.to_numpy()\nprint(bangorprecip.shape)\nbangorprecip",
"(12, 10)\n"
]
],
[
[
"**1.\tWhat was the total cumulative precipitation over the ten years?**",
"_____no_output_____"
]
],
[
[
"total_precip = np.sum(bangorprecip)\nprint(\"Total cumulative precipitation over the ten years was\", total_precip, \"inches.\")",
"Total cumulative precipitation over the ten years was 425.26 inches.\n"
]
],
[
[
"**2.\tWhat was the driest year?**",
"_____no_output_____"
]
],
[
[
"yearly_totals = bangorprecip.sum(0)\nprecip = float(yearly_totals[yearly_totals == yearly_totals.min()])\nyear = int(years[yearly_totals == yearly_totals.min()])\nprint(\"The driest year was\", year, \"with a total of\", precip, \"inches of precipitation.\")\n\n",
"The driest year was 2016 with a total of 34.35 inches of precipitation.\n"
]
],
[
[
"**3.\tWhat are the yearly precipitation means?**",
"_____no_output_____"
]
],
[
[
"averages = bangorprecip.mean(0)\n\n%matplotlib inline\nplt.style.use('ggplot')\n\nplt.bar(years, averages)\nplt.title(\"Average yearly precipitation\")\nplt.ylabel(\"Inches\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"**4. What are the monthly min, mean, and max values over the ten years?**",
"_____no_output_____"
]
],
[
[
"mins = bangorprecip.min(1)\nmeans = bangorprecip.mean(1)\nmaxs = bangorprecip.max(1)\n\n%matplotlib inline\nplt.style.use('ggplot')\n\nplt.bar(months, mins, alpha = 0.8)\nplt.bar(months, means, alpha = 0.6)\nplt.bar(months, maxs, alpha = 0.4)\nplt.title(\"Monthly precipitation\")\nplt.ylabel(\"Inches\")\nplt.legend([\"min\", \"mean\", \"max\"])\n\nplt.show()",
"_____no_output_____"
]
],
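[
[
"# Added check (illustrative, not in the original notebook):\n# stack the three monthly statistics into one 3x12 array\nstats = np.vstack([mins, means, maxs])\nprint(stats.shape)",
"_____no_output_____"
]
],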
[
[
"**5.\tWhat was the smallest monthly precipitation value and in which month and year did this occur?**",
"_____no_output_____"
]
],
[
[
"yearly_mins = bangorprecip.min(0)\nmonthly_mins = bangorprecip.min(1)\n\nyear = int(years[yearly_mins == yearly_mins.min()])\nmonth = int(months[monthly_mins == monthly_mins.min()])\nmin_precip = bangorprecip.min(1).min()\n\nprint(\"The smallest monthly precipitation was \", min_precip, \n \" inches and it occured during \", month,\"/\",year, \".\", sep = \"\")",
"The smallest monthly precipitation was 0.58 inches and it occured during 7/2012.\n"
]
],
[
[
"**6. How many months had precipitation amounts greater than 5 inches?**",
"_____no_output_____"
]
],
[
[
"answer = np.sum(bangorprecip > 5)\nprint(answer, \"months had precitipation amounts greater than 5 inches.\")",
"26 months had precitipation amounts greater than 5 inches.\n"
]
],
[
[
"**7. How many months had precipitation greater than zero and less than 1.5 inches? What were these values and in what months and years did they occur?**",
"_____no_output_____"
]
],
[
[
"answer = np.logical_and([bangorprecip > 0], [bangorprecip < 1.5])\n\nprint(np.sum(answer), \"months had precipitation greater than 0 and less than 1.5 inches.\")\nprint(\"\")\n\nfor count,val in enumerate(years):\n month = months[bangorprecip[:,count] < 1.5]\n values = bangorprecip[:,2][bangorprecip[:,count] < 1.5]\n if sum(values) != 0:\n print(\"In\", years[count], \", month(s)\", month, \n \"had rainfalls of\", values, \", respectively.\");",
"9 months had precipitation greater than 0 and less than 1.5 inches.\n\nIn 2012 , month(s) [ 3 7 11] had rainfalls of [1.4 0.58 1.13] , respectively.\nIn 2013 , month(s) [ 1 10] had rainfalls of [1.95 6.96] , respectively.\nIn 2014 , month(s) [9] had rainfalls of [6.33] , respectively.\nIn 2015 , month(s) [3 7] had rainfalls of [1.4 0.58] , respectively.\nIn 2016 , month(s) [9] had rainfalls of [6.33] , respectively.\n"
]
],
[
[
"**8. How different were monthly precipitation values in 2019 from 2018?**",
"_____no_output_____"
]
],
[
[
"nineteen = np.concatenate(bangorprecip[:,years == '2019'])\neighteen = np.concatenate(bangorprecip[:,years == '2018'])\n\n%matplotlib inline\nplt.style.use('ggplot')\n\nplt.bar(months, nineteen, alpha = 0.7)\nplt.bar(months, eighteen, alpha = 0.7)\nplt.title(\"Monthly precipitation (2018 vs. 2019)\")\nplt.ylabel(\"Inches\")\nplt.legend([\"2019\", \"2018\"])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"**9. Create a heatmap of the 12 x 10 array**",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nplt.style.use('ggplot')\n\nimgplot = plt.imshow(bangorprecip, extent=[2010,2019,12,1], aspect='auto', cmap='viridis')\nplt.colorbar();\n",
"_____no_output_____"
]
],
[
[
"For the data and other notebooks, see [github.com/antoniojurlina/learning_python](https://github.com/antoniojurlina/learning_python).",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7f1679a9002bf69aaa20b3da0decb2bb77b5267 | 8,268 | ipynb | Jupyter Notebook | sqlite3 demo.ipynb | steffmul/SpicedAcademy_Exercises | 0daf63bd23045130fd954e7344e49d84181b2dc0 | [
"MIT"
] | 1 | 2019-09-13T18:40:43.000Z | 2019-09-13T18:40:43.000Z | sqlite3 demo.ipynb | steffmul/SpicedAcademy_Exercises | 0daf63bd23045130fd954e7344e49d84181b2dc0 | [
"MIT"
] | null | null | null | sqlite3 demo.ipynb | steffmul/SpicedAcademy_Exercises | 0daf63bd23045130fd954e7344e49d84181b2dc0 | [
"MIT"
] | 1 | 2019-09-13T18:39:50.000Z | 2019-09-13T18:39:50.000Z | 23.895954 | 127 | 0.481616 | [
[
[
"import sqlite3\nconn = sqlite3.connect('example3.db')",
"_____no_output_____"
],
[
"c = conn.cursor()\n\n# Create table\nc.execute('''CREATE TABLE stocks\n (date text, trans text, symbol text, qty real, price real)''')\n\n# Insert a row of data\nc.execute(\"INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)\")\n\n# Save (commit) the changes\nconn.commit()\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\n#conn.close()",
"_____no_output_____"
],
[
"conn = sqlite3.connect('example3.db')\n\nt = ('RHAT',)\nc.execute('SELECT * FROM stocks WHERE symbol=?', t)\nprint(c.fetchone())",
"('2006-01-05', 'BUY', 'RHAT', 100.0, 35.14)\n"
],
[
"# Larger example that inserts many records at a time\npurchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),\n ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),\n ('2006-04-06', 'SELL', 'IBM', 500, 53.00),\n ]\nc.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases)",
"_____no_output_____"
],
[
"for row in c.execute('SELECT * FROM stocks ORDER BY price'):\n print(row)",
"('2006-01-05', 'BUY', 'RHAT', 100.0, 35.14)\n('2006-03-28', 'BUY', 'IBM', 1000.0, 45.0)\n('2006-04-06', 'SELL', 'IBM', 500.0, 53.0)\n('2006-04-05', 'BUY', 'MSFT', 1000.0, 72.0)\n"
],
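[
"# Added sketch (illustrative, not in the original demo): aggregate queries work\n# the same way through the cursor, e.g. total quantity traded per symbol\nfor row in c.execute('SELECT symbol, SUM(qty) FROM stocks GROUP BY symbol'):\n    print(row)",
"_____no_output_____"
],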
[
"persons = [\n (\"Hugo\", \"Boss\"),\n (\"Calvin\", \"Klein\"),\n (\"Ralph\", \"Lauren\")\n ]\n\nsubbrands = [\n (\"Hugo Boss\", \"Boss\"),\n (\"Hugo Boss\", \"Boss Orange\"),\n (\"Hugo Boss\", \"Hugo\"),\n (\"Hugo Boss\", \"Baldessarini\"),\n (\"Calvin Klein\", \"Calvin Klein Jeans\"),\n (\"Calvin Klein\", \"Calvin Klein Underwear\")\n]\n\ncon = sqlite3.connect(\":memory:\")\n\n# Create the table\ncon.execute(\"create table person(firstname, lastname)\")\ncon.execute(\"create table subbrands(designer, brand)\")\n \n# Fill the table\ncon.executemany(\"insert into person(firstname, lastname) values (?, ?)\", persons)\ncon.executemany(\"insert into subbrands(designer, brand) values (?, ?)\", subbrands)",
"_____no_output_____"
],
[
"# Print the table contents\nfor row in con.execute(\"select firstname, lastname from person\"):\n print(row)",
"('Hugo', 'Boss')\n('Calvin', 'Klein')\n('Ralph', 'Lauren')\n"
],
[
"# select statement with where condition\nfor row in con.execute(\"select firstname, lastname, length(firstname) from person where length(firstname)<6\"):\n print(row)",
"('Hugo', 'Boss', 4)\n('Ralph', 'Lauren', 5)\n"
],
[
"# Print the table contents\nfor row in con.execute(\"select *from subbrands\"):\n print(row)",
"('Hugo Boss', 'Boss')\n('Hugo Boss', 'Boss Orange')\n('Hugo Boss', 'Hugo')\n('Hugo Boss', 'Baldessarini')\n('Calvin Klein', 'Calvin Klein Jeans')\n('Calvin Klein', 'Calvin Klein Underwear')\n"
],
[
"# Print the table contents\nfor row in con.execute(\"select * from person p left join subbrands b on p.firstname||' '||p.lastname = b.designer\"):\n print(row)",
"('Hugo', 'Boss', 'Hugo Boss', 'Baldessarini')\n('Hugo', 'Boss', 'Hugo Boss', 'Boss')\n('Hugo', 'Boss', 'Hugo Boss', 'Boss Orange')\n('Hugo', 'Boss', 'Hugo Boss', 'Hugo')\n('Calvin', 'Klein', 'Calvin Klein', 'Calvin Klein Jeans')\n('Calvin', 'Klein', 'Calvin Klein', 'Calvin Klein Underwear')\n('Ralph', 'Lauren', None, None)\n"
],
[
"# delete rows from table\nprint(\"I just deleted\", con.execute(\"delete from person where firstname in ('Hugo','Ralph')\").rowcount, \"rows\")",
"I just deleted 2 rows\n"
],
[
"# Print the table contents\nfor row in con.execute(\"select * from person\"):\n print(row)",
"('Calvin', 'Klein')\n"
],
[
"from sqlite3 import dbapi2 as sqlite\n\ngeo = [\n (\"NYC\", \"USA\"),\n (\"London\", \"UK\"),\n (\"Berlin\", \"Germany\")\n ]\n\n\ncon = sqlite3.connect(\":memory:\")\n\n# Create the table\ncon.execute(\"create table geo(city, country)\")\n \n# Fill the table\ncon.executemany(\"insert into geo(city, country) values (?, ?)\", geo)\n\ncur.execute(\"select * from geo\")\ncol_name_list = [tuple[0] for tuple in cur.description]\nprint(col_name_list)",
"['city', 'country']\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f167fa2d04fb960282c38c94f6608b8aab1dfe | 2,115 | ipynb | Jupyter Notebook | Prelim_Exam.ipynb | NicoleShairaTabligan/OOP-58002 | 1f4a66d85218259a979073ce24af7770df84779b | [
"Apache-2.0"
] | null | null | null | Prelim_Exam.ipynb | NicoleShairaTabligan/OOP-58002 | 1f4a66d85218259a979073ce24af7770df84779b | [
"Apache-2.0"
] | null | null | null | Prelim_Exam.ipynb | NicoleShairaTabligan/OOP-58002 | 1f4a66d85218259a979073ce24af7770df84779b | [
"Apache-2.0"
] | null | null | null | 31.567164 | 238 | 0.519622 | [
[
[
"<a href=\"https://colab.research.google.com/github/NicoleShairaTabligan/OOP-58002/blob/main/Prelim_Exam.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"class Student:\n def __init__ (self, name,student_number,age, school,course):\n self.name = name\n self.student_number= student_number\n self.age= age\n self.school= school\n self.course=course\n def myself(self):\n print(\"My Name is\", self.name, self.age, \"years old.\", \"My Student Number is\", self.student_number,\".\")\n print(\"I'm taking\", self.course, \"at\", self.school)\n\nS = Student(\"Nicole Shaira A. Tabligan\", 202150371,19, \"Adamson University\", \"Bachelor of Science in Computer Engineering\")\nS.myself()",
"My Name is Nicole Shaira A. Tabligan 19 years old. My Student Number is 202150371 .\nI'm taking Bachelor of Science in Computer Engineering at Adamson University\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e7f185a7c9b338c1539edb795bf73fbe6e80c80f | 11,164 | ipynb | Jupyter Notebook | model_training/0.prepare_processing/run1/.ipynb_checkpoints/take_look-checkpoint.ipynb | cxf514/ER_hopping | fd7a4e36b718dcf12b403737c8d05fee4eb1b14c | [
"MIT"
] | null | null | null | model_training/0.prepare_processing/run1/.ipynb_checkpoints/take_look-checkpoint.ipynb | cxf514/ER_hopping | fd7a4e36b718dcf12b403737c8d05fee4eb1b14c | [
"MIT"
] | null | null | null | model_training/0.prepare_processing/run1/.ipynb_checkpoints/take_look-checkpoint.ipynb | cxf514/ER_hopping | fd7a4e36b718dcf12b403737c8d05fee4eb1b14c | [
"MIT"
] | null | null | null | 64.531792 | 1,285 | 0.610623 | [
[
[
"# author=cxf\n# date=2020-8-8\n# file for take a look\n\n# subplot1 RNCU distribution\nimport numpy as np\nimport matplotlib.gridspec as mg\nimport matplotlib.pyplot as mp\n\n\n# import warnings filter\nfrom warnings import simplefilter\nmp.switch_backend('TkAgg')\n# ignore all future warnings\nsimplefilter(action='ignore')\n\n###########################################################################\ndata1 = []\nname = []\nRNCU_value=[]\n# Read RNCU index\nwith open('machine_X_index.txt', 'r') as fx1:\n for line in fx1.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('int32')\n data1.append(each_sample)\n name.append(np.array(line[0:-1].split(','))[0])\nx1 = np.array(data1)\n# Read RNCU value\nwith open('machine_X_values.txt', 'r') as fy1:\n for line in fy1.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('int32')\n RNCU_value.append(each_sample)\ny1 = np.array(RNCU_value)\n\n# subplot2 error rate at different cutoff\ndata2 = []\nwith open('error.txt', 'r') as fx2:\n for line in fx2.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('f8')\n data2.append(each_sample)\nx2 = np.array(data2)\n\n# subplot2 number of sites which could be genotypes with 90% homozygotes\ndata3 = []\nwith open('a90.txt', 'r') as fx3:\n for line in fx3.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('int32')\n data3.append(each_sample)\nx3 = np.array(data3)\n\n# subplot3 number of sites which could be genotypes with 95% homozygotes\ndata4 = []\nwith open('a95.txt', 'r') as fx4:\n for line in fx4.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('int32')\n data4.append(each_sample)\n #print(each_sample)\nx4 = np.array(data4)\n\n# subplot4 number of sites which could be genotypes with 99% homozygotes\ndata5 = []\nwith open('a99.txt', 'r') as fx5:\n for line in fx5.readlines():\n each_sample = np.array(line[0:-1].split(','))[1:].astype('int32')\n data5.append(each_sample)\nx5 = np.array(data5)",
"_____no_output_____"
],
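[
"# Added check (illustrative, not in the original notebook):\n# confirm the arrays loaded above line up before plotting\nprint(len(name), x1.shape, y1.shape, x2.shape, x3.shape, x4.shape, x5.shape)",
"_____no_output_____"
],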
[
"# draw pictures\ngs = mg.GridSpec(3, 2)\nfor i in range(1, 192):\n # 1\n mp.figure(figsize=(10,5))\n mp.subplot(gs[0, :2])\n mp.grid(ls=':')\n mp.title(f'{name[i]}')\n mp.plot(x1[i][:40],y1[i][:40], label='RNCU')\n ax = mp.gca()\n ax.xaxis.set_major_locator(mp.MultipleLocator(1))\n mp.legend()\n # 3\n mp.subplot(gs[1, 0])\n mp.grid(ls=':')\n mp.plot(np.arange(0, 11), x2[i], label='Error_rate')\n ax = mp.gca()\n ax.xaxis.set_major_locator(mp.MultipleLocator(1))\n mp.legend()\n # 4\n mp.subplot(gs[1, 1])\n mp.grid(ls=':')\n mp.plot(np.arange(0, 11), x3[i], label='90% sites')\n ax = mp.gca()\n ax.xaxis.set_major_locator(mp.MultipleLocator(1))\n mp.legend()\n # 5\n mp.subplot(gs[2, 0])\n mp.grid(ls=':')\n mp.plot(np.arange(0, 11), x4[i], label='95% sites')\n ax = mp.gca()\n ax.xaxis.set_major_locator(mp.MultipleLocator(1))\n mp.legend()\n # 6\n mp.subplot(gs[2, 1])\n mp.grid(ls=':')\n mp.plot(np.arange(0, 11), x5[i], label='99% sites')\n ax = mp.gca()\n ax.xaxis.set_major_locator(mp.MultipleLocator(1))\n mp.legend()\n mp.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7f1986c62100d11c2c314e33d6d216948494821 | 216,132 | ipynb | Jupyter Notebook | DiagramGeneratorExample.ipynb | gharib85/ufss | 9aea2da19127c697e5b344548dbd8e152925b8b2 | [
"MIT"
] | 1 | 2020-08-18T15:16:01.000Z | 2020-08-18T15:16:01.000Z | DiagramGeneratorExample.ipynb | gharib85/ufss | 9aea2da19127c697e5b344548dbd8e152925b8b2 | [
"MIT"
] | null | null | null | DiagramGeneratorExample.ipynb | gharib85/ufss | 9aea2da19127c697e5b344548dbd8e152925b8b2 | [
"MIT"
] | null | null | null | 123.363014 | 2,872 | 0.703195 | [
[
[
"%matplotlib notebook\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import numpy as np\nimport ufss\nimport os",
"_____no_output_____"
],
[
"# DiagramGenerator class, or DG for short\nDG = ufss.DiagramGenerator",
"_____no_output_____"
]
],
[
[
"This notebook begins with an example of using the Diagram Generator to generate diagrams for optical nonlinear spectroscopy using the 2D photon echo as an example. We then move on to the fluorescence-detected analogue of 2D photon echo as a counter-point. Following that are further examples. A list of all examples included in this notebook follows, in order of appearance:\n1. Tranditional 2D photon echo (2DPE)\n2. Fluorescence-detected 2DPE (or any action detection method)\n3. Transient Absoroption (TA)\n4. 5th-order correction to TA in the pump amplitude\n5. 5th-order correction to TA in the probe amplitude\n6. Exciton-exciton interaction 2D spectroscopy\n7. 2DPE for IR vibrational spectroscopy",
"_____no_output_____"
],
[
"# 1. 2DPE",
"_____no_output_____"
],
[
"## Generic case",
"_____no_output_____"
]
],
[
[
"# initialize the module\ntdpe = DG()",
"_____no_output_____"
],
[
"# DiagramAutomation needs to know the phase-matching/-cycling condition\n# 2DPE example\ntdpe.set_phase_discrimination([(0,1),(1,0),(1,0)])\n# Set the pulse durations\nt0 = np.linspace(-1,1,num=11)\nt1 = np.linspace(-2,2,num=21)\nt2 = np.linspace(-2,2,num=11)\ntlo = np.linspace(-3,3,num=31)\n# set the pulse durations of each pulse\n# the local oscillator does not impact diagram generation, but is still required at this time\ntdpe.efield_times = [t0,t1,t2,tlo]",
"_____no_output_____"
],
[
"# using a list of pulse arrival times, we can generate the diagrams that contribute for \n# that set of arrival times\n# note the arrival time of the local oscillator is irrelevant, but needed by the code at this time\n# here we choose for the local oscillator to \"arrive\" simulltaneously with the 3rd pulse\ntime_ordered_diagrams = tdpe.get_diagrams([0,100,200,200])\ntime_ordered_diagrams",
"_____no_output_____"
],
[
"#display the diagrams for visual inspection (takes a few seconds to render)\ntdpe.display_diagrams(time_ordered_diagrams)",
"_____no_output_____"
],
[
"all_diagrams = tdpe.get_diagrams([0,1,2,2])\nprint('There are ',len(all_diagrams),' diagrams in total')",
"There are 16 diagrams in total\n"
],
[
"# Check in this folder after running this cell to see 16 individual diagrams saved as pdf files\ntdpe_diagrams_folder = 'TDPE_all_diagrams'\nos.makedirs(tdpe_diagrams_folder,exist_ok=True)\n\n# rendering and saving the diagrams takes a few seconds\ntdpe.save_diagrams(all_diagrams,folder_name=tdpe_diagrams_folder)",
"_____no_output_____"
]
],
[
[
"To play with different cases where only some of the pulses overlap, uncomment and execute any of the following:",
"_____no_output_____"
]
],
[
[
"#ab_overlap = tdpe.get_diagrams([0,1,6,6])\n#bc_overlap = tdpe.get_diagrams([0,4,6,6])\n#ab_bc_overlap = tdpe.get_diagrams([0,3,6,6])",
"_____no_output_____"
]
],
[
[
"And uncomment the following for the case you want to see",
"_____no_output_____"
]
],
[
[
"#tdpe.display_diagrams(ab_overlap) #<--- change the argument of display diagrams to the case you have uncommented and executed",
"_____no_output_____"
]
],
[
[
"## Time-ordered example for only one electronic excited state",
"_____no_output_____"
],
[
"If the system under study has only one excited electronic state, then the excited-state absoroption process cannot take place. This is captured by setting the attribute 'maximum_manifold' (default value $\\infty$) as follows",
"_____no_output_____"
]
],
[
[
"tdpe.maximum_manifold = 1\ntime_ordered_diagrams = tdpe.get_diagrams([0,100,200,200])\ntdpe.display_diagrams(time_ordered_diagrams)",
"_____no_output_____"
]
],
[
[
"Note that even for the case of a single electronic excitation, if there is a significant electronic relaxation rate, 'maximum_manifold' should not be set to 1, but left at the default value $\\infty$",
"_____no_output_____"
],
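[
"# Added sketch (illustrative, not in the original notebook): with non-negligible\n# relaxation, restore the default before moving on\ntdpe.maximum_manifold = np.inf",
"_____no_output_____"
],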
[
"# 2. Action-detected 2DPE",
"_____no_output_____"
]
],
[
[
"tdfs = DG(detection_type='fluorescence')\n\ntdfs.set_phase_discrimination([(0,1),(1,0),(1,0),(0,1)])",
"_____no_output_____"
],
[
"t3 = np.linspace(-2.5,2.5,num=25)\ntdfs.efield_times = [t0,t1,t2,t3]\ntime_ordered_diagrams = tdfs.get_diagrams([0,100,200,300])\ntdfs.display_diagrams(time_ordered_diagrams)",
"_____no_output_____"
],
[
"# and all possibly relevant diagrams can be generated by setting the pulse delays so that all pulses overlap\nall_diagrams = tdfs.get_diagrams([0,1,2,2])\n# Check in this folder to see 16 individual diagrams\ntdfs_diagrams_folder = 'TDFS_all_diagrams'\nos.makedirs(tdfs_diagrams_folder,exist_ok=True)\ntdfs.save_diagrams(all_diagrams,folder_name=tdfs_diagrams_folder)",
"_____no_output_____"
]
],
[
[
"To play with different cases where only some of the pulses overlap, uncomment and execute any of the following:",
"_____no_output_____"
]
],
[
[
"#ab_overlap = tdfs.get_diagrams([0,1,6,12])\n#bc_overlap = tdfs.get_diagrams([0,5,5,12])\n#cd_overlap = tdfs.get_diagrams([0,5,10,12])\n#ab_bc_overlap = tdfs.get_diagrams([0,3,6,12])\n#ab_cd_overlap = tdfs.get_diagrams([0,1,10,12])\n# and so on",
"_____no_output_____"
]
],
[
[
"And uncomment the following for the case you want to see",
"_____no_output_____"
]
],
[
[
"#tdfs.display_diagrams(ab_overlap) #<--- change the argument of display diagrams to the case you have uncommented and executed",
"_____no_output_____"
]
],
[
[
"# TA",
"_____no_output_____"
]
],
[
[
"ta = DG()\nta.set_phase_discrimination([(1,1),(1,0)])\npump_interval = t0\nprobe_interval = t1\nta.efield_times = [t0,t1]",
"_____no_output_____"
]
],
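[
[
"# Added sketch (illustrative, mirroring the calls in the sections below):\n# time-ordered TA diagrams with the probe arriving well after the pump\nta.get_diagrams([0,100,100])",
"_____no_output_____"
]
],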
[
[
"# TA 5th-order corrections",
"_____no_output_____"
],
[
"## Higher order in pump amplitude",
"_____no_output_____"
]
],
[
[
"ta5order_pump = DG()\nta5order_pump.set_phase_discrimination([(2,2),(1,0)])\nta5order_pump.efield_times = [t0,t1]\n# Time-ordered diagrams\nta5order_pump.get_diagrams([0,100,100])",
"_____no_output_____"
]
],
[
[
"## Higher order in probe amplitude",
"_____no_output_____"
]
],
[
[
"ta5order_probe = DG()\nta5order_probe.set_phase_discrimination([(1,1),(2,1)])\nta5order_probe.efield_times = [t0,t1]\nta5order_probe.get_diagrams([0,100,100])",
"_____no_output_____"
]
],
[
[
"# EEI2D",
"_____no_output_____"
]
],
[
[
"eei2d = DG()\neei2d.set_phase_discrimination([(0,2),(2,0),(1,0)])\neei2d.efield_times = [t0,t1,t2,tlo]\neei2d.get_diagrams([0,100,200,300])",
"_____no_output_____"
]
],
[
[
"# 2DPE for IR vibrational spectroscopy",
"_____no_output_____"
],
[
"For IR vibrational spectroscopy, the 'maximum_manifold' should be set to the default of $\\infty$. In addition, the 'minimum_manifold' should be set to a negative number. This is because, outside of zero temperature limit, the initial state of the system is a Boltzmann distribution of vibrational occupational states. The $n=1$ vibrational state can be de-excited once, the $n=2$ vibrational state can be de-excited twice, and so on. Depending on the ratio of $k_BT/\\hbar\\omega$, where $\\omega$ is the vibrational frequency, the initial distribution will contain appreciable weight in the first $n$ vibrational ladder states. This information should be used in setting 'minimum_manifold'. Here are two examples",
"_____no_output_____"
]
],
[
[
"tdpe.maximum_manifold = np.inf\ntdpe.minimum_manifold = -1\ntdpe.display_diagrams(tdpe.get_diagrams([0,100,200,200]))",
"_____no_output_____"
],
[
"# or \ntdpe.maximum_manifold = np.inf\ntdpe.minimum_manifold = -2\ntdpe.display_diagrams(tdpe.get_diagrams([0,100,200,200]))",
"_____no_output_____"
]
],
[
[
"Going below minimum_manifold=-2 has no further effect on 2DPE, but one would need to consider smaller minimum_manifold values for higher-order spectroscopies. To be safe, one can simply set minimum_manifold=-np.inf",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7f19b4e992f0cb4f3eccd426c8366790c9a805f | 6,950 | ipynb | Jupyter Notebook | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses | 6f48079778ad13f4bb86929018073258273e5d26 | [
"MIT"
] | null | null | null | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses | 6f48079778ad13f4bb86929018073258273e5d26 | [
"MIT"
] | 1 | 2018-09-07T16:35:24.000Z | 2018-10-06T13:02:31.000Z | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses | 6f48079778ad13f4bb86929018073258273e5d26 | [
"MIT"
] | null | null | null | 28.958333 | 124 | 0.457986 | [
[
[
"# Supplemental Tables\n\nThis Jupyter notebook reproduces a number of Supplemental Tables that are not included in any of the other notebooks. ",
"_____no_output_____"
]
],
[
[
"%reload_ext autoreload\n%autoreload 2\n\n%matplotlib inline\n\nimport sys\nsys.path.append('../src')\n\nfrom io import StringIO\n\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## Supplementary Table S2 - ILC insertions\n\nOverview of all insertions identified by IM-Fusion in the ILC dataset.",
"_____no_output_____"
]
],
[
[
"insertion_column_map = {\n 'transposon_anchor': 'feature_anchor',\n 'id': 'insertion_id', \n 'seqname': 'chromosome', \n 'orientation': 'gene_orientation'\n}\n\ncol_order = ['insertion_id', 'sample', 'chromosome', 'position', 'strand', \n 'support', 'support_junction', 'support_spanning', \n 'feature_name','feature_type', 'feature_anchor', 'feature_strand',\n 'ffpm', 'ffpm_junction', 'ffpm_spanning', \n 'gene_id', 'gene_name', 'gene_strand', 'gene_orientation',\n 'novel_transcript']\n\ninsertions_sb = (\n pd.read_csv('../data/processed/sb/star/insertions.txt', sep='\\t')\n .rename(columns=insertion_column_map)[col_order]\n .rename(columns=lambda c: c.replace('_', ' ').capitalize()))\ninsertions_sb.to_excel('../reports/supplemental/tables/table_s2_insertions_sb.xlsx', index=False)",
"_____no_output_____"
]
],
[
[
"## Supplementary Table S3 - ILC CTGs\n\nOverview of the CTGs identified by IM-Fusion in the ILC dataset.",
"_____no_output_____"
]
],
[
[
"ctgs = pd.read_csv('../data/processed/sb/star/ctgs.txt', sep='\\t')\n\nctg_overview = (ctgs\n .assign(de_direction=lambda df: df['de_direction'].map({-1: 'down', 1: 'up'}))\n .drop(['de_test', 'gene_id'], axis=1)\n .rename(columns={\n 'gene_name': 'Gene',\n 'p_value': 'CTG p-value',\n 'q_value': 'CTG q-value',\n 'n_samples': 'Num. samples',\n 'de_pvalue': 'DE p-value',\n 'de_direction': 'DE direction'\n }))\n\nctg_overview.head()",
"_____no_output_____"
]
],
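[
[
"# Added sketch (illustrative; the output file name is an assumption, chosen to\n# mirror the exports for Tables S2 and S5)\nctg_overview.to_excel('../reports/supplemental/tables/table_s3_ctgs_sb.xlsx', index=False)",
"_____no_output_____"
]
],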
[
[
"## Supplementary Table S5 - B-ALL insertions\n\nOverview of all insertions identified by IM-Fusion in the B-ALL dataset.",
"_____no_output_____"
]
],
[
[
"insertions_sanger = (\n pd.read_csv('../data/processed/sanger/star/insertions.txt', sep='\\t')\n .rename(columns=insertion_column_map)[col_order]\n .rename(columns=lambda c: c.replace('_', ' ').capitalize()))\ninsertions_sanger.to_excel('../reports/supplemental/tables/table_s5_insertions_sanger.xlsx', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f1a621e4fe09cdc739bcd1f7651e9243d02f5a | 287,228 | ipynb | Jupyter Notebook | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci | f11c2528aa2eab4fb6655eb57a56438d165b3a74 | [
"MIT"
] | 4 | 2022-02-10T15:41:16.000Z | 2022-02-24T13:59:18.000Z | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci | f11c2528aa2eab4fb6655eb57a56438d165b3a74 | [
"MIT"
] | null | null | null | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci | f11c2528aa2eab4fb6655eb57a56438d165b3a74 | [
"MIT"
] | 54 | 2022-02-15T09:30:31.000Z | 2022-03-03T21:53:21.000Z | 250.416739 | 158,529 | 0.915854 | [
[
[
"<a href=\"https://colab.research.google.com/github/tbeucler/2022_ML_Earth_Env_Sci/blob/main/Lab_Notebooks/S1_3_Matplotlib.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"![images.png](images.png)",
"_____no_output_____"
],
[
"**Matlotlib**: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python.\n\nWebsite: https://matplotlib.org/\n\nGitHub: https://github.com/matplotlib/matplotlib",
"_____no_output_____"
],
[
"In the previous notebook, we saw some basic examples of plotting and visualization in the context of learning `numpy`. In this notebook, we dive much deeper. The goal is to understand how `matplotlib` represents figures internally.",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Figure and Axes",
"_____no_output_____"
],
[
"The *figure* is the highest level of organization of `matplotlib` objects. If we want, we can create a figure explicitly.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(13, 5))",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_axes([0, 0, 1, 1])",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_axes([0, 0, 0.5, 1])",
"_____no_output_____"
],
[
"fig = plt.figure()\nax1 = fig.add_axes([0, 0, 0.5, 1])\nax2 = fig.add_axes([0.6, 0, 0.3, 0.5], facecolor='g')",
"_____no_output_____"
]
],
[
[
"# Subplots",
"_____no_output_____"
],
[
"Subplot syntax is one way to specify the creation of multiple axes.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.subplots(nrows=2, ncols=3)",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12, 6))\naxes = fig.subplots(nrows=2, ncols=3)",
"_____no_output_____"
],
[
"axes",
"_____no_output_____"
]
],
[
[
"There is a shorthand for doing this all at once, **which is our recommended way to create new figures!**",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()",
"_____no_output_____"
],
[
"ax",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(ncols=2, figsize=(8, 4), subplot_kw={'facecolor': 'g'})",
"_____no_output_____"
],
[
"axes",
"_____no_output_____"
]
],
[
[
"# Drawing into Axes",
"_____no_output_____"
],
[
"All plots are drawn into axes. It is easiest to understand how matplotlib works if you use the [object-oriented](https://matplotlib.org/faq/usage_faq.html#coding-styles) style.",
"_____no_output_____"
]
],
[
[
"# create some data to plot\nimport numpy as np\nx = np.linspace(-np.pi, np.pi, 100)\ny = np.cos(x)\nz = np.sin(6*x)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nax.plot(x, y)",
"_____no_output_____"
]
],
[
[
"This does the same thing as",
"_____no_output_____"
]
],
[
[
"plt.plot(x, y)",
"_____no_output_____"
]
],
[
[
"This starts to matter when we have multiple axes to worry about.",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(figsize=(8, 4), ncols=2)\nax0, ax1 = axes\nax0.plot(x, y)\nax1.plot(x, z)",
"_____no_output_____"
]
],
[
[
"# Labeling Plots",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(figsize=(8, 4), ncols=2)\nax0, ax1 = axes\n\nax0.plot(x, y)\nax0.set_xlabel('x')\nax0.set_ylabel('y')\nax0.set_title('x vs. y')\n\nax1.plot(x, z)\nax1.set_xlabel('x')\nax1.set_ylabel('z')\nax1.set_title('x vs. z')\n\n# squeeze everything in\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"# Customizing Line Plots",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(x, y, x, z)",
"_____no_output_____"
]
],
[
[
"It’s simple to switch axes",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(y, x, z, x)",
"_____no_output_____"
]
],
[
[
"A “parametric” graph:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(y, z)",
"_____no_output_____"
]
],
[
[
"## Line Styles",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(figsize=(16, 5), ncols=3)\naxes[0].plot(x, y, linestyle='dashed')\naxes[0].plot(x, z, linestyle='--')\n\naxes[1].plot(x, y, linestyle='dotted')\naxes[1].plot(x, z, linestyle=':')\n\naxes[2].plot(x, y, linestyle='dashdot', linewidth=5)\naxes[2].plot(x, z, linestyle='-.', linewidth=0.5)",
"_____no_output_____"
]
],
[
[
"## Colors",
"_____no_output_____"
],
[
"As described in the [colors documentation](https://matplotlib.org/2.0.2/api/colors_api.html), there are some special codes for commonly used colors:\n\n* b: blue\n* g: green\n* r: red\n* c: cyan\n* m: magenta\n* y: yellow\n* k: black\n* w: white",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(x, y, color='k')\nax.plot(x, z, color='r')",
"_____no_output_____"
]
],
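[
[
"As a quick visual check, the short loop below draws one vertically offset line per single-letter code; `'w'` is skipped, since white would be invisible on the default background.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nfor i, code in enumerate('bgrcmyk'):\n    # offset each curve by i so the colors are easy to compare\n    ax.plot(x, y + i, color=code, label=code)\nax.legend(loc='upper right')",
"_____no_output_____"
]
],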
[
[
"Other ways to specify colors:",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(figsize=(16, 5), ncols=3)\n\n# grayscale\naxes[0].plot(x, y, color='0.8')\naxes[0].plot(x, z, color='0.2')\n\n# RGB tuple\naxes[1].plot(x, y, color=(1, 0, 0.7))\naxes[1].plot(x, z, color=(0, 0.4, 0.3))\n\n# HTML hex code\naxes[2].plot(x, y, color='#00dcba')\naxes[2].plot(x, z, color='#b029ee')",
"_____no_output_____"
]
],
[
[
"There is a default color cycle built into `matplotlib`.",
"_____no_output_____"
]
],
[
[
"plt.rcParams['axes.prop_cycle']",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 10))\nfor factor in np.linspace(0.2, 1, 11):\n ax.plot(x, factor*y)",
"_____no_output_____"
]
],
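[
[
"Colors from this cycle can also be referenced directly with the string aliases `'C0'` through `'C9'`:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(x, y, color='C0')  # first color in the default cycle\nax.plot(x, z, color='C3')  # fourth color in the default cycle",
"_____no_output_____"
]
],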
[
[
"## Markers",
"_____no_output_____"
],
[
"There are [lots of different markers](https://matplotlib.org/api/markers_api.html) availabile in matplotlib!",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(figsize=(12, 5), ncols=2)\n\naxes[0].plot(x[:20], y[:20], marker='.')\naxes[0].plot(x[:20], z[:20], marker='o')\n\naxes[1].plot(x[:20], z[:20], marker='^',\n markersize=10, markerfacecolor='r',\n markeredgecolor='k')",
"_____no_output_____"
]
],
[
[
"<a name=\"Label\"></a>\n## Label, Ticks, and Gridlines",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(12, 7))\nax.plot(x, y)\n\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_title(r'A complicated math function: $f(x) = \\cos(x)$')\n\nax.set_xticks(np.pi * np.array([-1, 0, 1]))\nax.set_xticklabels([r'$-\\pi$', '0', r'$\\pi$'])\nax.set_yticks([-1, 0, 1])\n\nax.set_yticks(np.arange(-1, 1.1, 0.2), minor=True)\n#ax.set_xticks(np.arange(-3, 3.1, 0.2), minor=True)\n\nax.grid(which='minor', linestyle='--')\nax.grid(which='major', linewidth=2)",
"_____no_output_____"
]
],
[
[
"## Axis Limits",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(x, y, x, z)\nax.set_xlim(-5, 5)\nax.set_ylim(-3, 3)",
"_____no_output_____"
]
],
[
[
"## Text Annotations",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.plot(x, y)\nax.text(-3, 0.3, 'hello world')\nax.annotate('the maximum', xy=(0, 1),\n xytext=(0, 0), arrowprops={'facecolor': 'k'})",
"_____no_output_____"
]
],
[
[
"# Other 1D Plots",
"_____no_output_____"
],
[
"<a name=\"Scatter\"></a>\n## Scatter Plots",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\n\nsplot = ax.scatter(y, z, c=x, s=(100*z**2 + 5))\nfig.colorbar(splot)",
"_____no_output_____"
]
],
[
[
"## Bar Plots",
"_____no_output_____"
]
],
[
[
"labels = ['first', 'second', 'third']\nvalues = [10, 5, 30]\n\nfig, axes = plt.subplots(figsize=(10, 5), ncols=2)\naxes[0].bar(labels, values)\naxes[1].barh(labels, values)",
"_____no_output_____"
]
],
[
[
"<a name=\"2D_Plotting_Methods\"></a>\n# 2D Plotting Methods",
"_____no_output_____"
],
[
"## imshow",
"_____no_output_____"
]
],
[
[
"x1d = np.linspace(-2*np.pi, 2*np.pi, 100)\ny1d = np.linspace(-np.pi, np.pi, 50)\nxx, yy = np.meshgrid(x1d, y1d)\nf = np.cos(xx) * np.sin(yy)\nprint(f.shape)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,4), ncols=2)\nax[0].imshow(f)\nax[1].imshow(f, origin='bottom')",
"_____no_output_____"
]
],
[
[
"## pcolormesh",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(ncols=2, figsize=(12, 5))\npc0 = ax[0].pcolormesh(x1d, y1d, f)\npc1 = ax[1].pcolormesh(xx, yy, f)\nfig.colorbar(pc0, ax=ax[0])\nfig.colorbar(pc1, ax=ax[1])",
"_____no_output_____"
],
[
"x_sm, y_sm, f_sm = xx[:10, :10], yy[:10, :10], f[:10, :10]\n\nfig, ax = plt.subplots(figsize=(12,5), ncols=2)\n\n# last row and column ignored!\nax[0].pcolormesh(x_sm, y_sm, f_sm, edgecolors='k')\n\n# same!\nax[1].pcolormesh(x_sm, y_sm, f_sm[:-1, :-1], edgecolors='k')",
"_____no_output_____"
],
[
"y_distorted = y_sm*(1 + 0.1*np.cos(6*x_sm))\n\nplt.figure(figsize=(12,6))\nplt.pcolormesh(x_sm, y_distorted, f_sm[:-1, :-1], edgecolors='w')\nplt.scatter(x_sm, y_distorted, c='k')",
"_____no_output_____"
]
],
[
[
"## contour / contourf",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(12, 5), ncols=2)\n\n# same thing!\nax[0].contour(x1d, y1d, f)\nax[1].contour(xx, yy, f)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5), ncols=2)\n\nc0 = ax[0].contour(xx, yy, f, 5)\nc1 = ax[1].contour(xx, yy, f, 20)\n\nplt.clabel(c0, fmt='%2.1f')\nplt.colorbar(c1, ax=ax[1])",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12, 5), ncols=2)\n\nclevels = np.arange(-1, 1, 0.2) + 0.1\n\ncf0 = ax[0].contourf(xx, yy, f, clevels, cmap='RdBu_r', extend='both')\ncf1 = ax[1].contourf(xx, yy, f, clevels, cmap='inferno', extend='both')\n\nfig.colorbar(cf0, ax=ax[0])\nfig.colorbar(cf1, ax=ax[1])",
"_____no_output_____"
]
],
[
[
"## quiver",
"_____no_output_____"
]
],
[
[
"u = -np.cos(xx) * np.cos(yy)\nv = -np.sin(xx) * np.sin(yy)\n\nfig, ax = plt.subplots(figsize=(12, 7))\nax.contour(xx, yy, f, clevels, cmap='RdBu_r', extend='both', zorder=0)\nax.quiver(xx[::4, ::4], yy[::4, ::4],\n u[::4, ::4], v[::4, ::4], zorder=1)",
"_____no_output_____"
]
],
[
[
"## streamplot",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(12, 7))\nax.streamplot(xx, yy, u, v, density=2, color=(u**2 + v**2))",
"_____no_output_____"
]
],
[
[
"# Exercise 3: Replicating Plots using `Matplotlib` and `Numpy`",
"_____no_output_____"
],
[
"The goal here is to replicate the figures you see as closely as possible. Note that the data in *Part I* is hosted online and updated automatically - your figures may not look exactly the same!\n\nIn order to get some data, you will have to run the code in the cells below. There is no need to focus on how this code exactly works. In the end, it will give you some `numpy` arrays, which you will use in your plots. \n\nThis exercise should be done using **only `numpy` and `matplotlib`**.",
"_____no_output_____"
],
[
"## Part I: Line and Contour Plots to Visualize Global Temperature Data",
"_____no_output_____"
],
[
"The temperature data are from the [NCEP/NCAR atmospheric reanalysis 1](https://psl.noaa.gov/data/gridded/data.ncep.reanalysis.html).",
"_____no_output_____"
]
],
[
[
"import xarray as xr\nds_url = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.MONTHLY/.Diagnostic/.surface/.temp/dods'\nds = xr.open_dataset(ds_url, decode_times=False)\n\n#########################################################\n#### BELOW ARE THE VARIABLES YOU SHOULD USE IN THE PLOTS\n#### (numpy arrays) \n#### NO XARRAY ALLOWED :)\n#########################################################\n\ntemp = ds.temp[-1].values - 273.15\nlon = ds.X.values\nlat = ds.Y.values",
"_____no_output_____"
]
],
[
[
"Below is the figure to replicate using the `numpy` variables `temp`, `lon`, and `lat`.\n\nHint 1: Zonal-mean is synonymous with longitudinal-mean, i.e. the mean must be taken along the `axis` corresponding to `lon`.\n\nHint 2: To create subplots of different sizes, consider reading the [`plt.subplots` documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html).\n\nHint 3: For the left subplot, check out the [2D Plotting Methods section](#2D_Plotting_Methods).\n\nHint 4: For the right subplot, check out the [Label, Ticks, and Gridlines subsection](#Label).\n\nHint 5: Don't spend too too much time making your figure perfect as there is still a lot of ground to cover in the next notebooks 😀",
"_____no_output_____"
],
[
"![fig2.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA5gAAAGnCAYAAADSe/jsAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd3iVRdqH7zkl7aQXQgpJ6L2DdCkqq1gAEQQV1F37urq67uru56pbdN1mXV0XUMGuSBEUFJWO9N57ekJ6L6fN98d7Ek7qOekhzH1duSBvmXne55y8M795Zp4RUkoUCoVCoVAoFAqFQqFoKrq2NkChUCgUCoVCoVAoFB0DJTAVCoVCoVAoFAqFQtEsKIGpUCgUCoVCoVAoFIpmQQlMhUKhUCgUCoVCoVA0C0pgKhQKhUKhUCgUCoWiWVACU6FQKBQKhUKhUCgUzYISmAqF4rJCCCGFED3cuO4eIcS2RtYxSQiR3Jh7FQqFQqHoiDSlXVVcWSiBqVB0EIQQdwgh9gohioQQaUKIdUKI8W1tVwVCiHghxLUurvETQrziuLZYCJEohPhSCHFVa9lZH0KIGId/K36kw86K3ye0tY2NRQjh5Xie6La2RaFQKBS1I4S4s1o75NwePdeGdsU5bNhf7XioEMIshIhvZXvecfKNWQhhcfp9XWva0twIIV4WQixuazvqQwlMhaIDIIR4EngNeAkIB2KAt4HpjSjL4M6x5kYI4QlsAAYCNwH+QF/gM2BaS9fvDlLKRCmlb8WP4/Bgp2Nb29TAemilz1Df0nUoFArFlYyU8mPndsjRFv0auAgsamPzAExCiAFOv98BXGhtI6SUDzn55yXgcyef3dDa9rhLK7XVLV6HEpgKxWWOECIA+DPwSynlCillsZTSIqVcI6X8reOaJUKIvzrdU2UKqCNi+LQQ4jBQLIQw1HEsUgixXAiRKYS4IIR4zKmMF4QQXwghPhBCFAohjgkhRjjOfYgmetc4Rg9/V8ujzAeigRlSyqNSSpvjWb6UUr5Q17M76ssUQiQIIZ4VQuiqXiLeFELkCyFOCiGucTpxrxDihMPW80KIBxvs/Npt8hZCvCaESBJCpDvq93Scu14IcdZhZ5YQIkUIMU0IMV0IcU4IkS2E+I1TWS8LIT51+LxQCLFHCNHf6XwXIcRXjrLOCyEeqnbvJ0KIz4UQhcBcIcQ4IcQuhz9ShRCvOjU0Wxz/nnJ8RjOEEA8JIX5wKrNKlFMI8ZkQ4g0hxHohRDEwpr7nVygUCkXzIoQYCrwKzJVSpjmORQohVgshchxtzv1O19fZVjvOP+NojwqFEMeFEDMbaNKHwN1Ovy8APqhmc319iauEEDuEEHlCm431HyGEh9N56WibzgghcoUQbwkhRANtrChrgqNNzBNC7BdCjHM6t9Phq92ONnGFECLE4bsCx/mKtrCibXxUaH2nTCHEi852CSEeFEKccnwm3wghoqrd+7AQ4hxw1HH8v0KIZEddu4UQox3HZwBPAnc77NrtOJ4unGatCacopxCijxDCKoS4XwiRBKx19fxNRQlMheLyZwzgBaxsYjnzgBuBQCmltfoxwA6sAQ4BUcA1wK+FED9zKuMWtIhjILAa+A+AlHI+kAjc7Bg9/Ect9V8LfCelLG6AzW8CAUA3YCJaQ3av0/lRwHkgFHgeWCGECHacy+BSpPRe4FUhxLAG1F0Xr6IJ5YFAb6AX8IzT+VjAAnQGXgbeA24DBqH54MWKhsfBLGApEAx85XgGvdCihWuBn4BI4HrgD0KIibXcGwAsd9T7qKOsCcDNwH2Oa692/Nvb8RmtcvN57wL+CPgBe9x4foVCoVA0A0KIQOBL4K9Syk1Opz4FktHahtuAl4TTACt1tNUOzqG1DwHAn4CPhBARDTDrI7QBTb0Qoi9a27DLyWYd9fclbMATaO32GMf5R6rVcRMwEhgMzAF+RgMRQsQBq4D/Q2sTnwVWCSGCnC673VF+DFqbth14y3F9guNeZ24GhgBXofWf7nTUNRctynwz2iyzA2h+qv5Mw4Ghjt93OOoMQWv7lwkhjI62+RVgqaOtdncJkR6tT9QbmO7m8zcaJTAVisufECDLSRQ2ljeklElSytI6jo0EwqSUf5ZSmqWU59Gm48x1un6blHKtlNKGNoo5uAH1hwLpFb8IIYY4RtUKhBCnql/sEFi3A7+XUhZKKeOBf6NFQivIAF5zRHQ/B06hCWaklN9IKc9Jjc3AerRGtdEILRr4c+BxKWWelDIfTUQ6+6gE+Kfj8/oMrbH5lyNaewCtcR/odP1PUsrVUkqLo6xQYBgwHvCSUv7d8XmcBt6vVtdmx+dhl1KWSil3Syn3OKLD54DFaMK8KXwppdwlpbSjdQxcPb9CoVAomogjOrYULeL1D6fjXdDah6ellGVSyoNo73rntrHOtlpKuUxKmepoNz4HzqAJJndJRmtrr0WLZH5Q7Xy9fQkp5T4p5U4ppdXRrv+Pmu3Uy442JhHYiCbqGsrdwAop5Q+OZ10LHAemOl2zWEoZL6XMQesjnJBSbna0319ySQxW8DeHXRfQRPs8x/EH0QYBTjva8j8B44UQ4U73vui4t9Thhw+klLmO619C6+t1a8RzOvOclLLEUYc7z99oWnwOrkKhaHGygVAhhKGJIjPJxbFYIFIIked0TA84rztMd/p/CeDVALuygcpRUkejGCi0xEC1LWYPBTzQRhErSEAbEa0gRUopq52PBBBC3IAW1eyFNtjmAxxxw876iASMwDHnmTGA8/NnOsQYQIWYv+h0vhTwdfq98jOQUlqFEKmOegKAuFo+jx9quxdACNEPTYQPA7zR2oDt7j5cHTjX4c7zKxQKhaLpPA0MAIZXa+cigRwpZaHTsQRghNPvdbbVQogFaFMw4xznfdHa24bwAXAPMBZtdkxPp3P19iWEEL3QInQj0NplA7CvWvnV7fel4cQC84QQs52OGXH0ERxUb5vra6uhantY2d9w1PWOEOItp/NWtNk++bXcixDi92g+7AxItJlqoWjivTHYpZSpTr+78/yNRkUwFYrLnx1AGTCjnmuK0V7UFXSu5Rrp4lgScEFKGej04yeldDcBT23lO/MjMFUIYXKzvCy0KZ+xTsdigBSn36Oqrc2IAVKFtiZwOfAvIFxKGYg23bRR6zicSENrNLo7+ShAShnShDK7VPzHEbWNBFLRPo+TtXwezutlqvt8EbDfYZ8/2tpdUce10PDvTUs8v0KhUCicEEJMQpvaeJuUMq/a6VQgWAjh53SsettYV7mxaO3Eo0CIo208SsPbxuVos4XOSykTqp1z1Zf4L3AS6Olop/7QiPrdIQktQulsh0lK+WoTyuzi9P8YtM+ioq57qtXlLaV0Fs6VbakQ4jrgV8BMtGnMwWiCtintdfV7WuL5K1ECU6G4zHFMQ3wOeEtoiVl8hBBGIcQNQoiKaTMHgWlCiGAhRGe0tQANZTdQILTEP96O9RUDhBAj3bz/IvVP7/gATaCsdJSr
F0J4UXXUtRLH1J4v0NYs+jkaxiepuq6hE/CYwx+z0bLSrkWLfHoCmYDVEc1s8rQQx1SW94DXhZaaXQgtEc91TSh2rBDiJiGEEfgdWqR3P7ANQAjxa0eSAIMQYpCLdaR+QL6UskhoyYIqEz9IKcvRRlKdP6ODwFAhRH8hhA/a96xOWuj5FQqFQuHAsR7yM+DXjmUVVZBSJqGtzf+bo20YBPwC+NiN4k1oQiTTUde9aFHSBiG1XApTuLTG3xlXfQk/oAAoEkL0AR5uaP1ushSYLYS4xmGDt+P/tQ2kusvTQks+GIcm0j93HH8HeFYI0RtACBEkhJhVTzl+aAPomWj9lT+jRTAruAh0rTaAfhAtImkQWkIgV7sItMTzV6IEpkLRAZBSvoImrp5FeyElob3cKhK1fIi2oD4ebR3B5zVLcVmHjUsL2C+gRRAXo03VdIe/ob1g84QQT9VSfhkwGW0NwDdoDcwptPUac+oo81doo3bn0QTXJ2gCp4JdaFNzsoAX0UZ7sx1Thx5DE6i5aGnUV7v5HK74Ndqo5V40wfYt0KMJ5S1HW9eYi5a0Z5ZjDaUFbfuWsWhTcTLRRn7rmyr0BHCfEKIILVFB9e/Bc2iJBPKEELdIKY+gre3ZijaivMkNe5v7+RUKhUJxifvR1u6/LmruhfmO45p5aFNcU9ESAD4vpfzeVcFSyuNoyyh2oImYisQ2DUZKudex1r/6cVd9iafQ2uRCtGhqg/srbtp3Hq1N/ZPDhgTgcZqmjb5B62vtBZbhGPCWUn6KtiZzhRCiAE0M1jfwugYts/s5tP5NFg7R7+AztGhljhDiJ8exP6B9XnnA7x3X1EkLPX8louq0bYVCoVC0F4QQLwOhUsraRoEVCoVCoVC0MY7ZVqVAFyllsqvrrwRUBFOhUCgUCoVCoVAoFM2CEpgKhUKhUCgUCoVCoWgW1BRZhUKhUCgUCoVCoVA0CyqCqVAoFAqFQqFQKBSKZkEJTIVCoVAoFAqFQqFQNAuGtjagOTAZTDLIGNjWZigUCoVCoVBUIaUsNUtKGdbWdijaP6GhoTIuLq7ea4qLizGZTK1jkAuULVU5e/Ys+fn5REVF0blzs2wn2WRa0i/79u2r893WIQSmQeh5LO7BtjbjimJT9jYmhYxvazOuKJTPWxfl79ZH+bz1UT5veZ4++XxCW9uguDyIi4tj79699V6zadMmJk2a1DoGuUDZUpXp06ezevVqHnroIZ599tk2taWClvSLEKLOd1uHmCLrpfNqaxOuOHqbera1CVccyueti/J366N83voonysUCkXzoNfrAbDZbG1sSdvTIQSmRVra2oQrjtTytLY24YpD+bx1Uf5ufZTPWx/lc4VCoWgedDpNVqkdOjqIwNR3jMe4rPAz+La1CVccyueti/J366N83voonysUCkXzYDabATAYOsQKxCahlJlCoVAoFAqFQqFQNIGysjIAPDw82tiStqdDCEwb9rY24Yqj0FrU1iZccSifty7K362P8nnro3yuUCgUzYMSmJfoEDFcozC2tQlXHJGeEW1twhWH8nnrovzd+lzJPg+KC8avkz96Dz16Dz06gx69QYfQCWwWG9ZyG0Iv8O3kh18nP3yCfTB6e2DwNpK8N4G9S3c1qt4r2ecKhULRnCiBeYkOITDL7GVtbcIVx6niM0R4hbe1GVcUyueti/J363Ol+rzX1L7MeH2229fbrTbM+aVYSrT1Pr2u7UN5UTlHlh9scN1Xqs8VCoWiuSktLQXA09OzjS1pezqEwDTpfdrahCuOEQFD29qEKw7l89ZF+bv1uRJ9HhAVyLS/3kzWkWQOvfk9NrMVu9mK3WrDbrWDXaIz6tF5aOnvSzMLKc8pRtq1LIVCJ5j45nymPjeNzNMZpB9JbVD9V6LPFQqFoiVQAvMSHWINplpD0vpszN7S1iZccSifty7K363PleZznUHHrFdvBSQ/PfMFGXsvkH04idyTaeSfzaAwPovCxGzyz2WQeyKN3BNplGUVVYpLAGmX/PSHZZRmFnLbm7PxCTE1yIYrzecKhULRUiiBeYkOEcEMMAa0tQlXHDeH39DWJlxxKJ+3Lsrfrc+V5vPwfhGEDoxm94urKU7La3Q55vxStj75CVM/eIBHt/0Gc0EpQq9j+ztb2b34p3rvvdJ8rlAoFC2B1WolJycHAB8fNbOyQwjMHHNuW5twxfFZ6nLmRs5qazOaneCuIeg9DGSeutjWptSgo/q8vaL83fq0Z5/7hvky+sHx6D0M2Cw2kBIPkyceJg/0ngbsFjt2qw2b2Yal1Iyl1AICPHw8MHp7cHTVIS5sO1elzJLsYgCkremZ0PPOXGTXC6sIGx6H3WIjqFdnrn5sMmd/OEVOfHad97VnnysUCsXlwqlTpygrKyMuLg6TqWEzSToiHUJgBnsEtbUJVxwdsUMS1juce1c9CMAHcxY3eC1TS9MRfd6eUf5ufdqzz2/9x3TChsdSnluCzqhH6ASW4nKsxWZsZis6gw6dQY/Ow4DBy4jBW8tubikxo/cw0GNiDxbd+F+KMgoryyxIz8duteEb5boNC+wZTuSE3hx/fytIWes1Cd8dIeG7IwB4BZu4ceXj3PCH6/j4gc/qLLc9+1yhUCguF/bt2wfA8OHD29iS9kGbCkwhxBPAfYAEjgD3Aj7A50AcEA/MkVLWG6JUEczWpyOOet/w+2uxW2zYLFZueelmlsxbgrmovK3NqqQj+rw9o/zd+rRXnw+ePYzOo7uz58XVnF2+t8H3+0YHccMXj3LTs9fx2WMrKo9Lm6QkPR9TZP0C0+DjwdSPHkRvNCDtkhNLtrqssyynmGOLNjH0yevpOqEHF7aerfW69upzheJyobn6sorLGyUwq9JmSX6EEFHAY8AIKeUAQA/MBZ4BfpRS9gR+dPxeLyqC2fp0tA6Jd5APnUd15+jCTWz77ef4x4Vy31cP0HlAZFubVklH83l7R/m79WmvPo8YpL0HSjILXVxZO0XJuRxbvJmY6wbQY3KvyuNGHyNGP686I5IVxN4wCL3RgKW4nCGPXcfUDx+k2/ShdJs+jJ63jyJ0cEyNe4RBh19cKACh3UPrLLu9+lyhuBxozr6s4vJGCcyqtHUWWQPgLYQwoI32pALTgaWO80uBGa4KybM0PjlCUwnuGkLP6/q0Wf1txcr0NW1tQrMSFBsMQO6pNNJ3nOWHX7wLEua+eych9XTOWpOO5vP2jvJ369Nefb7h5fVkH0th3MtzCOzZuD0jT3ywjbwz6Vz/wjS8ArwAGDx7OJ4BPpz+fFe993aZ3JfCpGxeu+offPPMKryCTYx6fiajnp/BiKdv5JpF9xIzdUDl9R4B3kx8/S563DqCHf/bxp6lO+ssu736XKG4jGiWvqzi8sVqtbJ//34ARowY0cbWtA/abIqslDJFCPEvIBEoBdZLKdcLIcKllGmOa9KEEJ1qu18I8QDwAIC3zovzJfGU2cqwSCvBxkASS5Pp79eXHbm7mRo2heVpq5kTObNyOtAXqSuZFXEL6zM3MCboKo4VniDGO5ocSx5GYcBL70WmOYvuPl0
5UHCYq4PHsjZjPTM731xZxorsNXy//ltMUUG8deOrrNv3Hb1NPUktT8PP4AtoW6hEekZwqvgMIwKGsjF7CzeH31BZRsW/K9PXMK3TVLbk/MRQ/0GcK7lAmEdoo57ph+xN3H7tHMyRVrpGxeER4kVgaCChof4Y/LwQJTbOpybjkW3gfx8spG96jxr2rLm4jskhV7M3/0CtzzQ8YAibsre12jM15XNy55l8QrVMxKsP/kAvSwgfrnuPsqwixr47n7mL7uDWybO5Rkxo02ea1mlqgz+n1v7utfTn1JrPVGYvxyZtHeqZ2vvn1MkjrE3e5S6fqRievv3/eG3j6/R97md8f9dCVqZsYX70VN6KX8kv42ZW/vtu0lrujLyWbzJ2MDZoAMeLEoj0CqHEVkbyb1/lyeV/47Gdv6M0pwjhpSd9z3le/PotHoqdztvxq3gkbgbvJHzF/TE3syxtIzfGjSdsRByb3t/AwbwjHPvoBOuWr0WE6+lMJ47kH+e19//O6BdnIXSCxT8t442P/odHqIl1/7eal/77txZ5l2MLpVyWYZVW/PWBXDQn0827L0eKdjM6YAo/5qzmupCZrM9eztSQWXyfvZJrgm9hZ/4GBvpexfnSE4R7RFNgy8MgDHgKL3KtWUR7duVUyWGG+o1le9565ne5/P+eFB2XpvZloWp/Njw8nE2bNtVbZ1FRkctrWgtli8b58+cpLS0lMjKSw4cPK78AQrqYmtNiFQsRBCwHbgfygGXAl8B/pJSBTtflSinrnQPrb/STz/b4bf316QReAd74BPvg28mv8sfoZQSdACnJOptJ0p4EijPd21dzzMMTmPDYZGxmK0k/HOOL33zl1n0tgc6gI6xXOL2m9mXQ9IGYOl/auqU8r4Sy3GLKc4sxF5Ri9PXCO8QXn4hADF5GMg8lsuGNLST8dN7t+r7P2sh1oZNb4lHahHGPTmTcLyfy+ag/YbfYKo8H9e7MNYt/QXFqLotvXaxtfO6EzqhDb9A2QLdZbdgtTc8GWRcdzeftHeXv1qe9+7z7pF7M+u9cTnywjYOvrW9UGWHD4ggbGoNPJ3+8Qnw5/u4Wck7UnVAs9vqBjH1pNh/Ne4/Ug8m1XmP0NnLH/+YSPrIrAMXp+Sx/bJlbicpc+Ty52GURrUb0ZZqY8emTz++TUqqwRgekOfuyACNGjJB799a/znvTpk1MmjSpKWY3G8oWjffff5+f//znzJkzh88///yK8YsQos53W1sm+bkWuCClzAQQQqwAxgIXhRARjhGfCCDDVUE+uqr7zfhF+BM1JJrwfhFE9w4joHsYPuEB6PQ1ZwRLux0pQQgQOu18YVI2ZVlFFBZZKMooZPO/f6Q0t6TKfZ36dmbswxNI+O4IpohATG5kAXQLAQYPAzarDWmrXfwbvAwExQTTeUAk4f0jiBkYQVCvcPSeRuxWG2k7znHg1W/JOpREeU4xdqut9nJMnnS7eQi97xzL7e/eRca+eDa8uYXEXfEuzRzqP6gpT9lo9EY9kUOiKS8qJ/PUxSobjjeFgrR8AHrfMYYTS7dVHvcI8EHa7XgGmjB4GDBbzZXnIodEM2fhHXj4adPdzIVlfH7/x6QdSmkWm6rTVj6/UlH+bn3au8/PbTrNmWW76btgPEIIMg4kYCu1IKXE4OOB0eRJYUI22UdrF4IAmfvjydwfD0DcjYMpySiot87uM0dQnJZH6qGqZToLv2gsfPzApzx54A8AvD9rESU5Vdusuqju8/YkKKtTYdvlKjQVHZJm68sqLl+ys7WtoCIj20/ejramLQVmIjBaCOGDNq3gGmAvUAzcDbzs+NdlWNCMme6TetLz2j70GN8Nn3AtemezWCm4kEXWwUSKUnIpy9GieKVZRZRmFlCaWYitzAKA0OsI7BVO+PCuhAyMxsPfG/9AT2JHxREc6s0nD31RWZ/B08DMf82gPLeEg6+v5+avfs3uJXWvcXHGN9yP0B6dCIwJIjA6iICoAEK7BODTyR+jyRODt0fltXabHbvZiq3citVhp2eAd5VrzIVl5J5M4/Tnu8g5nkrGvnjKst2LwFqLyzn92S7OrthHj1kj6Hv3eOYuWUDmgQS2vbeTsxtO1yngzpVcINQjxK16XNGpTzj9bxlERNdA/GJCMPh4YnZEXXOzSynNK6W8oIwuvUPoPLoHRpMnAJbicrIOJ3HsxzOcXHfM7Q5VbRxZfpB+E7oy+FfXYvDxwFJUjmeQD33uHEtBfBafPfQZ5pJL4tLobWT636djKSrj2LubAeh1+yim/+0WFs9chLXc2jSn1EJjfG70MWLwNGIuLsdmrn2gQVE7zfkdV7jH5eDzVS98xyxPI33mj6PP/HE1zltLzXw9/XVKs+pPCBQ2LJYxf5nFmS/3sPelS+sgQwfHMOnNuzAXllGaWUjooC5s/Mf3Wn5KJ6qLLGuZlTfG/BO71d6g7NfOPm+quEwutri8JtpkbFolKKGpaFc0W19WcfkSGqrl6sjKUlPiK2jLNZi7hBBfAvsBK3AAWAj4Al8IIX6B9oc721VZ0d2imPXfeZgLy0jbcYbMA4lkH04i90w60urelEVps5N7Io3cE2lVjve+YwzDnrqBwbOHcWiZtoB3xp9vIKBbJzY+vBT/rmHojHoSd16os2yfYB96X9+fIbf0J8wp25+1zEJxWh4laXnknkzDUlSGtdSCrdyxp5qHAb2HAb2XAYOnEYQ23bU8t4SSi/nkHE+lMCnHZQZCV9jNVk5/upOzy/fSfcYw+swfx8w3b6cwKYc9H+/l1PrjFKZVHWUP82h64hudUceYBycw5oHx2G12ipJyKIjPqhR3noEmukQF4RHgjYefFyUZhcSvPUTa9jMYvD0IHRpD+PCuXPvsDUx5ZioXd5/n+1c3k360cftXLn9mDfM7BzDg/kmVx1K2nmLZEysxF5urXDvtt5Pxiwnh6GPvUHpA2zx996l0Jv/3bsY9OpHN//6x0X6pizCPUEyhJvrdPAivAG+M3kY8TB7aRu4mD/xMRnRGPTqjHoOPB96hfpViHMBusVGYmM3JTWc5t+k0KQeS6oySK5rnO65oGJeDz6Vd8uXTawj8zzY8fD0xehvR6XWUF5Vj8DQwb+kCBj16LbteWFlvOb3vGANA7M8GcuCVb7GVWRA6wfDfTcNSYubCrgTCov3JOJDA4S/3u2VbWV5pg5+nKT53R1C6uqcpgjO5WIlMRdvSnH1ZxeVLWFgYAJmZmW1sSfuhTffBlFI+Dzxf7XA52giQ2xRk5rHp0Q+4uPtCndNBG8upT3cSOb4X1/x+KmUFpXSf1ItutwzmyDsbSN91jqFPXg9Ajym98I8MwFxixifIB58QE76d/OjcNYjQQV3QGfTknk7n4OvryTyUSHFyrssR7tbGbrZy5ovdnF2+l+hJfeh911imPDOVKc9MJfNQIp888BnlBWUAlNm0f6OGdaFTn85YyyxYSs1YSi2Yi7V/kRJ0AiGE43g5UkpCe3SiU+9wBt/Sn8CenbnwzUH2/3Md5oK6O0dCJ2pEUys2FA/o3onY6wfS7ZZh3PXpvax7dg3Hvj
pcowwPkwc9JvdC2iWWcivWMis2sxVruRWbxYa02Vn97NcYPAzYbRK7zY6t3IpPiAlTqC84plF37h9B73mjSV22lQKHuATwPHqUi2t2MfLeMSTsjCflQCJ2qx27za7ZXmG+cDxPA8RdzKg4RkwZy9Wzr8bg7YHdYsNaZsZaasFaXI6lxIy11IyluFw7V2omNes0ZVlFWMssGB3T94L7RzFywShG/WIs+eczWHrHEsryy9y240qi4juuaD0uJ5/nJdW+pd3+T/Yy8p7RnPxoO/lna58V5xcbQtTEPiTujifmqjhipg7gwuoDxN00hOC+kaz+zXJOrj3WkuZX0hifN0ZYulNWY8Rme4xmegd6E9qzE0FxwQTHhsB91bs5io5Ec/VlFZcvFQLzzJkzlJVdPu1YS9KmArO5KC0sJu2n2jeRro1Yk+tR3oRib+0/UrLz+RVc++4vmP7abGzlFk58sJ2jCzcBkH/uItZSM4NnD/Cmdq4AACAASURBVEPvccmddpudspwiilPzOPnBduK/PVxnZ6O9IW12kn48TtKPx/HtEkzvO8fQa84oguNCSDusrS+0SCud+nZm3pIF6Iz6RtVTcCGTLU98Qsrmk65tqmetZf65DA6/9SPxaw9x4/LHCIyuuR5W6ATz/juH8JHdGmVrdQqPJ5L4v3U1jse/9TV+A2KZs/hOl2Vk7Ivn4/s/1cR4LeiNevrdPJCxPx9NQPdOlJeUkbThOMcWbaYwMbvRthtMnkz6z3zCBsdg8Kj9FRAxOIpbX7sNg5cRa5mFsqxCVv1+Ddlna47O6Y16Bs8ZRpeRsUi7RNolxdlFpB1KIfVQCvkpbbeNUFOwyOaf5qyoH1c+1xl1GL09MHjosZptlQNeFQTFBjPsrqswhZgIDPai4EImX/3le5drtX3D/bhj8R3s+XQfBz6pP8EGQGBMEEPnjQRZMVhlwW6xYbfZGXBjf8rzS7DVMU1eZ9Az5sXbsBSV8fVTK5i3cB4DH5xM8oYTDHxwMllHkt0Wl1HDujDh7pH4xYVizi8lL6cUnUFPcLgJrxBf8s9lsPOT/ZzbcqbOAS1nn0ebXE+TbU5xWV/ZDRWbLR3NDIgKpPOACEK6hxHSLRS956V3p04ntH1HTUb840LxCvGtPGcrt8B9LWeXQqFoewYMGEBERATnz5/ntttu4/HHH29rk9qcDiEwDaJugeOOmHR5X0kp383/H1ETepOy9RTm/Evnzn91gPNfHUDoBD7hAei9jJXZWpsrAU1bUpSUQ/KGE/SaMwq9xyU/h/mEMP1vN1OeV8L3P18Mdone24jB28Pxo3UOKnxg8DJiMHmiM+opuJBJ3umLWIoudQ6D+0cR0DUM3y7BFCXlcOHrgw22NXKctoH5iW+O1jg39uGrCR/Zjb0vf83FPee1qceexsppyDoPPUKnQ2fQIYQg1NviSPqk2S8lYLcjpUSabeTtOYW9vGZHy1ZcxtFH/0voNUPIFz5aeXodCIHQCa0smx2Djwd95o9jzmu38skjX1Tp/Hn4eDBk7nBG3TMa7zA/ck+ns+OPy9m0+gfi9GEN9kt1rMXl2Mqt5J1Jp6iWjMmBMUHM/u9crMXlJHx7Ar2XkcjxvZi76A6W3v4eRRmOyLuAPtf3Y8qTU/CNDqYwMRu71YbQ6fAJ92fEgtGAljTr8FdHOf71EXITcppsf2sRbAx0fZGbePh4EN4/At8wX0xhvpjC/AiIDCA0OgCvEEev2C6xllrITMglLymX7PNZpB9NJetsZo3sxR2V+nzeeUAkc9+7Ew8/bfDPbrGx6tfLOLvhNKCtjZ/z9hxMkUGUpOVhKTXTa+5oxmaXs/0/m+ss1+Bp4Pa35xDYI5wpT08laU8CWWfqnubkHeTDHe/eiXe4P9JqR+9pqEwQB1CclseGh5dQlFT7d33Qo9cS0i+KFY9+TlFmEd++tJ55H9zNNYvuxRQRyDfPfl3rfb6d/Ajr1Qn/iAD8IgPoM7E7wX0jKc8vIetwEh6+XnTuFYa02ijNKqIwMZtOI7py69tzKU7P5/i3Jzi/5QzJexOxOWXKru7zCpFWl9CMNhlbVGRWUFFHQ4Rmc4tMvVFPz+v6MGru0MrBSWm3U5yah8WxJl8IsFvtSJsdm8VGytZT5J/LJP98BoXxWRSn5zefQQqFol3i4+PD+vXrmTRpEt988w2FhYVMnjwZg6FDyKxG0SGevNx+qbFrrKB0RYQ1B/vGHZgrIpvVkHZJcdrlGalxhc2sjXA7R2ivun8cQb0j2PLEJxSn1D5VzF263jyE0X+6tcoxD39vTn2yo0HlxE0bTPbRZHLiq0b3YkZ3ZewjE8hYtxfzN5txJ9+vq2XaUYGF4FX7uZQ8SF/5k8s6LmRl0P2pWcz80/WseHYdeg89V/1iLKPuGY2HvzdpO86y44/Lubhb2z7mZPYF4jo1XWAChPSL5PCqmtOIvQK9mbtwHgLY+MsPKjvJgb06c+3in3PH4nksuWMplhIzs/95C3HTBpN7Op2Njywlfeel6cJCryOgeydCB3Why5R+jH1kAuMenUjGvnh++mAPZ3482e7XfyaWJtPFO7pR9xp9jHQZEUvsmG50u6oLQb0jqmSxtpmtFKflUZyaR2FCFhWprI2+noTFBdN1XLfKZF62cgup28/w7T82XFYCvTHU5XODl4EZ/5yBpdjMkXc2Yjdb6X7rCG586Rbem7mQwrQCrn5ySuXa+PRd2ndx1AszGPvwBFL2JxFfxzZMt750I8F9Itj1p5UM/tV1zPj7dN6b816tol5v1HP7W7PxCvXjx1+8V5ktVmfQ1j7rPPREyjyCbHaCnIROxYwY707+9L5zDGdX7OXsj6cASNqTQOL3R4m5bgCZBxIq7QyOCyF2bFd6j44hZGCXKltP2a028s9nsvvF1cR/c6gyWV11hF5H5IRedJ85gmHzhjPyntFYisvZsWg7u9/9CbvVXqfP64tmtpbIBE1oNkdiIIDgriEMGhPNthXHsJbVHy33jwzgjnfvxD8ulKLkHA795wfStp+mID6rzui0QqG4chkwYADr169nypQpbNmyhXvuuYelS5ei1zdult/lTocQmD56rxYTltWpXk9CHYLT1X114W55rYnd0Zhede8YYkbG4uHryZDbhxO/9pBb01vrRQgGPXINRam5fPrzjylIy2fOKzMY9tQN2MxWzn65x61ifDoHENQnAoCff34vBXnlWErNWMut9LqmN6UJGZx/ZYUmDFuY2upIyfOrceziVzvxDA+kx/xr+OXYnhi8jHgGmUjeeIJj720h51jV7U5GBvZuFvtCB8dg9PUi+3xNGT3zxRsxRQay4YElhOSkUBFcI+UCW5/6jElvzufWF2+kNKuQuGmDOfz2jxx/b0uNaL202ck7nU7e6XTOfrkH7zA/4qYNpsdtI5nx+mxKMgsoOJ+JTyd/PAN9uLgvnrV/+4GC1PYz2t/fr2+D74kb152rHxhD2NAY9EYDtnILWUeSOf7eFrIOJVKclkdZdnG9640BEALfqECC+0UROqgL3W4Zyi/W9Obgsv3sXbqTvMSmDeq0V
2r1uYCZf5mGf1woGx58n4t7tIRq6bvOccPnv+SWP03j+1c2MmLBaM58uadSXALsffkbgvtGccu/ZvLWpNdqZFLud9MA4qYN5tB/fsD2w0/sKSxjwr/mMf5Xk9jy6oYapsz40/V0GhrLqec/wvfCGXyrR8usNZK9Apfe/95Tx6LT69j8ftX32tqXf2RWqB/fvvwDOoOO+1beT2CPcACKUnPJPJDAyaPJ5J5Mozg1l9KsIqTNdVRb2uykbDpJyqaT6L2MhI/sSrdbhnL1r6cwcFo/Nr6+iYIf6n4nNpfIjJcXK/8fJ8LduseZhkQz64tiXvN/19N1XHeO708n4+TFWq8J6x1Ov5sGMHjmYHRGPZt//TGpW083OZmeQqHo+AwbNoxvv/2WKVOm8PHHHxMSEsLrr7/e1ma1CR1CYBbb3NuWoyVwFo4V4rApYrexArYlyT2TTtpPZ+g6viexo+OQdsnRHfs5+48fmlx2cN8IfMID+PrplZXRmS+eXMXc/xgY+YebCegWxoFXvnOZvKksq4gTS7dhigjEw9+bgBBvDF7+6L2MmNNzOP38R0R4tV30x1l0OovNxIXfIm12dN1iKM8rJmHdkSodZGe+z9rL3Mim5QwIv6obE/49j6LkHM58f6LG+fSEfLpMMWA0edQ4d3H3eeLXHSb2ZwPQexo59ckOji2ue+qhM6WZhZxYuo2TH24nckJv+swfi8HHg7yzF7GWmImZOoD7vunFjoXbOPTFfkqy234zvh25u7mh03WYQk34hftjLjFjLjFTlldaZRsar0BvoofHMOG+MYQNiaE4LY9TH+0gfdc5Mg8mYjc3ItohJUXJuRQl55K4/ijH39/KgAcnM2T2MIbdMZK8sxc58cNp0o6kkJ+cR35yXpVtdNo73kE+jsRgFkxhvkQMjKTzwChyvHOJC4zF4GHAWm6lvLCMzrGBRF3dm8Nv/1gpLgFMkUEYvD0oTMyiOKuI8txiwgZ3Qe9lrIzo2cosnPliFyP/7xa8A7xrTAn3i9Cigqc+2UG0HvS79nN2RU9GPzCei8fTOPXdpb+RfjcNoPvM4SR/8CPZGw416rlzTqQi7XamPDCaZb+7tDVJQWo+79+xVPtFgN7TSFl2Ed/fu4ii5OYZTLCVWUjdeprUraeJntKXIY9P5da35nJt9vUkbk3g1HcnOLfpdI37mioyncWl8++NEZpNZfO/f0TkF9UYXPMK9GbA9MEMvXUQQb06Y7fYSN91jkNvfk/emdqFqEKhUNTG6NGjeemll/jd737HG2+8wYABA7j//vvb2qxWp0MIzDCPmtGhtqAloqi1CdjmKKshZUqrnU2PfljlmE3a0QtdHXe4T9TVfbDb7FzYcilJk81s45OHvmDSU9cy8t4xBPeP5qdnvqh1CrLzM+Uu/oq6umKtEbl0lwpbKoRm0rvrK895ArHVRt8rPqPZEZNrlOXugITOqCfuxsGMeOYmChOy+eQXH1OcVbPXuOW1DfQc341RL8xk3ZxUyvOq7i1acjEfvaeRwiRtylhDkXZJyuaTNSLfR97ZwNAnr2fCY5MZ/+hEsg4lceib4xz4dG/zrWUW0Kl3ODGjutJ5QAT5yXmkH08j43g6RZmFWnRLQGiPMKKGdmHasOnEDe+Cb3RwjaJKswopTsvDM9AHvy7aHoLF6fnseXE157860OzZrMuyi9j70hqOv7uZLtf0J2piH0bdNxad4dLUG5vFirXYjKWkXNvOKKeY7IwS8pJzyUvIISc+m8zTGW26nlPvoeeOd24nYkyPGufsVhvluSXYzFbsFhs6Dz0evl7oPAxapHzJVnrePgqfcH+EThB7/SDyL2Sy9l+bsJZZWf27Vdz2v3mMeOZGdr2wqrJcWU/kqdghOL1DfSFX24op880vCewRzg1/uYm0I6kUpOYT2CWIn71wIxkHEkh8b32d5bnC++QJji7azMAHJzPgp0SOrqpFqErYvnA71//lZny7hDSbwHQmecMJUjafImpiH6Im9aHn5F4MmDGYTf/6gd3v1pze3xSRGSfCa4hMaDmhWd8azIwT6Xzxm0vbEQq9YMjtw5n4+GQ8/L3JOpLM3pe/JmH9Ucx5jd9X2ZnWml2lUCjaD0OGDGHhwoXce++9PPLII/Tq1YuJEye2tVmtSocQmGnl7WdaXUvS0gLWGVfCc1HiGh6Knd7k+iMn9CL7cBKl1fZvk3bJxn98T8qBJKa9eAvXf/ow23+/jPQdl4Sou/5oT+LSmepC0xXVfV7b88eaSqt8dqbIQHrNG03XaYPxDDKReTCRTx/6rM6tSWxmGyufWsmCZfdx1fMz2P+Ptdo2K1Yb5QWllWsy97y4us51X42h5GIB25/+gmOLNxM1qS9dJvfl2mdvwNPfix3/3dqksv0jAxhy+3CGzBpSmd2x5GI+Xtf3qyLQLCXlYJcYfbXFtdkXs8g9msbpz3dTmJSNwcuI0eSJZ7AJU0QgvlFBlGYWcm7FPrKPJpN1OAm7pXmFZXVKLhZw6pMdnPpkB0ZfT/xiQvCNDtYi935eGEyemo2BPngGmejeMxyf8CGV91tLzWQfTeH0jngOfb6Pkpzm6US7g9AJ5rwyg4gxPTj27mYsReUYvD0wF5SSfTSZ3NPpvHXqyzrfKz1mj2TE0zdiK7cg7ZLyvBJWPbWqci3dhW3n2LnoJ8Y8OJ7Tn+4k91S6S5sqElZ5h/lDrrZ/rrTaSPjrhwx670lm/XsmH97zEbNemYm02Un860dQbWpq8NUDKPQJwrZpJ3Y3/iYKP1nHxWGxTH3uBtKOpJB9ruZU9aNfHWLCL69mwH0Tq7zzmhNps5O84TjPvv83Hu4+kzF/ncXEJ6eQE59duT7UmZYQmdC8QrMhCX469Qln+t+nE9SrM+m7znHglW+bLVqpRKVCobjnnns4evQo//73v5k1axZ79uyha9eubW1Wq9EhBGaEV4DrixQNwlXktDnEpYe/N8F9I9n6xsbKYzGjuzLk9uEEhXjh4e+N0c8LDz+twz/5rQV8ftWfsFttlfa1V/HYEOp7hpQ8v0rR6Epc1sbNq3+N0OlI2nCcnz7eT8IOLYHIrL/dSKdhcVp2W73QkpQYdOiMeoReh97DQPTEPkRP7FNZlrXUzJcTXyLraDKF8a7SIDWOvDMXyTtzkWOLNjH6L7cy/tGJ9JnQjbO7EikvKnds6SLx8vfGK8Abm8VGzvkssi9kkXE8vUp2TNA6kQuW3QdCkLL5JMkbT3BxzwVKMwrQexoI7NmZgB6d8Aoy4Rnog86oJ+d4KpmHEuvMAtpesBSVk3M8lZzjqfVep/c0YIoKIrBHOKGDuhA6OIbxj05kzP3j2P/pPna/91OrTEm+6hdj6XJNf/b9cy2nP91Z6zX1vVf63DUOgK1vXpqWHXNVLNHDu2C3SaTdTnmhNnDSbeZw9r38DUDlWsXQnp1qTJEtytT+9gJ6dMJy+lJUvTwtlwv/Xk6v5+/kyYN/AODUHz/AnFF1FoVvn2j6vHg3APHBelI/2VSvDwCwSxJf/IiAd59k9ltzOLruBN6B3ngHeBMY5IXR3wtPf2+8
gk2YOgfgFxfaqL83d2esPBQ7HWm1s+v5lZgigrjlnzP59rmvOf51zWzcrkQm1L2NSX0iEzSh2RSR2RBx2X1SL2755wwsxeVsfeozkjccd/teJR4VCoW7/P3vf+f48eOsW7eOKVOmsHbtWvr2bXh+h8uRDiEw08qujAhmW1HRoDp3Tt6OX8UjcTOaVK6luJzy/BKCYrUpiAFRgcx6czbWMgtFSTkUp+djOXOR1C2nsJlt5J/PqDL1sCOIS1dEBRZWisw/n1rLc72nubzHOYp55H+bGPTwFM4fSCPBkZ1y+IJRdJ8xXNtyp6AU7BK71a7t42e1IW12pF3SeVR3PIN8QKcJ0IOvfYe02ltMXFZnz4trKErOJerq3ox5aEKN89ZSM8KgQ2/UXmMZBxJYcufSGllWdAY9O19YyYXVB6oct5VbyT6aXJkJtDrN8R1vD9jKrRScz6TgfCaJ6zXR4BcbQv+fX82Iu0cx8p5R5J2+yNmdCVw8nkZJdjEluSUUZxZSnF1ce9aaBqIz6Bh510hSt5+pU1xC/T4/+eF2hv3meiY9dW29dVmKykhYd6TyvSX2HKTgwnimv3Ir79+6qEoiqayzmWQdSWLgQ1M4vH0v1vxLEd19X52C3jvpNXc06St/InvTkSr1CA8Dsc/Moyy3GK8gEwWyjrTStdmYXciZZ5fQ66V7GX3/OMwFpZU/5TnFFMZnUZ5XQlFKbpOzdLuiwue2citbn/iY8f+cy03/vJWrH5nAro/2cmzVoSrre11tY9IUmioya8M7yIfBs4dRVlhGXmIuUUOjGffLiWQfT2Hrk59SmlHgsgwlKhUKRWPQ6/V8+umnTJ06ld27dzN27FhWrFjB5Mk1lzx1NER961MuF/r4dZbvDZnf1mZc8biaVltbIx35+7uIGNOD/0x8jQUfzCegeyeO/PwVytPr71RdCeLSmdqm0bqbrbbXn+8iaGw/3p+xEATcs+IBCvae5uTvl7SEqbXS1PXDQifQexodew4KzAVl2p6beh2myEC6XNOfIY9dx7fPreHwsktC0uBp4In9z3B04SaOLtzUxKfoePjFhhBz3QDCR3YldFAX9J5Vs3TaLFZKMwo5u/UcOxdtb3SW3wEzBzPtpelaRs4tNadfuovOqEdn0FeuqxR6HUIn0Dn+FTod5qIyovVVRYNnZAgDFz9OYVIOS+YtqZJNNrRnGHcvv5+k74+x49nlVe4TOsGAMZ0pPBJfI2trzIM3EH3XFNa/8A1TX7iRsy9/QcY37mW9vvRAgoQib7cylNY20NcSCIOO2KkD6Hn7aEIHRlOSWcCS2969tP+tE3WJzLqimPVFMJ2pS2TWlUW2ruil0cfIgg8XENIvqsrxC2sOsOelNXVuN9LcgnLctn/tk1KOaNZCFR2SESNGyL1799Z7zaZNm5g0aVLrGOQCZUvtVLelpKSEu+66i5UrV2I0Gnn33XeZP791dEtL+kUIUee7TUUwFY3ir6fX8WyvG6ocq68DVFeDXb7rCJ7TBnPr23MJGxLD6Rc+VuKyFqICC/nNwY38e0j9o161reu88NpXBAzvyYy/34Iw6JBlZs79c3mV6xuLu+tHm4q0S6ylZqylVTOlSpudoqQcTizZSsTYHkx+8hrOfH+yck2vtdxKUUouAd06NbjOdxK+atRU8IZ0Tts6S3RhQjbHFm/m2OLN6DwMmCIC8Awy4RVkwivUF1N4AL5dghk4awiDZg3lwtcH+eGt7eSnuLfnr2+4Hzc8NZmuNw0h91QaadtqZil1xpXP7RYbdoutqo/tjp8KatlyrDw1m3Mvfkbfl+9l+nNTWfHsuspzWWcy2blwO+N+ORFTRCB6LyNCCPb9ay2Z+xMoOFhz/0zffl2InDeJc6v2kXFKE03mLNeRsJoPJIn1adw6WKHXubVVSXWqfz9re5fHrz1M/NrDhA2NZeIbdzH7jVksnf8BdkvV+uqaMlvXmkxX02Qr629AJLMucakz6Jj75m0E9erMll9/TM6JVC1hlxBk7o+vcb2KUioUipbCx8eHZcuW8dvf/pZXX32VBQsWcOHCBf74xz8ihGhr81qEDiEwIzzdX4NZX2IVdyNCCnim58+apZy8XVo0o+v47mR+f4CsHw9ekQLSHf4+yP0MZFV8aC8k/s3V9Py/uQCc+uOHdLKnQmDTbarrs6r+d1M9+VBLsPflr7nhs0cYPn8U297cVHm8KDkX3y41M8G64v6Ym+s93xwd0ubMEt1U7GYrhQnZFCZk1zjnE+5P37vH033mcO67aTD7PtzDjne2aNlcR3clckgXbRCgzAI6QUzfMIJ6R2DqHIDNYuXo4s0cf3ezy4zAtfm8uTr+uduPk/zRBnrcNYVhJ7LY//GlaOPO/20jMsYfn4hA8tOL6DahO4N/dR1nHn2z1rJC7roenV7Hd69sJqy3JoQM/j41rqv+99Ec7UmsqRTriKF4Bvpw/qv9TS6vtnd5xd9r5oEEdv1pFeP/cTvz/jObZU+srLEdTkNFZmvR98YBRIztye6/fkWKI2pemlnzfaWEpUKhaA30ej2vvPIK3bp14/HHH+f555+nc+fOPPDAA21tWovQIQRmprluQVJXB9hdEVOxBq6++69EEbooYRsPx11d67mGiAlb8aVspnl76o9uXOn8+9Qenu47qlH3euzcRMqnnbGXW/A6uL2ZLatJbR3rlhaZBeczKUzIJqZvWJXjPp0DGrVudFnaxhr7jrZkZ7Q97oFbQcnFAvb9Yy3H39vKwIenMPKeUQy9fRgGbyNCp6uMLBu8PZB2OwXxWWTuj+f0qXRSNp+kMLGmaK2N6j5vbn8nLv4O79hOTHlmKunH0kg9qK2/tVlsVfalvOXV24joFVpnOfmrNhM5rgfXPzWZlX9cS86JVGIfn0Hhkfh6Z2DU1Z40FFPSBXLymmetYn3vcoCkH46x56U1DP/dNOZ/MJ8ld1SdYtxa1DY9tr7EPhXR3Yz9CXVeo8SlQqFobR599FH8/f25++67efzxxxk3bhz9+/dva7OanQ4hMIOMVUeOmzsCVr1TUL385qrvchKqt0YMcX1RA5EWq4pe1sP8uKa9gKyffNZMljQc52RFDcVZaLm6vyA+s8p0WL2HHr8uwST9cKzee2sTc9eFVl1W0Nqd0cbuW9uSlGYVsvsvX3Hmi130mjea4tQ80naeI+dYSmWHviFTN6tPq6/u82bHZufsi5/Tb9GvmfnqLN6bubDGFkkASFnvtKW83adJXvoj3e69jr47Eln22Jfcu+J+ev3pLo7+8m2kIxlZSp5fre1FU9/15RfzMF3MIwtvhj11A73vGMPqm16hONX11OWEYu8q36363uVCr2WWPvvlHkqzCrn6lTu45Y9TWfnHdVWuqy+7bGNwZ3qsq6yx+Y71wr6RgbUOMClxqVAo2ooFCxawceNGlixZwu23387u3bvx8ak5C+ZyRtfWBjQHJfZSogILK39akpYs/3ISV1uy69+brb7Gu67PqqX3ELzc+S79Qlub0CScP/fqP+7gToewICEb3+hgdAbt1RbSPQydQU/e2frXfdVW9p6
8U/WebwtiTaUt/uMOuafS2fXCKo4u3ET24aQqgrIx6wIrcPZ5S2ErLuP8Cx/iFWxi1j+nQy060m6zg67+dTFJS74nf/9Zpj53AwZPA9/8YTV+/WLo+vcHCPvVbEJ/OYuA4T1d2pNQ7F3jpyH0vmMMAFe/eicGbw+37nGuo653eayplBnfPcWcn/5IUJ8IUjad5PiSrfS8bSR9pzVusMvdJD/VqSu5T31UJKTyiai5FqC1/p5bo0+iUCguT95880169+7NsWPHeOKJJ9ranGanQ0QwffQNb3waimokqtLfL8LlNRVTIp0b8/r8KC21Z/RTaAwNbN70/e0JV39fFR1iV2vaDImJ6IxXM3vhneSn5hEeo3UuveMvUO7ChupTeHuYotqNsGxNGjqV2R0fuUr8VVFnD1NUreebm+LTKSS8uZpuv7mVx7Y9SVl2EdYyC7ZyK8WlNmJHx1EWn1F/IXbJ6T9/wsB3n+QXXz9Cea4Wwut8VTc6X9UNgMJxPTlyx5k6v991+bn6e7MuYk2llOcW4xlkIrBnOOFXdSNl80mX9znXUd+7XOi1gZrwq7qRezKNI29voPuM4Yy/fywn1h5zq56GUFvksrq4rCtyKXSC8H4ReAd6Y/AyEDU0BqBysKmClv6bVn0FhULhDr6+vnz22WeMGjWKhQsX0q9fPx5//PG2NqvZ6BAC0yw7TuSrudbotDSpZfn0c1NkVuCq4bWVmes9f6WTVFrAkKCGZ0PtCMSaSmv9/lRfC529Wz2atQAAIABJREFU6QgXv95N0MBuhHULRmfUE//fbyhPdW8NoPP39UxJBtDw5EAdAXdFprud9erl1XZfrKmUHbl5rSbs01ftAL0OXa+u6L2MGLw90Hsa8A/0IvdEGumLvnZZhiW7kFO/W4xp2jjsNjv2cqu2p6zVht1qh4NHG22fuyLz3EufYevZjaxDSaTvqH9mSW3U9y4/9+ePMYwaxNllWkKkuBsH4Rnow4//+L7B9dSHO8JSO1bz3i4jY+kzrT99r+uDV4hvlXPnVu3j/KpLiZCUuFQoFO2JIUOGsGjRIu6++26eeOIJIiMjmT17dlub1Sx0CIGpq22O02VMfZlu2wueuoZ9ddxpeO1lbZdx8HLAq4E+7yg0JCEXwLm/L2uWehv6He9oNHdn3J3yorztrRo1Tl++HWh40ivnd3Tx6RSKT3/RzJa5T97Ok7DzJJ5AbDUB5mqQIKHYu97ved7u07D7NNZSbzwCfRjy+M/I2B/P0VWHXNrlTgbZxgpLvYeefjcPZMzdowjsGY6lpJy0bWdI3niCorQ8bGUWLIVlFKdpa1KVsFQoFO2VBQsWkJKSwh/+8AfuuusuwsPDufrquhOvXS50iB6UvoPuIeOq0apPgLZ09ls/g6fb17rb+NrLlcCsD3+je+urrnSaaxZAQ77j7qK2QqqflvB5Be5uqePq+tqucfczbOhnXV0gNlQouROJdtfng395DUaTJ2ueXwf17zbjksYKy4DoQPpc35+r7r4K71A/ck+ns/P5FSR+fwxbLQOUrTFYocSlQqFoKs888wzJycm8/fbbTJ8+nR07dtCnT5+2NqtJdAiBWW7vOFNkG4IrEdnY7LfudIISSnMYEtClXtvcISqwkKyNhwidPBhLTsMb6pCIYrLTXKQTvAwJiaiZkjE9L4OQiIAO+bzNTXPsP+jqO95QXG2ZpIRm8/u8gvreR80hEOorIyXPr9k+W3enzTaoTDd8HmsqJXpKP05+d5zss5kuy6wretnQNZZGHyMxV8URN647Pcd3wz9O2zombcdZdvzfl1zcUzPxWWtGwJW4VCgUzYEQgjfeeIPU1FRWrVrFc889xxdftN3MmOagQwhMUysk+WmvuCMia0uV76rM2nCuZ1hAjNv3uaLkrXc5sDiaTjKtQfdViLDaxNjlKsJqe5YKro4Mr3LN5fqMbYE7383qIqDiO95aUUcV3az9vXI54tYgXSO2nGkJ8eSuz6XNjrm49nXyjdmipK6IpV+EP4NuHUrPcXGEDohGZ9RjLTWTsS+eM8t2k7rtNEVJOY6sxw2vV6FQKNojer2eN998kzVr1rBixQqSk5OJjo5ua7MaTYcQmHkWV/khOzauIiDNNcrqXM57iSf4eczYZilflpsJLj/v9vX1ibDq11xOIszVcy07G8/vhw+qcf3l9Iztmerf42UZBxkdUfu+jK2VjKs5IrGXE99nXnqvtHda+7Noyh6y1fcbdcZdn9vMVgyerrsM7qy9rCtq2WXk/7N35vFdFOcf/0yAQAhHSIAQECHIIYcVJAJyCW1R0KLiiVbECypaFaXWo2KtWMUqnkUt+JNTi1pFsUo9CYjIEcED5JRwCOEw4UgkIQmZ3x/Jhm82e+/s7ux+n/fr9X19r92ZZ5+dnZ3PPnO0w6XPXY56TZJwaONebJr3Jfat3o6krZvBSytnGU8DkCZBlUfRS4IgRHPKKafgsssuw5tvvomXX34Zjz76aNAmOSYSArNF/WAXH5cFP294d3c7Hcl1/b/BWhGXWtvLLsKsHNftv+pquK/sxxg2xnU40/D/ICKOUReco9toC3q32O3FYSU9N9iNXqrFpZ1yYCZMrfq8ovSEJYFpRqy4jO0O+6vLe+G8hy5A4U8F2HDrP1Hy088AgAZwPeSTIAgiNNx+++148803MWPGDDz44INo0KBB0CY5IsF8E/nJK3HQP4dwxWMbV/qep11xqd7Xzf4y8OCqtYb/e3l8yjgyrVdUCaKM2yVqC7lPz10atAme46RrbCxmy/UY5aMlNq36/Je8w+j02y4YeOdQJCZrTzhmFr3UEpdterXF6OcuxfApI7F/zXZsmvB8tbiUkahdcwRByMWAAQPQs2dPHDx4EG+9JWZW/CCIRASzTVIj840Iofz9jEG+5idKPImM9unZ5FUk8dmBfU238SKaaSYiRUeHZEFkGffaP1GZKOiejsOCNsESbrpIW11jNJbYyX2s5qvkE7uv1qy0Vn2+6uGF6DnxfPS/ZRDOurIXFv/1A2z9dLPlY1CLy9QOzTHqyYuR1q0NSguLseHVZTg69wPwExW19hV1/bi9PqJYzxEEIReMMfzxj3/EzTffjH/+858YM2ZM0CY5IhIRzN3H5Kv0lYhZ7CtKTFz3mW95eeE7J+fE6vl02o3XjJuXLLeVpgi/ebX8QhiwW8a9HgNthbA3gP+2+QPP0hZdRsPg69iIpZ6oterzlscO4Ku//Acf/f5llOT/gvMfGgH1EtRaE/eofz8lGUhsVB9Xvnglkls1xZq/L8LaS6fgyKz3q8WlEiUUHS10k14YzjdBENHg6quvRrNmzbB69WqsXr06aHMcEYkIZtuG8jRurQiPKIyVe7bXb3zJx2th7lX6Xiyf8srQgY7sAPwpc1GLZN6TeQn2HHY2A22Q+DUBkUgUH8/oOxiA8ZIfbpCljBpNuqOH0yVK9CKmSlp/7XKhrfQKNu7FD7OWof9jV6D1madg7zc/GW6vJTov/8dFaNS6GTbc+TLKvt9R/btf58ZOxF+G8kIQRHzRsGFD3HTTTXjqqacwff
p09OnTJ2iTbEMRTBc4jVJGIZrpZwQzrIiOdtqJYLrJh6hERDQtqMZpWBrF6oiSWb2ijmx5Fenyi8qlNop9XbtRjZ1yrti5d/kWnCgrR+ff1l4I/JTketWiUmu22LNvOAenDO2KnS/+F4UBiMtYzMpTGMsUQRDRYMKECQCAN954AwUFBQFbYx+KYDqAGuv+RDCj5GcRx+IkgqllQxQi6H7w1y4XWm5gmq1HS9RGy0du6xU//S46SuyVyDSb7MduBLNdcjF2FgH7V21H1/NOR/ZTn+KU5NprYWqJy1P7tMe5d/8a+dnfIe8/zh+YEQRBRJ0OHTrgvPPOw8cff4x58+bhzjvvDNokW0Qigrm32B8hInIsZdjF0yMbVniaftj94wX3fZUjJB2vfBs1UfV/ez6ytb1sUQ9Z7NBCzzav65V4w0oX3Oe2L3GU9u7Pf0DjtqloeXo6gEoBeYrGsyvl9+adWuDSf16Bwp352PZEzZkRZS6rBEEQQTF+/HgAwIwZM8B5uBZsioTAbFm/oed5eDXRTFi5rWMvz9IOs1+8ZFLPHsLSsurjeGn4aUWjvCzjfiHj+TOyKSw+92O9U7OHFcoMsUYvK1zX1nx2ajXtkouxZ+lmVJyowK8ur3nOFEEZKzgbpTfGVTOuQXlJGbb+eSZOFJXUOFaCIAiiNhdddBFatWqFH374AcuXh6vXR6ACkzGWwhj7D2NsE2NsI2PsHMZYKmPsE8bY1qr3ZmbpHCorMdvEFV4KnrDOMvvWbuvT0wPa41X1XoQ287f8GEi+VsYphbmRqIgF9bqedsu4rITp/ITB536IS/V3L8/fh/vXO9qvVenP2P7eWpz1+z743ZOj0OX8rqjfpOaC4I3SG+OMy3ri97OuRWKj+tjy5/9D6YHDIswmCKkQ1Z4liFjq1auHm2++GQDw/PPPB2yNPYIeg/kcgP9xzi9njCUCaAjgAQCfcc6nMsbuA3AfgHuNEmlcV3vRZxH4KXi8mHnUK4a0bGtpOxKM4jivbWuh6YWpvHmJXuPdahmXEa3ZUrWO0+/ZZs2EUph97jXqcb5O1tPUol+zTMf7HnjuTZwoKUPHkT3R7XdnoOJEBUqPFuPE8XKAcyRnpAAAftl3BJsfmI1j2/bW2D8sDz4IwgJC2rMEoWbChAmYOnUq3nnnHezYsQPt27cP2iRLBCYwGWNNAAwGcD0AcM5LAZQyxi4GMKRqszkAsmFyQRafKPPExiDEUVga/euP/IzTm6QZbkPiUizf/FyA7qn0ANQvrJRxGbEjGmMb+DIsbSK7z/2OXur9r9ghQmRuLtqP05JbONu5giP/pXew7pmPkNa9DVr1Ow31U5NRJ7EuEurVweEt+5G3YitS9u10ZSNByIzI9ixBqGndujWuuuoqvPbaa/jnP/+Jp556KmiTLBFkF9kOAA4CmMUYW8cYe4UxlgwgnXOeBwBV7y3NEqqbcPIwojAJTxiEWcsGxuNew3AMYaNVQ/fRCsI6ZmVcRtwIIBm60obR50Hg9jzFitK0xEZuzcGpDX5B8o9bUPjaYnw99QPsn7YAeVNfQ/E7n+qKy6DLGkEIRFh7liC0mDhxIgBg5syZKCoqCtgaawTZRbYugLMA3M45X8UYew6V3QcswRgbD2A8ACTXqYeN2I4jpaUo+fEE2jVuhDUHfsbI9m3x/OpcTOpyNu79bimm9RyKu9Z9jmd6/RqTvlmCJ351LqZtXoMx7bvjo3256JWSjsP1D6JB3TpompeILYePYnDrdLy1bQdu/1VXPLhqLZ4d2Bc3L1mOV4YOrH6/76scTOrZA/O3/Ijz2rbGNz8XVIuBfceK0bN5Kj7evRfXdj4N075Zj6nnZNVKY+LyVXi071l44buNuKJjeyw7vBWtTrTE0bJSlFSUo21SE6w7vB/nt8rEvB0bbB3T7uKjaJBQF03qJeLHosM4J60N3tu7FeM6nInHNq7E388YhInrPsOzvX5T/f7IhhW4rWMvvLV7M4a0bIv1R36ubvwdKDmG4xXleHnbN7iibRdM37YOD3XvX73vvZs+wisZGse0dz86pzSpPE/lNc/TzB+2YHLWmbht2Ur8a0h/jFvyJWYOHYA/ZK/A9MH9MCXnW4zr1hnv79iNs1s2x87CosrzlBjwefLxmEa2byv0mMZ8uBLP9voN/vL9F3igaz/M3P4tLm7dCV/l78FpjVJ8LXv3ZF6Cv23+ADP6DjYtez2aNkf2gd2aZU95F3FMM378Fv3T2vhyPYk7pu9xZsMe+KRgNSY1cX6eOiR2xicHN2J0myzM3vtpjWMav2oZ/trlQjy3fQmua9sXH+5fj37NMrG5aH+1WMkvLUKXRulYeSgXF6T3wNzdq3Bnh6Gmx7TkwC7UT6gjbb3XMqFdrWP62+YP8NcuF1a/P7ntE9yWeS4W7MnBsBZdsfbILrRLSkVh+XEcryhH6wZNsaEwD4PTOuKdvG8wrt1ATN36ER7sPMLRefosfwUGpAzCa3s/xU1tL8D0HQtxW/tR1e/zfvoYF6cPwNKCb3Fmk9Ow49g+pNRrhH2lpcgvLcLxihN47afVwo7pw4PLMK7hyWN6ZPOHeKjLBXh0y2Lc1+l8zNy5HJdm9MTHuetDcD2JKXtE5BHWnk1PT0d2drbh9kVFRabb+AXZoo0XtnTv3h0bNmzAww8/jN/97neB2mIFFtS0t4yxVgBWcs7bV30fhMoLsiOAIZzzPMZYBoBsznkXo7TaNW7EN1w9ylK++XnJoYquydpd9j+7N+PytjVPS5j8GkZe2/Ijft/5NNPtZC0zRiiRN5miGlplnDiJk2ip2fkNg8+97CbrpPzbsUfdnbZdcjE+3L8eF6SLm6HaCjJd537Q/r//+ppznhW0HYQ3iGzPZmVl8Zwc4yXJsrOzMWTIEBGmu4Zs0cYLW+bPn48xY8agZ8+eWLt2LRhjgdmiwBjTrdsCi2ByzvcxxnYzxrpwzjcD+A2AH6peYwFMrXp/zyythnWtH0bYRJCsYzJ7NG1e/TlsPpURK+e4PW8jZVkQgXoCExmILeNBYnZ96ZUJK9elm/KkHg8oAll8boRsZdWOPVpjNrs0SvfCLF3iTVwS0Udke5Yg9Lj88stx11134ZtvvsGqVavQr1+/oE0yJOh1MG8H8Bpj7DsAPQE8hsoLcRhjbCuAYVXfDTlaWuqpkUEjo4DLPrAbgJy2hYn8vGTLjXzF51FFtoZnmP1t9boUsTyQyPMWZp8HiZ3lg9olF9f4vvJQrl9mSneNE4RAhLRnCUKPBg0a4KabbgIAzJ49O1hjLBDoMiWc828AaIVWf2MnndQGDcw3Cjl2G4BeR7rGZ7VFWhKJS6c4OT9XSN51MGrEm7/d9JYQFdULi8+9imJqLS/jFqNI885fknzrHkvikogyotqzBGHE8OHD8cQTT+Dbb78N2hRTgo5gCmH/sWLzjeKM2MiElZfddKd942xx7njHTsRSzfRt6wRbQxghi7+VMqP30tvHCUFHMmXxuRWsRAydLCHiVffbWPtio5hzd6/yND8ZZicmCIKIAt26dQMA/PDDDwhqDh2rBBrBF
MUpjWhqe7doNSzVjdTYbaaeQ/MV+M1D3fsHbUJcEa/+Vq5zJyLVbWQvCj5X+0ARmequqUGh2KeMx7yzw1Bh6RK1oWEkBEGIokWLFkhLS0N+fj727t2LNm3aBG2SLpGIYO4oDMeaMGHDKMp585LlAVkVXtx2W5647jNBlhBWCLu/3ZY3p2Mz3QiNsPtcQe0Du+LS60mEYiOZf9v8geu0oi4u3fT4IQiCEAVjDN27dwcAmM02HDSRiGC2b+x+oWjCHq8MHRi0CXHHs71oKIefRMHfish009B1EtF0Kjii4HMF2UWXEsn8a5cLgzbFd9wKP1lndycIIvqcd955WLZsGd555x1cfPHFQZujC0UwCUdQBNN//Iju0BP3k0QlmgbUHsfpBD/Khp8+Fz0+XQurM7tqbesHbVIK8WTuu47zlFFEe31OY/Nx8z9BEIQTrrjiCgDAe++9h+PHjwdsjT4UwSQcQRFM+7h96v1sr9/YarTYzYsaRDWJUjRNjVbZsHL+3YzPtIIfPrdbztXbizh29TjNIIVarM9lW+PTDkHUXxTJJAjCbzp37oxf/epX+O677/Dpp5/iwgvl7IUSiQjm7iJqGPvNxOXezDyY1LGepZeXOJm102vSMn7BlB8/t72Pk0YXCc1K/vL9F0Gb4Ct2yrdXZcRrn4uwW2R0UwbUPrdqlyz2A8HWWV5GSQmCILRQophvv/12wJboEwmB2TqZZpH1m0f7niU0PbvC0SuRaaWB7bXI1GusOPW5lQaP+n9qIAEPdO0XtAlS40UZcetzvbLuVaM/CteJls9lEo9mROEcEARB2GHUqFEAgEWLFqG8vDxga7SJhMA8UFwStAlxxwvfbaz1m9NIo1OxKFJk2o1OOo1mGjV+zRrBWj63m7eTNU/j9cn8zO3yL2TsBXbKtejy4MTnWmXTzzJrNX1ZryE9nxuJTFkEqGy+JAiC8INu3bqhY8eOyM/Px4oVK4I2R5NIjMFsVj8xaBNChVqYFW8rs53GFR3ba6ZllJdWPm5FYlLHepbs9yrqmJ+X7HpMl1UUn7tFPY7Oif3xMO7o4tadgjYhMOyUa5HjMu36XBaBoXdNOFnSwu9ry8jnMo0VVSPLuScIgvAbxhguueQSPPXUU3j33XcxePDgoE2qRSQimEVlcoaHZcIoquhkbOOyvftti0O/x1IqREUMLdu7X2h6MkZTZOKr/D1BmxAqRJQlOz6XrezaifbH/tcmpTDQYzHzud8z25pB9RZBEASqlyj54AN3axl7RSQimA3qREInC8ONcLMaETyje6rjPERiZKtIYak8xddqZDmJYjqhc0oTz/MgTnJao5SgTQgEt2tmurnurPo8CgJD7xj87iEQpnIehfNOEAQhgr59+6Jx48bYsmULdu3ahVNPPTVok2oQCYF5gvOgTfAcv6J96rz0urUe+aHUN3uc4HfU0orIdCtEj5TK7fOocbQs2v72qrHuRiBZ8XmURIZfx6KVj3KOwlLOo3TeCYIg3FKvXj0MHToUixYtwqeffoobb7wxaJNqEInQX0XE9aWf4lIrb61urcUSdEvWi156KS6drBEnanmTkvITrtMgrFNSEXwZF42sE80omPlcVrtF4/Y47UxyFIZyHi/nnSAIwg7Dhg0DAHzyyScBW1KbSEQwExPCp5ODFI0iaJ8azsW4nWJl/JHWpDlqYekmitmucSNH+7kl9hjiqaHXNom6JDvF6YQ1ej6Pp3Kn4CQSbHdSofy85MDKuVE9qbcdQRAEcZIRI0YAAN59910cOHAALVu2DNiik4RPmWnwi6RrwOgRdnEJAKt2HQzaBOn96HZWSTVrDvzsxhzCJusOi51UKWiCaKhbWZYn9rWlYndcL42jxu4SKE7S97uca9lqVCYIgiAIbU477TSMHDkSJSUleOGFF4I2pwaREJgpIVqmRHZRZJVLuss1mDiWoBslXnXRHdm+rSfp2kFUd98wcH6rzKBNiATq9SmNcFvGtbr0h73ONRJaIkTY6F+lGwo8kfVp0HUzQRBE1Lj33nsBANOnT0dRUVHA1pwkEgLz5+KS6s9+NSacNGLC3tCJ5aWvNgVtAgB9nwYVrXHSNdBqWZr5wxYhdhLWmLdjQ9AmCCPohr1VoeKmjBvVr1ESmqKFnxWfi8gr6DJIEAQRRQYMGIABAwbg0KFDmD59etDmVBOJMZgZyQ0BnGxgVE9EY2G5DbuYNWL08pWlcZOQmYaK3HzX6Txy3lkCrBGDnt/9WLhcrwugF0zOOtOTdM0IagH4oJnU5eygTXBN2Br1Xpdxsxmy4xE/6pWwlUOCIIgw8eCDD2LEiBF4+OGHMXLkSHTr1i1ok6IRwdxVWOSLgLOaR+zTchmenCdkplW/lO9uGf/2l67TEI2er826fzmJDIiKIOiVDa3fb1u20nV+boi3sVH3frc0aBNsEYXxa36W8ah1pXWK1z4PYzkkCIIIE8OHD8f111+PkpISXHvttSiVYFm7SEQwM5trz2ia1LGesKfUThofMjdY1JFMu5HNV68c5IVZQhARwfajUWRWPtTH8a8h/T23yQ5Rj2xO6zk0aBMAxEcDXSnrczueG2hkUeQ9IyzIVq9ECZnbAARBRIvnnnsO2dnZWLduHR5++GE89thjgdoTiQhmboE8g1qdEBth1Hu5SdtK3lp2GDF2wTLHNlmxR8SxyxydsGOPYv8tOV/ViozLcIxhjZaZcde6z4M2IZJ+VdAqt2MXLAv8Wg06f1FYrRPGLfGuN0qUy68eQdfHBEHEJ02aNMG8efOQkJCAqVOnIicnJ1B7IiEwM1P11wd0W8nLcqMQ0a3VSZ56+c4ZPdhna9wjgyBzk6/icxnKYzzwTK9fB5p/PDbOw17GZblfxNpgZs/MoQMspRnEEAKCIAjCOgMHDsTEiRPBOcfkyZMDtSUSAnPHIeMIpt0bvt8CRMSkO16iJTRvfPMLT/KqyM2v9fISN+daL4po9HKDFZ8H2biNWqNy0jdLHO/rZoxv1Pyoh1ZXVDf1isiurWEeEqFlh5Ftf8heYTsPv5Y1CRuylAGCIOKX+++/H40aNcL//vc/rFhhv34XRSQEZrtm+hFMBavLiAR1g7AiqpxEMfXSciLcYrutzrjM2lPvMGF2/oOOfobF51FpYD7xq3Ntbe8kwqNeIzIqvrOKWhTGlnEn11hQ4yfVtuqJPK/rDyuznKuZPrifJ7YQBEEQ/tO8eXNMnDgRAPDXv/41MDsiITD3HDlmaTu9G7ss3ZpiES0yRUcF/7p6o+Y4SSvjSfX2kQWZxjfG8tDHa13t7+cxOYlmyBYJmbZ5jek2ImyV4ViDJFYUui3jIrEza7iTbURfh07tmJLzrZD8CYIgCDm4++670aRJE3z66af4/vvvA7EhErPItmzUwNb2MogFK1Tk5kslvGK59byeNb67sdOJ4NWLVITl3OpRvK1M9xgmnHO6z9acxM3smm7Fl4Lfs9WOad+91m/xLAT9QF3GncwIbXQN2cUsf7OIoUhbzLCal/paHtets5dmVedplbDO4hvGew9jrDeAQs75lqBtIQhCHM2aNcPE
iRPxyCOPYP78+bj99tt9tyESEcxDxeLWexExe6mdvMLKwtXbHO/rNIpavK2s+hVFlOPSO753N+xynLZW48fMj+oIS9DRXL8jmx/ty5UqohpVYsuTXhkPuvHutFeDyCioFazWj7H5vb9jt5C8tfJwUl8Efa6dEEabGWN1zzzztJzf/jZrc9C2EAQhnjvuuAPJyclYvXo11q71v3dQJCKYjRLFHIZa8NldG9JJHmbYiWLGNiy8vuH165zhaD+7/rQrJq0+xVd8KssES1aOs++pLXywJByNJadrcNoRiUPqNLGVdtSJLRdePeQxKuN2ouh+Rg6DwCyyqvxuZUzmwPqtkNRWLl9ZOXdeP2j0o7wHyezZ95et35CLHbl5WLrsBX7u4NtZ0DYRBCGOtLQ03HLLLZg2bRoef/xxvPXWW77mHwmBefzECcf7WlknMmgREisytWzRuvl53cDKPXAEfTraE5lW/ejmZm5HXCqfgz6/Wmidvx0FhejT1lhkavnOiyiKmy6zIvEyqrizsAhZLZt7ln4Y0CsTohrfdsu4LOUuCPTGcRr5w8p9wEq9IiN2yqDdHhx6kza5qV9lgjFWt1evTvhw8T+wf/8h/GnSizj3E/+70BEE4S133303nn/+ebz99ttYuXIl+vXzb1K3SAjMBGb/wVtQ60o6xWmXUs9mK7QZNfZSXNo5xrB0S9ZsTNYz97mdBrjbsmElLydj6GShQd06QZsgFK9nLrV7jv0o42FC67icjKm0ul/1thZ8LjtulpXxs+ePLCjRyxYtUtCiRQpSUpIpikkQEaR169a4/PLL8e9//xs33HAD1q1bhwYN7M1b45Tw31kA1LEhMJ0IDBFRLq+EjZuGlhubUhrWd7yvFiIap2rCIibV6B1b0waJwtLyCisRLyAcgrNpon1/y4hfZUDEww2rZdyKqA1jN1mn9ro5Tif1SpRw47uwlS+gZvRS4cHJY21FMRljdQDkANjDOf8dYywVwBsA2gPYAeBKzvkh0bYTBGGf66+/HuvWrcOmTZvwt78HSdbKAAAgAElEQVT9DY8//rgv+QY+yQ9jrA5jbB1j7L9V31MZY58wxrZWvTczS6OkXL+LrN7SGHZxuq9RvkF2zXQrvjbtLRBih96kFPl5yZrj67QmjTBbBiUMWJkQY/PBI5bTin33EqfLnsi4DIyaLYePWtpOtmMI0q9u87NaxmPzk8XvYcXI52GuUwltZs++v+w3v+2NFi1Sqn/r0SOzOoppMZk7AWyM+X4fgM84550AfFb1Pa4Q0ZYlCC9ITEzEq6++CsYY/vGPf2DNGvMl2EQgQwRTqaiUGTWUimoqY+y+qu/3GiXQuH5tweEFWpHM2LyM/osaQ7ufanlbPSGtFpZmE7ZoCct4YshprSxvG8ZGt2yTapx3ThskNfdmBkyvxxkHiVkk08h+O2XcSp5hjGL6jdrnRvWq1n8yjmH3ithyFsZypRW9VLAaxWSMnQLgQgB/B3B31c8XAxhS9XkOgGyYtNsiiOu2LEF4xTnnnIO77roLTz/9NCZMmIDVq1cjIcHbGGOgEcyYiuqVmJ8vRmUFhar3S8zSKTh2HIBxtNALtGadNRKcXmBllkA1Inz0+vKN5hsZIIu49Ltx5EZY/HvddoGWyI2fETi9JSi89LfdpS5ki5K6wega8LuMRzlCp9ezQ/1asGOfKx9E1X8RZbQ6eqmgRDEZY/1N0ngWwJ8BVMT8ls45zwOAqveWogwOA6LasgThJY888ghat26Nr7/+GgsWLPA8v6AjmEpF1TjmtxoVFWNMs6JijI0HMB4AmiUlYnnxMRzOOYTi0nJktmyKlVvyMKpPR7z48TeYctVAjHv5I8y6bQSue+FDzL39AtwwfTFm3nI+Jr+xHLee1xMLV29Dv84ZyD1wBEmJdZHSsD427S3A0O6n4vXlGzFpZBbumbcUL40bhjHPf4B5d1xY/X7X7CV44NK+mLVkPUb0ysTXX/+IVo0bAgD2fb0VvfuchsXrcnHD0B547J1VeOb6odX7Xvv4O5g7+lzctnAFnrjgbDy9bD2u7tUB2T/uQ5cWTXGkpBTFZeVon9oYq3YdxCXdT8VLX23CI+edhfFvf4lXrxyEW3K+wpzRg3Hjm1/guZ59MCXnW9x+QTe8++VW9D21BXYUFCKpXl00bZCIzQeP4NcNuto+prXb9yOjWSMAQN6hIgzv2R7/eG+15jEp7xNmfoInx5yLpz5ZV+OYDuQeQ0n5CbRr3AhrDvyMgQ27YN6OVZjU5Wzc+91STOs5FHet+xzP9Po1/pC9AtMH98MDi3Mw4ZzT8V5efuV5WrHJ0Xm6r3dnzM7ZiuFd2mDd3vyT56nwGHq1TsP/Nu/BVWntMe2b9Zh6ThZuXrIcrwwdiD/krHB9nsYuWIaXs87Bda8sxas3DMJDH6/FhHNOx7sbdmmepyGntcK/123H3YN74N4P1+CJC87GdQuWYu7oc6vf735/Fe4feqbpMV2f1QmPL/kWT4/si7Hvr9Q8T9Pez8E1A7vi86UbbR2TUvZmXDbA9jFNH9Xf/JiOnDymXdsK0bN5Kj7evRfXdj6t1nlS3icuX4VH+56FF77biCs6tseyvfvROaUJjpSWoqI5ah7T4m81j2nrz0dRfqLCm2OycJ7Uabgte56fp73Wz5PeMaU2rI9l2/c5OqYpW7/FDRkd8f6O3Ti7ZXPsLCxCg7p10DKhoeYxvXT3yFp1xKSl39Wq99KPV0h/nv503WBf6nKljliyYRdOb52Kw8eOm95zxzy+0Ney5/V5mn5WP4xb8iXmdxxi+Zgk4qzhA7oBh7SHRw4f3AP/+c/S2xljz8f8PINzPgMAGGO/A3CAc/41Y2yI9+aGBsdtWaBmezY9PR3Z2dmGmRUVFZlu4xdkizay2vL73/8eTz75JO6++240b94ciR7ONcE4t9rlXnDGlRXVBZzzW6sqqj9VDRY/zDlPidnuEOfcsO96iyYN+YH/u81ji+1htbtsEN2LTJ80d2gDbN9juMmEmZ/gpXHDLOWnPsbYCIZR5FJZgkKJ2mja3aFN5buBvXZ87GS6ezs4mbFQOW4tn9stP1ajDGHu9iaqC9ttC1dg+iizh/mEFlYi9Vrnx63PrS4j4STaFvQ14VWE0E5d7oag/ScKvfpFq/eS8lvClU99zTnP8slEXRhjT3/23pS7zh3QXfP/Of/+HDfd/s87OOcv6Oz/OIAxAMoBNEBld9B3AJwNYEiVkMoAkM057+LJQUiGyLYsAGRlZfGcnBzDbbKzszFkyBCXlouBbNFGVltOnDiBXr164fvvv8eTTz6JP/3pT67SZozp1m1BdpEdAOAixtgOAAsA/JoxNh/A/qoKClXvB8wSatdCvgXRrTYG3HRJChKn4jIWs26xpiji0mH+aqw0ikWODbSblpbP7ZQFO2Um6PLlBlFdSUlceotW+Xfrcy9nlw7ymvAybz/EJRDuOsUIrTpYhnu0F3DO7+ecn8I5bw9gNIDPOefXAlgEYGzVZmMBvBeQiUEgrC1LEF5Tp04dPPbYYwCAWbNmeZpXYAJTZEWVe0C
qLii2sXIj0hrnEuTsqWOe/8DRfnZElVqA1hKL2/ecfKmoyM23tfamHbvsbh+L3fF0sefTqc+dYOY7q+O7wtzQum7B0qBNCC1OBb7sPg/b+slW8LNeCXN9oBCVsdCCmQpgGGNsK4BhVd/jAhLdRNgYPHgwAGD79u3wshdr0GMwtZgK4E3G2E0AdgG4wmyHzKYuI2ESoLfWptMIp4JWmhW5+cbpmnSPBYB5d1xouo2ILlH5eclIwy/VN3Strsda+biNNGpFV5Uuu7F5+NnQMPK5iLVaFYzSER39kbnb3NzR5wZtQtwhwud21uN0glKW/Si7fogxK3W5aPz0IeENnPNsVM4WC855PoDfBGmPhNhuyxKEHzRp0gSpqakoKCjAvn37kJGR4Uk+UghMtxVVbkGhoWgKy/Ihtuwy6h4aIxD1hIepyDRBmfxBC9GNBkVkKsSKuti87MxMqxaLVvbJz0sOVGQa+RwwFpluy7yXS/+okaXRqUwGQviHKJ8rItPLpY28Fkl+9kYJQmQC4R8TLmtbgggGEt1EWMjMzERBQQFyc3OjLTDdkpnRzPbaXaHFwrhD9SQ9RiJT+d8uc0b28+ymv+dwY7RJKazxW6y4M4pOxArEPYcba27TJqXQ8fhPZb9YoemXyLTSCHQbyZShISdLdIPEpf+Y+dxO+bZ0TWrVpxZ6cDi1yU6afhGUuLSD38t/EQRBRJlOnTrh66+/xg8//ID+/b2ZbyLQdTBFsfvnQvONAiBQYatqOOnZ4tTGSUu/cz2eRiuKqIjCPYcbV78U8vOSTV+x++ph9J9V1ALVyy55CnfNXmJpO9HlLqhyHPQYzrvfXxVIvlHByUMXPZ/HlgOn43trbav3sM7KQzyztF3gd3m3Wq/IgkwPjEnsEgQRRvr06QMAWLlypWd5RCKCmdFM3jGYQm+Gdhs+FiOZuvkYPMl/4NK+9tPVIC3jlxpirU1KYS0BqHxXRzW1UO+785ekGt/bJRfbTlMPrS6zXhLrczNERVVkaczZjWCo7Xayz1+uHwLkF9veTwtqiGqjjv7fP/TMGv9bnQBNuH8tLNXkBUFcb3bqFVmQpZeDGbLbRxBEfHLOOecAAL766ivP8oiEwPy5UKcRaFWQBdCQsI3GsVS0b6e7ecKOnSf3s3N8sfkYCM1ZS9bjzxf3sZ6uAOxGHtXiUvlNEZlW07QqQr3uKmvX5540vCXA7cRXgLUxyFr+dtqt3IngjQJWJtyJvW5m52zFPUPOAGB/WR1bk1M5GGoQVYKoy0Uhw8MdvbokXq5xgiDCR69evZCYmIgffvgBBQUFSE1NFZ5HJLrINm1Y/+SXDm1OvqzioEuU7NQQnzHHp17mxCkjemXW+G41La0p3tMyfqkRCdQTdDt/SdIUjXrbOvlPC6Mut67X8rSB2udWcNKNMOiuqX5g5di0/C3KL/HgYycM7+K8LrbsS4/qezfnM8iy4KReCRN0jREEQdSkfv36GDRoEADg2Wef9SSPSAjMY8fL7IvKOMBIZOredGPXltRZYxIA1m7fX+s3O40krbUgjbqbxopCK0IzNkpp5z+ZUPtSy+eEd/jlb6tiM8riX4lyrtvrLuqj5RdXvvJhPGbQ55LqFW8I+rwSBEEY8cgjjwAAnnrqKfz000/C049EF9l6DRLdJyJ7d6jte2pP3LNjp2E3WaBSZDruLmtARrNGuv/ZHR+jiMzibWU1RKZZ91V1d1c1ooWk1uy2sfa66R5rpTFi5HNCPEH4282kNbJ2ybO6LmXxtjKkFp6syzW7HqoFn0Z9Fttd1lHXWAFY7Z4ugwiJh3rFy+ECYZvBPmHfPiTs0D7n7Gc56xCCIMTTv39/XHHFFXjrrbfw4IMPYvbs2ULTj0QE0yoV7dvVeNUiohFQvUim15jdeNWRGHVE08rYR7vdXWVExkYIET6iEt0s3lamLUi16i6d+iwMfpDdPoIgCCLaTJ06FYmJiZg7dy7WrVsnNO1ICMyy8grD/3UFZRwhVGR2aIO8evVqjnfV6aKsHvOp1fDTEprq7rLtkourX/FK3qGioE2IK8Lsb9nEi9Xo/r5jDq5vSR8MGolcmc5PmMt50JieRxq6QxCExHTo0AF//OMfwTnHQw89JDTtSAjMhkkCZ+6U+WbgsnurSJF51umnaP+hIzKtLjegbKeIzDYphbUimbIITRHdY+00NM/qkO4oD8IZYfe3TCIGsHaN9GzucCY7q7PCBoB67Kxs5yXs5dwKXnSPNTyPJCwJgggJ9913H5KTk/Hf//4Xq1aJW/87EgLzSFGJ7n9Rj1xWj6+0iGuRWbXP4i83mm5jmo5O9FMtMgFILTS1sCqo7bB4Xa5TcwgHRMHfsokZM5H58e69zhOnBr0jolDO/cZUXBIEQYSEFi1a4I477gAAoVHMSAjM5k39WyoilDRrVuOriEjmDReZLM6tN17K6MmugciMFZqyoW40K7aLbtzfMLSH0PQIY6Li7zCJzGs7n+ajJQQQnXKuh6+TX5G4JAgihEyaNAmNGzfGxx9/jJUrVwpJMxICMy//aNAmaEfknKzJ6QC7UcxaOLDvsVc/8SYPje2UBqlRNDNsOGn0P/aOuK4LhDlR8rdsIlOPad+sr/Fd1plxo0SUyrka38oPdYklCCLEpKWl4ZZbbgEAzJo1S0iakRCYbdNTgsvc6o1FxA3I6f6HDglP+5lJo6yl6eK41RP/ADXHPXohMmUWr89cPzRoE+IK8rf/TD0nq/qzlaVNCPdQObdHUEvfEARBeMnYsWMBAG+++SZKSvSHHlolEgIzd99h22MtXUf9AOdjGD24Idk5HmXbWj6zYdeYyfMtb2sbja6yQE2RKTKaqaQRm47Wb6JwGk0a8/wHgi0hjIiav2WKYqqXJFK4ecnyWr8Ji0LJvM5xgEStnPtKHIpLxlgDxthqxti3jLENjLG/Vf2eyhj7hDG2teq9mVlaBEHIQ/fu3dGrVy8cPnwYH3zg/r4QCYGZeWrlLHihWo7Ezo1JsCiN9ZMtkRnz37wp17rO2xATkQm4j2ZaFZDKNrHbqpdR8YN5d1zoe57xDPnbexShqbxeG1czmlYjihmEQIzN082wB4mFCJVzh1g4p6Fqk1jnOIBfc87PBNATwHDGWD8A9wH4jHPeCcBnVd8JgggR115b2bafP999ECkSAjN31/4a380q9cCil07S8KFhoikyTcaROolgurnRqkWmUTTTSDSabRObnno/v6nIza8RvaFIg7+Qv/3nugVLNaObUkYxJRaNdqBybp3q+1BEzr0TeCXK4qn1ql4cwMUA5lT9PgfAJQGYRxCEC0aPHg3GGBYvXoyjR93NbxMJgalEMNVUtG9XQ0wm7Ngpj7iMTUtJz+XkQG6Oze6TVqcRzNj8DNFoCKrXkFNHM9URTa2XFsq+sfvrCU0rKMJQq1FstZui1r4UafCXKPpbpm6yWswdfW71Z83ZZoPq5mph5mtDJO6eG8Vy7gV2r50QRy77M8ZyYl7j1Rswxuowxr4BcADAJ5zzVQDSOed5AFD13tJfswmCcEvr1q0xaNAgHD9+HIsWLXKVVi
QE5s6fDhr+L0xYeokg0er2WK0KzQmPv+U4j9i8NDFpjOlFMwF7wtDKtkF0hdVjwkybM/cSriB/+89tC1fU+B57bUsZxQRCH82icm4T1flW7pnql7Tk/Vx5DWi9Dh4CgBWc86yY1wx1EpzzE5zzngBOAdCHMRbttW4IIo646qqrAFRO9uOGSAjMU1rrP1kUXtGHpDHhVlCb3TCfvPMiV+m7RSuaqSU0jV6xqMeCxeK3yNRrSD855lzN3wlviKq/ZY5iPnHB2Yb/V+Tm2xKIuqLUahp2llcKyb1BTVTLuUj0usZKLSR9gHN+GEA2gOEA9jPGMgCg6v1AgKYRBOGQyy67DAkJCfjoo49czSYbCYG5/8BhsQmGtKHgNbFCc9r8Jd5kYjO6oCc0jRZzj0VvWzsi06/lFKa9n+NLPkQl5G//eXrZevONANsiU1NoWknDbrRTS2j6tB6yU6JazkVFvGV+IBMEjLEWjLGUqs9JAH4LYBOARQDGVm02FsB7wVhIEIQb0tPT0alTJ5SWlmLTpk2O06kr0KbASG3WSPc/4V1jt++RtqGgJmHHTncT6+jsX9G+Ha4Z3ttxel50V1YaAbGNCqsiU4+kjvVqiEe3kUwrDRV1oyh2n2sGdnWVP2EP8rf/XN2rg2dpV+Tm174GtQSkUr+76UprNGZTsvGYUSznnohLil4qZACYwxirg8ogxZuc8/8yxr4C8CZj7CYAuwBcEaSRBEE4p0ePHti8eTPWr1+Pnj17OkojEhHMwiL3C4ISJ7EyjvPz3ALHaesioOGlRDRjX26wKlJFRDHNGkVLNuxynQdhnSj7W9aoTPaP+0y3qb5OYseOKd+t7mtEbJpeINkDyqiVc+HiUuLocxBwzr/jnPfinP+Kc96Dc/5I1e/5nPPfcM47Vb07ayQQBBE4PXpUDqvesGGD4zQiITAbNHAQqVI3TiKK22ih3v5dOrUR+wTXw/PgVnC6jYRaQStyqbb19NapnttBnIT87T9dWjR1tqOIcZl6eCEuJBIsUSrnwiaCUtA5T3EcvSQIIg7o0qULAGDr1q2O04iEwDxxokLzd11xpW6MqGdSMyNkotSuyFRP6qO1/5Ejv8g/M68OIiKbasxEqFl+VkTw4WPHHdtH2Cfq/pYxinmkpNSXfHTHZarxUghKIjKjXs6dkJCZJs35IQiC8BvGGACgTp06jtOIhMCsqOD+ZxoykemEWKGpdJtVXiW794rLKCBf2mlgGwlIt+LSKsWl5ULSIawRD/6WTWQWl4n1udnxGQrNOBEY8VDO7WAmLil6SRBE1FFmj23QoIHjNCIhMOsn1p6ryHL00uk2draLIJmiulUF7EO3ItMvcQkAmS0ddh8kHBEv/pZJZLZPbWxpO9FdIYV3rbSKBCI2Xsq5FUhcEgRBiBGYkZhFtkjVxcexuFTfWOyKUQkaC15QPfNrzLGuzP4OfRraLD6x/gmpOLczHlN0w33lljz06ZghNE1Cn3jyd0JmWnAiK4ZVuw6iT9sWptt5IYo1Z5k1QtSM4iJmrXVBPJVzN5C4JAgiXjhx4gQAICHBeRwyEgKzWdNkaxvqTREfUWEonBj/jerT0f7+LhpQSuNXpmiLEV7Y6cjnhGPizd8yiMxLup9quo3da0vruGJnfXY1iZeVOk3y+0u8lXO7kLAkCCLeSElJAQAcOnTIcRqR6CJ74Ocj1Z9NJ57RWgTb6rZ20o04L378ja/5aa1zKSteiWC/fR7vxKO/g36A89JXJxd11lpyKGj7HCF5b414LOdWIXFJEEQ80rx5cwBAfr7zNnckIphtWlWOB7Q8q6ld4Sh5A8EMoTfJKn9MuWqguDQtokQi9ESmDI1PL20IwufxTLz6O8hI5iPnnVVtg0iMjql4W1l1FNN2N1mriOpO6wFRKucyROFlpyLvKCqStH3EDxb5bA1BEDKSllZ5H3QjMCMRwdy5+2CwS2ZI2nCwRbNmtjYf9/JHHhlijFEUI+oNi6B8Hq/Es7+Delgz/u0vA8k7tstsNSF/sGiVeC7nRlD0kiCIeCU9PR0AsGeP8/tgYAKTMdaWMbaEMbaRMbaBMXZn1e+pjLFPGGNbq95NlU/7U1t6a6yobrQyY7WfdVWja9ZtIzwzRYlSGglGtdB0031OhsinFbz0OVGbePd3ENfF7Hsv8T1PBUVkWn5Q1aFN7ZcRkgrWeC/nCmG5DxDyIbI9SxAy0KZNGyQlJeHAgQM4fPiwozSCjGCWA5jEOe8KoB+A2xhj3QDcB+AzznknAJ9VfTckNzfPU0PDjNCnsDENpOte+FBcui4QNS5LljSMkMXnjrHTGJeA0PtbAGbXl9MxknrjK/3yueWJfeyKwpCU7ViiVs5JKBIBIKw9SxAykJCQgC5dugAANm/e7CwNkQbZgXOexzlfW/W5EMBGAG0AXAxgTtVmcwCYPtLObCNoTUa7hKwh4RSt7sdzb7/Ak7zU0QM/u716MomIwAanVz73HD0fSH79hNbfHmBnwh29ba2IUC99rs7XSGTWqHecRB4lL9uxUDmPIUTnjZAHke1ZgpAFRWBu2rTJZEttpJjkhzHWHkAvAKsApHPO84DKi5Yxptn/lTE2HsB4AEisVwfL1v6Iw4XFKD5ehszWqVi5fidGDTkDL/7nS0y5ZQTGPfoGZj18Da6bPB9zp1yLGx5+HTMfvAqTX16MWy8fgIXZ36Nfj3bI3VuApPr1kNI4CZt27MfQrE54/X9fY9K1Q3HPc4vw0lX9Meb5DzDv2fEYM3k+5k25FndNW4gHbhyGWYtWYcSArli76SdkNG8CAMj7+SjOOv0ULP5yI264qC8ee/UTPDNpFMZMno858/6C6255FvPvG4UJj7+FJ++8CNPmL8E1w3tjSc5WnN4+3fUx9TmvH3bs3I+kBolo2jQZm7fuwZCBPfDvt7/A3bdehHsfnovpT/0B193yLOa+PLH6/e6/vIr777oMs1//HBd0a4W1y75H+vEKJLRqgrxDRVi4aiv6n94GNwztgcfeWYVnrh9a6Zc7Lqx+nzDzEzw55lxMez8H1wzsiiUbduH01qk4fOw4ikvLkdmyKVZuycOoPh3x4sff4G99umL821/i1SsHYeyCZZgzejBufPMLzLhsAP66eiNuPa8nFq7ehn6dM5B74AiSEusipWF9bNpbgKHdT8Xryzdi0sgs3DNvKV4aN6yWPXfNXoIHLu2LWUvWY0SvTKzdvh8ZzRpVnqdDRTirQzoWr8vFDUN74O+zs/H0yL64bsFSzB19bvX7bQtX4IkLzsbTy9bj6l4dkP3jPnRp0RRHSkpRknew1jFNue/Kk+fprpmYe/sFuGH6Ysy85XxMfmO5rWMqKS1H+YkKx8ekeZ6eHX+y7L2wyNJ5mnLVQIx7+SPMum0ErnvhQ/Nj2n3I/HoSeUwOyp7WMXW6/RVsfPZG2+fJbdnz8piclj2/jun6f36Ix38/2LtjytmMu87ogHs/XIPpo/rjDzkr8K+s/rh5yXK8MnQg7pz3FR4c1Quzc7ZieF4bfFN6vPKYvt2BvHr1dOvyeVOurX6vvp7eWlH7PG07o
F2X3zsrsPPkRV0edNnrmVgf/9u8B9dndcLjS761VJcv/fEnnJ5VUn3PbZdVilU5W3DJhX3x0qv/wyP3X43xE1/Eq/+8HWMnPIc5L92JG//4AmY8eyseevzfmHDjcLz7wSr0zepc655LxA9u27Pp6enIzs42zKOoqMh0G78gW7SJgi2ccwDAV199hXbt7PeGZEoCQcEYawRgKYC/c87fYYwd5pynxPx/iHNu2G+9d9e2PGf+JK9NDSVuu8hWRy+376kxw2L5iQrUrSM2AG425tJPnEROdW0UNBOxcJ+rn9Z7MUYspOPSAG/KOGGMXz7XWxdTHdWscU3biW4ZlWs/rjsbRLGcO66/VedG6/6p3BPN7q2x29VtfunXnPMs20YJhjH29Gd/GHHXuR1aaf4/J2crbnpr+R2c8xd8Ni0SiGjPZmVl8ZycHMN8srOzMWTIEBEmu4Zs0SYKttxzzz146qmn8MQTT+DPf/6z5jaMMd26LdC7CmOsHoC3AbzGOX+n6uf9jLGMqv8zABwwS2fvwSNmmxBuUIlLAJj8xvIADfIeoYJWUANSqM/96AoWYnEJRL+My4hwn+uUQTvdZW1jVq6376n5CpgolnNR9XfCjp21XrH/Ge1jtl1YoQlttBHVniUIWWCMATgZybRLkLPIMgD/B2Aj5/zpmL8WARhb9XksgPfM0mpR1TUmDPg99bmbm1vCjp3V4lLNref1dGOWbaK+BIkVPPe5SNGpl5ZEDWsz/C7jhL8+tyIybY3FDEm5VkPl3LkgNRKgEYYmtFEhsj1LELKgCEynBBnBHABgDIBfM8a+qXpdAGAqgGGMsa0AhlV9N+RwYbG3lgpGuMj0oNFuJC4BYOHqbcLysmQPzQzou88doTehTwgb36Hwd8QIZb0SwrIdS1TLuZVz68nEbhGHJrTRRFh7liBkYefOygdmzZs3d7R/YJP8cM6XA9CTx7+xk1ZyUqL1jdUNAZ9njfP8Cef2PbWOKWHHTluiVhGXRvTrnOHIPMN8M9OkilTKZo8wnxtFF0WnGft/yBrhXpRxwhjhPrdQ5hSBYelaD1kZtkKUy7leHa4rKjXun4Q+Tia0iSIi27MEIQurV68GAPTp08fR/lLMIuuW42UnTn6x2wCIwg1F3Xh3KDLV4teowZV74Aj6dIxuw8QqforQ0Ps8ZCIz9P4OIZ753MJDFVtCM0JEvZxThLImpXvKUVxRpv3fgRMA0J8xNjbm5xmc8xnqbasmtHkbwETO+VG33ekIgpCHgwcPIjc3F8nJyejWrZujNCIhMBMYc9dwlVlkxh6X3VkMddmOx6UAACAASURBVESmaRRVp2ts7EQ/SYneFB0twSZjAyHWJr/s88rnALyNXoYUT/1NaOK7z5WyqxKa8SQyqZxbQKt+jGi9B2AF5/xqow2MJrSpil7ShDYEEWLWrFkDAOjduzfq1KnjKI1I3FnqnDhhvpEZbkWmFzcgdZp2xaaOyLSVJ7QFVErD+ub5RwCZGpuR8HmIopiR8HfICMznqnJp9aFRbN0g44MwK1A5V2G1fjIbbhOSes4uFia0mQqa0IYgQs3y5ZWzi/fr189xGpFY/KqkrFxMQk4mazDaxyw99XT1dqav92JiCVUDy2gChE17C8TmLTFqH+j5pCI331MxKszn6u7UbstRRBtS8VTGZcEzn1spow4eCMbWBbI8iLILlXNBSLb8jIfQhDYEEXG++OILAMCgQYMcpxGJCGbjJMFPYD0UbsJx2zXYIUO7n+o83xCiRDKtRCmsbmcXoT4PsgEUkihmvJVxGfDU51Z6qWh0mY06VM4JO9CENgQRbUpKSrB69WowxjBgwADH6UQigllQJM8yJUoUS2Q0S2h6gp6wvr58o3tbQkbQXeBMfR7dMUGBEI9lPGik8bmy3I76pYE6iqn3khVpfE4QBEEEzrp161BaWoru3bujWbNmjtOJRASzVdNkS9vp3eRFCAe9tJXfneShN9GOrfRin9oLfCo/aWSWsLQIa0jtc7tjmEMQxZTa3xHFc5+7HWuvU27V9bHMglINlXOCIAhC4eDBgwCAzMxMV+lEIoK5O7/Q8H+zJ8huGwNW9lfb4OtTbQ/GhNwzb6nQ9GIJa2PN6winqc9FCza7DXHJBaNdvCzjhDbkc/8hn1sjTFFpgiAIpxQVFQEAGjVq5CqdSEQw27VoUv3ZaaXvNNJoNz/19nbG6xVvK0NSx3q29/OCl8YNCyzveMVXnzuN8igiMwLddamM+48vPndbRi1E32WafdoMKufmmPVQAoIfQkEQBCECUQIzEhHM3LxDwp4oGqXj1RNMrbTU34u3ldV419rGaJyQaMY8/4Gn6Yf1Zu2l3V77XBO3QtMsbR/LrF0C8Xec46vPPY64h6UOo3Kuj537fBjH3xIEQahRBGbDhg1dpROJCGZmamPhaQZxU7CaZ2wkswZmDabYhrzLxtW8Oy50tb8dwtJQU/DKXj99Xo3VMmW2JpwZVsumj7N8BuLvOMd3n7sttyaEIZJJ5ZwgCIJQOHHiBACgXj0NnWGDaEQwC4zHYIad2KilGkuNF60okcvIET319h/fozt2HlhYmG3TMnppqPPzGCrj/hO4z62uXWwD2R+QBe5zgiAIQhoqKioAAIzprUZkDYpghhTdKKYajxrifjz1lr1h5jdxF2mwOyutgqDoZtz5WwKk8XlsGRIw47HMdZk0Pid84Uh+A+RDe+b9osOC1xQnCCJ0cM4BAAkJ7mKQkYhg7j7yS9AmBIpuFNNKA92hAL1r9hJH+xHOIZ+r0IuYChrXSf72Hyl9LtnsyKLH+Enpc4IgCCIQysvLAQB16tRxlU4kBGarxu4GosqMUfdYU6w0jBw2nh64tK+j/QjnSOdzyRretXApMqXzdxxAPjfGbAI6J5DPCYIgCIXDhw8DAJo2beoqnUgIzPxfSoI2IRAsiU+jMUMuBMKsJesd70s4Q0qfK+VLVrHpQmRK6e+IQz7Xx6vJgsjnBEEQhEJBQQEAIDU11VU6kRCYTRq4m+koCpg2PtQCwKUgGNEr09X+hH3I5w5xKDLJ3/5DPtfG7lIZdiCf6yPz2FmCIAgvOHToEACfBCar5FrG2ENV309ljPVxlbNAjpWdCNqEcCAw2rR2+34BBjlE0nUTvSZQn4cd9Sy3FsZpkr/9h3xeGyeRSztjNMnnBEEQhIIiMJs1a+YqHasRzBcBnAPg6qrvhQCmu8pZIPXqRCIQ6xo/11vLaNbIt7xqIWt3TI8J1OdWCON5MRCa0vs7gpDPayKqTjcSnORzYxIy0yiSSRBE3HDkyBEA/o3B7Ms5vw1ACQBwzg8BSHSVMyEcugkShEMEzTxLEKLw8oGhnw8jowIJTYIg4oGjR48CAJo0aeIqHasCs4wxVgcABwDGWAsAFa5yFkjZCWlMiRvyDhUFbULcEQqfhzGKGUtM99m8ejS2229CUcYjgiIyyeeEHRhjrzLGDjDG1sf8lsoY+4QxtrXq3V3fOoIgAsNvgfk8gIUAWjLG/g5gOYDHXOUskIb13K3VQtjnrA7pQZsQdwTqczvRvbCLzCrO
Ov0UR2M3CedQvVKJOsJYvK3M3ZJVBvn0TKwvPN0oQ1FMzAYwXPXbfQA+45x3AvBZ1XeCIEJGRUVF9TIlKSkprtKyJDA5568B+DOAxwHkAbiEc/6Wq5wFcrSk9o1X6c4S+wojSR31oyhG/3nN4nW5geUdrwTqc7uiMXZCKdmXMtFh8Zcb9f8kkekJVK/UJlZYeiEy/7d5j6t1NOORsLYnRMA5XwagQPXzxQDmVH2eA+ASX40iCEII+fn5KC8vR2pqKhIT3Y2ENBSYVd0eUhljqQAOAPg3gNcB7K/6TQrSkhsAQOjFpBv8PuYbhvbwNT8iRD43ivKFSGTecJHJAvQkMoUTmjIuEW7veddndar+TCLTOhFuZ/RnjOXEvMZb2Cedc54HAFXvLb01kSAIL9i3bx8AoFWrVq7TMotgfg0gp+r9IIAtALZWff7ade6C2PdLiaXKPqw3BK1IZZDRSwB47J1VgeYfj4TC5xESXY+9+on5RhE6XhkIRRn3mFiR50XEUs3jS77VzZ8wJoxtip+LGmLP4caar0PHGgDACs55VsxrRtA2EwThD3l5eQCAjIwM12nVNfqTc54JAIyxlwEs4px/WPV9BIDfus5dEG2bN7a8bUJmWihvoEkd66F4W5mmsAziJvfM9UN9zzP0KGLEJIqnlE/1eY2Mz7fvCYUwe2bSqKBNiDsiU8Y9wosHi0+PrB2pj71HhlFEBYGWn8LY1nDIfsZYBuc8jzGWgcoebwRBhAwlgpme7n4+BKuT/JytiEsA4JwvBnCu69wFkXvgiK3tw3rDDDpqGcuY5z8I2oRIYtQgCYXPY8WzkYgMwdjMMZPnm28kqe1hJRRlPGJct2Cp4f9xJJIcYdRFOY6G7CwCMLbq81gA7wVoC0EQDlEEpogIplWB+TNj7EHGWHvGWDvG2F8ASHPXyWxpfzHQqFT6QR3HvDsuDCTfKGMWNQilz61GKiUUavOmXGu8gYQ2h51QlnGf0HvA6HaCnrmjzZ8Vk8gkFBhj/wbwFYAujLGfGGM3AZgKYBhjbCuAYVXfCYIIGX6OwVS4GkALVC5V8i4qB3Bf7Tp3QdiNYCqEXWQGaT9FGsRhtYEYWp9bXeJDMsFWK4IZa59ktkaF0JZxQeiNv/Sy94pZBFOBRCYBAJzzqznnGZzzepzzUzjn/8c5z+ec/4Zz3qnqXT3LLEEQIUAZgymii6zhGEyFqsriTte5eYSTCGbYCVocU6RBDHYabeRzf6kRwVQEJQlLT6EyfhK/hkRYiWASBEEQ0efrryvnb+3cubPrtCxFMBljSxhjn6tfrnMXxM6DRx3vG7RQCysTZlqYYTNiuH6CrxpvaDe90PjczfhKiQTchMdjlvoNwaREUUBdxpXovpOXHla2CYKg7Llt4YpA8iUIPRhjHRljAzR+H8QYOy0Imwgi6uzZswdbt25F48aN0bt3b9fpWYpgAvhTzOcGAC4DUO46dwMYY8MBPAegDoBXOOe6ffrbplmfRTYKyCCKnxxDT73d4KQxGRmfd2gjlZDU48k7LwrahLgg9lp4YtAZwoSWlXREijo39XKQYveJC862vG1Fbr4U9x8i8jwL4AGN34ur/hvprznOsdOWJYggyc7OBgAMGjQIdetalYf6WIpgcs6/jnl9yTm/G4DJKuTOYYzVATAdwAgA3QBczRjrprf9viO/eGWK5/ixzpkXTHs/J2gTAsGvhqBWPr753OqYyYgzbf6SoE2IHGaRxqeXrQ/IMvc4jagGTZh9TkSW9pzz79Q/cs5zALT33xxn2G3LEkSQKAJzyJAhQtKzJFEZY6kxXxMA9AbgfoohffoA2MY5316V/wIAFwP4QWvj1EZJHpriPX5N5iCSawZ2DdoE31HWUHW7RpyVxqZWur75PHadShGRxpCse6nmmuHuu4jEI27E1NW9Ogi0JBwELT7J5+bE1sda+1JUVzgNDP4LU4PPVluWIIIkJ6cyiNG/f38h6VmdRfZrADlV718BmATgJiEWaNMGwO6Y7z9V/aZJYfFxD03xFrWgLN5WVuMlK0s27ArahLjDV5+LXp/S6gysEgnRJTlbgzYhVIiI1GX/uE+QNfKh5ZugxSUQbZ9r4cTnZpFo2aPUIWQNY2yc+seqJVG+DsAep9hqyxJEUJSWlmLDhg1gjOHMM88UkqbVTrZdOeclsT8wxuoLsUAbpvEbV+U/HsB4AGiclIhlP+zG4WPHUVxajsyWTbFySx5G9emIFz/+BlOuGohxL3+EWbeNwHUvfIi5t1+AG6YvxsxbzsfkN5bjlq7t8O6GXeh7agvsKChEUr26aNogEZsPHsGQ01rh3+u24+7BPXDvh2swfVR/XLdgKeaOPrf6/e73V+H+oWdids5WDO/SBuv25qNV44YAgH2Fx9CrdRr+t3kPrs/qhMeXfIunR/atkca/svpj4vJVeLTvWXjhu424omN7LNu7H51TmuDIzlJUNAfapzbGql0HcUn3U/Hy6xtMj+nW83pi4ept6Nc5A7kHjiApsS5SGtbHpr0FGNr9VLy+fCMmjczCPfOW4qVxwzDm+Q8w744Lq9/vmr0ED1zaF7OWrMeIXplYu30/Mpo1AgDkHSpCYp0E/OO91bhhaA889s4qPHP90FppTJj5CZ4ccy6mvZ+DawZ2xZINu3B661TH58nrYzqrQzoWr8s1PKbbPl2LJwadgaeXrcfVvTpg6Y8/2T6mG9/8AjMuG4CHPl6LCeecXqvsNctsjk2f/lTrmK4ecLonx+Tbefr0W/3z9Ox4jJk8H/OmXIu7pi3EAzcOw6xFqzBiQFes3fQTMpo3qTymn4/irNNPweIvN+KGi/risVc/wTOTRlXvq7xPePwtPHnnRZg2fwmuGd4bS3K24vT26ThcWIzi42XIbJ2Klet3YtSQM/Dif77ElFtGYNyjb2DWw9fgusnzMXfKtVi45HvceFFfTH55MW69fAAWrv4R/VIbBlb2ZL6ezm/RzHa9N3f0ubht4Qo8ccHZ1dfT1oNHsGz7PhwpKUVxWXmNeu+lrzbhkfPOwvi3v8SrVw7C2AXLMGf0YNPryc+6XOuYsn/chy4tmlYe07fb0aHHKVi5JQ8XZ6T5ekxztu+pUfYq9h3FvsJjqJeQgCezv7d8TL8f2Vuqsmfnepozsp+18+S07M3/AX1PbYGddVHjmGRif3F97KyrHQTMP57oszW6TASwkDH2e5wUlFkAEgGMCswq+5i2ZYGa7dn09PTqrop6FBUVmW7jF2SLNmGzZevWrSgrK0Pbtm2rI5luYZzXKuu1N2JsLef8LLPfRMEYOwfAw5zz86u+3w8AnPPHtbbv2KoZ3/rCzTV+Uz9NNOrCIsOTR6NopTrKKUN3nEU523BRVsegzQgMpcx40UVWL81I+lyiiKWaRUvX46Jze9T8MQSTE/mNyPrz/R92YWS3U4WlR1SiVaco582uz2W4/zghyPt83Xtnfc05zwrMgCoYY09P7njdXV0bt9P8f2n+t/jXrkV3cM5f8Nk0TRhjQwEolfAGzrk0qxdYwW5bFgCysrK4WQM/Oztb2Dg5t5At2oTNljl
z5uD666/HlVdeiTfeeMNy2owx3brNMILJGGuFynB+EmOsF04+jWkCoKFlC+yzBkAnxlgmgD0ARgO4Rm/jirJy05uHDCLSiKSO9XRFZvG2shoiU4aZ/IpLPZ1EODT4eS7I5/5SfFzeLuoyoYxNFkFxWfyWcXU97zXKedPyuVKnyX7ftEpUjiPe4JwvARDm2dZstWUJIiiOHq1c7rFFixbC0jTrIns+gOsBnALg6ZjfC6E9hbQQOOfljLE/AvgIlVM7v8o536C3ff06dbwyRRM9QeH2JhbbuJB5/CUAZLZsGrQJgeKmUW20r5FYFelzNxHYeCGzdWrNHyh6qYu6HDm9NtqnxteSU4A/db3eg7CEzDS0332w1m9an2V4sOkUkQ9BCH9hjLUGMLnq6xTO+d4g7bGD3bYsQQRFaWkpACAxUVw3eUOByTmfA2AOY+wyzvnbwnK1AOf8QwAfWtm2yKfIjtnNVVQjCzgpNv1+qm2VlVvy0KdjRtBmSIGThpdWg8csja9WbkNWnURL2xLuWbl+J/r00O5KRhjjtC5ctesg+rQV9wRVdmR4kLi66Bj6VH02qleoziEC4kkA96FSoD0BYEyw5tjDTluWIIKirKzyXuSbwGSMXcs5nw+gPWPsbvX/nPOnNXbznWZJ3g9Md3JzjfJT01F9IjYW0AEizq/VclWRm49LuosfmxYrjo2OJR4bl6OGnBG0CZHB6rXiRRkPCrvLT4l4kOika+uoPh2RUDWJllPCHN0kpGcrgMtROUQrN2BbCCKSlJdXBurqCOwRarZMSXLVeyMAjVWvRsKscMmBohLzjVzg5sbp9qYrY/QSAF78+JugTZCChMw0x+fYzn4JmZUzTtrdzwpWptj37EGJxN1OX/zPl0GbECmUa8Wo/CplPGroRSpF1u9m9YLeNSyiLg+DuAyDjURtOOcPA/gOwPec84cCNocgIklKSgoA4NChQ8LSNOsi+6+qj59yzmu0thhjA4RZ4ZI2Tb2bb0jETSmKkcwpVw0M2oS449E/DENCHatL1xrjpEzG27jNKbeMqPlDhzZSC+IwodeF9pHzPJmYXAr0hjt48RDRzvUdT3V5FO/F8QDn/LOgbSCIKNOyZUsAwMGDB022tI7V1qrWlNVSTGMNADsPFXmSrsiGdNQa5eNe/ihoE+IOWXwufFFxSUXbuEetT9VNuEOpH8e/He2osQxjLtXIUq/4RdTuxUHAGBvOGNvMGNvGGLsvaHsIgnCHIjAPHDggLE2zMZjnAOgPoIVqDGYTVA64loL2zaTprRs3zLpthPlGhFBE+9zt03yhEU1FZEq0Luash2k2eT9JyEzD7Hsv0f0/bJEnvaWngpq4Te86jce6nCKZzmGM1QEwHcAwAD8BWMMYW8Q5/yFYywiCcEqzZs0AAAUFBcLSNItgJqJyrGVd1Bx/eRSVg66lILfAmwimaKL05PS6F2hSNL+R1edRjWZeN3l+7R8lEsBRRNYyLho/IpnK/cZs3Gu8+FxNlO7HPtMHwDbO+XbOeSmABQAuDtgmgiBccPz4cQBAUlKSsDTNxmAuBbCUMTabc75TWK6CyUz1JoLpxcx4fs486iVzb78gaBPiDi98LupJvtBrRZJo5twp1waafzxiVMbVZVXkslBBIDqSqXUNWrkmqS4nVPRnjI2N+T6Dcz4j5nsbALtjvv8EoK8vlhEE4QlFRZWBuuTkZJMtrWN1DOYxxtiTjLEPGWOfKy9hVrhkh0djML1CBoHolhumLw7ahLjDK5+LKo/CG/gBRzNvePh17T86tDn5IoRiVsaNZqKVsV41E5CiI5lOrsF4rstlLDNec6C0DnYX19V8FZQmAMAKznlWzGuGKgmmkSz33nKCILxCEZiNGokL2FkVmK8B2AQgE8DfAOwAsEaYFS5p5+EYzLA9FfeLmbecH7QJcYeXPnez3EosURKZMx+8ynwjEplCicd6RS0yi7eVuRKedq/BePQ54YqfALSN+X4KgL0B2UIQhAAKCwsBBCMw0zjn/wegjHO+lHN+I4B+wqxwyZ4jxzxNX5k1U+/GHfu/lZt7FETr5DeWB22C1AifaRX++NzKWoW+s32P/stDJr8cv5GdoIjXekURlEHMMiubz6Nwf4w4awB0YoxlMsYSAYwGsChgmwiCcIEyuU9qaqqwNA3HYMag3PXyGGMXovJp1SnCrHBJy0YNfMvLroAUPU5Ilob/ref1DNqEUCByplW/fa5ls9Xy71s5tSIyHUYZb708Zqnf2HwoaukZUaxX9GaTVaMVyfRjtlmZfK7UL77WIYQtOOfljLE/AvgIlasJvMo53xCwWQRBuEARmGlpApdntLjdo4yxpgAmAfgTgFcATBRmhUsOFZcGbYIudqObYWHh6m1BmyAtWudZXQ6clAUZfG41uilVWdcToSbR0IXZ39tLj3CNDGXca+yIRi1hGtvLQO9atHP9mfo8dsyxBw9Xonh/jDqc8w85550556dxzv8etD0EQbgjP7+y/hUZwbQkMDnn/+WcH+Gcr+ecD+Wc9wZwmjArXNIo0WoglhBFv84ZQZsQeuw2rGTyeeiiC7GiUK97req3fj3aaf6u+xvhGq/KuCzdvhVxGcQ6mHoY+tzDaL0MojLo/AmCIGRg9+7KiaHT09OFpWk1gqnF3cKscMnxEyeCNiHuyD1wJGgTpMWrhqxsPpehwW4LK+M2YyKauXsLjLf3YRxovOFVGZdBzDhFHcWMPQ4Rx2Toc4/Kt5ndoatbCIIgQsz331f22DrjjDOEpelGYGpNVR0ICUwaU+KGJIoaG+JFA0lGn0e5IZh0+GjQJsQdMpZxkYiaxCdWMOtFZ62KTz99bkXoR7lOIQiCkI2ioiL8+OOPqFevHrp06SIsXTcCU5p1j+qQwPSdlIb1gzZBeqw2lKxuJ6vPZel+KBpZ/R1l3Phc1gilVVGZn6e/wLWVNJxeg6Y+FxTFlO38yGYPQRBEEKxfvx4A0LVrV9SrJ274hqHAZIwVMsaOarwKAbQWZoVLSsr96SKrrE8WxFTysrFpb0HQJoQCI/FlV5jJ7nPRMyYHjez+jiLx7vP8vGRDoekFlnzuUmRarQui+KCKIAhCZr744gsAQO/evYWma9g3hnPeWGhuHtG4vvcTJmhNIa8g04QNfjG0+6lBmxAqRDScyOf+Qv72H6c+D9PDDPW9Q/melvFLtbhU3tMyfqm1n/p+o3fsVuscyz53KDJlFJdhKi8EQRBe8vHHHwMAhg0bJjRdN11kpaHg2HHhadqJUvoZ2ZTlxvj68o1BmxB3hMHnUYpAhMHfUcOJz2WpE71AK6Ip+l4TT+U8zJM9EQRBiObYsWP44osvwBgjgalFeuMkoekpN2/1u519o86kkVlBmxB3+OVzOw0ws23D3JijMu4/dn0e5vJl1PNlz+GanYf0hKYedh70eFnOZYpehrmsiORgMfDTL9qvQ/IuKU4QhAcsXboUx48fR+/evdG8eXOhaUdCYP505BfTbZxGGWUUjDLcKO+ZtzRoE+IOP3yulK3YNTrdiMgwRzSpjPuPns/V5TGqkajYLrFaqI
WmiGhm1Mt5VMsKQRCEWz744AMAwPDhw4WnHQmB2S6lkeH/RuMnzbZ1Qjx0lX1pnNhQOmGO1z63W6bU6/EpL2XyojCLS4DKeBBo+Tzous4rjO4TbVIKDffVEpqx2PGZV+VchvMmgw0EQRAywjnH+++/DwAYOXKk8PQjITBzC07ejGMjlUZPd70WgTJGPgEIe/o/5vkPBFlEWMWpz42iPiKilFGFyrj/xPo8HiJPVu4TRlFNoxlnrfouauU8yhFugiAIUXz33XfYtWsXWrVqhaws8UMlIrGqdbvkRlJGHou3lXk6w6wSLbKzvZv9Y5l3x4WO9iOcY9fneg0sEQ2veGi8URn3n7D6XOS9I3Y2WS1xGTs+s01KIfLzkpGW8Yvm/cZKHe+Fz/2uH+KhPiIIghDJe++9BwC48MILkZAgPt4YiQjmjsIiR/tZiXRGBdFiI2pPvcOAFZ97+fQ+3hpxVMb9R/F5vJU1pyhiUxGkWvcxva7sCmEv51RWCIIg7ME5x2uvvQYAuOyyyzzJIxICs31j4zGYQSKDcPXiBhzWSIMX+DX5iJHP/eoSFk/daamM+8+8Oy4MZfkR3VNFiVxqzR6rHp9pVWRq9WABxJfzMJ4/giCIeGLNmjXYsmUL0tPThS9PohAJgflT0bGgTTBEBpEpmrtmL3G0X5TGxxgdhxfHqOdzGX0po012cVrGCedMfP7DoE0IJVZEphYVuflCyzl1jY1vGGNXMMY2MMYqGGNZqv/uZ4xtY4xtZoydH5SNBEEA8+bNAwBcc801qFvXm9GSkRiDmd5Q7DqYXuDFeEwr4ye9ugE/cGlf2/voPUG3gwwzk4pcJxKwfkxaPqcGlnc4KeOEcypy83H/0DODNkMaYsdiWmHP4camYzK1uK93ZzdmVkPikgCwHsClAP4V+yNjrBuA0QC6A2gN4FPGWGfO+Qn/TSSI+Ka8vBwLFiwAAIwZM8azfCIRwSwoKQnaBEv4PdbTyxvwrCXrLeUvOmKptx6e0ctpml4eh1b+ZsT6PAxRYNntM8NKGSfEMjtna9AmSIXSVTYt45fqF2C+jImClfuNCJ97ea3rzXxNyAfnfCPnfLPGXxcDWMA5P845zwWwDUAff60jCAIAli5dip9//hmdO3dGz549PcsnEhHMJomJQZtgCxHRzKAjeSN6ZWr+LuON36lNQTyR1zuvFbn5OL9FMyn9a4SevUGXXyvolXFCPEo5Gd6lTcCWyIfRMiVq1FFMwPx+M7xLG1czivs19jusRHGIjAPaAFgZ8/2nqt8IgvCZt99+GwBw+eWXgzHmWT6REJjHysuDNsE2fixhImJ/vUbH2u37ccapLYTlR1Ri5Md1e/NxRkaqaRpKg8bL8uUWq+UlSCGqLuOE91gt44Q+dkWmzD6n+4p4DpaUg5/QFr0Fx08AQH/G2NiYn2dwzmcoXxhjnwJopbH7Xzjn7+lkq9WK5dYsJghCFCdOnMA777wDwLvZYxUiITDrebB+ix/YFQJeNbbVT6/V3UO18s1o1uj/2Tvv8DiK8/F/VneSTr13yZZkW7KNO264gA0Yg+khgEPoCYT+I5BAgCSEEBIS/+DoBQAAIABJREFUEhK+poWSBAjVAYIBh2Jwb1julm3Zsi3J6r2c2rX9/SFLluXrt3d7d5rP89wj3e3szLvvzs7Mu+/MO1bTC7xHekyk3eND35QHgqHpiMF1y9fG5uA6LvAeg++xozquNt5+MegM/Wszs+I7TtkTczCuGJn9Onc0g6IfW32FIGjYJMvyD2wdlGX5fDfyrARyBn3PBqrdyEcgEHjAxo0bqaurIy8vj6lTp3q1LFUsM0mSnpEk6aAkSXskSfpYkqT4QceGXaQxVyL+KZnO2nnWzrWXnxhg+Af26lCw7PMaTBGIBYG7ls4fniVX12J6gq01kN6+d4FYN6yh9gsJP2EFsFSSpHBJkvKAMcB3KsukCGI8Kwgk3n//fQCuvvpqr06PBfU8mF8Dj8iybJIk6Y/AI8DD7kYaM1osXhcYOOVtsFp4slbGUb6uUH24Fkus9bfnAu9Q22F9Ox5nB7yO0gXSQMgXU2xrWvRunyuwjqP7ZquOC2wz1JNZ3hnByKhup72Yg3VubzaLrwgWw3I4IknSlcAyIAX4XJKkXbIsL5ZluViSpA+A/YAJuDuIIsgqOp4VCLyFyWRi+fLlAFx77bVeL08VD6Ysy1/Jsty/cHILfdMlwM1IY5Fe2sPFGq6EjfcWanX6gz9TM/0/SEuwYU3nSnpT+j2d7uTpD14da3jiaZmWn6awNMMXZ+9DoLQrtqaj+5LBLzv7PZnHjTGExbs2zXiozvvvky/7GTE7ITiQZfljWZazZVkOl2U5TZblxYOOPSXL8ihZlgtlWf6fmnIqidLjWYHAW6xZs4aGhgbGjBnj9emx4B9rMG8F3j/xv9ORxiRJuh24HSBCo2FDTR1tBgM9JjMjY6LZVt/Ipbk5vLr/EL+aPpm7123h7wvmcNvqjby6cC4/WbOJF86ezZNFu7ltfAGflh1nRmoy5R16dFoNcWFhHGpt5+zMNJaXlvHD1On8/sB6npo4nxtWbuGtJbP58eoNvLZwHr/YXMSDUybw70NHuCAnk12NzaSf2JuztqubKcmJfHW8musLRvGXXft4+qzpA+f2/7374038cckMnl23jx9MzWfNkVoKU+Jo6zHQbTSRmxjD1ooGrjhjBC9tPsjvfrKI217+kn/efRE3LlvJm/cu4ZYX/serdyzml/8r4s6zxvLf4gpmjUihrLmDiFAtcbowShraWDAqnXd3HuWBsyfw8MptvHDlHG58by1vLj1n4O8Dn27lkYWT+VfRYS4szGJnddPAWp3aji52VzeTHhPJzdPH8IfVu3n20lmn5eHqNf32gmnc/uFG/nHNfG56bx1vLD2bWz9YzytXzeXXX+3w+jVNzUzii5Iqv72m3IQYnlm7lzeXnsMPX12tWN27f8NWfjdrGsv2HODq0bmsq66joCaWniSLw2u6/tU1J58ny2z+cGSvf96nrSUUpsTRHhdOt8FEXmocWw7VcOXM0bz41S6evHbe6c/Ti1+w5akf8qv3N3DXBVP4+LtSZhdkcKy+jYgwLfGR4RysbmbhGSN4Z8MBHrx0Oj9/ay0v3baIG/7vc9667+KBvz/912oe/d4s/rl6HxdNzWPH0bqBNZ41LXqm5afxv53HuGXhBH7/0Vb+evPC0/K489WveeaGc/jLp0VcN28cq4srGJuZSGtXr/PXdKKN8OU13ZSf5fR9+u3Xu7h33ni/bSPGm+NOPk9vFTn/PMXHKtI/1UR1E9LIKf3TG7uquC1/Ms+Ufs3HNbsAeHfar/lX5X94ZdbZ/LZ4E7+MLxxoI/Zvb3PYll//h4/8pt0LhrZcMGzweDyblpbGmjVr7Bai1+sdpvEVQhbr+KMsf/rTnwCYPXs2a9eu9Xq5kix7J5CXM5HGJEl6DJgOfE+WZVmSpBeAzbIs//vE8deBlbIsf2ivrIlJifLmqy5W9gKGYM1zqeR0WVenJzoTjMGbNOh7SInW+aQsQR8VeztIifC9z
m3VTVtem0CaamuPBn0PaRMDL5K+M9PolZ5qr1S74+/tij966vv7ppquBGZ/8wcAVlzyLMltNUCfd3NwXzX0+fSWzofqKljaBVfon5Ksffif22VZnq62PJIkPTs/5oafJoeOtHq8vHc3Ozo/vU+W5WU+Fs2v8eV4dvr06XJRUZFdedasWcOCBQtcvxAvIGSxjr/JMmHCBLKysjAajZSWlpKfn69I3pIk2WzbvObBdBRp7EQY7EuA8+STVq5bkcbqurrdFdMjlFqT6U7Ha22A6MvpRf1vhQXep3+g1u+BVKt8V9IHw2DyD6t38+yQgbea26a48nw7k3ZoGmvXZi+KrzfaG9GuuE9YStzA/0kTsmFjjVPneUPn/miIq4XQReDjy/GsQOAN/vGPf2AwGFiyZIlixqUjVJkiK0nShcDDwDmyLA+O6rACeEeSpGfpWxTtVKSx7Gj1Qtv7Q+AfNRCDQO9ibVCihnHpLsFgZFqr484YZUri63VwnhxXAtGuuMbgmTVhqQPBK0kcn0XLxm1O5aGkzoUxJRhuKD2eFQiUxmKx8PLLLwNw1113+axctTaQfB6IAb6WJGmXJEkvA8iyXAz0Rxr7AicjjZV1KBvt0dVAPp4E/vFkED50v0pfcuN73p+/PVyxNUj78eoNHuXbVBN12sebBPpg05k6PjT4ldKf4YZoV07Flec0anQGAJ01raTNyHO6DKV0bu95D/SXTe4Q6O2fwGkUHc8KBEqzadMmjh07Rm5uLhdeeKHPylXFgynL8mg7x54CnnIlv9yYwN0Q3VNPj1qD0DeXnqNKucGOvUHJawvnuZyfo8Fp/3FveeED2ZMp6rjv8WedqxUp1lkDM+mcibQcqmX3in0s+Nn5hKXGY6hvdXieEjoXxpRguKL0eFYgUJoPPvgAgPvvvx+NRuOzctXyYCqK0h5Md/CH7Ut8ifA0KI+jQZozHkx3PZTe9GgG6uBT1HHf46869/c6XK/JImZiLvu+OMDhVQcBkGY7F4beFZ0P3srI2W2NAvUFk0AgEAQ6W7duZe/evcTFxXHrrbf6tGx/2KbEY5T2YLrrzRlO6zH92dMQiDgzgLXmwVTaKPS2RzOQEHXc9/ibzv3BsLT1LA5+9pPOnoAUEkLJlwdoKW+m9XAd2QvHcWTFaof5/336HIczDdzVgzAu/Y8auQW9bD1qcJPc7mNpBAKBN/nLX/4CwB133EFMTIxPyw4KD+Zxvf8MhoeLJ/PujzepLULQYG/wNtgbecdXO3y2hlLpvP1hoO4qoo77Hn/RuTOeOX+hqjWG2Ml56Kta2LO3kcpOKN9ZSVx+it3z+q/x/g1bB77bSucOwrgUCAQC9SgrK+PDDz9Eo9Fw7733+rz8oDAwM6PUiyJrDVcH54EykBnMH5fMUFuEoGdoPXp03GxVy3eU1pHxG2j1XNRx3+MPOg+0egqgy0qio7xx4HtYVDiGjh6b6Qdf4+9mTbP6u7XvzjLcjctArEMCgSC4WLZsGRaLhXPPPZesLN/v6R0UBmZ9t+2OVC2sDbh9EbnTVzy7bp/aIgQFtgYi1urJq0d3e1scp+QYetxWmkCv66KO+x6hc+cY+mzpspKpKTsZ0Cc8VofU2TX0NOD0NmfZngOnHQ8kD65AIBAITqWjo4PXXnsNgKuuukoVGYJiDWZCeJjaIrhEMKzV/MFU32zU6k1sDaDUfvtuyzC7PHOMjyXpY6g8rkS2HFrXAymqbDDU8UBDbZ0HmlFV1RqDNi4SbUwErRUtAGRHQWy8DlOHc33M1aNzFZUpUJ5vbxFodUggEAQf//znP2lvb2f+/PkUFhaqIkNQGJh6o0ltETwmkAbeAGuO1DImOU5tMdzC0QDA0wGCJ/fRnuG2uamKvGj1de7OPrGB+EIlkOt4oKKWzgPZKEhZfCYAVbuOkx0FEQmRJBSmU/fxJrLiOxyev666jtFxsU6VdUpgoQB8pr1BINcdgUAQfFgsFpYtWwb0bU2iFkExRVan8e5leGOqX6BPHyxMCcyBty8GA94qY1R0vFfy9TWBMiAL1DoeyPha54E+FVQK1ZC2dAF1245Ss7sKgIlXTkYTpqXus61O5VEQ77pxae37cEJMIxYIBP7KypUrKS0tJTc3l8svv1w1OYLCg2mWZa/lPZw7UXu09RjUFgE43Vix5T309UCgvzwlvdLtRv/QuTsEohfTX+r4cMJXOg8Gw6CqNYbUS88kMiWWT3+xou9HCc5cOo367WV0l9WDE++k2gying9mcN2w1n4HQ90RCATBy3PPPQfAPffcg0ajUU2OoDAwLd6zL71KIA66++lWYFqyvY7aXUPR3zp/e1OfXZW1xxLYU8EDrb4rUccFruFtnftb+2ALe8/K4JeeWUvPoWl/FeWbjgIwcnYeMTlJ1Lz+hdNl9ZjMTskTqDiz1l7pLVqGK5IkPQNcChiAI8Atsiy3njj2CPAjwAzcJ8vyl6oJKhAEKfv372fVqlVERUXxox/9SFVZgsLADAvxzhTZQO5UvU1uovMbtrrTSQdTx27NyHTn+nIinJvKJlAGV+q4QBm8qfNgalP6iRiRys4X1w18n/m9iRg6umlat9ep9ZdNNVGMjIl2mEYtHHkTraVzJ2+BYnwNPCLLskmSpD8CjwAPS5I0HlgKnAFkAqskSSqQZdnx2w2BQOA0zz//PAA33ngj8fHqLqsKCgOz0+Sdt96uRMv0lEAK8AOwtaKBmTm2N/IWnfepKKGPna11TElIVUAagTM4quPBjDemeDuDkjoP5DbIFU+/fGIKT3hMODnnjafsf3uQjc57JdeUtjM9NdluGqVxVL+s3btAvp/DBVmWvxr0dQvw/RP/Xw68J8tyL3BMkqRSYCaw2cciCgRBS1tbG2+++SbQNz1WbYLCwIz34jYlzhiZVa19b93tvTHuTzM4XaBMF7S2zvGKM0Y4TCdQlsXpeWqLoBi+iJrsqD46Kt9aHXe1jEDH2rOv1DVb03+/zq2VYWtKY//vwX4vTkOSAJBPxCA447JJaCPC6Px8g8NTB/dpi9PzrE7J9bZxOfh/Z6arCnzGHEmSbhr0/RVZll9xI59bgfdP/J9Fn8HZT+WJ3wQCgUK88cYbdHZ2cu655zJ+/Hi1xQkOA7Oxu0e1sgcbjoP/DwTsTT+y18l3lxpZ9t1+npg51WuyCU7nrbJiHh43S20xFMMdI1PJwacjL8pLmw/y+4ume638QMQb+h+MvXZFrJM7afRVtcZAn32JFNL3z/Sl02jaV0nnoSqn8uinv11x1aB05wWpuIfq0yiX0W5psXqsW64D2CTL8g9snS9J0iog3cqhx2RZ/uREmscAE/B2/2lW0gdo9AyBwP+QZZkXX3wRgLvvvltlafoICgMzIyrSq/krOVXWmXUx3kaJ6Ue/mj5ZKXEETvJg4Qy1RVANbw5AbeX9yKiJYuDrY0S7YpvT+iBZRl/ZzIixyZSOzyB+dBrfPdUXTdZWP2OtH1O6XVFqzbnAP5Fl+Xx7x094Py8BzpPlgRD/lUDOoGTZQLV3JBQIhh/r16+npKSEzMxMLrvsMrXFAYJkH8yK
Dr3aIrjM4Le/vlrnpOS+XXev2+I4kUAxqlpjeHjPWrXF8Dlq7jUn6rjvETp3jYad5aRMHcmEKyZh7jVS8dU+q+maaqJsviR1p11xxXspjMvhgyRJFwIPA5fJstw16NAKYKkkSeGSJOUBY4Dv1JBRIAhGXnvtNQBuueUWtFr/8B36hxQe4igKnr+ghvfSW5373xfM8Uq+glMZPO36L1MWqiiJ71F7YCrquO8ROneN+h3l5F06lTNvmEXFV/vItDH10R7eaFf6vZhqP8MCn/M8EA58LfWtEd4iy/IdsiwXS5L0AbCfvqmzd4sIsgKBMrS2trJ8+XIAbr31VpWlOUlQeDDLfODB9DQgz2Dj0t21K4M/zqb1Fret3ui1vAXW+enOb9UWwWXsrUv298jJoo77HqFz12gqrhz4v9yG99IR3mpXhHE5/JBlebQsyzmyLE858blj0LGnZFkeJctyoSzL/1NTToEgmHj33Xfp6enhvPPOIz8/X21xBggKD2aun3sw7XkunRlk+2PI9lcXzlW1/OHC4Lrz16nnqihJH1WtMU554j0NeKV2/QZRx9VguOvcWjRXeyQUnIy1EpkWS68bZbrargRK9HOBQCAYDvRvTXLzzTerK8gQgsKDWe6jNZjWOlZHg+2hx13tnP1hoG2Nn6zZpLYIw44Hd632eZlVrTGnfJxNbw1n1x37S50Xddz3DGed96+RtLdecihZZ4+lq6Gd9vJGMuaMGfjdlRc8arQrAoFAIPCcQ4cOsWXLFqKjo7nyyivVFucUgsKDOUJlD2ZWfMdpHbo1w3OocenIe+kvA21rvHD2bLVFGHb8cdI5aotgE0cDWmdfrPhTnRd13PcMV507MiitHT9uiGbG3DEUf7YPU6+JKddMoyJMi8Vgcqlsf25XBAKBQGCbt956C4Dvf//7REV5Z+9idwkKD2ZNZ5fjRApha6CcFd9xysfZ82zhTwNtazxZtFttETyi30tgz1swNI2j9N7mLyXbPDo/KaNT8eltzno2ncHf6nyg1/FAZDjq3F77Y4+40amERoVTvauS2r1VaHWhROZb257QPp62KwKBQCDwPbIs8+677wJwww03qCzN6QSFBzM5QufT8lzZF1PJzaj9idvGF6gtgkNcMQRdNRqHplfKcLNXt27IPcOjvJU0jJUyKvvxxzofCHU82BA6PxVrazL7n7220np6WjqZuGgMpm4DhvZuOo/WulyGp+2KQCAQCHzPwYMHOXLkCMnJyZxzjv/NRAkKA7O11+DzMu0ZAq5OBxw8VdYfB9rW+LTsOHdOGKu2GKp5E4cyWA5vBcH4svYYt+RN9EretrA2/VsJ4zIQ6rm/1PHhRCDp3NW2x1q74EwettJYjGaOfrKDsdfPwWKycGzFTmQnp8cODtalRrviz1jTt6M23VsvHAUCgcAWn376KQBLlixBo9GoLM3pBIWBGaXSpqJKdSKBMNgeyozUZMXy8hcjUSk8NTZtnbNAEwsq7Bxmzch05dxARck6LnAOb+rc1QittvJQqnwl2r0jHxUx/ub5hGg1HPtsF85EI+h/lvuNzKnxaR7LEei4swZWyfS+pKPnOJJk3dNtsfT4WBqBQOAu/QbmJZdcorIk1gkKA7PbKA806P745tBaMJ9AMyqHdpj7qmvIM7v+ptdaXsGMEoPafso79EwfpewAVWCb8g4904WR6ZZHx12U1vlQ2dV+ZpRsDwD0lS1UrSshMi2Wpn2VRLt4eVWtMRzvLmVKQqpT6f2xfx2KvXvcL7/a9UAgEAg8oaysjI0bNxIaGsrixYvVFscqQWFghiAN/O+PU1UCzZgcirXOWBdiveqIjvt0lHr5odNqTsnPlwz1RDrj0ew/xx+eQXfo1/dwxBWPjq376049dUbnzs4Q8Ne2SDG5JAlkmY2/+ABJ4368vs5uZddTq4Un040FAoEgkHjxxReRZZlrrrmG2NhYtcWxSlAYmBpJsnnMF2vjghlbHXJsaJiPJQl8PDE0m2qikDpi/WaA5Mm02UAhLiww67grL9mUqE9K1klX67i/PA++ov+Zi7jqfH5w/2J2PfcVB9/aiGyRXTp/MDHa8FPWZAYaw60OCASC4U1XVxevvfYaAPfdd5/K0tgmKAzMXotzC9P8eRptoHFE38qspEy1xQhInJ0mN3TgFEg691fvpSttwKHWduZl2F+f5s0ZE0oNnANpAB5IdVwtNNERTL2/b0rUlP93AVkLxrLxoffpbjh9loEzRmN5dzNT4nIGjE9b56j1LAdS/RUIBAJv884779DS0sLMmTOZOXOm2uLYJCgMzCjN6Wsc7aH0OphgxV7HflZSlg8lCT6sedYdDaT8Tef9A1G1PZn2jDxn9hi01RacnWnbuHRl78Kh+YsBs238rY77IzHjRwDw3k1vEpkcxaXPXMnoq2aw9+Vv3cpvWtyIU777kzdTPCsCgUBwErPZzDPPPAPAvffeq7I09nF/4YYf0Wrsdfmcppoo0Xl5wCfVh9UWIWhwti76q86z4jtO+/gCW3rr/93Z53tw+sGfN3ZV2TzmjpyizXGMv9Zxf6D/RY4s902HNRvNHFxZjNlgRhNu/V2xMy9/vm44oJyQCiGeFYFAIDid5cuXc+jQIfLy8rj22mvVFscuQeHBTAmPcPvc4Txt1p63xVHnflv+ZK/IJLCN0PlJfDH4FPr2PULnjqnVazkDCNGeeD8sy2A7DMGAR9KWsbk0a7riMjqaISCMR4FAIHANi8XC7373OwAeeeQRQkNdm73pa1T1YEqS9DNJkmRJkpIH/faIJEmlkiSVSJLkVOzdmh7PjcPh1OHZezvs7Jvj3x/YorRYAgcEos698Vz56lkNRH0HOkLnjrGY+mIOhIT2RdyVZRnpRKC78k7rL1vteTJfOLbWqfSuzAgY+l148YcHkiQ9KUnSHkmSdkmS9JUkSZmDjrk8tgsklBrPCgS2+OijjyguLiYnJ4ebbrpJbXEcopoHU5KkHGARUDHot/HAUuAMIBNYJUlSgSzLdqP4ZEU4s720YwJlbaYn+9Ip1bk/NXG+IvkInEfo3LcvgoS+fY/QuWMsxr7uUHPCwDR29hIWF+l2fj8fvUgRuWB4vagVWOUZWZZ/BSBJ0n3Ar4E73B3bBQpKjmcFAmsYDAYeffRRoM97GRYAUe7V9GD+FXgIGBxf/XLgPVmWe2VZPgaUAg5DJB3vUm7Nl7fesHq6lsvROfby9cab4/t3fqNYXgLnCFSde7oWUi3PR6DqO5AROneMxWACQBvWZ2C2H20gLj/F7fyeKPnc6bT2nkFhXApkWW4f9DWKk+M7t8Z2AYRi41mBwBovvfQShw8fprCwkB//+Mdqi+MUqngwJUm6DKiSZXm3dOoellnA4DlSlSd+s5bH7cDtAPGh4WxtqqbdaKDHYiInIpadrXUsTs/jrbJiHiycwcN71vKXKQv56c5v+evUc3lw12r+OOkc/lKyjRtyz+DL2mNMjU/jeHc7uhAtsU1h1GrqOTszjeWlZdw7aRy/3LqDv82bxY9Xb+C1hfMG/v5icxEPTpnAvw8d4YKcTHY1NpM
e2TdVqbarm1w5izX1h7k6p5AXSnfy6zPmcP/Ob/jb1PO4YeUW/jb1PB7bu55Hx83m7foirh6dy7rqOtLNqe5fU/fga6rpu6bQMI7oWzkrKYtPqg9zW/5kfn9gC09NnD8gT//f3xZv4u7RU1l+vIQFqTnsa2skVdf3lry+p4s7Rk3h5dJdVq+p/2//Nb16dDeXZ45hc1MVo6Ljlb1PCl7ThLhk1tQf99trGixHoF1TNhYOtbZbfZ76nwF/uyaNFILJYhF1z4fXNDMxQ/m2PEjuU7Y2j+qeNsLCDFwElFsqaTQ0saeslDlnz+L14yv5Uc4Snij5nMcLLx74+9zR1dyYM4uVdfuYnZBHvaWcEFPfLL4mg57rs2fyduV3LEmbwJvHt/L/8hfyRMnnvDLrbKvXdFNkFuuq6yiIj6XNYKDHZGZkTDRrjh0V92nQfQoP0ZIXb+aIvtXdoZI3MMqyBdtbh8sARk8KkCTpKeBGoA1YeOJnp8d2gYbS49m0tDTWrFljt0y9Xu8wja8QslhHSVna29v51a9+BcCNN97Ixo0bVZPFFaT+iHSKZyxJq4B0K4ceAx4FLpBluU2SpDJguizLjZIkvQBslmX53yfyeB1YKcvyh/bKiguNkPdc6L35yPaCFAzXAAb9nbTAdwS6zgPtWQl0fQciQufWGbwmsiE6jcv/9zO++PWn7Fm+kwU/X8S066azfM6TAIyM6raax9Dozv159huizpzTjzNbAQ0nHEXrnbvhz9tlWVY+mpKLSJI0AaS9oLFyVAbMAA8C1w068Iosy68MysPm2E6W5U8GpXsE0Mmy/Li7Yzt/wZfj2enTp8tFRUV25VmzZg0LFixw/UK8gJDFOkrKct9997Fs2TLOPfdcVq1ahWT7DZHXZRmKJEk22zaveTBlWT7fhjATgTyg/21PNrBDkqSZ9L3hyRmUPBuodlRWhi7utAZeya0SxLSg0xGDQN8T6DoPtGcl0PUdiAidO6a3tQvZYiEmNRboC/oTonF9tUt/ZFlbxqU9Au1Z9hZq7wHsKrIs75OkEPqMyaGD1L7fZNnyLPCsnTysju2s8A7wOfA4bo7t/AVfjmcFgsGsX7+e559/Ho1Gw7PPPuuycakmPl+DKcvyXlmWU2VZzpVlOZe+h3CaLMu1wApgqSRJ4ZIk5QFjgO8c5Vnfe7oxWdUa49RnuOKpDn5bvElhiQSOeGhHkai7PkTUcd8jdO4Yc6+JjuPNJI/pW3epi9Vh6LDutRzM0Daj//tzR1crL2SQEVzjB3kiWIb+Rt9vcqonOUuSNGbQ18uAgyf+d2ts5+94YzwrEPSj1+u5+eabkWWZRx55hMmTA2sbL7/aB1OW5WJJkj4A9gMm4G5nIm4lhbn/NrW/k/DV5vCe4kynZmsqlDN5OquHu0dPdSqdwDMG35sbc2bZPOaIQKnf/oSo474n0HSuVv/RdqSe/HmjuOmtG4gamYyhvcep86ztiTm0XRkuBK6B6BnWvZgD3ssGD7N/WpKkQvqs1XLgjhNlujW2C2SG4zULlOUXv/gFR48eZfLkyQNrMAMJ1Q3ME299Bn9/CnjKlTzajI7f3jrC3wxNTzo/Jc51pIflx0u4Y/QUt8sJVhzp3tn6ZS2flXX7+GG2e0HorOXnL3XdV/QPrp1F1HHf40udK2lguPOSzhNKPyxCGxFGSKiG7uoWajYcHjhW3hlhcx0mnH7dnrQrjnD2mXN1iYsn7dlwNSxPRZ4IlhNrMfu9l3jkvQSQZfkqO8dcHtsFGkqMZwUCgFWrVvHCCy+g1Wp54403AmJbkqGobmAqQZQmXLG8XB2EelqWv+JIDwtSc2we82fc0bk7AyRX0vXn7yiP2QkD9c2aAAAgAElEQVR5TpXhLLbKCybD09a0QGeuMVDruC9x1bByZEh4qnN/aFN98bKyp7GD2q1HKF2+DVO34ZRj9oxLayjdrsCp98Gde6LEOc62q8ORU72YinkvBQKBAjQ3N3PzzTcD8Pjjjwfc1Nh+gsLA7LF4FFX7NDztkGwNLAKto7M3UNrX1sjY2CRfi2QVb+vVX/Iv0dcxKsr9/e6GE0rcM3+q4/6CPb0qYRSsrysnxpLrcj7+iLP6cMdbt+iN29HqQkmelMOGn78PHkSDD9Z2JdD6W9/T78WUQQHvpUAg8BxZlrnzzjupqqrirLPO4he/+IXaIrlNUBiYWslayG31CLaOzZqh2b/nmKNznEVJL2GwkhQW7ZNyfOnF9wbO1BNnrtFRHQ90/PF58lUd9yfs1cWh96i8M4Kw+Eg0YRq6WrrIOXc8E+9YyN6XvnW7fHs6D/S2QGCbk15M4b0UCPyFt99+mw8++IDo6GjeeusttNrANdMCV3KBS5R3Rtg97sy0KkdTv9RaNyoQ9CPq0UmELgIHVwy5kRdMQAoJ4cM73+WsayYz4bYFNO2tpHrDIS9L6Ryi3gUOsmwJnD0PBIIgp7a2lnvuuQeAv/3tb4waNUpliTwjKAxMkwjMdQqOjElH5zgyNqtaYyhptjAqVAwkfEmTQe+zsoaD58LRNdb3dPlQGs8JhoG9L+u4v+HM/YsvSGfK/Yup23aMmj1V/Hd/DbdNHsHEO89128AczjoXCAQCf+FnP/sZbW1tLFmyhFtvvVVtcTwmKAxMXUio2iKoijsGpaP8HBmZhdFpipYpcIyvdT5cjExbTIhL9qEk7hEMRuVgRLtim+qQBBY/sxRDWxfLf/oRyGAxWtjzWTHz7j2HsNgIDO2uR1R3pHNX2oFgq48CgUDgC9asWcPbb7+NTqdj2bJlSFLgTy4ICgOz09yrtgg+Q2lj0l459ozMLS3HgjIwhD+jhs79bfseX7Kiop6Y7Fy/vPZgHciLdsU2E25fQFR6HO/c+AZdTZ0Dvx/fVo4UEkLKlBFUrStxOV9ndO6MkRmsdbIfZ/peVyP4CgQCgdFo5O677wbgkUceIT8/X2WJlCEoDMy4UMcNvytTQP0RXxmWQ8u0paslaRMcnmuPQLwHauNI597EW4amM4NStbwn/fr2NyM7mAfyatZxfyYkPJRRl0+j4ut9VO+qPOVYzd4qzL1GUqaOpGpdiVMzUAbjrM5tbUmjdn1Uuq/xpK8N9HGGQCDwPcuWLWP//v2MGjWKhx56SG1xFCMoDMwmw8m3uc50DkPTKNUReMuoUsO4HFr2UNnfPL6V/5e/0GpaV/K1lrfAOtZ07muUMLZcHZCqZeAN1bc/TBlWezDvbfyhjvsjpqmTCI3WseWD3acdMxvM6KtaiM5OcCtvd3Sudj10t6/xJWr22wKBIDCor6/niSeeAOC5555Dp9OpLJFyBIWBGa+NU+ytozdxxajyt85pqOz/L3+hYjLaykcYnqfiTwNvW5uaO5PWkzJ9aeBZ07c3ZLCmn+G6Sbw/1XF/YsQFE+hu7OD4tnKrxzurW4nKdM/ADBSd+1ufKBAIBJ7y2GOP0d7ezkUXXcTFF1+stjiKEqK2AEpQb2hRWwSXKe+MsPvxZ8o7I3io+AuflBOoOvIGT5R8rrYINqlqjbH5UbocV373BFv6Vuq67OXjDd0FAv5cx9
WiyhJH5rwCDnx5ANkiW01Td7yNqIx4t/L3d50P1/ZeIBAENzt27OD1119Hq9Xy17/+VW1xFCcoPJipYe69uRW4z925V6otgs3pu94ow12UlO3xwuB6u+UuvvJkOtK3PQPQFx7dYETU8dNJnpSDNiKMI2tLbabpbushPD6SEK0Gi8m1bbv8UefCoBQIBMGMLMs88MADyLLMfffdR2FhodoiKY7wYArc4oWyj9UWYQBveTWVyE9Jr6u/exp8yWAPn7cMNk/07SuPbrAh6vjpdNa0AhCdEm0zTf60LFoP12IxmV1+qaW2zsXsFIFAMNxYuXIla9euJTExkV/96ldqi+MVhAdT4Bb+4MG0hhIBnLw5wPFkvak/ehrUxpsGm9C37xE6P53E5iqMnb2kjU9n70enH9fqtKRMGcHh97e6lb+3dS4MRoFAIDiJ2Wzm4YcfBuCXv/wl8fHuLW/wd4LCg9lobFNbhGHH68dXqi2CU7j6RlzNiIOO3uQ/U/q1KrINV4S+fY/QuRVkmZaSGnLOSLd6OHvaCDRhWmq3HnUr+18fXO1wvbsrXkbhjRQIBALbvPHGGxQXF5Obm8tdd92ltjheIyg8mImhsWqLMOz4Yeb5aovgEvYi+PrzIGiwbHfnnaOiJMMPoW/fI3RunZaDNeRfMc3qsYxJWQDU7yhza8aGu225P7ebAoFA4I+YTCaefPJJAH73u98RHh6uskTeIyg8mG1GvdoiDDs+r9+stghuE6hv2F8+tivgZA5k3qsqUluEYYfQuXXayxoJjQwnOu30KeFh0eGYe42Ye4xu5R3IbblAIBAEEu+99x5lZWUUFBSwdOlStcXxKkHhwYzWKjTgliR0SVHoEqPpaeygp7nT4SkaXSjJk3JIKMwgoTCdEK2G1sN1tB6upX5nOcaOHmVk8zPmJExQW4Rhx2Cdu7KnqsA9FqWMU1uEYYfQuXU6yhsBSMxNQl93aoTiUJ0WU7d7xmV5Z4Rft+WSJoSIlBh0SdFEJEUjw0Df3F3fbnPbFl+SNjMfXUIUkjYE2SJTvf4QRn1w9vsCgcB9LBYLTz/9NAAPP/wwGo1GZYm8S1AYmD1mg8vnhIT1BUZImTKC+IJ04sekE5UeR0joyRtu6Oiho7yRhp3lVG88TP2OMmSTBYDEcZmMvnoGIxZNIDSqz8XdWduGxWhmxAUTTpzfzd6XvuXQe+4FX/Bn9uvLydAlqS3GsMKWzq15NF01OpUIjhRs7GirICdCBBDzJULn1mkvbwIgMS+Jiq1lpxwLjQjD5Kb3EvyvLddGhDH6qumkzRpFyuQcQqN1VtMZOnpo2ldJw64Kyr/Yg/54s0/lDI3RMfWnixl1xZmn/G7s6uXYip3UbDlC6+FaumpEjAiBQACfffYZxcXFZGdnc/3116stjtcJCgMzKTOFiz+8l7C4CDS6UCy9JswGE4b2bvSVLXQcb8bQ0U2IVkNIqIbEsRmkTMtFqwtFtljoKG+ial8NLSv301HbRldTJ9GpMSTkJpE9NoUx18xk7A1zMRtNAGhC+9Rm7OzlwBf7Ofi//dQVV9Pd2jcoD4sMI3V8OgvvmseZD12MvqqF6vWHVNOPN8j04YBEowtlzh+uJkQTwv5/radhR7nPyvYnXNG5p9NoXTk/WI3RkRGJaosw7BA6t05qZx3djR1MvWIiu5fvQDaf9Nx1t3YTkRxN8qQcOOJ6P+PLttwWmnAtWeeMJee88WTOK0AbEUbbkXr2fbqXugO16Ov1dDXqQZKISo4mOiWa1HFp5E7LZsLtC5j4kwVUrS3h4NubadhR5lVZY/OSyb9sGqOvmk5otI7NL6+neMUezEYzkYlRzLt+GqOumk7B0tkAGPU9XBfzpFdlEggE/s8f//hHAH72s58RFhamsjTeJygMTKPJTPXBerrbujF1G9GEadCGa0lMjiA6J5H0s0aj1YUCYDGa6TjexK4PtlO26RiVReUYOu17QEMjQhk5J5/MydnIFhmL0Uxnk579n+61eq6hy0BlUQVv/+Q9bvv4ds58+GLqth1ze42MP9Jl9s0UIEkbwtw/XkPm3DEY2ro5/7UfUb+znO1Pf0br4TqfyOAv+ErnrhKs03U7TL1qizDsEDq3zfEXP6Xg19dx2WOL+OS3Xw38vumldYxbVMicP1zNF9e9RIbJNU+emu1KTG4yo6+aTv6lUwmLjaC7sYM9H++m+JM91OypciqPqJRopv5gOmcuPZPshePoON7M8W+Kqfh6Hy0HahSRMzonkewF4xh54UQSx2ViMVs4/k0x3764iYaSk/1QW2Uryx+qIvQ3X5BamE7ymBRSClIVkUEgEAQuGzduZNOmTSQmJvLjH/9YbXF8QlAYmK31rax44EPbCSQI0YZgMVrcyt/YbaT0mxJKvylx6TyL0cLnv1nJdW/dzJT7L2D708GzibjBYvJ+IZLE7N9cSdb8Qr58/DOKV+xh4vemMv+u+cz78w/4YumLmLpdnx4dqPhE5x4STMZmbwDoO9gQOrdN49c7seSNZOwNc5lyqIld720HwKDv5aP7P+T6d29h9m+/x7r732FkZJfD/PqfVbXalbE3zGXqTxdjNpqo/GY/m9/bxfFt5S6vq+xs0LPh/9aw5ZUNjL3oDKZeMp6xP5zD+JvnU7n6ANv/vNLlaaqacC0pU0aSPnsUmfMLiMvvMxKb9lfxze+/4OD/iulstB2jwdhlpGrncap2HnepXIFAEJz86U9/AuCuu+4iKipKZWl8Q1AYmBrJwUJZGbeNS0+pLKrgwJsbGXfjXHpbu9j39zUgKxeYQJcYRURKDJ117RhaHQ8qlCIlzPsbw069/wJyl0xm7bPfsPuDHQDsfGcbDSV1/ODNG5ny08UU/f5Tr8vhL/hC50qixNpQNcnUxaktgss4mtqslP7LOyO8ci8DUee+pPm1T6jKTeb8xy6ks6mT8k1HMfWa6Khr59iGI4w+t5DCH8ym55Nv7eYzuJ6o0a4UXncWU3+6mIqv97HiN1/Q1ex532XqMbHv493s+3g3uvgIJn9/GnPvms/F/7mXA29tpGrNQVoP1do0YMPiIkifNYoRi84gc14BmvBQzEYTDTsr2PbuDkq/LaG9WqynFAgErrF//35WrFhBeHg49957r9ri+IygMDANFv/2Yn369DfoEiKZePtCUqflsv8f62jcW4mp0/3pYHGjUxl7/VxGXjRxYE2oqdtAR3kTLSU1NB+spnl/NS0ltVgMyr+hLu2sYnRUluL59jPp7vMYe8NcSj8qYuurG085Vrm9gu/+uYVZP5pD+7EGDr27xWty+BPe1rkv8GRtqK+N0+KOGsbHZPi0TG9vQaNk/t4IDKWGzgMKi8ymR//DBf+6jSv/7xqrSSIz4iix8wJg6H3zZbui0YUy47FLybt4CpWrD/DBA//FYlL+5W9PazdbX9vI/s/3cslji5h4+0Im3r4Qo76H5oM1GPU9fUtWQiQiUmKJTIkhKjMeKSSE3pZOdr6/nWMbjlJZVI7RzQi9AoFAAPD4448DcMstt5CaOnymzAeFgRmh8fN9AWVY/tCnTNhcwaJHF
7PwxZuwmC20Hamns6qFnmY93U16uuva6axto6u2Ff3xFiwm86n5SBKZc8dQ8IPZZJw1GmNXLzvf3U5lUTkx6bHEZsaTXZhM5vwC8i/v25TbYjTTWlpHV307ps5ejJ29GPW9GDp6MLR10XywmtbDdQPRcZ1lRnyhUto5BSlE4syHL2bM1TMp/aiIj3/1P6vp1j37DZl58Ux78EK6Gzs4/nWxV+SxhSZcS2xeClGZCbQcqKazptXrZXpL54FC/8DYV4bm2UmjFctrOOxdqsT9UVLn3sBbnltXMHX28u0d/yJ74Ti0ulA04aGYug20HKql9XAthrbT5bNX/3zVrkSmxTL/2etIKExn/XOr2fz39eDlXUY6atp5954PiU77ipwzR5A9YyTZY1OJSo9HcyIuQ3dDBxU7q2j+eA/HNhyhdl+1X2x/IhAIAp9t27bxn//8B51Oxy9/+Uu1xfEpQWFg6k2O96scTKWV5Nk+mBK97+PdHPryABlTssmakk3+1EyisxNInpxDeHwkUkjIQFqLyYy+soXO6hbMRjOy2UL86DRiRiTRVd/Our9+y673i+hpsx6gITothvQJmWRMzCJ3UjqRqbGERocTGhVOaLQOTdjJW2/qNtC0r5LarUep3VxK88Eah9N4v24sYmnmecoohr5gDznnjWfEogkkFKSz5dWNrHv2G5vpZYvM8gf+y/X//CFnPXkVPU2dXo0eGBKmJXXaSDLnFZA+axQxucmEaE7er7ZjDVSvL6Hk35vpbuywk5P7KK3zQMVXg/yPanZxZ+7Zbp8/HIxKa3hiaPbr3FdTfZ1lsDyerjNWwhDvadJT+p9tTpXjCF+0K0mTcpj/56VowkP58M73OLqu1KvlDUVf18GBlcUcWOnbF5ECgWB48+ijjwJw3333kZUV2DPQXEWSFVwPqBbZukz5vrw77KaxZlRazUultbch2hCiUqKJzYgjLiuexPxkkvKTSc6KJUQTgqSR6G3tYut7Ozn01QGPpxVpwjREp8SQMSmTzCk5jJo5goSxfVPTepr1VHxdzMF/b6KzqsXq+WbZgkYKsXrMEZImhPjRqSRNyiFl8giSp4wgOrNv77uG3RVse28n+/6726m8dHE6bnrnZiJSYlh3/zs07HR+C5PY/BSSJ+WQPKlvr7XO6hb0VS10VrfSXd9Od0MHSZNyyL1oEpnzCwiNDMfUY6RhRxlHd1bTeLie9pp2MqdkM37BKFKn5yKbLOxe9rVX9j71ROfBiLeNDJNsQeuGvoerYWkPZ+5VeWeE23XcW3XBV/fSWfm9IY9X2hVJ6mvjJ2aTPGkEIy+cSFddGx/c+T5NRxqVLSsAePjg49tlWZ6uthwC/2f69OlyUVGR3TRr1qxhwYIFvhHIAUIW66xZs4aqqiquv/564uLiOHr0KImJ6mzD5U29SJJks20LCg9mk9H+9MShxmVl58k1FdlRoaelVcPItJgsdNS001HTTtUO70eeMxvMtFW10lbVysH/7edbIDIxktw5+Uw8bzSjvncmo78/g7rvjtJV20ZPs56e5k56W7vobenktaIPudQy1empRKHR4YxcPJERF0wgaUI22oi+PYC6Gzpo2F3B5tc3c3hVCfp617x/PW09vPPjd/jhP6/n3Fdu4ciHRex7ZTU9zfbfKMz+7ffIu2QKwMA1Zc3vC+xwWhnNeopX7KV09SEqvivD1HPqmtaaPVVsf3MrcdnxXPLrxZz50MUkjM3gyEfbadpf5fL0Y1u8WvEpd4y8XJG8ggFvezKfPvwlvyy4yKVzhHFpHWf14m4dVyp6sVr3T816o1S7EpUZT/qsUaTNyidtRj66hL6OtKelk/Iv9vLZH1bR0xo4Qb4EAoHAXT7//HOeffZZAH7zm9+oZlyqSVB4MNPCsuRr03/iMN1gw9IaQ43Nvt/cFiugiU6J5sybZlMwN4/wpCh0CVGEaE+N1mvqMdJ+rIHG3RVUbzhM/fZjmHtPGl+acC3ps0aRs+gMcs4djzYijNbSOko3lVGzu5KqXZWKReULiwzj7AfOZcq1Z2LuNXHgjQ2UvL3Z6jYm4390NpPvPp/v/rGZ3cu301J2Yt84CaJTYojNjCM6NYaYtBhaKloo23jEeY+xBPPvW8js2+cihYRg7Oqlfns5VWsOULnmIL0trk3nDkRCtBoiM+L61ofpQtEfb6bXyxGO1V4X5y+GZWxeCnkXT0YbGQaShMVoprm4ivrtZV6bui0YnkSkxpJ6Zi6pZ+aSNiOfmJy+AVRXfTtHNx2jfMsxKrdX0FbpvfXptmYm+UO/PVi2ZceFB1PgHMKD6T7+Istf//pXHnjgAQCefPJJHnvsMSRJUk0e4cH0gDaz9Wmc4NiotJZ2sKHpaGqtP3Rk3kDfoGftn1ex9s8nfpBAF6sjMiGKyKQoSmKOcPGMJYwYn0r+5dMoWDobU4+RrppWetu7sRhMA55KQ0c3+z7Zw57/7KS2WJmNr4di6DKw6ndfsOPf21j8swVMuus8Cq6dxb7X1nDkw+0DAZOyzx3H5LvPp2zlbtY88/Wpmcigr+9w2Ys6NI/1z62m6I0t5MwYyYhZuRScM5qs+Zcz/dFLqV5fwndPrnDL0Hyx7L/clXuF+7L5gNj8FOb/5QfEjkwe+K1/n7vD/yny2jpZbwQA+m3JSn5duMRuef5CyrRcxt00l6z5hViMZoxdvSD3veTRXj8HgI6KJuqKjlH33VHqi4459PKrQSDU8WDDWZ2HxuhInphDxpzRZMwZQ2xu3zNu6Oimfkc5W9/YStnmYzQf9XwKrLNLWuydr1Tf7KksAoFgeCDLMk888QRPPPEEAM899xz33XefylKpR1B4MBO0mfLCuB85lbZMrjvtt1wpzWZ6a17N09M4VXTQognTkDMjl7x5o4hJiyE+QYc2KoyKPTUc/vYQx7eV+Xwf0swp2Sx6cCFp0/P6pvc26TEbzMQXpNFysIY3rn8Ts8HsOCOFSClMY+yF45lx82yMHT3s/9d6jn6yA1OXf2+x4wrps0cx789LMXUZWPd/a+lu7cJsNDPyrDwmXTGZsNgIGvccZ/OvPkR/vNmrsqi5ps2XRKbFMvXBixhx/hn0NOsperuIne8W0d3S5zGWQiRSx6aRM2MkBWeNJHVaLmExOgCaiqs4+skOyr/ci7HDerAwwfAgfkwaGXPGEJESgy4xitCYCGSzBYvJTFhsBPGj0wiPjwT6Zq7UFx3j4IZjVHxXTkNJncdRVz0x4uwteXGnb1ZKlsF83Pw74cEUOIXwYLqPmrI0NjZyzz338P777xMSEsLPf/5znn76aVVkGYpaHkzVDExJku4F7gFMwOeyLD904vdHgB8BZuA+WZa/dJRXeEikfHHCgwPfrRmRrmLP6OxncGc23IzMD6o/5prMK9UWwyG5c0dxxmUTCY0MQxOmwdhp4Jvff0FnozqvpVMK07j48QtJnToSc6+R+u1lVK0voWFnBR0VTX17s9ng5fJP/HYNZtbZhcx5+ho6yht597Z30TfoTzmu1WkZf+lEFj54HiFaDUVPf0bZ584FclKCwQans0alP+s7
IjmGvMunMv6W+UiSxKaXN1D0xhZMvfb3vJU0EmnjMvrWWi8ZR0JhX2CvroZ2Osqb6Kpppaeli97WLgxtXfS29f3fcqDG6nRzpfFnnQcb4QlRjLpiGjHn55E/rm97GKO+h56WTgztPUghEiFaDaYeA5X762kpb6ahpI7K7RUO65ktPPdMOj8jqb9/dqVvtiefK2UPRRiYwY2S41lhYLqPWrJ89NFH3HnnndTX1xMZGcmbb75JUlLSsNCL3xmYkiQtBB4DLpZluVeSpFRZluslSRoPvAvMBDKBVUCBLMt2XU2R2hR5bKx3jB1nDE1w7OkMNgPULJvRSBrHCQVWyZiUxdiLzqBw4ehTppN21rTS3diBqdOAsbMHc6/pxMdId7OengY93Q0ddNW30VXXrrrnKTRax7SfXUj+ZdNoPlDNv299224gj5j0WK768xWknplL6+E6KtcepHp9CW1HGzB19vpQcsf4W9ReXXI0mfMKGLl4Imkz8pBCQqhce5DPn/yKtir31rmlT8wk96x8EkYmkj4qkcjUWMLjIweCcPXT29rFwbc2cuj9rV71uvubzoOR2LwUxl4/h9wlk9CEh1K/o4xdnxZT8uWBAc+3krhqVHpiyA3FmRlI3pSh/2X3zpZXhYEZpCg9nhUGpvv4WpbBXkuAc845h3/84x/k5+cPG7344xrMO4GnZVnuBZBluf7E75cD7534/ZgkSaX0PZyb7WVmtDjuwaos+20eywoZb/OYI29ovwE6tEOyFp3WHfzVMP2q4VsuSl2kaJ6u6Mhf9eIsNXuqqNlTxeo/fkXCyERSx6WTmJdEYm4SkYlRRMeEokuORhMeiiZciyZcS3hc5Gn5GLt66appo7Omla66NnpaujC0nvQ8Gdq7Mep7sBjNmA1mJI2EVheGNjKsL+8wDZowLWHxkegSo9ElRBISqoEQqW9N6vFmWg/X0na0AUN7NxajGUkTQsLYDNKm51GwdBa6pGg2v7yeTS+uw2y0P+24o7adN258i8nXTGPykvGMv3keE358Tt+16HvobujAoO/F1NXbt9ffR9u9ur+pPZbXrFZt31FtZBgJYzOIHZlMbG4yKWfmkjS+bw+tjuNNbHppPQc+20dzWZNH5dTuraZ2b/Xp5eu0RMRHEhEfQXRaLGf9cBqT713E2BvmUvT0Z1R8tc+jcm2hps6DnejsBCbecS65SyZj6jaw5+PdbH9jK29/957HbbmaRmSZXGfzRbC1uAqeyqDEDClBUKHoeFbg/8iyzIcffshdd91FQ0MDkZGR/OlPf+LOO+8kJES8IO1HLQOzAJgvSdJTQA/wM1mWtwFZwJZB6SpP/GYXbcipU97sGZPWcNf4hFM7m8GdnDtTeayh1rYpjjgrYaZiebljfA8+xx/14wot5c20lDtek9gh6cnKyCImLZbotBhi0mKJzYglNjOO5Ow4EsdnEhYbcVq0X1cwdRsw95qQZZkQbQhhMac+W2ajCSzywHYuTfsq+c/dH7gUvEm2yOx6bzu73tuOLk7HyNl5xGXFE50WS3RKNOExOqJjQsk4azS5SyZj6Oihs7qFQ+9vpezz3VgcGLFKsSjZtw6HqIx4MucXkHV2IanT89CE9TXPph4jLQdrWPvsNxxZc5jGw/UOcvIcU4+Jjtp2OmrbqT9Yx9G1h0mfkMlFv7yAuU9fQ8554zn0/lYadji/76wz+Frnw4H4gnTGXj+HkYsnYjGZ2fz3DRT9azPdJ2YaeNKWO9N2K2FMOjLqHMVWcEUGYUAKXETR8azAf+nq6uLtt99m2bJl7N27F4AFCxbw+uuvk5+fr7J0/ofXDExJklYB6VYOPXai3ARgNjAD+ECSpHzAWhxfq3N4JUm6Hbi971sIxwybkWUTsmxBE6LDaO4gPDSJbkMNUeEj6eguJTZyDG1dh4iLLKC96zAxEaPp7C0nIiyDXmMToZoYzJYeJCkESdJitnRTrumm19hARHgWnT1lxESMGsij/6/c00aabgpbDV8RG5pDl6mR0JA+b1OcrCVek0GdsZSR4ZMp6d7IpKgLKNL/l+nRV1Ck/y9wBbs6VzIh8nwO92whJ2wCDcYyYjRJxIabONRlIlYTTy+VnBEzjs0t33FByrl8WLOCazKv5DSR55QAACAASURBVL3qD1maeRUfVH/MVRmX8VXDt5yVMJPijgOMiMim2dhKqKRFp9HRYGhkVGQeO9v3cHbiHFbWf8WV6ZcO5NH/99O6/7Ew6WyK2nZSGDWG6t4aYrTRAHSY9NT3NhChiWB63FRWN63j0rSLTsvj49pPWZJ6AeuaNzE1dhJHuo6BOZleuQeT3HdNdYZK8iPGsVf/HbPjzuWb5hUsSrqSr5o+5IKkq/i66WPOS7yMLW3fMjF6Jke7D5AWlk27uRWtpCVc0rFP30h2eB4lXXu4JN39a8oMz6Ck87DL15QSlkyPuQejbCIxNJ6Kbu/cp9SwFD6v+tLuNdUYaklNTCE6IYaQWA0jU0fQpG1lTNwojhiPMjHqDNZWb2CGbipfV33L/Og5rKpZzbSQSXxV+g3jtAWnXFNEUhRnTBhP6Ihw8pJzadQ2kx+dxycbVpB1OJV/7H3Ts7rHpTzxz99ZvaY9vcVcd/11ROfHMmnWeGb9+goKb5/Pjn99y1uvvcHsiEI+qdvIDdkX8ELZx9yde+XA39ePr+SHmefzef1m5iRMYL++nExdEl3mHgwWEylh8ZR2VjEjvpCvG4u4OmPhwH6A/VE1/3bsP/ym4BaW16xmUfJ0trWWMDoqiwZDK2EhWiI1Oqp7mhgfPZJNLfu4OPUs3q5exY9ylpwmz1uVX3F52lzWNu9mcuwoyrvryB+VT9aEPNKn5DFpwXSS8vqay6ojFex4extvfPYWE1oLeXX7P7k243uq1r0r0y/lb18to+FQHYk3Z3DdrUsZsWgC+tpWNn2xltYNZaxft55oSzgArUY9uZHp7G4/wjmJk52+T8tr1rAoZbpL9+nl8k+4bcSlXrlPZV21xIdGe3RN7tQ9T6/pganXsyOnjnvuuof0mfkYu3r59J8r2PHadxysLPFaWx4hj+ZY73bGRSxgZ+dnnBl92UBft12/gqlRl3Cgew154WdSYywhQZtFl7kVjRRKE130WFqJ1mbSYiglVTeJ6q6t5ETNo6xzNblRCwf+VnZtJk03hWbDodP6XKOli0htMluNu0kMK6CuZxfzovr62uSoswbyON65gczIWdT37CEhbDR6UzW6kHjMsgELJsJCYug01RMflktj734ydNOp6FrHyKgFA3mUd65hROTZ1PQUkRw+nvLeLVbHEYLAxpfj2bS0NNasWWNXHr1e7zCNrxgOstTW1vLf//6XlStX0tHRt8tAQkICN954I5dddhkVFRVUVFT4RBZ3UEsWtdZgfkHflII1J74foe/h/DGALMt/OPH7l8BvZFm2O6VAq4mUk2KmeVVmZ3Dk7bSHo7We7q4lOTWPk/97GmyhtreS9PBszzIZkMW9N9zO6sSRh9Of91IbzPHuSnIilNF5IDJyTj4L7phD2ox8elo6KXl7M4c/+A6j3jv
rUEs7qxgd5fkL59BoHfEFacSPSSNuVBrxo1KJG502EM3V1G2gfnsZB9Yd5dj6Ix5Pe/UFWp2WwsXjmXRBARlnjUYbEYapx0jDjjJqtxyhYXcFLQdrXPY2K6XzYYUkEZuXTMqUEaRMHkHy5BHEjEgC+vak/O6t79j9wQ56260/J660K7baSlfb8ED3Ero6S6q+baNYgxmkKD2eFWsw3UdJWWRZ5ttvv2XZsmV8+umnWCx9OyHMmjWLe++9l6uvvpqwsDCb5werXobij2sw/wucC6yRJKkACAMagRXAO5IkPUvfougxwHeOMpNl326BYQtrnY6zRqe9DjdXSnO5A7e+5sT58+2Ffq/sNFLZ24jJ5FwAJG/haN3ryXTu5u/eeUox1MBtNrYGpIHpjB6dMebLNx3ljU1HyZySzcK75jL5nvOZeMdCmvdXU190jNqtR2gtrR+Y5ouHL88aDK1OGTthcRHE5qYQnRVPZFockelxRCTHoEuOITI1hsi0uIG0hvZuWkvrKP5sL3X7a6ktrqGxtN7n2/h4iqnHRPEneyj+ZA+aMA0jZuWRNy+fMfNHMfWBCwEwG0y0lNTQsLOc+h3lNOwsdxiUylmdD1c0ulDi8lNImphD4rgM4kalEZuXTGhkn+e4p1lP4+7jbH9/B1U7K6nZV+WwbtlqV5Se/urvRqWrRqNAcAJFx7MCddHr9fz73//m+eefp7i4GIDQ0FCuu+467r33XmbOVG55WLCjloH5D+AfkiTtAwzATXKfK7VYkqQPgP30hXu+21HELQBJsjYTwTmaO0tO+Z4YVWjzmK109nCl07JljLq6dyfY7vizo0JdNlatpW+iE5OCAwZno/XaQ8nAEeC519hZA9j2+ad+bzNoB37zN+/qYFwJ9d+vE2trjW2ts63eVcnbt79P6tg0ChaPZ8zskYy9YS7jbz37lPPNhr7ouxaDGYvJjMVoRrbIyGYLZoOJIx9vp/SjImST9QF4WIiN5lGSSJuRR94lU0g9M5eojPhTDve2dNLd2EF3o54jpY00HW2koaSehkN16Os6bCsnQDEbzBxbX8qx9aV8+weITokmY3IWmZOzyTszm4Klsxl34zwsZgvN+6uo2VxKzaZSmvdVnrZ/ok2dDyMSz8gi96JJA/tRhsVGoEuIJDw+ktBo3UC67oYO2o7Us/s/O6k/UEfVzuNOreUeSqh0qs6V2qrDXwxKYTgKvIyi41mBOhQXF/PSSy/x5ptvDkyDzcjI4M477+T2228nLU1dh0ogokpvLsuyAbjexrGngKeULM+eoehuWkfpnDVAB2OrI7RmeLrSeVsLdmAtOJEjL+rgNM1Uo3cQvdeVKcOuDkaUMEgd4WgwNdhgdGbg5Wngp3Dp5ODS28GfrA0y7RmAjvM79dr7Iz8OjvLYf03W8rVmWNcfrKP+YB0bnoPQyFBGzsojJiOO0MhQtGFatLpQtOHavki5oX0fKUQiRBNCcnYs039xCQXXzmL386uo23YUo/7UbVIiNbpTvuuSohl15ZnkXzGN6MwEDO3dHNlwhLp/b6OxtIHWihbaa9sw9bi3R2CwoG/Qc3hVCYdXlbAW0IRpyJiUxcjZeRSenc8ZPzqHibcvpKelk5qNh6krOkbb4TrajjUQqdc5zF8tpBAJbWTYafXEU8JiI4hIiSFuVCpjrp1F6tSRmLoNfRGV27sxdPRQX9ZCV0snnY2dtFY0U727io7adkXK12lObVes4S+GpTAWBf6Gr8ezAuUwGAx89NFHvPTSS6xbt27g97lz53LPPffwve99z+40WIF9guJ1sdlicMmI9AXW5HHH6ITTO1VX13q6E4HPXpoqy/4TgQvibJ+A53I7K89gfGF49qO0x9RW3v3GZoupkSxyB6XxWvE2ZBr63bNBZ/9vp04Bd+Rht+EF7jRy7LNDVg/ZMsRHLyzg/IfOZ/5ffgBAT5Oe9rJG2o7U03qkjoZDGkakjSM0OpzUabnkLDoDTaiWms2lfPuXbzm86iBmg3gh7QizwUxlUQWVRRVsfH4tujgduXNHMeqcMYyeP4a8S6YAIFssnFfdiKm2k87qFjrKm2g72kDb0Xo6q1pO83baIywugtiRyYTHR2LqMWLqNmDs7MXU2YtB34vFcOIlgAwhoZq+rYB0oYTF6AiN6fMYxoxMJi4/hZjcZCJTYglPjCJEE4KhvZv28r56UrvlCDWbS53ajzY8IYqMOaNJnz2KmJwkdEnR6JKi0epO1ml9VQvf/P4L9n64C4MX9xvtp7ITDukbCZNzh/yu7prKYDEk/W1cIhAIoLy8nFdeeYXXXnuN+vq+qOzR0dHccMP/b+/eo6Qq73SPf39VDQ10IwgIopAgKkQTcky8Z7xGE4lhEmc0E1dwJApBAcEcjWdiPCs5ThKjYaIm5IKDYaJGx6UyalaiwcuSaNYkEqJgBLxFzdiCiApKN9B0db3nj9oFRVuXXVX7VtXPZ61eFHXZ9davd+/9PvXu/e5/Zs6cOUyZMiXmFjaHpgiYxSfrSp5SO5tqg2e1O99iwa5Y+PNzuZb8cwaky4fLYqoZoa2Vn45OlCE0CPnOXkt2fF2vD4PfjmW585P3WkZXhdH0Ln+/u71Hl0s86bEXePmJlzjohIMZOXEUIw4aydhDRvHBz0zh0KHHcDR/v/upu7bt5On/XMXTd6yq6TBE2WPnuzt57oG1PPfAWixlDB+/L6MmjWa/Q0czeFwbH5gwmv2PP4SJn9szcVtvdw/vvfIW7726mWwmi6VTpAakaRk0gPSgAbl/W1tIDWyhdZ/BtO4bzPB+18atvPfqW7y+bhOdmzvZ1dnNsHHDGXvwSMadehgHn3Wkd+jvhtz1Yl/aRNcb7+bminSOIfsPY8RhB7DvYQew76TcJJQ73+5k60ub+NufX6Nrcyfb3txG55vb2PbGe2z8y+u43mAm3vP7BdS41oMKXhP9SGWzhMlCCpYiyfP444+zcOFCHnjggd2T9kyZMoU5c+Zw3nnnMXTo0Jhb2FyaJGBGPxNukIIc7SzGzw680nP6Pt7ds5mW9AfqalepZQcZOIsJe/QzyG/zC9v02q5nae8aEchywziMzc96ll/XR7RNLvp7z7erVCD1+wWCn8Obc4fkZvnrihf564oX93q8fcxQ1g18gSMGTGFXZzc7tmynt8brb/o9nLlvIKj3EOiglxcGl3W7rwP74sPP8fBbj/GpUacCMLBtICMmjmLUIfsx6pDRjJs8ipEfGQcpw2WyZHt6yezsoXdnD+9u2UmmO0Nvd4buzm7eeeVt3nn1bba/1UnL4AEMHDKQgW0DGdjWSmt7K6kB6dy5+wbZnl56dmbIdPfQva2bne/tZOe7O9j6P++wq6v0KKKljLEfPZCJJx3CwUePZ/yph3HIP75/Mr2dW7rYsn4DTzy4npcff4lN6zcGusuq50iGjq4e1u9YzWGDT6r8ZJovVJabgyGI5YlI/F555RWuuOIKli1bBsDAgQM555xzmDt3Lp/4xCfqmsdFSmuSgBneypHJbA1kOS0twys/qUDhjirIsBmUwa3hzfT4enZd6CGzmG
Lnpdby2iAVHkZ66KDjanptkPx2DCt1tAqDpt9l+xlhB5+fu2vMXud99tW5aRtjsiN4J/VW5WWVUNjxryUEBH0IdLnlJSV8njTiE7tv7+raxRt/2cAbf9kQY4tKc1nHhtUdbFjdwe+9+9r2a6dtVDv5/sr2LdvZtrG2cyXDPgQ+/yVMue1KkNuQpIbKUo9Vs99VsBRJns7OTr73ve/xgx/8gO7uboYMGcLXvvY15s2bx+jRo+NuXtNrkoBZ+xT/QQXIat6n1rCZpKDZtfNVhg4+OJRlxxEu+0rKDIh5r7pNvNb1e8a3nVD3ssLu6FU7qVap9brcY31Ve4mgV92mgsNti5/X+dg7D3HqiL8v+lizCepSMvV64M2H+If9G7fmXZs76drcWfPrwwyVpUb1n93+CEe0nbn7/802SlkojAn/RCRZstkst912G1deeSUbN24EYPr06Vx77bWMG9d4l3prVE0SMNNlH40qRPpVa9hMUtAMK1xKabWEyyR28vryO6IA1a37lc753d2RLnFe56GtU+no6ilzfdXazm2t9/I3cSl12ZggNXK49COqibmqWTeHDzkykFCZ1G2NgqJI//HUU09x8cUX86c//QmAY445hh/+8Iccd1x1R4BJ/ZokYGYSFyL9yre7lqAJwYXNakaLAN7d/gLDhkwK5L3zkjBymWSvdj3GgMHhTFBU7e+/3HLC5Hf55T5LYUe41HmdE2wMqzrv46j2swKfJMnv8pIcRMO6HuudG5Zx7gFnB7vQGEV1mGs1+q7vr3Y9xoS2U2t6/6hCpUKiiJSza9culi5dyh133EFvby9jx47l2muv5bzzziOVSsXdvH6pSQJm48tktlZ96CwEFwyqXZbCZXF+ZsotNalRpc5aPeHSTwet3HP8rhcj2iaH1hn024Z3up73PdrfN2zmveo2Mart+JpGdoKaLKpYeEha6Ax6VLPRw2W1gbJUQKz2GruVlL3GcZXhMuqRykYLl436ZbdIo3r66af58pe/zDPPPIOZcemll/Kd73yH9vb2uJvWrylgJkitITNIfjvmQYxgJj1UBtmRKrcsv+9Tbc2D7JhVc6hqsceqbUs9X5oEcb3ZA1OH1zyyU2pENAhhXXImiOBaKlxVEzzDGMGM+nqx5fj9/YUdKvd6XhXrucJlcQqVItHbtWsX3/3ud7nmmmvIZDIccMAB3HnnnZx44olxN01QwJQSKo1m1RoukxIqk3q+UDnlal5LR6xcp6jSFx3Vng+chPOGq/F6dh0DBo/ZvZ7Uu96GGTqDUC7Q1Bs+qwl4Jww7O1GBsB5hXn+2nGpH3f2Eyzi2l0kLlwqRIsmxZs0aZsyYwZo1awBYsGABU6dOVbhMEAXMhIh75LIa73Q9T29vJ+l07vCDpIeHejpHQXdy6qnV5vf+vLvmtaimg9T3uaXWzyAP0Q5bqd9lqfZv2/EyQwdPBPwd+lytvkEgSYGzkJ/DOIPyxJYHOXHfzwS+3ELVBL9KnzGuEFlMuWBZ6lBwgI7tf2DckON9vbYUP9vJRtlO5ClQiiRPJpPhuuuu4+qrr6anp4eJEyeydOlSTj75ZFasWBF386RAkwTMdKwBzW9nPCzVdPKrCUzFdrD5z5ZKDSm6zDg6EbUGyHrDY70TNFWrsOZ+BNlBKjfzcdAXKw9LqfNDS62/ba3jKy6zXMe9WoXXPG0ElcJVLQH0qH1Oqvt9g5SkAFlOqXBZbNvYd1R+zKAjfL+2GL/bNL/bibhHLuvdbhbbH2QytV9PV0Ry1q9fz4wZM3bPEDt37lyuu+46nWuZUE0SMF2s756E0cegD5Gs/JoskJuZq/Dzh30plaC+TS+n2rrU0yEp3hkptbw9NY9TpcvsBNFBjCukFq6/O3o20dbq/5pZQR9O2yhBs5RaAui6rqc5cp8TGibYJVmlbWX+8a7ujqrW80L1/K0HGSTLfRlazzL8SML+X6SZ9fb2cuONN3LVVVfR3d3N+PHjWbp0KaeffnrcTZMymiJgplOtiR01CVNUobI4e98y+wbNYr+T17Prqu6Al+so+alBkg91qq5tVvkpEQtr9D6sUXG/s9y+0/U8zmVq6niHcd5mo4fNYoqFyEHuIIXLKvUdvaz2iI7Wln1ret+kjzSGvd1XsBQJV09PD/fddx8LFy7cPWp5wQUXcMMNNzBs2LCYWyeVNEXAdC4bdxNiUc0lIYLf2Tr6Bp6+QbPe8/NKdZRKfeYkB8lgvL/mSRNG4Ax7VLwU5zJ1rcNhHD4LwYbNpJ0HurV3I8NaRvt6bi2XkMkL63OWvRxICO/ppwaVvqzp6e2iJV3dNWaSHi7DVO02bUTbZN58V4fIivj11ltvsWTJEn7605/S0dEBwP7778+SJUuYNm1azK0Tv5oiYJolu9NdSdA76yQFrWId9FquA1msRkn6nFJcLb+jchMKFRPG+cc5qb1eV++XJUHNoFxPsApi2WGG0EGp0ufSBPm5g5zVN8zfR7UKt6HlzjcuXJdTlqxrq1YS13a/li/L+uORVSK1Wr16NYsWLeL222+nu7sbgMmTJzN//nzOP/98hg4dGnMLpRpNETCrUW+Yq2WHEfa5JknT93qelUaBFCylUKnfb7XBM2hBjsgn5XI9tYgqhEYd3KoZJU5CqCx3aGzcI4xhiWPbX+tRGAqXIpVlMhnuv/9+fvSjH/H444/vvv+zn/0sCxYs4PTTTyeVin/uCaleUwRM594/yU9YO9godtzNEKCqDZmFFCqlmEqTCwXv/YfeB3VZlmYJm6XUGsDe7t3Ajpb4ZwRMQoAsp95wWbgeZ10457z62Wb7/TtupFFLULgUqWTz5s3cfPPN/OxnP+O1114DYOjQoVx44YXMmzePQw89NOYWSr2aImCapRr6G9vGDE+VD0suFjLz+u6A++95ldVo7EPBgxRF2DQrvnkM+pzQUoeM90dDWkbF3YTEqyb8Vrq80Ii2yQwI4fxLv9ttbd9F+pdVq1axaNEi7rzzTnbt2gXApEmTmD9/PjNmzNBhsE2kKQJmb7abltTeO0ntuMLmb8KZviEzr1InRb+/YpI/yU8zcW5XyZAJ4U0+VO0soM0USN/reY3B6RFxNyMU9Vx+xu91Lgu3q323ocW2xe90PU9rS1vVk/yU02ijjaXU8zka+QtvkaB1d3dzzz33sGjRIp588kkgN3fKtGnTuOSSS/jUpz6lw2CbUFMETMgqkETOf9ApdhmTcs+TUhQuo2Q2yNfz4prlNq9UIG3E4Dli4KS4mxC6IA6/LfY7Lxcuyxk8ILhJm6Lehod9qLz2SSK1e/3117npppu46aabePPNNwEYPnw4M2fOZO7cuUycODHmFkqYmiRgSvSyQLqqV2hnXa/qay61y2a3k077Px8wqPMzg9KIh95u2rmacUOOj7sZiVUpWEL129m3O59hv32O9PXcciNzzRYuW1qGa58lUqVsNsujjz7K4sWLuf/+++nt7QVgypQpzJ8/n+nTpzNkyJCYWylRUMCUGinoRE817yvMTqZzmapfE/doZjlBXiolLAqXe4tihu10ut3XlyP97bBPhUsR/
95++21+8YtfsHjxYl566SUA0uk055xzDvPnz+fEE09s+EsKSnUUMKVGvSjwRE01LxRGuNy7U9lbtJPp532TGDSTHi4BXu16jAltp8bdjLKqPUc2DLWGy2LnYfb2biOdHlpzgGyWINYsn0MkKs451q5dy9KlS7nrrrt2X7ty3LhxzJ49m5kzZ3LAAQfE3EqJiwKm1EhBJ3qqOUQRLPOK17uaGWyTcthsVOGy3vA1YPAYX8uIOiwnIVRCOLNtp9PVz9rYDGGsGT6DSBy2b9/OrbfeyuLFi1mzZg2Qm7Rn6tSpzJkzhzPPPJOWFsWL/k5rgNRIo2nRU839yAc6vyMypTualevtZwKruEczwwpjYYSud7e/wLAhlSf6yb93mEGzls/Xd52r9XdezWhitUGp7yhmfgQz6PcJW6kZyv28TkSq55xj2bJlXHbZZbuvXTl8+HAuuugiZs+erUl7ZC8KmFIjBZ3oqeZ+1R8uoZp6++nsxjGaGXQAqzZ01XLYZTV1CuO8Uj+f0e/nCvu8xVrDUrFLmDS7/vAZRcKyfv16FixYwCOPPALAxz72Ma644gpGjhzJpz/96ZhbJ0mkgCk10mha9FRzP4Lr1FdX76SNZgYRvPwGyqBq3tvbuXvSmWKK1a2e2XKj/nzJ1JjbFb+jlwqWIrXbtm0b3/72t7nhhhvIZDKMGDGCa665hlmzZpFOp1mxYkXcTZSEUsCUGumiuNFTzYNUueNZW739Bs0wQmZUobJS4Kq9U+/KnuNa+L6l6hfUobvVjoKHfdmM8DTndkXBUqR2zjnuuusuLrvsMjZs2ICZMXv2bK655hpGjhwZd/OkAShgSo0coCmno6WaR6u+elcKHkGGzHqDZb0jecF15veuuZ+wGWRQr+c6j5UeT24AbbztSrlaKliK1Kerq4u5c+dy6623AnDsscfy4x//mKOOOirmlkkjUcCUGjVWh6Q5qOZB8dcJDabe5YKmnxE5P8Ke5TT8YJlXuual6lhrDf2MUAb5+YIe6QyubY21XUluUBdpfOvWreMLX/gC69atY/Dgwdx4443MmjWLVKo5j3SQ8ChgSo0a71vvxqeaQ+2zRxa+3p9g6933fcsFpUJxzT4b3mGw5dRX83rOlYxq5Kve9Te/jOA0xnbFT800eilSu1/+8pdcdNFFbN++ncMOO4y7776bD3/4w3E3SxpULAHTzI4AFgODgAww1zm30nvsSmAmuZkHFjjnlsfRRqkk+R2S5qOa16u6Dmi49S7WlkqjnH74CaT1TloTXkc+unVcYSQv+dsVjVpKUjVDf7anp4d58+axZMkSAKZPn87ixYtpb2+PuWXSyOIawfw+cLVz7kEzO9P7/ylmdjhwLvBh4ADgETOb5JzrjamdUlJjfOvdXFTzvFpGgaoPFNHXu1wb/X7esGY8jSaQla95NCPX4atnFDP4z5Hs7YrCpSRcQ/dnnXPMnDmT2267jdbWVhYtWsSsWbMwS+42QRpDXAHTAft4t4cBG7zbnwfudM51A6+Y2UvAMcAfom+iiCRVLZ3OlpbhiQoZ1Yp7EpkoOvrZbDepVGvdy2mE33O152Q2wmeKk+ojMWno/uyVV17JbbfdRltbG48++ijHHnts3E2SJhFXwPwqsNzM/o3cHOmf8O4/EPhjwfM6vPvex8xmA7P33OO8H8h9G5v/VjbrvUWW3LW+egv+TRU87gpe13cZhcvqu4zCZfVdBkWWVc0ySrUnCZ8J77XN9JmS/ntKNeFnqu33lM1ux2wQ2ex20ul2enu3kU4PLfi3k1SqDed2YNaKcz2YpUmlBpPNdvn8TPltSmOse5nMloqfKVeDdrLZ7aRSg3BuF2YtOJfNtcJSONeL2QCc68YsV6/SNR6CczsxG4hzGfZc8iLrLXdXlb+nDM6l+rQng9lAstmdpFJDyGY7dy8jNxjQ2NuI3O9N2/JSnymT2YpZuuS6l812F/lMSdvuSRMLtD87ZsyYiteW7OzsDOT6k/fccw8/+clPSKfTfPOb32THjh1VLzeotgRBbSkurraYc+Fs/MzsEWD/Ig9dBZwG/M45t8zM/gmY7Zw73cx+AvzBOfdLbxk/Bx5wzi2r8F5O8xVFLb9Dleio5hDlLJzNXe8kHnqYze4glRrs67kasQpKc6/nyZD5s3NO13hoUFH2Z4866ii3atWqsu1ZsWIFp5xySg2fZI97772Xs88+G+cct9xyC+eff35NywmiLUFRW4oLsy1mVnLbFloqc86dXqZBtwKXev+9G7jZu90BjC946jj2HG4giaLj86OnmgcpH7BKB5XmrnfQl82A+icYMhsYWFvEr+Zez0Xq1Yz92TvuuAPnHF/84hdrDpci5cT1teUG4GTv9ieBF73bvwLONbNWMzsIOBRYGUP7pKJs3A3oh1RzeE9srgAADQpJREFUCH7kqqVl+O6fvTV/vYMIlyPaJu/+8fv8UrLZnb6WodHLIDX/ei4Soobsz1588cUA/OY3v2HTpk0xt0aaUVwB8yvAD8xsDXAN3rHnzrm1wF3AOuC3wLykzbgleTqkKnqqeV5YAaMwbKbTI0J5j6SoN1xWEyqLvbaYVGpIxdcqXAZN2xWROjRkf/a0005j2rRpdHZ28q1vfSvu5kgTimXP4pz7vXPuSOfc/3LOHeuc+3PBY991zh3snJvsnHswjvaJH/rWO3qqeaGwg0Y227lX4Cw90tlY6v0M9QTLSrLZzrKPK1yGQdsVkVo1cn924cKFpNNplixZwvr16+NujjQZfXUpNUrH3YB+SDXvK8zAkU4PLflYIwbNINocVrDMK1dzhcuwaLsi0h996EMf4ktf+hLZbJYHH0xc/pUGp6lXpUb5adolOqp5McWCRxDhL39JhHIqTxQUrbBDb7EJevqGznKT+FTSt+ZJqWtz03ZFpL86/PDDAdi4cWPMLZFmo4ApNVKHJHqquV/lgonfEFYpXNayzGZUT6AslP+dKVRGTdsVkf5q7NixgAKmBE8BU2qkb72jp5oHoVKAyYdFPyOYUp3K4VHrePRUc5H+qKuri2effRZQwJTgKWBKjXT6bvRU8yjsCUGOTGZrvx6drFXto5Bax6Onmov0Jxs3bmTRokUsXryYLVu2ADBhwoR4GyVNRwFTapRF33pHTTWPVq7eSTpks5awm6T2V6Z1PHqquUh/8Mwzz3D99ddzxx130NPTA8Bxxx3H5ZdfzllnnRVz66TZKGBKjfStd/RU82glr96NFRZrkbyaNz/VXKRZOedYvnw5119/PQ8//DAAqVSKs88+m8svv5zjjz8+5hZKs1LAlBo5wOJuRD+jmkdL9Y6eah491VykmWzZsoVHHnmE3/72tzz00EN0dHQA0NbWxoUXXsill17KwQcfHHMrpdkpYEqN1CGJnmoeLdU7eqp59FRzkUbW29vLypUrWb58OcuXL2flypVks9ndjx944IFccsklXHTRRey7774xtlT6EwVMqZG+9Y6eah4t1Tt6qnn0VHORRtPR0bE7UD788MNs3brn9IkBAwZw0kknMXXqVM444ww++tGPkkrpUHiJlgKmiIiI
iEhC7dixgyeeeILly5ezbNky/va3v+31+CGHHMIZZ5zBGWecwamnnkp7e3tMLRXJUcCUGukb7+ip5tFSvaOnmkdPNRdJurPOOouHHnpo9//b29v55Cc/uTtU6pxKSRpzzsXdhrqZ2Tbg+bjb0c+MAt6KuxH9jGoeLdU7eqp59FTz8H3QObdf3I2Q5DOzzcDfKjwtSX+zaktx/aUtJbdtzRIwVznnjoq7Hf2Jah491Txaqnf0VPPoqeYijSVJf7NqS3Fqiy6AJSIiIiIiIgFRwBQREREREZFANEvA/Pe4G9APqebRU82jpXpHTzWPnmou0liS9DerthTX79vSFOdgioiIiIiISPyaZQRTREREREREYtbwAdPMpprZ82b2kpl9Pe72NCMzG25m95jZc2a23syON7MRZvawmb3o/btv3O1sZGa21MzeNLNnC+5b6NX8GTO718yGFzx2pbfOP29mZ8TT6sZWouZHmNkfzWy1ma0ys2MKHlPN62Bm483sMW8bstbMLu3z+NfMzJnZqIL7VPM6mNkgM1tpZmu8ml/t3V9y+62aiySXmc33/jbXmtn3C+6P5e82CdvtpPWV4swlpfazsfTZnXMN+wOkgb8CE4GBwBrg8Ljb1Ww/wC3ALO/2QGA48H3g6959Xweui7udjfwDnAR8HHi24L5PAy3e7evyNQYO99b1VuAg728gHfdnaLSfEjV/CPiMd/tMYIVqHli9xwIf924PBV7Ib6+B8cByctd/G6WaB1ZzA9q92wOAJ4HjSm2/VXP96Ce5P8CpwCNAq/f/0d6/sfzdJmW7naS+Uty5pNR+ttQ2P8yfRh/BPAZ4yTn3snNuF3An8PmY29RUzGwfch3xnwM453Y557aSq/Mt3tNuAc6Kp4XNwTn3OPBOn/secs5lvP/+ERjn3f48cKdzrts59wrwErm/BalCsZoDDtjHuz0M2ODdVs3r5Jzb6Jx7yru9DVgPHOg9fAPwf8jVP081r5PL6fT+O8D7cZTefqvmIsk1B7jWOdcN4Jx707s/rr/bRGy3E9ZXijWXlNnPRt5nb/SAeSDwWsH/O9jTYZFgTAQ2A/9hZk+b2c1m1gaMcc5thNwKDYyOs5H9wIXAg95trffh+Sqw0MxeA/4NuNK7XzUPkJlNAD4GPGlmnwNed86t6fM01TwAZpY2s9XAm8DDzrknKb39Vs1FkmsScKKZPWlmvzOzo737I/+7TfB2O+6+Utyff7fC/Swx9Nlbwn6DkFmR+zQtbrBayB1GON8596SZ/ZDc8LpExMyuAjLA7fm7ijxN630w5gD/2zm3zMz+idzI/emo5oExs3ZgGbkwnwGuIneI0/ueWuQ+1bxKzrle4AjvvKR7zewjZZ6umovEyMweAfYv8tBV5Ppj+5I7zP1o4C4zm0hIf7cV2vINItxul2uLc+5+7zlJ6CslYhtauJ91zr1nVqxZ4Wr0gNlB7hjwvHHsOaRNgtEBdHjfegPcQy5gbjKzsc65jWY2lty34xIwM5sBTANOc97B82i9D9MMID/5zN3Azd5t1TwAZjaA3E7vdufcf5nZFHLnxqzxdoDjgKe8yZVU8wA557aa2QpgKqW336q5SIycc6eXeszM5gD/5fUFVppZFhhFSH+3pdoSx3a7XF28NiWlrxT7NrTvfta7O/I+e6MfIvsn4FAzO8jMBgLnAr+KuU1NxTn3BvCamU327joNWEeuzjO8+2YA98fQvKZmZlOBfwE+55zbXvDQr4BzzazVzA4CDgVWxtHGJrQBONm7/UngRe+2al4ny/VEfg6sd85dD+Cc+4tzbrRzboJzbgK5nfPHve2Oal4nM9svP6OimQ0mNxr/HKW336q5SHLdR26/hJlNIjeJzFtE/HebtO12wvpKseaSYvtZT+R99oYewXTOZczsEnKzWKWBpc65tTE3qxnNB273/lheBi4g9+XEXWY2E/gf4Asxtq/hmdl/AqcAo8ysA/gWufP/WoGHvW8J/+icu9g5t9bM7iIX9DPAPO8wOKlCiZp/BfihmbUAO4HZAKp5IP4O+GfgL945gQDfcM49UOzJqnkgxgK3mFkab5vtnPu1mf2BIttv1Vwk0ZYCSy13aa1dwAxvtC4xf7cxbUN+TEL6SgnIJUX3s8C1RNxntz0jySIiIiIiIiK1a/RDZEVERERERCQhFDBFREREREQkEAqYIiIiIiIiEggFTBEREREREQmEAqaIiIiIiIgEQgFTRBqOmXWGvPybzexw7/Y3anj9BG8qeREREZF+RZcpEZGGY2adzrn2pL6XmU0Afu2c+0gojRIRERFJKI1gikhTMLMPmtmjZvaM9+8HvPt/YWY/MrP/NrOXzewc7/6Umf3UzNaa2a/N7IGCx1aY2VFmdi0w2MxWm9ntfUcmzexrZvb/vNtHmtka7yL28wqekzazhWb2J69tF0VYFhERkX7PzPY3szvN7K9mts7b50+Ku13NSgFTRJrFj4FbnXMfBW4HflTw2FjgBGAacK133z8CE4ApwCzg+L4LdM59HdjhnDvCOTe9wvv/B7DAOdd3OTOBd51zRwNHA18xs4Oq+WAiIiJSGzMz4F5ghXPuYOfc4cA3gDHxtqx5KWCKSLM4HrjDu30buUCZd59zLuucW8eeHcoJwN3e/W8Aj9X6xmY2DBjunPtdwfvnfRo438xWA08CI4FDa30vERERqcqpQI9zbnH+DufcaufcEzG2qam1xN0AEZGQFJ5g3l1w2/r8W40Me38xN6hgWaVOaDdgvnNueQ3vJyIiIlUws6uBScAVzrkO4CPAn+NtVf+iEUwRaRb/DZzr3Z4O/L7C838PnO2dizkGOKXE83rMbIB3exMw2sxGmlkruUNucc5tBd41s/yoaeHhtMuBOfllmNkkM2ur4nOJiIiID2Z2OvAe8HXg/8bcnH5LI5gi0oiGmFlHwf+vBxYAS83sCmAzcEGFZSwDTgOeBV4gd/jqu0We9+/AM2b2lHNuupn9q/fcV4DnCp53gff+28mFyrybyZ3r+ZR3Hshm4Cxfn1JERESqkSJ35FD+X4C1wDmxtagf0mVKRKTfMrN251ynmY0EVgJ/552PKSIiIg3G+yL3X4GJwL845zq8+/4I3OycW+I972hgSMHcCRIgBUwR6bfMbAUwHBgIfN8594tYGyQiIiKBM7MDgBuBI4GdwKvAV51zL8bZrmalgCkiIiIiIiKB0CQ/IiIiIiIiEggFTBEREREREQmEAqaIiIiIiIgEQgFTREREREREAqGAKSIiIiIiIoFQwBQREREREZFAKGCKiIiIiIhIIBQwRUREREREJBD/H3bWE1Leje6PAAAAAElFTkSuQmCC)",
"_____no_output_____"
]
],
[
[
"# Replicate the figure here",
"_____no_output_____"
]
],
[
[
"## Part II: Scatter Plots to Visualize Earthquake Data",
"_____no_output_____"
],
[
"Here, we will make a map plot of earthquakes from a USGS catalog of historic large earthquakes. Color the earthquakes by `log10(depth)` and adjust the marker size to be `magnitude/100`",
"_____no_output_____"
]
],
[
[
"import pooch",
"_____no_output_____"
],
[
"fname = pooch.retrieve(\n \"https://unils-my.sharepoint.com/:u:/g/personal/tom_beucler_unil_ch/EW1bnM3elHpAtjb1KtiEw0wB9Pl5w_FwrCvVRlnilXHCtg?download=1\",\n known_hash='22b9f7045bf90fb99e14b95b24c81da3c52a0b4c79acf95d72fbe3a257001dbb',\n processor=pooch.Unzip()\n)[0]\n\nearthquakes = np.genfromtxt(fname, delimiter='\\t')\ndepth = earthquakes[:, 8]\nmagnitude = earthquakes[:, 9]\nlatitude = earthquakes[:, 20]\nlongitude = earthquakes[:, 21]",
"_____no_output_____"
]
],
[
[
"Below is the figure to replicate using the `numpy` variables `earthquake`, `depth`, `magnitude`, `latitude`, and `longitude`.\n\nHint: Check out the [Scatter Plots subsection](#Scatter) and consider reading the documentation for [`plt.scatter`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html) and [`plt.colorbar`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.colorbar.html). ",
"_____no_output_____"
],
[
"![fig3.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA2IAAAHwCAYAAADEntzzAAAMZWlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnltSSWiBCEgJvYnSCSAlhBZBQKogKiEJJJQYE4KKDdFlFVy7iGJZ0VURRVdXQNaCiN1FsffFgsrKuriKDZU3IQFdfeV75/vmzp8zZ/5TMnPvDAA6HXyZLA/VBSBfWiCPjwhhjU9NY5E6ARFoASbQBf58gULGiYuLBlAG+3/K62sAUfWXXVRc347/V9EXihQCAJB0iDOFCkE+xM0A4CUCmbwAAGIo1FtPK5CpsBhiAzkMEOJZKpytxstUOFONtw7YJMZzIW4EgEzj8+XZAGi3Qj2rUJANebQfQewqFUqkAOgYQBwoEPOFECdCPCI/f4oKF0PsAO1lEO+AmJ35BWf2P/gzh/j5/OwhrM5rQMihEoUsjz/j/yzN/5b8POWgDzvYaGJ5ZLwqf1jDG7lTolSYBnG3NDMmVlVriN9KhOq6A4BSxcrIJLU9aipQcGH94H8OUFchPzQKYlOIw6V5MdEafWaWJJwHMVwt6HRJAS9RM3ehSBGWoOFcL58SHzuIs+RcjmZuHV8+4Fdl36rMTeJo+G+IRbxB/ldF4sQUiKkAYNRCSXIMxNoQGyhyE6LUNphVkZgbM2gjV8ar4reBmC2SRoSo+bH0LHl4vMZelq8YzBcrFUt4MRpcWSBOjFTXB9sp4A/EbwRxvUjKSRrkESnGRw/mIhSFhqlzx9pE0iRNvtg9WUFIvGZujywvTmOPk0V5ESq9FcQmisIEzVx8dAFcnGp+PFpWEJeojhPPyOGPiVPHgxeCaMAFoYAFlLBlgikgB0jauhu64S/1SDjgAznIBiLgotEMzkgZGJHCZwIoAn9CJAKKoXkhA6MiUAj1H4e06qcLyBoYLRyYkQseQ5wPokAe/K0cmCUd8pYMHkGN5BvvAhhrHmyqsW91HKiJ1miUg7wsnUFLYhgxlBhJDCc64iZ4IO6PR8NnMGzuOBv3HYz2sz3hMaGd8IBwldBBuDlZUiL/KpaxoAPyh2syzvwyY9wOcnrhIXgAZIfMOBM3AS64J/TDwYOgZy+o5WriVuXO+jd5DmXwRc01dhRXCkoZRgmmOHw9U9tJ22uIRVXRL+ujjjVzqKrcoZGv/XO/qLMQ9lFfW2ILsf3YKewYdgY7hDUAFnYUa8TOY4dVeGgNPRpYQ4Pe4gfiyYU8km/88TU+VZVUuNa6drl+0IyBAtH0AtUG406RzZBLssUFLA78CohYPKlg5AiWu6u7GwCqb4r6NfWSOfCtQJhnP+vmWwIQMKO/v//QZ13URQD2H4bb/NZnnX0nfB2cBeD0GoFSXqjW4aoHAb4NdOCOMgbmwBo4wIzcgTfwB8EgDIwBsSARpIJJsM5iuJ7lYBqYBeaBUlAOloHVYB3YBLaAHWA32AcawCFwDJwE58BFcBXchuunEzwDPeA16EMQhITQEQZijFggtogz4o6wkUAkDIlG4pFUJAPJRqSIEpmFzEfKkRXIOmQzUoP8jBxEjiFnkHbkJnIf6UL+Rt6jGEpDDVAz1A4dhbJRDhqFJqIT0Wx0KlqELkCXoJVoNboLrUePoefQq2gH+gztxQCmhTExS8wFY2NcLBZLw7IwOTYHK8MqsGqsDmuC//RlrAPrxt7hRJyBs3AXuIYj8SRcgE/F5+CL8XX4Drweb8Uv4/fxHvwTgU4wJTgT/Ag8wnhCNmEaoZRQQdhGOEA4AXdTJ+E1kUhkEu2JPnA3phJziDOJi4kbiHuIzcR24kNiL4lEMiY5kwJIsSQ+qYBUSlpL2kU6SrpE6iS9JWuRLcju5HByGllKLiFXkHeSj5AvkZ+Q+yi6FFuKHyWWIqTMoCylbKU0US5QOil9VD2qPTWAmkjNoc6jVlLrqCeod6gvtbS0rLR8tcZpSbSKtSq19mqd1rqv9Y6mT3OicWnpNCVtCW07rZl2k/aSTqfb0YPpafQC+hJ6Df04/R79rTZDe6Q2T1uoPVe7Srte+5L2cx2Kjq0OR2eSTpFOhc5+nQs63boUXTtdri5fd45ule5B3eu6vXoMPTe9WL18vcV6O/XO6D3VJ+nb6YfpC/UX6G/RP67/kIExrBlchoAxn7GVcYLRaUA0sDfgGeQYlBvsNmgz6DHUN/Q0TDacblhleNiwg4kx7Zg8Zh5zKXMf8xrz/TCzYZxhomGLhtUNuzTsjdFwo2AjkVGZ0R6jq0bvjVnGYca5xsuNG4zvmuAmTibjTKaZbDQ5YdI93GC4/3DB8LLh+4bfMkVNnUzjTWeabjE9b9prZm4WYSYzW2t23KzbnGkebJ5jvsr8iHmXBcMi0EJiscriqMUfLEMWh5XHqmS1snosTS0jLZWWmy3bLPus7K2SrEqs9ljdtaZas62zrFdZt1j32FjYjLWZZVNrc8uWYsu2FduusT1l+8bO3i7F7nu7Brun9kb2PPsi+1r7Ow50hyCHqQ7VDlcciY5sx1zHDY4XnVAnLyexU5XTBWfU2dtZ4rzBuX0EYYTvCOmI6hHXXWguHJdCl1qX+yOZI6NHloxsGPl8lM2otFHLR50a9cnVyzXPdavrbTd9tzFuJW5Nbn+7O7kL3Kvcr3jQPcI95no0erzwdPYUeW70vOHF8Brr9b1Xi9dHbx9vuXedd5ePjU+Gz3qf62wDdhx7Mfu0L8E3xHeu7yHfd37efgV++/z+8nfxz/Xf6f90tP1o0eitox8GWAXwAzYHdASyAjMCfwzsCLIM4gdVBz0Itg4WBm8LfsJx5ORwdnGeh7iGyEMOhLzh+nFnc5tDsdCI0LLQtjD9sKSwdWH3wq3Cs8Nrw3sivCJmRjRHEiKjIpdHXueZ8QS8Gl7PGJ8xs8e0RtGiEqLWRT2IdoqWRzeNRceOGbty7J0Y2xhpTEMsiOXFroy9G2cfNzXu13HEcXHjqsY9jneLnxV/KoGRMDlhZ8LrxJDEpYm3kxySlEktyTrJ6ck1yW9SQlNWpHSMHzV+9vhzqSapktTGNFJactq2tN4JYRNWT+hM90ovTb820X7i9IlnJplMypt0eLLOZP7k/RmEjJSMnRkf+LH8an5vJi9zfWaPgCtYI3gmDBauEnaJAkQrRE+yArJWZD3NDshemd0lDhJXiLslXMk6yYucyJxNOW9yY3O35/bnpeTtySfnZ+QflOpLc6WtU8ynTJ/SLnOWlco6pvpNXT21Rx4l36ZAFBMVjQUG8PB+Xumg/E55vzCwsKrw7bTkafun602XTj8/w2nGohlPisKLfpqJzxTMbJllOWverPuzObM3z0HmZM5pmWs9d8HczuKI4h3zqPNy5/1W4lqyouTV/JT5TQvMFhQvePhdxHe1pdql8tLr3/t/v2khvlCysG2Rx6K1iz6VCcvOlruWV5R/WCxYfPYHtx8qf+hfkrWkban30o3LiMuky64tD1q+Y4XeiqIVD1eOXVm/irWqbNWr1ZNXn6nwrNi0hrpGuaajMrqyca3N2mVrP6wTr7taFVK1Z73p
+kXr32wQbri0MXhj3SazTeWb3v8o+fHG5ojN9dV21RVbiFsKtzzemrz11E/sn2q2mWwr3/Zxu3R7x474Ha01PjU1O013Lq1Fa5W1XbvSd13cHbq7sc6lbvMe5p7yvWCvcu8fP2f8fG1f1L6W/ez9db/Y/rL+AONAWT1SP6O+p0Hc0NGY2th+cMzBlib/pgO/jvx1+yHLQ1WHDQ8vPUI9suBI/9Gio73NsubuY9nHHrZMbrl9fPzxK63jWttORJ04fTL85PFTnFNHTwecPnTG78zBs+yzDee8z9Wf9zp/4Dev3w60ebfVX/C50HjR92JT++j2I5eCLh27HHr55BXelXNXY662X0u6duN6+vWOG8IbT2/m3Xxxq/BW3+3iO4Q7ZXd171bcM71X/bvj73s6vDsO3w+9f/5BwoPbDwUPnz1SPPrQueAx/XHFE4snNU/dnx7qCu+6+MeEPzqfyZ71dZf+qffn+ucOz3/5K/iv8z3jezpfyF/0/734pfHL7a88X7X0xvXee53/uu9N2Vvjtzvesd+dep/y/knftA+kD5UfHT82fYr6dKc/v79fxpfzB44CGGxoVhYAf28HgJ4KAAOeIagT1He+AUHU99QBBP4TVt8LB8QbgDrYqY7r3GYA9sJmVwy5Ya86qicGA9TDY6hpRJHl4a7mosEbD+Ftf/9LMwBITQB8lPf3923o7/8I76jYTQCap6rvmiohwrvBj6q7Lri5cmIx+ErU99Avcvy6B6oIPMHX/b8AZ9+K4JLzqlQAAABcZVhJZk1NACoAAAAIAAQBBgADAAAAAQACAAABEgADAAAAAQABAAABKAADAAAAAQACAACHaQAEAAAAAQAAAD4AAAAAAAKgAgAEAAAAAQAAA2KgAwAEAAAAAQAAAfAAAAAAX71J9wAAArZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHRpZmY6UmVzb2x1dGlvblVuaXQ+MjwvdGlmZjpSZXNvbHV0aW9uVW5pdD4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6Q29tcHJlc3Npb24+MTwvdGlmZjpDb21wcmVzc2lvbj4KICAgICAgICAgPHRpZmY6UGhvdG9tZXRyaWNJbnRlcnByZXRhdGlvbj4yPC90aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj40OTY8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+ODY2PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CrNQhOoAAEAASURBVHgB7J0HYFXl2cf/uTd7MgJhh71BhqAMBRyIewuOCo5P7dDqV7XV2hZbq9avrW2tddSBe4IDEJmGIUtA9h6BhADZe931/Z9zc5Kb5GaScZM8D/xzzz3jfd/zO+O+zzsBNSWgBJSAElACSkAJKAEloASUgBJQAkpACSgBJaAElIASUAJKQAkoASWgBJSAElACSkAJKAEloASUgBJQAkpACSgBJaAElIASUAJKQAkoASWgBJSAElACSkAJKAEloASUgBJQAkpACSgBJaAElIASUAJKQAkoASWgBJSAElACSkAJKAEloASUgBJQAkpACSgBJaAElIASUAJKQAkogQYm8CTDe6OBw6xNcNdzpwQqlxpdmwM89mnINO9huFNLwvbj59tUBrWZuoA6QLUFi+dJXtIMJzqXcb7fDPFWFeWr3PC7qjZy/VyqodLbkPdxNUmu9aap3DPRY2/PZ8Nj9VkvLmEIs886lIYNoDeDc1H+9QzW165lPU/D62ENfb3iGMu9XmPSlUpACSgBJaAEmphAPOO7pIninMp4PDNaTRSt12iOcO21Xre4V8q27VQ2lUqtpHpTjWnieAmfsMaMpELYU/m9pmsyj/sUU+K0mtrB5fraPB74TIWD4/n9kgrrmuLrXEbyfiNGJI70LR7hT+KyZLgrrhOu/h77yeJUKlEWPGwul+uS3qa+j+cxfc9Q9bGpPKji+dYnHM9j5vJLXXh5HlvX5ak8wEnJtcyh5NrfRdXGenOn2jpiU7lvQ3NikF4tjmslXedU2PplyfqpFdY39tc5jGDdWUYSx+PvPcsw9HAloASaiYClmeLVaJWAEmhYArEMTkrcvVl/rnyX+hUVRfWh/kNJJqsxTdIUT+U1ZiT1DPsFHhfuoYoZs9oGa63tjq1kvzU8jyke53Ihl/d7Wbee6+we+zXEYnPdxw2R9pYaRhITLs9JJPUI9V9qENWS7SATf6fHCXTk8vlUisc6XVQCSkAJNAkBdcSaBLNG4gMEgpiGf1CSsRDJsqwzzbOkXWqXZpRskBLgfZSUCB+l7qfEpJZHmph0o8xaFVmeS71PmXYNF8RByqTiqCGUafFceJTaSWVRn1DBlDeTZ/Up6jiVTIljJU6VnIPELw6B1OpI2ivaKK44RkktmIuSc5lPnaDE5lKeaZZMisSTRv2OiqcuocTmUp9SEr+EI+d2LmVaPBdk33soaaI5gZL0PU1NpRIp03pyYQElGSCJ69+UWD9qFSXrpPbuA6odZVo8Fx6lKnKr6pqYx9X28zPueJqSa7KGGkaZNo8Lr1DfUOJgynneTj1OyXkupEwT7hXTaG57jAunKLkX76bkuvSnxOKoe2WhxObwc535hZ//pBIoqd3cSl1AebMArvyImk8FUnJ/yrLwlvvhIcq08VzYQkmYZ6i/U95MeFzosUHi/ouXdbKf2DzqGaq6ayNpe5fydj9xdanV5T6We0muhylxCudSYtVxcO/h/W9vrpbrNJs6Qcm9+VvKtBAuzKMyqL3UOMrT4vnlkpIVVn4+SR2h5LzlOsrzIFbV9ZV3khwzk5LzkuddLI66VxZoc6h11F8pSYdc58sp0/pwQa6NxLmCepl6n6rJ5Lzlnk+nRpbsbOHnbyg5hzRK3gsdKG92F1fW9z36LY/9RYVA5dxvKFk3mJ/LKUlbxRrbkl3Kfcj7RBjKNRC7lfqCKpY
JXllVUgIsSqEbJ735iDzftZWJlsTZ6d/A66JrYjGdsHUSMUE1GPbolxqGR+729Xf4olebtgZt5ZUT49cb5AcJDwKiGbz/20BBGMYxSyZ2dfYhREhI8E3GnxQsdyAV100SS1evRNTcBjl4xDGOuPzV25hb8FNy4e3xNz9mRganoPKlg2LHKhddrAg5WEUSYDg3YqtcKCfdR+7t4uEC6pyfozLtHD1LqynsRIdhmAQdt8ZzHz9pQpBFoSAf4ma/0ptORITuu+FRE7rfCqztsiAvMPbNE8YfXHLjcjT234An8YeAnirI2f4azfj3qvEFAItF0E3pp+Bf6zarWm5nf70CHagSxYux25JRUQEibmdHtRXGHHp8s34aZxgW20FU38IJ54KUCs5foIHeONi6GCeVAM5dvvLcFHSzdAiikvpNS82WzAlQP64NV9K2AsZ94UxyKeJnHs3P3GbLz782sQHWXBJ1nLsaZoNzIrClHlsXMiSgibcC8frLE8Pl0I79f1CMvSoaITvSdyp8RNjC4DmNWExy4cq23fmAchsA+99QX0PI6+w+Px/LaFdDyRHCYZUZUfGJ9479yyX471zS0/oYM/Cs99vaQG62kDemLtXqr2yQHR5DuYv24b3vvFddhIUraasvaSh/bQJaPRoV0U7lv+gUbCxAMmzaslzsmHA8cpfUgBbDdVTUYNTcOWTbkoc7k0kRPxGmmckyqVf71kCgVETHhu7o+49M8z0YuErB2VF+08pj+OnIRnx02Rrk7YuqfE4ZfTRmH28s0Y1bsTzmdrSpPfzPMXTsHvlrwFnbmSKoqxPCwyMHLpoAkSXSm8cSLm5+QEfGzGjuT4p88TfCJjUtsqBNoCAoqItYVvSY2xWREw8kIlF6Lq63rNvsUztiR3NyZ/8xJmjrwJvaOTa9apFwoBhcDZgUCszYaHx4yuc7DbDuRpoWS1F4pXZuv+vNqLTsvrMLMFBqoh8j8V/JgHxVAz7eTFHKA/zvpe26eQB2c4SdN25o+RlPjp1BK5de1Ex8+VO5y45fX3YRtexJA1J1yMTRN+RXFGSsYzP0vcQWISyxjBfor0/LwOEdsNsA/w45PLr2O5g7oetcAHjv34i9fmYkW1DP6i4h1wdgwQ2RB6aQZb22PTnjy4wwIkTHr617KViCi21JAwWfbRT5s0z5yoF+roqdOTJEXGWmFhPtl/7rqC0vQuqgwaNSXCfEdFDQmTz/okr06Aq0XCZLmYh8Rz+Z79iCo1w+IMYCsE1mDSY3KnbkhlaOgFz7wOu6b3DuRn7EGY1YKvfncbrOZTu7W69vwBkHa6zG3dhKTYQqxZ31HU6zVHmM9CYRSpk0YTxcb7BjROdMXvq4C/9EH+UJYQJP5gJIYk9OfQh92m9aUeFAKngkBwIuhU+mjtn63liG7tQ1XjUwg0DwLTOvSH1SAXlCNNLs4S2vLo2s+OXKmWKAQUAmcdAhX0lnxXsFsLXat98BLW1rtDAjKy8jQFw9rrmvK15BIZGA1oYmqVkYSBJaACvEILPQzsSUiV5DlpghVlJB5coHEr4SDVZk8qpOfHrpEwWSQfl2Y2Smhf4AbdQJl2o4ckQ7pg69onHktuuPukSJjso6CcYZLVZjUdPudKbtZL0y5GxiO/xF+nTEYIxWMMHIy1lHW9qr2Owc/xlAy9ECU+e2J8sPdwYlVYJm784T2tFIONyoNGFoEWk5wwCUcMml9jr8F39Z55yOa9OtirC5dJ/2mecFzTqQ+ev3gy3lmyFi6HV8sVlHxBZ5wP5S4n5q3ZWq+j1vXW4a3CvAMfayRMvsTgPz2LVAs5jw2x4cVxU9E/vnHE2l/y6wAJAz/s5/fp54+x4kX4HV+2rgNXo2lzCMhZR1QTG9Pa3MHVGvDhM1KtheqlQuBsRmBouzRc22ko3t2zSivQ3BAWu8vzmcDuRJiJSRLKFAIKgbMWgceWfIN9hjKYLFQvdPK2ljf3LD2FpJhwvPntavzzi2UY268z/nzTyYWqHQ9YC4saCzGq7cKXm+tguKLHLHlA9JSVyEayJkDKNFeI9i6wB10M1RAl9q6eaV3zjkiXbYPjEMMgTfSqtQPiOljxj2suRpztxHKganf/+2sm4pF3vtSw+vk1I/F/a+ehhGTwN33HaVL4su1lnXtpH9lVUogPD25EJat61TYj7/2FJInpCljwOIXhiWSjawoO4IXNP+K3/cdh68FcfLFhG8Jt5pqcMW17Aw9M7vYaMu0usPYKP/KzKzA7ZxP2lBXB6GbkBPerYU3iqK/ivmM82F1EQZNWZllVB7CycDWJtQ7xlgjkFlJhsoHjtlEacuHVM6gASZdkI8zvZfyoaym3FPdqbbPDX/EqdNbJyC4p17zFneIoEiLMXplCQCFQBwFFxOrAod4oBAII/KbvRFyeNgA3LZmJQufhWdsgPnLxlRBGZQoBhcDZjcDK7Cx6kZhvlC71mHQwVDFMMAR4YvpE3PXyp5oHZ/WOrNMGUixraMXFhSIvj8lhNB2VE/0UptDMzTpRcpWnl8xj5VmLhEFHsuhljS5QGj6slPWj6C1ye31ICotCjvdIgQbxREXuT0RBzmECJMIX2fur8P6O9Xhw4OjAvqofV+VlYTXb+JQurEUlSh5HNxEI+fzxW2s2+HzSkcqNcvN+eZfe2jYb1mVj7f5DNdsLmQiQocAi8QbK8ZmyjPBG+PCadwVivWH4++yl0JdyY56yQxOioO9VCoePirgkYnoTQ/JIqoRS1TYJbQyPt6I877D4iEZj2c1P2w5gZHoHbXMhvGI6chEh4ef1SNPet5aHr3O+xaysTyijHxAt0XOWwKn9KI4coZ4qihbWwmy0+UjEdNzeX5+IMUDRm4sZ/56lkWAJh42kguiL11+MPqmJje5ebXi2IxCQrm8kCm22oLOcfZQpBBQCDSDQOSIO9/cYw7CYwyEzspmJBGxMUtejhi820JVapBBQCJyhCERRxU8z3se7I6mwmkTpg1iKT3RIwrj+XTQBhwcvG33ajl7ow1cP3oYLz+sGg4EkjA4yMQPD9aS+mLmcdbAKhCyw8YbYx7pfnnAfrh7cB58/fDNeumUaPn/oZtzX+wLylLq3BPL+XFsfFOYeJmHStxASIR3p1hh5W2MbC7Nx03cf4Pn1P+KKr95GbhV3fhQTcZOt+3OPsrbhxb+aOKKObL1wJxEcqT46rQaWkDEzCy+bM81aSOhzPyyGvpwEtHrM5QVOXJc8DJGmwPfWIcGChNAwhNYKjZS9e0lab54yhP1zJyS3YkK6pDi0h7lUKzdkBfqsfjSTr1nNZoxnzbbWYuXucnyw/xOKkrjp6wz88zD/zxrGwYYwD7DWP7CYdof2ZSRiJyC0YexEUAIEr+4x67AxOwGbsqhgSdVKUZ7MKa3A7W9+qila1t1WvVMINAkCpexFZnLmNUlvzdhJ3bNuM+5Y7Uoh0BYQuKrTYFxEyXrJKwgzWjRS1jMyCU9RObGt2Ko9WfhgxQbsLWBlUmUKAYVAkyLwu/PGclLmcHBJCIUOHj5nNEmRHn+6cTK+e/ouTB7So0n3Wb8zE2sX
/u3yi/DY5RMwqS8niUgqtBDJapKg5Y3xtdx4exlN7TPr8e7GTbjng7kY0DEZiVHhGJ/Ql/WjbJpIhzjUyENg0dnQw981wHTq7VSITWUJWU8t21oUIFYibCTrMxnC15CJouSlT7+Fm1/4EHNWbG5okwaXDemYildvuBS9kuIDoiPcj5viHB42t0jLd3TBE+mDs70Lji4sxmzXM0yRKUt0XkmIpvaPRLWjKw4rpv4Wmy99DN9N/jnennohbunXS+szuGM5hjUFBwPCG+zDx/k4d4Qf9kQfvYtC/eoZF/1q8qg6fdTbotnfbi/fxdy6amZevXf5Xr0+I4b03wlzFEMIrYyvtHmR1jkbWW6SV9mgkaajgiZCb9e+7eBHREVTfj9PL+x/RDFrIbfLdu0LbqqeFQLHRUB+jo1px+2oFW9w+OrRigephqYQaCkEJKTiqUGX4N4eo7G1JBspoVHoEdl2QiuEgP31yx+1GxAJSfng7mu0+jothafar0LgTENgeHIHvDf1ary9ZZ0mDnFp1164oCPJSzObhPBdeW5fnN+zExZvygTvtQPCHRyHO4QeHXHqMPHdE847GyEWfNpckY9lu/fh/G6dtPydYTE98e2hbcwVc3PyyYTruo5EpD1EC190MXyxthmqvUSTX3gLxZV2/HvGpegeHg8XpfvFQhniNqBdcu2P1LwWgRFPdX959JSciJ2T3h6f3Hs95u7OwG+XzwGTxkgs2EOkh1FyEnpp0IQzTBbmcGmFrnngPF5nHMVMSkja4r34W9aPKNvqwjXd+uCJzc8jx8Gc3yqG2OliiRE3pgkhKXba8eDF5+OvHy+i2Am7oSNJT9VEMyPx3PSKmaqHLtDGRdkwY9gg7bPN/VDqysOeirUwG0LQNWyY9ixjiDBJmRV+0dVWWmrDth2p8FAJU2odJKQWIDYqGxUeM7KrwhGis5wwkaxgzToPPW6h/P2J3rCb6ik5rLO2q4h5aA2YhLUqUwgoBA4joIjYYSzUK4XAURFIYhFnaW3N3lm2rkZljJkhmL9xO37BQqfKFAIKgaZDYFBCMqS1BouPDMNfKAzy108XkU2QgLAYfY6HbEVkDuvfBPOeWIojB+2ZIRfDtt6MTUWHcH5iZ9zbcxQqHC78aT77qkfExAtnJBnLoRiDg3W33lu5AZExVujyTFrulduoh9VYN6w7uJ/kmAi8cMc07MsvxuXn9QkuPqHnHtFyHmM9tHZOibjUTOqoCeGMIgEZFpeKbwr2aJNQslrP3DFfqFdTVsxzV+JZhk/utK/FAVcOpeo90JudMBkj4XTJmOnP44cu79YLyxfupfAERbQJk9fGxsg9PVUSfVbmWjGczxfhganQgMIyB2fuSQSDg9FGdPofFua+TSGO2USCNIiTbTKGK9s/ijJ7IjYXVsLhiKWqfA4LTeuxdVsHhmsGiKb8HnIPxKHIE8pjZ24Yoz6uSDtxIumwf0H8HHDU4ldU8UfndoXYkRenEdogCkJuz+vSIfhWPSsEjouAKCae6aaI2Jn+DavjO6sRSImOwP7CEi3fQQq7JvO9MoWAQqD5EKiqcGLlwgyER4Zg8KhuzXKjLiqN0sRKKx248aUPsI/ngdgwG9zMcT3kY+4Wb5w7mSMxvHNaDRg2oxlCxsQOlJXi1Z9W0btiwJ+uvACPffqN5luR2yIhGy9eOxVpsdF46fsV2s32tIE9kWOv0MK35Ya7ayy9S8ew4T3TIO1krXtMHAZEdcC6CpItU8AL52ddMIPLigqDA98c3EnRFBIjB+ugcdDuWAYbUphDq3pNosUAQ2wq3sfQRQPPjyYYybS6dcxGbkEkCirCYGb44c7KQ1i8Zo9wOxrl8wt1KI0hcHr2xa4k7E8rHVCpw5h+6ShxOPDBlo3on5CE4e1PP+HYW7EBqwvnopQ13dbnJyPfGcbD8+H9TXPgZrisjqUH/C4LSw50RnxYAY+AY69tfGt0WmBmvtig2PZ4oM/42msb9Vqvb/h7fmLKJvxm7qUorKiqDiMFXrhuKkJZTkCZQqAxCPA0wokFRcQag5XaRiGgEGilCDx9xST8/N15yOTM84TeXXD54ID6WCsdrhqWQuCMQsDpcOOui/+OovxyTUjjwiuG4t7HpzXrMUaGWjGXohxBkzpcC7ft1t6O7dGZRZuPnI9dtS8LMz77BG56yyQ8OyYkBF/+6mbsyJGbeR0Gp6Wwvlgg92jJQ3eRyPgYvmjQvDFhFKzILi/H5X1O/7nmzQuuwCNLvsb8fdvpD2JOG8Mpb+jTH//d9hODK5m/xiH66e3RmwNELYyF1pwkXh7xWpGkHKDkfKjVBJvJQ4efHuVeMxKyc6H/qBIehhq+33sNiaiNhaIFLnq/yCF8YRT5l3tD3iTqCryI2mXEDaMH4mczxuDOL+Zi0b5MrebZDzfejoQwCQ08fbaueAFJmBdbSpIQH16B9tEljIAwYXdpO4YbWkiYOUwep48DDvdKfcxiVDKMMGji1ZzcvjvuGDQU3SMTgotP6Dks/B4UF63hd8+4zaAxd6xrym1Y8OtLsOlALqpYa29gh+QGf2vBj6hnhcDZisCRZ+CzFQl13AqBMxCBuIgwvH/PtWfgkalDUgi0fgR2bMpCfl4pdB7exPP+d8HHq5udiNVHSQpNT+7bvf7imvdLKaZw+yez4baI90iiGf0oqKzCLIp73HfeuTXbBV/oGZ7IUsDaW/GUTerWfPlxoSYzXhg7FX9yT2IxaifahYRqZUVSwiPw3f5dWHYok2lfdkqpMzeukgImPB49PV9G1kLzM4zPTnEKp8eE6NgCLvPBUaFD/ifRsPdlHjDFVgxVflSke2BhfTINC8rdS4iiKDUKEbMW6ZgzHItbrxiueQlFqMXIfmXbYBHpIE6NfRa8f/3tl1i0fy9u6z8I9w85EnMPCfKfl/+AWdvZq24A408NKEiqhN7qRbjJgZ4xOVhfwFwwSmjKMYsXcJtjP6IscTxeD5dTtp8rbBR1eWjYWMSFhjZ2eEdsZ7WORUTE71FW9gzJmIv7MyI8/AGE2AITDv3aE0tlCoGTRMB3FnjEqoOFTxIh9TGFgEJAIaAQUAgoBBpEoF0i80p5wy4mTzHxrTs0WPLFfvPRlzVCGtrAqwc/a/Wmmrct/ULyoJ6Y9Q3ufW02KpnDZiMhSwwNr6nteEP3gXhpzCWwRFJVg0TEVWqF121AaZUNXuauBdiJHEWgTlGZ00piQo+aieUHujL3jCRMW8ubQCO9ZhU96VUi16RDDRHbmY0lzh96yco7+7CmVwlyXYFak38ZfwH+MGY8PrnyWsSGMKHsJOyn7IP4as8uFDvs+Mfq5Qx3rOVpqu7vuVVL8A7FYSor/KiyW2GKdaG0LBT2civslHfcWdwOsdbAmOQjwg0NVg/mXHsdxqd3RlpkFEZ26MD3158SCQseXmjYjUhM2oyExOV83oKw8LuDq9SzQkAhcBwElEfsOACp1QoBhYBCQCGgEDgZBJLax+KOxy7G+y9/h5jYMDz+jxtOpptm+8yWQ3lwUdzDQP7iDZTZ0hikeIFKSquQX16JOBaQPpodKC5
FZmExOjF3LDX69IkbFZZXYfaqzeRLeqzLPISRPTseMaQfcinPznBJp0dcWFwtM+t89lQwHyzMrZGT4IckD4XcjoWtybZY+Fhk7iUEU2qGacWvScCkBrLHpkNFGvsjawt4mija6HXhurmzsOKmuxFCD9P0XicnPhIcSwzr0jEDTQtvtFHspMTuxL7iEvSIi4OFHjexOTu2wlVFgRHR5g8HivIEax2qSMQcVH9sl8SC1W5xhQVMji09LF4jXa9cfHpKr4gnzGA4ufDG4DjVs0KgPgLy2z3TTRGxM/0bVsenEFAIKAQUAqcFgW0H8rB4SyYGpqdgSNfUBvdxxbXDIa0tmITFyQ29jsTEXBzIiZJx6zUiImSl4bsiF4v2PvjJfPywI5PeIn6u0oeo8BB89OANiI86+TypjKw8LNmaiZG9OqJn+8M3+bHhNvxs8gjkUvp+2FFw94maBk2nlzFXH5eQKxdzxMp0MEWRbcp6tjCLgyRMj/1bk1l/zAdrCY/ZxdpkLHzt4+dt+0jQenG7CDMq9A5+gv1Jl5I7Ri5UZLdjY34O+scnccHJmd3txq1zZ2P1wYPMv9Pjsu69NEXKKTNn1uTf/fXCCzC+S2fkOStgrKBKIvPaXPKdVHMuP6X3K8tDGGpZjsJKGyxUiZTVoqj43DlXnNzA1KcUAi2IgBLraEHw1a4VAgoBhYBCQCHQWhE4UFiKm//xIZwUvxDhilfvu1wjZK11vI0ZV+/keM0TI9sKGTOQgAUtJSoC8cw5bcheXricJGwvvTRemBgRJ7ygpMyOu1/7FJ/+9saGPnLMZUL4cksqcMsLs4ivF298swqfPXYLRJpfTHLRbp8wrE4f2w/ma6GJnZMCKn6jErrCUwyY11FJkDW/7ClBwkIy5mEtMCpw+Kl+aDO7UVISivKdUXDbzfAkeFAR46VABwkpyZjfyqORutV8qipyw0oZfEciydkBPYxUS/Ra6L1i/t/zH/6It+67ShtbcGBvf78GCzftxsNXjkX3lLjg4gafX1m9Cuuzs7WcPAePed7WbSLOSA8lC1R7NbUQ/PrLBXhmykT4vZKrxvwvDWlhhHWtmMdjr6RLk7Lyw1KS8Xj/S9A5/Nj7r9uDeqcQaHMIiFv4NbZ51a3NHEAgELrNDFcNVCGgEFAIKAQUAi2PwM6DBVpekXiRJLdq877clh/UKY5AlA+fufwCiKCHiDmIGRn+ZzOb8PRlk47a+6w1mzRCKiSstola64naqpws9Pvfixjx7qskIczNEpcOCYcQs6PZ8m37MOP5D3Dts+9h495sbbMocwi6FaaQUJEwUYQjKHVo4HENT07B7InX46FuA1G5OQqlq1jvqtgMqt4j5GBAeMTPfDENAe4+JF9ulQJ4mNlX5DbmjpGEyT+jUzxTOmzenYNDRWU1Q3S4PHh+7o9Yu/sgXpq3tGb50V7sKympIVyyjShWOuvVbpPln+/IkOhJLXTUYyMJlBQycf5pMPG92YNKesNkvHbmxv2i+2SSsHi+bxnz+Dywe8WLqEwhcGIIyF9gIGz4+M/suZTtTjYhYm3KFBFrU1+XGqxCQCGgEFAItAYEBnVOgZUEJdRqpkfMiFG9O7WGYZ3yGMb37IxZd16LSwf2woD2Sbh6aF/Mue8GTX78aJ07GFYXtABdCfACXzDPLLiyEc/3fDdXU0D0sI6XK5zqfszHGt4jDb3aH51M5DFEUUz2XVB2mA1e2KentlxPMha+Rw+TEDKnH7vXFSLan4zL0qfAX2rm5wK+Jfm8jrXILKwXNsQWR5JDRcjDh6b1JQ9COH0kavLPy+LF8rmuSe2QEMWErWqzmAwMm+ygEdrJg49UqXQ63cg6VAw7xUbELunRkwWwA9kikttmprS89FvbhBy7ycK03DBtsFzLZUIgDVxuopS9l0qJte22+bPxwppl2udqLz/drzNKd+GRjU/iltX34c6ffoGHNz6BzMp9p3u3qn+FQJtDQOWItbmvTA1YIaAQUAgoBFoaAanPNefRmzRPWNfkWMRVh8219LiaYv/dEtsd0wNWfx/ndGqPJTv3MtSPawKpWRA+MLx7Wv1Nj/u+0h0gJsJCvJ2BP426ABenBwhVQx/en1+C83uzmPJFdopzGDGmDz9UbTePH4I9OYVYvesAXHoPMsPKtZyu3AQH/r14BR69aLwou9cx4TexNgvivZGw5BbBJQWcNeOz5p2jl5BqhaZ2rKc2riNuHzgU/WITEEJSLiGTQZPXr91/hZZXV3u5rN+6MxsPPPmx5kmV9888dCnG90/HS1MuwuyMDKRGRCDCZMG/Vq6EnXLzta1nXDx+3L4/wP5kBXcpI7RQSp9VnOHuwC+gZhg6FDvteGX9SuwpLcIL46bW7uq0vHb7XPj3jp/BotuKzgzbTGSR8IyqZGTZD+KPW5/F8wOeRqSpdauHnhZgVKcnhUDwr++kPtxGPqSIWBv5otQwFQIKAYWAQqB1IRBhs2J4z7TWNagWGM2jk8fiqgPvw0G5RXeVFHfWwRpmxiOTx5zwaO4bcC7+uWEFc9X0EAXBsanpR+3jyzXb8Pi7X2teyQVP3E4PZd1bmkoqDj5x7UQYmcN35avvIBMkYmIkKu/tXYOB+5LhJ1mAiJFUsxdRSrxuwGBs2JfDYtAU9aAXTVQMRTlShEhCKGKivabX6sLEbjg3uX2gz6M81idhstmjf5mLiqrDCXiP/mUOFvzv55q0vMjLi0lIZonDgXc3bNBex7PW1/NTpiA5MhyvLVmjbSMP4pWToevL9XDG8TX/B9hlDRuDg0qYCzJ3ILeyAgmhDef51XR4ii8+2vsYSL34/clAKHBiZDHn8H1YXtqZ9cuMWJy/HFOTLzjFvaiPnxUIyM+ZuapnutU9a53pR6uOTyGgEFAIKAQUAgqBJkUgLTYKC352Mz5krtjW7Dz0SorHVYP7IopE9b//WYTlP27HiNE9cONt59fxGjU0iJ8NGI7RKZ2Qb6/CeUnttRphDW0nyw4USFoIJeQZ3ieiKbWJ2D/eWYiPv1qP0BAzXv/DdRgxMA0/rc3RSIuIbzhIwN7bOxsDR+zDlmWd4XYaNQ7jYzjkTk8BNu5mrlnwHtDIHBW+NpGIec0U7YinDqGFCoXOau+dNorGPYgQSWHJ4fBJ+ZQIkkiIYig9cUGTMMRHxozGr0eOQIXLhZiQEA27pRl7WUgasMcGiI5sL2PT3tElqRNhD4ZzHl4b6FFCHQ9UlJ5WIuanUmWleznzCqvdotUHIzAmm0ux12lCgauweql6UggoBAQBRcTU70AhoBBQCCgEFAIKgVNCIDo0BHedPwz5pZUorqzSSNjiRdvw6Qcr4XC4kX2oBF26JWLE+UfmS9Xfcb+4xsnA3zRuMMQr2YVKiRIqGjQP5fQ/XLBWiyQsr3Lgs0WbENqJDIpKiEHRDiZVwaE7qEm8D5y0Fd4qE5bv7QSPzoAFO3ZrxZuFQGgmRIfkxp1iogwj88ZIc0IoaHJB767BLY757PMFct1kI/GQde+cgB17cuGl+qHkwCW0C4eNhLEhk9phwfphsv67Dbs08ZEQ5oW5os
VlwLDEEo4tkj49kjyNMFJgRcQ+apuL77tExdZe1OSvfX7WNgvGptbqXbxjZibbiYx+74getdaolwqB4yBQf0bhOJu3xdWKiLXFb02NWSGgEFAIKAQUAq0MgS9WZ+CRdxZoo+qcGIMbWQsroHoYCLUrKjy68uHJHIqZZOjqUf2P+KjBoEdiuwjkF7HeFl93S4vHG4vXMKxQB2cMN4/1oH1KEXKdVBdkk3u9BGs5IsLsKCoLg9vlY3Fisi8SNyluLTL+4uS5+bzBSEqMQJXLjcl9uiGRYYLHs/9+uxovzFuCvmlJePPn02FimORf/u8yPPH859iRmYe0lBg8+cDU43oKg/sRRUsxI0VFjPkBqhiI3uJR8O09Q87BgfJSfLdvN6o8AaWREBaGvnfAMERaDpPVYH9N+WzQm+H2R5KrBjyVwb49HGChOwwx5hgMij7y+wpup54VAmcjAi1JxCSw+m22RDaZunmN7QU2OU3OYuvItpftKrYT18Dlh5QpBFoSgUq3E/P3b0Epk6UnduiBtLDTOxvZkseq9q0QUAgoBF7/elUNCHtyitD16lS0o7cnP78McfERGDuhd8360/lCvE7/efI6fLl4C1ITozFmaFcUuO3YMTsf07p3x4xLe+PuVS/zxiNAZGQsuY5weJxUKqwwcqmOIh2A7SDDEckdta14l/LO56vR7ZxEvHT1JWjHnK3G2EzWEhONj82Fufhk82Zc1b8fYqJC8eIfrm7Mx4/Y5orhffHp8k2wUx5fTEikz8BH1jubdf1VGJSaoomAzN2VgU92bNa8aTN6DcTYDkfPtdM6aqKHoXEPYE3+kySWlNJn8zBcsthtQ7EnES8PfITLDE20J9XN2YCAyhE7vd+ynEV+zbaWTaaVJPv0G7ab2b5j+zPbw9XtIT4rO0UEJDY9ozgfhxgn7mExy/ZhUegdLTxYWVMjsOjgDty/chZDT3j15lX8uYxvMTmxD54bfnmjZz6bekyn2p/MbP+4dy/yqyrRLyER3du1a7BLO2dhF+7fwxo4Hoxtn44oJtwrUwgoBM58BEb26og9VBoUE09Ux5RYvPn+PSguqkR0TCi9TPpmAyGWZOeGi4fV7O/a0QMhTez73I0MlTPBwRpXQfP79HCVWzUSJsuoK0FPmFQyOkzWZMp466YczDB8jC9vuin40WM+X3pOb7y6YRWcLBT9h7ULsaIoC/84BfVCKVj95s+vwl8+WYT8sgpkectQkci8MI73roVzsez6u2BiPtjl3Xpr7ZiDOw0rB8eOJzkMx5eHpA5cEfLdkUiyDaNa4nWINB/fg3gahqS6bMMIyCTGmW4t6RHLJrjSxETKKIMthW0a2xg2sZlsi9gUESMIp2JyEz318zeRUVgATlGxVgplZRfZMWRsN7x41w2n0rX6bD0EhIhoJEyktfgXJicSHwt+fp69Bd/M3oFH+0/C1emD2gQhW3voEF5f/RMTsF1U3KpEFkNeZLZZfk9PjR2PK3oHZrgPVZXiUFUJQmDFdR/Ogj/LCz/vDVyxPiTYwnEuk+5/NXEkkhoRylMPTvVWIaAQaCMI/Pqy0WgfF4XtB/NxM/O3pMaaWLu41nUD3jE0nmShFsHiGOU8bdlrgC+Cl8hqYUG9poLBFdrNoPYAP/O9dhUVwc7aaSEm5owdx345bRQ+rcrAfp47JVTwiz3bT4mIye56dUjAzF9djdk7t+BXC+fXjCDfXon3Mzbgxt6Dapa1xIshscMgrcpj1zxgFkPD+W8tMTa1T4VAa0OgJYlYbSw68o1MVa1kS2ALEjR5jmdTdooIPLpqPrYW5rMXXnxEAYrx77kjbMh4eTOeK52NX//2slPcw5n3cfEgbi/LRYGjAv2iUxFhblx8/cJD2wOeMP51ed1Moq40k5RwXlXvh8/qwRPr5yPHXoZf9hnbqkFbmZWFWz+dDUetOjZaLkL1pPbvvv8WUxnms644C/csf48XXCaIc0rFv93CpPHAPLLloA6Hksvw2YYMzNuwDQ9MHIE7mNCvTCGgEDgzEbhqZOvPAUoPS8RdXS7EKzu/hItFkEXu3bkhklKKRljpBasIZaQfc7Ei4y2o3Fd5mIeRhLm4mY0ELFh8uTHf4sjUjhppkgms3u3kFqdpTCbG6tuekuL6i1rsvc2ooiFaDPwzYMcy9XE2hCbWnRJqmS9O5p5+YHua7VO2ErYotqDJWSU6+KbW8518LQ0JCQmDP/jgg1qr1Mv6CGwryYNbwuRqG3/lRtZIMVV50al7YoNhIxUVFQgLO711R2oPqbW89lKGd28Fi3lWh67ICSHRGoEYC5O7j2PFzip6h0q1i7ffS9ainU2qP8S/OB3j+YWUdQiNRrgpQO5aI867q2d9jzjc6rOGeMa6xbYjqSxFmdsR2IzEU+eQDao34iu/5C/IW2FxPG4bZ8nlRiYmxMYQmuYLVZIBtkacZVxnoimsm+dbVTifPM4VTicOFpeAUdSB81N1V4YQ1jCjXLzH7kVphV1bGhtuRkG5C36bDmnR0QhthDes9siKHXZGNvoRbQmhuvzh82PtbU70tUQq7C0rqRFEkX7bh0dy0tByol014/ZyQTz28avf9On5OsaOHSspQENOT+9N36s5Pdmf+sy9jep4zzW/28UNF7LNq26N+lxr2KilPWLi1/+E7V02IWFiuWyiXSveMHnOY2vIRNxDGlJTU/1jxoyRl8qOgsBTn7+M3aJYJUpQYuRkBl5fIjabkLqqGL//+2AMaUBWeNGiRTgbsf3ZillYVLqDUZyHyauVdWbeP/c29IhsOK9OPGjLCrajoPQA/rU2A/YCTqu6xS0mlx2p6yIXIEJvpSJXOGvG8PW8CXeSn3iQuXpbDc4+7jPfWQEbwzmCRE37YDM/PP3mf7GnuO7sqnYEwp34M4qwWPDTFVfiPzuW4D+blsBZRoLJoqKGTAv5VuB3JscsEsteSj77KBft5UEH1ugQZjZj/jU3on0kp5ibyc7W33MzwVtnNwrrOnCctjcK55OHtozS9hf+/nXWAwuoCwo/SogKx4InbtNCsJ98fQHmL92lhS3eOiYFH67Mx1cv3wMzizm3FvuAoYjP/bRUI2P3UBnxkn5DW8vQasYh18Y1RXN4fWTRb28Fwo2xGJtwO6+lo2u2qf1C/aZro3EWv5Ybjup7iUagIO5hzTnTiG1b1SYtScTkfuwNtgy252uh8hlf38QmYh3yPJdN2Ski8Mt+5+PBWfPhjGVH/HFTqwOhuwLx7VJzJbqVxfCf4uGe0sc9rLeyMGcHxCtW21wslPnZ/o3o0fdIIiYXmqc2f4RFeZvhsvsQ/lU4Kgexako1CZN+AsF6dAo5WXQzhDLKFT7csORZxNLhOLWiL5774TGqXVmpsuglASR54f7PbZeOpwdciSiz0LbmtVEd03CovLxOaKKOZCokxASD14A3pl3Gwp169PClwL8+FAzA5AB18JBogoVP5abGz/sVN3Mu5NnPBYEyowEqJkVK7/hyNpJ54zMlvTumdempJZk371GqvSkEFAJnKwJSg+xf91yOB96Yh6LyKnSIi8ZLd07TSFhBW
SW+3rZbO6uJ8IhEAPx2xvgWJ2EZhXl4a/Na7CjJR66vGBaTHs9OuBCjk7q02q9RSNiPeTN5XWMdAFo5C2bPyXoWI6rcGJU0odWOWw1MIdAcCLQkERvBA5zBtoltffXBPsJnIWAfst3Gtp9tOpuyU0Tg4g79sHdQPv79xTrtgqJnJBnD4mHNtsMYY0F6D3E+Kjs2AszxIkFqyNYVZ2okzO51Qbc6lBLIvHBLXhj/1TfB3RbtgM7oZ0ifGWUFPnqLWKOBoSuVFFEJmHzOj20V63HD8p/QK7ITHut9Fz1kzUfIHjr/fEh44tL9+zkSjoshhtZkB4xmJ/46+HIMTk6mhLIbv3n/S3g8QdLq19TSJLOz0uKET84w1XLFRpMHHgnVrMGEOXgUj9lWlo/lh7IwZ+dWvDNluvb7DGCgHhUCCoEzFYGvV2zD/GUZSGa9r7suH47IsJbJJxqQnozvn75Lk3w3cGIpaPvzixnM4IOTE2VRYTakUwFy4vCewdUt8vzelvV4cvH3cHNcujBea8x8ZljlHYtnYeW0XzHs8fih88098ECkyPs1JCy4f73eg3lZ/0GEoTf6x6v7jyAu6rkuAke55aq7URt/d/is0/wHsoS7lLvNfmwDqpvI/xSyjWfrWv1cxGdlTYDAz8aPx4U90hC2qwrWPAcit5TAUu7APS9cqW5+a+ErXp6R8Z2paVL3z8NsMGFq+761tjz8Mtt++GeqKzbAwFwpI0M/g+GIh7fkq2pu5sixoWJPJCoORMHFEMaCinAtBCa4QaTFQQ+RF3aymTUUxLhxxdOoYC2c5jKL0YgBPaIRkkaynlJJPDzwbguBI0eHx9d/BvEc5pSWBw+nZlge5iJ2i46Fn3VtdOIKo2KkHFhMVBnzEIVoBpqe+WJ+Y4DAidLk2txDWHpwH8VRKll/LQPLc/dqN0c1HasXCgGFQJtEYAUnWqbNfgcTP3oT72xZh69WZOCJt77CwsxMvLdqI257+gMqywYnoVrmEGuTMBlBv47JGN0nHcmxkfj9jElaIeYTHZmQkC2ZOaiwBzxBJ/r54PZuRmM88MV8PD7/e3jKOSHo5DmV50+JOhCT3LNPt28JvGlljz54tHDEhoYVaqnC40u/bWiVWqYQCCBw+JYheOvQ8HMbxqslPWJtGLa2O/S/3nElXhm8Ep+uXAczQ8x+NXY0JnUUzqusNgJPD56GmxbPRDZFNyQkxePz4v4eo9EnOrn2ZjWv24fGkWsEbiT8sZyizDYhejNQMDCQGxb0jAkxM4Y7ULk3gjLI1R4zhi+CxM1HMuaku8xqkQUsJEoS5hYiU011yjwOvLprLh7seQ22lmZie/l+dApNwoDobtr2x3uo9LjwzcGtKHBW4ty4Tkc9ltr9fHloCzycuRQVRBSZeQLk8yEr3ElObC3JRkcWqa4vAmPU61Doq6QGdMD7pd0r8HM+Esqu3Q+wppAQTh2sxGFfVkLN7hwkY29sX4UVSzMZ8ijHDdadsWLWhBlICW2+PLKaAakXCgGFwCkjsKOoALcs+BgOB88jPLU9U74IfQ/EwC5veG6Vc2JWcSnyiyuQEBt+yvs72Q4kFFHO4XGRAXEqCUf8y80X1XS3KG9vzevGvpi3bCv++Pa36JEWj7cfubaxHztiu7/8sBgLtu/UlmvXEl4nfBUm6KJc2jKf3QCHOxiVcMTHW3SBQcdx+nm90x2p8FhYGYZNBbla+PuJqFC26AGpnSsEmhgBRcSaGNDW3p2oKt03+FyttfaxtuT4Yi2hmDf+HmwoPqjJ1w+MbQ9ZdjTrF5WGi1OGYN7Bn+Af5oYny4KQCiNStrBMQGc/86aqSZpJcr8Mh0lYrQ59BWbo4ji3afJSVZFpVh5KKTOcr7btrcploczleHHnR5wF5XZc2SsyBo/2vI/ja7jA8qGiMry/fC3e28OxxXngtrrxTxKdi1L74KmBFx/TGyqFQTVjGKXMwPKeiQwxcMGXdREhVvz8gvPwz29WwOn2aGGJ4VYLuvWMxr5NFIepZUX54QiLq0B8YikLfRqwJyuu1lrW8DEZsGxvJnS5BlCbDJ5ED+whbvxi2Rx8PPGmOtuqNwoBhUDbQGBR1h54S7wIZ40uMUc7Lwr99LIzdNsl5xUaU08RHtpySn9frKaH7v1vtNPbI1eOxeXDG4580AZ7Ag8RoVYtnD3mFMIuxRv2wYaNPGcGJuhk9xKO6HcZ4Mmn6q5cBAjjvH2bOZHnxk09ByGWirStyUJ8F6AUH8MkX3S1uRmmvmJvZ234TaUiGexbPZ8pCHCiRiZ/z3BTROwM/4LV4Z08AuIJGxCT2ugOHug5DeMS+2FfZT7iBkbAlalHRaUTvbolYcq7M+GI5kWI5xQfa9U0eGrxGGCm6IU9koQvlIIdSQZY4su0/XMoYMAj+kZ0wp8YolK+nnXNzsuHtb2d3rFCPJ3xOzzd93mEGOpegH/csge/efMLhoZ4eEIju9tthq+7F45EN+Yf2KyFWop37Gg2veMgvLB1IRzMSvD3oJerijdTYR7KI0dQ8Srgzbp19FD0TI7H6j0HNGI2bXAvVPITC3e8CY9DbrR0FOvUIdRmRqSxE7qHJMNTZcY+t8zwBoimXIh9FI0x7JbZ08AsuaHcAGd3JzYUZqPM5Wh0HbejHYtarhBQCDQ/AhEWK0J289xTrdhrKdAjfVI8ShxlOFRILwlDEn936yStrEXzjy6wx/9+9xNcPP+Izfx+TZMRsTEDOuP7v9+NsJCTJ5mVFDXyMgy8tokIFEMM4LPQn0hC6zf7kVFSiG3rl+HtjLX4+rJbEW9rPWVnpqVdhpu/2YmBqbsQbnWgsDJUI2EFlVEYmZLW4gIotbFVrxUCzY2AImLNjbja3xmNwIDoTgwVrCY2tcQVB1gTsTYvG652vHBKbH8DVEwvaQQkIUbyHTflkZ2ZEcj16BAdUwkDc6lKWRh6oXM/vCQoMgPqrQr8+Qqpq/LasbxgMcYlXFCDr1y8H3/3a4asBMiOtk9+zrAjBL74cthJrr47tE0LU6z5UL0X16cPw9Lc3VhTuJ/Rk16YrXqGDVrxz3OvqeNJO69rGqQFjZllWHLLXfjjDwsZdlSG/glJeGDECIRT8l5MRE+6RKzCm5vWQGrhDEtKxbKtmdpxyXoZq4QJ6cv0lL5nynx12KesU6YQUAi0HQQupRrqn3Tf1gxYJpUeGTUWHaZEIq+oAuI1Cg1h2HML2jnd2uNAgZQwBYZ2bd+kI5HjOxWLsFpJXiwoqqqXHxzi1SbX/OJVlIsAja9Q7GQI+8aVePzc8dqy1vDQPiIKl3W6GP9cswLeQk4yhvhhCmPBbLMJfxw5sTUMUY2htSIgP+oz3BQRO8O/YHV4rQOBX00dhZ+9PhelnYupMGig94eEpDrSRMu/4jDNvA+ovp7ynbxiDa5iKwpDGc5YJe91WFNSjIFDDqCwcxSMkVTN4r9ocxXX+XDAnsVn4GBpGV7/cTXKyu2ocgRyCLQVtR50VSRU4TpeCI+tVCbhh68Nv14L0VxHMhZrCcPE5J4IMQZKH9Tq8oiXcSFheOHCi49YLgvE
A3bPgHO0Ju//u3kNVmTsCx62LNJeS4qcwa7HbU99gFibDddeMAhjB6ucxgBA6lEh0PoRsPJcEWoxs1YXz0U8jU0d2gOdIqO1gSdRMbE12K8vHY3+nZK1CZ+JA1rX+UXOlb8ZNRJPfreQ4erUS2RouDHcw8k5L4xOhic6TDz78wogZUOq7+gWHtiDx9F6iJh8x/cPOg9rlmdhfc4hXtn8uP7qwbh94FBEW499DWoNvw81hhZCgCRMhSa2EPZqtwqBMw2Bc7p2wAu3XoL71rwPewRdXxFUIGQNUf8OGz1glLpnlF8gJI/8rOa6xLsWXmhlBlkvydlCuzj7mVsehc7JBVqelQh6WAziMQL2Fuowm3XOHp/3HXTCv7iMHz3S5OTGkBYDSdZlHUSw9NgWDNE8kTDNY/d45NoiSvc7bD4KlbD2GuGRPH693FcwjMl2SI8sZzGyUIxte3NRcFUFpk8YeGQnaolCQCHQKhFY8rf78N4Pa5ESE4mx/bu0ujHqKTA0aWDjRI9aYvDT+/WFqNg+vOQrVHEGz+01ak3O8XKS1zM8Uc+wcR/PoZJwFxdy9Hzmlhh/cJ8RJos2CScKlbf2GYIohq1K5EZ9xcrg9upZIXA2IKA8YmfDt6yOsVUgcF73NPwj8gr8YtVHzNlyg2r4cCd64crjlZSeH4+NZMvEkLyav0qGMDL8REzEO/yULNZR7MPuNaHcbaGSI7PGmLXtYG4DaRW+np2DT7zfaiRG87KRndlyOWvqYC2cKD2c0exYwiK5z3ZhofgTlSFTQqNaBTYD4pI4a25CZZIbobsNFP3wIi6VipW8qSiuomJibsAD53B58O/Zy3Dl+AF1QiNbxUGoQZw1CJRTjvyztVsRwtCqiwexEDlDiZUdHQEhOjeMHXz0DdSa4yIwIDUJXin/QdJVY/KS+WF+u8RGkIw59PCF+XBfv3NrNmlNL5664QLMWbEFPVLj8OpXyzFn1RZNdXcAvZFPXj0RaSyorUwhUAcBmWw4w63mlu8MP051eAqBVoHA+YldNaXCZzd/g1xHOSJjdChlmKCPRMtDT5jIVGhTnPLMi64+qTovgG8lTUquuxEkZwcKo2G1MTSRC2SZbzM/vIchKh1ItKpVhizFzOmqoB+Nn7MV+pHcPgLTJvTD+KGdkRoWrc1MtgpQOIgx7Ttps7hOexkxqUT/sTu0A5Pjw+D9yPw2DYU7YrXhllMAxUuSaaxO/m8tx6DGcXYgILmLM16Zhf2FJdpkwNId+/Dc9Ydlzs8OFM6Oo9yTQwGMA/no3SEgTNSSR51ZWkxRC06oVYe0NzgWOsQszKkd3T69wdUtvTDCZsWN4wbj//73Jb7duAvOaoGUdZkHMePFD/DVY7drkxstPU61f4VAcyKgiFhzoq32pRAgAlIUWpoIUEj8/76SEnzz/XcU8ZBoQvq2IkiwbB7oGb4oRMTPi6ur0gh9iA+dbfHYl2dEQmIBGN1RY7qeduh2h8BSZIUrUtiLhKvwgSRMLMRixO+uHIchgzoFFrSyRwlN+eiS63DvV3MQ2fdbGM086FqWPmEfSvZGwutiLbL2cZpMfq3V6qVCoNkQKKM3LLOAuZ4sXC62ZPte7bmpHr5dkoH35qxGVIQNv75zAlISW4fXuqmOr6X7ESLtomfdbKZ6rTbT0/CIFm/JxINvfs7zrNRA9OP3k3o0vGEzLe0ZG0cSdiQLo2I9J+MC53x5urRb6yRhQZjK7A58s3FnjUqlLJdJRlGt/H7TLlw0uGdwU/WsECAC1b/tMxiLWrdyZ/BRqkNTCLRCBISEiaVFUXjDaIA3krleouHBvDDt3MMQFB/DEd35FuiolGgz6jEypisMoY4jz00S2tjVCRNLd+l40yD/HNF6aGIXnEXt1SMFA/qntUIUDg8pzhaKf06eAAtrh9U3H7GISCuDjUXIH7/jwvqr1XuFQLMhIAp2yVER2mSAhX+3Q9JTmmzfGzMO4k///Ao79uRh9YZ9uPfR92sIX5Pt5CztSAjYh/PWYMqNL2PidS/iyrv/g+Vr9hwVjde+WqEpzlY53dqzFHxuSRM5+rv7D6NQEufPZYJN5gHYQg4GbuPknK+zenF/z0ktOcxj7lu+g+35+XCFsgom85RrmxCx/BbGuPZ41OtWgoD8TBrTWslwT2YYyiN2MqipzygEmhgBC3NMjCRMHiZby5ynx2mCiWoVQtYMsS7t+bmh0zE7Y2dAtKN6/zKTWOmwsJgyE7WZr6KPJ3kzBgiezsWVvXx46+HbkRoTc8zZ3yY+nJPuTudnLpjEUtYzESkxdKnC/+6/FqmRDReurvcR9VYhcFoQEA/Je/ddg1krNsLGv7mrz+3XZPvZujMbPm/g9y83rRKGW1JahXYxracmVJMdbDN3NP/7zXjtvcWooAvJFw5kF5Xid3/7DK88cy26pR8ZeihhdHL+lcgFA7/z1iAo8cCQkay71RG/mfcFsgvKYCrWwaup45OE2Zh3HOXE5fPfwaDEZBQ4KnFJx964oetglhxp2Tn3lYWb8Zdtb6OgRIeDWXHwMcpcfuVmlpGzFgbyK2WMQzo3vm5nM/981O5aPwJMJsdrbPOqW+sfcfUIG/vXKduJTJkEwo9jO/KsxYXKFAIKgZNDINZqY8L/4T9HHeMUPVUhcFVRZaoiDDenjsaElJ4YmphCtUUDKl0m5kkB+aVhKCgLRRm3rQg1oqq3Gzo3Qx7J5nyhvIkYVYVvSta1CRImyDkp7V9SFaqpQAaRFLLppYuwPNykSFgQFPXcoghEh4bg7vHn4MZRg2AxNd18Zvf0eDhIEqraMW+UpbWkvlZUZN0i7S164G14529/shI5aW7knM/z5jASsTFAhdWDWfSSNWQPXzEW7RgeKuflhKhwttZBhgclJCOtYzjccV5U9qVYU1cSS5so5+rhLmO5E08lvj24E+sLD+Gv6xfil0vnNHR4zbYs11GIJ7e8TpEpJw4ejNGuWxKGDzYXb519nGQT0ZuJ/buiT4daxTebbYRqR60aAWHsjWkAaT3uZBMi1qbseFeQzjyah9gmsO1ky2eT+RfReZXiRf9mm8kmTnJlCgGFwEkiYKZH7I0Lr8BDPyzAofIyEo9A/phZb8QNPQbiwaGjtJ6v6t4Xz63+EaX0ltlZn0e8YTVxiryweaJ88EVwBpeS+F5e4ErWhWF1WCYuSqrA1sI8DE/uQLn74/3Zn+RBNMHHEkLDsHlPd/RM3866YZVahGaVx4TM4lhEuVo2R6MJDk91oRA4JgIO/s26IximzFwgr82Mlx+8WuVDHhOxxq88pK9AZRq3l4ABHx+oIFswiOVA8ssa7KR9XBS+evIOSE5TRIgVP/74Q4PbNffCXy6bjR+z9/FOjMfA/5pJDTEqJoI1xmqnvTm8Hnx1YAd2Fueja3Rccw9V29/nB5ey/hknFqlI5fUKAztsct0bOyAd1wzqj+Hd5ctRphA4+xA43h3ZHwnJK2x3sQknrW3xfHMd2wy2mbVXqNcKAYXAiSMgJOnHa+5Agb2KZMlAiXsPIllnpTZxspnMeHXCZbjxk4/hoaiHNi0SvBj
LLvlX6udMur/SD2cq683E+vH99jws3EePPcNrxqam45Xx0058cM30CSkg/ctBY/DX1Zzd9TtgtnjgdhtZGseK/04e30yjULtRCLQMAlH0tEnoI4OM0TMtAWmpAaXQlhnNmbXX+C5RyPLnw1jMULjquxkfBZCGU0X2aCbfhXwnrcXy7RX45tAOzrgJ6ap3Sxa8DjAawpTH+pMe5hfTa+a1+HD552/jH2MvxriUrs0aHSHhtTP3rCTvJeZkjVabEw5GeYDKvjLcMBb6/vP0yQjldU2ZQuAIBOQnXq0CfcS6M2jB8YjYtcc41jyu+8cx1qtVCgGFwAkiICpeIlohFnGUz45o3xFDk1Ox6tABahWTjAUvwLK9TDiypow3lDPr9I6JOa2sN5bv4dVZh03W/QwRsSPE0HpuLrTmRSLQAABAAElEQVRB1nq4pe9gxITY8MKaZcitKkef6HZ4+JzROCepfa2t1EuFQNtGwE2vl9yomkV8odp6to/HS3dOw45DBbhkWK/gYvV8igh4KT3rT2PO7T6eLHlzF1QZ1NtJeAc2ndjKKQ7zuB/Pc1SwdmS1V6k2D5PXcron+bLtMrO4c+A4zdlGVPZzoDLEgfuWfYpJqd3wwnmXnTYyNmvxBvzn65Vwe3yYOKAr+p/LUEQwXL76IhWTVI7iXF6TqkyIDgnBhxdfq0jYcb91tUErRmBQI8Ym6mObjrXd4SvA0beSMES51dt+9E3UGoWAQqA5EXj7iumYuW4t/r5mCRxh9ppJIz9DEl2JDE8h7zKwPpk3PHC11nNWyZRjQPl+J25v90tc2f0iXJZ6SXMO+YT2Na1LT0hTphA4ExFYsH47Hn3/K3hJxH4+eQRuHTuk5jDP6d4B0pQ1HQJfHtiKXeDcsffIW55Ch73pdnSae+oaEUcRJz30jBTwVdLLFPSKuUnO5FRPYmmPD0zAhRwyQM95OkOWBR5eB9zRbszP2oZrO+/DeQkdm3yk81ZtxfNzf4SDpQHEPuP7dQU2+LvyBrJ6slDPcNDYpDK4XayH5nQiPUoVcG7yL+IM61ByxFux/cCxrWar/oU3ONJOXNqxwTXVC488K9Xd+s98K9vIX7bs6DdsyhQCCoEWRkBi6+8YMlRr2wry8K+fVmBN3n6U7HLC4OKfqpy8+ORM8MKZTCXGCB9CDhnhMemR/0M85oTNRXtbCobEDG7hI1G7b04EKhxOvEpZ7oNUjJsyqAcT5GWeTVlzI/D4h9/AVV0T6u/zF2NXSSFuGDoAvVKVDtbp+C4+278JDp+bBIaEhaRFPJGa8W2n2LZTp03O+/8471LcvuhDGEop2CT6ITwGnZOEi6VL3HF+eEICx1YR6kH4FuYbMjrCG8a84SIzLwsuPL36O3w+9bYmh/mjpRtrSJh07nR7sHdXOSwkYrXNx7vJSoYnGpnnfMhegBRby+Su1R6Tet2KEWjdRExI2LjjoPf9cdZrJOtY24znypHVGyw71oZqnUJAIdAyCPRoF48XL7wEP393LhY5d1P9XeZMaDyBWegFc8XSQ8Y6XPJez8nKIuody+3eqqKfWoSIbSvIx5tr1yKD9WTSoiJxy8DBGJycrA25tT64fd5AKYFgWFBrHegxxiU3n7f+8yPszi3UQoeWbtun1aiaTEKmrHkRqCEC3K28/mzpFny+fCv0yQZcOKA7nho1gWp9klejrCkQsBpYFoOmS3DCL0IddmJL74wxyYXkiKMFgTfFnpu+j7HJrCVZTs001pg0OqrP9drpnSd4A1lO9SKpIemKJjGLDNzJSj6cvsSEbfqCph8Ue2xI3l+WVVSGINTm0LxiOl6EnA4TystCkJ5aQu/e8XwBp2WoqlOFQFMhcDwSJvs57jbH+yt4kJ3MZJM/7f9jU6YQUAi0UgQqyxmLHyRhwTHyL1fPnDFzPqdNeRHUhXlQFGfB2kOpiDcwq7uZbe62DPzftwFvgNTn2ZpPIZHMTNw37BzcyxY0L7X53/lkBRYu34GIsBDcd/No9Ojc/NLGBytL8MjaufipYJ92o3FRah881n8Ki2u3veTygvJK7Mkt0kiY4OzgjPWcVVugiFjwV9d8z9dMGID/LvhJWBgvroE7Z6YxwZ3N78SWgU6R0bhrIDXWlTUJAld3GoQfcnbBLvlKKQ6tTyMnVUYmpLNAcoCkNcmOmqmToSmpWF1wEH7WnJO8YjH5HRnLdPDQA2bN18FSyFxh6mKwbFogdJ0EVH5tXhK402E3jh2MrftztfOK9G9lWYcrR/TFmvgfsHY/S5KwNImPY/B6DYiJKqeKYxTirSo08XR8F2dUn/XvaVrvwUlByY5stXnVp40Zbu0PNLT9D1woTZlCQCHQyhEY0SUNqzOy6tZD5mSo0exGSAgv0xTvqIjlc7EFLqcR3x4oxvr0AxgQk3raj+z9VRvw7tJ12FVMIkAtElF25D2BOOkobezBs4uX4t9frEDf9om4bdwQ/HnFV8gqL4aVkse2LSbc/7tZePO5GeiQHHPaxxrcgYtesGt/fAOFLIzKVH/eRHgx/8BmlLjseOW8Y+kYBXtoXc+hFil1cNhktlrqIylrfgQuHtQTr+39CaaMI/ctaqkZhflHrlBLThqBESRcd3UfgVe2LaEXhuF6ZL3p4bH485DWqyB7rIN9acLFuLLwPRzYX1d638TiyKF7fTCTkPkozmQs92uErKg3KRgVIGFgiCJVaJ38jdVW4z3Wvhq7bmy/znjqhgvw2oKVrAfpwZQhPXDnBedgZ0UnPOz/FyooIFLFGpfhlN0P5ano/3re2Niu1XYKgdaOwJscoBCxLWycUtNMbm+ahIh1ZEd72Y5m/MuGSA4dONoGarlCQCHQPAjMGDMIX6zJwK7swpocCEc7qSvGmfZ2LtgrzXBXmqAzM1ylzAKfw4uZO5djwDnTT8sAy6ocuOapd5BTVg63mQSQDjjhX+YSEjCGzVTxzOGXqSCetvScpK6kh06UIFeu2BmYUyLnco9gSOAPOhhydVhE79iNV5x7WsbaUKc/5uxElcelkbDgeiFny/L2IM9RztnctkVibBYTHp8+AX/46Ft693SIDQ/FLy4KRp4Hj1A9n24EfjyQiW/27sK5ye2xMns/jKW8Seasr0z8SiiZlSqKU7uocNGm/h7u7TkK16QPxqbiQ/zbDUPPqOb3sDfVMcVYbXhk9Bjc9+48hlqKn0tuxXgqJcEx59DrRLLj06bZdRTsoDJnBaXseR2Q+EA9t/1o62bc0HeA9pmmfJg0sBuk1bYeEWl4a9jv8FXOSuytzEbnsFRMShyGCBNn5JQpBI6DgC4QWXucrVp8tdyYnLTM7fE8Yn9j5xLTNJdNys/LNB2Dk9GFbSzbeLbfsx1gU6YQUAi0IAImowGzHrwBP2zejX+uWY7Nzly4bJTIdhvgdvBPXXIjyIQYDcUHXrpzjdigy8e2lDx0T4mrCXFpqkO4/++fID+7HORcmjocjHKzIOExfGRIjYWpCg7eC5lKA81cGbiZcLGKkrMn42nEuKi8nwdh35kbzEEIbHR6HgucFYFx1+teVMuKnJVtjojJYVwytBfG9E
5HcaUdKTGRqlhwve/2dL/9dMcWPLL4a61GoJmemeTOYbi/83nILipDid4Ju8WLiR07Y2xa+ukeylnZf4zFhtGJcvvS9u25n5aQuHtI5HmG9fCsygktHUmZmE9OuhKyyJO9Xzxh2llX1nByi2oZX+zcflqIWGAPRz5GmcNxdYcJR65QSxQCx0JAu1c51gatZt1yjkSI2NaTGRHvzo5pMlUunV/PditbElsVmwRUzGd7ms3BpkwhoBBoBQgYDXqM798V4/p1wdLsfXh89QJklhYzPp/zKdW5BEJu5Lrs5YRkzjYnrt/wPkJsZlw0rhfuPm+YVt/lVA9lf1ExMnbmaruSvkw8S7hF4avaZAbXwJlaMQPXmRm2IsMS5mU+YIIr3Q0/Z3eFNPo4TqvZhImjmlfOfli7jjWeRW1o1Q8y0vTwuNqL2tTrCJsV0pQ1PwL/WLNUI2GyZ/Gu5juqEJsYiiuH9W3+wag9NgsCxRV27GRduIHpyZDJsqayHcUFWkSBO9arqSaG7uOEm00HO2uAWwp5fhf1RP7XMxJBJ0q6QtJkWp22r6oQu8ry0YVy+MoUAgqBU0ZgJnsQMpbDRg1T7XaGf3BauCKfjm3HI2LyaWF4jx67G7VWIaAQaE0ISAL3yOSO+GDS9Tj3w5c5tADNqRmjXJdJdHyUEK5ibWd3mR3vzl+Lubu34csbbuRFuhAP/PgFyt1O3NF7KO7rf17NRxvz4tnvF9dsJmcjCbvi3CxHESBc8prijZpJmKKfCmagsmNwlEwN024i5EYi0m/DG8/OQHy75g0FTA9vh+kdB+GTfet48+zWCqka6Q37w8CLId4MZQqBE0VA/hbqmniIj1xadxv1rq0iIDW1LntmJuwuN4Z1bY+X7rq0yQ7FZjKj3CX3fJzIKqUkP8+ZPoYkung+9zEXzFzOUyjv8OxU59fRW2Zi3pg7ir81xnoVGYtw9aL/4L3Rt6F7pGjoKlMItEYE5MYheFfQGsdXM6Y3+WoGmxRu5l/biVljiNiJ9ai2VggoBFoNAvEh4ZrsOidFj2JU2SIhMpcHcgmKSipx+TdvYG9hVc3t4csbV6BfuySMSunYYB+5jnxUeuxIDknElux8lNgdqHS54OYNgXjC5Dxanu6FlVL6AXbFMxVJoEN0N+jyckUEwhWtJZIrwxuHWIYtSt6DzoLUsCi8MupKpITzbqIF7JF+F2JSSi8sOLgFoVRKvLTDAHrD2rXASNQuzwQE7h1wDp5c/j0cFDOQENcYawjOS+5wJhyaOoYGEKhk3b4KuxNuqsCKV6wpbXq3Pvjf1vWaZ1Xyb/1a6Pf/s3cdgHEVR/u7XtS7JRfZcu+9YFwwxsaY3gImdAgQIJBAQvjTE0hCQgiBdCAkEAi92VTTbAzuvXdZtmRZxaonXb/7v3l3J51k2RK2rLpjr17ft/vd3Xs7OzPfhO7gJzmTm4/byBBWluIOLg9YY4IbOrrAOpnH7o/bPsU/p17Tms1SdSkEuiMCB9nphSfbcaWInSxy6jqFQCdBYFxmBlblSXhn5LXMVb6T9XQHlFlUmS2tE7sHh+kuFWScVuT8AGMK9leWHaOISf6jf+W+jM+KVmh2Lqc7iINb+sHvNWsWJD31lQCDxANUuKaMzMak4VlYln8AO6pLoGdi6bM5Q2wixfKa7Yfgo2UukERlkHX+4uIzccm4UbAaqK21s4hlcWJqtlbauSnq9l0AgQVDRyPFZseHubuRYY/DHWMmdUr69C7wUbRJF1LiY3DfJTPw8cY9uPv8qa16z/smTMPywwexj8nAvfGcvCriQ11mvfhfnuvRIlbXgI3ER4luGGNC03I8m8QlBdGnqXWFQMdDQL6oHV92son/Y1nEEjJTh9rcKqyJoarUX4WAQqDTIvDEmZdh7tGnUOXgE00IOyg6JnY2l4k7S5BxBXwxM6DbwtwzNEKF9C8xTWkPQCpHVEamZPbWrov+s61qN5YUr4Q3GCLW0FN3S8ouQN6mntp4wJxgxJhxWbh27Gickz1Aq+fuuvzw9TUdYG6r1XsOIc5mwcwRObBbhFtRiUKgayIwt+9ASFHSPRC4ZuZYSGltiTWbsfDS63DTB69juZuTWQmMFSMxk+bJxed5gOy4ek/orgF7AP6e3Ci2wmMNwNTDqYUMZ9iESlGJQqADI9A5FDH6/2gK2NwoJKXlraqIyehNCDtyWH7FIr4UPVhWsyhRCCgEOjACmfYErLnyu3gjd6OWPHnz7lKUF9Xyxc2XM4kwRPNirk2Safgxli6IHrsLu4Ml6GFKxtD4TGR7EvCt37/ORMB+3DJnIm6dG0q8XOxiRHhYJA68xmNG2d4kGJyhnV66vmzcV4inL7xEU8Ii5zZe9s1IhhQlCgGFgELgdCCwtbQId3+8EAccFbS6M342ORv/vOgS2EgC1JnFbDDA6jNT4SJlvYVmMAuVL4aoWKxeuizq4eLwUE83RI2ko8zMJz0n31yMFeN6fFoQdw6Z0Zm7r9quEOgoCNx0Kg1pZMA+blV/4xGJ1l8QPoNhoPhreF0tFAIKgQ6OgNVgwjcHTMQzc76B22ZOhLcHlTA7Gy1TLCwBvrCDFh0O+0qxcM5t2HXlj/HxBbfBsc2FVz7YhMoaF2rdXvz1vRWa9Uq6OyA2m0Yz/qMStqs4DftLUlFbaotUycShchZQWiNEq0oUAgoBhUDbI1BYU43LFr6I3JoKzVrkpxfA0rIDuOTFF9q+Ma18x2KHA8vzDmouifIs9lsC8HJyzWnWIy6rmnFhdD93UNmkJUznkRhdedzLPjM8ZXakGBJauUWqOoVAKyMgdqWWlFa+bQuru60F5zV7TktjxGQKfBzLhvBNy7lU/kNhMNRCIdCZELgwZwgeXrKEvPGNWs2J09L9OryybSPSkm144pNlyMt3aCeJvibiDwbwwML38cR1F2F0ek9MNM/G4vJP4fHxUSLTOnR7Cbq5wllnyWUToJaWbBervRKFQPdEIEBz8edb9kFiLc8ePaDN8+F1T9Trey2EFm5a57UZoshuPqJ2eUqRX16JXkmdVxmpcLnI5hp6OgfMDPaVER03/bSGldXEQCdu5zLRJqy0EjjGhc5NVYzP5xqvF9e9/xqWX3M7EiwtS2exq6gU727diWqfC4lpZgxOS8OszAFMQt65LYuRr4VadmoE5If8FIvEaUlpC3mQNzkRC4/8OO9lkXYdV1qqiMnctgzbRC8VkeQT0SH+2k71RyGgEOj4CAhJQCJ/zhXC0hH5RbPZ1kK6KCbo8PulX0LXuxruPHm5GqExtQvjVljK8mqw4L8v440bvwmbOxMHNvQGsl2aZcw3kLEHRxhoVmWC0arHRcOHwmZSL+kIdmrZ/RD408JlePXLzVrHV+0+hJ9cNbv7gdCOPT5QxXnjkK7SoBWil7yyfgvmjxiMoRmdM59WdmIiTMwd6WTMb0QJk04aanSw76AVjHqYp4cPLnpAQM4hDkErbWeSAJoikwNv7dmGG0eM17ZP9OeZ5Wvw5yUrURPnhI8xZ8gjgBRbjA5vnX89hiQqGvwT4aeOnQQC8jXVgh5bdG0lz2rW+
tSimlp+0lKeemEzp3/czHFtDru5c+T4kyxvsaSz/JrlS5bfsChRCCgEOiEC9844E/Z9NGLVsNBz0J4r7+iQiay61ktqYyZU5gyrL4b08mSO9/Fl62eOGh/dF+Wf8WAQr+3cgqE97Zg+dQt6GSphYoLaOLOLRCB80dNePjm7N342Z1YnREc1WSHQegis3HVQyyMluaRWUxFT0rYITOvZl0+2YzUxHbkr/rN2Pa76z8v4aOeetm1UK93NYjTi21MmcbAaxOSUQ3hjxovYcP5fkF0SyhvijePTutoIQ2Xo2a7dVqAwhebRnX4fciupqDYjhZXVeOLzFXASNJ+do2O/KGFSERNI1wRx/eJX2ISQctdMVeqwQqArIXATO9Nc+W5zHQ5NaTR3FvAiT3mA5bcshSyXsLzGokQhoBDohAjcOHIS+o1Kgr1Qj5h8Mm0ZDaRADnVEcsyIBEjmoVnM+L7107vQF8t9ES9DvnPdNT5scL2O/QUZOHQwA97DsbCREn/owALMHzAQ/776MlhNLTW6h+6t/ioEuhoC184aBzN/X1KuO7v12fO6Gl6t3Z9LBwxDTkKylqNQq1v0BT7ajNUGuElAJDndHvnki9a+7Wmvr8rtxqT//g2/WbMUwxKL8OTkdzEwvoyuimTCJfuSm/xH8twWQiaL3g9bLJWzRmKnS+GwFJlfF10uiLWlefjN5g+ZX+wT7KsqqTv7i7250AsTIwmdwLizkBIWOaxDqcOFQqdQByhRCLQuAkLg3JLSundt29qaGyXxp1wnxVx7qW4LkGNlUdtqVSGgEOgkCEh+rDcX3IDzXc8jn7OdPiYclVgDednq0z0hv2MbRyvRUzUygJGiiQ7XjR6LF4rfgqM2nS4uoRnSWqcFKT3d+OWUcyInqqVCoFsjcNGkYZgyuA+CjBXLSIrr1li0R+ettBq9S5r313dvwzt7diDJZMWSzblhhYIt4iiv3F2B7254gEnpM3Fzv+uRaklpj6Y2uKew1H64fhdcXh/mjBmIxJjILFjotAvfeh7FTro0UC+6ZcRqKlviexiS289ahl+tn8NjIaVJZwgiLqkWXo8RPpJ2yDaoU1nIunhB/yHaRaKAvZG3IZQDktc9v3cVfjZmPi7LHguxvEkak+MGpBBDs+bDHmmBWioEWgmBujFHK9XXAauJHmY11bx13LmWRZYyPbKbRWz4si77lCgEFAKdFAF5uS668To8eNZ0nNm3D64eMwrPLrgEpsj7PtYHvUyiyoNQPFm4NJG7QzbtMSYMyUhHhjULQ3IOw2L2wmT0YcSAApyTMxGJ9pYFf7MqJQqBToeAEHDsPFKCdXkFqHIea2lo3KH0hFilhDUGpQ23hUzi2mFj8NrFC/DU/Etx+YgRsNl0iB92FEnjSmAdXoLc6hpsrtiGH2z8GQpqjrSrq518v27982t4+NVP8bs3luCy3z6Pqtr679m+8jLkVVWEEKR+1CehnJNo9YCeO3In3cYPI0DyJL8pCGuPWu25LWqZiDXOhX5JicTjGsSYzNhZeQSv560PuaTzuJ/WMXfAh4c2vU9SDzfOHpyj4aF3cO5ec2uMHh0HMTQ9FanWGK1u9UchoBD4egg0ZxHrF67uH1wuZHk/vH0el2rKOwyGWigEOisCQqRx44RxWon04QeuOXh068fUvWgl45SL5pZIvcrAmArh99Bx/bHb52unfzP72yhx/xxp03bzRR/kTHI6Lu39zUhVatkNEdh7pBT7issYKxhyce1qEHjoynbrf9/CxkOFGouoWAoeu3w+5g4f2NW62mX78+vz52C3fSsKPPyOUjsJ1hhQsLonzD4mO+5Xi1tcj6C3PQOPjv4OEs30yW5jySspx86CErg8IStXGVOALNuWi/MnDtVa8lGezInXy4byLGTHVsBEdkQ/yQ2+tfRy7HakIWBk52j9qiyOhynRSYsYXWRtHujNfqQkMAVJYsjy91XRPvhI3NFYjDoDNpbl48yM/njyygtx72vvwlKlgyPBTcIPxp/xuz8wLRHPzb668aVqWyHQnRAgQxkuZ+nLEq1X/YrbzUr0BSc6eSIP3hF1wgdcfyhqW60qBBQCXQSBb/afhJk9BuK9/K14umANanRu6KsM0JfqYbJ7YZxUjR/uehGvpN+NnvY0JJcswNNfrSNVPXABGciMg+WZpKQ7IvDqys343btLNYr2m4ak4b0NO3H+2CFdCoonP1uBtbSESWoGET8nIO559V28e+d1GJCR2qX62lU7I8pziT/Ephgknbvvk2QEvTq4qZW5i21UMvQoGFyCZ/YvxPeHXNPmMNgtZrp711udZPWrI3lY+WU+zus3CAlmzoZFydN7JuLcTDorGb3YWNoTeytTadEyhc5gXwNePfxUwuLTHDBQCfOyf3uqi7GXcWAD4tMQb7ZRiTNoLupR1Wrf8fjwvWYM6Iuv7r8dX+3Pg5fETOkpdqTHxKJPbFL0JWpdIdAdEXiHnRbWRvEUdH9dAKKM2Se8VHjyf8LSlyWb5ccsR1mUKAQUAl0QgV4xSdhfWUklzA99ESkQaw0M+uYgxWJEnNEFm74MD2z4PZ5a9yH+vXIjZ1OD2kt78c69eHa5eDMr6W4I+Dmj/puFn2sxLTVuxhly8PiLNz/pcjAs35dXp4TVdY59ffyT5XWbaqXjI5BqCbET+fOtIeuOmMZEyAhYuz1es3YWu9onDD4jMRbfPu8MTmjQ6sRRmjXJhDeLduC/zIl280dvItlqi7RWa/IRVxyuXLYAb+UNx56qFM0qph2I/GEMl4kkTHoy4QZ5JUPDGPPlR5mbMWaUc7OGhWLAIudzqed5qdZYjEjMqttrN5swZ8gAzB82GBMyeislrA4ZtXK6EGgJUYec087Si/e/iuX3LI9FFa42Ly1VxBawqjQWobB/m0VodmSfEoWAQqCLIrBo3zYEixjYTVcX+Sf5PPwyU0wXnoMre2P3Fiv+9sYmeCu90HlDIEhg+RpaC5R0PwS8JHzxR83iCwJufh+iZ/a7Aio2DkabkvzycMxOUwfVvg6HwEOjrkaMwYJgFR2DwjpYpJF+pxFVRfGYnjo+sqvNlzfPmYglv7kDr/7oWlT09MDFmC0RYXn8345NuHXUhAZtKnTG4+Gts/CH7VNJuNHY2YkkTJLwWeuouBQy7JcP7eGJmVodYvV6euq1SLfGwWYwkfjDiEHx6Xj2zOs098MGN1IbCoG2REDyiLWktGWbjr2XzMKNPHZ3y/Y0/rUe7yqZFrr3eAfVfoWAQqDrIWB2m1AfHh7qnyhkmkcWZ6AchbGwVOhgDo9ihC7ZZDdgkHLP6npfhhb0SFIVTMzphQ15h+FhfBgn83HW0ByNibMFl3eaUx6cNxNXPkUCYZmFlQE8l8I4etagnE7TB9VQUAnpjdem3Y9b8l5Fbn4F9DKZJLoKp6d9dh3K8mLxqG0NzsuaDGM0E0YbghdP0iODSa991SK3FbfKRFrEfjxlFi1jdvxhzTJawEImgXP7bcK8nC3YfzQNz66dDbffROstn9rsmzM3DoYYL6y9ajQrm3x37UZ6O4RlbEpvfD7veyQt
KSWbohHiFaFEIaAQOCECW3hUfnyiS93Esp9FXBPDbwaM4nqz0lKL2Oes6bMmSrM3UCcoBBQCnROB74w7M/SIadx8ecRQDM7QUjalGJxBDExPwV0zpoQOqL/dDoG/3Hgxrpw0EmP6ZCI5xo5HrwmRunQlIEb27IH7Zp+pKZpGDoqt9PPqn5qMW2dIKLWSzoKAxPg98Nkn2OKtgJt8HJ7EALzJAbjj6J1opqs1R0cFNQ5sPJrfrl0SVsP/mzyD3zMjYrkubok/nDRDa9O3x0zG3lvvx7dHT0JmjAvzqYRZDH6yGB7B7+a9iFRjNfQeOiMG+IQWj4YaE9yFdu3aofGZx1i7RMnrz5gxpYS160eubh5BQFSclpbINW27vIC3u5BFCAwHsMwNb0f2c7N5aalF7PtRVUmUqLCDhOzkUQfUqkJAIdB1ELhl1GR8tGovNh0+wk5xVtUnT0RaxCLTNxysCGuWPCiDdNLWp+twL+MaJI5ASfdEQD77H108S+v8kiVLYOmiCb1vmzEJF48ZhpX7DyI9LhaT+/Xucpa/rv4Nfm/vLnyVnwcfySt8A2nNFzKiMj7cGD8VsJJ90E7zGJW17eXFmJDWp13huHnkBMzpOxAltTUYkpwKOxWyiLg9K3DrwDdxXu9C5Lvl4RxiKy2piUeVS5SuyAObq1TGfFVmWHp78OORF0WqUEuFgELg5BDIC1/2Xy6va1RFU/sanRLabKkitq7R1V9xe2mjfWpTIaAQ6EIIlDlrcTTWjUCGTmPJ0nNQEhdHCuRysiKSYczX04tgih76I2Z446mNZelJoZxIdq4NCPh2Qm/sD71p4jGzrl0IItWVboxARnyspox1Ywg6XdcLq6rxv/WbsC7/MHaWkR4+yPlkWsPEpC/WMG9SAFabG+k9Kvjcku7pUO0XMrT2l95xCZASLVXVf0FV9R+pL9I9wW9la+2cF9MaToutJBQJrUdfI8rY9alzSMIh/AJKFAIdHAGZ/+34MrxREzmdgxYHmLZUEWP0R53I9IrcoEfdHrWiEFAIdDkEHlyyGAeZNNQb5MwwRyXi3eIKGGGPc8O7K5Z09kb4xjvgzyZZR20MZvfqjXTfnXCVbdSsZHKN3jAA1pSXGJMgox0lCgGFgEKgfRBYceAgbn99IQllAvD4Q1YjJuXQGBODzLWl4+SSpre4GfXKySc998mD7MOSz/GtQSFXwPZpedN3DQSqUVn1GA+GInnj9AxN8YfcDuWKtNhqZMRV4HBlMuPEQlYxbUzLbj6+fBW+KjiEm0aPx5x+4lGlRCHQMRHoAIyIJwLm/3jwRyw2liqWyMyHh+tPsbRIomzWJzxfLGJrWWS5guV+lltYlCgEFAJdFIGtpUW0hFEJqxNaxios8JHaWUeKZ5lZ1dEVRuIKrhg0DI+OY24lWsMgs7NgCdZqljFP9aN1NagVhYBCQCHQ1ggIm+udbyyC0+utU8KkDUI+JAqYVmRdXPfcRlQejq9rYrXLhR++tjBEUlS3t/1XfL79nOuqdwM3csTa2+BgL4Ikj7FSxbTgzulLMZxKZcBIV0sTqeuZ8DnIuXqJj1uRfwh3fboID331eft3RrVAIdA5Efgtmx3HIoMceWjIupQUFlHSWiQttYhJOvfGBGr0T1KiEFAIdDUEPs7bg9+s+hxHaqsbdo3TqTJz7Ivzw1gtb3Me5gte6I6vGDAKQZc8kxo/JjzwOd+BJeGXDetSWwoBhYBCoI0Q+Hj3Xqonmj2oyTuKAkYNJiRc97qihkZUXpZtOYhJTLLckcRozKZyKBPv9ZJg8CCWRR/3MypbdLvUTcDjxs/hp3UsJIzldTFDmD/UX4/Ogxe3bcJ1I8agb4JiSaxHUq11GASO/7PtME1kQ8QqdhnLNBZp8TIWSfXVIol62pzw/OU8Oq7RGWIZa7yv0SlqUyGgEOjoCCzLO4B/rFmD/eVl0FkDOOyUl3ZkVCKt18HErKJeBoEHqYQxXQ3s+7k3xs94CgOGJ2ViZFJPuEqir4nqdSjYImqHWlUIKAQUAm2HQEFllZZovEV3pGXJwthYP5UVHdfLmE8sQKIih0tYqTuO6PWJiIu9HVuPvI4Xdw/HmqJ+HALqkJMSwD1njsLUtME49/XncNjB53nUoznAZ7yuhju0vGIkKSHr5xGyQypFrON8tqolnQ6Bv7LF4uP7Urjld3A5h+Wu8PYJF80pYhIH1pNF/B/HskR+zmKCq3dG5oYShYBCoPMh8PiK5Xhm3Vo4mSRUJnICOi7J22xwhX7qARupkPscRmYyA9a5q7AyHjsLqXiNzkCvXjYMY0LQ6wZM0dwTjbar4HU8yXqirWIWGK0yUaREIaAQUAi0DwK9EhO0fFy0z4dIBDlnLY86HUPFxBoWpKueML+KiOuew2OBu8gIDy1jXqcJMfTE1hhi26f5De5a7nTigz17UEzlaWfFMCw+sIBJ0+tPKc0nfdvr72JS9jLsrmCScfanbuSmncf+khFSR28G0LXcq/ejn7KG1QOo1joWAqGfZcdq07GtmcldI1girX2O61uOPa3pPc0pYufyshtZerH8kSUiMmUupjglCgGFQCdFYF9ZGZ6mEubSlDB2gjEEfCvDXMo3N1d1LAP7FCAjpRIGuueIZCZUQV+rx4LY+bhk0mhtX+SPKfZbCHjXkjVRjOUSfhogc+IImOPuj5yilgqBTouA0+fFYrrtVnvcmJDRizTiaZ22L92t4Q6/G069aF7hnnMZZHiVcFjovYyfYiqOIIt2nMc8JOzwyHyScHpwW8dz4mMlc0/7ytLcXNy1aJE22pPvY0DaLI/aSL+keXx8Bxn3tno/J8+kydL+8HGNs8NPd/ICHQxuA/z99HhoxjnIiFFkSgKdEoXASSKwi9f1YckLX9+by83h9WYXzSliz7EGKZezvNFsbeoEhYBCoNMg8Nq2rfCF2cO0RpMWUV/NFzQTM5scoXxh+/PSkdGzvK5PopBl0Dq2ftVBXDK7oSKm05lhTf43lbEdWtEZB8BgHlV3rVpRCHRWBCrdLpzzr3/BcURicmg5Tgd+O+9cXDagMWtxZ+1h1223l8+4R75c1lBZke6KckKlRcuIGqBCI3oaJ5+CEgom+o2Hron8uP22ABz9gih3O7GSTINTesoYq+2lxuPB3e++G/ZeCN9f+tCEyG6dj7FgQTH5NTxB5wUs5UbNAng3vRmuHCIT+UoUAh0PATFShw3VHa9xDVuUws0dLKvDuydyKTPSC8PbF4WXTS6aU8Su5VUvsPRluY+lsURbyRofO9XteazgCRZ5VD7D8giLEoWAQqCVECiia4uPLjkR0TllejgAUw1f4PL2lsFJsRkbt2dj/MgDkdPoxqNDYoJ4KzctetNQ5g8Tfh8lCoGugcAPF38IR4Fbc2OTka2+MIgHP/sQ52YPRExUct2u0duu1YudpaWkrK9/zjXoHR9zQSpaPZNt6JOYjFXbC0lmUa+5aAQfQkzIZ54n4MeCt1/Bp1ffzDis5AbVtMXGElrDfNE+iCe6abi7OiqTQRM3wl2ShXg0hESHGLPlRLWoYwqB9keAv71OID8
7lTY2p4jFhCtvym59nCfbqTSn7lpRviT4TYLd8lnWsIhmuZ1FiUJAIdAKCIzK6IHFe/dqM6wSIyGxEoFG3jcSL1HmiIOfsQQGSwB+urxUb07F1VdOaIUWqCoUAl8PAS8Hw1/m52n022f2zIbV2Nwr7OvVf7yzdx8u1awk0ceNHj1JDqrRP1EmQ5V0VASMeuYK05z5jm2hmUQVPzhzJm4ZNx4OrwtTyx+H8zCVEz4L5Zogn3myHlFkZNDz7Pp1eHiODE3aViQ2LJL/TO4sk2XiMikulprIeFUaSCOY7LNZXHAz3UiAz/CglQf4X1+th5GKmJxGdntcMG6IrNXJpvxC/HbxF9hZVIIeTFj+3VlTMW/YoLrjsiLU93KrjhIz16BxakMh0D4ILOVts1kGsnzCIjPV8nKqZmlWmnuL/TNcg1T8VaPazmy03Zqbk1jZXpb94Upf5vJilm6jiJW4HCiorUBObCrizY1Gx2FQ1EIhcCoIXD50GB5b/iVf1BxsiMjsKV/azkyyhpXwhUsXHVcGX/R8a+9/ZiD6XLcfxftTcO2wbyArIzF0jfqrEGgjBGQQeuXC/2FvxVHtjpkxcVh46XWwt4FFqn9GMgoPVoUGuuH++hhT2YNtUNKxERiSyneoxYJa5hBrLKJQnNt/gLY71mTFpYOH47XgJrjLbAiSzCIY60d8jBM2JrE3eTORmOjAlsLDWlJoAxW8tpRU+7H8aHoSKwW8zA8mxBuiMMqjnM2yx7rQo1c1ivYlwlttQoBeDprwNIPRB1+GHn+96mIkx9g1opIPd+zBh9t345Ode+FhJfLsr3aW4Z5338ODVVW4ecoEbD9ajIdWfIZVhfmaIjarTw5+dsbZ6BOv3gVt+T3odvfid7YTyLfYxttYxFTen6UXyz9YZrM0K/Icaoms50njGp3Y1L5Gp5z05hW8ch7LreEaruNyMsvd4W1ZSKelICMjY/zLL4uu1vlFvnOigFVxdk4+HNlOtcQg3do+L3yHw4HY2KYMop0f647Ug/bCuaC6CmUuScDMGU6Z/JWVRuMLPccvRp7it/EoEzn3Tk1AnK1zurS0F84Ca3eT1sa6iiQZh6ortRl5wVISiWfFxiHJcnw32dbC3M/ZiN0lJaQxlxr5O+AUZlZCfIN7yzmlzhqYDUbub7vJs9bGubUw60j11Hg9OEAGQZlUCj/lNItOmp3v1piQ40+5p0azcPo5IQVJfMznoNFIdsFwkEpK0IajOieCjKUN+o3ol5ik5VBsq35KrNuuo5yECHWiwW3jYwxw+skJSbIlaZ8MHpJtVroemlDkrKK3gw8msiMaOelm1tv5/UzhdmgevthRg1KWSLXs+THvgJzkZORWldf99uTmMj6R3+CgpFSI1bE1RX2nWxPN+rpmzZq1jludxp3F0rNXsNd37qvvwAnW9j14X3v2bSObJgakVSzCMC+yhWWkttbMn9Av8fgnncFDU1mEHioajXhuc+78tIn8xhuL9giN2vkU16WQRrtX8KyzzpLVTi+Pb/sUz+/bBlfoja/1x+Yx4aHhF+L83i36TFsVgyVLlqCrYNuqwLRyZe2Bs7gjjnn2L6h0Mz+ODDbo0mKoMYRmgiWugKJ36pC2IaDNCk+7ZzWqy+Nw2YTH0CM2qwECQd9+BD30IDb0gc48mYOX1n0xN7jZKWy0B86n0NxOfWlrY/3e/l3425IPUEO2OBErFZ6fDp2FS4eNaROczuBg/kOyJlZw4uKTgr1YXbQVPeyxeHb2FRyMpuH2xW/jU+6XvEz/OvcyTKXrZFtIa+PcFm1uj3sIS+xTa9dgU9ERZMbF4eax4zE9O/QZ1fjcmP3x71BLTnvnAVp4hLgoxoPEQeWI6Bg3uEbiOesWKmFAeW4CrDoTVlx0D+xGc5t15+U338KyfbnQVQc4OSaxvPSBMpowd/gAvF95BFU+T2gyTdtvxIKRo/CTufNRwnjg299+h30voo5ZgV7mavzqvHMwrX82Rv7mSXijYs9EEfPLPFtkhMdXQdKhUhyNr62rO9Jhk96AG3um4cdTzorsOmYpRDfPb9uAcv5u5vUbRAZeHV7esVljHb1xxDhuH/uuUN/pY2Dstjs6CVkHB1E0JteL6FahQVT9vuOuNaeIyRNGzCFyXlxULVVcF6vV6ZJ8Vtw7qnIx8x2O2u6yq6/kroPLr0271vXR6ffiuX2r2kURq2uEWulyCEjwebU7/OzgI8Po4Iudm0GfzPgyXowJP4f0PYiBw0qR2ocU9nTFSk6rwqKih/Ct2L/X4eGv/hOCNU9zm8MCUcCMA2FIfpHKWNtZBeoao1a6LAIze/VDktUGP1k95V+s2Yzzcga1WX/FBVJYEp/fuR7rSwpIdBNAfk0V7lz6Dj655FZtUByyE7BJEb7wNmudulFzCPSnVed3c89t8rTlJXs0BUHPCShrdjVcebHa804UMrGORYuQFRmYXzEYNFIh34OLsodHHz6t638491xMfeQfWpxX5Dvm9HixaP1O7asXlFFTeBpbckO+sHkT7p40GY98sQybqYSJ0JERB11VuPvVRfjvDVc0sHJpJ/CPVBHd6yoHiWqonAUbOUJIzOanefuoiM2KXNpgKZbI8998HkWMpRRl74XtNBywcnEztjG+s9rrxnfHn9ngGrWhEGiAQPQXscGBDrWxlK35EYu4Z8xhuZNlEUuLpDlFTCqX8h+WPJa2Ek6ta0Fv/bgsYLma5RqWLi8uKl1NiQQSK1EItCYCQTKJyexkgEthCtPomuUtyRnfIHPN+OOCGD3wIOKsMtkTFk5PVXgKWY4g0dyDSlteWAkLnyMPTe8uBGpfgSHmhshVaqkQOGUERPF67/Ib8M7e7RA3wIv6D0Wy9di4mVO+UaMKcovL8KvXP0GV040fXDQThVS+oifLSpwO7YrfzZyHf21Zi5yEZEzN6tOoFrXZVgjk031VXFj7xichk66rLRF570Zc8wxxPtiHVcBfbeS80rGjQNnnoqJt8vlJ8BH1bGzJjU7xnLziChhrWAmf2xER0gxppc5HEg3O4daRd3CfKDzT//c0kvyMeau7gCucL3NTUft0136ScQzUlnX5JHlYyzcWOZ8Ximu6eZ8Btf3ov85bB8RjImwxM57A++Hzg/tR5qyts7jJ5F/IjsdYZN5/zREZ3ilRCLQKAgms5SkWUYBarAS1yp2BB1nPLSzijng7y/ssz7C0SJpTxCKV1HLlURaZ+ome5j47ckIrL8UkJPFgH7HIz/1Zlm0sXV7OTO+PJUW7G8xSmWn+n9dzWJfvu+pg2yLwrw0M86T1S16sYv6vf7WzHbKPClqIH6thu2SvgW45IkHvBp7In2jdW172uhCsfgg+x5+gj72PLj7XyU4lCoFTRiCBsVfXD28crnzK1R63gqPVtfjGoy/A7ebsBOX2v7+B390xX3MH83FQKa5ZCwaFXCOlbfdNmHbcurrigbzicixctY3EDh7MGtUfUwbTNbmdrIESQ3jXJ+9gdWEB4/QMVEJ8mNazL56cfUGzKQbGJfelck8lIyyiWwScRtQescOeweEPt0XELbGm0k
qCDAPcZM3MssrYr+0kOZYTDw2etdH3pkLWSHGUZ7VYnRz0fNCLdiUPeXnWsx8S32U2GvDIxfPw5JLlWHngEOIsZqwtOYxa8coJ38cg3ec8sI8em8aqMBCsxpcopB5BxGl+jNHtqF8XhsWGLxZusg02uhWLXDloRP3JTawddJThH7u+wKayAgyIT8OdQ2ZicEJGE2eqXV0SAfn6hL+HLegfM5iHeCNacG5rnyIPj7fDpeTrVt5SRexFVvwKywUsd7DcwPK1b8Zrvo6IRimlW8mPRs/DpiX5qKWvt7gk2g1mZNoTcPNAZb7vVl+ENujsq0zo7Ocsqjge21M4a+mMYaA337/ysuaMZ1wPB8r9dsQE3VS8Qk9DccvpaR+COFOK1kKdoR8vOM6TMliNQPUvedgDQ6xMFilRCHQuBFbuzoMnrIRJy8l5gML8aiw8/3p8nr8PfeIScW6ftnOP7EjofbJxD3783w+ZFD6g5bdauHo7zhqZg99ef167KGN3MEZvLS0sYnVxh937vyw4gO9+9h6ePvfSE0LX056EaWmD8GXJbrjD8dl6umY782M5oWSGJcWJgEWHmuWp0JH+XYLkPVREnt60Fmf16n/CulvzoM7K5y/JkgrKOOaMKLx8/MpstamXGU6TV2N0lHuKEqaxKfJ5LrnEdE46JVLDFFImAx1vbCTyuHT0ME1p/f7s6XXN3LC/ADc+/Rq8PFFPg5+BKUu8DFARRl15NYiIwqez+jjJFsAmOkuNe+0PGJmYhR+On41hySFFSWIphyWncwxjgofWQ3HlFXfEH00+CzaTCf0SkjA+o2eowib+FtRU4PLPn9LGQuJOmesoxbKivfjv9BsxPCmriSvULoVAmyMgP4mfs4jhSNalyKzdn1l+xdIiaakiJqOuf7Hcy7I0qnBVSWsi0NOeiI/m3oP387cit7oUI5N6YnbWEDIdyaNWiUKg9RDwhwO0RccyxfiQOq4QR/KS+RThrGeKF6kxNSj3xcDoCiDdUsUZ1CAqvTbcnfXD+kaYRtFnhSRMnrXcR/+VJiRIyxiUItYEMmpXR0egoFTCoesHoLIeYzFhYGKqVmRbpKrWhS0HjqBfj2RkJcswvWuLlwPrn/9vMVxecV4JicQqLdmyH+v2FmDCwF6R3W2yzK0sx7qiw5oSFn1DN13zvsjP1dgQm0s18JuxV+ChLQuxuHAr37dG+FJ88Bb64ak2ayXYh+9gKmFasnvexFypx6H95dG3O23rQqx09ZsvY3VpPoLJVKL8BiS6zbDTgjV/zGDcds5kuPnkfvDTj/A5Ez+L4qIpYYbwJBmbbvAEYaoMwidGtWQfzp7QT2P+bNzogZmpMDNvJDWnukMBGSnKEJMiCp4/ixqahcoYT5OzKjhxvPTQQXyR9zxuGT4e723bjdLiWuY547vFrMfc0QNgthpw0YChOLtPf62e5v48tftLjQlS+iIif2Vy+vHtn+KZM6/T9qk/3QCB+q9hR+zsd9kosZJMZMkNNzCHy7+zfI/l8fC+Ey74M2qR8OekSSH/ns8ylqVtn7Ta7bvHnxiyMF3ZdxweGDkX5/UarpSw7vGxt3kvLxg0mN8tPgJIzOF0mlBKRkQXXXqCyYyXkBesm+u0gJV44rGtuhe2VPXGgZo0bKjYWtdWcUMyJD0FfcIv6/Ydu9K0gnbseWqPQqBjITAyu8cxDZo3dnCDfSt3HcT8X/wLP/j3u7jsN89hc668Jru27CooCQ+PG/ZTlLEvd0TGIw2Pnc6tA1TEtGdZEzeRdAIHKiuaONJwl4WWm4fHXI6PZn8fj49fgOem3YbMnPK6eCmxCLkyaB0KhyXq+GwsK63FXzes4HPy9I4Wf7/0C6wqoRImyhDb4Uz3ozTDhf/duwD3XzQDcXYLJM/YH+bMgzGGz24rzV6SWyysPOkdXGFmZ49YtiS+jGlIFhVvxk4yLTaWWKYm+fb8MzS3xcgxiT+r83yQeqmERdw15RwxzulodRMYnl25HlW7a2Em5CbGswUrAliydB8yaFZrqRImdW4pL9BiQWU9WnZVhkhHovep9S6MAL9T2sOmuWX7QHA9b7uAJTfq9vu5fi2LHGuRtFQRe5i1iTP0/SzfZ3mGRTRBJQoBhUAnReDOiZPQMz5Bi59wMoFpTQnf4E4DvMU2eItsKD6SSCKP8JucffRTYSs+Eq+5ikR3Wcd4Mb3tMr6JE6N3R61Hh5VG7VarCoEOjsDkQX2QEBf6/so4wGTSw2KqdyTZmncEd/3jLVS7PKhxe+H0+PD68s0dvFen3rx4u1VzSWxck8QcJfBYW4skFY6mYI++v8SK9eFzrqWSZI7BxNQcDI7PhLs8Q3PlC1nB+CzkiMlL/g8vuxikh4CH7ouPr1uGf2xa1dLqT+q8t7dsr1OqtArYDh9znL2xpn5STPYnklX026MnwxKOwdLO9bHZbh3cKQF40tjmZNqYxOJVaYSky2lKbj5nIi6aNhxiCZNXgJFxYqHBcGQ03NRVoX3W4vCwUtPOBLnQv/8u2YCnqLRqcWONLt9N5erF/avxyeGddVbN2GAs8bfAWWqDu8ICP3OkifSPS2t0tdpUCLQbAhIsX9rE3SV0KxRI38TBxrvCv5jGu4/Zfpd76JQM+dXPYhnP0jL7Mk9UohBQCHQ8BOKtVrz3zWsxisHPBnkx0wymC+ih93HJuIDaWgt2b++JqkoqaQ4LY2OSUV6SiCmp8vM/VvRxDxy7k3t0seI+3TVE3Dk/Wr8L6/fld40OqV6cEIHF+/eiNMtFKwgtDByJerwBfOvZN+quETIPIT2IiMVkgMPg0dzhTreVJHLP9lj2SUvEgMwUjXU1+v56WlvOnzg0elebrPdPTMaI1AySpzQc0ohL/5TMPkz8/fXdRauZX5GGNk2NaNAJ3sJHRlkfE9y7E7kk4dF/t69pcEprb/hq+f2L6EASgcIiXgvriwvqbiUKzq9XLqFSuJqnCqGSTmM7tPNR5YvnxVQadTYfglx62P4An/FfFe/H9oqmLbj9ezIihbfVsV49tTFLGeujZc1Al0xQuYs2Asq6pD0REebGuvg1bU/4D8mf/vjFV7jk03+gyhNigZbfyE/XL8JVS5/Bo1s+xoPr3sLcj55AUa0DK3KPMIE6zX9iBuQkoNEdhIXt+d7w2dG1qvUujgDnOzTCjuaW7QSD5wT3PdGxBpc1fGo1ONTsxn3NnqFOUAgoBDo0Al/sy8PWw0V8ObOZ8h6VYvZDl8RnCIOxHS4qYzt6Yc/2bLgq0vHC5VcjyZzUZJ/09m9AH/cQ66BlTRMblbAHSNRxR5Pnd8adL3+xET97cTHu+Ntb2HO4qYmwztgr1ebjIfDZgf1wUvkm+3dYgthfUhbZwPRh/XD3BVMxc3g/DMtOR22KHx849jK58zv47aqlded1xZUnb7sYI/r0gIVWMCF+SI6z44lvXYT0BPq/tYMIIceY9Ewt0XccUx1Iwu9Jmb3wl3MuPG5rRHk5nsJ8tKZWY8Vs6mKxFDkzaWmilUbn0qOoysWYblqtwuJ3L4Or/Ntwld0Cn/P9494jcn5zy9nZOVpMm
ihGdcIB6he1eRpRiux7dss65unaoKVWENp6SfFgjDWiTzaf1yY+0xkHLJqMjvT8mttihUkj9nhu78q6KqNXJKekMOeGrIFc5/2M1J9MtI6Zd1i09GqaAiZKGDEAmSQ1ZVFGlXKgkch8hd7owY4jFXhg5ULt6JfF+/Be/hatzUKSUsNYs2IqYcW1NWELZxAZ6eUYMrAAffuUIiv1KH605Y/YUXGwUe1qUyHQLgiM5l2rmijV3DeypS3i4+SkRYZsShQCCoFOjMC723aiNhAJAQ13hEqYMNJrL22+dOPjA/jxmPNwSc44GBvNODfuuj5mAZm0xGW6a0pTbjVds6eqV4JAj9hYbTDuSSSFDQemwg4+bER93JhYgG44e4JWHlrxGVZuOawxj8qA+dmt6/B/k2e2C4NgW3x6qfExeP6+q3GkvBo1dM3sl5EMwaO9RJJ9v3bRNdhfUYa8qgrkJCYhm7nEmpL9ZWX46aefYnV+vmY5Om/QIPx81iwk2uo0bmTGxzVp2JH6gkKCwf8RJSXo0eH36/+Ia4bOxWRbEbyOv/KsUGys37Oc1pwvYEl8pKmmtGjfffOnY9Wf85Hrp2OSPJs1IWkGmQidPjIlegL458vLYa6h22GSDq5MrXEae2R8Zgx0ZaV1fdFY+unWCLdRU6CWMin1vaWLMLVXX1w4eAjsZDQU+Xjj7rr+aTui/hhr9DjXNxzvFu2BsVqv5RXzpLJO4uJi/Jq1qK6RoauoxVmSXUgbWoz9OzLx0YaDuLt2EeLT2P5GuVO94oKojUyDSEhyIDmxpq7tUlmQ1KX3bfwj3p/5eJf9bUVBrVY7NgKNvugn11iZuzhZ4S9diUJAIdCZEYgl65bwgDUQuoHIy1pmQAf0LUZvzki+eOQl/Cf3nQandceNa2aOxc8XzMFf77gEA7NSuyME3arPt44dz6TAsRpToqG3Kysv6wAAQABJREFUEZZ+Vvzs7NlNYhBrsjSYqLCQ+KY7SI+kOPSnm2J7KmHROOfQTXFWn5zjKmFlTieueOklrDp0SItXktiy93fvxoJXX20Qv2Qh1frd06Zo9O7R9YvbX4C07RGRbQmk+nT9YHz/nR3IL32Gh6IIioK1tIq9STe73MglX3uZHh+LDx64CXOzcnitDL3oemjQYWbvfogzW3DPk2/DX+pnLJcetkI9FaHQM13OFBfFgbFpmpFKM1TxULLXRZdaHmQ3qipcWLRzN37++SeY9q+nsas0bOln/XJ9tEhfxSXSQKPaxl2HYaoQV3a6Kzrpulgiw0kd/DSIuhmHJi8QnZ4WOBZzgpusvAyb4UxGQpqDSx3e27UbZZXuBu8fk95Pq+Yh1sI7mwJIpSKmb5QbTSxrOlLrP7TpzeimqXWFQKdFoDmLmJjXGv8WpbPyE66fOpI9ShQCCoFOh8CCcaOwiFYxnzj9R6TMolnDevUpgpEznMwAQxeWIBYdXoL8miMYnZyDmemTkWw+HjlHpKKutzTQIjh/wpCu1zHVoyYRiGeS5g+vuQFL8w7QBcyPaX2yNUKEpk6+eeR4vLt/Jw47mD+PI97HZs5XM/ZNAdXO+17ZvBkuH2OlotrhozJWUFWFFQcP4szs7Lojt06eoMUrPbdmA/dRAaMlSZSwoBiNDFQ2GGclIUzi3uj3GFBWHYOHVp+Fv8x4u64ObUVnZEzWJrrm9Wu4/2tsWUkS89Rll6G4xoH3c3dBEohf2H+odu/tTJ0gE2ciEs9lqqBVrEdQy9t1ISnjrx4yCue/8hgp5HMxNX0vpqTnotZrxmZvL9TACpfHhFW7BuBQSRpuevtNfHnLt3AtJ53W7so/ZgQYIGuilfT5XrIkIhwFo9FxRBwriIePsXNxfSqRYHPCaGXic3GLpIgiaBRrnAj1tvIiH8wxBs01UXaNSTuENKsDQfH+5XGjQQLimpYPC9bhZ2S5VNLFEQh/r7tyL5tTxMgPpEQhoBDoqgiMzOqB35w/Fz/76FNUSvbOiNDVxmbnm1WmXDgAMXJW06DzY3P1FmxzbMNr+R/gJ8PuxrD4AZEr1DKMwOqNB3C0vAbnTBtClr3uYRXpyh++JJ+dN2Bgs12UgfFHV9yEQ9WVSKabnGwr6XgIbC0upsvesQN8sYztOXq0gSImrb9jyiQ8n7ueebnIGcEkznXCn7YkNtY0OlFCZNWjx7qDmaihYuOg1SfPZ6NSDvSm0pKj71l36amspMfE4sYR4xtUkZWSgMOllVpTNG4LWqXsRhNGp/fAFYNG8FwfHj7zJXj9ZXVuflYmfz4zuAefOYbBbg1i+oid+GitlQygJqygtXBE/wwMHJ+I3RvKwUe/1n8k6JBRY8e86YNRFFeL97/cpSmC8p7wx9SPmCU+b17vCdiGJfBKFvSw+EkEVV7MYaUYzGjVqgm6aOUbgY/yt7EeH3rEOHCgJAV95b3D4mKutBibu67NUo1Yy8Ry5qM7pjBiSnoCJV0UAX6lIhMMXbSHWrfUN7grf7qqbwqBFiBw4Ygh6JkShwVvvcIBSgC2NOaAifXiaLWdzIlWmIJ+ZGcdBSd1NeGcMNwBD/6y53n8ddwv1ax/FMZLV+7Br/70njaIWLclDz+5Z37UUbXa0RGoqHHiB8+/h7X7CjQSilvPmYRbZk/UvuPL8w5iZ0kJeick4JwB/Zv83ksMZb+EpuOSOnrfu0v7hqSm4vP9+49RxoRxMSc5+RgYUmx2XDV4FPRHjjY8Vq93aEyzBvFG5D43LU1/2TkGo/rn0ptA3PVo+eEcV1n5u5jSY6K23dp/nrz3Ejzw93dRWFaFvn1TMGFGNsZnZuHMntna97TCuYyKDmN/tYm1hndPN1bhiC+R3oIBDO2Tj3W7krCptAD3bX4JzgQvvFMD7J8BZosBb825Ff3iQi7ZNV4PWRsPI7+wCgEzLYJhRUwUwGULboPgtvhIOv6293WyjZI8hP8OMD7Mz7pCzdBjt6sEu3MZVykDbm1nENuPZGJ62N+qqCIO/awRRSxI5dIDG/0i5fyBPUrx/U0P4kdDv48etvq4zYa9U1sKgY6PgFLEOv5npFqoEDjtCOxlgLuB7GcyVWkixXGACT9LKiT3jjidBKmUuZBBf/1oOeqpQJXPgQSTMpxHcDl4uIz51gJajqXcQ40GbpGT1LJDIiDuZXc+/RZ25pdobHK1nHF/6pNVSIyxYh+/6y9s2KTtF2VrRr+++PNFFzSpjHXIzrVDoyTVgzCLWsmo2De94yinC0aNwjPr1jVQxOQzTWcs4LQot8RoyH5+xtl47cP3YdTp4ZMAWioCIiZqD0LbTsI/bZ88LW1mNwZlH6pTwuQ8sb/lORajv/MapNkaJgSX402JfB/FndFPog+ZBTNa59K1cUBTp6Jvj2S8+svrmzwmO/0BuvtFGh11VpDmsxJXDCqDVj7HXbBZGLPFPq2s2g0H48jE/icTcEGjj16IPjy69WP87YwQGVOMyYzF192MNYX5eH7bBo0gRVgq7xk3VVPC5DZze0zGjLSxWJm3Gz989V0EbaJthbVBYuirMkJHzwtmGWDr9DjqjKGVq96LwOUx40BRKnok
VSLR7iQTpk9T2ERp0/OKCm85frvzD3h8zO/Z7pDSK/dV0oUQCP/WulCPjumKUsSOgUTtUAh0PwQyY+OogFHZSqiEwSS09fVuVfIclBdiY5HXqVXPeDIldQhcNGcUVqzdj9JyB7536+y6/Wql4yNQVevGroKSOjpwabGLCZpfXrUJOxi04vaF3NmEGvyL3ANYk1+ASb17dfyOtUMLV+8+hAf+8x6tQ5zUoW9er9QECN19T7rQtbekxsTg1auvxo8WL8aWoiJNmZ7Vrx8enjNHU0Kaap/Ehvawx2LLDd/Bhwf2YMXhg4y/MuGC/kMQZ7Dg518sxoY9zMdFxr/0xAoqawZmcxU1pl4kDu353F/jniHP0LXu2Odp/ZlUSoJOUt/fREVsIzfEZZwU+WX/wJsV52KHqz8GxWdhQfZ0ZNpapuDGW89gnZEgrvo7ySTbZkcvOPxWZFirUFScjBhS/+fWljRqvShKwLqjB+sv5pqJGtRUWt2kHE+sBjPOyhmBs8ccxis7tzY8TV4iJIcStkWRtYXZVIiFDbxenHRPzD2ShvF98/j51O+XNVHIyj1V2FW9G0PjVexuQ3TUVmdBQClineWTUu1UCJxGBGJivEjJKmXgeWjwYDTWoKbGSusOZxkZ/J0S19AaZuQ06fS0ibDwJaukHoGEOBv+9pvQjHH9XrXWGRAQ1j9xeWos/PrDzHwObs2uETqq4wiw2NHwN9H4uu66XcYk1/c89TacVGIjsv9IGb79tzfxzk9u7BBWxMF0T3zjmmvg8tIaQyXL3EKGSxutQJcOHK6VSN9kaUhj7FWwBmBurnJaAI2MYTpGGOxSRsV0W9UWjElsGOPV+FxP5U8R8KznblHCgBKvBbfvmU2qd+p6OIBtlQfx0pdb4cqPQUqMHU9ffykGZqRq5zb1x2RIQe+kHyG37CEelmc8XQmpLC6pGIwqv1275IgrASW1zBV5yWX48ZY3UepmfxpJqoXBZycp03Ky8eaebYxTa/QjCythUm2tz4waP5lQZGTa6DTzcYg7xPK6u7JAKWIn+bl0+MsafQ86fHtPooHKlnsSoKlLFAJdDYFXDy2tU8KkbzIuyUp1IMlqgb3CjMM70uF0mDVa+wBHpjn2gbgt5+quBoPqTzdGIM5mwRmD+8CsueiGgBCmuhunjdeS40ZDI4O/4RkZ0bvUehiBxRt2H6PQCotkcWWN5vbZkYCykoilpUrYidrtEvIPGU0l+1DONAY7jmbSgkrl3c9cXRxI+jihVVidiJX5vckQ6DpRVXzGVpPuXhIeh5QwOfml4mFUUIxUwkJue+5aHRyHrJoLdFGVA79Y+OkJ65R8Y88c9ODVkrFYWjEQyyoH4enC6fiyqp6EJkA3xQvHZGFoWhruGDyDboBCDVkvNm7fPnh6/Y6vuTZ/4CCM7GWnIk5FkORPGr19MuO/iJtgVFfEdVGwlO67SY3v0MN4WI9Kh+2Y75U0Qaxi7+4+LKua6/BRlySD5ufxNaXAQctaWclJXfs1b6VObyEC/Gg1sg4h7GiutLDKDnmasoh1yI9FNUoh0LYISDxCY/GRpKOciVpNaZw95Utx7+5MGOPdiE3R4ebsGXRLafiibny92lYIdDYE/nDDBfjtm5/hyx0HtNimu+edgfnjhyItNRZ3vL2QbnYBTSm7f8aZyE7qfukbWvJ5OvjM8DbBSmigxdHhqlcuWlJXZznnvJ6jsa38i1BzyaT40j4SvOSJdsE8PwYvxlvyseJoDhDvx/t788hMWIvhycOQZWvIpOhjsNmG0g+wv7gfrUM6jE04goEx5dhSk057bEgJ027S6HEtEwPR4qUb7cpdB2mV9GLyoD5Y49iCrZVbmXLEhM/Kh9IKGH12aF0UmgTmlRSZ23Moyjw1+NO2z1DrF4IME+4achYu6jMqdPJJ/BUr8s9mzcQvNj7N/gdxpDYOHiYl83nD/ZIGCGRUCHUcdVtT3JiRMQAbtxxhvFoABwrSkBxfo+UVk1NF/HRrLK2Kxb7CI3g7dyseWv8xanx8Z9Fl8q7hU3H70DOatcDmVZXjrk8WYU95qZYH0EDN8KdnzMKVg0eGbqL+KgROMwJKETvNAKvqFQKdAYEres/Apop9ZEMMxRHIe72SOXFE6E0De7wXxlS+kBkXkWqNxexM5Y/fGT5X1cavh4CNA9VfXX3uMRedkd0Hq+68HT/+4mMszN2J3236Aq8c2IJXLrha0dQ3QuvMoX3x9EeraJ2od02UUyQX4cjszEZnd43Nb/Sdgn/uXIUqMgkyjTGDp0SnoEJBpcxr0GNjMBMZvSs0N+9Fyw7gzdIjSO31Je64YAB8OhdSzekYlzgNl773T+YzM5C8YgaMJnIuMlfZQ4O/QC9TFXYUpMNdboWOVPgxmTUwZ9bCWWKDJc6A0vhqXL3oBdw2ZgRKKw7hd//Zp7kAihVByDcGnxGEIzOAHLMznCA5rMlEwW+gojcxeWjdnqv7TcA3+o5HNUk7JFm5KCinKqMSh+D2IRfj2dzXEBNTghrGfx0sSoEnQCbFsDuiwegXBDVLVw97HAK9DsJHRkZPhRUbdmUjp2cJ4mOZJJukHgVFSSjIT4EuoMcP3v8Q/ji+v4iPhxaxP2/9iu+qGFyRM/q4zXaT/v6Kd/5HkhAnHTaDdQQuP/vqE6QzJlASZitpZwQaTTq0c2tOy+2VInZaYFWVKgQ6FwITUwbjvkFX4RebXtHcPyodpK53mmGo0kFfaWLwOWdKyZyV0SMJ//7GAtIIK5KOzvUJq9aeKgIrjhzCBwd3My+Sn4NcYF/FUTy88nM8OvO8U626S10/tHc6Lps6Am+u2Ao348QMVESE7OIX18yhlbFrDjkSzDa8Nft23PnVa9heXqIpEZEP1W71It7qCu1jPFT8qHKUfZWOo4fj8PLqdcgaQEuMzoS/bFyJ/IoQ+YY/QKoPYabg9+zBZXOQkGvUaN/FZdxNQ2wFFTBPMpWODObSotthdXkl9pVWYVXuEcTk6aHnIWFwFBG30B1fBZE2n/FXsSTlsFSh2B0fUhgjjeRWpi0FZ6RKzrF6ESVO+taacm6PGTgnYxre2rMJD37xKclxQiNtYXXUW0V5Z6wmvyay98PDO7Rb68xUzrijptqGLXv61DeH/dRLHDNFvDYgpFIpzDJNZczp9+KfO1aeUBH7KHcPLY9eTQnTKgn/cTLh9xPrvlKKWDQoav20IXDqUxynrWmqYoWAQqAtEciJ6YOKo+koLEnky52++WRP1NXyzW/gi5ELn9eE/J21ePK95W3ZLHUvhUCHQGA/UzwI811EJAHwrrLSyKZaRiHww8tn4am7LscNs8fjW3Mn440Hr8O8cYOjzuh6q71jkvDTMfM0ivtQ72gTo/ZgYDxU9KS+sN8b7JIixICaKhIiMea2xCnxX5KILCQGC5+9fOZKsR7mRJiWe4uqFV3xTIwPE74Ngzs8fBN9i+fxkQ2dlxNn1GUiSlikPrm/91Ac9jpSkWKuQaa1kmFYzA+mlSAmJg3G42Pv5aOeFbWBiHXtikFj8eqF12B4SjrvGEIo4DIyPs6kKa3ifig
<embedded reference figure: base64-encoded PNG data omitted>)",
"_____no_output_____"
]
],
[
[
"# Replicate the figure here",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7f1a6e68c75728258c56fc31b4c376ae0ffd55b | 152,128 | ipynb | Jupyter Notebook | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects | c0ebd502cacfb906dd69dca50d7fedd1c567775a | [
"MIT"
] | 1 | 2021-02-08T14:55:29.000Z | 2021-02-08T14:55:29.000Z | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects | c0ebd502cacfb906dd69dca50d7fedd1c567775a | [
"MIT"
] | null | null | null | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects | c0ebd502cacfb906dd69dca50d7fedd1c567775a | [
"MIT"
] | null | null | null | 47.763893 | 6,029 | 0.49292 | [
[
[
"# Data Preparation",
"_____no_output_____"
],
[
"### Settings/Functions\nRead in settings and functions.",
"_____no_output_____"
]
],
[
[
"libraries <-c('here','missForest','stringr','imputeMissings','regclass'\n ,'purrr','DescTools')\nsuppressWarnings(lapply(libraries, require, character.only = TRUE))\nsuppressWarnings(source(here::here('Stock Estimation', 'settings.R')))",
"_____no_output_____"
]
],
[
[
"### Data\nRead in the final data set from the data preparation notebook.",
"_____no_output_____"
]
],
[
[
"data <- fread(paste0(dir$final_data,'combined_financial.csv'))",
"_____no_output_____"
]
],
[
[
"## More Cleaning",
"_____no_output_____"
],
[
"### Duplicates",
"_____no_output_____"
]
],
[
[
"#Checking for duplicate column sums\ndups <- data[ , which(duplicated(t(data)))]\ndups <- names(dups)\ndups",
"_____no_output_____"
],
[
"#Removing any duplicate column sums after verifying them\ndata <- data %>% dplyr::select(-c(dups))\ndim(data)",
"_____no_output_____"
],
[
"#Looking for missing values & evaluating list of variable names\nna <- apply(is.na(data),2,sum)\nmax(na)\n# NOTE: The following code has been commmented out due to the length of its output.\n#print(na)\nhead(sort(na, decreasing = TRUE), n=25)",
"_____no_output_____"
],
[
"#Merging and dropping duplicated variable names\n#NOTE: Portions of the following code has been commmented out due to the length of its output.\n#view(data[, c(\"Payout Ratio\", \"payoutRatio\")])\ndata <- Name_Changer(dat=data,x='Payout Ratio',y='payoutRatio')\n\n#view(data[, c('interestCoverage', 'Interest Coverage')])\ndata <- Name_Changer(dat=data,x='Interest Coverage',y='interestCoverage')\n\n#view(data[, c('netProfitMargin', 'Net Profit Margin')])\ndata <- Name_Changer(dat=data,x='Net Profit Margin',y='netProfitMargin')\n\n#view(data[, c('PE ratio', 'priceEarningsRatio')])\ndata <- Name_Changer(dat=data,x='PE ratio',y='priceEarningsRatio')\n\n#view(data[, c('priceToFreeCashFlowsRatio', 'PFCF ratio')])\ndata <- Name_Changer(dat=data,x='PFCF ratio',y='priceToFreeCashFlowsRatio')\n\n#view(data[, c('priceToOperatingCashFlowsRatio', 'POCF ratio')])\ndata <- Name_Changer(dat=data,x='POCF ratio',y='priceToOperatingCashFlowsRatio')\n\n#view(data[, c('priceToSalesRatio', 'Price to Sales Ratio')])\ndata <- Name_Changer(dat=data,x='Price to Sales Ratio',y='priceToSalesRatio')\n\n#view(data[, c('Days Payables Outstanding', 'daysOfPayablesOutstanding')])\ndata <- Name_Changer(dat=data,x='Days Payables Outstanding',y='daysOfPayablesOutstanding')\n\n#view(data[, c('Free Cash Flow per Share', 'freeCashFlowPerShare')])\ndata <- Name_Changer(dat=data,x='Free Cash Flow per Share',y='freeCashFlowPerShare')\n\n#view(data[, c('ROE', 'returnOnEquity')])\ndata <- Name_Changer(dat=data,x='ROE',y='returnOnEquity')\n\n#view(data[, c('priceToBookRatio', 'PTB ratio')])\ndata <- Name_Changer(dat=data,x='PTB ratio',y='priceToBookRatio')\n\n#view(data[, c('priceBookValueRatio', 'PB ratio')])\ndata <- Name_Changer(dat=data,x='PB ratio',y='priceBookValueRatio')\n\n#view(data[, c('operatingCashFlowPerShare', 'Operating Cash Flow per Share')])\ndata <- Name_Changer(dat=data,x='Operating Cash Flow per Share',y='operatingCashFlowPerShare')\n\n#view(data[, c('Cash per Share', 'cashPerShare')])\ndata <- Name_Changer(dat=data,x='Cash per Share',y='cashPerShare')\n\ndim(data)",
"_____no_output_____"
]
],
[
[
"### Variable Names",
"_____no_output_____"
]
],
[
[
"#Checking variable names\nnames(data)\ndata <- setDT(data)",
"_____no_output_____"
],
[
"#Changing all names to lower case and replacing spaces with \"_\"\n#Amending various features to make more compatible models\nnames(data) <- str_trim(names(data), side = \"both\")\nnames(data) <- str_to_lower(names(data), locale = \"en\")\nnames(data) <- str_replace_all(names(data), \" \", \"_\")\nnames(data) <- str_replace_all(names(data), \"-\", \"\")\nnames(data) <- str_replace_all(names(data), \"&\", \".\")\nnames(data) <- str_replace_all(names(data), \"\\\\(\", \"\")\nnames(data) <- str_replace_all(names(data), \"\\\\)\", \"\")\nnames(data) <- str_replace_all(names(data), \"3y\", \"three_yr\")\nnames(data) <- str_replace_all(names(data), \"5y\", \"five_yr\")\nnames(data) <- str_replace_all(names(data), \"10y\", \"ten_yr\")\nnames(data) <- str_replace_all(names(data), \"\\\\\\\\\", \"\")\nnames(data) <- str_replace_all(names(data), \"////\", \"_\")\nnames(data) <- str_replace_all(names(data), \",\", \"\")\nnames(data) <- str_replace_all(names(data), \"_._\", \"_\")\nnames(data) <- str_replace_all(names(data), \"/\", \"_\")\nsetnames(data, 'eps', 'earnings_per_share')\nnames(data)",
"_____no_output_____"
]
],
[
[
"### Categorical Encoding",
"_____no_output_____"
]
],
[
[
"#Categorical Encoding\ndata[, sector := as.factor(sector)]\ndata[, sector_num := as.numeric(sector)]\n\n#Reordering data to put \"sector\" with \"sector_num\"\ndata <- data %>%\n dplyr::select('stock','nextyr_price_var','class','year','sector','sector_num', everything()) %>%\n setDT()",
"_____no_output_____"
]
],
[
[
"### Missing Data",
"_____no_output_____"
]
],
[
[
"na <- apply(is.na(data),2,sum)\n#print(na)\nmax(na)\n#sort(na, decreasing = TRUE)\nhead(sort(na, decreasing = TRUE), n=25)\nsummary(na)",
"_____no_output_____"
],
[
"#Checking how many rows are complete\nsum(complete.cases(data))\n\n#Checking for NA across rows\ndata$na <- rowSums(is.na(data))\nmax(data$na)\nhead(sort(data$na, decreasing = TRUE),n = 20)\nsummary(data$na)",
"_____no_output_____"
],
[
"#Found that 50 was a good cut off for dropping rows\ndrop <- data %>% \n filter(na >= 50)\ndim(drop)\n\ndata <- data %>%\n filter(na <= 50)\n\ndata <- dplyr::select(data, -c(na))\n\n#Re-checking the NAs across columns\nna <- apply(is.na(data),2,sum)\nmax(na)\n# NOTE: The following code has been commmented out due to the length of its output.\n#print(na)\n#sort(na, decreasing = TRUE)\nhead(sort(na, decreasing = TRUE), n=25)",
"_____no_output_____"
],
[
"#Keeping only columns with less than ~15 percent missing\nperc <- apply(data,2,Perc_Missing)\nmax(perc)\n# NOTE: The following code has been commmented out due to the length of its output.\n#print(perc)\n#sort(perc, decreasing = TRUE)\nhead(sort(perc, decreasing = TRUE), n=25)\n\n#Choosing to only keep variables with less than 15% missing data\ndata <- data[, which(apply(data,2,Perc_Missing) < 15.0)]",
"_____no_output_____"
]
],
[
[
"### Multicollinearity/Linear Dependence/Winsorization",
"_____no_output_____"
]
],
[
[
"#Splitting datasets\ndata <- setDT(data)\ndata2 <- select(data, c('stock','nextyr_price_var','sector'))\ndata <- select(data, -c('stock','nextyr_price_var','sector'))\n\n#Converting class to a factor\ndata <- data[, class := as.factor(class)]\n\n#Run regression to identify linearly dependent variables\nset.seed(123)\nglm <- suppressWarnings(glm(class~., family = binomial\n , data = data))",
"_____no_output_____"
],
[
"#Find the linearly dependent variables\nvars <- attributes(alias(glm)$Complete)$dimnames[[1]]\nvars\n\n# Remove the linearly dependent variables\nremove <- match(vars,names(data))\nremove\n\ndim(data)\ndata <- select(data, -c(remove))\ndim(data)",
"_____no_output_____"
],
[
"#Re-run regression without linearly dependent variables\nset.seed(123)\nglm <- suppressWarnings(glm(class~., family = binomial\n , data = data))",
"_____no_output_____"
],
[
"#NOTE: This section of the code will take some time to run.\n#The function VIF_Check runs a regression and removes the max\n#VIF, repeating this process until all VIFs are below threshold\n#Removing variables with VIFs above 5\ndata <- VIF_Check(dat=data, threshold=5)",
"[1] 42501422\n[1] 42501422\n[1] 14857839\n[1] 7233613\n[1] 589255.9\n[1] 224275.1\n[1] 136088.1\n[1] 90188.32\n[1] 85687.64\n[1] 64580.32\n[1] 47223.74\n[1] 36937.24\n[1] 34305.54\n[1] 15651.42\n[1] 8154.741\n[1] 6957.033\n[1] 3236.491\n[1] 3432.8\n[1] 3006.367\n[1] 2107.776\n[1] 1695.465\n[1] 1459.61\n[1] 1059\n[1] 897.1616\n[1] 805.2764\n[1] 847.6565\n[1] 787.2204\n[1] 729.5147\n[1] 593.1282\n[1] 296.241\n[1] 368.0201\n[1] 235.8145\n[1] 234.6721\n[1] 207.8195\n[1] 206.0238\n[1] 175.2889\n[1] 164.1711\n[1] 119.1208\n[1] 112.1676\n[1] 86.7602\n[1] 75.9692\n[1] 98.85711\n[1] 65.27094\n[1] 44.12094\n[1] 41.82128\n[1] 38.25669\n[1] 38.15252\n[1] 1099.636\n[1] 34.42022\n[1] 25.95463\n[1] 25.16551\n[1] 24.20825\n[1] 21.9634\n[1] 21.5175\n[1] 20.7845\n[1] 17.37877\n[1] 15.94764\n[1] 11.32786\n[1] 9.723508\n[1] 9.203622\n[1] 8.782897\n[1] 8.662801\n[1] 7.623118\n[1] 7.273039\n[1] 6.057325\n[1] 6.021532\n[1] 5.181338\n[1] 5.031095\n[1] 4.370228\n[1] \"All VIFs are below threshold of 5\"\n"
],
[
"#Re-combine data\ndata <- cbind(data2,data) %>% setDT()\ndim(data)",
"_____no_output_____"
],
[
"#Re-run regression without linearly dependent variables\nset.seed(123)\nglm <- suppressWarnings(glm(class~., family = binomial(link = \"logit\")\n , data = data[, -c('stock','nextyr_price_var','sector')], control = list(maxit = 100)))",
"_____no_output_____"
],
[
"#Split data for winsorization\ndata2 <- select(data, c('class','year','sector_num','stock'\n ,'nextyr_price_var','sector'))\ndata <- select(data, -c('class','year','sector_num','stock'\n ,'nextyr_price_var','sector'))",
"_____no_output_____"
],
[
"#Winsorize each column accordingly\ndata <- map_df(data, ~Winsorize(., probs=c(0.05,0.95),na.rm=TRUE))\n\n#Recombine datasets\ndata <- cbind(data2,data) %>% setDT()\ndim(data)",
"_____no_output_____"
],
[
"#Re-run regression to see if error has been corrected\n#No warning message occurs\nset.seed(123)\nglm <- glm(class~., family = binomial(link = \"logit\")\n , data = data[, -c('stock','nextyr_price_var','sector')], control = list(maxit = 100))\nsummary(glm)",
"_____no_output_____"
]
],
[
[
"### Imputing Missing Values",
"_____no_output_____"
]
],
[
[
"#Imputation will be implemented if necessary in the modeling notebook",
"_____no_output_____"
]
],
[
[
"### Uniformity",
"_____no_output_____"
]
],
[
[
"#Implementing scaling in the modeling notebook",
"_____no_output_____"
]
],
[
[
"### Additional Cleaning",
"_____no_output_____"
]
],
[
[
"#No additional cleaning was performed in this notebook",
"_____no_output_____"
]
],
[
[
"## Save the Modeling Dataset",
"_____no_output_____"
]
],
[
[
"fwrite(data, paste0(dir$final_data,'clean_financial.csv'))",
"_____no_output_____"
]
],
[
[
"## Outcome",
"_____no_output_____"
],
[
"##### The following changes were made to the data set:\n - Duplicates: Duplicate columns were removed from the dataset. This was done by first using the duplicated function to identify identical columns. Then, I looked at the columns sums to identify additional duplicate variables. In the end, about 10 variables were duplicated. \n - Variable Names: Variable names were cleaned up so that they were consistent. Spaces and dashes were removed, all letters were lower-cased, and underscores were added between most words.\n - Categorical Encoding: A new variable \"sector_num\" was created as the numeric version of the categorical variable \"sector\" to aid in machine learning models.\n - Missing Data: The missing data was cleaned in the following manner:\n 1. Any row with missing values greater than 50 was removed\n 2. Any column with missing values greater than 15% was removed\n 3. The remaining values will be imputed after relevant variables are identified.\n - Outliers: The outliers in the data set were removed in the previous notebook using Cook's Distance. Only four rows were identified as extreme outliers and were thus removed from the data set.\n - Multicollinearity: A significant portion of the variables in the dataset had high multicollinearity and a few were even found to be perfectly collinear. To solve this issue, all variables with a VIF greater than or equal to 5 were removed from the dataset. This resulted in approximately 70 variables being removed from the dataset.\n - Complete Separation: When running a basic regression, predicted probabilities that were effectively indistinguishable from 1 were occurring. To solve this issue, I first removed collinear variables. After this did not solve the issue, I winsorized the majority of the independent variables in the dataset and the problem was solved.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7f1bba181b6ab13eb7ffbb013e2cbcb4c80a0f6 | 4,394 | ipynb | Jupyter Notebook | Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day61/WebAuto-HackerrankSubmission/.ipynb_checkpoints/solution-checkpoint.ipynb | pooja-gera/TheWireUsChallenge | 18abb5ff3fd31b7dbfef41b8008f91d3fac029d3 | [
"MIT"
] | null | null | null | Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day61/WebAuto-HackerrankSubmission/.ipynb_checkpoints/solution-checkpoint.ipynb | pooja-gera/TheWireUsChallenge | 18abb5ff3fd31b7dbfef41b8008f91d3fac029d3 | [
"MIT"
] | null | null | null | Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day61/WebAuto-HackerrankSubmission/.ipynb_checkpoints/solution-checkpoint.ipynb | pooja-gera/TheWireUsChallenge | 18abb5ff3fd31b7dbfef41b8008f91d3fac029d3 | [
"MIT"
] | 1 | 2021-05-21T09:30:41.000Z | 2021-05-21T09:30:41.000Z | 21.752475 | 205 | 0.550979 | [
[
[
"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time",
"_____no_output_____"
],
[
"driver = webdriver.Chrome('/Users/jappanjeetsingh/Downloads/Drivers/chromedriver')",
"_____no_output_____"
],
[
"driver.get('https://www.hackerrank.com/auth/login?h_l=body_middle_left_button&h_r=login')\ntime.sleep(3)",
"_____no_output_____"
],
[
"username_element=driver.find_element_by_id(\"input-1\")",
"_____no_output_____"
],
[
"username_element.send_keys(\"[email protected]\")",
"_____no_output_____"
],
[
"password_element=driver.find_element_by_id(\"input-2\")",
"_____no_output_____"
],
[
"password_element.send_keys(\"Anonymous786#\")",
"_____no_output_____"
],
[
"password_element.send_keys(Keys.RETURN);\ntime.sleep(5)",
"_____no_output_____"
],
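[
"# Illustrative alternative (assumption: selenium 3.x API and the same page structure as above):\n# explicit waits are usually more robust than fixed time.sleep() pauses on pages that load asynchronously.\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# block for up to 10 seconds until the post-login search box (class \"ac-input\") is present\nWebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, \"ac-input\")))",
"_____no_output_____"
],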
[
"search_element=driver.find_element_by_class_name(\"ac-input\")",
"_____no_output_____"
],
[
"search_element.send_keys(\"Solve Me First\")",
"_____no_output_____"
],
[
"# selecting the challenge and clicking it\ndriver.find_element_by_xpath(\"//*[@id=\\\"search-span\\\"]/div/div/div/div[2]/ul/li[8]/div/div\").click()\ntime.sleep(3)",
"_____no_output_____"
],
[
"driver.find_element_by_class_name(\"css-rzj7fh-indicatorContainer\").click()\ntime.sleep(5)",
"_____no_output_____"
],
[
"driver.find_element_by_id(\"react-select-2-option-5\").click()",
"_____no_output_____"
],
[
"with open('hackerrankSol.txt','r',encoding='utf-8') as f:\n code=f.read() ",
"_____no_output_____"
],
[
"text_area=driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/div/div/div[3]/div/section/div/div/div/div[1]/section[2]/div[1]/div[2]/div/div[1]/div[1]/div[1]/div[2]/div/div/div[1]/textarea\")",
"_____no_output_____"
],
[
"text_area.send_keys(code)\ntime.sleep(5)",
"_____no_output_____"
],
[
"submit_button=driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div/div/div/div[3]/div/section/div/div/div/div/section[2]/div[1]/div[2]/div/div[1]/div[2]/button[1]\")\nsubmit_button.click()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f1c0354b43eeccc8e196daa7b18e02e47ea51b | 31,962 | ipynb | Jupyter Notebook | Pandas/Pandas Daten Visualisierung.ipynb | florianfricke/data_science_jupyter_notebooks | b35593d32d02d737c3bdfa4fc62b5af1806f6e9a | [
"MIT"
] | null | null | null | Pandas/Pandas Daten Visualisierung.ipynb | florianfricke/data_science_jupyter_notebooks | b35593d32d02d737c3bdfa4fc62b5af1806f6e9a | [
"MIT"
] | null | null | null | Pandas/Pandas Daten Visualisierung.ipynb | florianfricke/data_science_jupyter_notebooks | b35593d32d02d737c3bdfa4fc62b5af1806f6e9a | [
"MIT"
] | null | null | null | 145.945205 | 26,544 | 0.872474 | [
[
[
"## Pandas Daten Visualisierung\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n%matplotlib inline",
"_____no_output_____"
],
[
"pd.read_csv('',index_col=0) #die Erste Zeile der csv ist nun der Spaltenindex/Schlüssel pro Zeile\n#das Styling des Plots wird verändert (rote Balken)\n#stacked = True -> Werte werden übereinander gelegt\n\n#Lineplot\n\ns=df1['C']*100 #die Diagrammpkt. werden größer dargestellt",
"_____no_output_____"
]
],
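[
[
"# Illustrative, runnable version of the notes above (assumption: a small synthetic DataFrame\n# stands in for the unnamed csv; with index_col=0 the first column would become the row index).\nimport numpy as np\nimport pandas as pd\n\ndf1 = pd.DataFrame(np.random.rand(5, 3), columns=['A', 'B', 'C'])\n\n# stacked=True -> the A, B, C bar segments are stacked on top of each other\ndf1.plot.bar(stacked=True, color=['red', 'darkred', 'salmon'])\n\n# passing a scaled column as s= makes the scatter points render larger\ns = df1['C'] * 100\ndf1.plot.scatter(x='A', y='B', s=s)",
"_____no_output_____"
]
],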
[
[
"Plotly ist eine Visualisierungslibary -> 3D Dia. mögl\nCufflinks verbindet Plotly mit Pandas\nbeide müssen installiert werden\nnicht mit Anaconda installierbar -> mit Terminal installieren\n`pip install plotly` `pip install cufflinks`",
"_____no_output_____"
]
],
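[
[
"# Minimal cufflinks usage sketch (assumption: plotly and cufflinks are already installed).\n# cf.go_offline() hooks plotly into the notebook so DataFrames gain an .iplot() method.\nimport numpy as np\nimport pandas as pd\nimport cufflinks as cf\n\ncf.go_offline()\ndemo = pd.DataFrame(np.random.randn(50, 2), columns=['x', 'y'])\ndemo.iplot(kind='scatter', mode='markers')  # interactive plotly chart straight from pandas",
"_____no_output_____"
]
],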
[
[
"import seaborn as sns\ndf = pd.read_csv('tips.csv')\ndf.head()",
"_____no_output_____"
],
[
"sns.violinplot(x='day', y='total_bill', data=df)",
"_____no_output_____"
],
[
"sns.violinplot",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f1c0eaaeb050f8272b06a6c24b4ba36d19bf8f | 673 | ipynb | Jupyter Notebook | notebooks/exploratory/.ipynb_checkpoints/summary_inventory_file-checkpoint.ipynb | mariakmejiaguerra/TF_binding_maize_leaf | df0c3678eeda7388e3ea9fe3c0459cb0d1592203 | [
"MIT"
] | null | null | null | notebooks/exploratory/.ipynb_checkpoints/summary_inventory_file-checkpoint.ipynb | mariakmejiaguerra/TF_binding_maize_leaf | df0c3678eeda7388e3ea9fe3c0459cb0d1592203 | [
"MIT"
] | null | null | null | notebooks/exploratory/.ipynb_checkpoints/summary_inventory_file-checkpoint.ipynb | mariakmejiaguerra/TF_binding_maize_leaf | df0c3678eeda7388e3ea9fe3c0459cb0d1592203 | [
"MIT"
] | 1 | 2020-04-04T15:20:24.000Z | 2020-04-04T15:20:24.000Z | 16.825 | 40 | 0.523031 | [
[
[
"# Inspection of the inventory file",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e7f1c95d1f1cc02821a7a422e251ef18279b36e5 | 391,496 | ipynb | Jupyter Notebook | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp | 04976c1ccc00a06faa15994015dfeaf4aed451c7 | [
"MIT"
] | null | null | null | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp | 04976c1ccc00a06faa15994015dfeaf4aed451c7 | [
"MIT"
] | null | null | null | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp | 04976c1ccc00a06faa15994015dfeaf4aed451c7 | [
"MIT"
] | null | null | null | 435.479422 | 62,020 | 0.929762 | [
[
[
"import networkx as nx\nimport matplotlib.pyplot as plt\nimport community\n%matplotlib inline",
"_____no_output_____"
],
[
"G_k = nx.karate_club_graph()\nnx.draw_networkx(G_k,with_labels=True,node_color='cyan')\nplt.title(\"draw function with node labels\")\nplt.show()",
"_____no_output_____"
],
[
"import community\nG = nx.random_partition_graph([10,10,10],0.9,0.1)\npartition = community.best_partition(G)\nprint(partition)\ncolors=['green','hotpink','yellow']\npos=nx.spring_layout(G)\nnx.draw_networkx_nodes(G,pos=pos,c=colors,cmap=matplotlib.colors.ListedColormap(colors), node_color=list(partition.values()))\nnx.draw_networkx_edges(G,pos=pos)\nplt.title(\"random partition graph\")\nplt.show()",
"{0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 2, 21: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2}\n"
]
],
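[
[
"# Follow-up check (assumption, illustration only): score the Louvain partition with\n# python-louvain's modularity(); values closer to 1 indicate dense intra-community\n# and sparse inter-community connections.\nprint(community.modularity(partition, G))",
"_____no_output_____"
]
],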
[
[
"# INCIDENCE MATRIX",
"_____no_output_____"
],
[
"# decomposing Incidence matrix and plotting node features(W)",
"_____no_output_____"
]
],
[
[
"inci = nx.incidence_matrix(G).todense()\nprint(inci.shape)\nprint(inci)",
"(30, 154)\n[[ 1. 1. 1. ..., 0. 0. 0.]\n [ 1. 0. 0. ..., 0. 0. 0.]\n [ 0. 1. 0. ..., 0. 0. 0.]\n ..., \n [ 0. 0. 0. ..., 1. 1. 0.]\n [ 0. 0. 0. ..., 1. 0. 1.]\n [ 0. 0. 0. ..., 0. 1. 1.]]\n"
]
],
[
[
"# NMF Decomposition",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import NMF\n\nmodel = NMF(n_components=2,init='random', random_state=0)\nW = model.fit_transform(inci)\nH = model.components_\nerr = model.reconstruction_err_\nit = model.n_iter_\n",
"_____no_output_____"
],
[
"print(err)\nprint(it)\nprint(W.shape)\nprint(H.shape)\n# print(W[0])\n# print(H[:,0])",
"16.3736251866\n89\n(30, 2)\n(2, 154)\n"
]
],
[
[
"# NMF displaying learned nodes",
"_____no_output_____"
]
],
[
[
"# displaying learned nodes\nimport matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow']#, 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\n\nsvd.scatter(W[:, 0], W[:, 1],c=np.array(list(partition.values())),marker='o',s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"W-nodes\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# NMF displaying learned edge vectors(H)",
"_____no_output_____"
]
],
[
[
"#color edges\nedges = G.edges()\ned_label = []\nfor ed in edges:\n if partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==0:\n ed_label.append(0)\n elif partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==1:\n ed_label.append(1)\n elif partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==2:\n ed_label.append(2)\n elif partition[ed[0]]==0 and partition[ed[1]]==1:\n ed_label.append(3)\n elif partition[ed[0]]==1 and partition[ed[1]]==2:\n ed_label.append(4)\n elif partition[ed[0]]==0 and partition[ed[1]]==2:\n ed_label.append(5)\nprint(len(edges))\nprint(len(ed_label))\n \n ",
"154\n154\n"
],
[
"# displaying learned edge vectors(H)\nimport matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\n# 0-0 1-1 2-2 0-1 1-2 0-2\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\nH1 = np.transpose(H)\nsvd.scatter(H1[:, 0], H1[:, 1],c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"W-edges\")\nplt.show()",
"_____no_output_____"
],
[
"# PCA and t-SNE for node features (W)\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import normalize\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\n# W1 = normalize(W)\ntsne = fig.add_subplot(1,2,1)\nX_tsne = TSNE(n_components=2, perplexity=40).fit_transform(W)\ntsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\ntsne.title.set_text(\"t-SNE\")\n\npca = fig.add_subplot(1,2,2)\nX_pca = PCA(n_components=2).fit_transform(W)\npca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(list(partition.values())), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))\npca.title.set_text(\"PCA\")\n\nplt.show()",
"_____no_output_____"
],
[
"# PCA and t-SNE for edge features (H)\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import normalize\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nH1 = np.transpose(H)\n# H1 = normalize(H1)\ntsne = fig.add_subplot(1,2,1)\nX_tsne = TSNE(n_components=2, perplexity=40).fit_transform(H1)\ntsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\ntsne.title.set_text(\"t-SNE\")\n\npca = fig.add_subplot(1,2,2)\nX_pca = PCA(n_components=2).fit_transform(H1)\npca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(ed_label), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))\npca.title.set_text(\"PCA\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# SVD decomposition of Incidence matrix",
"_____no_output_____"
]
],
[
[
"# SVD decomposition\nui,si,vi = np.linalg.svd(inci)\n\nprint(ui.shape)\n# u=np.around(u,decimals=5)\n# print(ui)\n\nprint(si.shape)\n# s=np.around(s)\n# print(si)\n\nprint(vi.shape)\n# v=np.around(v,decimals=5)\n# print(vi)",
"(30, 30)\n(30,)\n(154, 154)\n"
]
],
[
[
"# SVD features of nodes decomposed from incidence matrix",
"_____no_output_____"
]
],
[
[
"import matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\nprint(len(list(partition.values())))\nprint(ui[:,0].shape)\nsvd.scatter([ui[:, 0]], [ui[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"U-nodes\")\nplt.show()",
"30\n(30, 1)\n"
]
],
[
[
"# SVD features of edges decomposed from incidence matrix",
"_____no_output_____"
]
],
[
[
"# SVD features of edges decomposed from incidence matrix\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\nvi1 = np.transpose(vi)\nsvd.scatter([vi1[:, 0]], [vi1[:, 1]],c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"W-edges\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# NORMALIZED GRAPH LAPLACIAN",
"_____no_output_____"
],
[
"# Decomposing normalized laplacian and plotting node features(W)",
"_____no_output_____"
]
],
[
[
"# calculate normalized graph laplacian \nL = nx.normalized_laplacian_matrix(G).todense()\nprint(L.shape)\nprint(L[0,0:5])",
"(30, 30)\n[[ 1. -0.09534626 -0.09090909 -0.09534626 -0.09534626]]\n"
],
[
"# NMF does not work on input matrix with negative values\n# from sklearn.decomposition import NMF\n\n# model = NMF(n_components=2,init='random', random_state=0)\n# # decomposing normalized graph laplacian L\n# W = model.fit_transform(L)\n# H = model.components_\n# err = model.reconstruction_err_\n# it = model.n_iter_\n# print(err)\n# print(it)\n# print(W.shape)\n# print(H.shape)\n# print(W[0])\n# print(H[:,0])",
"_____no_output_____"
]
],
[
[
"# SVD decomposition of normalized graph laplacian",
"_____no_output_____"
]
],
[
[
"# SVD decomposition\nul,sl,vl = np.linalg.svd(L)\n\nprint(ul.shape)\n# u=np.around(u,decimals=5)\n# print(ui)\n\nprint(sl.shape)\n# s=np.around(s)\n# print(si)\n\nprint(vl.shape)\n# v=np.around(v,decimals=5)\n# print(vi)",
"(30, 30)\n(30,)\n(30, 30)\n"
]
],
[
[
"# displaying SVD node features(U) of laplacian matrix\n### Doing SVD on normalized graph laplacian gives USV^T, where U and V are same, i.e. rows of U are same as clms of V^T. Hence below I displayed node features from U",
"_____no_output_____"
]
],
[
[
"import matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\n\nsvd.scatter([ul[:, 0]], [ul[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"U-nodes:SVD decomposition of normalized graph laplacian\")\nplt.show()",
"_____no_output_____"
],
[
"# applying tsne and pca on U-nde features--laplacian matrix\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import normalize\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan', 'red', 'purple']\n# normalize\nul1 = normalize(ul)\ntsne = fig.add_subplot(1,2,1)\nX_tsne = TSNE(n_components=2, perplexity=40).fit_transform(ul1)\ntsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\ntsne.title.set_text(\"t-SNE\")\n\npca = fig.add_subplot(1,2,2)\nX_pca = PCA(n_components=2).fit_transform(ul1)\npca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(list(partition.values())), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))\npca.title.set_text(\"PCA\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# ADJACENCY MATRIX",
"_____no_output_____"
],
[
"# Decomposing Adjacency matrix and displaying node featues",
"_____no_output_____"
]
],
[
[
"Adj = nx.adjacency_matrix(G)\nprint(Adj.todense().shape)\n\n# convert adjacency matrix to dense matrix(default format is sparse matrix)\nAdjDense = Adj.todense() ",
"(30, 30)\n"
]
],
[
[
"# NMF decomposition of Adjacency matrix",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import NMF\n\nmodel = NMF(n_components=2,init='random', random_state=0)\nWa = model.fit_transform(AdjDense)\nHa= model.components_\nerra = model.reconstruction_err_\nita = model.n_iter_\nprint(erra)\nprint(ita)\nprint(Wa.shape)\nprint(Ha.shape)\nprint(Wa[0])\nprint(Ha[:,0])",
"12.1730415662\n27\n(30, 2)\n(2, 30)\n[ 0.46313587 0. ]\n[ 0.70797697 0. ]\n"
],
[
"# displaying learned nodes\nimport matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\n\nsvd.scatter([Wa[:, 0]], [Wa[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"W-nodes:NMF decomposition of Adjacency matrix\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# SVD Decomposition of adjacency matrix",
"_____no_output_____"
]
],
[
[
"# Calculate SVD (Singular value decomposition) of graph's adjacency matrix\nua,sa,va = np.linalg.svd(AdjDense)\n\nprint(ua.shape)\n# u=np.around(u,decimals=3)\n# print(u)\n\nprint(sa.shape)\n# s=np.around(s)\n# print(s)\n\nprint(va.shape)\n# v=np.around(v,decimals=3)\n# print(v)",
"(30, 30)\n(30,)\n(30, 30)\n"
],
[
"import matplotlib\nimport numpy as np\n\nfig = plt.figure(figsize=(10,10))\ncolors=['green','hotpink','yellow', 'cyan','red','purple']\nsvd = fig.add_subplot(1,1,1)\n\nsvd.scatter([ua[:, 0]], [ua[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))\nsvd.title.set_text(\"U-nodes:SVD Decomposition of adjacency matrix\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# CONCLUSION\n### Learned node/edge features are highly dependent on which matrix factorization method is used\n\n*1) Non-negative matrix factorization(NMF):*\n\n - (why to use this:)Decomposes matrix X = W^T.H using alternating minimization algorithm. We can use Inductive matrix completion technique [Natarajan and Dhillon 2014](https://academic.oup.com/bioinformatics/article/30/12/i60/385272) to incorporate extra infor(like node features and edge features) while decomposing.\n \n - (disadv:)Works only on matrix with non-negative entries.\n\n*2) Singular value decomposition(SVD):*\n\n - (Why use this:)popular method. Decomposes matrix into 3 matrices.\n \n - (disadv:) Is it possible add extra infor while decomposing(something similar to NMF)\n \n## OBSERVATION:\n\n### Incidence matrix gives good separable node features as well as edge features\n\n - NMF is allowed on Incidence matrix.\n \n - Decomposed W matrix contains features of rows of incidence matrix which are nothing but the nodes of graph.\n \n - Scatter plot of both decompositions NMF and SVD, are nicely separable. However, representations of SVD are better.(This may be because, number of components used in NMF is 2, which leads to very high reconstruction error. perhaps, increasing it may help get better featuresusing NMF.)\n \n - ADV: along with good features, we are getting features for edges as well (which indicate reationship between nodes).\n - As NMF can be applied, we can make use of extra information while decomposition as per Inductive matrix completion [Natarajan and Dhillon 2014](https://academic.oup.com/bioinformatics/article/30/12/i60/385272)\n\n \n### Normalized Graph Laplacian does not give good features for nodes\n\n - NMF does not work for this matrix\n \n - SVD works but does not give good features. Even after doing PCA and TSNE, the node features are not forming clusters according to communitites.\n \n - We get only node features.\n\n### Adjacency matrix gives only good node features\n\n - NMF is allowed and gives good result but SVD gives better representation.\n \n - Gives only node features.\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7f1dd5813235074a0549a26d433c8cdb4df227c | 3,105 | ipynb | Jupyter Notebook | 2016/ferran/day3.ipynb | bbglab/adventofcode | 65b6d8331d10f229b59232882d60024b08d69294 | [
"MIT"
] | null | null | null | 2016/ferran/day3.ipynb | bbglab/adventofcode | 65b6d8331d10f229b59232882d60024b08d69294 | [
"MIT"
] | null | null | null | 2016/ferran/day3.ipynb | bbglab/adventofcode | 65b6d8331d10f229b59232882d60024b08d69294 | [
"MIT"
] | 3 | 2016-12-02T09:20:42.000Z | 2021-12-01T13:31:07.000Z | 20.032258 | 83 | 0.45314 | [
[
[
"# Chellenge 3\n\n## Challenge 3.1",
"_____no_output_____"
]
],
[
[
"myinput = '/home/fmuinos/projects/adventofcode/2016/ferran/inputs/input3.txt'",
"_____no_output_____"
],
[
"def is_triangle(sides):\n return sides[0] + sides[1] > sides[2]",
"_____no_output_____"
],
[
"def no_triangles(path):\n with open(path,'rt') as f:\n ntr = 0\n for line in f:\n sides = list(map(int, line.rstrip().split()))\n if is_triangle(sorted(sides)):\n ntr += 1\n return ntr",
"_____no_output_____"
],
[
"no_triangles(myinput)",
"_____no_output_____"
]
],
[
[
"## Challenge 3.2",
"_____no_output_____"
]
],
[
[
"def no_triangles_by_cols(path):\n triangles = [[0,0,0], [0,0,0], [0,0,0]]\n with open(path,'rt') as f:\n ntr = 0\n i = 1\n for line in f:\n sides = list(map(int, line.rstrip().split()))\n for j in range(3):\n triangles[j][i % 3] = sides[j]\n if i % 3 == 0:\n for j in range(3):\n if is_triangle(sorted(triangles[j])):\n ntr += 1\n i += 1\n return ntr",
"_____no_output_____"
],
[
"no_triangles_by_cols(myinput)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f1dfeb07e301b4a61d80873a999a4f97722887 | 188,787 | ipynb | Jupyter Notebook | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste | 644acaca4c40ea680feabef4c0a7870a10eaf305 | [
"MIT"
] | null | null | null | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste | 644acaca4c40ea680feabef4c0a7870a10eaf305 | [
"MIT"
] | null | null | null | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste | 644acaca4c40ea680feabef4c0a7870a10eaf305 | [
"MIT"
] | null | null | null | 195.027893 | 139,353 | 0.866516 | [
[
[
"<a href=\"https://colab.research.google.com/github/ChristianEngProd/HTML_Teste/blob/main/Primeiros_Passos_Christian_Python.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Agenda",
"_____no_output_____"
],
[
"**Tópicos**:\n\n* Revisão de Python:\n - Variáveis\n - Operações Matemáticas\n\n* Exercício Prático (Hands on)\n\n* Carregamento dos dados\n\n* Visualizações",
"_____no_output_____"
],
[
"# Revisão de Python",
"_____no_output_____"
],
[
"## Variáveis",
"_____no_output_____"
],
[
"Uma variável é um objeto que guarda um valor e armazena esse valor na memória do computador durante o tempo de desenvolvimento. Podemos inicializar uma variável por meio do comando de atribuição '='.",
"_____no_output_____"
]
],
[
[
"# podemos definir uma variável dando um nome\nano = 2020",
"_____no_output_____"
],
[
"# para imprimir a variável criada, utilizamos a função print\nprint(ano)",
"2020\n"
],
[
"salario = 1500\nprint(salario)\nsalario = 1000\nprint(salario)\n",
"1500\n1000\n"
]
],
[
[
"## Operações Matemáticas",
"_____no_output_____"
],
[
"Com Python podemos realizar operações matemáticas. Com o uso das variáveis isso fica ainda mais poderoso.",
"_____no_output_____"
]
],
[
[
"salario1 = 1500\nsalario2 = 1000",
"_____no_output_____"
],
[
"print(salario1 + salario2) #soma\nprint(salario1 - salario2) #subtração\nprint(salario1 * salario2) #multiplicação\nprint(salario1 / salario2) #divisão\nprint(salario1 // salario2) #divisão inteira\nprint(salario1 % salario2) #resto da divisão\nprint(salario1 ** 2) #exponenciação",
"2500\n500\n1500000\n1.5\n1\n500\n2250000\n"
]
],
[
[
"# Exercício Prático (Hands on)\n\n* 1. Abrir Google Colab: https://colab.research.google.com/\n* 2. Login na conta Google\n* 3. Arquivo --> Novo notebook",
"_____no_output_____"
],
[
"# Carregamento dos dados",
"_____no_output_____"
]
],
[
[
"#Biblioteca Pandas\nimport pandas as pd",
"_____no_output_____"
],
[
"#Carregando bases de dados de Jan22 a Mar22\n#Fonte: https://www.gov.br/anp/pt-br/centrais-de-conteudo/dados-abertos/serie-historica-de-precos-de-combustiveis\n\netanol_202201 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/precos-gasolina-etanol-2022-01.csv', sep = ';', decimal = ',', encoding = 'latin')\netanol_202202 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/precos-gasolina-etanol-2022-02.csv', sep = ';', decimal = ',', encoding = 'latin')\netanol_202203 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/dados-abertos-precos-2022-03-gasolina-etanol.csv', sep = ';', decimal = ',', encoding = 'latin')\n\ndf = pd.concat([etanol_202201, etanol_202202, etanol_202203])\ndf['Data da Coleta'] = pd.to_datetime(df['Data da Coleta'], dayfirst=True)\ndf.head()",
"_____no_output_____"
]
],
[
[
"## Dimensões do dataframe (tabela)",
"_____no_output_____"
]
],
[
[
"df.shape",
"_____no_output_____"
]
],
[
[
"# Visualizações",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"## Preço por Estado",
"_____no_output_____"
]
],
[
[
"df_estado = df.groupby('Estado')['Valor de Venda'].mean()\ndf_estado",
"_____no_output_____"
],
[
"df_estado.plot.bar(figsize = (20,5));",
"_____no_output_____"
]
],
[
[
"## Preço por Regiao",
"_____no_output_____"
]
],
[
[
"df_regiao = df.groupby('Regiao')['Valor de Venda'].mean()\ndf_regiao",
"_____no_output_____"
],
[
"df_regiao.plot.bar(figsize = (10,5));",
"_____no_output_____"
]
],
[
[
"## Preço por Região - Linha do Tempo",
"_____no_output_____"
]
],
[
[
"df_regiao_data = df.groupby(['Regiao','Data da Coleta'])['Valor de Venda'].mean().reset_index()\ndf_regiao_data",
"_____no_output_____"
],
[
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize = (20,5))\nsns.lineplot(data = df_regiao_data, \n x = 'Data da Coleta', \n y = 'Valor de Venda', \n hue = 'Regiao');\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f1fdbdb684df6bba9fd4dd2e93aa2705182bd1 | 641 | ipynb | Jupyter Notebook | pset_challenging_ext/exercises/nb/p80.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_challenging_ext/exercises/nb/p80.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_challenging_ext/exercises/nb/p80.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | 24.653846 | 138 | 0.561622 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f20ee57c8adcda49a0793c5917a4ba07e2b443 | 11,721 | ipynb | Jupyter Notebook | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning | c470f83310f824860af3382baba13b2667f987c9 | [
"Apache-2.0"
] | 22 | 2018-03-05T11:17:48.000Z | 2021-06-15T02:10:36.000Z | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning | c470f83310f824860af3382baba13b2667f987c9 | [
"Apache-2.0"
] | 7 | 2018-03-10T10:17:30.000Z | 2018-04-23T00:57:39.000Z | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning | c470f83310f824860af3382baba13b2667f987c9 | [
"Apache-2.0"
] | 8 | 2018-03-06T01:21:31.000Z | 2021-06-15T02:10:37.000Z | 29.673418 | 344 | 0.57205 | [
[
[
"# Network Initializer\n\n### What is neuron?\n\nFeed-forward neural networks are inspired by the information processing of one or more neural cells, called a neuron. A neuron accepts input signals via its dendrites, which pass the electrical signal down to the cell body. The axon carries the signal out to synapses, which are the connections of a cell’s axon to other cell’s dendrites.",
"_____no_output_____"
]
],
[
[
"from random import random, seed\n\ndef initialize_network(n_inputs, n_hidden, n_outputs):\n network = list()\n # Creating hidden layers according to the number of inputs\n hidden_layer = [{'weights': [random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]\n network.append(hidden_layer)\n # Creating output layer according to the number of hidden layers\n output_layer = [{'weights': [random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]\n network.append(output_layer)\n return network",
"_____no_output_____"
],
[
"# It is good practice to initialize the network weights to small random numbers. \n# In this case, will we use random numbers in the range of 0 to 1.\n# To achieve that we seed random with 1\nseed(1)",
"_____no_output_____"
],
[
"# 2 input units, 1 hidden unit and 2 output units\nnetwork = initialize_network(2, 1, 2)\n# You can see the hidden layer has one neuron with 2 input weights plus the bias.\n# The output layer has 2 neurons, each with 1 weight plus the bias.\n\nfor layer in network:\n print(layer)",
"[{'weights': [0.7887233511355132, 0.0938595867742349, 0.02834747652200631]}]\n[{'weights': [0.8357651039198697, 0.43276706790505337]}, {'weights': [0.762280082457942, 0.0021060533511106927]}]\n"
]
],
[
[
"# Forward propagate\n\nWe can calculate an output from a neural network by propagating an input signal through each layer until the output layer outputs its values.\n\nWe can break forward propagation down into three parts:\n\n\n1. Neuron Activation.\n\n2. Neuron Transfer.\n\n3. Forward Propagation.\n",
"_____no_output_____"
],
[
"# 1. Neuron Activation\n\nThe first step is to calculate the activation of one neuron given an input.\n\nNeuron activation is calculated as the weighted sum of the inputs. Much like linear regression.\n\n\nactivation = sum(weight_i * input_i) + bias\n\n\nWhere weight is a network weight, input is an input, i is the index of a weight or an input and bias is a special weight that has no input to multiply with (or you can think of the input as always being 1.0).\n",
"_____no_output_____"
]
],
[
[
"# Implementation\ndef activate(weights, inputs):\n activation = weights[-1]\n for i in range(len(weights) - 1):\n activation += weights[i] * inputs[i]\n return activation",
"_____no_output_____"
]
],
[
[
"# 2. Neuron Transfer\n\nOnce a neuron is activated, we need to transfer the activation to see what the neuron output actually is.\n\nDifferent transfer functions can be used. It is traditional to use the *sigmoid activation function*, but you can also use the *tanh* (hyperbolic tangent) function to transfer outputs. More recently, the *rectifier transfer function* has been popular with large deep learning networks.\n\n\nSigmoid formula\n\noutput = 1 / (1 + e^(-activation))",
"_____no_output_____"
]
],
[
[
"from math import exp\n\ndef transfer(activation):\n return 1.0 / (1.0 + exp(-activation))",
"_____no_output_____"
]
],
[
[
"# 3. Forawrd propagate",
"_____no_output_____"
]
],
[
[
"# Foward propagate is self-explanatory\ndef forward_propagate(network, row):\n inputs = row\n for layer in network:\n new_inputs = []\n for neuron in layer:\n activation = activate(neuron['weights'], inputs)\n neuron['output'] = transfer(activation)\n new_inputs.append(neuron['output'])\n inputs = new_inputs\n return inputs",
"_____no_output_____"
],
[
"inputs = [1, 0, None]\noutput = forward_propagate(network, inputs)",
"_____no_output_____"
],
[
"# Running the example propagates the input pattern [1, 0] and produces an output value that is printed.\n# Because the output layer has two neurons, we get a list of two numbers as output.\noutput",
"_____no_output_____"
]
],
[
[
"# Backpropagation\n\n### What is it?\n\n1. Error is calculated between the expected outputs and the outputs forward propagated from the network.\n\n2. These errors are then propagated backward through the network from the output layer to the hidden layer, assigning blame for the error and updating weights as they go.\n\n\n### This part is broken down into two sections.\n\n- Transfer Derivative\n- Error Backpropagation",
"_____no_output_____"
],
[
"## Transfer Derivative\n\nGiven an output value from a neuron, we need to calculate it’s *slope*.\n\nderivative = output * (1.0 - output)",
"_____no_output_____"
]
],
[
[
"# Calulates the derivation from an neuron output\ndef transfer_derivative(output):\n return output * (1.0 - output)",
"_____no_output_____"
]
],
[
[
"# Error Backpropagation\n\n1. calculate the error for each output neuron, this will give us our error signal (input) to propagate backwards through the network.\n\nerror = (expected - output) * transfer_derivative(output)\n\n\nexpected: expected output value for the neuron\n\noutput: output value for the neuron and transfer_derivative()\n\n----\n\nThe back-propagated error signal is accumulated and then used to determine the error for the neuron in the hidden layer, as follows:\n\n\nerror = (weight_k * error_j) * transfer_derivative(output)\n\nerror_j: the error signal from the jth neuron in the output layer\n\nweight_k: the weight that connects the kth neuron to the current neuron and output is the output for the current neuron",
"_____no_output_____"
]
],
[
[
"def backward_propagate_error(network, expected):\n for i in reversed(range(len(network))):\n layer = network[i]\n errors = list()\n if i != len(network) - 1:\n for j in range(len(layer)):\n error = 0.0\n for neuron in network[i + 1]:\n error += (neuron['weights'][j] * neuron['delta'])\n errors.append(error)\n else:\n for j in range(len(layer)):\n neuron = layer[j]\n errors.append(expected[j] - neuron['output'])\n for j in range(len(layer)):\n neuron = layer[j]\n neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])",
"_____no_output_____"
],
[
"expected = [0, 1]\nbackward_propagate_error(network, expected)\n# delta: error value\nfor layer in network:\n print(layer)",
"[{'weights': [0.7887233511355132, 0.0938595867742349, 0.02834747652200631], 'output': 0.6936142046010635, 'delta': -0.011477619712406795}]\n[{'weights': [0.8357651039198697, 0.43276706790505337], 'output': 0.7335023968859138, 'delta': -0.1433825771158816}, {'weights': [0.762280082457942, 0.0021060533511106927], 'output': 0.6296776889933221, 'delta': 0.08635312555373359}]\n"
]
],
[
[
"# Train Network\n\nTwo parts\n\n- Update Weights\n\n- Train Network",
"_____no_output_____"
],
[
"### Update weights\n\nOnce errors are calculated for each neuron in the network via the back propagation method above, they can be used to update weights.\n\nweight = weight + learning_rate * error * input\n\nweight = given weight\n\nThe learning_rate parameter is explicitly specified by the programmer. This controls how much the weights will be updated at each step.\nA learning rate that is very large may sound appealing, as the weights will be more dramatically updated, which could lead to faster learning.\nHowever, this causes the learning process to become very unstable. Ideally, we want a learning rate that is steady and reliable, but will find a solution in a reasonable amount of time.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7f2130e0746ba7efc81d47e480678c649ad7e33 | 82,292 | ipynb | Jupyter Notebook | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy | f90be88fd401edf11d16c72b11934f1e7160aeb5 | [
"MIT"
] | 1 | 2018-10-04T21:43:09.000Z | 2018-10-04T21:43:09.000Z | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy | f90be88fd401edf11d16c72b11934f1e7160aeb5 | [
"MIT"
] | null | null | null | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy | f90be88fd401edf11d16c72b11934f1e7160aeb5 | [
"MIT"
] | null | null | null | 47.294253 | 20,584 | 0.682703 | [
[
[
"# Modeling and Simulation in Python\n\nChapter 20\n\nCopyright 2017 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)\n",
"_____no_output_____"
]
],
[
[
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *",
"_____no_output_____"
]
],
[
[
"### Dropping pennies\n\nI'll start by getting the units we need from Pint.",
"_____no_output_____"
]
],
[
[
"m = UNITS.meter\ns = UNITS.second",
"_____no_output_____"
]
],
[
[
"And defining the initial state.",
"_____no_output_____"
]
],
[
[
"init = State(y=381 * m, \n v=0 * m/s)",
"_____no_output_____"
]
],
[
[
"Acceleration due to gravity is about 9.8 m / s$^2$.",
"_____no_output_____"
]
],
[
[
"g = 9.8 * m/s**2",
"_____no_output_____"
]
],
[
[
"When we call `odeint`, we need an array of timestamps where we want to compute the solution.\n\nI'll start with a duration of 10 seconds.",
"_____no_output_____"
]
],
[
[
"t_end = 10 * s",
"_____no_output_____"
]
],
[
[
"Now we make a `System` object.",
"_____no_output_____"
]
],
[
[
"system = System(init=init, g=g, t_end=t_end)",
"_____no_output_____"
]
],
[
[
"And define the slope function.",
"_____no_output_____"
]
],
[
[
"def slope_func(state, t, system):\n \"\"\"Compute derivatives of the state.\n \n state: position, velocity\n t: time\n system: System object containing `g`\n \n returns: derivatives of y and v\n \"\"\"\n y, v = state\n unpack(system) \n\n dydt = v\n dvdt = -g\n \n return dydt, dvdt",
"_____no_output_____"
]
],
[
[
"It's always a good idea to test the slope function with the initial conditions.",
"_____no_output_____"
]
],
[
[
"dydt, dvdt = slope_func(init, 0, system)\nprint(dydt)\nprint(dvdt)",
"0.0 meter / second\n-9.8 meter / second ** 2\n"
]
],
[
[
"Now we're ready to call `run_ode_solver`",
"_____no_output_____"
]
],
[
[
"results, details = run_ode_solver(system, slope_func, max_step=0.5*s)\ndetails.message",
"_____no_output_____"
]
],
[
[
"Here are the results:",
"_____no_output_____"
]
],
[
[
"results",
"_____no_output_____"
]
],
[
[
"And here's position as a function of time:",
"_____no_output_____"
]
],
[
[
"def plot_position(results):\n plot(results.y, label='y')\n decorate(xlabel='Time (s)',\n ylabel='Position (m)')\n\nplot_position(results)\nsavefig('figs/chap09-fig01.pdf')",
"Saving figure to file figs/chap09-fig01.pdf\n"
]
],
[
[
"### Onto the sidewalk\n\nTo figure out when the penny hit the sidewalk, we can use `crossings`, which finds the times where a `Series` passes through a given value.",
"_____no_output_____"
]
],
[
[
"t_crossings = crossings(results.y, 0)",
"_____no_output_____"
]
],
[
[
"For this example there should be just one crossing, the time when the penny hits the sidewalk.",
"_____no_output_____"
]
],
[
[
"t_sidewalk = t_crossings[0] * s",
"_____no_output_____"
]
],
[
[
"We can compare that to the exact result. Without air resistance, we have\n\n$v = -g t$\n\nand\n\n$y = 381 - g t^2 / 2$\n\nSetting $y=0$ and solving for $t$ yields\n\n$t = \\sqrt{\\frac{2 y_{init}}{g}}$",
"_____no_output_____"
]
],
[
[
"sqrt(2 * init.y / g)",
"_____no_output_____"
]
],
[
[
"The estimate is accurate to about 10 decimal places.",
"_____no_output_____"
],
[
"## Events\n\nInstead of running the simulation until the penny goes through the sidewalk, it would be better to detect the point where the penny hits the sidewalk and stop. `run_ode_solver` provides exactly the tool we need, **event functions**.\n\nHere's an event function that returns the height of the penny above the sidewalk:",
"_____no_output_____"
]
],
[
[
"def event_func(state, t, system):\n \"\"\"Return the height of the penny above the sidewalk.\n \"\"\"\n y, v = state\n return y",
"_____no_output_____"
]
],
[
[
"And here's how we pass it to `run_ode_solver`. The solver should run until the event function returns 0, and then terminate.",
"_____no_output_____"
]
],
[
[
"results, details = run_ode_solver(system, slope_func, events=event_func)\ndetails",
"_____no_output_____"
]
],
[
[
"The message from the solver indicates the solver stopped because the event we wanted to detect happened.\n\nHere are the results:",
"_____no_output_____"
]
],
[
[
"results",
"_____no_output_____"
]
],
[
[
"With the `events` option, the solver returns the actual time steps it computed, which are not necessarily equally spaced. \n\nThe last time step is when the event occurred:",
"_____no_output_____"
]
],
[
[
"t_sidewalk = get_last_label(results) * s",
"_____no_output_____"
]
],
[
[
"Unfortunately, `run_ode_solver` does not carry the units through the computation, so we have to put them back at the end.\n\nWe could also get the time of the event from `details`, but it's a minor nuisance because it comes packed in an array:",
"_____no_output_____"
]
],
[
[
"details.t_events[0][0] * s",
"_____no_output_____"
]
],
[
[
"The result is accurate to about 15 decimal places.\n\nWe can also check the velocity of the penny when it hits the sidewalk:",
"_____no_output_____"
]
],
[
[
"v_sidewalk = get_last_value(results.v) * m / s",
"_____no_output_____"
]
],
[
[
"And convert to kilometers per hour.",
"_____no_output_____"
]
],
[
[
"km = UNITS.kilometer\nh = UNITS.hour\nv_sidewalk.to(km / h)",
"_____no_output_____"
]
],
[
[
"If there were no air resistance, the penny would hit the sidewalk (or someone's head) at more than 300 km/h.\n\nSo it's a good thing there is air resistance.",
"_____no_output_____"
],
[
"## Under the hood\n\nHere is the source code for `crossings` so you can see what's happening under the hood:",
"_____no_output_____"
]
],
[
[
"%psource crossings",
"_____no_output_____"
]
],
[
[
"The [documentation of InterpolatedUnivariateSpline is here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.html).\n\nAnd you can read the [documentation of `scipy.integrate.solve_ivp`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html) to learn more about how `run_ode_solver` works.",
"_____no_output_____"
],
[
"### Exercises\n\n**Exercise:** Here's a question from the web site [Ask an Astronomer](http://curious.astro.cornell.edu/about-us/39-our-solar-system/the-earth/other-catastrophes/57-how-long-would-it-take-the-earth-to-fall-into-the-sun-intermediate):\n\n\"If the Earth suddenly stopped orbiting the Sun, I know eventually it would be pulled in by the Sun's gravity and hit it. How long would it take the Earth to hit the Sun? I imagine it would go slowly at first and then pick up speed.\"\n\nUse `run_ode_solver` to answer this question.\n\nHere are some suggestions about how to proceed:\n\n1. Look up the Law of Universal Gravitation and any constants you need. I suggest you work entirely in SI units: meters, kilograms, and Newtons.\n\n2. When the distance between the Earth and the Sun gets small, this system behaves badly, so you should use an event function to stop when the surface of Earth reaches the surface of the Sun.\n\n3. Express your answer in days, and plot the results as millions of kilometers versus days.\n\nIf you read the reply by Dave Rothstein, you will see other ways to solve the problem, and a good discussion of the modeling decisions behind them.\n\nYou might also be interested to know that [it's actually not that easy to get to the Sun](https://www.theatlantic.com/science/archive/2018/08/parker-solar-probe-launch-nasa/567197/).",
"_____no_output_____"
]
],
[
[
"# Solution\n\nN = UNITS.newton\nkg = UNITS.kilogram\nm = UNITS.meter\nAU = UNITS.astronomical_unit",
"_____no_output_____"
],
[
"# Solution\n\nr_0 = (1 * AU).to_base_units()\nv_0 = 0 * m / s\ninit = State(r=r_0,\n v=v_0)",
"_____no_output_____"
],
[
"# Solution\n\nr_earth = 6.371e6 * m\nr_sun = 695.508e6 * m\n\nsystem = System(init=init,\n G=6.674e-11 * N / kg**2 * m**2,\n m1=1.989e30 * kg,\n r_final=r_sun + r_earth,\n m2=5.972e24 * kg,\n t_0=0 * s,\n t_end=1e7 * s)",
"_____no_output_____"
],
[
"# Solution\n\ndef universal_gravitation(state, system):\n \"\"\"Computes gravitational force.\n \n state: State object with distance r\n system: System object with m1, m2, and G\n \"\"\"\n r, v = state\n unpack(system)\n \n force = G * m1 * m2 / r**2\n return force",
"_____no_output_____"
],
[
"# Solution\n\nuniversal_gravitation(init, system)",
"_____no_output_____"
],
[
"# Solution\n\ndef slope_func(state, t, system):\n \"\"\"Compute derivatives of the state.\n \n state: position, velocity\n t: time\n system: System object containing `g`\n \n returns: derivatives of y and v\n \"\"\"\n y, v = state\n unpack(system) \n\n force = universal_gravitation(state, system)\n dydt = v\n dvdt = -force / m2\n \n return dydt, dvdt",
"_____no_output_____"
],
[
"# Solution\n\nslope_func(init, 0, system)",
"_____no_output_____"
],
[
"# Solution\n\ndef event_func(state, t, system):\n r, v = state\n return r - system.r_final",
"_____no_output_____"
],
[
"# Solution\n\nevent_func(init, 0, system)",
"_____no_output_____"
],
[
"# Solution\n\nresults, details = run_ode_solver(system, slope_func, events=event_func)\ndetails",
"_____no_output_____"
],
[
"# Solution\n\nt_event = details.t_events[0] * s",
"_____no_output_____"
],
[
"# Solution\n\nt_event.to(UNITS.day)",
"_____no_output_____"
],
[
"# Solution\n\nts = linspace(t_0, t_event, 201)\nresults, details = run_ode_solver(system, slope_func, events=event_func, t_eval=ts)",
"_____no_output_____"
],
[
"# Solution\n\nresults.index /= 60 * 60 * 24",
"_____no_output_____"
],
[
"# Solution\n\nresults.r /= 1e9",
"_____no_output_____"
],
[
"# Solution\n\nplot(results.r, label='r')\n\ndecorate(xlabel='Time (day)',\n ylabel='Distance from sun (million km)')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f219b9fae9771d9cfa8bc0608a6689926a19b3 | 2,208 | ipynb | Jupyter Notebook | Scraper.ipynb | mobu/PyNews | 8efc1fa5825e22bbbce7cb90e158440d80fbb100 | [
"MIT"
] | null | null | null | Scraper.ipynb | mobu/PyNews | 8efc1fa5825e22bbbce7cb90e158440d80fbb100 | [
"MIT"
] | null | null | null | Scraper.ipynb | mobu/PyNews | 8efc1fa5825e22bbbce7cb90e158440d80fbb100 | [
"MIT"
] | null | null | null | 23 | 111 | 0.504982 | [
[
[
"import requests\nimport json\nimport numpy as np\nimport pandas as pd\nimport urllib.parse\nimport re\nfrom newspaper import Article",
"_____no_output_____"
],
[
"query = 'gun AND (gun control)' #use AND/OR/NOT keywords like this (syria AND trump) NOT USA\nquery = urllib.parse.quote(query)\ndate = '2018-04-22'\nsource = 'breitbart'\n\nurl_list = []\narticle_list = []\n\nurl = ('https://newsapi.org/v2/everything?'\n 'q={}&'\n 'from={}&'\n 'sortBy=popularity&'\n 'sources={}&apiKey=d878fe697fb242269bae9d3361e6d299').format(query,date,source)\n\nresponse = requests.get(url).json()\n\nprint('Gathering article...')\nfor k in response['articles']:\n url = k['url']\n article = Article(url)\n article.download()\n article.parse()\n url_list.append(url)\n article_list.append(article.text)\n\nprint('Done.')\n\ndata = pd.DataFrame({'URL':url_list,'Content':article_list})\n\ndate = re.sub('-','',date)\ndata.to_json('{}-{}{}.json'.format(query,source,date)) ",
"Gathering article...\nDone.\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7f22616e2b7cb2013c90de426d45f8967fe8b48 | 426,271 | ipynb | Jupyter Notebook | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras | 44ca407196ed393b06f5097e2a2d15d082deeb9a | [
"Apache-2.0"
] | 2 | 2021-08-22T11:48:58.000Z | 2021-09-22T12:34:31.000Z | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras | 44ca407196ed393b06f5097e2a2d15d082deeb9a | [
"Apache-2.0"
] | null | null | null | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras | 44ca407196ed393b06f5097e2a2d15d082deeb9a | [
"Apache-2.0"
] | null | null | null | 269.621126 | 59,870 | 0.897272 | [
[
[
"<a href=\"https://colab.research.google.com/github/ashishpatel26/SRGAN-Keras-For-Medical-Images/blob/main/SRGAN_Final.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# !gdown --id 1LukOUfVNeps1Jj7Z27JbkmrO90jwBgie\n# !pip install kora\n# from kora import drive\n# drive.download_folder('1LukOUfVNeps1Jj7Z27JbkmrO90jwBgie')",
"_____no_output_____"
],
[
"import shutil\nshutil.unpack_archive('mri.zip')",
"_____no_output_____"
],
[
"# !ls /content/img_align_celeba",
"_____no_output_____"
]
],
[
[
"### Load Libraries",
"_____no_output_____"
]
],
[
[
"!pip install scipy==1.1.0",
"Collecting scipy==1.1.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/40/de/0c22c6754370ba6b1fa8e53bd6e514d4a41a181125d405a501c215cbdbd6/scipy-1.1.0-cp37-cp37m-manylinux1_x86_64.whl (31.2MB)\n\u001b[K |████████████████████████████████| 31.2MB 93kB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.8.2 in /usr/local/lib/python3.7/dist-packages (from scipy==1.1.0) (1.19.5)\n\u001b[31mERROR: plotnine 0.6.0 has requirement scipy>=1.2.0, but you'll have scipy 1.1.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\nInstalling collected packages: scipy\n Found existing installation: scipy 1.4.1\n Uninstalling scipy-1.4.1:\n Successfully uninstalled scipy-1.4.1\nSuccessfully installed scipy-1.1.0\n"
],
[
"import glob\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom keras import Input\nfrom keras.applications import VGG19\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import BatchNormalization, Activation, LeakyReLU, Add, Dense\nfrom keras.layers.convolutional import Conv2D, UpSampling2D\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom scipy.misc import imread, imresize\nimport keras.backend as K\nimport cv2\nimport os\nfrom PIL import Image\n# from imageio import imread\n# from skimage.transform import resize",
"_____no_output_____"
]
],
[
[
"### Residual Block",
"_____no_output_____"
]
],
[
[
"def residual_block(x):\n \"\"\"\n Residual block\n \"\"\"\n filters = [64, 64]\n kernel_size = 3\n strides = 1\n padding = \"same\"\n momentum = 0.8\n activation = \"relu\"\n\n res = Conv2D(filters=filters[0], kernel_size=kernel_size, strides=strides, padding=padding)(x)\n res = Activation(activation=activation)(res)\n res = BatchNormalization(momentum=momentum)(res)\n\n res = Conv2D(filters=filters[1], kernel_size=kernel_size, strides=strides, padding=padding)(res)\n res = BatchNormalization(momentum=momentum)(res)\n\n # Add res and x\n res = Add()([res, x])\n return res",
"_____no_output_____"
]
],
[
[
"### Build Generator",
"_____no_output_____"
]
],
[
[
"def build_generator():\n \"\"\"\n Create a generator network using the hyperparameter values defined below\n :return:\n \"\"\"\n residual_blocks = 16\n momentum = 0.8\n input_shape = (64, 64, 3)\n\n # Input Layer of the generator network\n input_layer = Input(shape=input_shape)\n\n # Add the pre-residual block\n gen1 = Conv2D(filters=64, kernel_size=9, strides=1, padding='same', activation='relu')(input_layer)\n\n # Add 16 residual blocks\n res = residual_block(gen1)\n for i in range(residual_blocks - 1):\n res = residual_block(res)\n\n # Add the post-residual block\n gen2 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(res)\n gen2 = BatchNormalization(momentum=momentum)(gen2)\n\n # Take the sum of the output from the pre-residual block(gen1) and the post-residual block(gen2)\n gen3 = Add()([gen2, gen1])\n\n # Add an upsampling block\n gen4 = UpSampling2D(size=2)(gen3)\n gen4 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(gen4)\n gen4 = Activation('relu')(gen4)\n\n # Add another upsampling block\n gen5 = UpSampling2D(size=2)(gen4)\n gen5 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(gen5)\n gen5 = Activation('relu')(gen5)\n\n # Output convolution layer\n gen6 = Conv2D(filters=3, kernel_size=9, strides=1, padding='same')(gen5)\n output = Activation('tanh')(gen6)\n\n # Keras model\n model = Model(inputs=[input_layer], outputs=[output], name='generator')\n return model\n",
"_____no_output_____"
]
],
[
[
"### Build Descriminator",
"_____no_output_____"
]
],
[
[
"def build_discriminator():\n \"\"\"\n Create a discriminator network using the hyperparameter values defined below\n :return:\n \"\"\"\n leakyrelu_alpha = 0.2\n momentum = 0.8\n input_shape = (256, 256, 3)\n\n input_layer = Input(shape=input_shape)\n\n # Add the first convolution block\n dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)\n dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)\n\n # Add the 2nd convolution block\n dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)\n dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)\n dis2 = BatchNormalization(momentum=momentum)(dis2)\n\n # Add the third convolution block\n dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)\n dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)\n dis3 = BatchNormalization(momentum=momentum)(dis3)\n\n # Add the fourth convolution block\n dis4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same')(dis3)\n dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)\n dis4 = BatchNormalization(momentum=0.8)(dis4)\n\n # Add the fifth convolution block\n dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)\n dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)\n dis5 = BatchNormalization(momentum=momentum)(dis5)\n\n # Add the sixth convolution block\n dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)\n dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)\n dis6 = BatchNormalization(momentum=momentum)(dis6)\n\n # Add the seventh convolution block\n dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)\n dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)\n dis7 = BatchNormalization(momentum=momentum)(dis7)\n\n # Add the eight convolution block\n dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)\n dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)\n dis8 = BatchNormalization(momentum=momentum)(dis8)\n\n # Add a dense layer\n dis9 = Dense(units=1024)(dis8)\n dis9 = LeakyReLU(alpha=0.2)(dis9)\n\n # Last dense layer - for classification\n output = Dense(units=1, activation='sigmoid')(dis9)\n\n model = Model(inputs=[input_layer], outputs=[output], name='discriminator')\n return model",
"_____no_output_____"
]
],
[
[
"### Build VGG19",
"_____no_output_____"
]
],
[
[
"def build_vgg():\n \"\"\"\n Build VGG network to extract image features\n \"\"\"\n input_shape = (256, 256, 3)\n\n # Load a pre-trained VGG19 model trained on 'Imagenet' dataset\n vgg = VGG19(include_top=False, weights='imagenet', input_shape=input_shape)\n vgg.outputs = [vgg.layers[20].output]\n\n # Create a Keras model\n model = Model(vgg.input, vgg.outputs)\n return model\n\n\n# def build_vgg():\n# \"\"\"\n# Build VGG network to extract image features\n# \"\"\"\n# input_shape = (256, 256, 3)\n\n# # Load a pre-trained VGG19 model trained on 'Imagenet' dataset\n# vgg = VGG19(include_top=False, weights='imagenet')\n# vgg.outputs = [vgg.layers[20].output]\n\n# input_layer = Input(shape=input_shape)\n\n# # Extract features\n# features = vgg(input_layer)\n\n# # Create a Keras model\n# model = Model(inputs=[input_layer], outputs=[features])\n# return model",
"_____no_output_____"
],
[
"model = build_vgg()\nmodel.summary()",
"Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\n80142336/80134624 [==============================] - 0s 0us/step\nModel: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 256, 256, 3)] 0 \n_________________________________________________________________\nblock1_conv1 (Conv2D) (None, 256, 256, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 256, 256, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 128, 128, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 128, 128, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 128, 128, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 64, 64, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 64, 64, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 64, 64, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 64, 64, 256) 590080 \n_________________________________________________________________\nblock3_conv4 (Conv2D) (None, 64, 64, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 32, 32, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 32, 32, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 32, 32, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 32, 32, 512) 2359808 \n_________________________________________________________________\nblock4_conv4 (Conv2D) (None, 32, 32, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 16, 16, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 16, 16, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 16, 16, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 16, 16, 512) 2359808 \n_________________________________________________________________\nblock5_conv4 (Conv2D) (None, 16, 16, 512) 2359808 \n=================================================================\nTotal params: 20,024,384\nTrainable params: 20,024,384\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Sample Images",
"_____no_output_____"
]
],
[
[
"def sample_images(data_dir, batch_size, high_resolution_shape, low_resolution_shape):\n # Make a list of all images inside the data directory\n all_images = glob.glob(data_dir)\n\n # Choose a random batch of images\n images_batch = np.random.choice(all_images, size=batch_size)\n\n low_resolution_images = []\n high_resolution_images = []\n\n for img in images_batch:\n # Get an ndarray of the current image\n img1 = imread(img, mode='RGB')\n img1 = img1.astype(np.float32)\n\n # Resize the image\n img1_high_resolution = imresize(img1, high_resolution_shape)\n img1_low_resolution = imresize(img1, low_resolution_shape)\n\n # Do a random horizontal flip\n if np.random.random() < 0.5:\n img1_high_resolution = np.fliplr(img1_high_resolution)\n img1_low_resolution = np.fliplr(img1_low_resolution)\n\n high_resolution_images.append(img1_high_resolution)\n low_resolution_images.append(img1_low_resolution)\n\n # Convert the lists to Numpy NDArrays\n return np.array(high_resolution_images), np.array(low_resolution_images)",
"_____no_output_____"
]
],
[
[
"### Save Images",
"_____no_output_____"
]
],
[
[
"def compute_psnr(original_image, generated_image):\n \n original_image = tf.convert_to_tensor(original_image, dtype = tf.float32)\n generated_image = tf.convert_to_tensor(generated_image, dtype = tf.float32)\n \n psnr = tf.image.psnr(original_image, generated_image, max_val = 1.0)\n \n return tf.math.reduce_mean(psnr, axis = None, keepdims = False, name = None)\n\ndef plot_psnr(psnr):\n \n psnr_means = psnr['psnr_quality']\n plt.figure(figsize = (10,8))\n \n plt.plot(psnr_means)\n plt.xlabel('Epochs')\n plt.ylabel('PSNR')\n plt.title('PSNR')\n \ndef compute_ssim(original_image, generated_image):\n \n original_image = tf.convert_to_tensor(original_image, dtype = tf.float32)\n generated_image = tf.convert_to_tensor(generated_image, dtype = tf.float32)\n \n ssim = tf.image.ssim(original_image, generated_image, max_val = 1.0, filter_size = 11, filter_sigma = 1.5, k1 = 0.01, )\n \n return tf.math.reduce_mean(ssim, axis = None, keepdims = False, name = None)\n\ndef plot_ssim(ssim):\n \n ssim_means = ssim['ssim_quality']\n \n plt.figure(figsize = (10,8))\n plt.plot(ssim_means)\n plt.xlabel('Epochs')\n plt.ylabel('SSIM')\n plt.title('SSIM')\n\ndef plot_loss(losses):\n \n d_loss = losses['d_history']\n g_loss = losses['g_history']\n \n plt.figure(figsize = (10,8))\n plt.plot(d_loss, label = \"Discriminator loss\")\n plt.plot(g_loss, label = \"Generator Loss\")\n plt.xlabel(\"Epochs\")\n plt.ylabel('Loss')\n plt.title(\"Loss\")\n plt.legend()",
"_____no_output_____"
],
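[
"# Hedged usage sketch (added for illustration): sanity-check the metric helpers on random images in [0, 1].\n# Assumes TensorFlow runs eagerly (the Colab default), so .numpy() works on the returned tensors.\n_a = np.random.rand(1, 256, 256, 3).astype('float32')\n_b = np.random.rand(1, 256, 256, 3).astype('float32')\nprint('PSNR:', compute_psnr(_a, _b).numpy())\nprint('SSIM:', compute_ssim(_a, _b).numpy())",
"_____no_output_____"
],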
[
"def save_images(low_resolution_image, original_image, generated_image, path, psnr, ssim):\n \"\"\"\n Save low-resolution, high-resolution(original) and\n generated high-resolution images in a single image\n \"\"\"\n fig = plt.figure(figsize=(12,5))\n ax = fig.add_subplot(1, 3, 1)\n ax.imshow(low_resolution_image)\n ax.axis(\"off\")\n ax.set_title(\"Low-resolution \")\n\n ax = fig.add_subplot(1, 3, 2)\n ax.imshow(original_image)\n ax.axis(\"off\")\n ax.set_title(f\"High-resolution\\nPSNR : {psnr}\")\n # ax.set_xlabel(f\"PSNR : {psnr}\")\n # ax.save(hr_path,bbox_inches='tight',transparent=True, pad_inches=0)\n\n ax = fig.add_subplot(1, 3, 3)\n ax.imshow(np.squeeze(generated_image), cmap = plt.get_cmap(name = 'gray'))\n ax.axis(\"off\")\n ax.set_title(f\"Generated\\nSSIM : {ssim}\" )\n # ax.set_xlabel(f\"SSIM : {ssim}\")\n # ax.save(pr_path, bbox_inches='tight',transparent=True, pad_inches=0)\n\n plt.savefig(path)",
"_____no_output_____"
]
],
[
[
"### Write a Log",
"_____no_output_____"
]
],
[
[
"from PIL import Image\nfrom skimage.metrics import structural_similarity as ssim",
"_____no_output_____"
]
],
[
[
"### Final SRGAN Execution",
"_____no_output_____"
]
],
[
[
"losses = {'d_history' : [], \"g_history\": []}\npsnr = {'psnr_quality' : []}\nssim = {'ssim_quality' : []}",
"_____no_output_____"
],
[
"from tqdm.notebook import tqdm\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) ",
"_____no_output_____"
],
[
"data_dir = \"/content/train/*.*\"\nos.makedirs(\"results\", exist_ok=True)\n# os.makedirs(\"HR\", exist_ok=True)\n# os.makedirs(\"PR\", exist_ok=True)\n# os.makedirs(\"LR\", exist_ok=True)\n\nepochs = 1000\nbatch_size = 1\nmode = 'train'\n\n# Shape of low-resolution and high-resolution images\nlow_resolution_shape = (64, 64, 3)\nhigh_resolution_shape = (256, 256, 3)\n\n# Common optimizer for all networks\ncommon_optimizer = Adam(0.0002, 0.5)\n\nif mode == 'train':\n # Build and compile VGG19 network to extract features\n vgg = build_vgg()\n vgg.trainable = False\n vgg.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])\n\n # Build and compile the discriminator network\n discriminator = build_discriminator()\n discriminator.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])\n\n # Build the generator network\n generator = build_generator()\n\n \"\"\"\n Build and compile the adversarial model\n \"\"\"\n\n # Input layers for high-resolution and low-resolution images\n input_high_resolution = Input(shape=high_resolution_shape)\n input_low_resolution = Input(shape=low_resolution_shape)\n\n # Generate high-resolution images from low-resolution images\n generated_high_resolution_images = generator(input_low_resolution)\n\n # Extract feature maps of the generated images\n features = vgg(generated_high_resolution_images)\n\n # Make the discriminator network as non-trainable\n discriminator.trainable = False\n\n # Get the probability of generated high-resolution images\n probs = discriminator(generated_high_resolution_images)\n\n # Create and compile an adversarial model\n adversarial_model = Model([input_low_resolution, input_high_resolution], [probs, features])\n adversarial_model.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1e-3, 1], optimizer=common_optimizer)\n\n # Add Tensorboard\n tensorboard = TensorBoard(log_dir=\"logs/\".format(time.time()))\n tensorboard.set_model(generator)\n tensorboard.set_model(discriminator)\n\n for epoch in tqdm(range(epochs)):\n # print(\"Epoch:{}\".format(epoch))\n\n \"\"\"\n Train the discriminator network\n \"\"\"\n\n # Sample a batch of images\n high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,\n low_resolution_shape=low_resolution_shape,\n high_resolution_shape=high_resolution_shape)\n # Normalize images\n high_resolution_images = high_resolution_images / 127.5 - 1.\n low_resolution_images = low_resolution_images / 127.5 - 1.\n\n # Generate high-resolution images from low-resolution images\n generated_high_resolution_images = generator.predict(low_resolution_images)\n\n # Generate batch of real and fake labels\n real_labels = np.ones((batch_size, 16, 16, 1))\n fake_labels = np.zeros((batch_size, 16, 16, 1))\n\n # Train the discriminator network on real and fake images\n d_loss_real = discriminator.train_on_batch(high_resolution_images, real_labels)\n d_loss_fake = discriminator.train_on_batch(generated_high_resolution_images, fake_labels)\n\n # Calculate total discriminator loss\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n # print(\"d_loss:\", d_loss)\n\n \"\"\"\n Train the generator network\n \"\"\"\n\n # Sample a batch of images\n high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,\n low_resolution_shape=low_resolution_shape,\n high_resolution_shape=high_resolution_shape)\n # Normalize images\n high_resolution_images = high_resolution_images / 127.5 - 1.\n low_resolution_images = 
low_resolution_images / 127.5 - 1.\n\n # Extract feature maps for real high-resolution images\n image_features = vgg.predict(high_resolution_images)\n\n # Train the generator network\n g_loss = adversarial_model.train_on_batch([low_resolution_images, high_resolution_images], [real_labels, image_features])\n\n # print(\"g_loss:\", g_loss)\n\n # Write the losses to Tensorboard\n # write_log(tensorboard, 'g_loss', g_loss[0], epoch)\n # write_log(tensorboard, 'd_loss', d_loss[0], epoch)\n\n # Sample and save images after every 100 epochs\n if epoch % 100 == 0:\n high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,\n low_resolution_shape=low_resolution_shape,\n high_resolution_shape=high_resolution_shape)\n # Normalize images\n high_resolution_images = high_resolution_images / 127.5 - 1.\n low_resolution_images = low_resolution_images / 127.5 - 1.\n\n generated_images = generator.predict_on_batch(low_resolution_images)\n ps = compute_psnr(high_resolution_images, generated_images)\n ss = compute_ssim(high_resolution_images, generated_images)\n print(\"-\"*15)\n print(\"Epoch:{}\".format(epoch))\n print(f\"D_loss : {d_loss}\") \n print(f\"G_loss : {g_loss}\") \n print(f\"PSNR : {np.around(ps,decimals=2)}\") \n print(f\"SSIM: {np.around(ss,decimals=2)}\")\n\n #***************************************\n # Store into list\n #***************************************\n losses['d_history'].append(d_loss)\n g_loss = 0.5 * (g_loss[1])\n losses['g_history'].append(g_loss)\n psnr['psnr_quality'].append(ps)\n ssim['ssim_quality'].append(ss)\n\n\n for index, img in enumerate(generated_images):\n img = np.mean(img, axis=2)\n save_images(low_resolution_images[index], high_resolution_images[index], img, path=\"/content/results/img_{}_{}\".format(epoch, index), psnr=ps, ssim=ss)\n # gn_im = np.squeeze(img).astype(np.float16)\n # hr_im = high_resolution_images[index].astype(np.float16)\n # lr_im = low_resolution_images[index].astype(np.float16) \n # psnr = psnr(hr_im,gn_im).numpy()\n # ssim_Score = ssim(hr_im,gn_im, multichannel=True)\n # print(\"PSNR : \", psnr)\n # print(\"SSIM Loss : \", ssim_Score)\n \n \n # plt.imshow(np.squeeze(img), cmap = plt.get_cmap(name = 'gray'))\n # plt.axis('off')\n # plt.savefig(f\"PR/im_PR_{epoch}_{index}.png\", dpi=100, pad_inches=0.0, bbox_inches='tight')\n # plt.clf()\n\n # plt.imshow(high_resolution_images[index])\n # plt.axis('off')\n # plt.savefig(f\"HR/im_HR_{epoch}_{index}.png\", dpi=100, pad_inches=0.0, bbox_inches='tight')\n # plt.clf()\n\n # plt.imshow(low_resolution_images[index])\n # plt.axis('off')\n # plt.savefig(f\"LR/im_LR_{epoch}_{index}.png\", dpi=100, pad_inches=0.0, bbox_inches='tight')\n # plt.clf()\n\n \n\n # Save models\n generator.save_weights(\"generator.h5\")\n discriminator.save_weights(\"discriminator.h5\")\n\nif mode == 'predict':\n # Build and compile the discriminator network\n discriminator = build_discriminator()\n\n # Build the generator network\n generator = build_generator()\n\n # Load models\n generator.load_weights(\"generator.h5\")\n discriminator.load_weights(\"discriminator.h5\")\n\n # Get 10 random images\n high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=10,\n low_resolution_shape=low_resolution_shape,\n high_resolution_shape=high_resolution_shape)\n # Normalize images\n high_resolution_images = high_resolution_images / 127.5 - 1.\n low_resolution_images = low_resolution_images / 127.5 - 1.\n\n # Generate high-resolution images from low-resolution 
images\n generated_images = generator.predict_on_batch(low_resolution_images)\n # generated_images = cv2.cvtColor(generated_images, cv2.COLOR_BGR2GRAY)\n \n # Save images\n for index, img in enumerate(generated_images):\n img = np.mean(img, axis=2)\n # save_images(low_resolution_images[index], high_resolution_images[index], img, path=\"/content/results/gen_{}\".format(index))",
"_____no_output_____"
],
[
"plot_loss(losses)\nplot_psnr(psnr)\nplot_ssim(ssim)",
"_____no_output_____"
],
[
"!zip -r results.zip /content/results",
" adding: content/results/ (stored 0%)\n adding: content/results/img_400_0.png (deflated 10%)\n adding: content/results/img_700_0.png (deflated 18%)\n adding: content/results/img_900_0.png (deflated 21%)\n adding: content/results/img_0_0.png (deflated 6%)\n adding: content/results/img_800_0.png (deflated 11%)\n adding: content/results/img_300_0.png (deflated 14%)\n adding: content/results/img_500_0.png (deflated 13%)\n adding: content/results/img_200_0.png (deflated 15%)\n adding: content/results/img_600_0.png (deflated 15%)\n adding: content/results/img_100_0.png (deflated 11%)\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f23e58369c19830fe1b4766d33ffbf99d8f51a | 266,431 | ipynb | Jupyter Notebook | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw | 385e2e694a74cd986c9e0855e18d4645e598a36f | [
"BSD-3-Clause"
] | 23 | 2019-10-02T19:16:07.000Z | 2022-03-09T23:59:17.000Z | legacy/clase20/01_Cython.ipynb | leliel12/diseno_sci_sfw | 385e2e694a74cd986c9e0855e18d4645e598a36f | [
"BSD-3-Clause"
] | null | null | null | legacy/clase20/01_Cython.ipynb | leliel12/diseno_sci_sfw | 385e2e694a74cd986c9e0855e18d4645e598a36f | [
"BSD-3-Clause"
] | 7 | 2020-09-16T23:29:46.000Z | 2022-02-18T17:49:59.000Z | 110.506429 | 47,384 | 0.762216 | [
[
[
"\n# Diseño de software para cómputo científico\n\n----\n\n## Unidad 5: Integración con lenguajes de alto nivel con bajo nivel.\n",
"_____no_output_____"
],
[
"## Agenda de la Unidad 5\n\n- JIT (Numba)\n- **Cython.**\n- Integración de Python con FORTRAN.\n- Integración de Python con C. \n\n",
"_____no_output_____"
],
[
"## Recapitulando\n\n- Escribimos el código Python.\n- Pasamos todo a numpy.\n- Hicimos profile.\n- Paralelisamos (joblib/dask).\n- Hicimos profile.\n- Usamos Numba.\n- Hicimos profile.\n- **Si llegamos acá** Cython\n",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"# vamos a hacer profiling\nimport timeit\nimport math\n\n# vamos a plotear\n%matplotlib inline \nimport matplotlib.pyplot as plt\n\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Numba vs Cython\n\n- Cython es un compilador estático/optimizador tanto para el lenguaje de programación Python como para el extenciones en Cython. \n- Hace que escribir extensiones C para Python sea tan \"\"fácil\"\" como el propio Python.\n- En lugar de analizar bytecode y generar IR, Cython usa un superconjunto de sintaxis de Python que luego se traduce en código C (Se escribe código C con sintaxis Python).\n- A diferencia de usar C, **generalmente** no hay que preocuparse las llamadas de bajo nivel de Python (esto se expande automáticamente a un código C por Cython).\n- A diferencia de Numba, todo el código debe estar separado en archivos especiales (`*.pyx`). \n- Cython analiza y traduce dichos archivos a código C y luego lo compila utilizando el compilador C proporcionado.\n\n![image.png](attachment:image.png)",
"_____no_output_____"
],
[
"### Por qué preferimos Numba/JIT sobre Cython?\n\n- Curva de aprendizaje (Es otro lenguaje)\n- Necesitas *algo* de experiencia en C **Y** Python\n- El paquete se vuelve un poco complejo.\n- Todo código Python es Cython válido\n\n### Por qué preferimos Cython sobre Numba/C/Fortran?\n\nFacil interaccion con librerias C/C++ y integración total con objetos y clases python",
"_____no_output_____"
],
[
"## Ejemplo - Mandelbrot Fractal Python Puro",
"_____no_output_____"
]
],
[
[
"def mandel(x, y, max_iters):\n \"\"\"\n Given the real and imaginary parts of a complex number,\n determine if it is a candidate for membership in the Mandelbrot\n set given a fixed number of iterations.\n \"\"\"\n i = 0\n c = complex(x,y)\n z = 0.0j\n for i in range(max_iters):\n z = z * z + c\n if (z.real * z.real + z.imag * z.imag) >= 4:\n return i\n return 255\n\ndef create_fractal(min_x, max_x, min_y, max_y, image, iters):\n height = image.shape[0]\n width = image.shape[1]\n\n pixel_size_x = (max_x - min_x) / width\n pixel_size_y = (max_y - min_y) / height\n for x in range(width):\n real = min_x + x * pixel_size_x\n for y in range(height):\n imag = min_y + y * pixel_size_y\n color = mandel(real, imag, iters)\n image[y, x] = color\n\n return image",
"_____no_output_____"
]
],
[
[
"## Ejemplo - Mandelbrot Fractal Python Puro",
"_____no_output_____"
]
],
[
[
"# creamos la imagen\nimage = np.zeros((500 * 2, 750 * 2), dtype=np.uint8)\n\n# ejecutamos los calculos\nnormal = %timeit -o create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)\n\n# mostramos todo\nplt.imshow(image, cmap=\"viridis\");",
"4.09 s ± 22.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
],
[
[
"## Ejemplo - Mandelbrot Fractal Cython",
"_____no_output_____"
]
],
[
[
"!pip install Cython",
"Requirement already satisfied: Cython in /home/juan/proyectos/dis_ssw/lib/python3.8/site-packages (0.29.21)\r\n"
],
[
"%load_ext Cython",
"_____no_output_____"
],
[
"%%cython --annotate\n\ndef mandel(x, y, max_iters):\n \"\"\"\n Given the real and imaginary parts of a complex number,\n determine if it is a candidate for membership in the Mandelbrot\n set given a fixed number of iterations.\n \"\"\"\n i = 0\n c = complex(x,y)\n z = 0.0j\n for i in range(max_iters):\n z = z * z + c\n if (z.real * z.real + z.imag * z.imag) >= 4:\n return i\n return 255\n\ndef create_fractal(min_x, max_x, min_y, max_y, image, iters):\n height = image.shape[0]\n width = image.shape[1]\n\n pixel_size_x = (max_x - min_x) / width\n pixel_size_y = (max_y - min_y) / height\n for x in range(width):\n real = min_x + x * pixel_size_x\n for y in range(height):\n imag = min_y + y * pixel_size_y\n color = mandel(real, imag, iters)\n image[y, x] = color\n\n return image",
"_____no_output_____"
]
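,
[
"%%cython\n# Added sketch (not from the original course notebook): the same kernel with\n# static C types added, which is where Cython usually starts to pay off. The\n# name mandel_typed is new and only for illustration.\ncpdef int mandel_typed(double x, double y, int max_iters):\n    cdef int i\n    cdef double complex c = x + y * 1j\n    cdef double complex z = 0\n    for i in range(max_iters):\n        z = z * z + c\n        if (z.real * z.real + z.imag * z.imag) >= 4:\n            return i\n    return 255\n\nprint(mandel_typed(-0.5, 0.0, 20))  # same semantics as mandel(-0.5, 0.0, 20)",
"_____no_output_____"
]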
],
[
[
"## Ejemplo - Mandelbrot Fractal Cython",
"_____no_output_____"
]
],
[
[
"# creamos la imagen\nimage = np.zeros((500 * 2, 750 * 2), dtype=np.uint8)\n\n# ejecutamos los calculos\nnormal = %timeit -o create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)\n\n# mostramos todo\nplt.imshow(image, cmap=\"viridis\");",
"3.41 s ± 64.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
],
[
[
"## Cython Hello World 1/2\n\n- Como Cython puede aceptar casi cualquier archivo fuente de Python válido, una de las cosas más difíciles para comenzar es descubrir cómo compilar su extensión.\n- Entonces, comencemos con el hola-mundo canónico de Python:\n\n```python \n## helloworld.pyx\nprint(\"Hello World\")\n```\n\n- Pueden ver el código resultante con \n - `cython -3 helloworld.pyx`,\n - o `cython -3 helloworld.pyx -cplus`",
"_____no_output_____"
],
[
"## Cython Hello World 2/2\n\n- Y en `setup.py`\n\n```python\nfrom distutils.core import setup\nfrom Cython.Build import cythonize\nsetup(\n ...\n ext_modules=cythonize(\"helloworld.pyx\"))\n```\n- Ejecutar `python setup.py build_ext --inplace`\n- Probamos con `python -c \"import helloworld\"`",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.insert(0, \"./cython\")\n\nimport helloworld\nhelloworld.__file__",
"Hello World\n"
]
],
[
[
"## Cython - Números Primos",
"_____no_output_____"
]
],
[
[
"%%cython\ndef primes(int nb_primes):\n cdef int n, i, len_p\n cdef int p[1000] \n if nb_primes > 1000:\n nb_primes = 1000\n len_p = 0 # The current number of elements in p.\n n = 2\n while len_p < nb_primes:\n # Is n prime?\n for i in p[:len_p]:\n if n % i == 0:\n break\n # If no break occurred in the loop, we have a prime.\n else:\n p[len_p] = n\n len_p += 1\n n += 1\n # Let's return the result in a python list:\n result_as_list = [prime for prime in p[:len_p]]\n return result_as_list",
"_____no_output_____"
],
[
"print(primes(100))",
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541]\n"
]
],
[
[
"## Cython - Números Primos - Numpy",
"_____no_output_____"
]
],
[
[
"%%cython\nimport numpy as np # importar donde vas a compilar\n\ndef primes_np(int nb_primes):\n \n # Memoryview on a NumPy array\n narr = np.empty(nb_primes, dtype=np.dtype(int))\n cdef long [:] narr_view = narr\n \n cdef long len_p = 0 # The current number of elements in p.\n cdef long n = 2\n \n while len_p < nb_primes:\n # Is n prime?\n for i in narr_view[:len_p]:\n if n % i == 0:\n break\n # If no break occurred in the loop, we have a prime.\n else:\n narr_view[len_p] = n\n len_p += 1\n n += 1\n return narr",
"_____no_output_____"
],
[
"print(primes_np(2000))",
"[ 2 3 5 ... 17383 17387 17389]\n"
]
],
[
[
"## Cython - Números Primos - Profiling\n",
"_____no_output_____"
]
],
[
[
"%%cython --annotate\nimport numpy as np # importar donde vas a compilar\n\ncdef primes_np(unsigned int nb_primes):\n \n # Memoryview on a NumPy array\n narr = np.empty(nb_primes, dtype=np.dtype(int))\n cdef long [:] narr_view = narr\n \n cdef long len_p = 0 # The current number of elements in p.\n cdef long n = 2\n \n while len_p < nb_primes:\n # Is n prime?\n for i in narr_view[:len_p]:\n if n % i == 0:\n break\n # If no break occurred in the loop, we have a prime.\n else:\n narr_view[len_p] = n\n len_p += 1\n n += 1\n return narr",
"_____no_output_____"
]
],
[
[
"### Y si usamos la librería vector de C++",
"_____no_output_____"
]
],
[
[
"%%cython --cplus\n\nfrom libcpp.vector cimport vector\n\ndef primes_cpp(unsigned int nb_primes):\n cdef int n, i\n cdef vector[int] p\n p.reserve(nb_primes) # allocate memory for 'nb_primes' elements.\n\n n = 2\n while p.size() < nb_primes: # size() for vectors is similar to len()\n for i in p:\n if n % i == 0:\n break\n else:\n p.push_back(n) # push_back is similar to append()\n n += 1\n\n # Vectors are automatically converted to Python\n # lists when converted to Python objects.\n return p",
"_____no_output_____"
]
],
[
[
"## Benchmarks",
"_____no_output_____"
]
],
[
[
"%timeit primes(1000)\n%timeit primes_np(1000)\n%timeit primes_cpp(1000)",
"2.3 ms ± 58.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n113 ms ± 1.35 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n2.29 ms ± 19.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
],
[
[
"## Integrando C puro con Cython\n\n- Supongamos que tenemos ya escrito este super complejo codigo C en un archivo que se llama\n `hello_c.c`.\n\n```C\n#include <stdio.h>\nvoid f();\n\nvoid f() {\n printf(\"%s\", \"Hello world from a pure C function!\\n\");\n}\n```\n\nY queremos integrarlo a -Python-",
"_____no_output_____"
],
[
"Hay que hacer el wrapper `hello_cwrapper.pyx`.\n\n```cython\ncdef extern from \"hello_c.c\":\n void f()\n \ncpdef myf():\n f()\n ```\n \n Despues agregarlo al `setup.py`",
"_____no_output_____"
],
[
"## Librería externa desde el notebook",
"_____no_output_____"
]
],
[
[
"%%cython -I ./cython/\n\ncdef extern from \"hello_c.c\":\n void f()\n \ncpdef myf():\n f()",
"_____no_output_____"
],
[
"myf() ## ESTO IMPRIME SI O SI A LA CONSOLA",
"_____no_output_____"
]
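,
[
"%%cython\n# Added preview (not from the original slides) of two of the topics listed as\n# pending below: a C struct and a small cdef (extension) class.\ncdef struct Point:\n    double x\n    double y\n\ncdef class Particle:\n    cdef Point pos\n    def __init__(self, double x, double y):\n        self.pos = Point(x=x, y=y)\n    def norm2(self):\n        return self.pos.x * self.pos.x + self.pos.y * self.pos.y\n\nprint(Particle(3.0, 4.0).norm2())  # 25.0",
"_____no_output_____"
]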
],
[
[
"## Cosas que quedaron pendientes.\n\n- Structs.\n- Enums.\n- Classes.\n- Memory views.",
"_____no_output_____"
],
[
"## Finalizando\n\n- Esto **NO** es más rapido que numpy bien utilizado.\n- Copite con numba y el rendimiento es similar.\n- En general no es peor que C/C++/FORTRAN.\n- Lo mejor es que Python es Válido en Cython.\n\nY finalmente",
"_____no_output_____"
],
[
"**Todo lenguaje que tenga una ABI compatible con C puede interactuar con Cython.**\n\nOsea TODO",
"_____no_output_____"
],
[
"## Referencias\n\n- https://rushter.com/blog/numba-cython-python-optimization/\n- https://cython.org/\n- http://docs.cython.org/en/latest/src/userguide/language_basics.html\n- https://telliott99.blogspot.com/2010/12/cython-3-my-own-c-source-file.html\n- https://stackoverflow.com/questions/32528560/using-setuptools-to-create-a-cython-package-calling-an-external-c-library",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7f23f8bf1c252fe2c2324416a8c2db26446fc12 | 31,527 | ipynb | Jupyter Notebook | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday | d228369e48e42bbc252dd2fed2630cbc107e3e8a | [
"Apache-2.0"
] | null | null | null | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday | d228369e48e42bbc252dd2fed2630cbc107e3e8a | [
"Apache-2.0"
] | null | null | null | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday | d228369e48e42bbc252dd2fed2630cbc107e3e8a | [
"Apache-2.0"
] | null | null | null | 34.721366 | 1,488 | 0.505694 | [
[
[
"## Setup\nImport the standard Python Libraries that are used in this lab.\n",
"_____no_output_____"
]
],
[
[
"import boto3\nfrom time import sleep\nimport subprocess\nimport pandas as pd\nimport json\nimport time",
"_____no_output_____"
]
],
[
[
"Import sagemaker and get execution role for getting role ARN",
"_____no_output_____"
]
],
[
[
"import sagemaker\nregion = boto3.Session().region_name \nsmclient = boto3.Session().client('sagemaker')\nfrom sagemaker import get_execution_role\n\nrole_arn = get_execution_role()\nprint(role_arn)\n\n#Make sure this role has the forecast permissions set to be able to use S3",
"arn:aws:iam::226154724374:role/service-role/AmazonSageMaker-ExecutionRole-wkshop\n"
]
],
[
[
"The last part of the setup process is to validate that your account can communicate with Amazon Forecast, the cell below does just that.",
"_____no_output_____"
]
],
[
[
"session = boto3.Session(region_name='us-east-1') \nforecast = session.client(service_name='forecast') \nforecastquery = session.client(service_name='forecastquery')",
"_____no_output_____"
]
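,
[
"# Added check (not in the original lab): a cheap read-only API call that fails\n# fast if the role permissions are wrong, confirming we can reach Amazon Forecast.\nforecast.list_dataset_groups()",
"_____no_output_____"
]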
],
[
[
"## Data Prepraration",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"../data/COF_yearly_Revenue_Data.csv\", dtype = object, names=['metric_name','timestamp','metric_value'])\ndf.head(3)",
"_____no_output_____"
]
],
[
[
"Create the training set and validation set. Use the last years revenue as the validation set",
"_____no_output_____"
]
],
[
[
"# Select 1996 to 2017 in one data frame\ndf_1996_2017 = df[(df['timestamp'] >= '1995-12-31') & (df['timestamp'] <= '2017-12-31')]\n\n# Select the year 2018 seprately for validation\ndf = pd.read_csv(\"../data/COF_yearly_Revenue_Data.csv\", dtype = object, names=['metric_name','timestamp','metric_value'])\ndf_2018 = df[(df['timestamp'] >= '2018-12-31')]",
"_____no_output_____"
],
[
"df_1996_2017\n",
"_____no_output_____"
],
[
"df_2018",
"_____no_output_____"
]
],
[
[
"Now export them to CSV files and place them into your data folder.",
"_____no_output_____"
]
],
[
[
"df_1996_2017.to_csv(\"../data/cof-revenue-train.csv\", header=False, index=False)\ndf_2018.to_csv(\"../data/cof-revenue-validation.csv\", header=False, index=False)",
"_____no_output_____"
]
],
[
[
"Define the S3 bucket name where we will upload data where Amazon Forecast will pick up the data later",
"_____no_output_____"
]
],
[
[
"bucket_name = \"sagemaker-capone-forecast-useast1-03\" # Rember to change this to the correct bucket name used for Capital One\nfolder_name = \"cone\" # change this to the folder name of the user.",
"_____no_output_____"
]
],
[
[
"Upload the data to S3",
"_____no_output_____"
]
],
[
[
"s3 = session.client('s3')\nkey=folder_name+\"/cof-revenue-train.csv\"\ns3.upload_file(Filename=\"../data/cof-revenue-train.csv\", Bucket=bucket_name, Key=key)",
"_____no_output_____"
]
],
[
[
"## Creating the Dataset Group and Dataset <a class=\"anchor\" id=\"dataset\"></a>\n\nIn Amazon Forecast , a dataset is a collection of file(s) which contain data that is relevant for a forecasting task. A dataset must conform to a schema provided by Amazon Forecast. \n\nMore details about `Domain` and dataset type can be found on the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html) . For this example, we are using [METRICS](https://docs.aws.amazon.com/forecast/latest/dg/metrics-domain.html) domain with 3 required attributes `metrics_name`, `timestamp` and `metrics_value`.\n\n\nIt is importan to also convey how Amazon Forecast can understand your time-series information. That the cell immediately below does that, the next one configures your variable names for the Project, DatasetGroup, and Dataset.",
"_____no_output_____"
]
],
[
[
"DATASET_FREQUENCY = \"Y\" \nTIMESTAMP_FORMAT = \"yyyy-mm-dd\"",
"_____no_output_____"
],
[
"project = 'cof_revenue_forecastdemo'\ndatasetName= project+'_ds'\ndatasetGroupName= project +'_dsg'\ns3DataPath = \"s3://\"+bucket_name+\"/\"+key",
"_____no_output_____"
]
],
[
[
"### Create the Dataset Group",
"_____no_output_____"
]
],
[
[
"create_dataset_group_response = forecast.create_dataset_group(DatasetGroupName=datasetGroupName,\n Domain=\"METRICS\",\n )\ndatasetGroupArn = create_dataset_group_response['DatasetGroupArn']",
"_____no_output_____"
],
[
"forecast.describe_dataset_group(DatasetGroupArn=datasetGroupArn)",
"_____no_output_____"
]
],
[
[
"### Create the Schema",
"_____no_output_____"
]
],
[
[
"# Specify the schema of your dataset here. Make sure the order of columns matches the raw data files.\nschema ={\n \"Attributes\":[\n {\n \"AttributeName\":\"metric_name\",\n \"AttributeType\":\"string\"\n },\n {\n \"AttributeName\":\"timestamp\",\n \"AttributeType\":\"timestamp\"\n },\n {\n \"AttributeName\":\"metric_value\",\n \"AttributeType\":\"float\"\n }\n ]\n}",
"_____no_output_____"
]
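,
[
"# Added sanity check (not in the original lab): the schema above must match the\n# column order of the raw file, so peek at the training CSV we wrote earlier.\npd.read_csv(\"../data/cof-revenue-train.csv\", header=None,\n            names=[\"metric_name\", \"timestamp\", \"metric_value\"]).head(3)",
"_____no_output_____"
]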
],
[
[
"### Create the Dataset",
"_____no_output_____"
]
],
[
[
"response=forecast.create_dataset(\n Domain=\"METRICS\",\n DatasetType='TARGET_TIME_SERIES',\n DatasetName=datasetName,\n DataFrequency=DATASET_FREQUENCY, \n Schema = schema\n)",
"_____no_output_____"
],
[
"datasetArn = response['DatasetArn']\nforecast.describe_dataset(DatasetArn=datasetArn)",
"_____no_output_____"
]
],
[
[
"### Add Dataset to Dataset Group",
"_____no_output_____"
]
],
[
[
"forecast.update_dataset_group(DatasetGroupArn=datasetGroupArn, DatasetArns=[datasetArn])",
"_____no_output_____"
]
],
[
[
"### Create Data Import Job\n\n\nNow that Forecast knows how to understand the CSV we are providing, the next step is to import the data from S3 into Amazon Forecaast.",
"_____no_output_____"
]
],
[
[
"datasetImportJobName = 'EP_DSIMPORT_JOB_TARGET'\nds_import_job_response=forecast.create_dataset_import_job(DatasetImportJobName=datasetImportJobName,\n DatasetArn=datasetArn,\n DataSource= {\n \"S3Config\" : {\n \"Path\":s3DataPath,\n \"RoleArn\": role_arn\n } \n },\n TimestampFormat=TIMESTAMP_FORMAT\n )",
"_____no_output_____"
],
[
"ds_import_job_arn=ds_import_job_response['DatasetImportJobArn']\nprint(ds_import_job_arn)",
"arn:aws:forecast:us-east-1:457927431838:dataset-import-job/cof_revenue_forecastdemo_ds/EP_DSIMPORT_JOB_TARGET\n"
]
],
[
[
"Check the status of dataset, when the status change from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to next steps. Depending on the data size. It can take 10 mins to be **ACTIVE**. This process will take 5 to 10 minutes.",
"_____no_output_____"
]
],
[
[
"while True:\n dataImportStatus = forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)['Status']\n print(dataImportStatus)\n if dataImportStatus != 'ACTIVE' and dataImportStatus != 'CREATE_FAILED':\n sleep(30)\n else:\n break",
"CREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nCREATE_IN_PROGRESS\nACTIVE\n"
],
[
"forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)",
"_____no_output_____"
],
[
"print(\"DatasetArn: \")\nprint(datasetGroupArn)",
"DatasetArn: \narn:aws:forecast:us-east-1:457927431838:dataset-group/cof_revenue_forecastdemo_dsg\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f2450af80b9cbe979216dfec87927cf1f2d988 | 60,288 | ipynb | Jupyter Notebook | lab4/notebooks/50046-nn.ipynb | brun0vieira/psn | 870d1d99efab2ce1aed05361640fbed47e4a861b | [
"MIT"
] | null | null | null | lab4/notebooks/50046-nn.ipynb | brun0vieira/psn | 870d1d99efab2ce1aed05361640fbed47e4a861b | [
"MIT"
] | null | null | null | lab4/notebooks/50046-nn.ipynb | brun0vieira/psn | 870d1d99efab2ce1aed05361640fbed47e4a861b | [
"MIT"
] | null | null | null | 64 | 16,404 | 0.709329 | [
[
[
"import numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def update_weights(topology, weights):\n j = 0\n for i in range(len(weights)):\n if j==3:\n j=0\n if i < 3: # w1x\n topology[1][1][j][1] = weights[i]\n j+=1\n elif i < 6: #w2x\n topology[2][1][j][1] = weights[i]\n j+=1\n elif i < 9: #w3x\n topology[3][1][j][1] = weights[i]\n j+=1\n\ndef sigmoid(x):\n sig = 1 / (1 + math.exp(-x))\n return sig\n\ndef update_input(topology, x, y, classe):\n topology[0][1][0]=x\n topology[0][1][1]=y\n topology[0][1][2]=classe\n \ndef draw_plot(topology, out1, out2, classe, title):\n \n w0=topology[1][1][0][1]\n w1=topology[1][1][1][1]\n w2=topology[1][1][2][1]\n y1 = (-w0-w1)/w2\n y2 = (-w0)/w2\n x = [1, 0]\n y = [y1, y2]\n plt.plot(x, y, color='purple')\n \n for i in range(len(out1)):\n if classe[i] < 0.5:\n color = 'blue'\n else:\n color = 'red'\n plt.scatter(out1[i], out2[i], color=color,s=10)\n \n plt.title(title)\n plt.xlabel('out1')\n plt.ylabel('out2')\n plt.grid()\n plt.show()",
"_____no_output_____"
],
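[
"# Added quick check (not part of the original lab): the logistic sigmoid above\n# maps 0 to 0.5 and satisfies the symmetry sigmoid(-x) == 1 - sigmoid(x).\nprint(sigmoid(0))                   # 0.5\nprint(sigmoid(2), 1 - sigmoid(-2))  # equal by symmetry",
"_____no_output_____"
],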
[
"df = pd.read_excel(r'C:/Users/utilizador/Documents/GitHub/psn/lab4/data/ct-21-22.xlsx')",
"C:\\Users\\utilizador\\anaconda3\\lib\\site-packages\\openpyxl\\worksheet\\_reader.py:312: UserWarning: Unknown extension is not supported and will be removed\n warn(msg)\n"
],
[
"df",
"_____no_output_____"
],
[
"training_data = np.array([df['x'], df['y'], df['classe']])\ntraining_data",
"_____no_output_____"
]
],
[
[
"def nn_topology(num_layers, nodes_per_layer, connections):\n # to-do",
"_____no_output_____"
]
],
[
[
"topology = np.array([\n [ 'input',\n [\n ['x', '-'],\n ['y', '-'],\n ['classe', '-']\n ], \n ],\n [ 'n1',\n [\n ['w10', 1],\n ['w11', '-'],\n ['w12', '-'],\n 0, # delta1 \n 0 # o1\n ]\n ],\n [ 'n2',\n [\n ['w20', 1],\n ['w21', '-'],\n ['w22', '-'],\n 0, # delta2 \n 0 # o2\n ]\n ],\n [ 'n3',\n [\n ['w30', 1],\n ['w31', '-'],\n ['w32', '-'],\n 0, # delta3\n 0 # o3\n ]\n ]\n])",
"<ipython-input-6-bd2f284901f8>:1: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.\n topology = np.array([\n"
],
[
"print(topology)",
"[['input' list([['x', '-'], ['y', '-'], ['classe', '-']])]\n ['n1' list([['w10', 1], ['w11', '-'], ['w12', '-'], 0, 0])]\n ['n2' list([['w20', 1], ['w21', '-'], ['w22', '-'], 0, 0])]\n ['n3' list([['w30', 1], ['w31', '-'], ['w32', '-'], 0, 0])]]\n"
],
[
"update_weights(topology, [0.4, 1.4, 1.0, -1.5, 1, -0.5, 5.4, -8.0, -10.0])\nprint(topology)",
"[['input' list([['x', '-'], ['y', '-'], ['classe', '-']])]\n ['n1' list([['w10', 0.4], ['w11', 1.4], ['w12', 1.0], 0, 0])]\n ['n2' list([['w20', -1.5], ['w21', 1], ['w22', -0.5], 0, 0])]\n ['n3' list([['w30', 5.4], ['w31', -8.0], ['w32', -10.0], 0, 0])]]\n"
],
[
"training_data",
"_____no_output_____"
],
[
"update_input(topology, training_data[0][0], training_data[1][0], training_data[2][0])\nprint(topology)",
"[['input' list([-6.760253967, -2.438752346, 0.0])]\n ['n1' list([['w10', 0.4], ['w11', 1.4], ['w12', 1.0], 0, 0])]\n ['n2' list([['w20', -1.5], ['w21', 1], ['w22', -0.5], 0, 0])]\n ['n3' list([['w30', 5.4], ['w31', -8.0], ['w32', -10.0], 0, 0])]]\n"
],
[
"def calculate_ssod(topology): # calculate sums, sigmoids, outputs and deltas\n sum1 = 1*topology[1][1][0][1] + topology[0][1][0]*topology[1][1][1][1] + topology[0][1][1]*topology[1][1][2][1]\n sig1 = sigmoid(sum1)\n out1 = 1 / (1 + math.exp(-sum1))\n \n sum2 = 1*topology[2][1][0][1] + topology[0][1][0]*topology[2][1][1][1] + topology[0][1][1]*topology[2][1][2][1]\n sig2 = sigmoid(sum2)\n out2 = 1 / (1 + math.exp(-sum2))\n \n sum3 = 1*topology[3][1][0][1] + out1*topology[3][1][1][1] + out2*topology[3][1][2][1]\n sig3 = sigmoid(sum3)\n out3 = 1 / (1 + math.exp(-sum3))\n \n delta3 = out3*(1-out3)*(topology[0][1][2] - out3)\n delta1 = out1*(1-out1)*topology[3][1][1][1]*delta3\n delta2 = out2*(1-out2)*topology[3][1][2][1]*delta3\n \n return np.array([['sums', [sum1, sum2, sum3]], ['sigmoids', [sig1, sig2, sig3]], ['outputs', [out1, out2, out3]], ['deltas', [delta1, delta2, delta3]]])\n\nbackpropagation = calculate_ssod(topology)\nprint('sum1, sum2, sum3: ')\nprint(backpropagation[0][1])\nprint('\\nsig1, sig2, sig3: ')\nprint(backpropagation[1][1])\nprint('\\nout1, out2, out3: ')\nprint(backpropagation[2][1])\nprint('\\ndelta1, delta2, delta3: ')\nprint(backpropagation[3][1])\n\n",
"sum1, sum2, sum3: \n[-11.503107899799998, -7.040877794000001, 5.3911732889439605]\n\nsig1, sig2, sig3: \n[1.0098557173726497e-05, 0.0008745922598650376, 0.9954640446810432]\n\nout1, out2, out3: \n[1.0098557173726497e-05, 0.0008745922598650376, 0.9954640446810432]\n\ndelta1, delta2, delta3: \n[3.6313227823143915e-07, 3.9277655553302354e-05, -0.004494898864430347]\n"
],
[
"print('topology:')\nprint(topology)\nprint('\\nbackpropagation:')\nprint(backpropagation)",
"topology:\n[['input' list([-6.760253967, -2.438752346, 0.0])]\n ['n1' list([['w10', 0.4], ['w11', 1.4], ['w12', 1.0], 0, 0])]\n ['n2' list([['w20', -1.5], ['w21', 1], ['w22', -0.5], 0, 0])]\n ['n3' list([['w30', 5.4], ['w31', -8.0], ['w32', -10.0], 0, 0])]]\n\nbackpropagation:\n[['sums'\n list([-11.503107899799998, -7.040877794000001, 5.3911732889439605])]\n ['sigmoids'\n list([1.0098557173726497e-05, 0.0008745922598650376, 0.9954640446810432])]\n ['outputs'\n list([1.0098557173726497e-05, 0.0008745922598650376, 0.9954640446810432])]\n ['deltas'\n list([3.6313227823143915e-07, 3.9277655553302354e-05, -0.004494898864430347])]]\n"
],
[
"def calculate_new_weights(topology, backpropagation, learning_factor):\n for i in range(3):\n for j in range(3):\n if j == 0:\n inputW=1\n elif i==2: # no caso do 3º neuronio, o input é o out1 ou out2\n inputW = backpropagation[2][1][j-1]\n else:\n inputW = topology[0][1][j-1]\n topology[i+1][1][j][1] += learning_factor*backpropagation[3][1][i]*inputW\n\ncalculate_new_weights(topology, backpropagation, 0.1)\nprint(topology)",
"[['input' list([-6.760253967, -2.438752346, 0.0])]\n ['n1'\n list([['w10', 0.40000003631322784], ['w11', 1.3999997545133576], ['w12', 0.9999999114410305], 0, 0])]\n ['n2'\n list([['w20', -1.4999960722344448], ['w21', 0.9999734473073232], ['w22', -0.5000095788474626], 0, 0])]\n ['n3'\n list([['w30', 5.399550510113557], ['w31', -8.0000000045392], ['w32', -10.000000393120375], 0, 0])]]\n"
],
[
"# reset à topologia\ntopology = np.array([\n [ 'input',\n [\n ['x', '-'],\n ['y', '-'],\n ['classe', '-']\n ], \n ],\n [ 'n1',\n [\n ['w10', 1],\n ['w11', '-'],\n ['w12', '-'],\n 0, # delta1 \n 0 # o1\n ]\n ],\n [ 'n2',\n [\n ['w20', 1],\n ['w21', '-'],\n ['w22', '-'],\n 0, # delta2 \n 0 # o2\n ]\n ],\n [ 'n3',\n [\n ['w30', 1],\n ['w31', '-'],\n ['w32', '-'],\n 0, # delta3\n 0 # o3\n ]\n ]\n])\nupdate_weights(topology, [0.4, 1.4, 1.0, -1.5, 1, -0.5, 5.4, -8.0, -10.0])\nprint(topology)",
"[['input' list([['x', '-'], ['y', '-'], ['classe', '-']])]\n ['n1' list([['w10', 0.4], ['w11', 1.4], ['w12', 1.0], 0, 0])]\n ['n2' list([['w20', -1.5], ['w21', 1], ['w22', -0.5], 0, 0])]\n ['n3' list([['w30', 5.4], ['w31', -8.0], ['w32', -10.0], 0, 0])]]\n"
],
[
"epoch = 1\nerror = 1\ntopology_not_fitted = topology\nupdate_input(topology_not_fitted, training_data[0][0], training_data[1][0], training_data[2][0])\nout1_not_fitted = []\nout2_not_fitted = []\nout3_not_fitted = []\n\nwhile(error != 0):\n error = 0\n out1 = []\n out2 = []\n out3 = []\n for i in range(len(training_data[0])):\n \n update_input(topology, training_data[0][i], training_data[1][i], training_data[2][i])\n backpropagation = calculate_ssod(topology)\n calculate_new_weights(topology, backpropagation, 0.1)\n\n out1.append(backpropagation[2][1][0])\n out2.append(backpropagation[2][1][1])\n out3.append(backpropagation[2][1][2])\n \n if round(backpropagation[2][1][2]) != training_data[2][i]:\n error = 1\n \n if epoch==1:\n out1_not_fitted = out1\n out2_not_fitted = out2\n out3_not_fitted = out3\n \n print('epoch:' + str(epoch))\n epoch += 1",
"epoch:1\nepoch:2\nepoch:3\nepoch:4\nepoch:5\nepoch:6\nepoch:7\nepoch:8\nepoch:9\nepoch:10\nepoch:11\nepoch:12\nepoch:13\nepoch:14\nepoch:15\nepoch:16\nepoch:17\nepoch:18\nepoch:19\nepoch:20\nepoch:21\nepoch:22\nepoch:23\nepoch:24\nepoch:25\nepoch:26\nepoch:27\nepoch:28\nepoch:29\nepoch:30\nepoch:31\nepoch:32\nepoch:33\nepoch:34\nepoch:35\nepoch:36"
]
],
[
[
"# VER MELHOR O MODELO NÃO TREINADO",
"_____no_output_____"
]
],
[
[
"draw_plot(topology_not_fitted, out1_not_fitted, out2_not_fitted, out3_not_fitted, 'Modelo Não Treinado') \ndraw_plot(topology, out1, out2, out3, 'Modelo Treinado (Erro zero)') ",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f24ad9ef8d0b7d7d77273991c0aa586e9c20a9 | 8,039 | ipynb | Jupyter Notebook | notebooks/section7.ipynb | kamujun/exercise_of_deep_larning_from_scratch | 118833420ecb175f15ba4290fcd202f69eed4a3d | [
"MIT"
] | 1 | 2019-01-13T19:01:45.000Z | 2019-01-13T19:01:45.000Z | notebooks/section7.ipynb | kamujun/exercise_of_deep_larning_from_scratch | 118833420ecb175f15ba4290fcd202f69eed4a3d | [
"MIT"
] | 1 | 2017-10-26T04:56:12.000Z | 2017-10-26T09:09:37.000Z | notebooks/section7.ipynb | kamujun/exercise_of_deep_larning_from_scratch | 118833420ecb175f15ba4290fcd202f69eed4a3d | [
"MIT"
] | null | null | null | 28.710714 | 202 | 0.635029 | [
[
[
"# 7章 畳み込みニューラルネットワーク\n\n## 7.1 全体の構造\nCNNはニューラルネットワークと同様、複数のレイヤを組み合わせて作成する。CNNでは新たに「Convolutionレイヤ(畳み込み層)」と「Poolingレイヤ(プーリング層)」が登場する。\n\nこれまで出てきたニューラルネットワークは隣接する層の全てのニューロン間を結合する全結合(fully-connected)であり、Affineレイヤと言う名前で実装してきた。例として全結合のニューラルネットワークでは「Affineレイヤ→活性化関数ReLUレイヤ」の組み合わせを1層として複数層で構築し、出力層にはSoftmaxレイヤを用いていた。\n\nCNNでは「Convolutionレイヤ→ReLU→(Poolingレイヤ。省略される場合あり)」を1層として構築する。また、出力に近い層ではこれまでの「Affine→ReLU」が、出力層には「Affine→Softmax」が用いられることが一般的には多い。\n",
"_____no_output_____"
],
[
"## 7.2 畳み込み層\n\n### 7.2.1 全結合層の問題点\n全結合層では隣接する層のニューロンがすべて連結されており、出力数は任意に定めることができる。問題点としてはデータの形状が無視されてしまうことである。入力データが画像の際には縦・横・チャンネル方向の3次元形状だが、全結合層へ入力する際には一列の配列(1次元)にする必要がある。そのため空間的な近さなどの本質的な近さを無視して扱うので情報を活かす事ができていない。\n\n畳み込み層(Convolutionレイヤ)は形状を維持する。画像のデータを3次元として扱い、次の層にデータ出力することができる。CNNでは畳み込み層の入出力データを「特徴マップ(feature map)」と言う場合がある。更に、畳み込み層の入力データを「入力特徴マップ(input feature map)」、出力データを「出力特徴マップ(output feature map)」と言う。\n",
"_____no_output_____"
],
[
"### 7.2.2 畳み込み演算\n畳み込み層で行う処理は「畳み込み演算」である。畳み込み演算は入力データに対してフィルターを適用する。\n\n入力データが縦・横方向の形状を持つデータに対して、フィルターも同様に縦・横方向の次元を持たせる。例として、入力サイズが4×4、フィルターサイズが3×3、出力サイズが2×2などのようになる。文献によっては「フィルター」という単語は「カーネル」とも言われる。\n\n畳み込み演算は入力データに対してフィルターのウィンドウを一定の間隔でスライドさせながら適用する。それぞれの場所でフィルターの要素と入力の要素を乗算し、その和を求める(この計算を積和演算と呼ぶ)。結果を出力の対応する場所へ格納するプロセスをすべての場所で行なう子男tで畳み込み演算の出力を得ることが出来る。\n\nCNNにおける重みパラメータはフィルターのパラメータにあたる。また、バイアスはフィルター適用後のデータに対して加算する、一つの固定値(いずれの要素に対しても)である。\n",
"_____no_output_____"
],
[
"### 7.2.3 パディング\n\n畳み込み層の処理を行うにあたり、入力データの周囲に固定のデータ(0など)を埋めることがある。これを「パディング」という。例として4×4の入力データに対して幅1のパディングを適用するなどである。周囲を幅1ピクセル0で埋めることを言う。(パディング適用後は6×6のデータとなる)\n\nパディングを用いる理由は出力サイズを調整するためにある。4×4の入力データに3×3のフィルターを適用した場合、出力サイズは2×2となってしまう。ディープなネットワークにおいては小さくなり続けて処理できなくなってしまう。そこでパディングを用いるとデータサイズを保つことができる。",
"_____no_output_____"
],
[
"### 7.2.4 ストライド\n\nフィルターを適用する位置の感覚を「ストライド(stride)」と言う。ストライドを2とするとフィルターを適用する窓の間隔が2要素毎になる。\n\nストライドを大きくすると出力サイズが小さくなるが、パディングを用いると出力サイズは大きくなる。出力サイズの計算を考えてみる。\n入力サイズを$(H,W)$、フィルターサイズを$(FH,FW)$、出力サイズを$(OH,OW)$、パディングを$P$、ストライドを$S$とする。出力サイズは以下式で求められる。\n\n$$\nOH = \\frac{H + 2P - FH}{S} + 1 \\\\\nOH = \\frac{W + 2P - FW}{S} + 1\n$$\n\n(例)入力サイズ:(4,4)、パディング:1、ストライド:1、フィルターサイズ:(3,3)\n$$\nOH = \\frac{4+2・1-3}{1} + 1 = 4\\\\\nOH = \\frac{4+2・1-3}{1} + 1 = 4\n$$\n",
"_____no_output_____"
],
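[
"# Added illustration (not in the original chapter text): the output-size\n# formula above, checked against the worked example.\ndef conv_output_size(h, w, fh, fw, pad, stride):\n    oh = (h + 2 * pad - fh) // stride + 1\n    ow = (w + 2 * pad - fw) // stride + 1\n    return oh, ow\n\nprint(conv_output_size(4, 4, 3, 3, pad=1, stride=1))  # -> (4, 4)",
"_____no_output_____"
],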
[
"### 7.2.5 3次元データの畳み込み演算\n\n画像の場合、縦・横方向に加えてチャンネル方向も合わせた3次元データを扱う必要がある。チャンネル別にフィルターを用意して畳み込み演算を行い、すべての結果を加算して出力を得る。\n\nチャンネル数とフィルターの数は一致している必要があり、チャンネル毎のフィルターサイズは全て統一する必要がある。\n",
"_____no_output_____"
],
[
"### 7.2.6 ブロックで考える\n\n3次元の畳み込み演算はデータやフィルターを直方体のブロックで考える事ができる。多次元配列として表す時は(channel, height, width)の順に並べて書く。フィルターの場合フィルターの高さをFH(Filter Height)、横幅をFW(Filter Width)と記載する。\n\nフィルターが一つの時には出力データはチャンネル数1つの特徴マップになる。チャンネル方向にも複数持たせるためには、複数のフィルター(重み)を用いる。フィルターの重みデータは4次元データとして(output_channel, input_channel, height, width)の順に書く。\nまた、バイアスは1チャンネル毎に1つ持つため形状は(FN, 1, 1)である。\n\n",
"_____no_output_____"
],
[
"### 7.2.7 バッチ処理\n\nニューラルネットワークの処理では、入力データをひと束にまとめたバッチ処理を行っていた。畳み込み演算でも同様にバッチ処理を行なう。その為、各層を流れるデータとして4次元のデータ(batch_num, channnel, height, width)を格納する。\n\n",
"_____no_output_____"
],
[
"## 7.3 プーリング層\n\nプーリングは縦・横方向の空間を小さくする演算である。例えば2×2の領域を一つの要素に集約するような処理である。\n「Maxプーリング」は対象とする領域のサイズ無いで最大値を取る演算である。一般的にプーリングのウィンドウサイズとストライドは同じ手に設定する。\n\nプーリングにはMaxプーリングの他に、Averageプーリングなどがある。Averageプーリングは対象領域の平均を計算する。画像認識の分野においては主にMaxプーリングが使われる\n",
"_____no_output_____"
],
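[
"# Added illustration (not in the original text): 2x2 max pooling with stride 2\n# on a 4x4 input, exactly the operation described above.\nimport numpy as np\n\nx = np.arange(16).reshape(4, 4)\npooled = x.reshape(2, 2, 2, 2).max(axis=(1, 3))  # max over each 2x2 window\nprint(pooled)  # [[ 5  7]\n               #  [13 15]]",
"_____no_output_____"
],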
[
"### 7.3.1 プーリング層の特徴\n\n* 学習するパラメータが無い\nプーリング層は畳み込み層と違って学習するパラメータを持たない。(最大値を取るだけなので)\n\n* チャンネル数は変化しない\nチャンネルごとに独立して計算が行われるため、チャンネル数は変化しない。\n\n* 微小な位置変化に対してロバスト(頑健)\n入力データの小さなズレに対してプーリングは同じような結果を返す。\n\n\n\n",
"_____no_output_____"
],
[
"## 7.4 Convoultion/Poolingレイヤの実装\n\n### 7.4.1 4次元配列\nCNNで流れる4次元データが(10, 1, 28, 28)だとすると、高さ28・横幅28・1チャンネル・データが10個ある場合に対応する。\n\n以下処理でランダムデータが作成できる。\nx = np.random.rand(10, 1, 28, 28)\n",
"_____no_output_____"
],
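[
"# Runnable version of the snippet in the text above: random 4-D data shaped\n# (batch_num, channel, height, width).\nimport numpy as np\n\nx = np.random.rand(10, 1, 28, 28)\nprint(x.shape, x[0].shape)  # (10, 1, 28, 28) (1, 28, 28)",
"_____no_output_____"
],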
[
"### 7.4.2 im2colによる展開\n\n畳み込み演算をfor文で行なうと処理が遅くなってしまう(Numpyでは要素アクセスの際にfor文を使わないほうがよい)。そこでim2colという関数を用いる。\n\nim2colはフィルターにとって都合の良いように展開する関数である。入力データに対してフィルターを適用する場所の領域を横方向に1列に展開する。im2colによる展開はフィルター適用における重複要素を配列として出力するために元ブロックの要素数よりも多くなるため、メモリを多く消費してしまう。\n",
"_____no_output_____"
],
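[
"# Added sketch (not the book's full implementation): a minimal im2col for one\n# 2-D image, stride 1 and no padding, to make the row-expansion idea concrete.\nimport numpy as np\n\ndef im2col_simple(x, fh, fw):\n    h, w = x.shape\n    cols = []\n    for i in range(h - fh + 1):\n        for j in range(w - fw + 1):\n            cols.append(x[i:i+fh, j:j+fw].ravel())  # one row per filter position\n    return np.array(cols)\n\nx = np.arange(16).reshape(4, 4)\nprint(im2col_simple(x, 3, 3).shape)  # (4, 9): four 3x3 windows, flattened",
"_____no_output_____"
],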
[
"### 7.4.3 Convolutionレイヤの実装\n\nConvolutionクラスで用いるim2colの引数は以下を設定。\n\n* input_data:データ数、チャンネル、高さ、横幅の4次元配列からなる入力データ\n* filter_h:フィルターの高さ\n* filter_w:フィルターの横幅\n* stride:ストライド\n* pad:パディング\n\nConvolutionクラスは以下処理を実装する\n* __init__:初期化メソッド。 \n\tフィルターとバイアス、ストライドとパディングを受け取る\n* forward:順伝播メソッド。 \n\t初期化メソッドで定めたパラメータから出力の高さ、幅のサイズを定める。\n\tAffineレイヤと同じように計算出来るように入力データをim2colを用いて配列化する。\n\t重みもreshape( ,-1)とすることによって配列化を行なう。\n\t入力データ配列と重み配列のドット積を求め、バイアスを加算する。\n\t計算結果をtransposeを用いて整形し、返却する。\n\n### 7.4.4 Poolingレイヤの実装\n\nPoolingレイヤも同じくim2colを使って入力データを展開するが、チャンネル方向には独立である点が異なる。",
"_____no_output_____"
],
[
"## 7.5 CNNの実装\n\nネットワークの構成は「Convolution-ReLU-Pooling-Affine-ReLU-Affine-Softmax」とする。\n\n## 7.6 CNNの可視化\n\n### 7.6.1 1層目の重みの可視化\n\n今まで行ったMNISTのCNNにおける学習では1層目の重みの形状は(30,1,5,5)(サイズが5×5、チャンネルが1のフィルターが30個)である。フィルターは1チャンネルのグレー画像として可視化出来ると言うことを意味する。\n(サンプルコードch07/visualize_filter.py)\n\n学習前のフィルターはランダムに初期化されているため白黒の濃淡に規則性は無いが、学習後は規則性のある画像になっている。白から黒へグラデーションを伴って変化するフィルターや塊のある領域(「ブロブ(blob)」)を持つフィルターなど学習によって更新されていることが分かる。\n\n規則性のあるフィルターは何を見てるのかというと、エッジやブロブなどを見ている。畳み込み層のフィルターはエッジやブロブなどのプリミティブな情報を抽出することが分かる。\n\n\n### 7.6.2 階層構造による情報抽出\n\n1層目の畳み込み層ではエッジやブロブなどの低レベルな情報が抽出される。何層も重ねたCNNにおいて各層でどのような情報が抽出されるのかというと、層が深くなるに従って抽出される情報(強く反応するニューロン)はより抽象化されていく。\n\n一般物体認識(車や犬など)を行なう8層のCNNであるAlexNetは畳み込み層とプーリング層が何層も重なり、最後に全結合層を用いて結果が出力される。最初の層は単純なエッジに反応し、続いてテクスチャ、より複雑な物体のパーツへと反応するように変化している。\n",
"_____no_output_____"
],
[
"## 7.7 代表的なCNN\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f250334ba17ebb56fe2988b512eb00a3b4d634 | 1,236 | ipynb | Jupyter Notebook | notebooks/0.DownloadLeafClassificationDataset.ipynb | ThibLang/ITF712-Classification-Project | 5086607c02c22bb13abc3cd1ef4e058ea5e01762 | [
"MIT"
] | null | null | null | notebooks/0.DownloadLeafClassificationDataset.ipynb | ThibLang/ITF712-Classification-Project | 5086607c02c22bb13abc3cd1ef4e058ea5e01762 | [
"MIT"
] | null | null | null | notebooks/0.DownloadLeafClassificationDataset.ipynb | ThibLang/ITF712-Classification-Project | 5086607c02c22bb13abc3cd1ef4e058ea5e01762 | [
"MIT"
] | null | null | null | 22.071429 | 124 | 0.565534 | [
[
[
"import os\nfrom pathlib import Path\n\nfrom src.data.make_dataset import download_dataset",
"_____no_output_____"
],
[
"# Make sure the authentication token from kaggle is valid. See https://www.kaggle.com/docs/api for more information\n\n# Original competition:\n# https://www.kaggle.com/c/leaf-classification\n\n# Download the dataset\nroot_dir = Path(os.path.abspath('')).resolve().parents[0]\ndownload_dataset(root_dir, \"leaf-classification\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7f267634540753c6a1f22750f1faba330f73628 | 10,387 | ipynb | Jupyter Notebook | Netflix Recommended Movies/DSC 630 Final Code.ipynb | Lemonchasers/Lemonchasers.github.io | 9d0e9ba5e832da52ab8e82a86c65f9e3a0ad015b | [
"Unlicense"
] | null | null | null | Netflix Recommended Movies/DSC 630 Final Code.ipynb | Lemonchasers/Lemonchasers.github.io | 9d0e9ba5e832da52ab8e82a86c65f9e3a0ad015b | [
"Unlicense"
] | null | null | null | Netflix Recommended Movies/DSC 630 Final Code.ipynb | Lemonchasers/Lemonchasers.github.io | 9d0e9ba5e832da52ab8e82a86c65f9e3a0ad015b | [
"Unlicense"
] | null | null | null | 34.508306 | 135 | 0.570232 | [
[
[
"Final Code",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.sparse import csr_matrix\nfrom sklearn import metrics\nfrom sklearn.preprocessing import StandardScaler\n\n'''\nThis cell reads in the data needed for the model. The two files needed are the combined data files with the \ncustomer ratings and the movie titles files in order for the model to print out recommended movies.\n\nThen once the data is read in, we put it in a pandas dataframe for it to be easier to work with.\n'''\n\nmov_titles = pd.read_csv('movie_titles.csv', header = None, encoding = \"ISO-8859-1\")\n\nmov_titles = mov_titles.drop(columns=[1,3,4,5], axis = 1)\nmov_titles = mov_titles.rename(columns = {0:'movie_id',2:'movie_title'})\n\n \nlist_1 = []\nwith open('combined_data_1.txt', 'r') as f:\n for line in f:\n splitLine = line.split('/t')\n for item in splitLine:\n list_1.append(splitLine)\n \n# Then we needed a way to loop through and associate movie id with each record. So we append it back to the list_1\nfor x in list_1:\n for i in x:\n if ':' in i:\n a = len(i)\n y2 = i[0:a]\n y = y2.replace(\":\", \"\")\n x.append(y)\n\n\n# In this section we want to take everything in list_1 and split out the customer id, rating, and date better.\nkeys = ['customer_id','customer_rating','date','movie_id']\nnewList=[]\n\nfor x in list_1:\n movie_id = x[1]\n y = x[0]\n d = y.split(',')\n d.append(movie_id)\n newList.append(d)\n\n# Now that we have the structure by customer, how they rated the movie and all that jazz. \n# We need to get rid of the values in the list that are just the movie numbers.\nvalues = []\nfor x in newList:\n if len(x)==4:\n values.append(x)\n \n# Finally we can put it into a dataframe and start looking at our data. \ndf = pd.DataFrame(values, columns=keys)\ndf = df.replace('\\n','', regex=True)\ndf['date'] = df['date'].astype('datetime64[ns]')\ndf['customer_rating'] = df['customer_rating'].astype('float')",
"_____no_output_____"
],
[
"'''\nIn this cell, we do a left join of the ratings file and the movie titles file to replace movie id with the title of the movie.\nwe will use the df3 dataframe later in the model to output movie titles.\n'''\n\ndf_3 = df.join(mov_titles, lsuffix='movie_id', rsuffix='movie_id')\ndf_3 = df_3.drop(columns=['movie_idmovie_id'], axis = 1)",
"_____no_output_____"
],
[
"'''\nThis section of code is to create functions to run our code. The PreProcess function takes a given customer id. Then it \nfilters our dataset for the movies that customer rated. Then we get a list of just those movies and apply it back to \nthe overall dataset. This way when we run a our model, the nearest neighbors aren't the ones with many 0's for ratings.\nFrom the PreProcessing function we receive a matrix to use with filtered values necessary for modeling.\n\nThe matrix_prep function takes the processed matrix and groups it so that we get a nxm matrix where n are the customers\nand m are the movies they rated. If there is a movie a customer has not rated it gets a 0. The output is a sparse matrix \nwith these results.\n\nFinally, the Recommendation function takes the sparse matrix from the matrix_prep function, the customer id, \nand how many neighbors you want your model to have. The model is a nearestneighbor model that caluclates the \ncosine similarity between the provided customer and the other customers that rated the at least one of the\nmovies that the customer rated. \n\nThen we loop through the customers pulling out the similar customers and put this in a list. We then use this \nlist to go back and filter for these customers movies that they rated a 4 or 5. Then we grab this list of movies\nand this is the list returned.\n'''\n\ndef PreProcess(customer_id):\n \n query_index = str(customer_id) #np.random.choice(ddf_3.shape[0])\n \n customer = df[df['customer_id'] == query_index]\n\n customer_movies = customer.loc[:, (customer != 0).any(axis = 0)]\n\n movies_to_include = customer_movies['movie_id'].tolist()\n\n mask = df['movie_id'].isin(movies_to_include)\n movies_matrix_for_sim = df.loc[~mask]\n \n movies_matrix_for_sim = movies_matrix_for_sim.append(customer_movies, ignore_index=True)\n \n return movies_matrix_for_sim\n\ndef matrix_prep(movies_matrix_for_sim):\n \n ddf_2 = movies_matrix_for_sim.groupby(['customer_id', 'movie_id']).customer_rating.mean().unstack(fill_value=0)\n \n \n mat_features = csr_matrix(ddf_2.values)\n \n return mat_features\n\n \ndef Recommendation(mat_features, customer_id, n_neighbors):\n \n query_index = str(customer_id)\n\n model_knn = NearestNeighbors(metric='cosine', algorithm='brute')\n model_knn = model_knn.fit(mat_features)\n\n distances, indices = model_knn.kneighbors(ddf_2.loc[[query_index]], n_neighbors = n_neighbors)\n\n sim_customers_key = []\n sim_customers_vals = []\n for i in range(0, len(distances.flatten())):\n if i == 0: \n #key = ddf_2.index[customer_id]\n #sim_customers_key.append(key)\n pass\n else: \n val = ddf_2.index[indices.flatten()[i]]\n sim_customers_vals.append(val)\n \n mask = df_3['customer_id'].isin(sim_customers_vals)\n sim_customers = df_3.loc[~mask]\n \n #need orig customer to have filtered df_3 table\n orig_customer = df_3[df_3['customer_id'] == query_index]\n #mask = df_3['customer_id'].isin(sim_customers_key)\n #orig_customer = df_3.loc[~mask]\n \n mask = sim_customers['customer_rating'].isin([4,5])\n sim_customers = sim_customers.loc[~mask]\n \n orig_movies = orig_customer['movie_title'].values\n sim_movies = sim_customers['movie_title'].values\n \n rec_list = [i for i in sim_movies if i not in orig_movies]\n \n return rec_list\n ",
"_____no_output_____"
],
[
"'''\nThis is implementing the PreProcess function for customer 1488844.\n'''\n\nmatrix_1 = PreProcess(1488844)\n",
"_____no_output_____"
],
[
"'''\nDue to memory issues I could not run matrix_prep with the two function in it. Thus I ran them separately.\nThis is the first part of the matrix_prep function.\n'''\n\nddf_2 = matrix_1.groupby(['customer_id', 'movie_id']).customer_rating.mean().unstack(fill_value=0)",
"_____no_output_____"
],
[
"'''\nDue to memory issues I could not run matrix_prep with the two function in it. Thus I ran them separately.\nThis is the second part of the matrix_prep function.\n'''\n\nmat_features = csr_matrix(ddf_2.values)",
"_____no_output_____"
],
[
"'''\nThis is the final function running the model and saving the results for customer 1488844 with 3 neighbors.\n'''\n\nrecommended_for_1488844 = Recommendation(mat_features,1488844, 3)",
"_____no_output_____"
],
[
"'''\nThis is the firt 10 recommended movies for customer 1488844.\n'''\n\nrecommended_for_1488844[0:10]",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f267d33a5d7238eda4f1bdcef87811d7af3de7 | 44,034 | ipynb | Jupyter Notebook | k1lib/_mo/atom.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | 1 | 2021-08-11T19:10:08.000Z | 2021-08-11T19:10:08.000Z | k1lib/_mo/atom.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | k1lib/_mo/atom.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | 51.261932 | 219 | 0.624404 | [
[
[
"#export\n\"\"\"This module is for all things related to atoms, molecules and their simulations\"\"\"\nimport k1lib\nfrom typing import Dict, List\nsettings = k1lib.Settings().add(\"overOctet\", False, \"whether to allow making bonds that exceeds the octet rule\")\nk1lib.settings.add(\"mo\", settings, \"from k1lib.mo module\")\n__all__ = [\"Atom\", \"substances\", \"NoFreeElectrons\", \"OctetFull\"]",
"_____no_output_____"
],
[
"#export\nclass NoFreeElectrons(RuntimeError): pass\nclass OctetFull(RuntimeError): pass",
"_____no_output_____"
],
[
"#export\n# if Atom's gDepth is smaller than this, then it means that it has not been visited\n_depthAuto = k1lib.AutoIncrement()\n_idxAuto = k1lib.AutoIncrement()\nclass Atom:\n \"\"\"Just an atom really. Has properties, can bond to other atoms, and can\ngenerate a :class:`System` for simulation.\"\"\"\n\n def __init__(self, name:str, atomicN:int, massN:float, valenceE:int, radius:List[float]=[], octetE:int=8):\n \"\"\"Creates a new atom. Not intended to be used by the end user. If you\nwish to get a new atom, just do stuff like this::\n\n c1 = mo.C\n c2 = mo.C\n c1 == c2 # returns False, demonstrating that these are different atoms\n\nIf you wish to register new substances with the module, you can do this::\n\n genF = lambda: Atom(...)\n mo.registerSubstance(\"elementName\", genF)\n mo.elementName # should executes `genF` and returns\n\n:param name: element name (eg. \"C\")\n:param atomicN: atomic number (eg. 6)\n:param massN: atomic mass in g/mol (eg. 12)\n:param valenceE: how many valence electrons initially?\n:param radius: covalent radiuses (in pm) for single, double and triple bonds\n:param octetE: how many electrons in a full octet? Default 8, but can be 2 for H and He\"\"\"\n self.name = name; self.atomicN = atomicN; self.massN = massN\n self.ogValenceE = valenceE # original\n self.valenceE = valenceE; self.octetE = octetE; self.radius = radius\n self._bonds = [] # list of Atoms this Atom is bonded to\n self.gDepth = -1 # graph depth, for graph traversal stuff. Values will be updated from _depthAuto\n self.idx = f\"A{_idxAuto()}\" # unique value for Atoms everywhere\n # contracts:\n # - valenceE = eClouds * 2 + freeE + len(bonds) * 2\n # - valenceE <= octetE. \"<\" happens when octet not full\n # can only form a new bond if freeE >= 1. Can dec eClouds to inc freeE\n if name != \"_e\":\n self.eClouds = []; self.freeE = valenceE % 2\n for i in range(valenceE//2): self.eClouds.append(mo._e)\n else: self.eClouds = []; self.freeE = 0\n @property\n def bonds(self):\n \"\"\"List of Atoms bonded to this Atom\"\"\"\n return self._bonds\n @bonds.setter\n def bonds(self, v): self._bonds = v\n @property\n def nonHBonds(self) -> List[\"Atom\"]:\n \"\"\"All atoms this atom is bonded to, minus the Hydrogens.\"\"\"\n return [a for a in self.bonds if a.name != \"H\"]\n @property\n def HBonds(self) -> List[\"Atom\"]:\n \"\"\"All hydrogens this atom is bonded to.\"\"\"\n return [a for a in self.bonds if a.name == \"H\"]\n @property\n def uniqueBonds(self) -> List[\"Atom\"]:\n \"\"\"All unique bonds. Meaning, if there's a double bond, only return 1\natom, not 2.\"\"\"\n return list(set(self.bonds))\n @property\n def uniqueNonHBonds(self) -> List[\"Atom\"]:\n \"\"\"All unique non Hydrogen bonds.\"\"\"\n return list(set(self.nonHBonds))\n def nBonds(self, atom:\"Atom\"):\n \"\"\"Get number of bonds between this and another atom.\"\"\"\n return len([bond for bond in self.bonds if bond == atom])\n @property\n def availableBonds(self) -> int:\n \"\"\"Available bonds. This includes electron clouds, radical electrons, and\nHydrogen bonds.\"\"\"\n return len(self.eClouds) * 2 + self.freeE + len([a for a in self.bonds if a.name == \"H\"])\n def __repr__(self):\n return f\"\"\"<Atom {self.name} ({self.atomicN}), {len(self.bonds)} bonds, {self.valenceE}/{self.octetE} valence electrons, {len(self.eClouds)} electron clouds, {self.freeE} free (radical) electrons>\"\"\"",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef _show(self, g=None, gDepth=-1, H:bool=True, GVKwargs={}):\n self.gDepth = gDepth\n if not H:\n nH = len(self.HBonds); nH = \"\" if nH==0 else (\"H\" if nH == 1 else f\"H{nH}\")\n g.node(self.idx, f\"{self.name}{nH}\", **GVKwargs)\n else: g.node(self.idx, self.name, **GVKwargs)\n for atom in self.bonds:\n if atom.gDepth >= gDepth or (not H and atom.name == \"H\"): continue\n # all this complexity just to determine arrow direction\n d1 = (self.nonHBonds[0] == atom) if len(self.nonHBonds) > 0 else False\n d2 = (atom.nonHBonds[0] == self) if len(atom.nonHBonds) > 0 else False\n if d1 and d2: g(self.idx, atom.idx, dir=\"both\")\n elif d1: g(self.idx, atom.idx)\n elif d2: g(atom.idx, self.idx)\n else: g(self.idx, atom.idx, arrowhead=\"none\")\n if H: [atom._show(g, gDepth, H) for atom in self.bonds if atom.gDepth < gDepth]\n else: [atom._show(g, gDepth, H) for atom in self.nonHBonds if atom.gDepth < gDepth]\[email protected](Atom)\ndef show(self, H:bool=True):\n \"\"\"Show the molecule graph this atom is a part of. Meant for debugging\nsimple substances only, as graphs of big molecules look unwieldy. This also\nhighlights the current :class:`Atom`, and each bond is an arrow, indicating\nwhere :meth:`next` will go next.\n\n:param H: whether to display hydrogens as separate atoms, or bunched into the main atom\"\"\"\n g = k1lib.digraph(); self._show(g, _depthAuto(), H, {\"style\": \"filled\"}); return g",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef _addFreeE(self, amt:int=1):\n \"\"\"Adds free electron to atom.\"\"\"\n if amt > 1: [self._addFreeE() for i in range(amt)]\n self.freeE += 1\n if self.freeE >= 2: self.eClouds.append(mo._e); self.freeE -= 2\[email protected](Atom)\ndef _subFreeE(self, amt:int=1) -> bool:\n \"\"\"Tries to use ``amt`` free electrons. Returns successful or not.\"\"\"\n if amt > 1: [self._subFreeE() for i in range(amt)]\n elif self.freeE > 0: self.freeE -= 1\n elif len(self.eClouds) > 0:\n self.freeE += 1; self.eClouds.pop()\n else: raise RuntimeError(f\"Can't give away any more free electrons on atom {self.name}!\")",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef _makeRoom(self, nBonds:int):\n \"\"\"Tries to remove bonds with Hydrogen to make room for ``nBonds`` more bonds.\"\"\"\n nBondsToRemove = self.valenceE + nBonds - self.octetE\n if nBondsToRemove > 0:\n Hs = [bond for bond in self.bonds if bond.name == \"H\"]\n if len(Hs) >= nBondsToRemove:\n for i in range(nBondsToRemove): self.removeBond(Hs[i])\n elif not settings.overOctet:\n ans = input(f\"Can't remove Hydrogen bonds to make room for new bond! Do you want to do anyway (y/n): \")\n print(\"Btw, you can auto accept this by doing `settings.mo.overOctet = True`\")\n if ans.lower()[0] != \"y\": raise OctetFull(\"Stopping...\")\n availableE = len(self.eClouds) * 2 + self.freeE\n if availableE < nBonds: raise NoFreeElectrons(f\"Can't make room for {nBonds} new bonds on {self.name}. Only {availableE} electrons left for bonds!\")",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef __call__(self, atom:Atom, nBonds:int=1, main=False) -> Atom:\n \"\"\"Forms a bond with another atom. If valence electrons are full, will\nattempt to disconnect Hydrogens from self to make room.\n\n:param bond: number of bonds. 2 for double, 3 for triple\n:param main: whether to put this bond in front of existing bonds, to\n signify the \"main\" chain, so that it works well with :meth:`next`\n:return: self\"\"\"\n self._makeRoom(nBonds); atom._makeRoom(nBonds)\n if main: self.bonds = [atom] * nBonds + self.bonds\n else: self.bonds += [atom] * nBonds\n atom.bonds += [self] * nBonds\n self.valenceE += nBonds; self._subFreeE(nBonds)\n atom.valenceE += nBonds; atom._subFreeE(nBonds)\n return self\[email protected](Atom)\ndef bond(self, atom:Atom, nBonds:int=1, main=False) -> Atom:\n \"\"\"Like :meth:`__call__`, but returns the atom passed in instead, so you\ncan form the main loop quickly.\"\"\"\n self(atom, nBonds, main); return atom\[email protected](Atom)\ndef main(self, atom:Atom, nBonds:int=1) -> Atom:\n \"\"\"Like :meth:`bond`, but with ``main`` param defaulted to True.\"\"\"\n return self.bond(atom, nBonds, True)",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef removeBond(self, atom:\"Atom\"):\n \"\"\"Removes all bonds between this and another atom\"\"\"\n nBonds = self.nBonds(atom)\n self.bonds = [bond for bond in self.bonds if bond != atom]\n self.valenceE -= nBonds; self._addFreeE(nBonds)\n atom.bonds = [bond for bond in atom.bonds if bond != self]\n atom.valenceE -= nBonds; atom._addFreeE(nBonds)",
"_____no_output_____"
],
[
"#export\[email protected](Atom, \"next\")\ndef _next(self, offset=0, times:int=1) -> \"Atom\":\n \"\"\"Returns the next atom bonded to this. Tries to avoid going into Hydrogens.\nThis is the main way to navigate around the molecule.\n\nYou kinda have to make sure that your molecule's bonding order is appropriate by\nchoosing between :meth:`bond` and :meth:`main`. Check the bonding order with\n:meth:`show`.\n\n:param offset: if there are multiple non-Hydrogen atoms, which ones should I pick?\n:param times: how many times do you want to chain ``.next()``?\"\"\"\n if times < 0: raise RuntimeError(\"Can't do .next() with negative `times`\")\n if times == 0: return self\n atoms = self.nonHBonds + self.HBonds\n if len(atoms) == 0: return None\n _next = atoms[offset]\n if times == 1: return _next\n else: return _next.next(offset, times-1)",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef nexts(self, atoms:int=2) -> List[Atom]:\n \"\"\"Kinda like :meth:`next`, but fetches multiple atoms on the backbone.\nExample::\n\n c1, c2 = mo.CH4(mo.CH4).nexts()\"\"\"\n if atoms < 1: raise RuntimeError(f\"Zero or negative ({atoms}) number of atoms does not make sense!\")\n if atoms == 1: return [self]\n return [self, *(self.next().nexts(atoms-1))]",
"_____no_output_____"
],
[
"#export\nempiricalOrder = [\"C\", \"H\", \"O\", \"N\"]\ndef em1(e:str, n:int):\n if n == 1: return e\n else: return f\"{e}{n}\"\[email protected](Atom)\ndef _empirical(self, d:Dict[str, int], gDepth:int):\n if self.gDepth >= gDepth: return\n self.gDepth = gDepth; d[self.name] += 1\n for atom in self.bonds: atom._empirical(d, gDepth)\[email protected](Atom)\ndef empirical(self) -> str:\n \"\"\"Returns an empirical formula for the molecule this :class:`Atom` is attached to.\"\"\"\n d = k1lib.Object().withAutoDeclare(lambda: 0)\n self._empirical(d, _depthAuto()); answer = \"\"\n for e in empiricalOrder:\n if e in d: answer += em1(e,d[e]); del d[e]\n for e in d.state.keys(): answer += em1(e,d[e])\n return answer",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef _atoms(self, l, gDepth):\n if self.gDepth >= gDepth: return\n self.gDepth = gDepth; l.append(self)\n for atom in self.bonds: atom._atoms(l, gDepth)\[email protected](Atom)\ndef atoms(self) -> List[Atom]:\n \"\"\"Returns a list of Atoms in the molecule this specific Atom is attached to.\"\"\"\n l = []; self._atoms(l, _depthAuto()); return l",
"_____no_output_____"
],
[
"#export\[email protected](Atom, \"endChain\")\n@property\ndef endChain(a) -> Atom:\n \"\"\"Do a bunch of .next() until reached the end of the carbon chain.\nExample::\n\n c1 = mo.alcohol(3, 1)\n c3 = c1.endChain\n c3(mo.NH3)\n c1.show() # displays in cell\"\"\"\n lastA = None\n for i in range(200): # for loop to prevent infinite recursion\n nextA = a.next()\n if nextA == lastA: return a\n lastA = a; a = nextA",
"_____no_output_____"
],
[
"#export\[email protected](Atom)\ndef moveLastCTo2ndC(a:Atom) -> Atom:\n \"\"\"Move last carbon to 2nd carbon. Useful in constructing iso- and tert-.\"\"\"\n end = a.endChain; nearEnd = end.next()\n end.removeBond(nearEnd); nearEnd(mo.H); a.next()(mo.CH4); return a",
"_____no_output_____"
],
[
"#export\n_a = {} # dict of atoms, which will be used to patch the entire module\nclass _Mo:\n def __init__(self): self._MoWrap_dirs = []\n def registerSubstance(self, name:str, _f):\n setattr(_Mo, name, property(lambda self: _f()))\n self._MoWrap_dirs.append(name)\n def __dir__(self):\n return super().__dir__() + self._MoWrap_dirs\n pass\nmo = _Mo() # internal convenience object so that I can use the same style as the module\ndef _atom(name, *args, **kwargs):\n _a[name] = f = lambda: Atom(name, *args, **kwargs)\n mo.registerSubstance(name, f)",
"_____no_output_____"
],
[
"#export\ndef substances() -> List[str]:\n \"\"\"Get a list of builtin substances. To register new substances, check over\n:class:`Atom`.\"\"\"\n return [k for k in _a.keys() if not k.startswith(\"_\")]",
"_____no_output_____"
],
[
"#export\n# covalent radius taken from (Pyykko & Atsumi) https://chem.libretexts.org/@api/deki/pages/2182/pdf/A3%253A%2bCovalent%2bRadii.pdf?stylesheet=default\n_atom(\"_e\", 0, 0.1, 0, [25]) # electron cloud, for internal use\n_atom(\"H\", 1, 1.008, 1, [32], octetE=2)\n_atom(\"Li\", 3, 6.94, 1, [133, 124])\n_atom(\"Be\", 4, 9.0122, 2, [102, 90, 85])\n_atom(\"B\", 5, 10.81, 3, [85, 78, 73])\n_atom(\"C\", 6, 12.011, 4, [75, 67, 60])\n_atom(\"N\", 7, 14.007, 5, [71, 60, 54])\n_atom(\"O\", 8, 15.999, 6, [63, 57, 53])\n_atom(\"F\", 9, 18.998, 7, [64, 59, 53])",
"_____no_output_____"
],
[
"#export\n_atom(\"Na\", 11, 22.990, 1, [155, 160])\n_atom(\"Mg\", 12, 24.305, 2, [139, 132, 127])\n_atom(\"Al\", 13, 26.982, 3, [126, 113, 111])\n_atom(\"Si\", 14, 28.085, 4, [116, 107, 102])\n_atom(\"P\", 15, 30.974, 5, [111, 102, 94])\n_atom(\"S\", 16, 32.06, 6, [103, 94, 95])\n_atom(\"Cl\", 17, 35.45, 7, [99, 95, 93])",
"_____no_output_____"
],
[
"#export\n_atom(\"K\", 19, 39.098, 1, [196, 193])\n_atom(\"Ca\", 20, 40.078, 2, [171, 147, 133])\n_atom(\"Ga\", 31, 69.723, 3, [124, 117, 121])\n_atom(\"Ge\", 32, 72.630, 4, [121, 111, 114])\n_atom(\"As\", 33, 74.922, 5, [121, 114, 106])\n_atom(\"Se\", 34, 78.971, 6, [116, 107, 107])\n_atom(\"Br\", 35, 79.904, 7, [114, 109, 110])\n_atom(\"I\", 53, 126.9, 7, [133, 129, 125])",
"_____no_output_____"
],
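    [
     "# A minimal usage sketch, not part of the library export. Assumptions: the\n# elements registered above are available through `mo`, and the patched\n# methods (__call__, empirical) behave as documented earlier in this notebook.\nc = mo.C # a fresh carbon atom\nfor _ in range(4): c(mo.H) # bond four hydrogens onto it\nc.empirical() # expected to read 'CH4'",
     "_____no_output_____"
    ],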
[
"!../../export.py _mo/atom",
"Current dir: /home/kelvin/repos/labs/k1lib, ../../export.py\nrm: cannot remove '__pycache__': No such file or directory\nFound existing installation: k1lib 0.11\nUninstalling k1lib-0.11:\n Successfully uninstalled k1lib-0.11\nrunning install\nrunning bdist_egg\nrunning egg_info\ncreating k1lib.egg-info\nwriting k1lib.egg-info/PKG-INFO\nwriting dependency_links to k1lib.egg-info/dependency_links.txt\nwriting requirements to k1lib.egg-info/requires.txt\nwriting top-level names to k1lib.egg-info/top_level.txt\nwriting manifest file 'k1lib.egg-info/SOURCES.txt'\nreading manifest file 'k1lib.egg-info/SOURCES.txt'\nadding license file 'LICENSE'\nwriting manifest file 'k1lib.egg-info/SOURCES.txt'\ninstalling library code to build/bdist.linux-x86_64/egg\nrunning install_lib\nrunning build_py\ncreating build\ncreating build/lib\ncreating build/lib/k1lib\ncopying k1lib/_learner.py -> build/lib/k1lib\ncopying k1lib/fmt.py -> build/lib/k1lib\ncopying k1lib/_context.py -> build/lib/k1lib\ncopying k1lib/selector.py -> build/lib/k1lib\ncopying k1lib/imports.py -> build/lib/k1lib\ncopying k1lib/_baseClasses.py -> build/lib/k1lib\ncopying k1lib/_basics.py -> build/lib/k1lib\ncopying k1lib/viz.py -> build/lib/k1lib\ncopying k1lib/_higher.py -> build/lib/k1lib\ncopying k1lib/__init__.py -> build/lib/k1lib\ncopying k1lib/_monkey.py -> build/lib/k1lib\ncopying k1lib/knn.py -> build/lib/k1lib\ncopying k1lib/graphEqn.py -> build/lib/k1lib\ncopying k1lib/schedule.py -> build/lib/k1lib\ncopying k1lib/_perlin.py -> build/lib/k1lib\ncopying k1lib/kdata.py -> build/lib/k1lib\ncopying k1lib/eqn.py -> build/lib/k1lib\ncreating build/lib/k1lib/_hidden\ncopying k1lib/_hidden/hiddenFile.py -> build/lib/k1lib/_hidden\ncopying k1lib/_hidden/__init__.py -> build/lib/k1lib/_hidden\ncreating build/lib/k1lib/cli\ncopying k1lib/cli/bio.py -> build/lib/k1lib/cli\ncopying k1lib/cli/structural.py -> build/lib/k1lib/cli\ncopying k1lib/cli/modifier.py -> build/lib/k1lib/cli\ncopying k1lib/cli/gb.py -> build/lib/k1lib/cli\ncopying k1lib/cli/output.py -> build/lib/k1lib/cli\ncopying k1lib/cli/kxml.py -> build/lib/k1lib/cli\ncopying k1lib/cli/inp.py -> build/lib/k1lib/cli\ncopying k1lib/cli/mgi.py -> build/lib/k1lib/cli\ncopying k1lib/cli/grep.py -> build/lib/k1lib/cli\ncopying k1lib/cli/sam.py -> build/lib/k1lib/cli\ncopying k1lib/cli/trace.py -> build/lib/k1lib/cli\ncopying k1lib/cli/entrez.py -> build/lib/k1lib/cli\ncopying k1lib/cli/__init__.py -> build/lib/k1lib/cli\ncopying k1lib/cli/filt.py -> build/lib/k1lib/cli\ncopying k1lib/cli/utils.py -> build/lib/k1lib/cli\ncopying k1lib/cli/init.py -> build/lib/k1lib/cli\ncopying k1lib/cli/others.py -> build/lib/k1lib/cli\ncopying k1lib/cli/kcsv.py -> build/lib/k1lib/cli\ncreating build/lib/k1lib/callbacks\ncopying k1lib/callbacks/loss_accuracy.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/progress.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/limits.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/hookParam.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/profiler.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/callbacks.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/paramFinder.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/core.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/__init__.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/landscape.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/confusionMatrix.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/recorder.py -> 
build/lib/k1lib/callbacks\ncopying k1lib/callbacks/shorts.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/hookModule.py -> build/lib/k1lib/callbacks\ncreating build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/time.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/memory.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/__init__.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/io.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/computation.py -> build/lib/k1lib/callbacks/profilers\ncreating build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/accuracy.py -> build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/__init__.py -> build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/shorts.py -> build/lib/k1lib/callbacks/lossFunctions\ncreating build/lib/k1lib/_mo\ncopying k1lib/_mo/atom.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/parseM.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/substance.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/system.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/__init__.py -> build/lib/k1lib/_mo\ncreating build/bdist.linux-x86_64\ncreating build/bdist.linux-x86_64/egg\ncreating build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_learner.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/fmt.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_context.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/selector.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/imports.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_baseClasses.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_basics.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/bio.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/structural.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/modifier.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/gb.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/output.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/kxml.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/inp.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/mgi.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/grep.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/sam.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/trace.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/entrez.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/filt.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/utils.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/init.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/others.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/kcsv.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/viz.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_higher.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying 
build/lib/k1lib/__init__.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_monkey.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/atom.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/parseM.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/substance.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/system.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/knn.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/graphEqn.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/schedule.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/loss_accuracy.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/progress.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/limits.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/hookParam.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/profiler.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/callbacks.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/paramFinder.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/core.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncreating build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/time.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/memory.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/io.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/computation.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/landscape.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/confusionMatrix.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/recorder.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/shorts.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/hookModule.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncreating build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/accuracy.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/shorts.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/_perlin.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/kdata.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/_hidden/hiddenFile.py -> 
build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/_hidden/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/eqn.py -> build/bdist.linux-x86_64/egg/k1lib\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_learner.py to _learner.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/fmt.py to fmt.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_context.py to _context.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/selector.py to selector.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/imports.py to imports.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_baseClasses.py to _baseClasses.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_basics.py to _basics.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/bio.py to bio.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/structural.py to structural.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/modifier.py to modifier.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/gb.py to gb.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/output.py to output.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/kxml.py to kxml.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/inp.py to inp.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/mgi.py to mgi.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/grep.py to grep.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/sam.py to sam.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/trace.py to trace.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/entrez.py to entrez.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/filt.py to filt.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/utils.py to utils.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/init.py to init.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/others.py to others.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/kcsv.py to kcsv.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/viz.py to viz.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_higher.py to _higher.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_monkey.py to _monkey.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/atom.py to atom.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/parseM.py to parseM.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/substance.py to substance.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/system.py to system.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/knn.py to knn.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/graphEqn.py to graphEqn.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/schedule.py to schedule.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/loss_accuracy.py to 
loss_accuracy.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/progress.py to progress.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/limits.py to limits.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/hookParam.py to hookParam.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profiler.py to profiler.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/callbacks.py to callbacks.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/paramFinder.py to paramFinder.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/core.py to core.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/time.py to time.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/memory.py to memory.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/io.py to io.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/computation.py to computation.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/landscape.py to landscape.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/confusionMatrix.py to confusionMatrix.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/recorder.py to recorder.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/shorts.py to shorts.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/hookModule.py to hookModule.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/accuracy.py to accuracy.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/shorts.py to shorts.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_perlin.py to _perlin.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/kdata.py to kdata.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_hidden/hiddenFile.py to hiddenFile.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_hidden/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/eqn.py to eqn.cpython-38.pyc\ncreating build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\nzip_safe flag not set; analyzing archive contents...\ncreating dist\ncreating 'dist/k1lib-0.11-py3.8.egg' and adding 'build/bdist.linux-x86_64/egg' to it\nremoving 'build/bdist.linux-x86_64/egg' (and everything under it)\nProcessing k1lib-0.11-py3.8.egg\nCopying k1lib-0.11-py3.8.egg to /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nAdding k1lib 0.11 to easy-install.pth file\n\nInstalled 
/home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages/k1lib-0.11-py3.8.egg\nProcessing dependencies for k1lib==0.11\nSearching for dill==0.3.4\nBest match: dill 0.3.4\nAdding dill 0.3.4 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for matplotlib==3.3.2\nBest match: matplotlib 3.3.2\nAdding matplotlib 3.3.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for numpy==1.19.2\nBest match: numpy 1.19.2\nAdding numpy 1.19.2 to easy-install.pth file\nInstalling f2py script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling f2py3 script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling f2py3.8 script to /home/kelvin/anaconda3/envs/torch/bin\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for torch==1.10.0\nBest match: torch 1.10.0\nAdding torch 1.10.0 to easy-install.pth file\nInstalling convert-caffe2-to-onnx script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling convert-onnx-to-caffe2 script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling torchrun script to /home/kelvin/anaconda3/envs/torch/bin\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for kiwisolver==1.3.2\nBest match: kiwisolver 1.3.2\nAdding kiwisolver 1.3.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for cycler==0.10.0\nBest match: cycler 0.10.0\nAdding cycler 0.10.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for pyparsing==2.4.7\nBest match: pyparsing 2.4.7\nAdding pyparsing 2.4.7 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for python-dateutil==2.8.2\nBest match: python-dateutil 2.8.2\nAdding python-dateutil 2.8.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for certifi==2021.10.8\nBest match: certifi 2021.10.8\nAdding certifi 2021.10.8 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for Pillow==7.2.0\nBest match: Pillow 7.2.0\nAdding Pillow 7.2.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for typing-extensions==3.10.0.2\nBest match: typing-extensions 3.10.0.2\nAdding typing-extensions 3.10.0.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for six==1.16.0\nBest match: six 1.16.0\nAdding six 1.16.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nFinished processing dependencies for k1lib==0.11\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f26aed09c37b4ea5411d76e2740be5afb46251 | 823,210 | ipynb | Jupyter Notebook | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all | e0820a0fd32f852549ebde54425177156fdf40db | [
"Apache-2.0"
] | 1 | 2021-01-28T09:57:42.000Z | 2021-01-28T09:57:42.000Z | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | SizingLab/droneapp-legacy | b7844810107051bc4bb1861ecedc71188d09e881 | [
"Apache-2.0"
] | null | null | null | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | SizingLab/droneapp-legacy | b7844810107051bc4bb1861ecedc71188d09e881 | [
"Apache-2.0"
] | null | null | null | 1,562.068311 | 760,756 | 0.955062 | [
[
[
"## Decision trees example",
"_____no_output_____"
],
[
"Continuous output example: A prediction model that states the motor references. Different decision trees are created according to the required selection criteria.",
"_____no_output_____"
],
[
"**Step 1**: Import the required libraries.",
"_____no_output_____"
]
],
[
[
"# import numpy package for arrays and stuff \nimport numpy as np \n\n# import matplotlib.pyplot for plotting our result \nimport matplotlib.pyplot as plt \n\n# import pandas for importing csv files \nimport pandas as pd \n",
"_____no_output_____"
]
],
[
[
"Import the file `predicted_values_Dt.py` containing the decision trees algorithms",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.insert(0, 'decision trees')\n\nfrom predicted_values_DT import *",
"_____no_output_____"
]
],
[
[
"Read the dataframe for references of motors:",
"_____no_output_____"
]
],
[
[
"# import dataset \n# dataset = pd.read_csv('Data.csv') \n# alternatively open up .csv file to read data \n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\npath='./Motors/'\ndf = pd.read_csv(path+'Non-Dominated-Motors.csv', sep=';')\ndf = df[['Tnom_Nm','Kt_Nm_A','r_omn','weight_g']] # we select the first five rows\n\ndf.head()\n",
"_____no_output_____"
]
],
[
[
"Calculated values: Example. Code: Tnom=2.2 Nm,Kt=?, R=?\nCriteria:\n- Torque: select the next ref.\n- Kt: select the nearest ref.\n- Resistance: select the nearest ref.",
"_____no_output_____"
],
[
"**1D decision tree**",
"_____no_output_____"
],
[
"Torque: Once the value of the torque in the optimization code is calculated, we will create a 1D decision tree that selects the higher value.",
"_____no_output_____"
]
],
[
[
"df_X=pd.DataFrame(df.iloc[:,0]) # column torque\ndf_y=df.iloc[:,0] # column of torque\n\ndf_X=pd.DataFrame(df_X)\nxy = pd.concat([df_X,df_y],axis=1)\nsorted_xy = np.unique(xy,axis=0)\n\n#axis X\nframes=[]\nfor i in range(len(df_X.columns)):\n # a vector of supplementary points around the reference value to force the regression tree through X\n C=(np.vstack((sorted_xy[:,i]-sorted_xy[:,i].min()/1000,sorted_xy[:,i]+sorted_xy[:,i].min()/1000)).ravel('F'))\n D=np.repeat(C, 2)\n frames.append(D[:-2])\ndf_X_Next=np.column_stack(frames)\n\n#axis y \n\ndf_y1 = sorted_xy[:,-1]\ndf_y1_C1 = df_y1-df_y1.min()/100\ndf_y1_C2 = df_y1+df_y1.min()/100\nA=np.repeat(df_y1_C1, 2)\nB=np.repeat(df_y1_C2, 2)\nC=(np.vstack((A,B)).ravel('F'))\nC=(np.delete(np.delete(C,2),2))\ndf_y_Next=(C)\n\n\n\n# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)\nregressorNext = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,\n max_leaf_nodes=len(df_X_Next), min_impurity_decrease=0.0,\n min_impurity_split=None, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n random_state=None, splitter='best')\n\n# fit the regressor with X and Y data \nregressorNext.fit(df_X_Next, df_y_Next) \n\n\n# arange for creating a range of values \n# from min value of X to max value of X \n# with a difference of 0.01 between two \n# consecutive values \nX_grid = np.linspace(min(df_X_Next), max(df_X_Next), num=10000) \n\n# reshape for reshaping the data into \n# a len(X_grid)*1 array, i.e. to make \n# a column out of the X_grid values \nX_grid = X_grid.reshape((len(X_grid), 1)) \n\n# scatter plot for original data \n\nplt.scatter(df_X_Next, df_y_Next, color = 'red',label='supplementary points') \nplt.scatter(df_X ,df_y, marker='x',label='references',color='black')\n\n# specify title \nplt.title('Non dominated references torque to torque (Decision Tree Regression)') \n\n# plot predicted data \nplt.plot(X_grid, regressorNext.predict(X_grid), color = 'green', label='decision tree') \n\n# specify y axis label \nplt.ylabel('Torque [Nm]')\nplt.xlabel('Torque [Nm]')\n\n# show the plot \nplt.grid()\nplt.legend()\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"If the calculated value was 2.2 Nm, the predicted one is:",
"_____no_output_____"
]
],
[
[
"regressorNext.predict(np.array([[2.2]]))",
"_____no_output_____"
]
],
[
[
"**2D decision tree**",
"_____no_output_____"
],
[
"With this new predicted value of torque, we will estimate the best Kt constant of the catalogue.\nFor that, we construct a decision tree centered on the reference, which takes as input the torque and as output, the Kt constant:",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeRegressor\ndf_X=pd.DataFrame(df.iloc[:,0])\ndf_y=df.iloc[:,1]\n\n# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)\nregressorAver = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,\n max_leaf_nodes=len(df_X), min_impurity_decrease=0.0,\n min_impurity_split=None, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n random_state=None, splitter='best')\n\n# fit the regressor with X and Y data \nregressorAver.fit(df_X , df_y) \n\n# arange for creating a range of values \n# from min value of X to max value of X \n# with a difference of 0.0001 between two \n# consecutive values \nX_grid = np.linspace(min(df_X.values), max(df_X.values), num=10000) \n\n# reshape for reshaping the data into \n# a len(X_grid)*1 array, i.e. to make \n# a column out of the X_grid values \nX_grid = X_grid.reshape((len(X_grid), 1)) \n\n# scatter plot for original data \nplt.scatter(df_X, df_y, color = 'red', label='references') \n# plt.scatter(df_X, df_y, color = 'red') \n# plt.xlim(df_X.min(), df_X.max())\n\n# plot predicted data \nplt.plot(X_grid, regressorAver.predict(X_grid), color = 'black', label='decision tree based on average val.') \n\n# specify title \nplt.title('Non dominated references Kt to torque (Decision Tree Regression)') \n\n# specify labels \nplt.xlabel('Torque [Nm]')\nplt.ylabel('Kt [Nm/A]')\n\n# plot the legend\nplt.legend()\n# show the plot \nplt.grid()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Estimated value: (Tnom=3.2003048 Nm/A), the nearest Kt in the dataframe is:",
"_____no_output_____"
]
],
[
[
"# average_DT(df.iloc[:,0:2],df.iloc[:,2],np.array([[]]))\nregressorAver.predict(np.array([[3.2003048]]))",
"_____no_output_____"
]
],
[
[
"**3D Decision Tree**",
"_____no_output_____"
],
[
"In the file `predicted_values_DT.py` we have developed different algorithms which construct decision trees based on the previous reference (previous_DT), on the next references (next_DT) or centered on the reference (average_DT). Considering we have previously obtained the values of Kt and Tnom, a prediction of the resistance value can be deduced from the decision tree:",
"_____no_output_____"
]
],
[
[
"average_DT(df[['Tnom_Nm','Kt_Nm_A']],df['r_omn'],np.array([[3.2003048,0.05161782]]))",
"_____no_output_____"
]
],
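  [
   [
    "# Hedged sketch: `previous_DT` and `next_DT` are assumed to share `average_DT`'s\n# signature (feature dataframe, target series, query array) and to pick the\n# previous / next reference instead of the centered one.\nprevious_DT(df[['Tnom_Nm','Kt_Nm_A']],df['r_omn'],np.array([[3.2003048,0.05161782]]))\nnext_DT(df[['Tnom_Nm','Kt_Nm_A']],df['r_omn'],np.array([[3.2003048,0.05161782]]))",
    "_____no_output_____"
   ]
  ],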
[
[
"**Visualizing 3D decision tree in scikit-learn**",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image \nfrom sklearn.externals.six import StringIO \nimport pydot \nfrom sklearn import tree\n\ndf_X=df[['Tnom_Nm','Kt_Nm_A']]\ndf_y=df['r_omn']\n\n# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)\nregressorAver = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,\n max_leaf_nodes=len(df_X), min_impurity_decrease=0.0,\n min_impurity_split=None, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n random_state=None, splitter='best')\n\n# fit the regressor with X and Y data \nregressorAver.fit(df_X, df_y) \n\ndot_data = StringIO() \ntree.export_graphviz(regressorAver, out_file=dot_data, feature_names=['Torque','Kt'],\n filled=True, rounded=True,\n special_characters=True)\ngraph = pydot.graph_from_dot_data(dot_data.getvalue()) \nImage(graph[0].create_png()) ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
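 [
  "code"
 ],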
[
"markdown"
],
[
"code"
]
] |
e7f26dd38e73a2ddacbb44ccc511a7198051e1c0 | 157,174 | ipynb | Jupyter Notebook | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST | dd3439461ddae4f875c39e6a1127b66fb4e2ce96 | [
"Apache-2.0"
] | 4 | 2017-02-21T14:56:07.000Z | 2020-04-07T15:16:04.000Z | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST | dd3439461ddae4f875c39e6a1127b66fb4e2ce96 | [
"Apache-2.0"
] | null | null | null | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST | dd3439461ddae4f875c39e6a1127b66fb4e2ce96 | [
"Apache-2.0"
] | 2 | 2017-05-31T03:26:17.000Z | 2019-01-09T05:42:47.000Z | 133.651361 | 41,368 | 0.849218 | [
[
[
"## This is a sketch for Adversarial images in MNIST",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)",
"Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\nExtracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\nExtracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n"
],
[
"import seaborn as sns\nsns.set_style('white')\ncolors_list = sns.color_palette(\"Paired\", 10)",
"_____no_output_____"
]
],
[
[
"### recreate the network structure",
"_____no_output_____"
]
],
[
[
"x = tf.placeholder(tf.float32, shape=[None, 784])\ny_ = tf.placeholder(tf.float32, shape=[None, 10])\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x, [-1,28,28,1])\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\ny_pred = tf.nn.softmax(y_conv)\n\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))",
"_____no_output_____"
]
],
[
[
"### Load previous model",
"_____no_output_____"
]
],
[
[
"model_path = './MNIST.ckpt'\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\ntf.train.Saver().restore(sess, model_path)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Extract some \"2\" images from test set",
"_____no_output_____"
]
],
[
[
"index_mask = np.where(mnist.test.labels[:, 2])[0]",
"_____no_output_____"
],
[
"subset_mask = np.random.choice(index_mask, 10)",
"_____no_output_____"
],
[
"subset_mask",
"_____no_output_____"
],
[
"origin_images = mnist.test.images[subset_mask]\norigin_labels = mnist.test.labels[subset_mask]",
"_____no_output_____"
],
[
"origin_labels",
"_____no_output_____"
],
[
"prediction=tf.argmax(y_pred,1)\nprediction_val = prediction.eval(feed_dict={x: origin_images, keep_prob: 1.0}, session=sess)\nprint(\"predictions\", prediction_val)\nprobabilities=y_pred\nprobabilities_val = probabilities.eval(feed_dict={x: origin_images, keep_prob: 1.0}, session=sess)\nprint (\"probabilities\", probabilities_val)",
"predictions [2 2 2 2 2 2 2 2 2 2]\nprobabilities [[ 1.68790103e-11 1.04460350e-10 9.99990940e-01 4.87415809e-06\n 6.91035228e-14 4.10699903e-13 1.38419029e-12 7.07499055e-08\n 4.17573347e-06 2.63317731e-12]\n [ 1.50550719e-07 7.52393389e-03 9.87179160e-01 3.15451246e-07\n 5.28161833e-03 3.42207557e-07 1.41383189e-05 2.95582026e-07\n 7.38369650e-08 3.04421571e-10]\n [ 4.02434095e-14 5.18871945e-10 1.00000000e+00 7.36044212e-12\n 3.96116959e-11 9.11448532e-16 2.68945306e-15 7.65700697e-12\n 7.59310573e-15 7.34182578e-15]\n [ 1.25397404e-03 7.70974736e-07 9.96126950e-01 2.49213097e-03\n 6.36435834e-06 2.00931572e-05 1.77277016e-07 7.27009028e-05\n 1.58615767e-05 1.11465779e-05]\n [ 4.99005437e-05 1.13155475e-05 9.99935031e-01 2.62967137e-09\n 1.03853085e-06 4.65414568e-10 3.99612041e-08 2.78365087e-06\n 3.01886480e-08 3.22925263e-11]\n [ 5.71090376e-13 3.97125555e-10 1.00000000e+00 1.17869670e-08\n 1.28539714e-12 3.67561375e-14 2.48410064e-14 7.60623298e-09\n 2.59703058e-11 3.01168183e-12]\n [ 6.95616942e-09 2.31470767e-05 9.99972463e-01 2.54789541e-07\n 1.43252720e-07 1.45724433e-09 1.19073404e-10 4.00210592e-06\n 7.80533860e-09 9.53596993e-11]\n [ 4.02434095e-14 5.18871945e-10 1.00000000e+00 7.36044212e-12\n 3.96116959e-11 9.11448532e-16 2.68945306e-15 7.65700697e-12\n 7.59310573e-15 7.34182578e-15]\n [ 6.33278355e-14 8.21794410e-10 1.00000000e+00 3.18889290e-11\n 5.03287386e-12 1.52704235e-14 2.85616291e-14 3.60496827e-10\n 1.04232909e-12 1.76853720e-14]\n [ 1.90051690e-11 2.66234167e-07 9.99999762e-01 2.22919159e-08\n 6.02706843e-11 1.17165975e-13 6.59776234e-10 2.66559327e-11\n 7.27193950e-09 2.79776321e-14]]\n"
],
[
"for i in range(0, 10):\n print('correct label:', np.argmax(origin_labels[i]))\n print('predict label:', prediction_val[i])\n print('Confidence:', np.max(probabilities_val[i]))\n plt.figure(figsize=(2, 2))\n plt.axis('off')\n plt.imshow(origin_images[i].reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\n plt.show()",
"correct label: 2\npredict label: 2\nConfidence: 0.999991\n"
],
[
"target_number = 6",
"_____no_output_____"
],
[
"target_labels = np.zeros(origin_labels.shape)",
"_____no_output_____"
],
[
"target_labels[:, target_number] = 1",
"_____no_output_____"
],
[
"origin_labels",
"_____no_output_____"
],
[
"target_labels",
"_____no_output_____"
],
[
"img_gradient = tf.gradients(cross_entropy, x)[0]",
"_____no_output_____"
]
],
[
[
"### one Adversarial vs one image",
"_____no_output_____"
]
],
[
[
"eta = 0.5\niter_num = 10",
"_____no_output_____"
]
],
[
[
"### Method 1: update using the info in gradient\nThis means we will update the image based on the value of gradient, ideally, this will give us a adversarial image with less wiggle, as we only need to add a little wiggle when the gradient at that point is large.",
"_____no_output_____"
]
],
[
[
"adversarial_img = origin_images.copy()\nfor i in range(0, iter_num):\n gradient = img_gradient.eval({x: adversarial_img, y_: target_labels, keep_prob: 1.0})\n adversarial_img = adversarial_img - eta * gradient\n prediction=tf.argmax(y_pred,1)\n prediction_val = prediction.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n print(\"predictions\", prediction_val)\n probabilities=y_pred\n probabilities_val = probabilities.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n print('Confidence 2:', probabilities_val[:, 2])\n print('Confidence 6:', probabilities_val[:, 6])\n print('-----------------------------------')",
"predictions [2 2 2 2 2 2 2 2 2 2]\nConfidence 2: [ 0.99839801 0.50398463 0.99999976 0.94279677 0.99306434 0.99999869\n 0.99774051 0.99999976 0.99999988 0.99998116]\nConfidence 6: [ 6.17733331e-09 3.38034965e-02 3.61205510e-11 5.49222386e-05\n 1.65044228e-04 2.51908945e-11 4.98797135e-07 3.61205510e-11\n 8.44649004e-11 1.06398193e-06]\n-----------------------------------\npredictions [2 6 2 2 6 2 2 2 2 2]\nConfidence 2: [ 0.90054828 0.03599812 0.99992478 0.47941697 0.3857542 0.99992812\n 0.88223279 0.99992478 0.99999475 0.99883395]\nConfidence 6: [ 5.24239840e-06 9.09998178e-01 3.14857857e-07 1.03679458e-02\n 4.14035559e-01 2.03342374e-08 7.65050703e-04 3.14857573e-07\n 9.70845377e-08 6.13783835e-04]\n-----------------------------------\npredictions [3 6 2 6 6 2 2 2 2 2]\nConfidence 2: [ 0.20391738 0.02125967 0.99488431 0.12929185 0.01710233 0.99819332\n 0.36685336 0.99488431 0.99973804 0.86787164]\nConfidence 6: [ 5.72559598e-04 9.47188795e-01 2.24302203e-04 3.12704206e-01\n 9.43210959e-01 3.14465137e-06 7.00001568e-02 2.24301548e-04\n 6.08862283e-05 1.23816974e-01]\n-----------------------------------\npredictions [8 6 2 6 6 2 6 2 2 6]\nConfidence 2: [ 0.43293276 0.01552619 0.83097196 0.03268598 0.0135146 0.98310214\n 0.17826064 0.83097178 0.97425836 0.11591232]\nConfidence 6: [ 1.79927237e-02 9.61492419e-01 3.42250541e-02 7.99241543e-01\n 9.55691159e-01 1.36969538e-04 6.16287053e-01 3.42250690e-02\n 1.96619965e-02 8.76042128e-01]\n-----------------------------------\npredictions [3 6 6 6 6 2 6 6 6 6]\nConfidence 2: [ 0.17021255 0.01231071 0.19562197 0.01843761 0.01121253 0.88237929\n 0.04999156 0.19562216 0.23194622 0.06901591]\nConfidence 6: [ 0.28051642 0.9694531 0.53274441 0.88252693 0.96344072 0.00382947\n 0.86769354 0.53274429 0.73012829 0.9247852 ]\n-----------------------------------\npredictions [6 6 6 6 6 2 6 6 6 6]\nConfidence 2: [ 0.07458363 0.01019469 0.06034603 0.01337874 0.00959486 0.66686749\n 0.03255163 0.06034593 0.07704844 0.05089864]\nConfidence 6: [ 0.72089374 0.974684 0.84580153 0.91406661 0.96881437 0.0405265\n 0.91041219 0.84580171 0.89538473 0.94383085]\n-----------------------------------\npredictions [6 6 6 6 6 2 6 6 6 6]\nConfidence 2: [ 0.03893126 0.00872765 0.03884212 0.01059401 0.00841283 0.46066824\n 0.02436219 0.0388421 0.05182601 0.04104275]\nConfidence 6: [ 0.84897608 0.97832572 0.89983678 0.9321211 0.9727276 0.18205585\n 0.93081117 0.8998369 0.92495954 0.95425797]\n-----------------------------------\npredictions [6 6 6 6 6 6 6 6 6 6]\nConfidence 2: [ 0.02573399 0.00763769 0.02883839 0.0087844 0.00748532 0.29014409\n 0.01946484 0.02883845 0.03953246 0.03457938]\nConfidence 6: [ 0.89540702 0.98103446 0.92485535 0.9435631 0.97574246 0.44339713\n 0.94352108 0.92485535 0.94018751 0.9611299 ]\n-----------------------------------\npredictions [6 6 6 6 6 6 6 6 6 6]\nConfidence 2: [ 0.01902132 0.00679732 0.02307542 0.00752009 0.00675084 0.18342426\n 0.01634321 0.0230754 0.03184611 0.02982386]\nConfidence 6: [ 0.9198994 0.983105 0.93942189 0.95158327 0.97813272 0.62893689\n 0.95190406 0.93942195 0.9500286 0.96620733]\n-----------------------------------\npredictions [6 6 6 6 6 6 6 6 6 6]\nConfidence 2: [ 0.01520571 0.00613233 0.0192919 0.00655318 0.0061516 0.13245167\n 0.01406015 0.01929193 0.02656174 0.02627148]\nConfidence 6: [ 0.93462354 0.98475128 0.94931847 0.95763385 0.98007178 0.73152864\n 0.95811945 0.94931847 0.95700026 0.97002554]\n-----------------------------------\n"
]
],
[
[
"### Method 2: update using the sign of gradient\nperform some step size for each pixel ",
"_____no_output_____"
]
],
[
[
"eta = 0.02\niter_num = 10",
"_____no_output_____"
],
[
"adversarial_img = origin_images.copy()\nfor i in range(0, iter_num):\n gradient = img_gradient.eval({x: adversarial_img, y_: target_labels, keep_prob: 1.0})\n adversarial_img = adversarial_img - eta * np.sign(gradient)\n prediction=tf.argmax(y_pred,1)\n prediction_val = prediction.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n print(\"predictions\", prediction_val)\n probabilities=y_pred\n probabilities_val = probabilities.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n print('Confidence 2:', probabilities_val[:, 2])\n print('Confidence 6:', probabilities_val[:, 6])\n print('-----------------------------------')",
"predictions [2 2 2 2 2 2 2 2 2 2]\nConfidence 2: [ 0.99979955 0.86275303 1. 0.9779107 0.99902475 0.99999976\n 0.99971646 1. 1. 0.99999583]\nConfidence 6: [ 1.66726910e-10 1.24624989e-03 4.56519967e-13 8.34497041e-06\n 5.59669525e-06 1.79199841e-12 1.30735716e-08 4.56519967e-13\n 3.46567068e-12 6.27776799e-08]\n-----------------------------------\npredictions [2 2 2 2 2 2 2 2 2 2]\nConfidence 2: [ 0.99511552 0.40977556 0.99999964 0.85962117 0.98393112 0.99999559\n 0.99609464 0.99999964 0.99999964 0.99994993]\nConfidence 6: [ 2.01981152e-08 8.79419371e-02 1.22339749e-10 4.89167869e-04\n 1.19251851e-03 1.82640972e-10 1.73009698e-06 1.22339749e-10\n 5.76917680e-10 6.33407490e-06]\n-----------------------------------\npredictions [2 6 2 2 2 2 2 2 2 2]\nConfidence 2: [ 0.92691237 0.0824458 0.99998283 0.54052806 0.69164306 0.99994981\n 0.94957453 0.99998283 0.99999595 0.99876642]\nConfidence 6: [ 1.97517147e-06 7.88923085e-01 2.59027715e-08 1.52549399e-02\n 1.51991054e-01 1.05832694e-08 1.59343646e-04 2.59027715e-08\n 7.01664717e-08 5.28034056e-04]\n-----------------------------------\npredictions [3 6 2 6 6 2 2 2 2 2]\nConfidence 2: [ 0.38114282 0.00284192 0.99941409 0.21674696 0.04668415 0.99948311\n 0.68562496 0.99941409 0.99993396 0.96271199]\nConfidence 6: [ 8.61597146e-05 9.92703676e-01 5.69670192e-06 2.89392889e-01\n 8.71554732e-01 4.64192766e-07 6.55736076e-03 5.69670192e-06\n 6.37889843e-06 3.00177168e-02]\n-----------------------------------\npredictions [2 6 2 6 6 2 2 2 2 6]\nConfidence 2: [ 5.83209932e-01 6.27083209e-05 9.90212023e-01 2.70510484e-02\n 2.11280608e-03 9.95150447e-01 3.76711369e-01 9.90212023e-01\n 9.98733342e-01 4.64150667e-01]\nConfidence 6: [ 2.44543725e-03 9.99762475e-01 3.85647581e-04 8.70872498e-01\n 9.93551373e-01 1.34517468e-05 1.35343209e-01 3.85647581e-04\n 4.81195719e-04 5.04597306e-01]\n-----------------------------------\npredictions [3 6 2 6 6 2 6 2 2 6]\nConfidence 2: [ 1.45977870e-01 2.26086172e-06 8.54788423e-01 2.14479375e-03\n 8.69234063e-05 9.71471608e-01 1.03391998e-01 8.54788423e-01\n 9.68404591e-01 4.15184237e-02]\nConfidence 6: [ 3.94732542e-02 9.99990463e-01 1.52496705e-02 9.87855494e-01\n 9.99670744e-01 2.56853382e-04 7.45402575e-01 1.52496705e-02\n 2.36869231e-02 9.47378218e-01]\n-----------------------------------\npredictions [6 6 2 6 6 2 6 2 2 6]\nConfidence 2: [ 2.31417045e-01 1.05129189e-07 3.71916145e-01 1.65524441e-04\n 4.47992488e-06 8.64461243e-01 6.83465134e-03 3.71916145e-01\n 5.43019056e-01 2.49437825e-03]\nConfidence 6: [ 0.3545565 0.9999994 0.22301799 0.99881208 0.99998033 0.00355855\n 0.98034912 0.22301799 0.42559034 0.99609852]\n-----------------------------------\npredictions [6 6 6 6 6 2 6 6 6 6]\nConfidence 2: [ 1.95937138e-02 6.35231245e-09 7.78834969e-02 2.18999739e-05\n 2.25597717e-07 5.93729377e-01 3.56450648e-04 7.78834969e-02\n 3.73114012e-02 1.58468843e-04]\nConfidence 6: [ 0.85764623 1. 0.81097031 0.99987864 0.99999869 0.03135163\n 0.99828064 0.81097031 0.94914585 0.9996804 ]\n-----------------------------------\npredictions [6 6 6 6 6 2 6 6 6 6]\nConfidence 2: [ 2.98802019e-03 4.13267927e-08 6.95284083e-03 2.13227167e-06\n 1.25024888e-08 4.91525024e-01 7.30973698e-05 6.95284083e-03\n 1.61215290e-03 1.72482469e-05]\nConfidence 6: [ 0.98444527 1. 0.98080987 0.99998796 1. 
0.19622776\n 0.99981946 0.98080987 0.99635267 0.99996758]\n-----------------------------------\npredictions [6 6 6 6 6 6 6 6 6 6]\nConfidence 2: [ 2.74159829e-04 2.28510810e-09 5.19630907e-04 2.98820567e-07\n 8.52226556e-09 2.46330112e-01 4.67527661e-06 5.19630907e-04\n 1.09362918e-04 1.23258530e-06]\nConfidence 6: [ 0.99770629 1. 0.99812537 0.99999869 1. 0.58065033\n 0.99997211 0.99812537 0.99967241 0.99999702]\n-----------------------------------\n"
]
],
[
[
"### Take a look at individual image",
"_____no_output_____"
]
],
[
[
"threshold = 0.99",
"_____no_output_____"
],
[
"eta = 0.001\n\nprediction=tf.argmax(y_pred,1)\nprobabilities=y_pred\n\nadversarial_img = origin_images[1: 2].copy()\nadversarial_label = target_labels[1: 2]\nstart_img = adversarial_img.copy()\nconfidence = 0\niter_num = 0\nprob_history = list()\nwhile confidence < threshold:\n gradient = img_gradient.eval({x: adversarial_img, y_: adversarial_label, keep_prob: 1.0})\n adversarial_img -= eta * np.sign(gradient)\n probabilities_val = probabilities.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n confidence = probabilities_val[:, 6]\n prob_history.append(probabilities_val[0])\n iter_num += 1\nprint(iter_num)",
"69\n"
],
[
"sns.set_style('whitegrid')\nprob_history = np.array(prob_history)\n\nfig = plt.figure(figsize=(8, 6))\nax = fig.add_subplot(111)\n\nfor i, record in enumerate(prob_history.T):\n plt.plot(record, color=colors_list[i])\n \nax.legend([str(x) for x in range(0, 10)], \n loc='center left', bbox_to_anchor=(1.05, 0.5), fontsize=14)\nax.set_xlabel('Iteration')\nax.set_ylabel('Prediction Confidence')",
"_____no_output_____"
],
[
"sns.set_style('white')\nfig = plt.figure(figsize=(9, 4))\n\nax1 = fig.add_subplot(1,3,1)\nax1.axis('off')\nax1.imshow(start_img.reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax1.title.set_text('Confidence for 2: ' + '{:.4f}'.format(prob_history[0][2]) \n + '\\nConfidence for 6: ' + '{:.4f}'.format(prob_history[0][6]))\n\nax2 = fig.add_subplot(1,3,2)\nax2.axis('off')\nax2.imshow((adversarial_img - start_img).reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax2.title.set_text('Delta')\n\nax3 = fig.add_subplot(1,3,3)\nax3.axis('off')\nax3.imshow((adversarial_img).reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax3.title.set_text('Confidence for 2: ' + '{:.4f}'.format(prob_history[-1][2]) \n + '\\nConfidence for 6: ' + '{:.4f}'.format(prob_history[-1][6]))\n\nplt.show()\n\nprint(\"Difference Measure:\", np.sum((adversarial_img - start_img) ** 2))",
"_____no_output_____"
],
[
"eta = 0.01\n\nprediction=tf.argmax(y_pred,1)\nprobabilities=y_pred\n\nadversarial_img = origin_images[1: 2].copy()\nadversarial_label = target_labels[1: 2]\nstart_img = adversarial_img.copy()\nconfidence = 0\niter_num = 0\nprob_history = list()\nwhile confidence < threshold:\n gradient = img_gradient.eval({x: adversarial_img, y_: adversarial_label, keep_prob: 1.0})\n adversarial_img -= eta * gradient\n probabilities_val = probabilities.eval(feed_dict={x: adversarial_img, keep_prob: 1.0}, session=sess)\n confidence = probabilities_val[:, 6]\n prob_history.append(probabilities_val[0])\n iter_num += 1\nprint(iter_num)",
"109\n"
],
[
"sns.set_style('white')\nfig = plt.figure(figsize=(9, 4))\n\nax1 = fig.add_subplot(1,3,1)\nax1.axis('off')\nax1.imshow(start_img.reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax1.title.set_text('Confidence for 2: ' + '{:.4f}'.format(prob_history[0][2]) \n + '\\nConfidence for 6: ' + '{:.4f}'.format(prob_history[0][6]))\n\nax2 = fig.add_subplot(1,3,2)\nax2.axis('off')\nax2.imshow((adversarial_img - start_img).reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax2.title.set_text('Delta')\n\nax3 = fig.add_subplot(1,3,3)\nax3.axis('off')\nax3.imshow((adversarial_img).reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)\nax3.title.set_text('Confidence for 2: ' + '{:.4f}'.format(prob_history[-1][2]) \n + '\\nConfidence for 6: ' + '{:.4f}'.format(prob_history[-1][6]))\n\nplt.show()\n\nprint(\"Difference Measure:\", np.sum((adversarial_img - start_img) ** 2))",
"_____no_output_____"
],
[
"sns.set_style('whitegrid')\nprob_history = np.array(prob_history)\n\nfig = plt.figure(figsize=(8, 6))\nax = fig.add_subplot(111)\n\nfor i, record in enumerate(prob_history.T):\n plt.plot(record, color=colors_list[i])\n \nax.legend([str(x) for x in range(0, 10)], \n loc='center left', bbox_to_anchor=(1.05, 0.5), fontsize=14)\nax.set_xlabel('Iteration')\nax.set_ylabel('Prediction Confidence')",
"_____no_output_____"
]
],
[
[
"We can observe that when taking the value of gradients into account, when it comes close to local optima, the gradient is becoming small and helps us to converge",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7f27c6143eae1589d7a175814019e2f38ac5d3e | 3,048 | ipynb | Jupyter Notebook | 08-ensemble.ipynb | jm-begon/ml101 | a9fb8b7a24e8839c322095209f4a0a6383188176 | [
"BSD-3-Clause"
] | null | null | null | 08-ensemble.ipynb | jm-begon/ml101 | a9fb8b7a24e8839c322095209f4a0a6383188176 | [
"BSD-3-Clause"
] | null | null | null | 08-ensemble.ipynb | jm-begon/ml101 | a9fb8b7a24e8839c322095209f4a0a6383188176 | [
"BSD-3-Clause"
] | null | null | null | 39.584416 | 262 | 0.627625 | [
[
[
"```\n _____ _ _\n| ___| | | | |\n| |__ _ __ ___ ___ _ __ ___ | |__ | | ___\n| __| '_ \\/ __|/ _ \\ '_ ` _ \\| '_ \\| |/ _ \\\n| |__| | | \\__ \\ __/ | | | | | |_) | | __/\n\\____/_| |_|___/\\___|_| |_| |_|_.__/|_|\\___|\n```",
"_____no_output_____"
],
[
"# Motivation and conditions\nEnsembling is a technique used with complex hypothesis spaces. In such a case,\nthe learning algorithm has high variance. Therefore, averaging several models\nleads to better and more stable models.\n\nIn order to average several models, they must first be built. There is, however,\nusually only one training set. Consequently, randomness is injected in the\nlearning process, resulting in more variance. Ensembling can only work if the\ngain obtained from the variance reduction is better than the introduced\nvariance.\n\n# Bagging\nBagging is a general concept which consists in drawing bootstrap samples (ie.\nselecting with repetition) out of the training set. This somewhat reduces the\neffective number of samples (and places more weights on some). Bagging is a\nvery broad and generic method and be applied in [classification](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html)\nand [regression](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingRegressor.html)\nalike.\n\n# Forests\nWith tree-based methods, there is actually many ways to introduce randomness\nin a model. More specifically, randomness can be introduced at each stage of the\nrecursive building mechanism. Most famous methods include\n- random forests (for [classification](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) and [regression](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html));\n- extremely randomized trees (for [classification](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html) and [regression](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)).",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown"
]
] |
e7f28616bf7065f7749d16297806e5e7c64f3283 | 103,933 | ipynb | Jupyter Notebook | Chapter 4.ipynb | PacktPublishing/-Python-Your-First-Step-Toward-Data-Science-V- | f3c2747780792b0786d527fa99038afc08bada1e | [
"MIT"
] | 2 | 2020-05-19T01:31:19.000Z | 2021-10-01T22:06:06.000Z | Chapter 4.ipynb | PacktPublishing/-Python-Your-First-Step-Toward-Data-Science-V- | f3c2747780792b0786d527fa99038afc08bada1e | [
"MIT"
] | null | null | null | Chapter 4.ipynb | PacktPublishing/-Python-Your-First-Step-Toward-Data-Science-V- | f3c2747780792b0786d527fa99038afc08bada1e | [
"MIT"
] | 1 | 2020-09-03T16:16:44.000Z | 2020-09-03T16:16:44.000Z | 44.396839 | 10,848 | 0.49275 | [
[
[
"from sklearn.datasets import load_diabetes",
"_____no_output_____"
],
[
"diabetes_data = load_diabetes()",
"_____no_output_____"
],
[
"dir(diabetes_data)",
"_____no_output_____"
],
[
"print(diabetes_data.DESCR)",
"Diabetes dataset\n================\n\nNotes\n-----\n\nTen baseline variables, age, sex, body mass index, average blood\npressure, and six blood serum measurements were obtained for each of n =\n442 diabetes patients, as well as the response of interest, a\nquantitative measure of disease progression one year after baseline.\n\nData Set Characteristics:\n\n :Number of Instances: 442\n\n :Number of Attributes: First 10 columns are numeric predictive values\n\n :Target: Column 11 is a quantitative measure of disease progression one year after baseline\n\n :Attributes:\n :Age:\n :Sex:\n :Body mass index:\n :Average blood pressure:\n :S1:\n :S2:\n :S3:\n :S4:\n :S5:\n :S6:\n\nNote: Each of these 10 feature variables have been mean centered and scaled by the standard deviation times `n_samples` (i.e. the sum of squares of each column totals 1).\n\nSource URL:\nhttp://www4.stat.ncsu.edu/~boos/var.select/diabetes.html\n\nFor more information see:\nBradley Efron, Trevor Hastie, Iain Johnstone and Robert Tibshirani (2004) \"Least Angle Regression,\" Annals of Statistics (with discussion), 407-499.\n(http://web.stanford.edu/~hastie/Papers/LARS/LeastAngle_2002.pdf)\n\n"
],
[
"inputs = diabetes_data.data",
"_____no_output_____"
],
[
"output = diabetes_data.target",
"_____no_output_____"
],
[
"inputs.shape",
"_____no_output_____"
],
[
"output.shape",
"_____no_output_____"
],
[
"diabetes_data.feature_names",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.DataFrame(inputs, columns=diabetes_data.feature_names)",
"_____no_output_____"
],
[
"df = pd.concat([df, pd.DataFrame(output)], axis=1)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df.describe().style.format(\"{:.5f}\")",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.matshow(df.corr())\nplt.xticks(range(len(df.columns)), df.columns)\nplt.yticks(range(len(df.columns)), df.columns)\nplt.colorbar()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Chapter Break",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(inputs, output, test_size=0.33, random_state=42)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.pipeline import make_pipeline\n\npipe = make_pipeline(LinearRegression())\npipe.fit(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_test, y_test)",
"_____no_output_____"
],
[
"from sklearn.linear_model import Ridge\n\n# tactic 1: minimize weights, smaller the better, higher penalty on large weights\n# = ridge regression\n\npipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Ridge())\npipe.fit(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()",
"_____no_output_____"
],
[
"pipe.score(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_test, y_test)",
"_____no_output_____"
],
[
"from sklearn.linear_model import Lasso\n\n# tactic 2: minimize number of non-zero weights\n# = Lasso\n\npipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Lasso())\npipe.fit(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_test, y_test)",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()",
"_____no_output_____"
],
[
"from sklearn.linear_model import ElasticNet\n\n# tactic 3: mix lasso and ridge!\n# = elasticnet\n\npipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), ElasticNet())\npipe.fit(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_train, y_train)",
"_____no_output_____"
],
[
"pipe.score(X_test, y_test)",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_",
"_____no_output_____"
],
[
"pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()",
"_____no_output_____"
]
],
[
[
"# Understanding regression and linear regression",
"_____no_output_____"
],
[
"`np.concatenate` joins a sequence of arrays along an existing axis.\n\n`np.ones` returns a new array of given shape and type, filled with ones.\n\n`np.zeroes` return a new array of given shape and type, filled with zeroes.\n\n`np.dot` if a is an N-D array and b is a 1-D array, it is a *sum product over the last axis of a and b*.",
"_____no_output_____"
]
],
[
[
"learning_rate = 0.01\nfit_intercept = True\nweights = 0",
"_____no_output_____"
],
[
"def fit(X, y):\n global weights\n \n if fit_intercept:\n X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)\n\n weights = np.zeros(X.shape[1])\n\n # gradient descent (there are other optimizations)\n for i in range(1000): # epochs\n current_prediction = np.dot(X, weights) # linear regression\n gradient = np.dot(X.T, (current_prediction - y)) / y.size # find the gradient\n weights -= learning_rate * gradient # modify the weights using the gradient",
"_____no_output_____"
],
[
"def predict_prob(X):\n global weights\n \n if fit_intercept:\n X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)\n\n return np.dot(X, weights)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f29622bae873bf76ee7cf5db3a1310b5d671c8 | 183,905 | ipynb | Jupyter Notebook | covid_control.ipynb | thongisto/InfiniteOptTutorials | 57c5c71004bbbf765ee3ef760aaa0efac7e100e5 | [
"MIT"
] | 2 | 2021-01-13T19:22:20.000Z | 2021-01-21T05:37:22.000Z | covid_control.ipynb | thongisto/InfiniteOptTutorials | 57c5c71004bbbf765ee3ef760aaa0efac7e100e5 | [
"MIT"
] | null | null | null | covid_control.ipynb | thongisto/InfiniteOptTutorials | 57c5c71004bbbf765ee3ef760aaa0efac7e100e5 | [
"MIT"
] | 1 | 2021-01-13T16:14:26.000Z | 2021-01-13T16:14:26.000Z | 144.693155 | 33,007 | 0.649172 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f29dd51293a70251c8bfee913899c2f33a03fc | 114,138 | ipynb | Jupyter Notebook | crime_stats_compute_aea.ipynb | OpenUpSA/crime-stats-demystifed | 86560a8bfc27027db957beaa3388babb3e80332b | [
"MIT"
] | 1 | 2018-07-09T16:54:03.000Z | 2018-07-09T16:54:03.000Z | crime_stats_compute_aea.ipynb | Code4SA/crime-stats-demystifed | 86560a8bfc27027db957beaa3388babb3e80332b | [
"MIT"
] | null | null | null | crime_stats_compute_aea.ipynb | Code4SA/crime-stats-demystifed | 86560a8bfc27027db957beaa3388babb3e80332b | [
"MIT"
] | 2 | 2021-04-16T13:00:15.000Z | 2022-03-10T09:33:47.000Z | 32.592233 | 548 | 0.460565 | [
[
[
"### Notation:\n- SAL- small area\n- PP- police precinct\n- AEA- Albers Equal Area Conic\n- CPS- crime per SAL ",
"_____no_output_____"
]
],
[
[
"from random import shuffle, randint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom mpl_toolkits.basemap import Basemap\nfrom shapely.geometry import Polygon, Point, MultiPoint, MultiPolygon, LineString, mapping, shape\nfrom descartes import PolygonPatch\nimport random\nimport fiona\nimport numpy as np\nimport csv\nfrom fiona import collection\n\nimport geopandas as gpd\nfrom geopandas.tools import sjoin # rtree index in-build, used with inner, intersection\nimport pandas as pd\n\nfrom collections import defaultdict",
"_____no_output_____"
]
],
[
[
"def sjoin(left_df, right_df, how='inner', op='intersects',\n lsuffix='left', rsuffix='right', **kwargs):\n \"\"\"Spatial join of two GeoDataFrames.\n left_df, right_df are GeoDataFrames\n how: type of join\n left -> use keys from left_df; retain only left_df geometry column\n right -> use keys from right_df; retain only right_df geometry column\n inner -> use intersection of keys from both dfs;\n retain only left_df geometry column\n op: binary predicate {'intersects', 'contains', 'within'}\n see http://toblerity.org/shapely/manual.html#binary-predicates\n lsuffix: suffix to apply to overlapping column names (left GeoDataFrame)\n rsuffix: suffix to apply to overlapping column names (right GeoDataFrame)\n \"\"\"\n",
"_____no_output_____"
]
],
[
[
"def find_intersections(o):\n \n from collections import defaultdict\n\n paired_ind = [o.pp_index, o.sal_index]\n\n d_over_ind = defaultdict(list)\n\n # creating a dictionary that has prescints as keys and associated small areas as values\n for i in range(len(paired_ind[0].values)):\n if not paired_ind[0].values[i]==paired_ind[1].values[i]: # it shows itself as intersection\n d_over_ind[paired_ind[0].values[i]].append(paired_ind[1].values[i])\n\n # get rid of the pol precincts with no small areas associated to them- not the most efficient way\n d_temp = {}\n for l in d_over_ind:\n if len(d_over_ind[l]):\n d_temp[l] = d_over_ind[l]\n\n return d_temp\n \n \ndef calculate_join_indices(g1_reind, g2_reind):\n\n # A: region of the police data with criminal record\n # C: small area with population data\n # we look for all small areas intersecting a given C_i, calculate the fraction of inclusion, scale the\n # population accordingly: area(A_j, where A_j crosses C_i)/area(A_j)* popul(A_j)\n \n \n # the actual indexing:\n out = sjoin(g1_reind, g2_reind, how =\"inner\", op = \"intersects\")\n \n out.drop('index_right', axis=1, inplace=True) # there is a double index fo smal areas, so we drop one\n #out_sorted = out.sort(columns='polPrecincts_index', ascending=True) # guess sorting is not necessary, cause we are\n # using doctionaries at later stages\n #dict_over_ind = find_intersections(out_sorted)\n\n # output retains only 1 area (left or right join), and gives no intersection area.\n # so we create an array with paired indices: police precincts with associated small areas\n # we use it in a loop in a function below\n dict_over_ind = find_intersections(out) \n \n return dict_over_ind\n \ndef calculate_inclusion_indices(g1_reind, g2_reind):\n\n out = sjoin(g1_reind, g2_reind, op = \"contains\") ## PP contains SAL\n \n out.drop('index_right', axis=1, inplace=True) \n \n dict_over_ind = find_intersections(out) \n \n return dict_over_ind\n \ndef calculate_join(dict_over_ind, g1_reind, g2_reind):\n area_total = 0\n data_aggreg = []\n\n # note to self: make sure to import shapely Polygon\n for index1, crim in g1_reind.iterrows():\n try:\n index1 = crim.pp_index\n sals_found = dict_over_ind[index1]\n\n for sal in range(len(sals_found)):\n pom = g2_reind[g2_reind.sal_index == sals_found[sal]]['geometry'] \n\n #if pom.intersects(crim['geometry']).values[0]:\n area_int = pom.intersection(crim['geometry']).area.values[0]\n if area_int>0:\n area_total += area_int \n area_crim = crim['geometry'].area\n\n area_popu = pom.values[0].area\n\n popu_count = g2_reind[g2_reind.sal_index == sals_found[sal]]['PPL_CNT'].values[0]\n murd_count = crim['murd_cnt']\n pol_province = crim['province']\n popu_frac = (area_int / area_popu) * popu_count# fraction of the pop area contained inside the crim\n #print(popu_frac)\n extra_info_col_names = ['DC_NAME','MN_NAME','MP_NAME','PR_NAME','SP_NAME']\n \n extra_info_col_codes = ['MN_CODE','MP_CODE','PR_CODE','SAL_CODE','SP_CODE']\n\n extra_names = g2_reind[g2_reind.sal_index == sals_found[sal]][extra_info_col_names]#.filter(regex=(\"NAME\"))\n extra_codes = g2_reind[g2_reind.sal_index == sals_found[sal]][extra_info_col_codes]#.filter(regex=(\"NAME\"))\n\n data_aggreg.append({'geometry': pom.intersection(crim['geometry']).values[0], 'id1': index1,\\\n 'id2': sals_found[sal] ,'area_pp': area_crim,'area_sal': area_popu,\\\n 'area_inter': area_int, 'popu_inter' : popu_frac, 'popu_sal': popu_count,\\\n 'murd_cnt': murd_count,'province': pol_province,\n 'DC_NAME': 
extra_names.DC_NAME.values[0],\\\n 'MN_NAME': extra_names.MN_NAME.values[0], 'MP_NAME': extra_names.MP_NAME.values[0],\\\n 'PR_NAME': extra_names.PR_NAME.values[0],'SP_NAME': extra_names.SP_NAME.values[0],\\\n 'MN_CODE': extra_codes.MN_CODE.values[0],'MP_CODE': extra_codes.MP_CODE.values[0],\\\n 'PR_CODE': extra_codes.PR_CODE.values[0],'SAL_CODE': extra_codes.SAL_CODE.values[0],\\\n 'SP_CODE': extra_codes.SP_CODE.values[0]} )\n except:\n pass\n \n df_t = gpd.GeoDataFrame(data_aggreg,columns=['geometry', 'id1','id2','area_pp',\\\n 'area_sal','area_inter', 'popu_inter',\\\n 'popu_sal', 'murd_cnt','province','DC_NAME',\\\n 'MN_NAME','MP_NAME','PR_NAME','SP_NAME',\\\n 'MN_CODE','MP_CODE','PR_CODE','SAL_CODE','SP_CODE'])\n #df_t.to_file(out_name)\n return df_t, area_total, data_aggreg",
"_____no_output_____"
],
[
"# this function adds the remaining columns, calculates fractions etc\ndef compute_final_col(df_temp):\n # add population data per police percinct to the main table\n # id1- PP, id2 - SAL\n temp = df_temp.groupby(by=['id1'])['popu_inter'].sum().reset_index()\n\n data_with_population = pd.merge(df_temp, temp, on='id1', how='outer')\\\n .rename(columns={'popu_inter_y':'popu_frac_per_pp', 'popu_inter_x':'popu_inter'})\n\n # finally, update the murder rate per SAL : id2 is sal's id \n\n data_with_population['murd_est_per_int'] = data_with_population['popu_inter']/data_with_population['popu_frac_per_pp']\\\n * data_with_population['murd_cnt']\n data_mur_per_int = data_with_population.groupby(by=['id2'])['murd_est_per_int'].sum().reset_index()\n\n data_mur_per_sal = data_mur_per_int.rename(columns={'murd_est_per_int':'murd_est_per_sal'})\n\n data_with_population['ratio_per_int'] = data_with_population['popu_inter']/data_with_population['popu_frac_per_pp']\\\n\n data_complete = pd.merge(data_with_population, data_mur_per_sal, on='id2', how='outer')\\\n .rename(columns={'id1':'index_PP', 'id2':'index_SAL'})\n return data_complete\n",
"_____no_output_____"
]
],
[
[
"Main functions to find intersection. Files loaded in are the AEA projected shapefiles.",
"_____no_output_____"
]
],
[
[
"salSHP_upd = 'shapefiles/updated/sal_population_aea.shp'\npolSHP_upd = 'shapefiles/updated/polPrec_murd2015_prov_aea.shp'\n\ngeo_pol = gpd.GeoDataFrame.from_file(polSHP_upd)\ngeo_sal = gpd.GeoDataFrame.from_file(salSHP_upd)\n\ngeo_pol_reind = geo_pol.reset_index().rename(columns={'index':'pp_index'})\ngeo_sal_reind = geo_sal.reset_index().rename(columns={'index':'sal_index'})\n\n#dict_int = calculate_join_indices(geo_pol_reind,geo_sal_reind)",
"_____no_output_____"
]
],
[
[
"test on a subset:",
"_____no_output_____"
]
],
[
[
"gt1= geo_pol_reind[geo_pol.province==\"Free State\"].head(n=2)\ngt2 = geo_sal_reind[geo_sal_reind.PR_NAME==\"Free State\"].reset_index()\nd = calculate_join_indices(gt1, gt2)",
"_____no_output_____"
]
],
[
[
"Running the intersections on pre-computed indices:",
"_____no_output_____"
]
],
[
[
"from timeit import default_timer as timer\n\n#start = timer() \n\n#df_inc, sum_area_inc, data_inc = calculate_join(dict_inc, geo_pol_reind, geo_sal_reind)\n#end = timer()\n#print(\"1st\", end - start) \n\nstart = timer() \ndf_int, sum_area_int, data_int = calculate_join(dict_int, geo_pol_reind, geo_sal_reind)\nend = timer()\nprint(\"2nd\", end - start) ",
"_____no_output_____"
]
],
[
[
"find pol precincts within WC boundary",
"_____no_output_____"
]
],
[
[
"za_province = gpd.read_file('za-provinces.topojson',driver='GeoJSON')#.set_index('id')\nza_province.crs={'init': '27700'}\n\nwc_boundary = za_province.ix[8].geometry # WC\n#pp_WC = geo_pol[geo_pol.geometry.within(wc_boundary)]\npp_WC_in = geo_pol[geo_pol.geometry.intersects(wc_boundary)]\n#.unary_union, sal_wc_union_bound = sal_WC_in.unary_union\npp_WC_overlaps = pp_WC_in[pp_WC_in.province!=\"Western Cape\"]\npp_WC_pol_annot = pp_WC_in[pp_WC_in.province==\"Western Cape\"]",
"_____no_output_____"
],
[
"#pp_test = pp_WC_in[pp_WC_in['compnt_nm'].isin(['atlantis','philadelphia','kraaifontein','brackenfell','kuilsriver','kleinvleveerste river','macassar','somerset west','fish hoek'])]\n#pp_test = pp_WC_in[pp_WC_in['compnt_nm'].isin(['beaufort west','doring bay','murraysburg', 'strandfontein','nuwerus','lutzville'])]\n%matplotlib inline\n#pp_WC_overlaps.plot()",
"_____no_output_____"
]
],
[
[
"Adding final columns:",
"_____no_output_____"
]
],
[
[
"# There are 101,546 intersections \ndf_int_aea = compute_final_col(df_int) # add final calculations\ndf_int_aea.to_csv('data/pp_int_intersections2.csv')",
"_____no_output_____"
]
],
[
[
"Some intersections are multipolygons (PP and SAL intersect in multiple areas):",
"_____no_output_____"
]
],
[
[
"df_int_aea.head(n=3).values[2][0]",
"_____no_output_____"
]
],
[
[
"There are curious cases of intersections, which form polygons. For example,a Free State police precinct 'dewetsdorp' with murder count of 1 (yet high rate of Stock-theft- 52 in 2014) intersects the SAL 4990011 (part of SP Mangaung NU) in two lines:",
"_____no_output_____"
]
],
[
[
"geo_sal_reind[geo_sal_reind.sal_index==28532].geometry.values[0]",
"_____no_output_____"
],
[
"geo_pol_reind[geo_pol_reind.pp_index ==358].geometry.values[0]",
"_____no_output_____"
],
[
"a = geo_pol_reind[geo_pol_reind.pp_index ==358].geometry.values[0]\nb= geo_sal_reind[geo_sal_reind.sal_index==28532].geometry.values[0]\nc = [geo_pol_reind[geo_pol_reind.pp_index ==358].geometry.values[0],geo_sal_reind[geo_sal_reind.sal_index==28532].geometry.values[0]]\ncascaded_union(c)\n",
"_____no_output_____"
],
[
"from shapely.ops import cascaded_union\ncascaded_union(b)",
"_____no_output_____"
],
[
"geo_sal_reind[geo_sal_reind.sal_index==28532]",
"_____no_output_____"
],
[
"df_int_aea.to_file('data/pp_int_intersections.shp')",
"_____no_output_____"
],
[
"# When reading from a file\"\n\nimport pandas as pd\ndf_int_aea = pd.read_csv('data/pp_int_intersections.csv') \n\n# when reading from file a column Unnamed is added. Needs to be removed.\ncols = [c for c in df_int_aea.columns if c.lower()[:7] != 'unnamed']\n\ndf_int_aea=df_int_aea[cols]",
"_____no_output_____"
],
[
"df_int_aea.head(n=2)",
"_____no_output_____"
],
[
"data_prov = df_int_aea[['PR_NAME','province','murd_est_per_int']]\ndata_prov.groupby('province')['murd_est_per_int'].sum()",
"_____no_output_____"
],
[
"data_prov.groupby('PR_NAME')['murd_est_per_int'].sum()",
"_____no_output_____"
],
[
"# check over small areas- sum of all the crimes should be 17482\npom = {}\nfor ind, row in df_inc_aea.iterrows():\n pom[row['index_SAL']] = row['murd_est_per_sal'] \ns=0\nfor key in pom:\n s = s + pom[key]\nprint(s)",
"_____no_output_____"
]
],
[
[
"## measuring the error of the 'CPS' estimate\nComputing the lower (LB) and upper bounds (UB), wherever possible, is done the following way:\nUB: based the calcualation of population per PP on all SALs included entirely within PP. If not possible, set to NaN\nLB: find all SALs intersecting a given PP, but base the PP population estimation on the population of the entire SAL, not the population of the intersection.\n\nAs a result, each intersection will have a triplet of values associated to it: (LB, actual estimate, UB/NaN). The bounds are not additive- that is, the estimates applies only to the level of SAL area, and will not be maintained when summed over, e.g. SP or MN",
"_____no_output_____"
],
[
"For modyfying/selecting entries for bound estimation, we discard the last 4 columns \nwith precomputed values",
"_____no_output_____"
]
],
[
[
"df_int=df_int_aea.ix[:,:20]",
"_____no_output_____"
],
[
"# this function adds the remaining columns, calculates fractions etc\ndef compute_final_col_bounds(df_aea):\n\n #recalculate pop frac per PP\n temp = df_aea.groupby(by=['index_PP'])['popu_inter'].sum().reset_index()\n data_with_population = pd.merge(df_aea, temp, on='index_PP', how='outer')\\\n .rename(columns={'popu_inter_y':'popu_frac_per_pp', 'popu_inter_x':'popu_inter'})\n\n data_with_population['murd_est_per_int'] = data_with_population['popu_inter']/data_with_population['popu_frac_per_pp']\\\n * data_with_population['murd_cnt']\n \n data_mur_per_int = data_with_population.groupby(by=['index_SAL'])['murd_est_per_int'].sum().reset_index()\n\n data_mur_per_sal = data_mur_per_int.rename(columns={'murd_est_per_int':'murd_est_per_sal'})\n\n data_with_population['ratio_per_int'] = data_with_population['popu_inter']/data_with_population['popu_frac_per_pp']\\\n\n data_complete = pd.merge(data_with_population, data_mur_per_sal, on='index_SAL', how='outer')\n #\\ .rename(columns={'id1':'index_PP', 'id2':'index_SAL'})\n return data_complete",
"_____no_output_____"
]
],
[
[
"create new tables for the LB and UB",
"_____no_output_____"
]
],
[
[
"list_lb =[]\nlist_ub = []\nfor i,entry in df_int.iterrows():#f_inc_aea:\n if (entry.area_inter/entry.area_sal==1): # select those included 'completely'\n list_ub.append(entry)\n \n entry.popu_inter = entry.popu_sal # this is actually already true for the above if() case\n list_lb.append(entry)\n \ndf_int_aea_ub_p=gpd.GeoDataFrame(list_ub)\ndf_int_aea_lb_p=gpd.GeoDataFrame(list_lb)\n",
"_____no_output_____"
],
[
"df_int_aea_lb = compute_final_col_bounds(df_int_aea_lb_p)\\\n .rename(columns={'murd_est_per_int':'murd_est_per_int_lb',\\\n 'ratio_per_int':'ratio_per_int_lb','murd_est_per_sal':'murd_est_per_sal_lb'})\n# complete\ndf_int_aea_ub = compute_final_col_bounds(df_int_aea_ub_p)\\\n .rename(columns={'murd_est_per_int':'murd_est_per_int_ub',\\\n 'ratio_per_int':'ratio_per_int_ub','murd_est_per_sal':'murd_est_per_sal_ub'})",
"_____no_output_____"
],
[
"#check if numbers add up per province level (invariant for inclusion):\ndata_prov = df_int_aea_ub[['PR_NAME','province','murd_est_per_int_ub']]\ndata_prov.groupby('province')['murd_est_per_int_ub'].sum()",
"_____no_output_____"
],
[
"temp_ub = df_int_aea_ub.groupby(by=['SP_CODE'])['murd_est_per_int_ub'].sum().reset_index()\ntemp_lb = df_int_aea_lb.groupby(by=['SP_CODE'])['murd_est_per_int_lb'].sum().reset_index()\ntemp_est = df_int_aea.groupby(by=['SP_CODE'])['murd_est_per_int'].sum().reset_index()\ntemp = pd.merge(temp_lb, temp_est, on='SP_CODE', how='outer')\ndf_bounds = pd.merge(temp, temp_ub, on='SP_CODE', how='outer')",
"_____no_output_____"
]
],
[
[
"At the level of SP (and probably others) some bounds are inverted... UB < LB (2,242 out of 21,589)",
"_____no_output_____"
]
],
[
[
"#mn_bounds_def = mn_bounds[~mn_bounds.UB_murder.isnull()]\ndf_inv_bounds = df_bounds[df_bounds.murd_est_per_int_ub<df_bounds.murd_est_per_int_lb]",
"_____no_output_____"
],
[
"df_inv_bounds.tail()",
"_____no_output_____"
],
[
"temp_ub = df_int_aea_ub.groupby(by=['SAL_CODE'])['murd_est_per_int_ub'].sum().reset_index()\ntemp_lb = df_int_aea_lb.groupby(by=['SAL_CODE'])['murd_est_per_int_lb'].sum().reset_index()\ntemp_est = df_int_aea.groupby(by=['SAL_CODE'])['murd_est_per_int'].sum().reset_index()\n\n# .rename(columns={'popu_inter_y':'popu_frac_per_pp', 'popu_inter_x':'popu_inter'})",
"_____no_output_____"
],
[
"temp = pd.merge(temp_lb, temp_est, on='SAL_CODE', how='outer')\ndf_bounds = pd.merge(temp, temp_ub, on='SAL_CODE', how='outer')",
"_____no_output_____"
],
[
"mn_names_set = set(df_int_aea_lb.MN_NAME)\nmn_names = []\nfor s in mn_names_set:\n mn_names.append(s)",
"_____no_output_____"
],
[
"df_bounds.head(n=2)",
"_____no_output_____"
],
[
"df_bound_nonan = df_bounds[~df_bounds.murd_est_per_int_ub.isnull()&df_bounds.murd_est_per_int>0].sort(['murd_est_per_int'])",
"_____no_output_____"
]
],
[
[
"Plotting the lower and upper bounds:",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings('ignore')\nimport mpld3\nfrom mpld3 import plugins\nfrom mpld3.utils import get_id\n#import numpy as np\nimport collections\n\nfrom mpld3 import enable_notebook\nenable_notebook()",
"_____no_output_____"
],
[
"def make_labels_points(dataf):\n L = len(dataf)\n\n x = np.array(dataf['murd_est_per_int_lb']) \n y = np.array(dataf['murd_est_per_int_ub'])\n z = np.array(dataf['murd_est_per_int'])\n l = np.array(dataf['SAL_CODE']) \n d = y-x # error\n \n s = \" \"\n sc = \", err: \"\n seq = []\n seqc = []\n\n \n t = [seq.append(s.join((str(l[i]), str(z[i])))) for i in range(L)]\n t = [seqc.append(sc.join((seq[i], str(d[i])))) for i in range(L)]\n\n return seqc, L",
"_____no_output_____"
],
[
"def make_scatter(dataf, outname, outtitle):\n l = np.array(dataf['SAL_CODE']) \n x = np.array(dataf['murd_est_per_int_lb']) \n y = np.array(dataf['murd_est_per_int_ub'])\n z = np.array(dataf['murd_est_per_int'])\n d = y-x # error\n \n # build a rectangle in axes coords\n left, width = .15, .7\n bottom, height = .09, .75\n right = left + width\n top = bottom + height\n\n fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))\n N=len(dataf)\n scatter = ax.scatter(range(1,N+1),z,c=100*d,s=1000*d,alpha=0.3, cmap=plt.cm.jet, color='blue', label='...')\n ax.set_title(outtitle, size=15)\n \n seqc, L = make_labels_points(dataf)\n labels12 = ['(SAL id, est: {0}'.format(seqc[i]) for i in range(L)]\n\n tooltip = plugins.PointLabelTooltip(scatter, labels=labels12)\n plugins.connect(fig, tooltip)\n \n ax.set_xlabel('SAL')\n ax.set_ylabel('murder rate', labelpad = 20)\n \n html_str = mpld3.fig_to_html(fig)\n Html_file= open(outname,\"w\")\n Html_file.write(html_str)\n Html_file.close()",
"_____no_output_____"
],
[
"make_scatter(df_bound_nonan.head(n=8000), 'bounds.html', \"SAL estimation bounds\")",
"_____no_output_____"
],
[
"df_bound_nonan[df_bound_nonan.SAL_CODE==3760001]",
"_____no_output_____"
],
[
"df_int_aea_ub[df_int_aea_ub.SAL_CODE==3760001]",
"_____no_output_____"
],
[
"df_int_aea_lb[df_int_aea_lb.SAL_CODE==3760001]",
"_____no_output_____"
],
[
"df_int_aea_lb[df_int_aea_lb.index_PP==551]",
"_____no_output_____"
],
[
"df_int_aea[df_int_aea.index_PP==551]",
"_____no_output_____"
]
],
[
[
"### Add gender data:",
"_____no_output_____"
]
],
[
[
"full_pop = pd.read_csv('data/sal_pop.csv')",
"_____no_output_____"
],
[
"def get_ratio(i,full_pop):\n \n try: \n x = int(full_pop.iloc[i,].Female)/(int(full_pop.iloc[i,].Male)+int(full_pop.iloc[i,].Female)) \n \n except: \n x =0\n\n return x\n ",
"_____no_output_____"
],
[
"wom_ratio = [get_ratio(i,full_pop) for i in range(len(full_pop))]",
"_____no_output_____"
],
[
"full_pop['wom_ratio'] = wom_ratio",
"_____no_output_____"
],
[
"full_pop.drop('Male', axis=1, inplace=True)",
"_____no_output_____"
],
[
"data_full = pd.merge(df_int_aea, full_pop, on='SAL_CODE')",
"_____no_output_____"
],
[
"data_full.head()",
"_____no_output_____"
]
],
[
[
"WARDS:",
"_____no_output_____"
]
],
[
[
"wardsShp =gpd.GeoDataFrame.from_file('../maps/data/Wards2011_aea.shp')",
"_____no_output_____"
],
[
"wardsShp.head(n=2)",
"_____no_output_____"
],
[
"za_province = gpd.GeoDataFrame.from_file('../south_africa_adm1.shp')#.set_index('id')\n",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"#import matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom descartes import PolygonPatch\nimport fiona\nfrom shapely.geometry import Polygon, MultiPolygon, shape\n\n# We can extract the London Borough boundaries by filtering on the AREA_CODE key\nmp = MultiPolygon(\n [shape(pol['geometry']) for pol in fiona.open('../south_africa_adm1.shp')])\nmpW = MultiPolygon(\n [shape(pol['geometry']) for pol in fiona.open('../wards_delimitation/Wards_demarc/Wards2011.shp')])\nmpS = MultiPolygon(\n [shape(pol['geometry']) for pol in fiona.open('shapefiles/oryginal/SAL_SA_2013.shp')])\n\n# define map extent\nlllon = 21\nlllat = -18\nurlon = 34\nurlat = -8\n\n# set up Basemap instance\nm = Basemap(\n projection = 'merc',\n llcrnrlon = lllon, llcrnrlat = lllat, urcrnrlon = urlon, urcrnrlat = urlat,\n resolution='h')\n# We can now do GIS-ish operations on each borough polygon!\n# we could randomize this by dumping the polygons into a list and shuffling it\n# or we could define a random colour using fc=np.random.rand(3,)\n# available colour maps are here: http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps\ncm = plt.get_cmap('RdBu')\nnum_colours = len(mpW)\n \nfig = plt.figure(figsize=(16, 16))\nax = fig.add_subplot(111)\nminx, miny, maxx, maxy = mp.bounds\nw, h = maxx - minx, maxy - miny\nax.set_xlim(minx - 0.2 * w, maxx + 0.2 * w)\nax.set_ylim(miny - 0.2 * h, maxy + 0.2 * h)\nax.set_aspect(1)\n\npatches = []\nfor idx, p in enumerate(mp):\n #colour = cm(1. * idx / num_colours)\n patches.append(PolygonPatch(p, alpha=1., zorder=1))\n\nfor idx, p in enumerate(mpW):\n colour = cm(1. * idx / num_colours)\n patches.append(PolygonPatch(p, ec='#4C4C4C', alpha=1., zorder=1))\n\nfor idx, p in enumerate(mpS):\n colour = cm(1. * idx / num_colours)\n patches.append(PolygonPatch(p, ec='#4C4C4C', alpha=1., zorder=1))\n\nax.add_collection(PatchCollection(patches, match_original=True))\n\nax.set_xticks([])\nax.set_yticks([])\nplt.title(\"SAL on Wards\")\n#plt.savefig('data/london_from_shp.png', alpha=True, dpi=300)\nplt.show()",
"_____no_output_____"
],
[
"# define map extent\nlllon = 15\nlllat = -35\nurlon = 33\nurlat = -22\n\n# set up Basemap instance\nm = Basemap(\n projection = 'merc',\n llcrnrlon = lllon, llcrnrlat = lllat, urcrnrlon = urlon, urcrnrlat = urlat,\n resolution='h')",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(16, 16))\nm.drawmapboundary(fill_color=None, linewidth=0)\nm.drawcoastlines(color='#4C4C4C', linewidth=0.5)\nm.drawcountries()\nm.fillcontinents(color='#F2E6DB',lake_color='#DDF2FD')\n#m.readshapefile('../wards_delimitation/Wards_demarc/Wards2011.sbh','Wards',drawbounds=False)\nm.readshapefile('../maps/data/test','wards',drawbounds=False)",
"_____no_output_____"
],
[
"from itertools import chain\nshp = fiona.open('../maps/data/test.shp')\nbds = shp.bounds\nshp.close()\nextra = 0.01\nll = (bds[0], bds[1])\nur = (bds[2], bds[3])\ncoords = list(chain(ll, ur))\nw, h = coords[2] - coords[0], coords[3] - coords[1]",
"_____no_output_____"
],
[
"m = Basemap(\n projection='tmerc',\n lon_0=24.000,\n lat_0=-24.0000,\n ellps = 'WGS84',\n llcrnrlon=coords[0] - extra * w,\n llcrnrlat=coords[1] - extra + 0.01 * h,\n urcrnrlon=coords[2] + extra * w,\n urcrnrlat=coords[3] + extra + 0.01 * h,\n lat_ts=0,\n resolution='i',\n suppress_ticks=True)\nm.readshapefile(\n '../maps/data/test',\n 'wards',\n color='none',\n zorder=2)",
"_____no_output_____"
]
],
[
[
"clean the utf problems",
"_____no_output_____"
]
],
[
[
"from unidecode import unidecode\n\nwith fiona.open(\n '../maps/data/wards_sel.shp', 'r') as source:\n\n # Create an output shapefile with the same schema,\n # coordinate systems. ISO-8859-1 encoding.\n with fiona.open(\n '../maps/data/wards_sel_cleaned.shp', 'w',\n **source.meta) as sink:\n\n # Identify all the str type properties.\n str_prop_keys = [\n k for k, v in sink.schema['properties'].items()\n if v.startswith('str')]\n\n for rec in source:\n\n # Transliterate and update each of the str properties.\n for key in str_prop_keys:\n val = rec['properties'][key]\n if val:\n rec['properties'][key] = unidecode(val)\n\n # Write out the transformed record.\n sink.write(rec)",
"_____no_output_____"
],
[
"salSHP = 'shapefiles/updated/sal_population_4326.shp'\nwarSHP = '../wards_delimitation/Wards_demarc/Wards2011.shp'\n\ngeo_war = gpd.GeoDataFrame.from_file(warSHP)\ngeo_sal = gpd.GeoDataFrame.from_file(salSHP)",
"_____no_output_____"
],
[
"import pyepsg\n\npyepsg.get(geo_war.crs['init'].split(':')[1])",
"_____no_output_____"
],
[
"pyepsg.get(geo_sal.crs['init'].split(':')[1])",
"_____no_output_____"
]
],
[
[
" to plot the data on a folium map, we need to convert to a Geographic coordinate system with the wgs84 datum (EPSG: 4326). We also need to greate a GeoJSON object out of the GeoDataFrame.\n AND! as it turns out (many hourse of tripping over the problem) to SIMPLIFY the geometries. They are too big for webmaps.",
"_____no_output_____"
]
],
[
[
"warSHP = '../maps/data/Wards2011.shp'\n\ngeo_war = gpd.GeoDataFrame.from_file(warSHP)\n#geo_sal = gpd.GeoDataFrame.from_file(salSHP_upd)\n",
"_____no_output_____"
],
[
"geo_war.head(n=2)",
"_____no_output_____"
],
[
"geo_war_sub = geo_war.iloc[:,[2,3,7,8,9]].reset_index().head(n=2)",
"_____no_output_____"
],
[
"#g = geo_war_sub.simplify(0.05, preserve_topology=False)",
"_____no_output_____"
],
[
"geo_war_sub.head(n=3)",
"_____no_output_____"
],
[
"geo_war_sub.to_file('../maps/data/wards_sel.shp')",
"_____no_output_____"
],
[
"geo_war_sub['geometry'].replace(g,inplace=True)\n#data['index_rank'].replace(index_dict, inplace=True)",
"_____no_output_____"
],
[
"geo_war_sub_sim.head(n=2)",
"_____no_output_____"
],
[
"salSHP = 'shapefiles/updated/sal_population.shp'\ngeo_sal = gpd.GeoDataFrame.from_file(salSHP)",
"_____no_output_____"
],
[
"#geo_sal.head(n=2)\ngeo_sal_sub = geo_sal.iloc[:,[7,11,15,16,20,23]].reset_index()#.head()",
"_____no_output_____"
],
[
"geo_sal_sub.to_file('../maps/data/sal_sub.shp')",
"_____no_output_____"
],
[
"#gjsonSal = geo_sal.to_crs(epsg='4326').to_json()# no need to convert, as it already is in 4326\n#gjsonSal = geo_sal.to_json()\n#gjsonWar = geo_war.to_json()\ngj = g.to_json()",
"_____no_output_____"
],
[
"import folium\n#import pandas as pd\n\nlllon = 15\nlllat = -35\nurlon = 33\nurlat = -22\n#state_geo = r'shapefiles/updated/sal_population.json'\n#ward_path = r'../maps/data/test.geojson'\n\n#state_geo = r'shapefiles/oryginal/SAL_SA_2013.json'\nstate_geo = r'../maps/data/sal.json'\n#state_geo = r'temp_1E-7.topojson'\n\n#Let Folium determine the scale\nmap = folium.Map(location=[(lllat+urlat)/2, (lllon+urlon)/2], tiles='Mapbox Bright',zoom_start=6)\n#, tiles='cartodbpositron')\n#map.geo_json(geo_path=state_geo)\n#map.geo_json(geo_path=state_geoW)\n#map.geo_json(geo_path=ward_path)\n\nmap.create_map(path='test.html')",
"_____no_output_____"
],
[
"state_geo",
"_____no_output_____"
],
[
"lllon = 15\nlllat = -35\nurlon = 33\nurlat = -22\n\nimport folium\n#map = folium.Map(location=[-33.9249, 18.4241], zoom_start=10)\n\nmapa = folium.Map([(lllat+urlat)/2, (lllon+urlon)/2],\n zoom_start=7,\n tiles='cartodbpositron')\n\n#pSal = folium.features.GeoJson(gjsonSal)\n#pWae = folium.features.GeoJson(gjsonWar)\n\n#mapa.add_children(pSal)\n#mapa.add_children(pWar)\n#mapa.geo_json(gj)\n#test = folium.folium.Map.geo_json(gj)\n#ice_map.geo_json(geo_path=topo_path, topojson='objects.antarctic_ice_shelf')\n#mapa.add_children(test)\nmapa.create_map(path='test.html')",
"_____no_output_____"
],
[
"testshp = '../maps/data/test.shp'\ngeo_test = gpd.GeoDataFrame.from_file(testshp)",
"_____no_output_____"
],
[
"import pyepsg\npyepsg.get(geo_test.crs['init'].split(':')[1])",
"_____no_output_____"
],
[
"gjson = geo_test.to_json()",
"_____no_output_____"
],
[
"import folium\ngeo_path = r'../maps/data/test.json'\nmap_osm = folium.Map(location=[-24.5236, 24.6750],zoom_start=6)\nmap_osm.geo_json(geo_path=geo_path)\nmap_osm.create_map(path='osm.html')",
"_____no_output_____"
]
],
[
[
"analytics based on intersections:",
"_____no_output_____"
]
],
[
[
"def find_intersections(o):\n \n from collections import defaultdict\n\n paired_ind = [o.pp_index, o.sal_index]\n\n d_over_ind = defaultdict(list)\n\n # creating a dictionary that has prescints as keys and associated small areas as values\n for i in range(len(paired_ind[0].values)):\n if not paired_ind[0].values[i]==paired_ind[1].values[i]: # it shows itself as intersection\n d_over_ind[paired_ind[0].values[i]].append(paired_ind[1].values[i])\n\n # get rid of the pol precincts with no small areas associated to them- not the most efficient way\n d_temp = {}\n for l in d_over_ind:\n if len(d_over_ind[l]):\n d_temp[l] = d_over_ind[l]\n\n return d_temp\n \n \ndef calculate_join_indices(g1_reind, g2_reind):\n out = sjoin(g1_reind, g2_reind, how =\"inner\", op = \"intersects\")\n \n out.drop('index_right', axis=1, inplace=True) \n dict_over_ind = find_intersections(out) \n \n return dict_over_ind\n ",
"_____no_output_____"
],
[
"#warSHP = '../maps/data/Wards2011_aea.shp'\n\n#geo_war = gpd.GeoDataFrame.from_file(warSHP)\n\n#salSHP = 'shapefiles/updated/sal_population_aea.shp'\n#geo_sal = gpd.GeoDataFrame.from_file(salSHP)\n#geo_sal = geo_sal.reset_index()\n\n#geo_war_sub = geo_war.iloc[:,[2,3,7,8,9]].reset_index()#.head(n=2)\nout = sjoin(geo_war_sub, geo_sal, how =\"inner\", op = \"intersects\")\n ",
"_____no_output_____"
],
[
"out_sub = out.iloc[:,[2,3,5,6,15,23,24,28]].reset_index().rename(columns={'index':'index_ward','index_right':'index_sal'})",
"_____no_output_____"
],
[
"geo_war_sub = geo_war_sub.rename(columns={'index':'index_ward'})#head(n=2)\n#head(n=2)\ngeo_sal_sub = geo_sal.iloc[:,[5,11,16,17,19,21,24]].reset_index().rename(columns={'index':'index_sal'}) ",
"_____no_output_____"
],
[
"from collections import defaultdict\n\npaired_ind = [out_sub.index_ward, out_sub.index_sal]\n\ndict_temp = defaultdict(list)\n\n # creating a dictionary that has prescints as keys and associated small areas as values\nfor i in range(len(paired_ind[0].values)):\n if not paired_ind[0].values[i]==paired_ind[1].values[i]: # it shows itself as intersection\n dict_temp[paired_ind[0].values[i]].append(paired_ind[1].values[i])\n\ndict_int_ward = {}\nfor l in dict_temp:\n if len(dict_temp[l]):\n dict_int_ward[l] = dict_temp[l]\n \n#dict_int_ward",
"_____no_output_____"
],
[
"def calculate_join_ward_sal(dict_over_ind, g1_reind, g2_reind):\n area_total = 0\n data_aggreg = []\n\n # note to self: make sure to import shapely Polygon\n for index1, row in g1_reind.iterrows():\n #print(index1, row.index_ward)\n try:\n index1 = row.index_ward\n \n sals_found = dict_over_ind[index1]\n for sal in range(len(sals_found)):\n pom = g2_reind[g2_reind.index_sal == sals_found[sal]]['geometry'] \n\n area_int = pom.intersection(row['geometry']).area.values[0] \n \n area_sal = pom.values[0].area\n int_percent = area_int/area_sal\n #popu_count = g2_reind[g2_reind.sal_index == sals_found[sal]]['PPL_CNT'].values[0]\n \n \n extra_info_col = ['MP_NAME','PR_NAME','SAL_CODE','SP_NAME']\n\n extra_names = g2_reind[g2_reind.index_sal == sals_found[sal]][extra_info_col]#.filter(regex=(\"NAME\"))\n\n #extra_names = g2_reind[g2_reind.sal_index == sals_found[sal]][extra_info_col_names]#.filter(regex=(\"NAME\"))\n\n data_aggreg.append({'geometry': pom.intersection(row['geometry']).values[0],\\\n 'id1': index1,'ward_id': row.WARD_ID,'id2': sals_found[sal] ,'area_int': area_int,\\\n 'area_sal': area_sal,'int_percent': int_percent,\\\n 'MP_NAME': extra_names.MP_NAME.values[0],\\\n 'PR_NAME': extra_names.PR_NAME.values[0],'SAL_CODE': extra_names.SAL_CODE.values[0],\\\n 'SP_NAME': extra_names.SP_NAME.values[0]} )\n \n except:\n pass\n \n cols=['geometry', 'id1','ward_id','id2','area_int','area_sal','int_percent','MP_NAME','PR_NAME','SAL_CODE','SP_NAME'] \n df_t = gpd.GeoDataFrame(data_aggreg,columns=cols)\n #df_t.to_file('shapefiles/sal_ward.shp')\n return df_t\n ",
"_____no_output_____"
],
[
"from timeit import default_timer as timer\n\nstart = timer() \ndf = calculate_join_ward_sal(dict_int_ward,geo_war_sub, geo_sal_sub)\nend = timer()\nprint(\"time: \", end - start) ",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.to_csv('df.csv')",
"_____no_output_____"
],
[
"df_nc = df[df.int_percent<1]\n#df.groupby(by=['ward_id']).sum()",
"_____no_output_____"
],
[
"s = df_nc.groupby(by=['PR_NAME','ward_id'])",
"_____no_output_____"
],
[
"type(s)",
"_____no_output_____"
],
[
"#There are 4277 wards\nlen(geo_war)",
"_____no_output_____"
],
[
"# all wards have intersections\nlen(set(df_nc.ward_id))",
"_____no_output_____"
],
[
"#84907 SAL areas\nlen(geo_sal_sub)",
"_____no_output_____"
],
[
"# half of the intersect\nlen(set(df_nc.SAL_CODE))",
"_____no_output_____"
]
],
[
[
"40515 out of 84907 SALs intersect ward borders.\nLet's see whether the intersections generated from PP and SAL fit better.",
"_____no_output_____"
]
],
[
[
"#trying the intersections\ngeo_int_p = pd.read_csv('data/pp_int_intersections.csv')",
"_____no_output_____"
],
[
"geo_war_sub.crs",
"_____no_output_____"
],
[
"#geo_int.head(n=2)\ngeo_int = gpd.GeoDataFrame(geo_int_p, crs=geo_war_sub.crs)",
"_____no_output_____"
],
[
"#geo_int.head(n=2)\ncols = [c for c in geo_int.columns if c.lower()[:7] != 'unnamed']\ngeo_int = geo_int[cols]",
"_____no_output_____"
],
[
"geo_int.head(n=2)\ngeo_int_sub = geo_int.iloc[:,[1,2,0]].reset_index().rename(columns={'index':'index_int'}) ",
"_____no_output_____"
],
[
"geo_sal_sub.head(n=1)",
"_____no_output_____"
],
[
"geo_int_sub.geometry.head()",
"_____no_output_____"
],
[
"geo_war_sub.head(n=2)",
"_____no_output_____"
],
[
"out = sjoin(geo_war_sub.head(n=1), geo_int_sub, how =\"inner\", op = \"intersects\")\n",
"_____no_output_____"
],
[
"geo_war_sub.head(n=2)",
"_____no_output_____"
],
[
"type(geo_int)",
"_____no_output_____"
],
[
"geo_int.crs",
"_____no_output_____"
],
[
"test = gpd.GeoDataFrame(pd.read_csv('data/pp_test2.csv'))",
"_____no_output_____"
],
[
"geo_war_sub.to_csv('auch.csv')",
"_____no_output_____"
],
[
"test.plot()",
"_____no_output_____"
],
[
"f,ax = plt.subplots(1)\ngpd.plotting.plot_multipolygon(ax, df_int.head(n=2).geometry.values[0], linewidth = 0.1, edgecolr='grey')\nplt.show()",
"_____no_output_____"
],
[
"df_int.head(n=2).geometry.values[0]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f2a01a1319bebc6a9313c341e0efd6cf75eefa | 37,078 | ipynb | Jupyter Notebook | Home Assignments/HA3/HA3.ipynb | fridokus/deep-machine-learning | 2aca0bccc45600a7c518bdd4dd372b63c46badc2 | [
"MIT"
] | null | null | null | Home Assignments/HA3/HA3.ipynb | fridokus/deep-machine-learning | 2aca0bccc45600a7c518bdd4dd372b63c46badc2 | [
"MIT"
] | null | null | null | Home Assignments/HA3/HA3.ipynb | fridokus/deep-machine-learning | 2aca0bccc45600a7c518bdd4dd372b63c46badc2 | [
"MIT"
] | null | null | null | 37.757637 | 973 | 0.589271 | [
[
[
"### Checklist for submission\n\nIt is extremely important to make sure that:\n\n1. Everything runs as expected (no bugs when running cells);\n2. The output from each cell corresponds to its code (don't change any cell's contents without rerunning it afterwards);\n3. All outputs are present (don't delete any of the outputs);\n4. Fill in all the places that say `# YOUR CODE HERE`, or \"**Your answer:** (fill in here)\".\n5. You should not need to create any new cells in the notebook, but feel free to do it if convenient for you.\n6. The notebook contains some hidden metadata which is important during our grading process. **Make sure not to corrupt any of this metadata!** The metadata may be corrupted if you perform an unsuccessful git merge / git pull. It may also be pruned completely if using Google Colab, so watch out for this. Searching for \"nbgrader\" when opening the notebook in a text editor should take you to the important metadata entries.\n7. Fill in your group number and the full names of the members in the cell below;\n8. Make sure that you are not running an old version of IPython (we provide you with a cell that checks this, make sure you can run it without errors).\n\nFailing to meet any of these requirements might lead to either a subtraction of POEs (at best) or a request for resubmission (at worst).\n\nWe advise you the following steps before submission for ensuring that requirements 1, 2, and 3 are always met: **Restart the kernel** (in the menubar, select Kernel$\\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\\rightarrow$Run All). This might require a bit of time, so plan ahead for this (and possibly use Google Cloud's GPU in HA1 and HA2 for this step). Finally press the \"Save and Checkout\" button before handing in, to make sure that all your changes are saved to this .ipynb file.",
"_____no_output_____"
],
[
"---\n\nGroup number and member names:",
"_____no_output_____"
]
],
[
[
"GROUP = \"\"\nNAME1 = \"\"\nNAME2 = \"\"",
"_____no_output_____"
]
],
[
[
"Make sure you can run the following cell without errors.",
"_____no_output_____"
]
],
[
[
"import IPython\nassert IPython.version_info[0] >= 3, \"Your version of IPython is too old, please update it.\"",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"# Home Assignment 3\nThis home assignment will focus on reinforcement learning and deep reinforcement learning. The first part will cover value-table reinforcement learning techniques, and the second part will include neural networks as function approximators, i.e. deep reinforcement learning. \n\nWhen handing in this assignment, make sure that you're handing in the correct version, and more importantly, *that you do no clear any output from your cells*. We'll use these outputs to aid us when grading your assignment.",
"_____no_output_____"
],
[
"## Task 1: Gridworld\n\nIn this task, you will implement Value Iteration to solve for the optimal policy, $\\pi^*$, and the corresponding state value function, $V^*$.\n\nThe MDP you will work with in this assignment is illustrated in the figure below\n\n![title](./grid_world.png) ",
"_____no_output_____"
],
[
"The agent starts in one of the squares shown in the above figure, and then proceeds to take actions. The available actions at any time step are: **North, West, South,** and **East**. If an action would make the agent bump into a wall, or one of the black (unreachable) states, it instead does nothing, leaving the agent at the same place it was before.\n\nThe reward $R_s^a$ of being in state $s$ and performing actions $a$ is zero for all states, regardless of the action taken, with the exception of the green and the red squares. For the green square, the reward is always 1, and for the red square, always -1, regardless of the action.\n\nWhen the agent is either in the green or the red square, it will be transported to the terminal state in the next time step, regardless of the action taken. The terminal state is shown as the white square with the \"T\" inside.\n\n#### State representation\nThe notations used to define the states are illustrated in the table below\n\n| $S_0$ | $S_1$ | $S_2$ | $S_3$ | $S_4$ | |\n|-------|-------|-------|-------|-------|----|\n| $S_5$ | $S_6$ | $S_7$ | $S_8$ | $S_9$ | |\n| $S_{10}$ | $S_{11}$ | $S_{12}$ | $S_{13}$ | $S_{14}$ | $S_{15}$|\n\nwhere $S_{10}$ corresponds to the initial state of the environment, $S_4$ and $S_9$ to the green and red states of the environment, and $S_{15}$ to the terminal state.",
"_____no_output_____"
],
[
"\n### Task 1.a: Solve for $V^*(s)$ and $Q^*(s,a)$\nFor this task all transition probabilities are assumed to be 1 (that is, trying to move in a certain direction will definitely move the agent in the chosen direction), and a discount factor of .9, i.e. $\\gamma=.9$.",
"_____no_output_____"
],
[
"* Solve for $V^*(S_{10})$ \n\n**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"* Solve $Q^*(S_{10},a)$ for all actions\n\n**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"\n\n### Task 1.b Write a mathematical expression relating $V^\\pi(s)$ to $Q^\\pi(s,a)$ and $\\pi(a|s)$\n",
"_____no_output_____"
],
[
"**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"\n### Task 1.c: Value Iteration\nFor this task, the transitions are no longer deterministic. Instead, there is a 0.2 probability that the agent will try to travel in an orthogonal direction of the chosen action (0.1 probability for each of the two orthogonal directions). Note that the Markov decision process is still known and does not have to be learned from experience.\n\nYour task is to implement value iteration and solve for the\n* optimal greedy policy $\\pi^*(s)$ \n* $V^*(s)$",
"_____no_output_____"
],
[
"#### The value iteration algorithm\nValue iteration is an iterative algorithm used to compute the optimal value function $V^*(s)$. Each iteration starts with a guess of what the value function is and then uses the Bellman equations to improve this guess iteratively. We can describe one iteration of the algorithm as\n\n$\n\\textbf{For} \\quad s \\in {\\cal S}:\\qquad \\\\\n\\quad \\textbf{For} \\quad \\, a \\in {\\cal A}: \\\\\n\\qquad Q(s,a) = \\sum_{s'\\in S} T(s,a,s')\\left(R(s,a,s') + \\gamma V(s') \\right)\\\\ \n\\quad V(s) = \\underset{a}{\\text{max}}~ Q(s,a)\n$\n\nwhere $T(s, a, s')={\\mathrm Pr}[S'=s'\\big|S=s,A=a]$ is the probability to transition state $s$ to $s'$ given action $a$.\n\n\n#### The MDP Python class\nThe Markov Decision Process you will work with is defined in `gridworld_mpd.py`. In the implementation, the actions are represented by integers as, North = 0, West = 1, South = 2, and East = 3.\nTo interact with the MDP, you need to instantiate an object as: \n\n\n```python\nmdp = GridWorldMDP()\n```\n\nAt your disposal there are a number of instance-functions implemented for you, and presented below:",
"_____no_output_____"
]
],
[
[
"from gridworld_mdp import *\nimport numpy as np\n\nhelp(GridWorldMDP.get_states)",
"_____no_output_____"
],
[
"# The constructor\nhelp(GridWorldMDP.__init__)",
"_____no_output_____"
],
[
"help(GridWorldMDP.get_actions)",
"_____no_output_____"
],
[
"help(GridWorldMDP.state_transition_func)",
"_____no_output_____"
],
[
"help(GridWorldMDP.reward_function)",
"_____no_output_____"
]
],
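[
[
"To make the value-iteration update concrete before you implement it, here is a minimal, self-contained sketch on a tiny two-state MDP. This toy example is ours, not part of the assignment, and the transition and reward numbers are made up purely for illustration:\n\n```python\nimport numpy as np\n\n# Toy MDP: 2 states, 2 actions. T[s, a, s'] are transition probabilities,\n# R[s, a, s'] are rewards; both chosen arbitrarily for illustration.\nT = np.array([[[0.9, 0.1], [0.2, 0.8]],\n              [[0.0, 1.0], [1.0, 0.0]]])\nR = np.array([[[0.0, 1.0], [0.0, 1.0]],\n              [[0.0, 0.0], [5.0, 0.0]]])\ngamma = 0.9\nV = np.zeros(2)\n\nfor _ in range(100):  # sweep until (approximately) converged\n    Q = (T * (R + gamma * V)).sum(axis=2)  # Q[s, a] = sum_s' T * (R + gamma * V[s'])\n    V = Q.max(axis=1)                      # V[s] = max_a Q[s, a]\nprint(V, Q.argmax(axis=1))  # optimal values and the greedy policy\n```\n\nThe assignment's `GridWorldMDP` exposes the same ingredients through `state_transition_func` and `reward_function`, so your solution can mirror this loop over the 16 states and 4 actions.",
"_____no_output_____"
]
],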
[
[
"We also provide two helper functions for visualizing the value function and the policies you obtain:",
"_____no_output_____"
]
],
[
[
"# Function for printing a policy pi\ndef print_policy(pi):\n print('Policy for non-terminal states: ')\n indencies = np.arange(1, 16)\n txt = '| '\n hor_delimiter = '---------------------'\n print(hor_delimiter)\n for a, i in zip(pi, indencies):\n txt += mdp.act_to_char_dict[a] + ' | '\n if i % 5 == 0:\n print(txt + '\\n' + hor_delimiter)\n txt = '| '\n print(' ---')\n print('Policy for terminal state: |', mdp.act_to_char_dict[pi[15]],'|')\n print(' ---') \n\n# Function for printing a table with of the value function\ndef print_value_table(values, num_iterations=None): \n if num_iterations:\n print('Values for non-terminal states after: ', num_iterations, 'iterations \\n', np.reshape(values, [3, 5]), '\\n')\n print('Value for terminal state:', terminal_value, '\\n')\n else: \n terminal_value = values[-1]\n print('Values for non-terminal states: \\n', np.reshape(values[:-1], [3, 5]))\n print('Value for terminal state:', terminal_value, '\\n')",
"_____no_output_____"
]
],
[
[
"Now it's time for you to implement your own version of value iteration to solve for the greedy policy and $V^*(s)$.",
"_____no_output_____"
]
],
[
[
"def value_iteration(gamma, mdp):\n V = np.zeros([16]) # state value table\n Q = np.zeros([16, 4]) # state action value table\n pi = np.zeros([16]) # greedy policy table\n\n # Complete this function\n \n return V, pi",
"_____no_output_____"
]
],
[
[
"Run your implementation for the deterministic version of our MDP. As a sanity check, compare your analytical solutions with the output from your implementation.",
"_____no_output_____"
]
],
[
[
"mdp = GridWorldMDP(trans_prob=1.)\nv, pi = value_iteration(.9, mdp)\nprint_value_table(v)\nprint_policy(pi)",
"_____no_output_____"
]
],
[
[
"Once your implementation passed the sanity check, run it for the stochastic case, where the probability of an action succeding is 0.8, and 0.2 of moving the agent in an orthogonal direction to the intended. Use $\\gamma = .99$.",
"_____no_output_____"
]
],
[
[
"# Run for stochastic MDP, gamma = .99\nmdp = GridWorldMDP()\nv, pi = value_iteration(.99, mdp)\nprint_value_table(v)\nprint_policy(pi)",
"_____no_output_____"
]
],
[
[
"Does the policy that the algorithm found looks reasonable? For instance, what's the policy for state $S_8$? Is that a good idea? Why?\n\n**Your answer**: (fill in here)",
"_____no_output_____"
],
[
"Test your implementation using this function.",
"_____no_output_____"
]
],
[
[
"test_value_iteration(v, pi)",
"_____no_output_____"
]
],
[
[
"Run value iteration for the same scenario as above, but now with $\\gamma=.9$",
"_____no_output_____"
]
],
[
[
"# Run for stochastic MDP, gamma = .9\nmdp = GridWorldMDP()\nv, pi = value_iteration(.9, mdp)\nprint_value_table(v)\nprint_policy(pi)",
"_____no_output_____"
]
],
[
[
"Do you notice any difference between the greedy policy for the two different discount factors. If so, what's the difference, and why do you think this happened?",
"_____no_output_____"
],
[
"**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"## Task 2: Q-learning\n\nIn the previous task, you solved for $V^*(s)$ and the greedy policy $\\pi^*(s)$, with the entire model of the MDP being available to you. This is however not very practical since for most problems we are trying to solve, the model is not known, and estimating the model is quite often a very tedious process which often also requires a lot of simplifications. \n\n#### Q-learning algorithm\n$\n\\text{Initialize}~Q(s,a), ~ \\forall~ s \\in {\\cal S},~ a~\\in {\\cal A} \\\\\n\\textbf{Repeat}~\\text{(for each episode):}\\\\\n\\quad \\text{Initialize}~s\\\\\n\\qquad \\textbf{Repeat}~\\text{(for each step in episode):}\\\\\n\\qquad\\quad \\text{Chose $a$ from $s$ using poliy derived from $Q$ (e.g., $\\epsilon$-greedy)}\\\\\n\\qquad\\quad \\text{Take action a, observe r, s'}\\\\\n\\qquad\\quad Q(s,a) \\leftarrow Q(s,a) + \\alpha \\left(r + \\gamma~\\underset{a}{\\text{max}}~Q(s',a) - Q(s,a) \\right) \\\\\n\\qquad\\quad s \\leftarrow s' \\\\\n\\qquad \\text{Until s is terminal}\n$",
"_____no_output_____"
],
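[
"As a quick numeric illustration of the update rule (the numbers are invented for the example): suppose $Q(s,a)=2.0$, the agent receives $r=0$ and lands in $s'$ with $\\underset{a}{\\text{max}}~Q(s',a)=3.0$, and we use $\\alpha=0.1$, $\\gamma=0.9$. The new estimate is then $Q(s,a) \\leftarrow 2.0 + 0.1\\,(0 + 0.9 \\cdot 3.0 - 2.0) = 2.07$.",
"_____no_output_____"
],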
[
"### Task 2.1 Model-free control\nWhy is it that Q-learning does not require a model of the MDP to solve for it? ",
"_____no_output_____"
],
[
"**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"### Task 2.2 Implement an $\\epsilon$-greedy policy\nThe goal of the Q-learning algorithm is to find the optimal policy $\\pi^*$, by estimating the state action value function under the optimal policy, i.e. $Q^*(s, a)$. From $Q^*(s,a)$, the agent can follow $\\pi^*$, by choosing the action with that yields the largest expected value for each state, i.e. $\\underset{a}{\\text{argmax}}~Q^*(s, a)$.\n\nHowever, when training a Q-learning model, the agent typically follows another policy to explore the environment. In reinforcement learning this is known as off-policy learning. \n\nYour task is to implement a widely popular exploration policy, known as the $\\epsilon$-greedy policy, in the cell below.\n\nAn $\\epsilon$-Greedy policy should:\n* with probability $\\epsilon$ take an uniformly-random action.\n* otherwise choose the best action according to the estimated state action values.",
"_____no_output_____"
]
],
[
[
"def eps_greedy_policy(q_values, eps):\n '''\n Creates an epsilon-greedy policy\n :param q_values: set of Q-values of shape (num actions,)\n :param eps: probability of taking a uniform random action \n :return: policy of shape (num actions,)\n '''\n \n # Complete this function \n \n return policy ",
"_____no_output_____"
]
],
[
[
"Run the cell below to test your implementation",
"_____no_output_____"
]
],
[
[
"mdp = GridWorldMDP()\n\n# Test shape of output\nactions = mdp.get_actions()\nfor eps in (0, 1):\n foo = np.zeros([len(actions)])\n foo[0] = 1.\n eps_greedy = eps_greedy_policy(foo, eps)\n assert foo.shape == eps_greedy.shape, \"wrong shape of output\"\nactions = [i for i in range(10)]\nfor eps in (0, 1):\n foo = np.zeros([len(actions)])\n foo[0] = 1.\n eps_greedy = eps_greedy_policy(foo, eps)\n assert foo.shape == eps_greedy.shape, \"wrong shape of output\"\n\n# Test for greedy actions\nfor a in actions:\n foo = np.zeros([len(actions)])\n foo[a] = 1.\n eps_greedy = eps_greedy_policy(foo, 0)\n assert np.array_equal(foo, eps_greedy), \"policy is not greedy\"\n\n# Test for uniform distribution, when eps=1\neps_greedy = eps_greedy_policy(foo, 1)\nassert all(p==eps_greedy[0] for p in eps_greedy) and np.sum(eps_greedy)==1, \\\n\"policy does not return a uniform distribution for eps=1\"\n\nprint('Test passed, good job!')",
"_____no_output_____"
]
],
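[
[
"For reference, one possible way to construct such a distribution (a sketch, not necessarily the intended solution) is to spread probability mass $\\epsilon / |\\mathcal{A}|$ uniformly over all actions and add the remaining $1-\\epsilon$ to the greedy action:\n\n```python\nimport numpy as np\n\ndef eps_greedy_policy_sketch(q_values, eps):\n    n = len(q_values)\n    policy = np.full(n, eps / n)            # uniform exploration mass\n    policy[np.argmax(q_values)] += 1 - eps  # remaining mass on the greedy action\n    return policy\n```",
"_____no_output_____"
]
],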
[
[
"### Task 2.2: Implement the Q-learning algorithm\n\nNow it's time to actually implement the Q-learning algorithm. Unlike the Value iteration where there is no direct interactions with the environment, the Q-learning algorithm builds up its estimations by interacting and exploring the environment. \n\nTo enable the agent to explore the environment a set of helper functions are provided:",
"_____no_output_____"
]
],
[
[
"help(GridWorldMDP.reset)",
"_____no_output_____"
],
[
"help(GridWorldMDP.step)",
"_____no_output_____"
]
],
[
[
"Implement your version of Q-learning in the cell below. \n\n**Hint:** It might be useful to study the pseudocode provided above. ",
"_____no_output_____"
]
],
[
[
"def q_learning(eps, gamma):\n Q = np.zeros([16, 4]) # state action value table\n pi = np.zeros([16]) # greedy policy table\n alpha = .01\n \n # Complete this function\n \n return pi, Q",
"_____no_output_____"
]
],
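[
[
"If you get stuck, the episode loop can follow the pseudocode from Task 2 quite literally. A rough sketch (ours, with an arbitrary episode budget; check `help(GridWorldMDP.step)` for the exact return signature, which we assume here to be reward, next state, and terminal flag):\n\n```python\n# inside q_learning, after initializing Q and alpha and choosing gamma/eps\nfor episode in range(5000):  # arbitrary number of training episodes\n    s = mdp.reset()\n    terminal = False\n    while not terminal:\n        a = np.random.choice(4, p=eps_greedy_policy(Q[s], eps))\n        r, s_next, terminal = mdp.step(a)  # assumed signature, see help above\n        Q[s, a] += alpha * (r + gamma * np.max(Q[s_next]) - Q[s, a])\n        s = s_next\npi = np.argmax(Q, axis=1)\n```",
"_____no_output_____"
]
],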
[
[
"Run Q-learning with $\\epsilon = 1$ for the MDP with $\\gamma=0.99$",
"_____no_output_____"
]
],
[
[
"pi, Q = q_learning(1, .99)\nprint_policy(pi)",
"_____no_output_____"
]
],
[
[
"Test your implementation by running the cell below",
"_____no_output_____"
]
],
[
[
"test_q_learning(Q)",
"_____no_output_____"
]
],
[
[
"Run Q-learning with $\\epsilon=0$",
"_____no_output_____"
]
],
[
[
"pi, Q = q_learning(0, .99)\nprint_policy(pi)",
"_____no_output_____"
]
],
[
[
"You ran your implementation with $\\epsilon$ set to both 0 and 1. What are the results, and your conclusions?",
"_____no_output_____"
],
[
"**Your answer:** (fill in here)",
"_____no_output_____"
],
[
"# Task 3: Deep Double Q-learning (DDQN)\nFor this task, you will implement a DDQN (double deep Q-learning network) to solve one of the problems of the OpenAI gym. Before we get into details about these type of networks, let's first review the simpler, DQN (deep Q-learning network) version. \n\n#### Deep Q Networks\nAs we saw in the video lectures, using a neural network as a state action value approximator is a great idea. However, if one tries to use this approach with Q-learning, it's very likely that the optimization will be very unstable. To remediate this, two main ideas are used. First, we use experience replay, in order to decorrelate the experience samples we obtain when exploring the environment. Second, we use two networks instead of one, in order to fix the optimization targets. That is, for a given minibatch sampled from the replay buffer, we'll optimize the weights of only one of the networks (commonly denoted as the \"online\" network), using the gradients w.r.t a loss function. This loss function is computed as the mean squared error between the current action values, computed according to the **online** network, and the temporal difference (TD) targets, computed using the other, **fixed network** (which we'll refer to as the \"target\" network).\n\nThat is, the loss function is \n\n$$ L(\\theta) = \\frac{1}{N}\\sum_{i=1}^N \\left(Q(s_i,a_i; \\theta\\right) - Y_i)^2~,$$\n\nwhere $N$ is the number of samples in your minibatch, $Q(s,a;\\theta)$ is the state action value estimate, according to the online network (with parameters $\\theta$), and $Y_t$ is the TD target, computed as\n\n$$ Y_i = r_i + \\gamma ~\\underset{a}{\\text{max}}~Q(s_i', a; \\theta^-)~, $$\n\nwhere $Q(s', a;\\theta')$ is the action value estimate, according to the fixed network (with parameters $\\theta^-$).\n\nFinally, so that the offline parameters are also updated, we periodically change the roles of the networks, fixing the online one, and training the other.\n\n#### Double Deep Q Networks\n\nThe idea explained above works well in practice, but later it was discovered that this approach is very prone to overestimating the state action values. The main reason for this is that the max operator, used to select the greedy action when computing the TD target, uses the same values both to select and to evaluate an action (this tends to prefer overestimated actions). In order to prevent this, we can decouple the selection from the evaluation, which is the idea that created DDQN. More concretely, the TD target for a DDQN is now \n\n$$ Y_i = r_i + \\gamma Q(s_i', \\underset{a}{\\text{argmax}}Q(s_i',a;\\theta); \\theta^-)~. $$\n\nHence, we're using the **online** network to select which action is best, but we use the **fixed** network to evaluate the state action value for that chosen action in the next state. This is what makes DDQN not overestimate (as much) the state action values, which in turn helps us to train faster and obtain better policies.\n\n\n",
"_____no_output_____"
],
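[
"To see why this decoupling matters, consider a made-up numeric example: suppose the next-state estimates are $Q(s_i',\\cdot\\,;\\theta) = [1.0,\\ 2.0]$ for the online network and $Q(s_i',\\cdot\\,;\\theta^-) = [3.0,\\ 0.5]$ for the fixed one, with $r_i=1$ and $\\gamma=0.9$. Vanilla DQN bootstraps from $\\underset{a}{\\text{max}}~Q(s_i',a;\\theta^-) = 3.0$, giving $Y_i = 1 + 0.9 \\cdot 3.0 = 3.7$, whereas DDQN selects the second action with the online network and evaluates it with the fixed one, giving $Y_i = 1 + 0.9 \\cdot 0.5 = 1.45$; the noisy, overestimated value is no longer automatically picked.",
"_____no_output_____"
],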
[
"#### Environment\n\nThe problem you will solve for this task is the inverted pendulum problem. \nOn [Open AIs environment documentation](https://gym.openai.com/envs/CartPole-v0) , the following description is provided:\n\n*A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every time step that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center.*\n\n![title](./cartpole.jpg) \n\n#### Implementation\nWe'll solve this task using a DDQN. Most of the code is provided for you, in the file **ddqn_model.py**. This file contains the implementation of a neural network, which is described in the table below (feel free to experiment with different architectures).\n\n|Layer 1: units, activation | Layer 2: units, activation | Layer 3: units, activation | Cost function |\n|---------------------------|----------------------------|----------------------------|---------------|\n| 100, ReLu | 60, ReLu | number of actions, linear | MSE |\n\nThe only missing part of the code is the function that computes the TD targets for each minibatch of samples.",
"_____no_output_____"
],
[
"## Task 3.1: Calculate TD-target\n\nFor this task, you will calculate the temporal difference target used for the loss in the double Q-learning algorithm. Your implementation should follow precisely the equation defined above for the TD target of DDQNs, with one exception: when s' is terminal, the TD target for it should simply be $ Y_i = r_i$. Why is this necessary?\n\n**Your answer**: (fill in here)\n\nImplement your function in the following cell.",
"_____no_output_____"
]
],
[
[
"def calculate_td_targets(q1_batch, q2_batch, r_batch, t_batch, gamma=.99):\n '''\n Calculates the TD-target used for the loss\n : param q1_batch: Batch of Q(s', a) from online network, shape (N, num actions)\n : param q2_batch: Batch of Q(s', a) from target network, shape (N, num actions)\n : param r_batch: Batch of rewards, shape (N, 1)\n : param t_batch: Batch of booleans indicating if state, s' is terminal, shape (N, 1)\n : return: TD-target, shape (N, 1)\n '''\n \n # Complete this function\n \n return Y",
"_____no_output_____"
]
],
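[
[
"A compact vectorized way to express this (a sketch; your handling of the boolean terminal mask may differ) is to zero out the bootstrap term wherever `t_batch` marks $s'$ as terminal:\n\n```python\nimport numpy as np\n\ndef calculate_td_targets_sketch(q1_batch, q2_batch, r_batch, t_batch, gamma=.99):\n    # online network selects the action, fixed network evaluates it\n    a_star = np.argmax(q1_batch, axis=1)\n    q_eval = q2_batch[np.arange(len(a_star)), a_star].reshape(-1, 1)\n    # no bootstrapping from terminal next states: Y_i = r_i there\n    return r_batch + gamma * q_eval * (1. - t_batch.astype(np.float32))\n```",
"_____no_output_____"
]
],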
[
[
"Test your implementation by trying to solve the reinforcement learning problem for the Cartpole environment. The following cell defines the `train_loop_ddqn` function, which will be called ahead,",
"_____no_output_____"
]
],
[
[
"# Import dependencies\nimport numpy as np\nimport gym\nfrom keras.utils.np_utils import to_categorical as one_hot\nfrom collections import namedtuple\nfrom dqn_model import DoubleQLearningModel, ExperienceReplay\n\ndef train_loop_ddqn(model, env, num_episodes, batch_size=64, gamma=.94): \n Transition = namedtuple(\"Transition\", [\"s\", \"a\", \"r\", \"next_s\", \"t\"])\n eps = 1.\n eps_end = .1 \n eps_decay = .001\n R_buffer = []\n R_avg = []\n for i in range(num_episodes):\n state = env.reset() #reset to initial state\n state = np.expand_dims(state, axis=0)/2\n terminal = False # reset terminal flag\n ep_reward = 0\n q_buffer = []\n steps = 0\n while not terminal:\n env.render() # comment this line out if ou don't want to render the environment\n steps += 1\n q_values = model.get_q_values(state)\n q_buffer.append(q_values)\n policy = eps_greedy_policy(q_values.squeeze(), eps) \n action = np.random.choice(num_actions, p=policy) # sample action from epsilon-greedy policy\n new_state, reward, terminal, _ = env.step(action) # take one step in the evironment\n new_state = np.expand_dims(new_state, axis=0)/2\n \n # only use the terminal flag for ending the episode and not for training\n # if the flag is set due to that the maximum amount of steps is reached \n t_to_buffer = terminal if not steps == 200 else False\n \n # store data to replay buffer\n replay_buffer.add(Transition(s=state, a=action, r=reward, next_s=new_state, t=t_to_buffer))\n state = new_state\n ep_reward += reward\n \n # if buffer contains more than 1000 samples, perform one training step\n if replay_buffer.buffer_length > 1000:\n s, a, r, s_, t = replay_buffer.sample_minibatch(batch_size) # sample a minibatch of transitions\n q_1, q_2 = model.get_q_values_for_both_models(np.squeeze(s_))\n td_target = calculate_td_targets(q_1, q_2, r, t, gamma)\n model.update(s, td_target, a) \n \n eps = max(eps - eps_decay, eps_end) # decrease epsilon \n R_buffer.append(ep_reward)\n \n # running average of episodic rewards\n R_avg.append(.05 * R_buffer[i] + .95 * R_avg[i-1]) if i > 0 else R_avg.append(R_buffer[i])\n print('Episode: ', i, 'Reward:', ep_reward, 'Epsilon', eps, 'mean q', np.mean(np.array(q_buffer)))\n \n # if running average > 195, the task is considerd solved\n if R_avg[-1] > 195:\n return R_buffer, R_avg\n return R_buffer, R_avg",
"_____no_output_____"
]
],
[
[
"and the next cell performs the actual training. \n\nA Working implementation should start to improve after 500 episodes. An episodic reward of around 200 is likely to be achieved after 800 episodes for a batchsize of 128, and 1000 episodes for a batchsize of 64. ",
"_____no_output_____"
]
],
[
[
"# Create the environment\nenv = gym.make(\"CartPole-v0\")\n\n# Initializations\nnum_actions = env.action_space.n\nobs_dim = env.observation_space.shape[0]\n\n# Our Neural Netork model used to estimate the Q-values\nmodel = DoubleQLearningModel(state_dim=obs_dim, action_dim=num_actions, learning_rate=1e-4)\n\n# Create replay buffer, where experience in form of tuples <s,a,r,s',t>, gathered from the environment is stored \n# for training\nreplay_buffer = ExperienceReplay(state_size=obs_dim)\n\n# Train\nnum_episodes = 1200 \nbatch_size = 128 \nR, R_avg = train_loop_ddqn(model, env, num_episodes, batch_size) ",
"_____no_output_____"
],
[
"# close window\nenv.close()",
"_____no_output_____"
]
],
[
[
"According to the code above, and the code in the provided .py file, answer the following questions:\n \nWhat is the state for this problem?\n\n**Your answer**: (fill in here)\n\nWhen do we switch the networks (i.e. when does the online network become the fixed one, and vice-versa)?\n\n**Your answer**: (fill in here)",
"_____no_output_____"
],
[
"Run the cell below to visualize your final policy in an episode from this environment.",
"_____no_output_____"
]
],
[
[
"import time\nnum_episodes = 1\nenv = gym.make(\"CartPole-v0\")\n\nfor i in range(num_episodes):\n state = env.reset() #reset to initial state\n state = np.expand_dims(state, axis=0)/2\n terminal = False # reset terminal flag\n while not terminal:\n env.render()\n time.sleep(.05)\n q_values = model.get_q_values(state)\n policy = eps_greedy_policy(q_values.squeeze(), .1) # greedy policy\n action = np.random.choice(num_actions, p=policy)\n state, reward, terminal, _ = env.step(action) # take one step in the evironment\n state = np.expand_dims(state, axis=0)/2\n# close window\nenv.close();",
"_____no_output_____"
]
],
[
[
"Plot the episodic rewards obtained throughout the optimization, together with a moving average of it (since the episodic reward is usually very noisy).",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nrewards = plt.plot(R, alpha=.4, label='R')\navg_rewards = plt.plot(R_avg,label='avg R')\nplt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)\nplt.xlabel('Episode')\nplt.ylim(0, 210)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Congratulations, you have now successfully implemented the DDQN algorithm. You are encouraged to explore different problems. There are a lot of different environments ready for you to implement your algorithms in. A few of these resources are:\n* [OpenAI gym](https://github.com/openai/gym)\n* [OpenAI Universe](https://github.com/openai/universe)\n* [DeepMind Lab](https://deepmind.com/blog/open-sourcing-deepmind-lab/)\n\nThe model you implemented in this lab can be extended to solve harder problems. A good starting-point is to try to solve the Acrobot-problem, by loading the environment as \n\n**gym.make(\"Acrobot-v1\")**.\n\nThe problem might require some modifications to how you decay $\\epsilon$, but otherwise, the code you have written within this lab should be sufficient. ",
"_____no_output_____"
],
[
"### Task 3.2 Atari games\n\nA common benchmark for reinforcement learning algorithms is the old Atari games. For the Atari games, each observation consists of one screenshot of the current state of the game. Other than adding convolutional layers to your neural network, there is one more issue regarding the new input that needs to be solved. Name at least two solutions to the problem, and why it won't work without these changes. \n\nHint:\n- Imagine the game of pong. What is important for the algorithm to predict? What is the input to the algorithm? Is it possible to predict what we want from the input given?",
"_____no_output_____"
],
[
"**Your answer:** (fill in here)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7f2bdcaf65b45d6f3f46513e9fb4c3d6f40b557 | 158,754 | ipynb | Jupyter Notebook | main.ipynb | sharifchowdhury/Advanced-Lane-finding | f47d58d3f8908cb1123e8227c6e7c962da1a9bc3 | [
"MIT"
] | null | null | null | main.ipynb | sharifchowdhury/Advanced-Lane-finding | f47d58d3f8908cb1123e8227c6e7c962da1a9bc3 | [
"MIT"
] | null | null | null | main.ipynb | sharifchowdhury/Advanced-Lane-finding | f47d58d3f8908cb1123e8227c6e7c962da1a9bc3 | [
"MIT"
] | null | null | null | 21.29497 | 837 | 0.438005 | [
[
[
"## Advanced Lane Finding Project\n\nThe goals / steps of this project are the following:\n\n* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.\n* Apply a distortion correction to raw images.\n* Use color transforms, gradients, etc., to create a thresholded binary image.\n* Apply a perspective transform to rectify binary image (\"birds-eye view\").\n* Detect lane pixels and fit to find the lane boundary.\n* Determine the curvature of the lane and vehicle position with respect to center.\n* Warp the detected lane boundaries back onto the original image.\n* Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.\n\n---\n## First, I'll compute the camera calibration using chessboard images",
"_____no_output_____"
]
],
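[
[
"For reference, the calibration step wrapped by `CameraCalibrate` in `lib.py` typically boils down to OpenCV's chessboard workflow. A generic sketch follows (the 9x6 inner-corner count is our assumption about these particular calibration images):\n\n```python\nimport cv2\nimport glob\nimport numpy as np\n\n# 3D corner coordinates in the chessboard frame: (0,0,0), (1,0,0), ...\nobjp = np.zeros((6*9, 3), np.float32)\nobjp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n\nobjpoints, imgpoints = [], []  # 3D points in the world, 2D points in the image\nfor f in glob.glob('camera_cal/calibration*.jpg'):\n    gray = cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2GRAY)\n    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n    if ret:\n        objpoints.append(objp)\n        imgpoints.append(corners)\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(\n    objpoints, imgpoints, gray.shape[::-1], None, None)\n```",
"_____no_output_____"
]
],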
[
[
"import numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n%matplotlib qt\n\nimport cv2\nfrom lib import CameraCalibrate, EstimateWrapParameterWrapper, IMREAD, warp_image, BinarizeImage, fit_polynomial, search_around_poly, cal_undistort, measure_curvature_real\n## Initialize\n# initialize paths\ncalDataPath = 'camera_cal'\ntestDataPath = 'test_images'\n\n# initialize parameters\nmtx, dist = CameraCalibrate(calDataPath)\nM, Minv = EstimateWrapParameterWrapper(mtx, dist, False, True, 'straight_lines1*.jpg')\n#M = EstimateWrapParameterWrapper(mtx, dist, True, False)\n#M = EstimateWrapParameterWrapper(mtx, dist)\n\n\n\n#print(M)\n#print(Minv)\n\n",
"FALSE\n[[-6.15384615e-01 -1.37820513e+00 9.69230769e+02]\n [ 1.97716240e-16 -1.96794872e+00 8.90769231e+02]\n [ 0.00000000e+00 -2.40384615e-03 1.00000000e+00]]\n[[ 1.43118893e-01 -7.85830619e-01 5.61278502e+02]\n [-2.27373675e-16 -5.08143322e-01 4.52638436e+02]\n [-2.41886889e-19 -1.22149837e-03 1.00000000e+00]]\n"
]
],
[
[
"## And so on and so forth...",
"_____no_output_____"
]
],
[
[
"left_fit = []\nright_fit = []\nleft_fitx_old = []\nright_fitx_old = []\nind= 0\ncr=[]\npt=[]\n\n\n\ndef init():\n global left_fit\n global right_fit\n global left_fitx_old\n global right_fitx_old\n global ind\n global cr\n global pt\n left_fit = []\n right_fit = []\n left_fitx_old = []\n right_fitx_old = []\n ind= 0\n cr=[]\n pt=[]\n \n\n\ndef Recast(warped,undist,left_fitx, right_fitx, ploty):\n #global cr\n #global pt\n \n # Create an image to draw the lines on\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n ## DEBUG\n #print('pts::')\n #print( color_warp.shape )\n #print(pts)\n #cr =color_warp\n #pt = pts\n cv2.fillPoly(color_warp, np.int_(pts), (0,255, 0))\n newwarp = cv2.warpPerspective(color_warp, Minv, (color_warp.shape[1], color_warp.shape[0])) \n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n return result\n\n\ndef updateFit(old,new,x_old,x,thres= 50):\n\n delta = np.mean(np.absolute(x_old-x))\n \n if delta > thres:\n res = old\n ret_X = x_old\n else:\n res = new\n ret_X = x\n \n return res, ret_X\n\n \n\ndef process_image(frame, viz=False, name='None'):\n global left_fit\n global right_fit\n global left_fitx_old\n global right_fitx_old\n global ind\n print('In process_image2')\n \n #plt.imshow(frame)\n \n \n img = cal_undistort(frame, mtx,dist);\n\n if viz:\n cv2.imwrite('rawdata/'+name +'or.tif', cv2.cvtColor(frame,cv2.COLOR_BGR2RGB ))\n cv2.imwrite('rawdata/'+name +'or_cal.tif', cv2.cvtColor(img,cv2.COLOR_BGR2RGB ))\n \n \n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )\n #figure(ind)\n #plt.imshow(img)\n img = warp_image(img, M)\n \n\n \n #cv2.imwrite('rawdata/' + str(ind)+'im.tif', img)\n \n binaryImage = BinarizeImage(img,s_thresh=(170, 255), sx_thresh=(20, 150))\n \n \n \n \n \n #cv2.imwrite('rawdata/' + str(ind)+'msk.tif', mask*255)\n wrappedBinaryI = binaryImage[:,:,0]+ binaryImage[:,:,1]+binaryImage[:,:,2]\n wrappedBinaryI[wrappedBinaryI>0]=1\n #BI = BI*mask\n #wrappedBinaryI = BI\n \n if viz:\n cv2.imwrite('rawdata/' + name +'or_cal_wr.tif', cv2.cvtColor(img,cv2.COLOR_BGR2RGB ))\n cv2.imwrite('rawdata/' + name +'or_cal_wr_bin.tif', wrappedBinaryI*255)\n\n\n if len(left_fit) == 0:\n outIM, left_fit, right_fit = fit_polynomial(wrappedBinaryI)\n result, left_fitx, right_fitx, ploty, left_fit_new, right_fit_new, l_fit_2, r_fit_2 = search_around_poly(wrappedBinaryI, left_fit, right_fit)\n left_fitx_old = left_fitx\n right_fitx_old = right_fitx\n \n \n else:\n result, left_fitx, right_fitx, ploty, left_fit_new, right_fit_new, l_fit_2, r_fit_2 = search_around_poly(wrappedBinaryI, left_fit, right_fit)\n \n left_fit, left_fitx_old = updateFit(left_fit,left_fit_new,left_fitx_old, left_fitx, thres= 50)\n right_fit,right_fitx_old = updateFit(right_fit,right_fit_new,right_fitx_old,right_fitx, thres= 50)\n \n\n \n \n resFinal = Recast(img[:,:,0],frame,left_fitx_old, right_fitx_old, ploty)\n lrad, rrad, posdelta = measure_curvature_real(l_fit_2, r_fit_2, ploty, Minv)\n rad_str = 'Radius of Curvature = '+ str((lrad+rrad)//2 ) + '(m)' \n if posdelta<0:\n pos = 'left'\n else:\n pos = 'right'\n \n pos_str = 'Vehicle is ' + str(np.absolute( ((posdelta*100)//1)/100 ) ) + 'm ' +pos + ' of center'\n \n resFinal = cv2.putText(resFinal, rad_str, (100,100), 
cv2.FONT_HERSHEY_SIMPLEX , \n 1, (255, 255, 255) , 2, cv2.LINE_AA) \n resFinal = cv2.putText(resFinal, pos_str, (100,200), cv2.FONT_HERSHEY_SIMPLEX , \n 1, (255, 255, 255) , 2, cv2.LINE_AA) \n \n \n \n if viz:\n cv2.imwrite('rawdata/' + name + 'or_res.tif', cv2.cvtColor(resFinal,cv2.COLOR_BGR2RGB ) )\n ind+=1\n return resFinal",
"_____no_output_____"
],
[
"from moviepy.editor import VideoFileClip\n#white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\n#clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\n#temp = 'test_videos_output/solidWhiteRight.mp4'\n\nleft_fit = []\nright_fit = []\n\n#clip1 = VideoFileClip('project_video.mp4')\nclip1 = VideoFileClip('project_video.mp4')\n\n#challenge_video\n\n\nwhite_output = 'project_video_out.mp4'\n\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False) ",
"\rt: 0%| | 0/1260 [00:00<?, ?it/s, now=None]"
],
[
"## PIPELINE STARTS HEREwarp_image\nfname = testDataPath + '/' + 'test1.jpg';\n#fname = calDataPath + '/' + 'calibration1.jpg';\n\nframe = cv2.imread(fname)\nframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB )\ninit()\nres=process_image(frame, True, 'test1')\nplt.imshow(res)\n\n#for i in range(3) :\n# plt.figure(i+1)\n# plt.imshow( frame[:,:,i])\n\n",
"In process_image2\n"
],
[
"print( np.linalg.inv(M) )",
"[[-1.62500000e+00 8.92247511e+00 -6.37286629e+03]\n [ 1.85369521e-15 5.76955903e+00 -5.13934566e+03]\n [ 4.45599811e-18 1.38691323e-02 -1.13541963e+01]]\n"
],
[
" a=np.array( [( 91 ,233),\n (419 ,227),\n (410, 324),\n ( 94, 349)], 'int32')\n \n#print (a.checkVector(2, CV_32S) )\n\ncv2.fillPoly(cr, a, (0,255, 0))\n ",
"_____no_output_____"
],
[
"a=np.int_(pt)\n",
"_____no_output_____"
],
[
"\n\n\nprint(str( ((0.111 * 100)//1)/100 ))\n",
"0.11\n"
],
[
"print(cr.shape)",
"(720, 1280, 3)\n"
],
[
"print() )",
"[-730.76923077 -526.15384615 -0.73076923]\n"
],
[
"res = np.matmul(M, [550, 480 ,1])\nprint(res/res[2])",
"[200. 350. 1.]\n"
],
[
"print (range(10))\n",
"range(0, 10)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f2bf5a7084623fbf4b5f270ca4a4b27641388e | 34,767 | ipynb | Jupyter Notebook | python-sdk/tutorials/automl-with-azureml/image-object-detection/auto-ml-image-object-detection.ipynb | brynn-code/azureml-examples | e265f568654eaa9452467770b59762fb76972a57 | [
"MIT"
] | null | null | null | python-sdk/tutorials/automl-with-azureml/image-object-detection/auto-ml-image-object-detection.ipynb | brynn-code/azureml-examples | e265f568654eaa9452467770b59762fb76972a57 | [
"MIT"
] | 1 | 2021-11-05T00:49:06.000Z | 2021-11-05T00:49:06.000Z | python-sdk/tutorials/automl-with-azureml/image-object-detection/auto-ml-image-object-detection.ipynb | mutazag/azureml-examples | dbc3e5d61cc61295436f23df52552add9d7d34ce | [
"MIT"
] | null | null | null | 42.347138 | 740 | 0.623148 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.\n\n# Training an Object Detection model using AutoML\nIn this notebook, we go over how you can use AutoML for training an Object Detection model. We will use a small dataset to train the model, demonstrate how you can tune hyperparameters of the model to optimize model performance and deploy the model to use in inference scenarios. For detailed information please refer to the [documentation of AutoML for Images](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models).",
"_____no_output_____"
],
[
"![img](example_object_detection_predictions.jpg)",
"_____no_output_____"
],
[
"**Important:** This feature is currently in public preview. This preview version is provided without a service-level agreement. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/en-us/support/legal/preview-supplemental-terms/).",
"_____no_output_____"
],
[
"## Environment Setup\nPlease follow the [\"Setup a new conda environment\"](https://github.com/Azure/azureml-examples/tree/main/python-sdk/tutorials/automl-with-azureml#3-setup-a-new-conda-environment) instructions to get started.",
"_____no_output_____"
],
[
"## Workspace setup\nIn order to train and deploy models in Azure ML, you will first need to set up a workspace.\n\nAn [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#workspace) is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.\n\nCreate an Azure ML Workspace within your Azure subscription or load an existing workspace.",
"_____no_output_____"
]
],
[
[
"from azureml.core.workspace import Workspace\n\nws = Workspace.from_config()",
"_____no_output_____"
]
],
[
[
"## Compute target setup\nYou will need to provide a [Compute Target](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#computes) that will be used for your AutoML model training. AutoML models for image tasks require [GPU SKUs](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) such as the ones from the NC, NCv2, NCv3, ND, NDv2 and NCasT4 series. We recommend using the NCsv3-series (with v100 GPUs) for faster training. Using a compute target with a multi-GPU VM SKU will leverage the multiple GPUs to speed up training. Additionally, setting up a compute target with multiple nodes will allow for faster model training by leveraging parallelism, when tuning hyperparameters for your model.",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import AmlCompute, ComputeTarget\n\ncluster_name = \"gpu-cluster-nc6\"\n\ntry:\n compute_target = ws.compute_targets[cluster_name]\n print(\"Found existing compute target.\")\nexcept KeyError:\n print(\"Creating a new compute target...\")\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=\"Standard_NC6\",\n idle_seconds_before_scaledown=600,\n min_nodes=0,\n max_nodes=4,\n )\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n# Can poll for a minimum number of nodes and for a specific timeout.\n# If no min_node_count is provided, it will use the scale settings for the cluster.\ncompute_target.wait_for_completion(\n show_output=True, min_node_count=None, timeout_in_minutes=20\n)",
"_____no_output_____"
]
],
[
[
"## Experiment Setup\nCreate an [Experiment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#experiments) in your workspace to track your model training runs",
"_____no_output_____"
]
],
[
[
"from azureml.core import Experiment\n\nexperiment_name = \"automl-image-object-detection\"\nexperiment = Experiment(ws, name=experiment_name)",
"_____no_output_____"
]
],
[
[
"## Dataset with input Training Data\n\nIn order to generate models for computer vision, you will need to bring in labeled image data as input for model training in the form of an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset). You can either use a dataset that you have exported from a [Data Labeling](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-label-data) project, or create a new Tabular Dataset with your labeled training data.",
"_____no_output_____"
],
[
"In this notebook, we use a toy dataset called Fridge Objects, which consists of 128 images of 4 classes of beverage container {can, carton, milk bottle, water bottle} photos taken on different backgrounds.\n\nAll images in this notebook are hosted in [this repository](https://github.com/microsoft/computervision-recipes) and are made available under the [MIT license](https://github.com/microsoft/computervision-recipes/blob/master/LICENSE).\n\nWe first download and unzip the data locally.",
"_____no_output_____"
]
],
[
[
"import os\nimport urllib\nfrom zipfile import ZipFile\n\n# download data\ndownload_url = \"https://cvbp-secondary.z19.web.core.windows.net/datasets/object_detection/odFridgeObjects.zip\"\ndata_file = \"./odFridgeObjects.zip\"\nurllib.request.urlretrieve(download_url, filename=data_file)\n\n# extract files\nwith ZipFile(data_file, \"r\") as zip:\n print(\"extracting files...\")\n zip.extractall()\n print(\"done\")\n# delete zip file\nos.remove(data_file)",
"_____no_output_____"
]
],
[
[
"This is a sample image from this dataset:",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\n\nImage(filename=\"./odFridgeObjects/images/31.jpg\")",
"_____no_output_____"
]
],
[
[
"### Convert the downloaded data to JSONL\nIn this example, the fridge object dataset is annotated in Pascal VOC format, where each image corresponds to an xml file. Each xml file contains information on where its corresponding image file is located and also contains information about the bounding boxes and the object labels. In order to use this data to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset), we first need to convert it to the required JSONL format. Please refer to the [documentation on how to prepare datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-prepare-datasets-for-automl-images).\n\nThe following script is creating two .jsonl files (one for training and one for validation) in the parent folder of the dataset. The train / validation ratio corresponds to 20% of the data going into the validation file.",
"_____no_output_____"
]
],
[
[
"import json\nimport os\nimport xml.etree.ElementTree as ET\n\nsrc = \"./odFridgeObjects/\"\ntrain_validation_ratio = 5\n\n# Retrieving default datastore that got automatically created when we setup a workspace\nworkspaceblobstore = ws.get_default_datastore().name\n\n# Path to the annotations\nannotations_folder = os.path.join(src, \"annotations\")\n\n# Path to the training and validation files\ntrain_annotations_file = os.path.join(src, \"train_annotations.jsonl\")\nvalidation_annotations_file = os.path.join(src, \"validation_annotations.jsonl\")\n\n# sample json line dictionary\njson_line_sample = {\n \"image_url\": \"AmlDatastore://\"\n + workspaceblobstore\n + \"/\"\n + os.path.basename(os.path.dirname(src))\n + \"/\"\n + \"images\",\n \"image_details\": {\"format\": None, \"width\": None, \"height\": None},\n \"label\": [],\n}\n\n# Read each annotation and convert it to jsonl line\nwith open(train_annotations_file, \"w\") as train_f:\n with open(validation_annotations_file, \"w\") as validation_f:\n for i, filename in enumerate(os.listdir(annotations_folder)):\n if filename.endswith(\".xml\"):\n print(\"Parsing \" + os.path.join(src, filename))\n\n root = ET.parse(os.path.join(annotations_folder, filename)).getroot()\n\n width = int(root.find(\"size/width\").text)\n height = int(root.find(\"size/height\").text)\n\n labels = []\n for object in root.findall(\"object\"):\n name = object.find(\"name\").text\n xmin = object.find(\"bndbox/xmin\").text\n ymin = object.find(\"bndbox/ymin\").text\n xmax = object.find(\"bndbox/xmax\").text\n ymax = object.find(\"bndbox/ymax\").text\n isCrowd = int(object.find(\"difficult\").text)\n labels.append(\n {\n \"label\": name,\n \"topX\": float(xmin) / width,\n \"topY\": float(ymin) / height,\n \"bottomX\": float(xmax) / width,\n \"bottomY\": float(ymax) / height,\n \"isCrowd\": isCrowd,\n }\n )\n # build the jsonl file\n image_filename = root.find(\"filename\").text\n _, file_extension = os.path.splitext(image_filename)\n json_line = dict(json_line_sample)\n json_line[\"image_url\"] = json_line[\"image_url\"] + \"/\" + image_filename\n json_line[\"image_details\"][\"format\"] = file_extension[1:]\n json_line[\"image_details\"][\"width\"] = width\n json_line[\"image_details\"][\"height\"] = height\n json_line[\"label\"] = labels\n\n if i % train_validation_ratio == 0:\n # validation annotation\n validation_f.write(json.dumps(json_line) + \"\\n\")\n else:\n # train annotation\n train_f.write(json.dumps(json_line) + \"\\n\")\n else:\n print(\"Skipping unknown file: {}\".format(filename))",
"_____no_output_____"
]
],
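[
[
"For reference, each line of the resulting .jsonl file is a standalone JSON object. An illustrative line produced by the script above looks roughly as follows (the specific file name and image dimensions are made up), with bounding-box coordinates normalized to [0, 1]:\n\n```json\n{\"image_url\": \"AmlDatastore://workspaceblobstore/odFridgeObjects/images/1.jpg\",\n \"image_details\": {\"format\": \"jpg\", \"width\": 499, \"height\": 666},\n \"label\": [{\"label\": \"carton\", \"topX\": 0.2, \"topY\": 0.3, \"bottomX\": 0.7, \"bottomY\": 0.8, \"isCrowd\": 0}]}\n```\n\n(An actual JSONL record occupies a single line; it is wrapped here for readability.)",
"_____no_output_____"
]
],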
[
[
"### Convert annotation file from COCO to JSONL\nIf you want to try with a dataset in COCO format, the scripts below shows how to convert it to `jsonl` format. The file \"odFridgeObjects_coco.json\" consists of annotation information for the `odFridgeObjects` dataset.",
"_____no_output_____"
]
],
[
[
"# Generate jsonl file from coco file\n!python coco2jsonl.py \\\n--input_coco_file_path \"./odFridgeObjects_coco.json\" \\\n--output_dir \"./odFridgeObjects\" --output_file_name \"odFridgeObjects_from_coco.jsonl\" \\\n--task_type \"ObjectDetection\" \\\n--base_url \"AmlDatastore://workspaceblobstore/odFridgeObjects/images/\"",
"_____no_output_____"
]
],
[
[
"### Visualize bounding boxes\nPlease refer to the \"Visualize data\" section in the following [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-auto-train-image-models#visualize-data) to see how to easily visualize your ground truth bounding boxes before starting to train.",
"_____no_output_____"
],
[
"### Upload the JSONL file and images to Datastore\nIn order to use the data for training in Azure ML, we upload it to our Azure ML Workspace via a [Datastore](https://docs.microsoft.com/en-us/azure/machine-learning/concept-azure-machine-learning-architecture#datasets-and-datastores). The datastore provides a mechanism for you to upload/download data and interact with it from your remote compute targets. It is an abstraction over Azure Storage.",
"_____no_output_____"
]
],
[
[
"# Retrieving default datastore that got automatically created when we setup a workspace\nds = ws.get_default_datastore()\nds.upload(src_dir=\"./odFridgeObjects\", target_path=\"odFridgeObjects\")",
"_____no_output_____"
]
],
[
[
"Finally, we need to create an [AzureML Tabular Dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset) from the data we uploaded to the Datastore. We create one dataset for training and one for validation.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Dataset\nfrom azureml.data import DataType\n\n# get existing training dataset\ntraining_dataset_name = \"odFridgeObjectsTrainingDataset\"\nif training_dataset_name in ws.datasets:\n training_dataset = ws.datasets.get(training_dataset_name)\n print(\"Found the training dataset\", training_dataset_name)\nelse:\n # create training dataset\n training_dataset = Dataset.Tabular.from_json_lines_files(\n path=ds.path(\"odFridgeObjects/train_annotations.jsonl\"),\n set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n )\n training_dataset = training_dataset.register(\n workspace=ws, name=training_dataset_name\n )\n# get existing validation dataset\nvalidation_dataset_name = \"odFridgeObjectsValidationDataset\"\nif validation_dataset_name in ws.datasets:\n validation_dataset = ws.datasets.get(validation_dataset_name)\n print(\"Found the validation dataset\", validation_dataset_name)\nelse:\n # create validation dataset\n validation_dataset = Dataset.Tabular.from_json_lines_files(\n path=ds.path(\"odFridgeObjects/validation_annotations.jsonl\"),\n set_column_types={\"image_url\": DataType.to_stream(ds.workspace)},\n )\n validation_dataset = validation_dataset.register(\n workspace=ws, name=validation_dataset_name\n )\nprint(\"Training dataset name: \" + training_dataset.name)\nprint(\"Validation dataset name: \" + validation_dataset.name)",
"_____no_output_____"
]
],
[
[
"Validation dataset is optional. If no validation dataset is specified, by default 20% of your training data will be used for validation. You can control the percentage using the `split_ratio` argument - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#model-agnostic-hyperparameters) for more details.\n\nThis is what the training dataset looks like:",
"_____no_output_____"
]
],
[
[
"training_dataset.to_pandas_dataframe()",
"_____no_output_____"
]
],
[
[
"## Configuring your AutoML run for image tasks\nAutoML allows you to easily train models for Image Classification, Object Detection & Instance Segmentation on your image data. You can control the model algorithm to be used, specify hyperparameter values for your model as well as perform a sweep across the hyperparameter space to generate an optimal model. Parameters for configuring your AutoML Image run are specified using the `AutoMLImageConfig` - please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-your-experiment-settings) for the details on the parameters that can be used and their values.",
"_____no_output_____"
],
[
"When using AutoML for image tasks, you need to specify the model algorithms using the `model_name` parameter. You can either specify a single model or choose to sweep over multiple models. Please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#configure-model-algorithms-and-hyperparameters) for the list of supported model algorithms.",
"_____no_output_____"
],
[
"### Using default hyperparameter values for the specified algorithm\nBefore doing a large sweep to search for the optimal models and hyperparameters, we recommend trying the default values for a given model to get a first baseline. Next, you can explore multiple hyperparameters for the same model before sweeping over multiple models and their parameters. This allows an iterative approach, as with multiple models and multiple hyperparameters for each (as we showcase in the next section), the search space grows exponentially, and you need more iterations to find optimal configurations.\n\nIf you wish to use the default hyperparameter values for a given algorithm (say `yolov5`), you can specify the config for your AutoML Image runs as follows:",
"_____no_output_____"
]
],
[
[
"from azureml.automl.core.shared.constants import ImageTask\nfrom azureml.train.automl import AutoMLImageConfig\nfrom azureml.train.hyperdrive import GridParameterSampling, choice\n\nimage_config_yolov5 = AutoMLImageConfig(\n task=ImageTask.IMAGE_OBJECT_DETECTION,\n compute_target=compute_target,\n training_data=training_dataset,\n validation_data=validation_dataset,\n hyperparameter_sampling=GridParameterSampling({\"model_name\": choice(\"yolov5\")}),\n iterations=1,\n)",
"_____no_output_____"
]
],
[
[
"## Submitting an AutoML run for Computer Vision tasks\nOnce you've created the config settings for your run, you can submit an AutoML run using the config in order to train a vision model using your training dataset.",
"_____no_output_____"
]
],
[
[
"automl_image_run = experiment.submit(image_config_yolov5)",
"_____no_output_____"
],
[
"automl_image_run.wait_for_completion(wait_post_processing=True)",
"_____no_output_____"
]
],
[
[
"### Hyperparameter sweeping for your AutoML models for computer vision tasks\n\nIn this example, we use the AutoMLImageConfig to train an Object Detection model using `yolov5` and `fasterrcnn_resnet50_fpn`, both of which are pretrained on COCO, a large-scale object detection, segmentation, and captioning dataset that contains over 200K labeled images with over 80 label categories.\n\nWhen using AutoML for Images, you can perform a hyperparameter sweep over a defined parameter space to find the optimal model. In this example, we sweep over the hyperparameters for each algorithm, choosing from a range of values for `learning_rate`, `optimizer`, `lr_scheduler`, etc., to generate a model with the optimal primary metric. If hyperparameter values are not specified, then default values are used for the specified algorithm.\n\nWe use Random Sampling to pick samples from this parameter space and try a total of 10 iterations with these different samples, running 2 iterations at a time on our compute target, which has been previously set up using 4 nodes. Please note that the more parameters the space has, the more iterations you need to find optimal models.\n\nWe leverage the Bandit early termination policy which will terminate poor performing configs (those that are not within 20% slack of the best performing config), thus significantly saving compute resources.\n\nFor more details on model and hyperparameter sweeping, please refer to the [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters).",
"_____no_output_____"
]
],
[
[
"from azureml.automl.core.shared.constants import ImageTask\nfrom azureml.train.automl import AutoMLImageConfig\nfrom azureml.train.hyperdrive import BanditPolicy, RandomParameterSampling\nfrom azureml.train.hyperdrive import choice, uniform\n\nparameter_space = {\n \"model\": choice(\n {\n \"model_name\": choice(\"yolov5\"),\n \"learning_rate\": uniform(0.0001, 0.01),\n \"model_size\": choice(\"small\", \"medium\"), # model-specific\n #'img_size': choice(640, 704, 768), # model-specific; might need GPU with large memory\n },\n {\n \"model_name\": choice(\"fasterrcnn_resnet50_fpn\"),\n \"learning_rate\": uniform(0.0001, 0.001),\n \"optimizer\": choice(\"sgd\", \"adam\", \"adamw\"),\n \"min_size\": choice(600, 800), # model-specific\n #'warmup_cosine_lr_warmup_epochs': choice(0, 3),\n },\n ),\n}\n\ntuning_settings = {\n \"iterations\": 10,\n \"max_concurrent_iterations\": 2,\n \"hyperparameter_sampling\": RandomParameterSampling(parameter_space),\n \"early_termination_policy\": BanditPolicy(\n evaluation_interval=2, slack_factor=0.2, delay_evaluation=6\n ),\n}\n\nautoml_image_config = AutoMLImageConfig(\n task=ImageTask.IMAGE_OBJECT_DETECTION,\n compute_target=compute_target,\n training_data=training_dataset,\n validation_data=validation_dataset,\n **tuning_settings,\n)",
"_____no_output_____"
],
[
"automl_image_run = experiment.submit(automl_image_config)",
"_____no_output_____"
],
[
"automl_image_run.wait_for_completion(wait_post_processing=True)",
"_____no_output_____"
]
],
[
[
"When doing a hyperparameter sweep, it can be useful to visualize the different configurations that were tried using the HyperDrive UI. You can navigate to this UI by going to the 'Child runs' tab in the UI of the main `automl_image_run` from above, which is the HyperDrive parent run. Then you can go into the 'Child runs' tab of this HyperDrive parent run. Alternatively, here below you can see directly the HyperDrive parent run and navigate to its 'Child runs' tab:",
"_____no_output_____"
]
],
[
[
"from azureml.core import Run\n\nhyperdrive_run = Run(experiment=experiment, run_id=automl_image_run.id + \"_HD\")\nhyperdrive_run",
"_____no_output_____"
]
],
[
[
"## Register the optimal vision model from the AutoML run\nOnce the run completes, we can register the model that was created from the best run (configuration that resulted in the best primary metric)",
"_____no_output_____"
]
],
[
[
"# Register the model from the best run\n\nbest_child_run = automl_image_run.get_best_child()\nmodel_name = best_child_run.properties[\"model_name\"]\nmodel = best_child_run.register_model(\n model_name=model_name, model_path=\"outputs/model.pt\"\n)",
"_____no_output_____"
]
],
[
[
"## Deploy model as a web service\nOnce you have your trained model, you can deploy the model on Azure. You can deploy your trained model as a web service on Azure Container Instances ([ACI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-container-instance)) or Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service)). Please note that ACI only supports small models under 1 GB in size. For testing larger models or for the high-scale production stage, we recommend using AKS.\nIn this tutorial, we will deploy the model as a web service in AKS.",
"_____no_output_____"
],
[
"You will need to first create an AKS compute cluster or use an existing AKS cluster. You can use either GPU or CPU VM SKUs for your deployment cluster",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AksCompute\nfrom azureml.exceptions import ComputeTargetException\n\n# Choose a name for your cluster\naks_name = \"cluster-aks-cpu\"\n# Check to see if the cluster already exists\ntry:\n aks_target = ComputeTarget(workspace=ws, name=aks_name)\n print(\"Found existing compute target\")\nexcept ComputeTargetException:\n print(\"Creating a new compute target...\")\n # Provision AKS cluster with a CPU machine\n prov_config = AksCompute.provisioning_configuration(vm_size=\"STANDARD_D3_V2\")\n # Create the cluster\n aks_target = ComputeTarget.create(\n workspace=ws, name=aks_name, provisioning_configuration=prov_config\n )\n aks_target.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"Next, you will need to define the [inference configuration](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models#update-inference-configuration), that describes how to set up the web-service containing your model. You can use the scoring script and the environment from the training run in your inference config.\n\n<b>Note:</b> To change the model's settings, open the downloaded scoring script and modify the model_settings variable <i>before</i> deploying the model.",
"_____no_output_____"
]
],
[
[
"from azureml.core.model import InferenceConfig\n\nbest_child_run.download_file(\n \"outputs/scoring_file_v_1_0_0.py\", output_file_path=\"score.py\"\n)\nenvironment = best_child_run.get_environment()\ninference_config = InferenceConfig(entry_script=\"score.py\", environment=environment)",
"_____no_output_____"
]
],
[
[
"You can then deploy the model as an AKS web service.",
"_____no_output_____"
]
],
[
[
"# Deploy the model from the best run as an AKS web service\nfrom azureml.core.webservice import AksWebservice\nfrom azureml.core.model import Model\n\naks_config = AksWebservice.deploy_configuration(\n autoscale_enabled=True, cpu_cores=1, memory_gb=5, enable_app_insights=True\n)\n\naks_service = Model.deploy(\n ws,\n models=[model],\n inference_config=inference_config,\n deployment_config=aks_config,\n deployment_target=aks_target,\n name=\"automl-image-test-od\",\n overwrite=True,\n)\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)",
"_____no_output_____"
]
],
[
[
"## Test the web service\nFinally, let's test our deployed web service to predict new images. You can pass in any image. In this case, we'll use a random image from the dataset and pass it to the scoring URI.",
"_____no_output_____"
]
],
[
[
"import requests\n\n# URL for the web service\nscoring_uri = aks_service.scoring_uri\n\n# If the service is authenticated, set the key or token\nkey, _ = aks_service.get_keys()\n\nsample_image = \"./test_image.jpg\"\n\n# Load image data\ndata = open(sample_image, \"rb\").read()\n\n# Set the content type\nheaders = {\"Content-Type\": \"application/octet-stream\"}\n\n# If authentication is enabled, set the authorization header\nheaders[\"Authorization\"] = f\"Bearer {key}\"\n\n# Make the request and display the response\nresp = requests.post(scoring_uri, data, headers=headers)\nprint(resp.text)",
"_____no_output_____"
]
],
[
[
"## Visualize detections\nNow that we have scored a test image, we can visualize the bounding boxes for this image",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.patches as patches\nfrom PIL import Image\nimport numpy as np\nimport json\n\nIMAGE_SIZE = (18, 12)\nplt.figure(figsize=IMAGE_SIZE)\nimg_np = mpimg.imread(sample_image)\nimg = Image.fromarray(img_np.astype(\"uint8\"), \"RGB\")\nx, y = img.size\n\nfig, ax = plt.subplots(1, figsize=(15, 15))\n# Display the image\nax.imshow(img_np)\n\n# draw box and label for each detection\ndetections = json.loads(resp.text)\nfor detect in detections[\"boxes\"]:\n label = detect[\"label\"]\n box = detect[\"box\"]\n conf_score = detect[\"score\"]\n if conf_score > 0.6:\n ymin, xmin, ymax, xmax = (\n box[\"topY\"],\n box[\"topX\"],\n box[\"bottomY\"],\n box[\"bottomX\"],\n )\n topleft_x, topleft_y = x * xmin, y * ymin\n width, height = x * (xmax - xmin), y * (ymax - ymin)\n print(\n \"{}: [{}, {}, {}, {}], {}\".format(\n detect[\"label\"],\n round(topleft_x, 3),\n round(topleft_y, 3),\n round(width, 3),\n round(height, 3),\n round(conf_score, 3),\n )\n )\n\n color = np.random.rand(3) #'red'\n rect = patches.Rectangle(\n (topleft_x, topleft_y),\n width,\n height,\n linewidth=3,\n edgecolor=color,\n facecolor=\"none\",\n )\n\n ax.add_patch(rect)\n plt.text(topleft_x, topleft_y - 10, label, color=color, fontsize=20)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f2bf7415b4a14baa6bb50d4b6a5ef7dd976e5f | 6,157 | ipynb | Jupyter Notebook | content/blog/bigdata/sql/SQL_8.ipynb | psyssai/PSYda-gatsby | 75724cbaa030a014825f6e937f17c0c51fe3b04a | [
"MIT"
] | null | null | null | content/blog/bigdata/sql/SQL_8.ipynb | psyssai/PSYda-gatsby | 75724cbaa030a014825f6e937f17c0c51fe3b04a | [
"MIT"
] | null | null | null | content/blog/bigdata/sql/SQL_8.ipynb | psyssai/PSYda-gatsby | 75724cbaa030a014825f6e937f17c0c51fe3b04a | [
"MIT"
] | null | null | null | 28.637209 | 110 | 0.357317 | [
[
[
"import psycopg2\nimport pandas as pd\nfrom pandas import Series, DataFrame",
"_____no_output_____"
],
[
"connection = psycopg2.connect(database=\"01_dvdrental\", user=\"postgres\", password=\"qkrtkddus!1\")\ncursor = connection.cursor()",
"_____no_output_____"
],
[
"pd.read_sql(\"SELECT * FROM CATEGORY_IMPORT\", connection)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7f2ce2a51f6f0038d3d8a482546d4a42c0f82d3 | 4,853 | ipynb | Jupyter Notebook | adam_asmaca.ipynb | canselkundukan/bby162 | 796b10f33dfb49f4610d99d518c4411ca3ae4243 | [
"MIT"
] | null | null | null | adam_asmaca.ipynb | canselkundukan/bby162 | 796b10f33dfb49f4610d99d518c4411ca3ae4243 | [
"MIT"
] | null | null | null | adam_asmaca.ipynb | canselkundukan/bby162 | 796b10f33dfb49f4610d99d518c4411ca3ae4243 | [
"MIT"
] | 1 | 2019-04-24T13:34:58.000Z | 2019-04-24T13:34:58.000Z | 36.765152 | 231 | 0.443437 | [
[
[
"<a href=\"https://colab.research.google.com/github/canselkundukan/bby162/blob/master/adam_asmaca.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"\n__author__=\"CANSEL KUNDUKAN\"\nprint(\"ADAM ASMACA OYUNUNA HOŞGELDİNİZ...\")\nprint(\"ip ucu=Oyunumuz da ülke isimlerini bulmaya çalışıyoruz\")\nfrom random import choice\nwhile True:\n kelime = choice ([\"ispanya\", \"almanya\",\"japonya\",\"ingiltere\",\"brezilya\",\"mısır\",\"macaristan\",\"hindistan\"])\n kelime = kelime.upper()\n harfsayisi = len(kelime)\n print(\"Kelimemiz {} harflidir.\\n\".format(harfsayisi))\n tahminler = []\n hata = []\n KalanCan = 3\n while KalanCan > 0:\n bos = \"\"\n for girilenharf in kelime:\n if girilenharf in tahminler:\n bos = bos + girilenharf\n else:\n bos = bos + \" _ \"\n if bos == kelime:\n print(\"Tebrikler!\")\n break\n print(\"Kelimeyi Tahmin Ediniz\", bos)\n print(KalanCan, \"Canınız Kaldı\")\n Tahmin = input(\"Bir Harf Giriniz :\")\n Tahmin = Tahmin.upper()\n if Tahmin == kelime:\n print(\"\\n\\n Tebrikler\\n\\n\")\n break\n elif Tahmin in kelime:\n rpt = kelime.count(Tahmin)\n print(\"Dogru.{0} Harfi Kelimemiz İçerisinde {1} Kere Geçiyor\".format(Tahmin, rpt))\n tahminler.append(Tahmin)\n else:\n print(\"Yanlış.\")\n hata.append(Tahmin)\n KalanCan = KalanCan - 1\n if KalanCan == 0:\n print(\"\\n\\nHiç Hakkınız Kalmadı.\")\n print(\"Kelimemiz {}\\n\\n\".format(kelime))\n print(\"Oyundan Çıkmak İstiyorsanız\\n'X' Tuşuna Basınız\\nDevam Etmek İçin -> ENTER. \")\n devam = input(\":\")\n devam = devam.upper()\n if devam == \"X\":\n break\n else:\n continue",
"ADAM ASMACA OYUNUNA HOŞGELDİNİZ...\nip ucu=Oyunumuz da ülke isimlerini bulmaya çalışıyoruz\nKelimemiz 9 harflidir.\n\nKelimeyi Tahmin Ediniz _ _ _ _ _ _ _ _ _ \n3 Canınız Kaldı\nBir Harf Giriniz :b\nYanlış.\nKelimeyi Tahmin Ediniz _ _ _ _ _ _ _ _ _ \n2 Canınız Kaldı\nBir Harf Giriniz :a\nDogru.A Harfi Kelimemiz İçerisinde 1 Kere Geçiyor\nKelimeyi Tahmin Ediniz _ _ _ _ _ _ _ A _ \n2 Canınız Kaldı\nBir Harf Giriniz :i\nDogru.I Harfi Kelimemiz İçerisinde 2 Kere Geçiyor\nKelimeyi Tahmin Ediniz _ I _ _ I _ _ A _ \n2 Canınız Kaldı\nBir Harf Giriniz :m\nYanlış.\nKelimeyi Tahmin Ediniz _ I _ _ I _ _ A _ \n1 Canınız Kaldı\nBir Harf Giriniz :b\nYanlış.\n\n\nHiç Hakkınız Kalmadı.\nKelimemiz HINDISTAN\n\n\nOyundan Çıkmak İstiyorsanız\n'X' Tuşuna Basınız\nDevam Etmek İçin -> ENTER. \n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e7f2db342142c900684518e82dcfa079618e6021 | 404,870 | ipynb | Jupyter Notebook | truncated_regression_MWE.ipynb | drbenvincent/pymc3-demo-code | d9dc6b429382c7273c580a64cea78261df5873ef | [
"MIT"
] | 3 | 2020-03-03T18:54:05.000Z | 2020-10-12T23:44:17.000Z | truncated_regression_MWE.ipynb | drbenvincent/pymc3-demo-code | d9dc6b429382c7273c580a64cea78261df5873ef | [
"MIT"
] | null | null | null | truncated_regression_MWE.ipynb | drbenvincent/pymc3-demo-code | d9dc6b429382c7273c580a64cea78261df5873ef | [
"MIT"
] | 1 | 2020-08-14T13:11:55.000Z | 2020-08-14T13:11:55.000Z | 402.855721 | 96,516 | 0.905713 | [
[
[
"# Truncated regression: minimum working example",
"_____no_output_____"
]
],
[
[
"import numpy as np\n%config InlineBackend.figure_format = 'retina'\nimport matplotlib.pyplot as plt\nimport pymc3 as pm\nimport arviz as az",
"_____no_output_____"
],
[
"def pp_plot(x, y, trace):\n fig, ax = plt.subplots()\n # plot data\n ax.scatter(x, y)\n # plot posterior predicted... samples from posterior\n xi = np.array([np.min(x), np.max(x)])\n n_samples=100\n for n in range(n_samples):\n y_ppc = xi * trace[\"m\"][n] + trace[\"c\"][n]\n ax.plot(xi, y_ppc, \"k\", alpha=0.1, rasterized=True)\n # plot true\n ax.plot(xi, m * xi + c, \"r\", lw=3, label=\"True\")\n # plot bounds\n ax.axhline(bounds[0], c='r', ls='--')\n ax.axhline(bounds[1], c='r', ls='--')",
"_____no_output_____"
],
[
"def truncate_y(x, y, bounds):\n keep = (y >= bounds[0]) & (y <= bounds[1])\n return (x[keep], y[keep])",
"_____no_output_____"
],
[
"m, c, σ, N = 1, 0, 2, 200\nx = np.random.uniform(-10, 10, N)\ny = np.random.normal(m * x + c, σ)\nbounds = [-5, 5]",
"_____no_output_____"
],
[
"xt, yt = truncate_y(x, y, bounds)",
"_____no_output_____"
],
[
"plt.scatter(xt, yt)",
"_____no_output_____"
]
],
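  [
   [
    "# Added sketch: show how much data the truncation threw away.\nprint(f\"Kept {len(xt)} of {N} points after truncating y to {bounds}\")",
    "_____no_output_____"
   ]
  ],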
[
[
"## Linear regression of truncated data underestimates the slope",
"_____no_output_____"
]
],
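  [
   [
    "# A quick non-Bayesian sanity check, added as a sketch: an ordinary least-squares fit\n# on the truncated data already shows the attenuated slope before any MCMC is involved.\nm_ols, c_ols = np.polyfit(xt, yt, 1)\nprint(f\"OLS slope on truncated data: {m_ols:.2f} (true slope = {m})\")",
    "_____no_output_____"
   ]
  ],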
[
[
"def linear_regression(x, y):\n\n with pm.Model() as model:\n m = pm.Normal(\"m\", mu=0, sd=1)\n c = pm.Normal(\"c\", mu=0, sd=1)\n σ = pm.HalfNormal(\"σ\", sd=1)\n y_likelihood = pm.Normal(\"y_likelihood\", mu=m*x+c, sd=σ, observed=y)\n\n with model:\n trace = pm.sample()\n\n return model, trace\n\n# run the model on the truncated data (xt, yt)\nlinear_model, linear_trace = linear_regression(xt, yt)",
"/Users/benjamv/opt/anaconda3/lib/python3.8/site-packages/pymc3/sampling.py:465: FutureWarning: In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.\n warnings.warn(\nAuto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [σ, c, m]\n"
],
[
"az.plot_posterior(linear_trace, var_names=['m'], ref_val=m)",
"/Users/benjamv/opt/anaconda3/lib/python3.8/site-packages/arviz/data/io_pymc3.py:88: FutureWarning: Using `from_pymc3` without the model will be deprecated in a future release. Not using the model will return less accurate and less useful results. Make sure you use the model argument or call from_pymc3 within a model context.\n warnings.warn(\n"
],
[
"pp_plot(xt, yt, linear_trace)",
"_____no_output_____"
]
],
[
[
"## Truncated regression avoids this underestimate",
"_____no_output_____"
]
],
[
[
"def truncated_regression(x, y, bounds):\n\n with pm.Model() as model:\n m = pm.Normal(\"m\", mu=0, sd=1)\n c = pm.Normal(\"c\", mu=0, sd=1)\n σ = pm.HalfNormal(\"σ\", sd=1)\n\n y_likelihood = pm.TruncatedNormal(\n \"y_likelihood\",\n mu=m * x + c,\n sd=σ,\n observed=y,\n lower=bounds[0],\n upper=bounds[1],\n )\n \n with model:\n trace = pm.sample()\n\n return model, trace\n\n\n# run the model on the truncated data (xt, yt)\ntruncated_model, truncated_trace = truncated_regression(xt, yt, bounds)",
"WARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\n/Users/benjamv/opt/anaconda3/lib/python3.8/site-packages/pymc3/sampling.py:465: FutureWarning: In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.\n warnings.warn(\nAuto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [σ, c, m]\n"
],
[
"az.plot_posterior(truncated_trace, var_names=['m'], ref_val=m)",
"/Users/benjamv/opt/anaconda3/lib/python3.8/site-packages/arviz/data/io_pymc3.py:88: FutureWarning: Using `from_pymc3` without the model will be deprecated in a future release. Not using the model will return less accurate and less useful results. Make sure you use the model argument or call from_pymc3 within a model context.\n warnings.warn(\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. 
As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\nWARNING (theano.tensor.opt): The Op erfcx does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.\n"
],
[
"pp_plot(xt, yt, truncated_trace)",
"_____no_output_____"
],
[
"%load_ext watermark\n%watermark -n -u -v -iv -w",
"Last updated: Sun Jan 24 2021\n\nPython implementation: CPython\nPython version : 3.8.5\nIPython version : 7.19.0\n\narviz : 0.11.0\npymc3 : 3.10.0\nnumpy : 1.19.2\nmatplotlib: 3.3.2\n\nWatermark: 2.1.0\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f2f10ecd23f03f393e912b1cb9a8274617eb61 | 19,768 | ipynb | Jupyter Notebook | LeNet-5.ipynb | maxmax1992/Deep_Learning | ed12fb1aadd2fc122758d1b73a458201a3de2935 | [
"MIT"
] | null | null | null | LeNet-5.ipynb | maxmax1992/Deep_Learning | ed12fb1aadd2fc122758d1b73a458201a3de2935 | [
"MIT"
] | null | null | null | LeNet-5.ipynb | maxmax1992/Deep_Learning | ed12fb1aadd2fc122758d1b73a458201a3de2935 | [
"MIT"
] | null | null | null | 52.574468 | 130 | 0.465348 | [
[
[
"from __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K",
"_____no_output_____"
]
],
[
[
"get the minst dataset",
"_____no_output_____"
]
],
[
[
"\nbatch_size = 128\nnum_classes = 10\nepochs = 100\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(6, (5, 5), activation='relu', input_shape = input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(16, (5, 5), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(120, activation='relu'))\nmodel.add(Dense(84, activation='relu'))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])",
"x_train shape: (60000, 28, 28, 1)\n60000 train samples\n10000 test samples\n"
]
],
[
[
"Visualize the model",
"_____no_output_____"
]
],
[
[
"from IPython.display import SVG\nfrom keras.utils.vis_utils import plot_model\nplot_model(model, show_shapes=True, show_layer_names=True)",
"_____no_output_____"
]
],
[
[
"![title](./model.png)",
"_____no_output_____"
],
[
"Train the model",
"_____no_output_____"
]
],
[
[
"model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/100\n60000/60000 [==============================] - 2s - loss: 0.3232 - acc: 0.9029 - val_loss: 0.1030 - val_acc: 0.9701\nEpoch 2/100\n60000/60000 [==============================] - 1s - loss: 0.0855 - acc: 0.9744 - val_loss: 0.0740 - val_acc: 0.9774\nEpoch 3/100\n60000/60000 [==============================] - 2s - loss: 0.0620 - acc: 0.9802 - val_loss: 0.0505 - val_acc: 0.9835\nEpoch 4/100\n60000/60000 [==============================] - 2s - loss: 0.0477 - acc: 0.9847 - val_loss: 0.0426 - val_acc: 0.9853\nEpoch 5/100\n60000/60000 [==============================] - 1s - loss: 0.0397 - acc: 0.9878 - val_loss: 0.0396 - val_acc: 0.9864\nEpoch 6/100\n60000/60000 [==============================] - 2s - loss: 0.0362 - acc: 0.9884 - val_loss: 0.0385 - val_acc: 0.9876\nEpoch 7/100\n60000/60000 [==============================] - 2s - loss: 0.0284 - acc: 0.9909 - val_loss: 0.0376 - val_acc: 0.9879\nEpoch 8/100\n60000/60000 [==============================] - 2s - loss: 0.0269 - acc: 0.9912 - val_loss: 0.0330 - val_acc: 0.9894\nEpoch 9/100\n60000/60000 [==============================] - 2s - loss: 0.0240 - acc: 0.9921 - val_loss: 0.0315 - val_acc: 0.9900\nEpoch 10/100\n60000/60000 [==============================] - 2s - loss: 0.0197 - acc: 0.9935 - val_loss: 0.0352 - val_acc: 0.9883\nEpoch 11/100\n60000/60000 [==============================] - 2s - loss: 0.0174 - acc: 0.9941 - val_loss: 0.0337 - val_acc: 0.9895\nEpoch 12/100\n60000/60000 [==============================] - 2s - loss: 0.0159 - acc: 0.9947 - val_loss: 0.0352 - val_acc: 0.9894\nEpoch 13/100\n60000/60000 [==============================] - 2s - loss: 0.0139 - acc: 0.9953 - val_loss: 0.0368 - val_acc: 0.9896\nEpoch 14/100\n60000/60000 [==============================] - 1s - loss: 0.0140 - acc: 0.9954 - val_loss: 0.0314 - val_acc: 0.9909\nEpoch 15/100\n60000/60000 [==============================] - 2s - loss: 0.0117 - acc: 0.9961 - val_loss: 0.0393 - val_acc: 0.9881\nEpoch 16/100\n60000/60000 [==============================] - 2s - loss: 0.0108 - acc: 0.9963 - val_loss: 0.0395 - val_acc: 0.9894\nEpoch 17/100\n60000/60000 [==============================] - 2s - loss: 0.0098 - acc: 0.9965 - val_loss: 0.0418 - val_acc: 0.9897\nEpoch 18/100\n60000/60000 [==============================] - 2s - loss: 0.0105 - acc: 0.9965 - val_loss: 0.0430 - val_acc: 0.9881\nEpoch 19/100\n60000/60000 [==============================] - 1s - loss: 0.0076 - acc: 0.9974 - val_loss: 0.0401 - val_acc: 0.9897\nEpoch 20/100\n60000/60000 [==============================] - 1s - loss: 0.0071 - acc: 0.9975 - val_loss: 0.0427 - val_acc: 0.9890\nEpoch 21/100\n60000/60000 [==============================] - 1s - loss: 0.0088 - acc: 0.9972 - val_loss: 0.0362 - val_acc: 0.9904\nEpoch 22/100\n60000/60000 [==============================] - 1s - loss: 0.0073 - acc: 0.9977 - val_loss: 0.0449 - val_acc: 0.9886\nEpoch 23/100\n60000/60000 [==============================] - 1s - loss: 0.0082 - acc: 0.9972 - val_loss: 0.0437 - val_acc: 0.9891\nEpoch 24/100\n60000/60000 [==============================] - 1s - loss: 0.0049 - acc: 0.9983 - val_loss: 0.0361 - val_acc: 0.9908\nEpoch 25/100\n60000/60000 [==============================] - 1s - loss: 0.0050 - acc: 0.9982 - val_loss: 0.0376 - val_acc: 0.9905\nEpoch 26/100\n60000/60000 [==============================] - 2s - loss: 0.0090 - acc: 0.9969 - val_loss: 0.0546 - val_acc: 0.9871\nEpoch 27/100\n60000/60000 [==============================] - 2s - loss: 0.0047 - acc: 0.9983 - val_loss: 
0.0450 - val_acc: 0.9904\nEpoch 28/100\n60000/60000 [==============================] - 1s - loss: 0.0055 - acc: 0.9980 - val_loss: 0.0429 - val_acc: 0.9886\nEpoch 29/100\n60000/60000 [==============================] - 1s - loss: 0.0039 - acc: 0.9989 - val_loss: 0.0528 - val_acc: 0.9877\nEpoch 30/100\n60000/60000 [==============================] - 2s - loss: 0.0056 - acc: 0.9980 - val_loss: 0.0477 - val_acc: 0.9891\nEpoch 31/100\n60000/60000 [==============================] - 1s - loss: 0.0044 - acc: 0.9984 - val_loss: 0.0498 - val_acc: 0.9888\nEpoch 32/100\n60000/60000 [==============================] - 1s - loss: 0.0044 - acc: 0.9985 - val_loss: 0.0501 - val_acc: 0.9897\nEpoch 33/100\n60000/60000 [==============================] - 1s - loss: 0.0043 - acc: 0.9984 - val_loss: 0.0493 - val_acc: 0.9895\nEpoch 34/100\n60000/60000 [==============================] - 1s - loss: 0.0029 - acc: 0.9991 - val_loss: 0.0530 - val_acc: 0.9896\nEpoch 35/100\n60000/60000 [==============================] - 1s - loss: 0.0053 - acc: 0.9984 - val_loss: 0.0445 - val_acc: 0.9908\nEpoch 36/100\n60000/60000 [==============================] - 1s - loss: 0.0054 - acc: 0.9983 - val_loss: 0.0502 - val_acc: 0.9902\nEpoch 37/100\n60000/60000 [==============================] - 1s - loss: 0.0049 - acc: 0.9984 - val_loss: 0.0449 - val_acc: 0.9907\nEpoch 38/100\n60000/60000 [==============================] - 1s - loss: 0.0048 - acc: 0.9986 - val_loss: 0.0483 - val_acc: 0.9900\nEpoch 39/100\n60000/60000 [==============================] - 1s - loss: 0.0021 - acc: 0.9994 - val_loss: 0.0576 - val_acc: 0.9892\nEpoch 40/100\n60000/60000 [==============================] - 2s - loss: 0.0025 - acc: 0.9992 - val_loss: 0.0535 - val_acc: 0.9900\nEpoch 41/100\n60000/60000 [==============================] - 1s - loss: 0.0060 - acc: 0.9982 - val_loss: 0.0673 - val_acc: 0.9869\nEpoch 42/100\n60000/60000 [==============================] - 2s - loss: 0.0040 - acc: 0.9987 - val_loss: 0.0417 - val_acc: 0.9912\nEpoch 43/100\n60000/60000 [==============================] - 1s - loss: 0.0026 - acc: 0.9991 - val_loss: 0.0498 - val_acc: 0.9902\nEpoch 44/100\n60000/60000 [==============================] - 2s - loss: 0.0022 - acc: 0.9993 - val_loss: 0.0545 - val_acc: 0.9899\nEpoch 45/100\n60000/60000 [==============================] - 2s - loss: 0.0057 - acc: 0.9982 - val_loss: 0.0477 - val_acc: 0.9906\nEpoch 46/100\n60000/60000 [==============================] - 2s - loss: 0.0023 - acc: 0.9991 - val_loss: 0.0565 - val_acc: 0.9900\nEpoch 47/100\n60000/60000 [==============================] - 2s - loss: 0.0039 - acc: 0.9987 - val_loss: 0.0538 - val_acc: 0.9907\nEpoch 48/100\n60000/60000 [==============================] - 1s - loss: 0.0012 - acc: 0.9996 - val_loss: 0.0528 - val_acc: 0.9901\nEpoch 49/100\n60000/60000 [==============================] - 1s - loss: 0.0066 - acc: 0.9981 - val_loss: 0.0478 - val_acc: 0.9909\nEpoch 50/100\n60000/60000 [==============================] - 1s - loss: 0.0011 - acc: 0.9996 - val_loss: 0.0493 - val_acc: 0.9913\nEpoch 51/100\n60000/60000 [==============================] - 2s - loss: 0.0011 - acc: 0.9997 - val_loss: 0.0486 - val_acc: 0.9907\nEpoch 52/100\n60000/60000 [==============================] - 2s - loss: 0.0061 - acc: 0.9981 - val_loss: 0.0626 - val_acc: 0.9892\nEpoch 53/100\n60000/60000 [==============================] - 1s - loss: 0.0043 - acc: 0.9988 - val_loss: 0.0609 - val_acc: 0.9886\nEpoch 54/100\n60000/60000 [==============================] - 2s - loss: 0.0024 - acc: 0.9992 - val_loss: 0.0521 - val_acc: 
0.9908\nEpoch 55/100\n60000/60000 [==============================] - 2s - loss: 0.0020 - acc: 0.9994 - val_loss: 0.0532 - val_acc: 0.9915\nEpoch 56/100\n60000/60000 [==============================] - 2s - loss: 0.0025 - acc: 0.9993 - val_loss: 0.0577 - val_acc: 0.9893\nEpoch 57/100\n60000/60000 [==============================] - 2s - loss: 0.0047 - acc: 0.9985 - val_loss: 0.0550 - val_acc: 0.9896\nEpoch 58/100\n60000/60000 [==============================] - 1s - loss: 0.0026 - acc: 0.9993 - val_loss: 0.0436 - val_acc: 0.9912\nEpoch 59/100\n60000/60000 [==============================] - 2s - loss: 5.6958e-04 - acc: 0.9998 - val_loss: 0.0433 - val_acc: 0.9922\nEpoch 60/100\n60000/60000 [==============================] - 2s - loss: 4.2636e-04 - acc: 0.9999 - val_loss: 0.0440 - val_acc: 0.9922\nEpoch 61/100\n60000/60000 [==============================] - 1s - loss: 4.6596e-05 - acc: 1.0000 - val_loss: 0.0429 - val_acc: 0.9933\nEpoch 62/100\n60000/60000 [==============================] - 1s - loss: 1.4470e-05 - acc: 1.0000 - val_loss: 0.0430 - val_acc: 0.9934\nEpoch 63/100\n60000/60000 [==============================] - 1s - loss: 1.0095e-05 - acc: 1.0000 - val_loss: 0.0432 - val_acc: 0.9933\nEpoch 64/100\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7f2fcfb6657803f1037a9802a41a7b5942cceef | 72,038 | ipynb | Jupyter Notebook | code/12_CheckBestTargetSet.ipynb | menchelab/Perturbome | c93aeb2d42a1900f5060322732dd97f8eb8db7bd | [
"MIT"
] | 5 | 2019-11-15T19:58:31.000Z | 2021-12-08T19:30:10.000Z | code/12_CheckBestTargetSet.ipynb | mcaldera/Perturbome | 82c752f90f7100865c09cfea0f1fe96deffe2ed9 | [
"MIT"
] | 1 | 2020-01-06T21:23:57.000Z | 2020-01-07T14:06:21.000Z | code/12_CheckBestTargetSet.ipynb | mcaldera/Perturbome | 82c752f90f7100865c09cfea0f1fe96deffe2ed9 | [
"MIT"
] | 4 | 2019-11-26T07:34:49.000Z | 2022-02-22T06:41:43.000Z | 24.586348 | 298 | 0.466518 | [
[
[
"import networkx as nx\nimport numpy as np\nfrom matplotlib import pylab as plt\nimport os\nimport seaborn as sns\nfrom scipy.stats import mannwhitneyu as mu",
"_____no_output_____"
]
],
[
[
"### Load PPI and Targets",
"_____no_output_____"
]
],
[
[
"PPI = nx.read_gml('../data/CheckBestTargetSet/Human_Interactome.gml')",
"_____no_output_____"
]
],
[
[
"Load all the different drug targets from the various sources",
"_____no_output_____"
]
],
[
[
"#Dictionary with the CLOUD : targets\ntargets_DrugBank = {}\ntargets_DrugBank_Filtered = {}\ntargets_Pubchem = {}\ntargets_Pubchem_Filtered = {}\ntargets_Chembl = {}\ntargets_Chembl_Filtered = {}\ntargets_All_Filtered = {}\ntargets_All = {}\n\n#Get all extracted targets (with the DrugBank target split)\ntargets_only = set()\nfp = open('../data/CheckBestTargetSet/TargetSets/CLOUD_to_TargetsSplit.csv')\nfp.next()\nfor line in fp:\n tmp = line.strip().split(',')\n targets_All_Filtered[tmp[0]] = [x for x in tmp[1].split(';') if x != '']\n \n targets_only.update([x for x in tmp[1].split(';') if x != ''])\n \n targets_All[tmp[0]] = [x for x in tmp[1].split(';') if x != '']\n targets_All[tmp[0]].extend([x for x in tmp[2].split(';') if x != ''])\n targets_All[tmp[0]].extend([x for x in tmp[3].split(';') if x != ''])\n targets_All[tmp[0]].extend([x for x in tmp[4].split(';') if x != ''])\nfp.close()\n \n#\n# DRUGBANK\n#\nfp = open('../data/CheckBestTargetSet/TargetSets/CLOUD_DrugBank_Targets.csv')\nfp.next()\nfor line in fp:\n tmp = line.strip().split(',')\n targets_DrugBank[tmp[0]] = [x for x in tmp[2].split(';') if x != '']\n targets_DrugBank_Filtered[tmp[0]] = [x for x in tmp[2].split(';') if x != '' and x in targets_All_Filtered[tmp[0]]]\nfp.close()\n\n\n#\n# PUBCHEM\n#\nfp = open('../data/CheckBestTargetSet/TargetSets/CLOUD_PubChem_Targets.csv')\nfp.next()\nfor line in fp:\n tmp = line.strip().split(',')\n targets_Pubchem[tmp[0]] = [x for x in tmp[2].split(';') if x != '']\n targets_Pubchem_Filtered[tmp[0]] = [x for x in tmp[2].split(';') if x != '' and x in targets_All_Filtered[tmp[0]]]\nfp.close()\n\n#\n# CHEMBL\n#\nfp = open('../data/CheckBestTargetSet/TargetSets/CLOUD_ChEMBL_Targets.csv')\nfp.next()\nfor line in fp:\n tmp = line.strip().split(',')\n targets_Chembl[tmp[0]] =[x for x in tmp[2].split(';') if x != '']\n targets_Chembl_Filtered[tmp[0]] = [x for x in tmp[2].split(';') if x != '' and x in targets_All_Filtered[tmp[0]]]\nfp.close() \n\n \n#Make a list with all clouds\nall_Clouds = targets_All.keys()\nall_Clouds.sort()",
"_____no_output_____"
]
],
[
[
"### Calculate the various distance measurements",
"_____no_output_____"
]
],
[
[
"saved_distances = {}\n\ndef Check_Drug_Module_Diameter(PPI,targets):\n '''\n Extract the min path between targets (=Diameter)\n This is always the minimum path between one target and any other target of the same set.\n Returns Mean of all paths (d_d) as well as paths (min_paths)\n \n This function uses only one set hence calulcates the intra drug distance or drug_module diamter\n \n '''\n filtered_targets = []\n for t in targets:\n if PPI.has_node(t):\n filtered_targets.append(t)\n\n min_paths = []\n if len(filtered_targets) > 1:\n try:\n for t1 in filtered_targets:\n min_distances = []\n for t2 in filtered_targets:\n if t1 != t2:\n #print nx.shortest_path(PPI,t1,t2)\n if saved_distances.has_key(t1+','+t2):\n min_distances.append(saved_distances[t1+','+t2])\n elif saved_distances.has_key(t2+','+t1):\n min_distances.append(saved_distances[t2+','+t1])\n elif nx.has_path(PPI,t1,t2):\n dist_path_length = len(nx.shortest_path(PPI,t1,t2))-1\n min_distances.append(dist_path_length)\n saved_distances[t1+','+t2] = dist_path_length\n \n min_paths.append(min(min_distances))\n d_d = sum(min_paths)/float(len(filtered_targets))\n\n return d_d\n except:\n return \"None\"\n else:\n return 0",
"_____no_output_____"
],
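[
"# Illustrative sanity check (added example, not part of the original analysis;\n# '_toy' is a hypothetical path graph, not real PPI data):\n# on the path 'a'-'b'-'c'-'d'-'e', the module diameter of the target set\n# {'a', 'c', 'e'} should be 2.0, since every target's nearest other target\n# lies exactly two steps away.\n_toy = nx.Graph()\n_toy.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])\nprint(Check_Drug_Module_Diameter(_toy, ['a', 'c', 'e']))  # expected: 2.0",
"_____no_output_____"
],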
[
"def Check_Shortest_DistancesBetween(PPI, targets1, targets2):\n '''\n Extract the min path between targets.\n This is always the minimum path between one target and any other target of the other set.\n Returns Mean of all paths (d_d) as well as paths (min_paths)\n \n This function uses two sets hence calulcates the inter drug distance\n \n '''\n filtered_targets = []\n for t in targets1:\n if PPI.has_node(t):\n filtered_targets.append(t)\n\n filtered_targets2 = []\n for t in targets2:\n if PPI.has_node(t):\n filtered_targets2.append(t)\n\n min_paths = []\n if len(filtered_targets) >= 1 and len(filtered_targets2) >= 1:\n try:\n for t1 in filtered_targets:\n min_distances = []\n for t2 in filtered_targets2:\n # print nx.shortest_path(PPI,t1,t2)\n if saved_distances.has_key(t1+','+t2):\n min_distances.append(saved_distances[t1+','+t2])\n elif saved_distances.has_key(t2+','+t1):\n min_distances.append(saved_distances[t2+','+t1])\n elif nx.has_path(PPI,t1,t2):\n dist_path_length = len(nx.shortest_path(PPI,t1,t2))-1\n min_distances.append(dist_path_length)\n saved_distances[t1+','+t2] = dist_path_length \n if len(min_distances) != 0:\n min_paths.append(min(min_distances))\n return min_paths\n except:\n return 'None'\n else:\n return 'None'",
"_____no_output_____"
],
[
"def calculate_ClosestDistance(PPI,targets1, targets2 ):\n '''\n Add information here\n '''\n filtered_targets = []\n for t in targets1:\n if PPI.has_node(t):\n filtered_targets.append(t)\n\n filtered_targets2 = []\n for t in targets2:\n if PPI.has_node(t):\n filtered_targets2.append(t)\n \n \n distances = []\n if len(filtered_targets) > 0 and len(filtered_targets2) > 0:\n for t1 in filtered_targets:\n tmp = []\n for t2 in filtered_targets2:\n \n if saved_distances.has_key(t1+','+t2):\n tmp.append(saved_distances[t1+','+t2])\n elif saved_distances.has_key(t2+','+t1):\n tmp.append(saved_distances[t2+','+t1])\n elif nx.has_path(PPI,t1,t2):\n dist_path_length = len((nx.shortest_path(PPI, source=t1, target=t2))) - 1\n tmp.append(dist_path_length)\n saved_distances[t1+','+t2] = dist_path_length\n if len(tmp) != 0:\n distances.append(min(tmp))\n\n if len(distances) == 0:\n result = 'None'\n else:\n result = np.mean(distances)\n \n return result",
"_____no_output_____"
],
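[
"# Illustrative check of the closest-distance measure (added example on the\n# same kind of hypothetical path graph): with targets1 = {'a', 'e'} and\n# targets2 = {'c'}, each target of set 1 is 2 steps from 'c', so the mean\n# of the per-target minima is 2.0.\n_toy = nx.Graph()\n_toy.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])\nprint(calculate_ClosestDistance(_toy, ['a', 'e'], ['c']))  # expected: 2.0",
"_____no_output_____"
],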
[
"def calculate_MeanDistance(PPI,targets1, targets2 ):\n '''\n Add information here\n '''\n filtered_targets = []\n for t in targets1:\n if PPI.has_node(t):\n filtered_targets.append(t)\n\n filtered_targets2 = []\n for t in targets2:\n if PPI.has_node(t):\n filtered_targets2.append(t)\n \n\n\n\n distances = []\n for t1 in filtered_targets:\n for t2 in filtered_targets2:\n \n \n if saved_distances.has_key(t1+','+t2):\n distances.append(saved_distances[t1+','+t2])\n elif saved_distances.has_key(t2+','+t1):\n distances.append(saved_distances[t2+','+t1])\n elif nx.has_path(PPI,t1,t2):\n dist_path_length = len((nx.shortest_path(PPI, source=t1, target=t2))) - 1\n distances.append(dist_path_length)\n saved_distances[t1+','+t2] = dist_path_length\n if len(distances) > 0:\n result = np.mean(distances)\n else:\n result = 'None'\n \n return result\n",
"_____no_output_____"
]
],
[
[
"# Calculate All Distances",
"_____no_output_____"
]
],
[
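[
"# Illustrative separation sketch (added example on a hypothetical path graph):\n# the loop below computes s_AB = d_AB - (d_A + d_B)/2; negative values\n# indicate topologically overlapping drug modules.\n_toy = nx.Graph()\n_toy.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])\n_A, _B = ['a', 'c'], ['b', 'd']\n_d_A = Check_Drug_Module_Diameter(_toy, _A)\n_d_B = Check_Drug_Module_Diameter(_toy, _B)\n_d1 = Check_Shortest_DistancesBetween(_toy, _A, _B)\n_d2 = Check_Shortest_DistancesBetween(_toy, _B, _A)\n_d_AB = (sum(_d1) + sum(_d2)) / float(len(_d1) + len(_d2))\nprint(_d_AB - (_d_A + _d_B) / 2.0)  # expected: 1.0 - 2.0 = -1.0 (overlap)",
"_____no_output_____"
],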
[
"dic_target_sets = {'DrugBank':targets_DrugBank, 'PubChem':targets_Pubchem, 'Chembl':targets_Chembl,'DrugBank_Filtered':targets_DrugBank_Filtered, 'PubChem_Filtered':targets_Pubchem_Filtered, 'Chembl_Filtered':targets_Chembl_Filtered, 'All_Filtered':targets_All_Filtered, 'All':targets_All}\n\nfor key in dic_target_sets:\n print key\n \n \n #Open corresponding result file\n fp_out = open('../results/CheckBestTargetSet/'+key+'.csv','w')\n fp_out.write('Drug1,Drug2,d_A,d_B,d_AB,s_AB,AB_Min,AB_Mean\\n')\n\n #Go though all pairs\n for cloud1 in all_Clouds:\n print cloud1\n #Targets of drug A\n targets1 = dic_target_sets[key][cloud1]\n\n #Diameter of drug A\n d_A = Check_Drug_Module_Diameter(PPI, targets1)\n\n for cloud2 in all_Clouds:\n\n #only calculate the half matrix\n if cloud1 < cloud2:\n\n #targets of drug B\n targets2 = dic_target_sets[key][cloud2]\n\n #Diameter of drug B\n d_B = Check_Drug_Module_Diameter(PPI, targets2)\n\n #Min distance from A to B\n distances1 = Check_Shortest_DistancesBetween(PPI, targets1, targets2)\n #Min distance from B to A\n distances2 = Check_Shortest_DistancesBetween(PPI, targets2, targets1)\n\n\n if distances1 != \"None\" and distances2 != 'None':\n #Dab\n between_Distance = (sum(distances1)+sum(distances2))/float((len(distances1)+len(distances2)))\n else:\n between_Distance = \"None\"\n\n if d_A != \"None\" and d_B != 'None' and between_Distance != \"None\":\n #Sab\n separation = between_Distance - (d_A+d_B)/2.0\n else:\n separation = 'None'\n\n #Create AB_Min\n min_Distance = calculate_ClosestDistance(PPI, targets1, targets2)\n\n #Create AB_Mean\n mean_Distance = calculate_MeanDistance(PPI, targets1, targets2)\n\n #Save results\n fp_out.write(cloud1+','+cloud2+','+str(d_A)+','+str(d_B)+','+str(between_Distance)+','+str(separation)+','+str(min_Distance)+','+str(mean_Distance)+'\\n')\n\n fp_out.close() \n\n \n ",
"DrugBank\nCLOUD001\nCLOUD002\nCLOUD003\nCLOUD004\nCLOUD005\nCLOUD006\nCLOUD007\nCLOUD008\nCLOUD009\nCLOUD010\nCLOUD011\nCLOUD012\nCLOUD013\nCLOUD014\nCLOUD015\nCLOUD016\nCLOUD017\nCLOUD018\nCLOUD019\nCLOUD020\nCLOUD021\nCLOUD022\nCLOUD023\nCLOUD024\nCLOUD025\nCLOUD026\nCLOUD027\nCLOUD028\nCLOUD029\nCLOUD030\nCLOUD031\nCLOUD032\nCLOUD033\nCLOUD034\nCLOUD035\nCLOUD036\nCLOUD037\nCLOUD038\nCLOUD039\nCLOUD040\nCLOUD041\nCLOUD042\nCLOUD043\nCLOUD044\nCLOUD045\nCLOUD046\nCLOUD047\nCLOUD048\nCLOUD049\nCLOUD050\nCLOUD051\nCLOUD052\nCLOUD053\nCLOUD054\nCLOUD055\nCLOUD056\nCLOUD057\nCLOUD058\nCLOUD059\nCLOUD060\nCLOUD061\nCLOUD062\nCLOUD063\nCLOUD064\nCLOUD065\nCLOUD066\nCLOUD067\nCLOUD068\nCLOUD069\nCLOUD070\nCLOUD071\nCLOUD072\nCLOUD073\nCLOUD074\nCLOUD075\nCLOUD076\nCLOUD077\nCLOUD078\nCLOUD079\nCLOUD080\nCLOUD081\nCLOUD082\nCLOUD083\nCLOUD084\nCLOUD085\nCLOUD086\nCLOUD087\nCLOUD088\nCLOUD089\nCLOUD090\nCLOUD091\nCLOUD092\nCLOUD093\nCLOUD094\nCLOUD095\nCLOUD096\nCLOUD097\nCLOUD098\nCLOUD099\nCLOUD100\nCLOUD101\nCLOUD102\nCLOUD103\nCLOUD104\nCLOUD105\nCLOUD106\nCLOUD107\nCLOUD108\nCLOUD109\nCLOUD110\nCLOUD111\nCLOUD112\nCLOUD113\nCLOUD114\nCLOUD115\nCLOUD116\nCLOUD117\nCLOUD118\nCLOUD119\nCLOUD120\nCLOUD121\nCLOUD122\nCLOUD123\nCLOUD124\nCLOUD125\nCLOUD126\nCLOUD127\nCLOUD128\nCLOUD129\nCLOUD130\nCLOUD131\nCLOUD132\nCLOUD133\nCLOUD134\nCLOUD135\nCLOUD136\nCLOUD137\nCLOUD138\nCLOUD139\nCLOUD140\nCLOUD141\nCLOUD142\nCLOUD143\nCLOUD144\nCLOUD145\nCLOUD146\nCLOUD147\nCLOUD148\nCLOUD149\nCLOUD150\nCLOUD151\nCLOUD152\nCLOUD153\nCLOUD154\nCLOUD155\nCLOUD156\nCLOUD157\nCLOUD158\nCLOUD159\nCLOUD160\nCLOUD161\nCLOUD162\nCLOUD163\nCLOUD164\nCLOUD165\nCLOUD166\nCLOUD167\nCLOUD168\nCLOUD169\nCLOUD170\nCLOUD171\nCLOUD172\nCLOUD173\nCLOUD174\nCLOUD175\nCLOUD176\nCLOUD177\nCLOUD178\nCLOUD179\nCLOUD180\nCLOUD181\nCLOUD182\nCLOUD183\nCLOUD184\nCLOUD185\nCLOUD186\nCLOUD187\nCLOUD188\nCLOUD189\nCLOUD190\nCLOUD191\nCLOUD192\nCLOUD193\nCLOUD194\nCLOUD195\nCLOUD196\nCLOUD197\nCLOUD198\nCLOUD199\nCLOUD200\nCLOUD201\nCLOUD202\nCLOUD203\nCLOUD204\nCLOUD205\nCLOUD206\nCLOUD207\nCLOUD208\nCLOUD209\nCLOUD210\nCLOUD211\nCLOUD212\nCLOUD213\nCLOUD214\nCLOUD215\nCLOUD216\nCLOUD217\nCLOUD218\nCLOUD219\nCLOUD220\nCLOUD221\nCLOUD222\nCLOUD223\nCLOUD224\nCLOUD225\nCLOUD226\nCLOUD227\nCLOUD228\nCLOUD229\nCLOUD230\nCLOUD231\nCLOUD232\nCLOUD233\nCLOUD234\nCLOUD235\nCLOUD236\nCLOUD237\nCLOUD238\nCLOUD239\nCLOUD240\nCLOUD241\nCLOUD242\nCLOUD243\nCLOUD244\nCLOUD245\nCLOUD246\nCLOUD247\nCLOUD248\nCLOUD249\nCLOUD250\nCLOUD251\nCLOUD252\nCLOUD253\nCLOUD254\nCLOUD255\nCLOUD256\nCLOUD257\nCLOUD258\nCLOUD259\nCLOUD260\nCLOUD261\nCLOUD262\nCLOUD263\nCLOUD264\nCLOUD265\nCLOUD266\nCLOUD267\nPubChem\nCLOUD001\nCLOUD002\nCLOUD003\nCLOUD004\nCLOUD005\nCLOUD006\nCLOUD007\nCLOUD008\nCLOUD009\nCLOUD010\nCLOUD011\nCLOUD012\nCLOUD013\nCLOUD014\nCLOUD015\nCLOUD016\nCLOUD017\nCLOUD018\nCLOUD019\nCLOUD020\nCLOUD021\nCLOUD022\nCLOUD023\nCLOUD024\nCLOUD025\nCLOUD026\nCLOUD027\nCLOUD028\nCLOUD029\nCLOUD030\nCLOUD031\nCLOUD032\nCLOUD033\nCLOUD034\nCLOUD035\nCLOUD036\nCLOUD037\nCLOUD038\nCLOUD039\nCLOUD040\nCLOUD041\nCLOUD042\nCLOUD043\nCLOUD044\nCLOUD045\nCLOUD046\nCLOUD047\nCLOUD048\nCLOUD049\nCLOUD050\nCLOUD051\nCLOUD052\nCLOUD053\nCLOUD054\nCLOUD055\nCLOUD056\nCLOUD057\nCLOUD058\nCLOUD059\nCLOUD060\nCLOUD061\nCLOUD062\nCLOUD063\nCLOUD064\nCLOUD065\nCLOUD066\nCLOUD067\nCLOUD068\nCLOUD069\nCLOUD070\nCLOUD071\nCLOUD072\nCLOUD073\nCLOUD074\nCLOUD075\nCLOUD076\nCLOUD077\nCLOUD078\nCLOUD079\nCLOUD080\nCLOUD081\nCLOUD082\nCLOUD083\nCLOUD084\nCLOUD085\nCLOUD086\nCLOUD
087\nCLOUD088\nCLOUD089\nCLOUD090\nCLOUD091\nCLOUD092\nCLOUD093\nCLOUD094\nCLOUD095\nCLOUD096\nCLOUD097\nCLOUD098\nCLOUD099\nCLOUD100\nCLOUD101\nCLOUD102\nCLOUD103\nCLOUD104\nCLOUD105\nCLOUD106\nCLOUD107\nCLOUD108\nCLOUD109\nCLOUD110\nCLOUD111\nCLOUD112\nCLOUD113\nCLOUD114\nCLOUD115\nCLOUD116\nCLOUD117\nCLOUD118\nCLOUD119\nCLOUD120\nCLOUD121\nCLOUD122\nCLOUD123\nCLOUD124\nCLOUD125\nCLOUD126\nCLOUD127\nCLOUD128\nCLOUD129\nCLOUD130\nCLOUD131\nCLOUD132\nCLOUD133\nCLOUD134\nCLOUD135\nCLOUD136\nCLOUD137\nCLOUD138\nCLOUD139\nCLOUD140\nCLOUD141\nCLOUD142\nCLOUD143\nCLOUD144\nCLOUD145\nCLOUD146\nCLOUD147\nCLOUD148\nCLOUD149\nCLOUD150\nCLOUD151\nCLOUD152\nCLOUD153\nCLOUD154\nCLOUD155\nCLOUD156\nCLOUD157\nCLOUD158\nCLOUD159\nCLOUD160\nCLOUD161\nCLOUD162\nCLOUD163\nCLOUD164\nCLOUD165\nCLOUD166\nCLOUD167\nCLOUD168\nCLOUD169\nCLOUD170\nCLOUD171\nCLOUD172\nCLOUD173\nCLOUD174\nCLOUD175\nCLOUD176\nCLOUD177\nCLOUD178\nCLOUD179\nCLOUD180\nCLOUD181\nCLOUD182\nCLOUD183\nCLOUD184\nCLOUD185\nCLOUD186\nCLOUD187\nCLOUD188\nCLOUD189\nCLOUD190\nCLOUD191\nCLOUD192\nCLOUD193\nCLOUD194\nCLOUD195\nCLOUD196\nCLOUD197\nCLOUD198\nCLOUD199\nCLOUD200\nCLOUD201\nCLOUD202\nCLOUD203\nCLOUD204\nCLOUD205\nCLOUD206\nCLOUD207\nCLOUD208\nCLOUD209\nCLOUD210\nCLOUD211\nCLOUD212\nCLOUD213\nCLOUD214\nCLOUD215\nCLOUD216\nCLOUD217\nCLOUD218\nCLOUD219\nCLOUD220\nCLOUD221\nCLOUD222\nCLOUD223\nCLOUD224\nCLOUD225\nCLOUD226\nCLOUD227\nCLOUD228\nCLOUD229\nCLOUD230\nCLOUD231\nCLOUD232\nCLOUD233\nCLOUD234\nCLOUD235\nCLOUD236\nCLOUD237\nCLOUD238\nCLOUD239\nCLOUD240\nCLOUD241\nCLOUD242\nCLOUD243\nCLOUD244\nCLOUD245\nCLOUD246\nCLOUD247\nCLOUD248\nCLOUD249\nCLOUD250\nCLOUD251\nCLOUD252\nCLOUD253\nCLOUD254\nCLOUD255\nCLOUD256\nCLOUD257\nCLOUD258\nCLOUD259\nCLOUD260\nCLOUD261\nCLOUD262\nCLOUD263\nCLOUD264\nCLOUD265\nCLOUD266\nCLOUD267\nChembl_Filtered\nCLOUD001\nCLOUD002\nCLOUD003\nCLOUD004\nCLOUD005\nCLOUD006\nCLOUD007\nCLOUD008\nCLOUD009\nCLOUD010\nCLOUD011\nCLOUD012\nCLOUD013\nCLOUD014\nCLOUD015\nCLOUD016\nCLOUD017\nCLOUD018\nCLOUD019\nCLOUD020\nCLOUD021\nCLOUD022\nCLOUD023\nCLOUD024\nCLOUD025\nCLOUD026\nCLOUD027\nCLOUD028\nCLOUD029\nCLOUD030\nCLOUD031\nCLOUD032\nCLOUD033\nCLOUD034\nCLOUD035\nCLOUD036\nCLOUD037\nCLOUD038\nCLOUD039\nCLOUD040\nCLOUD041\nCLOUD042\nCLOUD043\nCLOUD044\nCLOUD045\nCLOUD046\nCLOUD047\nCLOUD048\nCLOUD049\nCLOUD050\nCLOUD051\nCLOUD052\nCLOUD053\nCLOUD054\nCLOUD055\nCLOUD056\nCLOUD057\nCLOUD058\nCLOUD059\nCLOUD060\nCLOUD061\nCLOUD062\nCLOUD063\nCLOUD064\nCLOUD065\nCLOUD066\nCLOUD067\nCLOUD068\nCLOUD069\nCLOUD070\nCLOUD071\nCLOUD072\nCLOUD073\nCLOUD074\nCLOUD075\nCLOUD076\nCLOUD077\nCLOUD078\nCLOUD079\nCLOUD080\nCLOUD081\nCLOUD082\nCLOUD083\nCLOUD084\nCLOUD085\nCLOUD086\nCLOUD087\nCLOUD088\nCLOUD089\nCLOUD090\nCLOUD091\nCLOUD092\nCLOUD093\nCLOUD094\nCLOUD095\nCLOUD096\nCLOUD097\nCLOUD098\nCLOUD099\nCLOUD100\nCLOUD101\nCLOUD102\nCLOUD103\nCLOUD104\nCLOUD105\nCLOUD106\nCLOUD107\nCLOUD108\nCLOUD109\nCLOUD110\nCLOUD111\nCLOUD112\nCLOUD113\nCLOUD114\nCLOUD115\nCLOUD116\nCLOUD117\nCLOUD118\nCLOUD119\nCLOUD120\nCLOUD121\nCLOUD122\nCLOUD123\nCLOUD124\nCLOUD125\nCLOUD126\nCLOUD127\nCLOUD128\nCLOUD129\nCLOUD130\nCLOUD131\nCLOUD132\nCLOUD133\nCLOUD134\nCLOUD135\nCLOUD136\nCLOUD137\nCLOUD138\nCLOUD139\nCLOUD140\nCLOUD141\nCLOUD142\nCLOUD143\nCLOUD144\nCLOUD145\nCLOUD146\nCLOUD147\nCLOUD148\nCLOUD149\nCLOUD150\nCLOUD151\nCLOUD152\nCLOUD153\nCLOUD154\nCLOUD155\nCLOUD156\nCLOUD157\nCLOUD158\nCLOUD159\nCLOUD160\nCLOUD161\nCLOUD162\nCLOUD163\nCLOUD164\nCLOUD165\nCLOUD166\nCLOUD167\nCLOUD168\nCLOUD169\nCLOUD170\nCLOUD171\nCLOUD172\nCLOUD173\nCLO
UD174\nCLOUD175\nCLOUD176\nCLOUD177\nCLOUD178\nCLOUD179\nCLOUD180\nCLOUD181\nCLOUD182\nCLOUD183\nCLOUD184\nCLOUD185\nCLOUD186\nCLOUD187\nCLOUD188\nCLOUD189\nCLOUD190\nCLOUD191\nCLOUD192\nCLOUD193\nCLOUD194\nCLOUD195\nCLOUD196\nCLOUD197\nCLOUD198\nCLOUD199\nCLOUD200\nCLOUD201\nCLOUD202\nCLOUD203\nCLOUD204\nCLOUD205\nCLOUD206\nCLOUD207\nCLOUD208\nCLOUD209\nCLOUD210\nCLOUD211\nCLOUD212\nCLOUD213\nCLOUD214\nCLOUD215\nCLOUD216\nCLOUD217\nCLOUD218\nCLOUD219\nCLOUD220\nCLOUD221\nCLOUD222\nCLOUD223\nCLOUD224\nCLOUD225\nCLOUD226\nCLOUD227\nCLOUD228\nCLOUD229\nCLOUD230\nCLOUD231\nCLOUD232\nCLOUD233\nCLOUD234\nCLOUD235\nCLOUD236\nCLOUD237\nCLOUD238\nCLOUD239\nCLOUD240\nCLOUD241\nCLOUD242\nCLOUD243\nCLOUD244\nCLOUD245\nCLOUD246\nCLOUD247\nCLOUD248\nCLOUD249\nCLOUD250\nCLOUD251\nCLOUD252\nCLOUD253\nCLOUD254\nCLOUD255\nCLOUD256\nCLOUD257\nCLOUD258\nCLOUD259\nCLOUD260\nCLOUD261\nCLOUD262\nCLOUD263\nCLOUD264\nCLOUD265\nCLOUD266\nCLOUD267\nDrugBank_Filtered\nCLOUD001\nCLOUD002\nCLOUD003\nCLOUD004\nCLOUD005\nCLOUD006\nCLOUD007\nCLOUD008\nCLOUD009\nCLOUD010\nCLOUD011\nCLOUD012\nCLOUD013\nCLOUD014\nCLOUD015\nCLOUD016\nCLOUD017\nCLOUD018\nCLOUD019\nCLOUD020\nCLOUD021\nCLOUD022\nCLOUD023\nCLOUD024\nCLOUD025\nCLOUD026\nCLOUD027\nCLOUD028\nCLOUD029\nCLOUD030\nCLOUD031\nCLOUD032\nCLOUD033\nCLOUD034\nCLOUD035\nCLOUD036\nCLOUD037\nCLOUD038\nCLOUD039\nCLOUD040\nCLOUD041\nCLOUD042\nCLOUD043\nCLOUD044\nCLOUD045\nCLOUD046\nCLOUD047\nCLOUD048\nCLOUD049\nCLOUD050\nCLOUD051\nCLOUD052\nCLOUD053\nCLOUD054\nCLOUD055\nCLOUD056\nCLOUD057\nCLOUD058\nCLOUD059\nCLOUD060\nCLOUD061\nCLOUD062\nCLOUD063\nCLOUD064\nCLOUD065\nCLOUD066\nCLOUD067\nCLOUD068\nCLOUD069\nCLOUD070\nCLOUD071\nCLOUD072\nCLOUD073\nCLOUD074\nCLOUD075\nCLOUD076\nCLOUD077\nCLOUD078\nCLOUD079\nCLOUD080\nCLOUD081\nCLOUD082\nCLOUD083\nCLOUD084\nCLOUD085\nCLOUD086\nCLOUD087\nCLOUD088\nCLOUD089\nCLOUD090\nCLOUD091\nCLOUD092\nCLOUD093\nCLOUD094\nCLOUD095\nCLOUD096\nCLOUD097\nCLOUD098\nCLOUD099\nCLOUD100\nCLOUD101\nCLOUD102\nCLOUD103\nCLOUD104\nCLOUD105\nCLOUD106\nCLOUD107\nCLOUD108\nCLOUD109\nCLOUD110\nCLOUD111\nCLOUD112\nCLOUD113\nCLOUD114\nCLOUD115\nCLOUD116\nCLOUD117\nCLOUD118\nCLOUD119\nCLOUD120\nCLOUD121\nCLOUD122\nCLOUD123\nCLOUD124\nCLOUD125\nCLOUD126\nCLOUD127\nCLOUD128\nCLOUD129\nCLOUD130\nCLOUD131\nCLOUD132\nCLOUD133\nCLOUD134\nCLOUD135\nCLOUD136\nCLOUD137\nCLOUD138\nCLOUD139\nCLOUD140\nCLOUD141\nCLOUD142\nCLOUD143\nCLOUD144\nCLOUD145\nCLOUD146\nCLOUD147\nCLOUD148\nCLOUD149\nCLOUD150\nCLOUD151\nCLOUD152\nCLOUD153\nCLOUD154\nCLOUD155\nCLOUD156\nCLOUD157\nCLOUD158\nCLOUD159\nCLOUD160\nCLOUD161\nCLOUD162\nCLOUD163\nCLOUD164\nCLOUD165\nCLOUD166\nCLOUD167\nCLOUD168\nCLOUD169\nCLOUD170\nCLOUD171\nCLOUD172\nCLOUD173\nCLOUD174\nCLOUD175\nCLOUD176\nCLOUD177\nCLOUD178\nCLOUD179\nCLOUD180\nCLOUD181\nCLOUD182\nCLOUD183\nCLOUD184\nCLOUD185\nCLOUD186\nCLOUD187\nCLOUD188\nCLOUD189\nCLOUD190\nCLOUD191\nCLOUD192\nCLOUD193\nCLOUD194\nCLOUD195\nCLOUD196\nCLOUD197\nCLOUD198\nCLOUD199\nCLOUD200\nCLOUD201\nCLOUD202\nCLOUD203\nCLOUD204\nCLOUD205\nCLOUD206\nCLOUD207\nCLOUD208\nCLOUD209\nCLOUD210\nCLOUD211\nCLOUD212\nCLOUD213\nCLOUD214\nCLOUD215\nCLOUD216\nCLOUD217\nCLOUD218\nCLOUD219\nCLOUD220\nCLOUD221\nCLOUD222\nCLOUD223\nCLOUD224\nCLOUD225\nCLOUD226\nCLOUD227\nCLOUD228\nCLOUD229\nCLOUD230\nCLOUD231\nCLOUD232\nCLOUD233\nCLOUD234\nCLOUD235\nCLOUD236\nCLOUD237\nCLOUD238\nCLOUD239\nCLOUD240\nCLOUD241\nCLOUD242\nCLOUD243\nCLOUD244\nCLOUD245\nCLOUD246\nCLOUD247\nCLOUD248\nCLOUD249\nCLOUD250\nCLOUD251\nCLOUD252\nCLOUD253\nCLOUD254\nCLOUD255\nCLOUD256\nCLOUD257\nCLOUD258\nCLOUD259\nCLOUD260\
nCLOUD261\nCLOUD262\nCLOUD263\nCLOUD264\nCLOUD265\nCLOUD266\nCLOUD267\nChembl\nCLOUD001\n...\nCLOUD267\nPubChem_Filtered\nCLOUD001\n...\nCLOUD267\nAll_Filtered\nCLOUD001\n...\nCLOUD267\nAll\nCLOUD001\n...\nCLOUD267\n(each target set printed the identical CLOUD001-CLOUD267 listing verbatim; the repeats are abridged here)\n"
]
],
[
[
"## Calculate the different metrics for the different target sets\nTargetSets: All, Chembl, PubChem, DrugBank (all associations and target only filtered) \nMetrics: S_AB, D_AB, Min_AB and Mean_AB",
"_____no_output_____"
]
],
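[
[
"# Reference sketch added for clarity. The exact formulas behind the csv columns are an\n# assumption here, following the standard network-separation measures (Menche et al. 2015):\n# Min_AB / Mean_AB = min / mean over all target-pair shortest-path distances,\n# D_AB = mean distance of each target to the closest target of the other drug, and\n# S_AB = D_AB - (D_AA + D_BB)/2 (negative values suggest overlapping target modules).\nimport numpy as np\nimport networkx as nx\n\ndef _closest(G, A, B, exclude_self=False):\n    out = []\n    for a in A:\n        cands = [b for b in B if not (exclude_self and b == a)]\n        out.append(min(nx.shortest_path_length(G, a, b) for b in cands))\n    return out\n\ndef s_ab(G, A, B):\n    d_AB = np.mean(_closest(G, A, B) + _closest(G, B, A))\n    d_AA = np.mean(_closest(G, A, A, exclude_self=True))\n    d_BB = np.mean(_closest(G, B, B, exclude_self=True))\n    return d_AB - (d_AA + d_BB) / 2.0",
"_____no_output_____"
]
],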
[
[
"#network = nx.read_gml('../data/Check_Features/DrugPairFeature_Files/DPI_iS3_pS7_abMAD2_gP100/Networks/DPI_Network_CoreToPeriphery.gml')",
"_____no_output_____"
],
[
"targetLists = [f for f in os.listdir('../results/CheckBestTargetSet/') if os.path.isfile(os.path.join('../results/CheckBestTargetSet/', f)) and '.csv' in f]\ndistance_metric = {'D_AB':4, 'S_AB':5, 'Min_AB':6, 'Mean_AB':7}\ninteraction_colors = {'Increasing':'#ACD900','Decreasing':'#F70020','Emergent':'#0096FF','All':'grey'}\nnetwork_parts = ['Complete','Core','CoreToPeriphery','Periphery']\n\nfor part in network_parts:\n print part\n network = nx.read_gml('../data/CheckBestTargetSet/DrugPairFeature_Files/DPI_iS3_pS7_abMAD2_gP100/Networks/DPI_Network_'+part+'.gml')\n\n \n #create the directory if not existing\n directory = os.path.dirname('../results/CheckBestTargetSet/Results/'+part +'/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n \n fp_out = open('../results/CheckBestTargetSet/Results/'+part+'/StatisticResult.csv','w')\n fp_out.write('Metric,TargetSet,Type1,Type2,Foldchange,Pvalue,IsSignificant\\n')\n\n #Go through all metrics and target sets\n print 'Calculate Metrics:'\n for metric in distance_metric.keys():\n\n for targetList in targetLists:\n\n\n #check if S_AB (as only sab has negative values)\n if metric != 'S_AB':\n distance_cutoffs = [5,4,3,2,1,0]\n else:\n distance_cutoffs = [3.5,2.5,1.5,0.5,-0.5,-1.5]\n\n\n #remove .csv from file name\n targetName = targetList.split('.')[0]\n\n #create the directory if not existing\n directory = os.path.dirname('../results/CheckBestTargetSet/Results/'+part +'/'+ targetName + '/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n #create a dictionary with the respective distance for a given drug pair\n #all values contains all durg pair values (needed for normalization later)\n all_values = []\n fp = open('../results/CheckBestTargetSet/' + targetList,'r')\n fp.next()\n drugpairs = {}\n for line in fp:\n tmp = line.strip().split(',')\n value = tmp[distance_metric[metric]]\n #print tmp\n\n drugpairs[tmp[0]+','+tmp[1]] = value\n drugpairs[tmp[1]+','+tmp[0]] = value\n\n if value != \"None\":\n all_values.append(float(value))\n\n #Split info into the various interaction types\n interaction_types = ['Increasing','Decreasing','Emergent','All']\n interaction_type_results = {}\n for it in interaction_types:\n \n #binarize the data into the correspodning bins; normalize is used to later take care of the fact that most interaction have a distance around 2\n results = {}\n to_normalize = {}\n interaction_type_results[it] = []\n \n #Go through the cutoffs\n for i in range(1, len(distance_cutoffs)):\n\n #this will contain the actual results; integer later number of interaction within this distance\n results[distance_cutoffs[i]] = 0\n \n #get the corresponding results\n to_normalize[distance_cutoffs[i]] = len([x for x in all_values if x < distance_cutoffs[i-1] and x >= distance_cutoffs[i]]) \n\n \n #Go though all edges of the certain network and add to bin if existing\n for edge in network.edges():\n for key in network[edge[0]][edge[1]]:\n\n if network[edge[0]][edge[1]][key]['Type'] != it and it != 'All' :\n continue\n\n\n value = drugpairs.get(edge[0]+','+edge[1],'None')\n if value != \"None\":\n value = float(value)\n interaction_type_results[it].append(value)\n if value >= distance_cutoffs[i] and value < distance_cutoffs[i-1]:\n results[distance_cutoffs[i]] += 1\n\n\n \n '''\n PLOT OUTPUT\n '''\n \n sorted_distance_cutOffs = list(distance_cutoffs)\n sorted_distance_cutOffs.sort()\n\n #PLOT THE INDIVDIUAL BAR PLOT WITH X-AXIS = PPI DISTANCE AND Y-AXIS FREQUENCY\n plt.bar([i for i in sorted_distance_cutOffs[:-1] if 
to_normalize[i] != 0],[results[i]/float(to_normalize[i]) for i in sorted_distance_cutOffs[:-1] if to_normalize[i] != 0], color=interaction_colors[it])\n plt.xlabel('PPI ' + metric)\n plt.ylabel('Percent of all drug pairs within this distance')\n plt.savefig('../results/CheckBestTargetSet/Results/'+part+'/' + targetName + '/'+metric+'_'+it+'_PPI_Distances.pdf', bbox_inches = \"tight\")\n plt.close()\n #plt.show()\n\n\n #quick bug solution (only happens once in the periphery part and not important)\n if len(interaction_type_results['Decreasing']) == 0:\n interaction_type_results['Decreasing'].append(2)\n \n #PLOT A BOX PLOT WITH THE VARIOUS INTERACTION TYPES AS DIFFERENCE\n bplot = sns.boxplot(data=[all_values,interaction_type_results['All'],interaction_type_results['Increasing'],interaction_type_results['Decreasing'],interaction_type_results['Emergent']],orient='h', showfliers = False)\n\n interaction_types_2 = ['All','Interacting','Increasing','Decreasing','Emergent']\n interaction_colors_2 = ['grey','#F8B301','#ACD900','#F70020','#0096FF']\n color_dict = dict(zip(interaction_types_2, interaction_colors_2))\n for i in range(0,5):\n mybox = bplot.artists[i]\n mybox.set_facecolor(color_dict[interaction_types_2[i]])\n\n\n interaction_type_results['AllPairs'] = all_values\n for key1 in interaction_type_results:\n for key2 in interaction_type_results:\n if key1 > key2:\n pval = mu(interaction_type_results[key2],interaction_type_results[key1])[1]\n is_significant = pval < 0.05\n foldchange = np.mean(interaction_type_results[key2])/np.mean(interaction_type_results[key1])\n fp_out.write(metric+','+targetName+','+key1+',' +key2 +','+str(foldchange)+',' + str(pval)+','+str(is_significant) + '\\n')\n\n\n plt.yticks(range(0,5),['All','Interacting','Increasing','Decreasing','Emergent'])\n plt.ylabel('Interaction Type')\n plt.tick_params(axis = 'y', which = 'major', labelsize = 5)\n plt.xlabel(metric)\n plt.savefig('../results/CheckBestTargetSet/Results/'+part +'/'+ targetName + '/'+metric+'_InteractionDifference.pdf', bbox_inches = \"tight\")\n plt.close()\n fp_out.close()\n print 'Done'",
"Complete\nCalculate Metrics:\nDone\nCore\nCalculate Metrics:\nDone\nCoreToPeriphery\nCalculate Metrics:\nDone\nPeriphery\nCalculate Metrics:\nDone\n"
]
],
[
[
"## Analyse the result file",
"_____no_output_____"
]
],
[
[
"interaction_types = ['Increasing','Decreasing','Emergent']\nnetwork_parts = ['Complete','Core','CoreToPeriphery','Periphery']\n\n\nfor part in network_parts:\n print part\n results = {}\n\n fp = open('../results/CheckBestTargetSet/Results/'+part+'/StatisticResult.csv','r')\n fp.next()\n for line in fp:\n tmp = line.strip().split(',')\n\n if results.has_key(tmp[0]) == False:\n results[tmp[0]] = {}\n\n if results[tmp[0]].has_key(tmp[1]) == False:\n results[tmp[0]][tmp[1]] = 0\n\n if tmp[2] in interaction_types and tmp[3] in interaction_types:\n if tmp[6] == 'True':\n results[tmp[0]][tmp[1]] += 1\n #print tmp\n for metric in results:\n print '\\t' + metric\n for targetSet in results[metric]:\n if results[metric][targetSet] == 3:\n print '\\t\\t' + targetSet",
"Complete\n\tMin_AB\n\t\tDrugBank_Filtered\n\tMean_AB\n\t\tPubChem_Filtered\n\tD_AB\n\t\tChembl_Filtered\n\t\tChembl\n\t\tPubChem_Filtered\n\tS_AB\n\t\tPubChem\nCore\n\tMin_AB\n\t\tDrugBank\n\tMean_AB\n\t\tChembl_Filtered\n\t\tChembl\n\tD_AB\n\tS_AB\nCoreToPeriphery\n\tMin_AB\n\t\tAll_Filtered\n\t\tAll\n\t\tDrugBank\n\t\tPubChem\n\t\tChembl_Filtered\n\t\tChembl\n\t\tPubChem_Filtered\n\tMean_AB\n\t\tAll_Filtered\n\t\tAll\n\t\tPubChem\n\t\tChembl_Filtered\n\t\tChembl\n\t\tPubChem_Filtered\n\tD_AB\n\t\tAll_Filtered\n\t\tAll\n\t\tPubChem\n\t\tChembl_Filtered\n\t\tDrugBank_Filtered\n\t\tChembl\n\t\tPubChem_Filtered\n\tS_AB\n\t\tChembl_Filtered\n\t\tChembl\nPeriphery\n\tMin_AB\n\tMean_AB\n\t\tAll_Filtered\n\t\tDrugBank\n\t\tPubChem\n\t\tDrugBank_Filtered\n\t\tChembl\n\tD_AB\n\t\tAll_Filtered\n\tS_AB\n\t\tDrugBank\n\t\tPubChem\n\t\tPubChem_Filtered\n"
]
],
[
[
"### Plot S_AB distribution",
"_____no_output_____"
]
],
[
[
"import seaborn as sns",
"_____no_output_____"
],
[
"targetLists = [f for f in os.listdir('../results/Check_Features/CheckBestTargetSet/') if os.path.isfile(os.path.join('../results/Check_Features/CheckBestTargetSet/', f)) and '.csv' in f]\ndistance_metric = {'D_AB':4, 'S_AB':5, 'Min_AB':6, 'Mean_AB':7}\n\n\nmetric = 'S_AB'\nfor targetList in targetLists:\n fp = open('../results/Check_Features/CheckBestTargetSet/' + targetList,'r')\n fp.next()\n \n all_values = []\n for line in fp:\n tmp = line.strip().split(',')\n value = tmp[distance_metric[metric]]\n\n\n if value != \"None\":\n all_values.append(float(value))\n \n print np.mean(all_values)\n\n plt.title(targetList.split('.')[0])\n #plt.yscale('log')\n #\n plt.fill([0, 0, max(all_values), max(all_values)], [0, 0.625, 0.625, 0], color='lightgrey', alpha=0.4)\n plt.hist(all_values,bins=12, density= True, color='#40B9D4',edgecolor=\"#40B9D4\", linewidth=0.0, alpha=0.5)\n \n plt.xlabel('S_AB')\n plt.ylabel('Frequency')\n #plt.ylim([0.00000001,1])\n #plt.yscale('log', nonposy='clip')\n #plt.xscale('log')\n #plt.show()\n plt.yscale('log')\n plt.savefig('../results/Check_Features/CheckBestTargetSet/Results/S_AB_Distributions/'+targetList.split('.')[0]+'.pdf', format = 'pdf', dpi=800)\n plt.close()",
"0.6722009834273841\n1.3609922810737909\n0.6663973106768771\n1.4210949885061646\n0.515554244097155\n0.6616415751265295\n0.2801638381785182\n1.4125882193782637\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f2fd7bbd5408f9863b654325e2e6bd2e60fe4e | 19,844 | ipynb | Jupyter Notebook | Rules Validation.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | Rules Validation.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | Rules Validation.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | 30.066667 | 287 | 0.510935 | [
[
[
"import re\nfrom ast import literal_eval\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\nimport ast\nimport collections\nimport math\n# Gensim\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\n#scipy\nimport scipy \nfrom sklearn.cluster import KMeans\nfrom collections import defaultdict\n\nfrom urduhack.preprocess import remove_punctuation\nfrom urduhack import stop_words\nfrom urduhack import tokenization as tok\nfrom urduhack import preprocess\nfrom urduhack import utils\nfrom urduhack import normalization as norm\nfrom utilities import words as urduwords\nfrom urduhack import stop_words\nfrom sklearn import metrics\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport os\nfrom kneed import KneeLocator\nfrom sklearn.datasets import fetch_20newsgroups\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport CRFTagger\n# NLTK Stop words\n# Extract noun chunks from corpus\nimport math\n\n\nstopwords = list(stop_words.STOP_WORDS)\n",
"_____no_output_____"
],
[
"#from SNgramExtractor import SNgramExtractor\nimport stanza\nimport spacy_stanza",
"C:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\nC:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\nC:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\nC:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\nC:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\nC:\\Users\\Zarmeen\\Anaconda2\\envs\\py36\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
"# Initialize the pipeline\nnlp = spacy_stanza.load_pipeline(\"ur\")",
"2021-10-25 13:37:32 INFO: Loading these models for language: ur (Urdu):\n=======================\n| Processor | Package |\n-----------------------\n| tokenize | udtb |\n| pos | udtb |\n| lemma | udtb |\n| depparse | udtb |\n=======================\n\n2021-10-25 13:37:32 INFO: Use device: cpu\n2021-10-25 13:37:32 INFO: Loading: tokenize\n2021-10-25 13:37:32 INFO: Loading: pos\n2021-10-25 13:37:33 INFO: Loading: lemma\n2021-10-25 13:37:33 INFO: Loading: depparse\n2021-10-25 13:37:34 INFO: Done loading processors!\n"
],
[
"x=\"اداکاری کرنا\"\ndoc = nlp(x)\nfor token in doc:\n print(token.text, token.lemma_, token.pos_, token.dep_, token.morph)",
"اداکاری اداکاری NOUN compound Case=Nom|Gender=Fem|Number=Sing|Person=3\nکرنا کر VERB root VerbForm=Inf|Voice=Act\n"
],
[
"x='انعقاد احتیاطی تدابیر اپنا کر ڈینگی پر قابو'\ndoc = nlp(x)\nfor token in doc:\n print(token.text, token.lemma_, token.pos_, token.dep_,)",
"انعقاد انعقاد NOUN obj\nاحتیاطی احتیاطی ADJ amod\nتدابیر تدابیر NOUN obj\nاپنا اپنا VERB advcl\nکر کر AUX aux\nڈینگی ڈینگی NOUN root\nپر پر ADP case\nقابو قابو NOUN punct\n"
],
[
"tagged = CRFTagger.pos_tag(x)\nwords,tgs = get_words_from_tags(tagged)\n ",
"_____no_output_____"
],
[
"tgs",
"_____no_output_____"
],
[
"def pos_regex_matches(text):\n \"\"\"\n \n * pattern1: r'<ADJ>?<NOUN|PROPNOUN>+ <AUX>?'\n * pattern2: r'(<ADJ>?<NOUN|PROPNOUN>)+ <AUX>?'\n \n \"\"\"\n #doc = nlp(text)\n #mytokens = []\n #for token in doc:\n # mytokens.append(token.pos_)\n \n #tags = ' ' + ' '.join(mytokens)\n #tagged = CRFTagger.pos_tag(text)\n #words,tgs = get_words_from_tags(tagged)\n #tags = \" \".join(tgs).strip()\n #words=text.split()\n tags = text\n print(tags)\n #pattern = r'(<ADV>?<ADJ>*<NOUN|PROPN><ADP>?)+<AUX>?'\n #pattern = r'(<ADV>*<ADJ>*<NOUN|PROPN>+<AUX>?)'\n #pattern = r'<ADV|ADJ>* <NOUN|PROPN>* <ADP>* <NOUN|PROPN>'\n pattern = r'<ADJ>*'\n \n #pattern =r'<DET>? (<NOUN>+ <ADP|CONJ>)* <NOUN>+'\n # standardize and transform the regular expression pattern...\n pattern = re.sub(r'\\s', '', pattern)\n pattern = re.sub(r'<([A-Z]+)\\|([A-Z]+)>', r'( (\\1|\\2))', pattern)\n pattern = re.sub(r'<([A-Z]+)>', r'( \\1)', pattern)\n sx = re.compile(pattern)\n #print(pattern)\n \n #mo = re.search(pattern,tags)\n #print(mo.groups())\n print(sx.match(tags))\n #matched = re.match(pattern, tags)\n #is_match = bool(matched)\n\n #print(is_match)\n #return is_match\n for m in re.finditer(pattern, tags):\n print(m.start())\n yield words[tags[0:m.start()].count(' '):tags[0:m.end()].count(' ')]\n\ndef get_words_from_tags(postags):\n words = []\n tags = []\n for u,v in postags:\n words.append(u)\n tags.append(v)\n return words,tags\ndef check_ngram_from_CRFPOS(ngram):\n tags = CRFTagger.pos_tag(ngram)\n words,tgs = get_words_from_tags(tags)\n mytokens = []\n for token in tgs:\n mytokens.append(token)\n print(\" \".join(mytokens))\n correct = True\n if tgs[0] not in ['NN','PN','ADJ']:\n correct = False\n elif (tgs[0] == 'NN' or tgs[0] == 'PN') and (tgs[1]== 'ADJ') and (tgs[2] == 'NN' or tgs[2] == 'PN'):\n correct = False\n elif tgs[len(tgs)-1] not in ['NN','PN','AA']:\n correct = False\n else:\n correct = True\n return correct \ndef check_ngram_from_stanza(ngram):\n doc = nlp(ngram)\n mytokens = []\n for token in doc:\n mytokens.append(token.pos_)\n print(\" \".join(mytokens))\n correct = True\n if doc[0].pos_ not in ['NOUN','PROPN','ADJ']:\n correct = False\n elif (doc[0].pos_ == 'NOUN' or doc[0].pos_ == 'PROPN') and (doc[1].pos_== 'ADJ') and (doc[2].pos_ == 'NOUN' or doc[2].pos_ == 'PROPN'):\n correct = False\n elif doc[len(doc)-1].pos_ not in ['NOUN','PROPN','AUX']:\n correct = False\n else:\n correct = True\n return correct",
"_____no_output_____"
],
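[
"# Minimal illustration (added): the chunk rules above operate on a space-joined tag string,\n# so a candidate pattern can be sanity-checked on toy tag sequences without any Urdu text.\nimport re\n\ndef is_valid_chunk(tag_seq):\n    # e.g. optional adjectives followed by one or more nouns\n    return bool(re.match(r'^(ADJ )*(NN|PN|NOUN)( (NN|PN|NOUN))*$', tag_seq))\n\nprint(is_valid_chunk('ADJ NOUN NOUN'))  # True\nprint(is_valid_chunk('VERB NOUN'))      # False",
"_____no_output_____"
],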
[
"#x='محکمہ اینٹی کرپشن'\nx = 'ADJ'",
"_____no_output_____"
],
[
"pos_regex_matches(x)\n",
"ADJ\n<_sre.SRE_Match object; span=(0, 0), match=''>\n"
],
[
"noun_phrases",
"_____no_output_____"
],
[
"check_ngram_from_stanza(x)",
"PROPN PROPN PROPN PROPN ADP\n"
],
[
"check_ngram_from_CRFPOS(x)",
"_____no_output_____"
],
[
"x='جوبائیڈن'\ntags='NOUN' \npattern = r'<NOUN>+'\nfor match in re.finditer(pattern,tags):\n start, end = match.span()\n print(start)\n #span = x[start:end]\n # This is a Span object or None if match doesn't map to valid token sequence\n #if span is not None:\n # print(\"Found match:\", span)",
"_____no_output_____"
],
[
"import spacy\nimport re\n\n#nlp = spacy.load(\"en_core_web_sm\")\ndoc = nlp(\"ADV ADJ ADJ NOUN NOUN\")\nexpression = r\"((ADV )?(ADJ )*((NOUN )(ADP )?)+(NOUN )?)\"\n#xpression = r\"(NOUN|PROPNOUN)+(NOUN|PROPNOUN|AUX)$\"\nfor match in re.finditer(expression, doc.text):\n start, end = match.span()\n span = doc.char_span(start, end)\n # This is a Span object or None if match doesn't map to valid token sequence\n if span is not None:\n print(\"Found match:\", span.text)\ntitle_search = re.search(expression,doc.text)\nif title_search:\n title = title_search.group(1)\n print(title)",
"ADV ADJ ADJ NOUN \n"
],
[
"x='برائے موسمیاتی تبدیلی'\ntags = CRFTagger.pos_tag(x)\nwords,tgs = get_words_from_tags(tags)\n\nprint(tags)\nprint(\" \".join(tgs))",
"[('برائے', 'ADV'), ('موسمیاتی', 'ADJ'), ('تبدیلی', 'NN')]\nADV ADJ NN\n"
],
[
"### for CRF POS TAG USE THIS CODE\n#https://applied-language-technology.readthedocs.io/en/latest/notebooks/part_iii/02_pattern_matching.html\nfrom spacy.matcher import Matcher\n\nmatcher = Matcher(nlp.vocab)\n# Add match ID \"HelloWorld\" with no callback and one pattern\npattern = [\n {'TEXT': 'ADV', 'OP': '?'},\n {'TEXT': 'ADJ', 'OP': '*'},\n {'TEXT': \"P\", 'OP': '*','IS_SENT_START':False},\n {'TEXT': {\"IN\": [\"A\",\"NN\", \"PN\"]}, 'OP': '+'},\n {'TEXT': {\"IN\": [\"ADJ\"]}, 'OP': '?'}\n \n ]\n \n \nmatcher.add(\"GrammarRules\", [pattern],greedy=\"LONGEST\")\n\ndoc = nlp(\"A NN\")\nmatches = matcher(doc)\nmatched_text = []\nfor match_id, start, end in matches:\n string_id = nlp.vocab.strings[match_id] # Get string representation\n print(string_id)\n span = doc[start:end] # The matched span\n print(match_id, string_id, start, end, span.text)\n matched_text.append(span.text)\n \nif len(doc) == len(\" \".join(matched_text).split()):\n print('This is a valid sequence')\nelse:\n print('This is a not a valid sequence')",
"GrammarRules\n14395842542968392008 GrammarRules 0 2 A NN\nThis is a valid sequence\n"
],
[
"## THIS CODE FOR STANZA\n#https://applied-language-technology.readthedocs.io/en/latest/notebooks/part_iii/02_pattern_matching.html\nfrom spacy.matcher import Matcher\n\nmatcher = Matcher(nlp.vocab)\n# Add match ID \"HelloWorld\" with no callback and one pattern\n\npattern = [\n {'POS': 'ADV', 'OP': '?'},\n {'POS': 'ADJ', 'OP': '*'},\n {'POS': \"ADP\", 'OP': '*','IS_SENT_START':False},\n {'POS': {\"IN\": [\"NOUN\", \"PROPN\",\"ADP\"]}, 'OP': '+'},\n {'POS': {\"IN\": [\"ADJ\"]}, 'OP': '?'}\n \n ]\n \nmatcher.add(\"GrammarRules\", [pattern],greedy=\"LONGEST\")\n\ndoc = nlp(\"محکمہ اینٹی کرپشن سرگودھا\")\nmatches = matcher(doc)\nmatched_text = []\nfor match_id, start, end in matches:\n string_id = nlp.vocab.strings[match_id] # Get string representation\n print(string_id)\n span = doc[start:end] # The matched span\n print(match_id, string_id, start, end, span.text)\n matched_text.append(span.text)\n \nif len(doc) == len(\" \".join(matched_text).split()):\n print('This is a valid sequence')\nelse:\n print('This is a not a valid sequence')",
"GrammarRules\n14395842542968392008 GrammarRules 0 3 محکمہ اینٹی کرپشن\nThis is a not a valid sequence\n"
],
[
"import re\n\nx='جوبائیڈن'\ntags='NOUN NOUN' \npattern = r'<NOUN>+'\nfor match in re.finditer(pattern,tags):\n start, end = match.span()\n print(start)",
"_____no_output_____"
],
[
"print(x)",
"<_sre.SRE_Match object; span=(0, 17), match='The rain in Spain'>\n"
],
[
"import re\n\n#Check if the string starts with \"The\" and ends with \"Spain\":\ntext = ' کرونا کیسز مزید'\ntagged = CRFTagger.pos_tag(text)\nwords,tgs = get_words_from_tags(tagged)\n \ntxt = \" \".join(tgs)\n\nprint(txt)\n#x = re.search(\"^(ADJ|ADV|NOUN|PROPN).*(NOUN|PROPN|AUX)$\", txt)\nx = re.search(\"^(ADJ|ADV|NN|PN).*(NN|PN|AUX)$\", txt)\n\nif x:\n print(\"YES! We have a match!\")\nelse:\n print(\"No match\")",
"PN PN ADJ\nNo match\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f309626cb389d1ec9838fba5fcb4452f4be35e | 22,694 | ipynb | Jupyter Notebook | aas_233_workshop/09b-Specutils/Specutils_analysis.ipynb | astropy/astropy-workshops | 2c35a2775b5926e1bcbffadd5934591d0acb989f | [
"BSD-3-Clause"
] | 2 | 2020-10-07T17:54:01.000Z | 2021-06-24T22:33:27.000Z | aas_233_workshop/09b-Specutils/Specutils_analysis.ipynb | astropy/astropy-workshops | 2c35a2775b5926e1bcbffadd5934591d0acb989f | [
"BSD-3-Clause"
] | 2 | 2019-10-31T15:15:25.000Z | 2019-10-31T19:16:05.000Z | aas_233_workshop/09b-Specutils/Specutils_analysis.ipynb | astropy/astropy-workshops | 2c35a2775b5926e1bcbffadd5934591d0acb989f | [
"BSD-3-Clause"
] | 6 | 2019-09-11T14:59:00.000Z | 2021-01-18T18:51:16.000Z | 35.68239 | 680 | 0.650921 | [
[
[
"# Specutils Analysis\n\n![Specutils: An Astropy Package for Spectroscopy](data/specutils_logo.png)\n\n\nThis notebook provides an overview of some of the spectral analysis capabilities of the Specutils Astropy coordinated package. While this notebook is intended as an interactive introduction to specutils at the time of its writing, the canonical source of information for the package is the latest version's documentation: \n\nhttps://specutils.readthedocs.io\n\nNote that the below assumes you have knowledge of the material in the [overview notebook](Specutils_overview.ipynb). If this is not the case you may wish to review that notebook before proceding here.",
"_____no_output_____"
],
[
"## Imports\n\nWe start with some fundamental imports for working with specutils and simple visualization of spectra:",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nimport astropy.units as u\n\nimport specutils\nfrom specutils import Spectrum1D, SpectralRegion\nspecutils.__version__",
"_____no_output_____"
],
[
"# for plotting:\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n\n# for showing quantity units on axes automatically:\nfrom astropy.visualization import quantity_support\nquantity_support();",
"_____no_output_____"
]
],
[
[
"## Sample Spectrum and SNR\n\nFor use below, we also load the sample SDSS spectrum downloaded in the [overview notebook](Specutils_overview.ipynb). See that notebook if you have not yet downloaded this spectrum.",
"_____no_output_____"
]
],
[
[
"sdss_spec = Spectrum1D.read('data/sdss_spectrum.fits', format='SDSS-III/IV spec')\nplt.step(sdss_spec.wavelength, sdss_spec.flux);",
"_____no_output_____"
]
],
[
[
"Because this example file already has uncertainties, it is straightforward to use one of the fundamental quantifications of a spectrum: the whole-spectrum signal-to-noise ratio:",
"_____no_output_____"
]
],
[
[
"from specutils import analysis\n\nanalysis.snr(sdss_spec)",
"_____no_output_____"
]
],
[
[
"# Spectral Regions\n\nMost analysis required on a spectrum requires specification of a part of the spectrum - e.g., a spectral line. Because such regions may have value independent of a particular spectrum, they are represented as objects distrinct from a given spectrum object. Below we outline a few ways such regions are specified.",
"_____no_output_____"
]
],
[
[
"ha_region = SpectralRegion((6563-50)*u.AA, (6563+50)*u.AA)\nha_region",
"_____no_output_____"
]
],
[
[
"Regions can also be raw pixel values (although of course this is more applicable to a specific spectrum):",
"_____no_output_____"
]
],
[
[
"pixel_region = SpectralRegion(2100*u.pixel, 2600*u.pixel)\npixel_region",
"_____no_output_____"
]
],
[
[
"Additionally, *multiple* regions can be in the same `SpectralRegion` object. This is useful for e.g. measuring multiple spectral features in one call:",
"_____no_output_____"
]
],
[
[
"HI_wings_region = SpectralRegion([(1.4*u.GHz, 1.41*u.GHz), (1.43*u.GHz, 1.44*u.GHz)])\nHI_wings_region",
"_____no_output_____"
]
],
[
[
"While regions are useful for a variety of analysis steps, fundamentally they can be used to extract sub-spectra from larger spectra:",
"_____no_output_____"
]
],
[
[
"from specutils.manipulation import extract_region\n\nsubspec = extract_region(sdss_spec, pixel_region)\nplt.step(subspec.wavelength, subspec.flux)\n\nanalysis.snr(subspec)",
"_____no_output_____"
]
],
[
[
"# Line Measurements\n\nWhile line-fitting (detailed more below) is a good choice for high signal-to-noise spectra or when detailed kinematics are desired, more empirical measures are often used in the literature for noisier spectra or just simpler analysis procedures. Specutils provides a set of functions to provide these sorts of measurements, as well as similar summary statistics about spectral regions. The [analysis part of the specutils documentation](https://specutils.readthedocs.io/en/latest/analysis.html) provides a full list and detailed examples of these, but here we demonstrate some example cases.",
"_____no_output_____"
],
[
"Note: these line measurements generally assume your spectrum is continuum-subtracted or continuum-normalized. Some spectral pipelines do this for you, but often this is not the case. For our examples here we will do this step \"by-eye\", but for a more detailed discussion of continuum modeling, see the next section. Based on the above plot we estimate a continuum level for the area of the SDSS spectrum around the H-alpha emission line, and use basic math to construct the continuum-normalized and continuum-subtracted spectra.",
"_____no_output_____"
]
],
[
[
"# estimate a reasonable continuum-level estimate for the h-alpha area of the spectrum\nsdss_continuum = 205*subspec.flux.unit\n\nsdss_halpha_contsub = extract_region(sdss_spec, ha_region) - sdss_continuum\n\nplt.axhline(0, c='k', ls=':')\nplt.step(sdss_halpha_contsub.wavelength, sdss_halpha_contsub.flux)\nplt.ylim(-50, 50)",
"_____no_output_____"
]
],
[
[
"With the continuum level identified, we can now make some measurements of the spectral lines that are apparent by eye - in particular we will focus on the H-alpha emission line. While there are techniques for identifying the line automatically (see the fitting section below), here we assume we are doing \"quick-look\" procedures where manual identification is possible. \n\nIn the cell below, lill in the `<LOWER>` and `<UPPER>` values to make a spectral region that just encompasses the H-alpha line (the middle of the three lines).",
"_____no_output_____"
]
],
[
[
"line_region = SpectralRegion(<LOWER>*u.angstrom,\n <UPPER>*u.angstrom)\n\nplt.step(sdss_halpha_contsub.wavelength, sdss_halpha_contsub.flux)\n\nyl1, yl2 = plt.ylim()\nplt.fill_between([halpha_lines_region.lower, halpha_lines_region.upper], \n yl1, yl2, alpha=.2)\nplt.ylim(yl1, yl2)",
"_____no_output_____"
]
],
[
[
"You can now call a variety of analysis functions on the continuum-subtracted spectrum to estimate various properties of the line:",
"_____no_output_____"
]
],
[
[
"analysis.centroid(sdss_halpha_contsub, halpha_lines_region)",
"_____no_output_____"
],
[
"analysis.gaussian_fwhm(sdss_halpha_contsub, halpha_lines_region)",
"_____no_output_____"
],
[
"analysis.line_flux(sdss_halpha_contsub, halpha_lines_region)",
"_____no_output_____"
]
],
[
[
"Equivalent width, being a continuum dependent property, can either be computed directly from the spectrum if the continuum level is given, or measured on a continuum-normalized spectrum. The latter is mainly useful if the continuum is non-uniform over the line being measured.",
"_____no_output_____"
]
],
[
[
"analysis.equivalent_width(sdss_spec, sdss_continuum, regions=halpha_lines_region)",
"_____no_output_____"
],
[
"sdss_halpha_contnorm = sdss_spec / sdss_continuum\nanalysis.equivalent_width(sdss_halpha_contnorm, regions=halpha_lines_region)",
"_____no_output_____"
]
],
[
[
"## Exercise\n\nLoad one of the spectrum datasets you made in the overview exercises into this notebook (i.e., your own dataset, a downloaded one, or the blackbody with an artificially added spectral feature). Make a flux or width measurement of a line in that spectrum directly. Is anything odd?",
"_____no_output_____"
],
[
"# Continuum Subtraction\n\nWhile continuum-fitting for spectra is sometimes thought of as an \"art\" as much as a science, specutils provides the tools to do a variety of approaches to continuum-fitting, without making a specific recommendation about what is \"best\" (since it is often very data-dependent). More details are available [in the relevant specutils doc section](https://specutils.readthedocs.io/en/latest/fitting.html#continuum-fitting), but here we outline the two basic options as it stands: an \"often good-enough\" function, and a more customizable tool that leans on the [`astropy.modeling`](http://docs.astropy.org/en/stable/modeling/index.html) models to provide its flexibility.",
"_____no_output_____"
],
[
"### The \"often good-enough\" way\n\nThe `fit_generic_continuum` function provides a function that is often sufficient for reasonably well-behaved continuua, particular for \"quick-look\" or similar applications where high precision is not that critical. The function yields a continuum model, which can be evaluated at any spectral axis value:",
"_____no_output_____"
]
],
[
[
"from specutils.fitting import fit_generic_continuum",
"_____no_output_____"
],
[
"generic_continuum = fit_generic_continuum(sdss_spec)\n\ngeneric_continuum_evaluated = generic_continuum(sdss_spec.spectral_axis)\n\nplt.step(sdss_spec.spectral_axis, sdss_spec.flux)\nplt.plot(sdss_spec.spectral_axis, generic_continuum_evaluated)\nplt.ylim(100, 300);",
"_____no_output_____"
]
],
[
[
"(Note that in some versions of astropy/specutils you may see a warning that the \"Model is linear in parameters\" upon executing the above cell. This is not a problem unless performance is a serious concern, in which case more customization is required.)\n\nWith this model in hand, continuum-subtracted or continuum-normalized spectra can be produced using basic spectral manipulations:",
"_____no_output_____"
]
],
[
[
"sdss_gencont_sub = sdss_spec - generic_continuum(sdss_spec.spectral_axis)\nsdss_gencont_norm = sdss_spec / generic_continuum(sdss_spec.spectral_axis)\n\nax1, ax2 = plt.subplots(2, 1)[1]\n\nax1.step(sdss_gencont_sub.wavelength, sdss_gencont_sub.flux)\nax1.set_ylim(-50, 50)\nax1.axhline(0, color='k', ls=':') # continuum should be at flux=0\n\nax2.step(sdss_gencont_norm.wavelength, sdss_gencont_norm.flux)\nax2.set_ylim(0, 2)\nax2.axhline(1, color='k', ls='--'); # continuum should be at flux=1",
"_____no_output_____"
]
],
[
[
"### The customizable way\n\nThe `fit_continuum` function operates similarly to `fit_generic_continuum`, but is meant for you to provide your favorite continuum model rather than being tailored to a specific continuum model. To see the list of models, see the [astropy.modeling documentation](http://docs.astropy.org/en/stable/modeling/index.html).",
"_____no_output_____"
]
],
[
[
"from specutils.fitting import fit_continuum\nfrom astropy.modeling import models",
"_____no_output_____"
]
],
[
[
"For example, suppose you want to use a 3rd-degree Chebyshev polynomial as your continuum model. You can use `fit_continuum` to get an object that behaves the same as for `fit_generic_continuum`:",
"_____no_output_____"
]
],
[
[
"chebdeg3_continuum = fit_continuum(sdss_spec, models.Chebyshev1D(3))\n\ngeneric_continuum_evaluated = generic_continuum(sdss_spec.spectral_axis)\n\nplt.step(sdss_spec.spectral_axis, sdss_spec.flux)\nplt.plot(sdss_spec.spectral_axis, chebdeg3_continuum(sdss_spec.spectral_axis))\nplt.ylim(100, 300);",
"_____no_output_____"
]
],
[
[
"This then provides total flexibility. For example, you can also try other polynomials like higher-degree Hermite polynomials:",
"_____no_output_____"
]
],
[
[
"hermdeg7_continuum = fit_continuum(sdss_spec, models.Hermite1D(degree=7))\nhermdeg17_continuum = fit_continuum(sdss_spec, models.Hermite1D(degree=17))\n\nplt.step(sdss_spec.spectral_axis, sdss_spec.flux)\nplt.plot(sdss_spec.spectral_axis, chebdeg3_continuum(sdss_spec.spectral_axis))\nplt.plot(sdss_spec.spectral_axis, hermdeg7_continuum(sdss_spec.spectral_axis))\nplt.plot(sdss_spec.spectral_axis, hermdeg17_continuum(sdss_spec.spectral_axis))\nplt.ylim(150, 250);",
"_____no_output_____"
]
],
[
[
"This immediately demonstrates the tradeoffs in polynomial fitting: while the high-degree polynomials capture the wiggles of the spectrum better than the low, they also *over*-fit near the strong emission lines.",
"_____no_output_____"
],
[
"## Exercise\n\nTry combining the `SpectralRegion` and continuum-fitting functionality to only fit the parts of the spectrum that *are* continuum (i.e. not including emission lines). Can you do better?",
"_____no_output_____"
],
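[
"# One possible answer to the exercise above (a sketch, not the only approach; exact keyword\n# support may vary by specutils version): both fit_generic_continuum and fit_continuum accept\n# an exclude_regions argument, so we can mask the strongest emission lines (the windows here\n# were chosen by eye from the plots above) and fit only the remaining continuum pixels.\nline_free_exclude = SpectralRegion([(4840*u.AA, 4880*u.AA),\n                                    (4990*u.AA, 5030*u.AA),\n                                    (6530*u.AA, 6610*u.AA)])\nmasked_continuum = fit_continuum(sdss_spec, models.Chebyshev1D(3),\n                                 exclude_regions=line_free_exclude)\n\nplt.step(sdss_spec.spectral_axis, sdss_spec.flux)\nplt.plot(sdss_spec.spectral_axis, masked_continuum(sdss_spec.spectral_axis))\nplt.ylim(100, 300);",
"_____no_output_____"
],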
[
"## Exercise\n\nUsing the spectrum from the previous exercise, first subtract a continuum, then re-do your measurement. Is it better?",
"_____no_output_____"
],
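[
"# Sketch for the exercise above (assumes `my_spec` and `my_region` are the spectrum and line\n# region from the previous exercise - substitute your own names before running):\n# my_cont = fit_generic_continuum(my_spec)\n# my_contsub = my_spec - my_cont(my_spec.spectral_axis)\n# analysis.line_flux(my_contsub, my_region)",
"_____no_output_____"
],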
[
"# Line-Fitting\n\nIn addition to the more empirical measurements described above, `specutils` provides tools for doing spectral line fitting. The approach is akin to that for continuum modeling: models from [astropy.modeling](http://docs.astropy.org/en/stable/modeling/index.html) are fit to the spectrum, and either those models can be used directly, or their parameters.",
"_____no_output_____"
]
],
[
[
"from specutils import fitting\n",
"_____no_output_____"
]
],
[
[
"The fitting machinery must first be given guesses for line locations. This process can be automated using functions designed to identify lines (more detail on the options is [in the docs](https://specutils.readthedocs.io/en/latest/fitting.html#line-finding)). For data sets where these algorithms are not ideal, you may substitute your own (i.e., skip this step and start with line location guesses). \n\nHere we identify the three lines near the Halpha region in our SDSS spectrum, finding the lines above about a $\\sim 3 \\sigma$ flux threshold. They are then output as an astropy Table:",
"_____no_output_____"
]
],
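[
[
"# Alternative to automated line finding (added sketch): hand-build the guesses table and use\n# it in place of `halpha_lines` below. Only the 'line_center' column is needed by the fitting\n# loop, so rough by-eye centers are enough; the values here are illustrative.\nfrom astropy.table import QTable\n\nmanual_lines = QTable()\nmanual_lines['line_center'] = [6548, 6565, 6585]*u.angstrom\nmanual_lines['line_type'] = ['emission', 'emission', 'emission']",
"_____no_output_____"
]
],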
[
[
"halpha_lines = fitting.find_lines_threshold(sdss_halpha_contsub, 3)\n\nplt.step(sdss_halpha_contsub.spectral_axis, sdss_halpha_contsub.flux, where='mid')\nfor line in halpha_lines:\n plt.axvline(line['line_center'], color='k', ls=':')\n\nhalpha_lines",
"_____no_output_____"
]
],
[
[
"Now for each of these lines, we need to fit a model. Sometimes it is sufficient to simply create a model where the center is at the line and excise the appropriate area of the line to do a line estimate. This is not *too* sensitive to the size of the region, at least for well-separated lines like these. The result is a list of models that carry with them them the details of the fit:",
"_____no_output_____"
]
],
[
[
"halpha_line_models = []\nfor line in halpha_lines:\n line_region = SpectralRegion(line['line_center']-5*u.angstrom,\n line['line_center']+5*u.angstrom)\n line_spectrum = extract_region(sdss_halpha_contsub, line_region)\n line_estimate = models.Gaussian1D(mean=line['line_center'])\n line_model = fitting.fit_lines(line_spectrum, line_estimate)\n \n halpha_line_models.append(line_model)\n \nplt.step(sdss_halpha_contsub.spectral_axis, sdss_halpha_contsub.flux, where='mid')\nfor line_model in halpha_line_models:\n evaluated_model = line_model(sdss_halpha_contsub.spectral_axis)\n plt.plot(sdss_halpha_contsub.spectral_axis, evaluated_model) \n \nhalpha_line_models",
"_____no_output_____"
]
],
[
[
"For more complicated models or fits it may be better to use the `estimate_line_parameters` function instead of manually creating e.g. a `Gaussian1D` model and setting the center. An example of this pattern is given below.\n\nNote that we provided a default `Gaussian1D` model to the `estimate_line_parameters` function above. This function makes reasonable guesses for `Gaussian1D`, `Voigt1D`, and `Lorentz1D`, the most common line profiles used for spectral lines, but may or may not work for other models. See [the relevant docs section](https://specutils.readthedocs.io/en/latest/fitting.html#parameter-estimation) for more details.\n\nIn this example we also show an example of a *joint* fit of all three lines at the same time. While the difference may seems subtle, in cases of blended lines this typically provides much better fits:",
"_____no_output_____"
]
],
[
[
"halpha_line_estimates = []\nfor line in halpha_lines:\n line_region = SpectralRegion(line['line_center']-3*u.angstrom,\n line['line_center']+3*u.angstrom)\n line_spectrum = extract_region(sdss_halpha_contsub, line_region)\n line_estimate = fitting.estimate_line_parameters(line_spectrum, models.Gaussian1D())\n \n halpha_line_estimates.append(line_estimate)\n\n# this could be done more flexibly with a for loop but we are explicit here for simplicity\ncombined_model_estimate = halpha_line_estimates[0] + halpha_line_estimates[1] + halpha_line_estimates[2]\ncombined_model_estimate",
"_____no_output_____"
],
[
"combined_model = fitting.fit_lines(sdss_halpha_contsub, combined_model_estimate)\n\nplt.step(sdss_halpha_contsub.spectral_axis, sdss_halpha_contsub.flux, where='mid')\nplt.plot(sdss_halpha_contsub.spectral_axis, \n combined_model(sdss_halpha_contsub.spectral_axis)) \n \ncombined_model",
"_____no_output_____"
]
],
[
[
"## Exercise\n\nFit a spectral feature from your own spectrum using the fitting methods outlined above. Try the different line profile types (Gaussian, Lorentzian, or Voigt). If you are using the blackbody spectrum (where you know the \"true\" answer for the spectral line), compare your answer to the true answer.",
"_____no_output_____"
]
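,
[
"# One way to start the exercise above (a sketch using the SDSS line as a stand-in; adapt the\n# center, amplitude, and units to your own spectrum - parameter-unit handling can differ\n# between astropy/specutils versions): fit the same line with the three common profiles.\nflux_unit = sdss_halpha_contsub.flux.unit\ncenter = 6565*u.angstrom\nprofile_estimates = [\n    models.Gaussian1D(amplitude=40*flux_unit, mean=center, stddev=2*u.angstrom),\n    models.Lorentz1D(amplitude=40*flux_unit, x_0=center, fwhm=4*u.angstrom),\n    models.Voigt1D(x_0=center, amplitude_L=40*flux_unit,\n                   fwhm_L=2*u.angstrom, fwhm_G=2*u.angstrom)]\nfor estimate in profile_estimates:\n    fit = fitting.fit_lines(sdss_halpha_contsub, estimate)\n    print(type(fit).__name__, fit.parameters)",
"_____no_output_____"
]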
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7f3128f1c2cff17f9b2b45719f1e1fe7c4c647b | 55,267 | ipynb | Jupyter Notebook | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/keras_CNN_CIFAR10.ipynb | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/keras_CNN_CIFAR10.ipynb | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/keras_CNN_CIFAR10.ipynb | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | 50.151543 | 3,762 | 0.544213 | [
[
[
"<br />\n\n<div style=\"text-align: center;\">\n<font size=\"7\">Keras simple CNN</font>\n<br /> \n\n \n</div>\n<br />\n\n\n<div style=\"text-align: right;\">\n<font size=\"4\">2020/11/11</font>\n<br />\n<font size=\"4\">Ryutaro Hashimoto</font>\n</div>\n\n___",
"_____no_output_____"
],
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Setup\" data-toc-modified-id=\"Setup-1\"><span class=\"toc-item-num\">1 </span>Setup</a></span><ul class=\"toc-item\"><li><span><a href=\"#Launching-a-Sagemaker-session\" data-toc-modified-id=\"Launching-a-Sagemaker-session-1.1\"><span class=\"toc-item-num\">1.1 </span>Launching a Sagemaker session</a></span></li><li><span><a href=\"#Prepare-the-dataset-for-training\" data-toc-modified-id=\"Prepare-the-dataset-for-training-1.2\"><span class=\"toc-item-num\">1.2 </span>Prepare the dataset for training</a></span></li></ul></li><li><span><a href=\"#Train-the-model\" data-toc-modified-id=\"Train-the-model-2\"><span class=\"toc-item-num\">2 </span>Train the model</a></span><ul class=\"toc-item\"><li><span><a href=\"#Specifying-the-Instance-Type\" data-toc-modified-id=\"Specifying-the-Instance-Type-2.1\"><span class=\"toc-item-num\">2.1 </span>Specifying the Instance Type</a></span></li><li><span><a href=\"#Setting-for-hyperparameters\" data-toc-modified-id=\"Setting-for-hyperparameters-2.2\"><span class=\"toc-item-num\">2.2 </span>Setting for hyperparameters</a></span></li><li><span><a href=\"#Metrics\" data-toc-modified-id=\"Metrics-2.3\"><span class=\"toc-item-num\">2.3 </span>Metrics</a></span></li><li><span><a href=\"#Tags\" data-toc-modified-id=\"Tags-2.4\"><span class=\"toc-item-num\">2.4 </span>Tags</a></span></li><li><span><a href=\"#Setting-for-estimator\" data-toc-modified-id=\"Setting-for-estimator-2.5\"><span class=\"toc-item-num\">2.5 </span>Setting for estimator</a></span></li><li><span><a href=\"#Specify-data-input-and-output\" data-toc-modified-id=\"Specify-data-input-and-output-2.6\"><span class=\"toc-item-num\">2.6 </span>Specify data input and output</a></span></li><li><span><a href=\"#Execute-Training\" data-toc-modified-id=\"Execute-Training-2.7\"><span class=\"toc-item-num\">2.7 </span>Execute Training</a></span></li><li><span><a href=\"#Checking-the-accuracy-of-a-model-with-TensorBoard\" data-toc-modified-id=\"Checking-the-accuracy-of-a-model-with-TensorBoard-2.8\"><span class=\"toc-item-num\">2.8 </span>Checking the accuracy of a model with TensorBoard</a></span></li></ul></li><li><span><a href=\"#Predict-by-trained-Model\" data-toc-modified-id=\"Predict-by-trained-Model-3\"><span class=\"toc-item-num\">3 </span>Predict by trained Model</a></span><ul class=\"toc-item\"><li><span><a href=\"#Deploy-the-trained-model\" data-toc-modified-id=\"Deploy-the-trained-model-3.1\"><span class=\"toc-item-num\">3.1 </span>Deploy the trained model</a></span></li><li><span><a href=\"#Invoke-the-endpoint\" data-toc-modified-id=\"Invoke-the-endpoint-3.2\"><span class=\"toc-item-num\">3.2 </span>Invoke the endpoint</a></span></li><li><span><a href=\"#Download-the-dataset-for-prediction\" data-toc-modified-id=\"Download-the-dataset-for-prediction-3.3\"><span class=\"toc-item-num\">3.3 </span>Download the dataset for prediction</a></span></li><li><span><a href=\"#Prediction\" data-toc-modified-id=\"Prediction-3.4\"><span class=\"toc-item-num\">3.4 </span>Prediction</a></span></li><li><span><a href=\"#Accuracy\" data-toc-modified-id=\"Accuracy-3.5\"><span class=\"toc-item-num\">3.5 </span>Accuracy</a></span></li><li><span><a href=\"#Confusion-Matrix\" data-toc-modified-id=\"Confusion-Matrix-3.6\"><span class=\"toc-item-num\">3.6 </span>Confusion Matrix</a></span></li></ul></li><li><span><a href=\"#Cleanup\" data-toc-modified-id=\"Cleanup-4\"><span class=\"toc-item-num\">4 
</span>Cleanup</a></span></li></ul></div>",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"### Launching a Sagemaker session",
"_____no_output_____"
]
],
[
[
"import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nrole = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' # ← your iam role ARN",
"_____no_output_____"
]
],
[
[
"### Prepare the dataset for training\n\nSkip the next code since you have already downloaded it.",
"_____no_output_____"
]
],
[
[
"!python generate_cifar10_tfrecords.py --data-dir ./data",
"_____no_output_____"
]
],
[
[
"Next, we upload the data to Amazon S3:",
"_____no_output_____"
]
],
[
[
"from sagemaker.s3 import S3Uploader\n\nbucket = 'sagemaker-tutorial-hashimoto'\ndataset_uri = S3Uploader.upload('data', 's3://{}/tf-cifar10-example/data'.format(bucket))\n\ndisplay(dataset_uri)",
"_____no_output_____"
]
],
[
[
"## Train the model",
"_____no_output_____"
],
[
"### Specifying the Instance Type",
"_____no_output_____"
]
],
[
[
"instance_type = 'ml.p2.xlarge'",
"_____no_output_____"
]
],
[
[
"### Setting for hyperparameters",
"_____no_output_____"
]
],
[
[
"hyperparameters = {'epochs': 10, 'batch-size': 256}",
"_____no_output_____"
]
],
[
[
"### Metrics",
"_____no_output_____"
]
],
[
[
"metric_definitions = [\n {'Name': 'train:loss', 'Regex': '.*loss: ([0-9\\\\.]+) - accuracy: [0-9\\\\.]+.*'},\n {'Name': 'train:accuracy', 'Regex': '.*loss: [0-9\\\\.]+ - accuracy: ([0-9\\\\.]+).*'},\n {'Name': 'validation:accuracy', 'Regex': '.*step - loss: [0-9\\\\.]+ - accuracy: [0-9\\\\.]+ - val_loss: [0-9\\\\.]+ - val_accuracy: ([0-9\\\\.]+).*'},\n {'Name': 'validation:loss', 'Regex': '.*step - loss: [0-9\\\\.]+ - accuracy: [0-9\\\\.]+ - val_loss: ([0-9\\\\.]+) - val_accuracy: [0-9\\\\.]+.*'},\n {'Name': 'sec/steps', 'Regex': '.* - \\d+s (\\d+)[mu]s/step - loss: [0-9\\\\.]+ - accuracy: [0-9\\\\.]+ - val_loss: [0-9\\\\.]+ - val_accuracy: [0-9\\\\.]+'}\n]",
"_____no_output_____"
]
],
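[
[
"# Quick local check of a metric regex (added for clarity; the log line below is a made-up\n# example, not real training output). SageMaker applies each 'Regex' to the training job's\n# log lines and publishes the first capture group as the metric value.\nimport re\n\nsample_line = '313/313 - 10s - loss: 1.2345 - accuracy: 0.5678 - val_loss: 1.1111 - val_accuracy: 0.6000'\npattern = '.*loss: ([0-9\\\\.]+) - accuracy: [0-9\\\\.]+.*'\nprint(re.search(pattern, sample_line).group(1))  # -> 1.2345",
"_____no_output_____"
]
],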
[
[
"### Tags",
"_____no_output_____"
]
],
[
[
"tags = [{'Key': 'Project', 'Value': 'cifar10'}, {'Key': 'TensorBoard', 'Value': 'file'}]",
"_____no_output_____"
]
],
[
[
"### Setting for estimator",
"_____no_output_____"
]
],
[
[
"import subprocess\n\nfrom sagemaker.tensorflow import TensorFlow\n\nestimator = TensorFlow(entry_point='cifar10_keras_main.py',\n source_dir='source_dir',\n metric_definitions=metric_definitions,\n hyperparameters=hyperparameters,\n role=role,\n framework_version='1.15.2',\n py_version='py3',\n instance_count=1,\n instance_type=instance_type,\n base_job_name='cifar10-tf',\n tags=tags)",
"_____no_output_____"
],
[
"help(TensorFlow)",
"Help on class TensorFlow in module sagemaker.tensorflow.estimator:\n\nclass TensorFlow(sagemaker.estimator.Framework)\n | TensorFlow(py_version=None, framework_version=None, model_dir=None, image_uri=None, distribution=None, **kwargs)\n | \n | Handle end-to-end training and deployment of user-provided TensorFlow code.\n | \n | Method resolution order:\n | TensorFlow\n | sagemaker.estimator.Framework\n | sagemaker.estimator.EstimatorBase\n | builtins.object\n | \n | Methods defined here:\n | \n | __init__(self, py_version=None, framework_version=None, model_dir=None, image_uri=None, distribution=None, **kwargs)\n | Initialize a ``TensorFlow`` estimator.\n | \n | Args:\n | py_version (str): Python version you want to use for executing your model training\n | code. Defaults to ``None``. Required unless ``image_uri`` is provided.\n | framework_version (str): TensorFlow version you want to use for executing your model\n | training code. Defaults to ``None``. Required unless ``image_uri`` is provided.\n | List of supported versions:\n | https://github.com/aws/sagemaker-python-sdk#tensorflow-sagemaker-estimators.\n | model_dir (str): S3 location where the checkpoint data and models can be exported to\n | during training (default: None). It will be passed in the training script as one of\n | the command line arguments. If not specified, one is provided based on\n | your training configuration:\n | \n | * *distributed training with SMDistributed or MPI with Horovod* - ``/opt/ml/model``\n | * *single-machine training or distributed training without MPI* - ``s3://{output_path}/model``\n | * *Local Mode with local sources (file:// instead of s3://)* - ``/opt/ml/shared/model``\n | \n | To disable having ``model_dir`` passed to your training script,\n | set ``model_dir=False``.\n | image_uri (str): If specified, the estimator will use this image for training and\n | hosting, instead of selecting the appropriate SageMaker official image based on\n | framework_version and py_version. It can be an ECR url or dockerhub image and tag.\n | \n | Examples:\n | 123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0\n | custom-image:latest.\n | \n | If ``framework_version`` or ``py_version`` are ``None``, then\n | ``image_uri`` is required. If also ``None``, then a ``ValueError``\n | will be raised.\n | distribution (dict): A dictionary with information on how to run distributed training\n | (default: None). Currently, the following are supported:\n | distributed training with parameter servers, SageMaker Distributed (SMD) Data\n | and Model Parallelism, and MPI. SMD Model Parallelism can only be used with MPI.\n | To enable parameter server use the following setup:\n | \n | .. code:: python\n | \n | {\n | \"parameter_server\": {\n | \"enabled\": True\n | }\n | }\n | \n | To enable MPI:\n | \n | .. code:: python\n | \n | {\n | \"mpi\": {\n | \"enabled\": True\n | }\n | }\n | \n | To enable SMDistributed Data Parallel or Model Parallel:\n | \n | .. code:: python\n | \n | {\n | \"smdistributed\": {\n | \"dataparallel\": {\n | \"enabled\": True\n | },\n | \"modelparallel\": {\n | \"enabled\": True,\n | \"parameters\": {}\n | }\n | }\n | }\n | \n | **kwargs: Additional kwargs passed to the Framework constructor.\n | \n | .. 
tip::\n | \n | You can find additional parameters for initializing this class at\n | :class:`~sagemaker.estimator.Framework` and\n | :class:`~sagemaker.estimator.EstimatorBase`.\n | \n | create_model(self, role=None, vpc_config_override='VPC_CONFIG_DEFAULT', entry_point=None, source_dir=None, dependencies=None, **kwargs)\n | Create a ``TensorFlowModel`` object that can be used for creating\n | SageMaker model entities, deploying to a SageMaker endpoint, or\n | starting SageMaker Batch Transform jobs.\n | \n | Args:\n | role (str): The ``TensorFlowModel``, which is also used during transform jobs.\n | If not specified, the role from the Estimator is used.\n | vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the\n | model. Default: use subnets and security groups from this Estimator.\n | \n | * 'Subnets' (list[str]): List of subnet ids.\n | * 'SecurityGroupIds' (list[str]): List of security group ids.\n | \n | entry_point (str): Path (absolute or relative) to the local Python source file which\n | should be executed as the entry point to training. If ``source_dir`` is specified,\n | then ``entry_point`` must point to a file located at the root of ``source_dir``.\n | If not specified and ``endpoint_type`` is 'tensorflow-serving',\n | no entry point is used. If ``endpoint_type`` is also ``None``,\n | then the training entry point is used.\n | source_dir (str): Path (absolute or relative or an S3 URI) to a directory with any other\n | serving source code dependencies aside from the entry point file (default: None).\n | dependencies (list[str]): A list of paths to directories (absolute or relative) with\n | any additional libraries that will be exported to the container (default: None).\n | **kwargs: Additional kwargs passed to\n | :class:`~sagemaker.tensorflow.model.TensorFlowModel`.\n | \n | Returns:\n | sagemaker.tensorflow.model.TensorFlowModel: A ``TensorFlowModel`` object.\n | See :class:`~sagemaker.tensorflow.model.TensorFlowModel` for full details.\n | \n | hyperparameters(self)\n | Return hyperparameters used by your custom TensorFlow code during model training.\n | \n | transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None, output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None, max_payload=None, tags=None, role=None, volume_kms_key=None, entry_point=None, vpc_config_override='VPC_CONFIG_DEFAULT', enable_network_isolation=None, model_name=None)\n | Return a ``Transformer`` that uses a SageMaker Model based on the training job. It\n | reuses the SageMaker Session and base job name used by the Estimator.\n | \n | Args:\n | instance_count (int): Number of EC2 instances to use.\n | instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.\n | strategy (str): The strategy used to decide how to batch records in a single request\n | (default: None). Valid values: 'MultiRecord' and 'SingleRecord'.\n | assemble_with (str): How the output is assembled (default: None). Valid values: 'Line'\n | or 'None'.\n | output_path (str): S3 location for saving the transform result. If not specified,\n | results are stored to a default bucket.\n | output_kms_key (str): Optional. KMS key ID for encrypting the transform output\n | (default: None).\n | accept (str): The accept header passed by the client to\n | the inference endpoint. 
If it is supported by the endpoint,\n | it will be the format of the batch transform output.\n | env (dict): Environment variables to be set for use during the transform job\n | (default: None).\n | max_concurrent_transforms (int): The maximum number of HTTP requests to be made to\n | each individual transform container at one time.\n | max_payload (int): Maximum size of the payload in a single HTTP request to the\n | container in MB.\n | tags (list[dict]): List of tags for labeling a transform job. If none specified, then\n | the tags used for the training job are used for the transform job.\n | role (str): The IAM Role ARN for the ``TensorFlowModel``, which is also used\n | during transform jobs. If not specified, the role from the Estimator is used.\n | volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML\n | compute instance (default: None).\n | entry_point (str): Path (absolute or relative) to the local Python source file which\n | should be executed as the entry point to training. If ``source_dir`` is specified,\n | then ``entry_point`` must point to a file located at the root of ``source_dir``.\n | If not specified and ``endpoint_type`` is 'tensorflow-serving',\n | no entry point is used. If ``endpoint_type`` is also ``None``,\n | then the training entry point is used.\n | vpc_config_override (dict[str, list[str]]): Optional override for\n | the VpcConfig set on the model.\n | Default: use subnets and security groups from this Estimator.\n | \n | * 'Subnets' (list[str]): List of subnet ids.\n | * 'SecurityGroupIds' (list[str]): List of security group ids.\n | \n | enable_network_isolation (bool): Specifies whether container will\n | run in network isolation mode. Network isolation mode restricts\n | the container access to outside networks (such as the internet).\n | The container does not make any inbound or outbound network\n | calls. If True, a channel named \"code\" will be created for any\n | user entry script for inference. Also known as Internet-free mode.\n | If not specified, this setting is taken from the estimator's\n | current configuration.\n | model_name (str): Name to use for creating an Amazon SageMaker\n | model. If not specified, the estimator generates a default job name\n | based on the training image name and current timestamp.\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | __abstractmethods__ = frozenset()\n | \n | ----------------------------------------------------------------------\n | Methods inherited from sagemaker.estimator.Framework:\n | \n | training_image_uri(self)\n | Return the Docker image to use for training.\n | \n | The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does\n | the model training, calls this method to find the image to use for model\n | training.\n | \n | Returns:\n | str: The URI of the Docker image.\n | \n | ----------------------------------------------------------------------\n | Class methods inherited from sagemaker.estimator.Framework:\n | \n | attach(training_job_name, sagemaker_session=None, model_channel_name='model') from abc.ABCMeta\n | Attach to an existing training job.\n | \n | Create an Estimator bound to an existing training job, each subclass\n | is responsible to implement\n | ``_prepare_init_params_from_job_description()`` as this method delegates\n | the actual conversion of a training job description to the arguments\n | that the class constructor expects. 
After attaching, if the training job\n | has a Complete status, it can be ``deploy()`` ed to create a SageMaker\n | Endpoint and return a ``Predictor``.\n | \n | If the training job is in progress, attach will block until the training job\n | completes, but logs of the training job will not display. To see the logs\n | content, please call ``logs()``\n | \n | Examples:\n | >>> my_estimator.fit(wait=False)\n | >>> training_job_name = my_estimator.latest_training_job.name\n | Later on:\n | >>> attached_estimator = Estimator.attach(training_job_name)\n | >>> attached_estimator.logs()\n | >>> attached_estimator.deploy()\n | \n | Args:\n | training_job_name (str): The name of the training job to attach to.\n | sagemaker_session (sagemaker.session.Session): Session object which\n | manages interactions with Amazon SageMaker APIs and any other\n | AWS services needed. If not specified, the estimator creates one\n | using the default AWS configuration chain.\n | model_channel_name (str): Name of the channel where pre-trained\n | model data will be downloaded (default: 'model'). If no channel\n | with the same name exists in the training job, this option will\n | be ignored.\n | \n | Returns:\n | Instance of the calling ``Estimator`` Class with the attached\n | training job.\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from sagemaker.estimator.Framework:\n | \n | CONTAINER_CODE_CHANNEL_SOURCEDIR_PATH = '/opt/ml/input/data/code/sourc...\n | \n | INSTANCE_TYPE = 'sagemaker_instance_type'\n | \n | LAUNCH_MPI_ENV_NAME = 'sagemaker_mpi_enabled'\n | \n | LAUNCH_PS_ENV_NAME = 'sagemaker_parameter_server_enabled'\n | \n | LAUNCH_SM_DDP_ENV_NAME = 'sagemaker_distributed_dataparallel_enabled'\n | \n | MPI_CUSTOM_MPI_OPTIONS = 'sagemaker_mpi_custom_mpi_options'\n | \n | MPI_NUM_PROCESSES_PER_HOST = 'sagemaker_mpi_num_of_processes_per_host'\n | \n | ----------------------------------------------------------------------\n | Methods inherited from sagemaker.estimator.EstimatorBase:\n | \n | compile_model(self, target_instance_family, input_shape, output_path, framework=None, framework_version=None, compile_max_run=900, tags=None, target_platform_os=None, target_platform_arch=None, target_platform_accelerator=None, compiler_options=None, **kwargs)\n | Compile a Neo model using the input model.\n | \n | Args:\n | target_instance_family (str): Identifies the device that you want to\n | run your model after compilation, for example: ml_c5. For allowed\n | strings see\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.\n | input_shape (dict): Specifies the name and shape of the expected\n | inputs for your trained model in json dictionary form, for\n | example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28],\n | 'var2':[1,1,28,28]}\n | output_path (str): Specifies where to store the compiled model\n | framework (str): The framework that is used to train the original\n | model. Allowed values: 'mxnet', 'tensorflow', 'keras', 'pytorch',\n | 'onnx', 'xgboost'\n | framework_version (str): The version of the framework\n | compile_max_run (int): Timeout in seconds for compilation (default:\n | 3 * 60). After this amount of time Amazon SageMaker Neo\n | terminates the compilation job regardless of its current status.\n | tags (list[dict]): List of tags for labeling a compilation job. 
For\n | more, see\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.\n | target_platform_os (str): Target Platform OS, for example: 'LINUX'.\n | For allowed strings see\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.\n | It can be used instead of target_instance_family.\n | target_platform_arch (str): Target Platform Architecture, for example: 'X86_64'.\n | For allowed strings see\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.\n | It can be used instead of target_instance_family.\n | target_platform_accelerator (str, optional): Target Platform Accelerator,\n | for example: 'NVIDIA'. For allowed strings see\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.\n | It can be used instead of target_instance_family.\n | compiler_options (dict, optional): Additional parameters for compiler.\n | Compiler Options are TargetPlatform / target_instance_family specific. See\n | https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html for details.\n | **kwargs: Passed to invocation of ``create_model()``.\n | Implementations may customize ``create_model()`` to accept\n | ``**kwargs`` to customize model creation during deploy. For\n | more, see the implementation docs.\n | \n | Returns:\n | sagemaker.model.Model: A SageMaker ``Model`` object. See\n | :func:`~sagemaker.model.Model` for full details.\n | \n | delete_endpoint = func(*args, **kwargs)\n | \n | deploy(self, initial_instance_count, instance_type, serializer=None, deserializer=None, accelerator_type=None, endpoint_name=None, use_compiled_model=False, wait=True, model_name=None, kms_key=None, data_capture_config=None, tags=None, **kwargs)\n | Deploy the trained model to an Amazon SageMaker endpoint and return a\n | ``sagemaker.Predictor`` object.\n | \n | More information:\n | http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html\n | \n | Args:\n | initial_instance_count (int): Minimum number of EC2 instances to\n | deploy to an endpoint for prediction.\n | instance_type (str): Type of EC2 instance to deploy to an endpoint\n | for prediction, for example, 'ml.c4.xlarge'.\n | serializer (:class:`~sagemaker.serializers.BaseSerializer`): A\n | serializer object, used to encode data for an inference endpoint\n | (default: None). If ``serializer`` is not None, then\n | ``serializer`` will override the default serializer. The\n | default serializer is set by the ``predictor_cls``.\n | deserializer (:class:`~sagemaker.deserializers.BaseDeserializer`): A\n | deserializer object, used to decode data from an inference\n | endpoint (default: None). If ``deserializer`` is not None, then\n | ``deserializer`` will override the default deserializer. The\n | default deserializer is set by the ``predictor_cls``.\n | accelerator_type (str): Type of Elastic Inference accelerator to\n | attach to an endpoint for model loading and inference, for\n | example, 'ml.eia1.medium'. If not specified, no Elastic\n | Inference accelerator will be attached to the endpoint. For more\n | information:\n | https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html\n | endpoint_name (str): Name to use for creating an Amazon SageMaker\n | endpoint. If not specified, the name of the training job is\n | used.\n | use_compiled_model (bool): Flag to select whether to use compiled\n | (optimized) model. 
Default: False.\n | wait (bool): Whether the call should wait until the deployment of\n | model completes (default: True).\n | model_name (str): Name to use for creating an Amazon SageMaker\n | model. If not specified, the estimator generates a default job name\n | based on the training image name and current timestamp.\n | kms_key (str): The ARN of the KMS key that is used to encrypt the\n | data on the storage volume attached to the instance hosting the\n | endpoint.\n | data_capture_config (sagemaker.model_monitor.DataCaptureConfig): Specifies\n | configuration related to Endpoint data capture for use with\n | Amazon SageMaker Model Monitoring. Default: None.\n | tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific\n | endpoint. Example:\n | >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]\n | For more information about tags, see\n | https://boto3.amazonaws.com/v1/documentation /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags\n | **kwargs: Passed to invocation of ``create_model()``.\n | Implementations may customize ``create_model()`` to accept\n | ``**kwargs`` to customize model creation during deploy.\n | For more, see the implementation docs.\n | \n | Returns:\n | sagemaker.predictor.Predictor: A predictor that provides a ``predict()`` method,\n | which can be used to send requests to the Amazon SageMaker\n | endpoint and obtain inferences.\n | \n | disable_profiling(self)\n | Update the current training job in progress to disable profiling.\n | \n | Debugger stops collecting the system and framework metrics\n | and turns off the Debugger built-in monitoring and profiling rules.\n | \n | enable_default_profiling(self)\n | Update training job to enable Debugger monitoring.\n | \n | This method enables Debugger monitoring with\n | the default ``profiler_config`` parameter to collect system\n | metrics and the default built-in ``profiler_report`` rule.\n | Framework metrics won't be saved.\n | To update training job to emit framework metrics, you can use\n | :class:`~sagemaker.estimator.Estimator.update_profiler`\n | method and specify the framework metrics you want to enable.\n | \n | This method is callable when the training job is in progress while\n | Debugger monitoring is disabled.\n | \n | enable_network_isolation(self)\n | Return True if this Estimator will need network isolation to run.\n | \n | Returns:\n | bool: Whether this Estimator needs network isolation or not.\n | \n | fit(self, inputs=None, wait=True, logs='All', job_name=None, experiment_config=None)\n | Train a model using the input training dataset.\n | \n | The API calls the Amazon SageMaker CreateTrainingJob API to start\n | model training. The API uses configuration you provided to create the\n | estimator and the specified input training data to send the\n | CreatingTrainingJob request to Amazon SageMaker.\n | \n | This is a synchronous operation. After the model training\n | successfully completes, you can call the ``deploy()`` method to host the\n | model using the Amazon SageMaker hosting services.\n | \n | Args:\n | inputs (str or dict or sagemaker.inputs.TrainingInput): Information\n | about the training data. 
This can be one of three types:\n | \n | * (str) the S3 location where training data is saved, or a file:// path in\n | local mode.\n | * (dict[str, str] or dict[str, sagemaker.inputs.TrainingInput]) If using multiple\n | channels for training data, you can specify a dict mapping channel names to\n | strings or :func:`~sagemaker.inputs.TrainingInput` objects.\n | * (sagemaker.inputs.TrainingInput) - channel configuration for S3 data sources\n | that can provide additional information as well as the path to the training\n | dataset.\n | See :func:`sagemaker.inputs.TrainingInput` for full details.\n | * (sagemaker.session.FileSystemInput) - channel configuration for\n | a file system data source that can provide additional information as well as\n | the path to the training dataset.\n | \n | wait (bool): Whether the call should wait until the job completes (default: True).\n | logs ([str]): A list of strings specifying which logs to print. Acceptable\n | strings are \"All\", \"None\", \"Training\", or \"Rules\". To maintain backwards\n | compatibility, boolean values are also accepted and converted to strings.\n | Only meaningful when wait is True.\n | job_name (str): Training job name. If not specified, the estimator generates\n | a default job name based on the training image name and current timestamp.\n | experiment_config (dict[str, str]): Experiment management configuration.\n | Dictionary contains three optional keys,\n | 'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.\n | \n | get_vpc_config(self, vpc_config_override='VPC_CONFIG_DEFAULT')\n | Returns VpcConfig dict either from this Estimator's subnets and\n | security groups, or else validate and return an optional override value.\n | \n | Args:\n | vpc_config_override:\n | \n | latest_job_debugger_artifacts_path(self)\n | Gets the path to the DebuggerHookConfig output artifacts.\n | \n | Returns:\n | str: An S3 path to the output artifacts.\n | \n | latest_job_profiler_artifacts_path(self)\n | Gets the path to the profiling output artifacts.\n | \n | Returns:\n | str: An S3 path to the output artifacts.\n | \n | latest_job_tensorboard_artifacts_path(self)\n | Gets the path to the TensorBoardOutputConfig output artifacts.\n | \n | Returns:\n | str: An S3 path to the output artifacts.\n | \n | logs(self)\n | Display the logs for Estimator's training job.\n | \n | If the output is a tty or a Jupyter cell, it will be color-coded based\n | on which instance the log entry is from.\n | \n | prepare_workflow_for_training(self, job_name=None)\n | Calls _prepare_for_training. Used when setting up a workflow.\n | \n | Args:\n | job_name (str): Name of the training job to be created. 
If not\n | specified, one is generated, using the base name given to the\n | constructor if applicable.\n | \n | register(self, content_types, response_types, inference_instances, transform_instances, image_uri=None, model_package_name=None, model_package_group_name=None, model_metrics=None, metadata_properties=None, marketplace_cert=False, approval_status=None, description=None, compile_model_family=None, model_name=None, **kwargs)\n | Creates a model package for creating SageMaker models or listing on Marketplace.\n | \n | Args:\n | content_types (list): The supported MIME types for the input data.\n | response_types (list): The supported MIME types for the output data.\n | inference_instances (list): A list of the instance types that are used to\n | generate inferences in real-time.\n | transform_instances (list): A list of the instance types on which a transformation\n | job can be run or on which an endpoint can be deployed.\n | image_uri (str): The container image uri for Model Package, if not specified,\n | Estimator's training container image will be used (default: None).\n | model_package_name (str): Model Package name, exclusive to `model_package_group_name`,\n | using `model_package_name` makes the Model Package un-versioned (default: None).\n | model_package_group_name (str): Model Package Group name, exclusive to\n | `model_package_name`, using `model_package_group_name` makes the Model Package\n | versioned (default: None).\n | model_metrics (ModelMetrics): ModelMetrics object (default: None).\n | metadata_properties (MetadataProperties): MetadataProperties (default: None).\n | marketplace_cert (bool): A boolean value indicating if the Model Package is certified\n | for AWS Marketplace (default: False).\n | approval_status (str): Model Approval Status, values can be \"Approved\", \"Rejected\",\n | or \"PendingManualApproval\" (default: \"PendingManualApproval\").\n | description (str): Model Package description (default: None).\n | compile_model_family (str): Instance family for compiled model, if specified, a compiled\n | model will be used (default: None).\n | model_name (str): User defined model name (default: None).\n | **kwargs: Passed to invocation of ``create_model()``. Implementations may customize\n | ``create_model()`` to accept ``**kwargs`` to customize model creation during\n | deploy. For more, see the implementation docs.\n | \n | Returns:\n | str: A string of SageMaker Model Package ARN.\n | \n | update_profiler(self, rules=None, system_monitor_interval_millis=None, s3_output_path=None, framework_profile_params=None, disable_framework_metrics=False)\n | Update training jobs to enable profiling.\n | \n | This method updates the ``profiler_config`` parameter\n | and initiates Debugger built-in rules for profiling.\n | \n | Args:\n | rules (list[:class:`~sagemaker.debugger.ProfilerRule`]): A list of\n | :class:`~sagemaker.debugger.ProfilerRule` objects to define\n | rules for continuous analysis with SageMaker Debugger. Currently, you can\n | only add new profiler rules during the training job. (default: ``None``)\n | s3_output_path (str): The location in S3 to store the output. If profiler is enabled\n | once, s3_output_path cannot be changed. (default: ``None``)\n | system_monitor_interval_millis (int): How often profiling system metrics are\n | collected; Unit: Milliseconds (default: ``None``)\n | framework_profile_params (:class:`~sagemaker.debugger.FrameworkProfile`):\n | A parameter object for framework metrics profiling. 
Configure it using\n | the :class:`~sagemaker.debugger.FrameworkProfile` class.\n | To use the default framework profile parameters, pass ``FrameworkProfile()``.\n | For more information about the default values,\n | see :class:`~sagemaker.debugger.FrameworkProfile`. (default: ``None``)\n | disable_framework_metrics (bool): Specify whether to disable all the framework metrics.\n | This won't update system metrics and the Debugger built-in rules for monitoring.\n | To stop both monitoring and profiling,\n | use the :class:`~sagemaker.estimator.Estimator.desable_profiling`\n | method. (default: ``False``)\n | \n | .. attention::\n | \n | Updating the profiling configuration for TensorFlow dataloader profiling\n | is currently not available. If you started a TensorFlow training job only with\n | monitoring and want to enable profiling while the training job is running,\n | the dataloader profiling cannot be updated.\n | \n | ----------------------------------------------------------------------\n | Readonly properties inherited from sagemaker.estimator.EstimatorBase:\n | \n | model_data\n | str: The model location in S3. Only set if Estimator has been\n | ``fit()``.\n | \n | training_job_analytics\n | Return a ``TrainingJobAnalytics`` object for the current training\n | job.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from sagemaker.estimator.EstimatorBase:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n\n"
]
],
[
[
"### Specify data input and output",
"_____no_output_____"
]
],
[
[
"inputs = {\n 'train': '{}/train'.format(dataset_uri),\n 'validation': '{}/validation'.format(dataset_uri),\n 'eval': '{}/eval'.format(dataset_uri),\n}",
"_____no_output_____"
]
],
[
[
"### Execute Training",
"_____no_output_____"
]
],
[
[
"estimator.fit(inputs)",
"2021-02-08 06:06:01 Starting - Starting the training job...\n2021-02-08 06:06:25 Starting - Launching requested ML instancesProfilerReport-1612764359: InProgress\n......\n2021-02-08 06:07:32 Starting - Preparing the instances for training........."
]
],
[
[
"### Checking the accuracy of a model with TensorBoard\n\nUsing the visualization tool [TensorBoard](https://www.tensorflow.org/tensorboard), we can compare our training jobs.\n\nIn a local setting, install TensorBoard with `pip install tensorboard`. Then run the command generated by the following code:",
"_____no_output_____"
]
],
[
[
"!python generate_tensorboard_command.py",
"_____no_output_____"
],
[
"! AWS_REGION=us-west-2 tensorboard --logdir file:\"s3://sagemaker-us-west-2-005242542034/cifar10-tf-2021-02-08-04-01-54-836/model\"",
"_____no_output_____"
]
],
[
[
"After running that command, we can access TensorBoard locally at http://localhost:6006.\n\nBased on the TensorBoard metrics, we can see that:\n1. All jobs run for 10 epochs (0 - 9).\n1. Both File Mode and Pipe Mode run for ~1 minute - Pipe Mode doesn't affect training performance.\n1. Distributed training runs for only 45 seconds.\n1. All of the training jobs resulted in similar validation accuracy.\n\nThis example uses a relatively small dataset (179 MB). For larger datasets, Pipe Mode can significantly reduce training time because it does not copy the entire dataset into local memory.",
"_____no_output_____"
],
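[
"For such larger datasets, Pipe Mode is enabled when constructing the estimator. A minimal sketch added for illustration (not from the original run), assuming the same `role`, `instance_type` and `inputs` defined above; the base job name is made up:\n\n```python\n# Pipe Mode streams training data from S3 on demand instead of\n# downloading the full dataset to the instance before training starts.\npipe_estimator = TensorFlow(entry_point='cifar10_keras_main.py',\n                            source_dir='source_dir',\n                            role=role,\n                            framework_version='1.15.2',\n                            py_version='py3',\n                            instance_count=1,\n                            instance_type=instance_type,\n                            input_mode='Pipe',\n                            base_job_name='cifar10-tf-pipe')\npipe_estimator.fit(inputs)\n```",
"_____no_output_____"
],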
[
"## Predict by trained Model",
"_____no_output_____"
],
[
"### Deploy the trained model",
"_____no_output_____"
]
],
[
[
"predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')",
"_____no_output_____"
]
],
[
[
"### Invoke the endpoint\n\nI'll try to generate a random matrix and see if the predictor is working.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndata = np.random.randn(1, 32, 32, 3)\nprint('Predicted class: {}'.format(np.argmax(predictor.predict(data)['predictions'])))",
"_____no_output_____"
]
],
[
[
"### Download the dataset for prediction",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.datasets import cifar10\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()",
"_____no_output_____"
]
],
[
[
"### Prediction",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ndef predict(data):\n predictions = predictor.predict(data)['predictions']\n return predictions\n\n\npredicted = []\nactual = []\nbatches = 0\nbatch_size = 128\n\ndatagen = ImageDataGenerator()\nfor data in datagen.flow(x_test, y_test, batch_size=batch_size):\n for i, prediction in enumerate(predict(data[0])):\n predicted.append(np.argmax(prediction))\n actual.append(data[1][i][0])\n\n batches += 1\n if batches >= len(x_test) / batch_size:\n break",
"_____no_output_____"
]
],
[
[
"### Accuracy",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\naccuracy = accuracy_score(y_pred=predicted, y_true=actual)\ndisplay('Average accuracy: {}%'.format(round(accuracy * 100, 2)))",
"_____no_output_____"
]
],
[
[
"### Confusion Matrix",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sn\nfrom sklearn.metrics import confusion_matrix\n\ncm = confusion_matrix(y_pred=predicted, y_true=actual)\ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nsn.set(rc={'figure.figsize': (11.7,8.27)})\nsn.set(font_scale=1.4) # for label size\nsn.heatmap(cm, annot=True, annot_kws={\"size\": 10}) # font size",
"_____no_output_____"
]
],
[
[
"## Cleanup\n\nTo avoid incurring extra charges to your AWS account, let's delete the endpoint we created:",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f31b6f93d14a7ea2340fc5a814ffff60aff9e2 | 92,622 | ipynb | Jupyter Notebook | examples/examples.ipynb | matthieubulte/Probabilistic.jl | 8eb68f7c9372570657efb8362ff5bc7b09bca62b | [
"MIT"
] | null | null | null | examples/examples.ipynb | matthieubulte/Probabilistic.jl | 8eb68f7c9372570657efb8362ff5bc7b09bca62b | [
"MIT"
] | null | null | null | examples/examples.ipynb | matthieubulte/Probabilistic.jl | 8eb68f7c9372570657efb8362ff5bc7b09bca62b | [
"MIT"
] | null | null | null | 686.088889 | 47,558 | 0.950584 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f3342c2e4c69ee31104e596653a81af148fcb8 | 9,524 | ipynb | Jupyter Notebook | Naive_Bayes_Diabetes/Naive_Bayes.ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | 1 | 2020-05-29T20:07:49.000Z | 2020-05-29T20:07:49.000Z | Naive_Bayes_Diabetes/Naive_Bayes.ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | null | null | null | Naive_Bayes_Diabetes/Naive_Bayes.ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | null | null | null | 23.81 | 454 | 0.604998 | [
[
[
"We will use Naive Bayes to model the \"Pima Indians Diabetes\" data set. This model will predict which people are likely to develop diabetes.\n\nThis dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective of the dataset is to diagnostically predict whether or not a patient has diabetes, based on certain diagnostic measurements included in the dataset. Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.",
"_____no_output_____"
],
[
"## Import Libraries",
"_____no_output_____"
]
],
[
[
"# data processing, CSV file I/O\n# matplotlib.pyplot plots data\n\n",
"_____no_output_____"
]
],
[
[
"## Load and review data",
"_____no_output_____"
]
],
[
[
"# Check number of columns and rows in data frame\n",
"_____no_output_____"
],
[
"# To check first 5 rows of data set\n",
"_____no_output_____"
],
[
"# If there are any null values in data set\n",
"_____no_output_____"
],
[
"# Excluding Outcome column\n\n# Histogram of first 8 columns\n\n",
"_____no_output_____"
]
],
[
[
"## Identify Correlation in data ",
"_____no_output_____"
]
],
[
[
"#show correlation matrix \n\n",
"_____no_output_____"
],
[
"# However we want to see correlation in graphical representation\n\n",
"_____no_output_____"
]
],
[
[
"## Calculate diabetes ratio of True/False from outcome variable ",
"_____no_output_____"
],
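[
"A hedged sketch of that check, assuming the DataFrame is named `df` and the label column is `Outcome` (1 = diabetic, 0 = not), as in the standard Pima dataset:\n\n```python\n# Proportion of diabetic vs. non-diabetic cases.\nprint(df['Outcome'].value_counts(normalize=True))\n```",
"_____no_output_____"
],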
[
"## Spliting the data \n",
"_____no_output_____"
],
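[
"A minimal sketch of the split, reusing the assumed `df` from above (the variable names are illustrative, not from the original template):\n\n```python\nfrom sklearn.model_selection import train_test_split\n\n# Assumed names: X holds the 8 feature columns, y holds Outcome.\nX = df.drop('Outcome', axis=1)\ny = df['Outcome']\n\n# 70/30 split; stratify keeps the True/False ratio similar in both sets.\nX_train, X_test, y_train, y_test = train_test_split(\n    X, y, test_size=0.3, random_state=42, stratify=y)\n```",
"_____no_output_____"
],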
[
"Lets check split of data",
"_____no_output_____"
],
[
"Now lets check diabetes True/False ratio in split data ",
"_____no_output_____"
],
[
"# Data Preparation\n\n### Check hidden missing values \n\nAs we checked missing values earlier but haven't got any. But there can be lots of entries with 0 values. We must need to take care of those as well.",
"_____no_output_____"
],
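[
"One way to surface those hidden zeros (the column names assume the standard Pima dataset):\n\n```python\n# Columns where a value of 0 is physiologically impossible,\n# so 0 effectively means \"missing\".\nzero_cols = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']\nprint((df[zero_cols] == 0).sum())\n```",
"_____no_output_____"
],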
[
"### Replace 0s with serial mean ",
"_____no_output_____"
],
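[
"A hedged sketch of the replacement, reusing `zero_cols` and the assumed `df` from above:\n\n```python\nimport numpy as np\n\n# Treat 0 as missing, then fill with each series' (column's) mean.\ndf[zero_cols] = df[zero_cols].replace(0, np.nan)\ndf[zero_cols] = df[zero_cols].fillna(df[zero_cols].mean())\n```",
"_____no_output_____"
],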
[
"# Train Naive Bayes algorithm ",
"_____no_output_____"
],
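[
"A minimal training sketch, assuming the split defined earlier; Gaussian Naive Bayes suits these continuous diagnostic measurements:\n\n```python\nfrom sklearn.naive_bayes import GaussianNB\n\nmodel = GaussianNB()\nmodel.fit(X_train, y_train)\n```",
"_____no_output_____"
],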
[
"### Performance of our model with training data",
"_____no_output_____"
],
[
"### Performance of our model with testing data",
"_____no_output_____"
],
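[
"Both performance checks can be sketched with `score`, assuming the fitted `model` from the sketch above:\n\n```python\nprint('Train accuracy:', model.score(X_train, y_train))\nprint('Test accuracy:', model.score(X_test, y_test))\n```",
"_____no_output_____"
],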
[
"### Lets check the confusion matrix and classification report ",
"_____no_output_____"
]
],
[
[
"# Print Classification report\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e7f33e9b312c6beb6c7028fb60bee8c11d1d43a1 | 18,963 | ipynb | Jupyter Notebook | codes/chapter-exercise-edwardzcn/3_2HistogramEqualization.ipynb | CSU-CS-WIKI/digital-image-process | 7df204b0bd7e14cd05f91987c7f8dccb3a6b3423 | [
"MIT"
] | null | null | null | codes/chapter-exercise-edwardzcn/3_2HistogramEqualization.ipynb | CSU-CS-WIKI/digital-image-process | 7df204b0bd7e14cd05f91987c7f8dccb3a6b3423 | [
"MIT"
] | null | null | null | codes/chapter-exercise-edwardzcn/3_2HistogramEqualization.ipynb | CSU-CS-WIKI/digital-image-process | 7df204b0bd7e14cd05f91987c7f8dccb3a6b3423 | [
"MIT"
] | null | null | null | 46.477941 | 1,887 | 0.586405 | [
[
[
"import numpy as np\nimport cv2\nimport imageio\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def GetRGB(path):\n im_BGR = cv2.imread(path,cv2.COLOR_GRAY2RGB)\n im = cv2.cvtColor(im_BGR,cv2.COLOR_BGR2RGB)\n return im\n\n# 设置为你的图片\n# im = GetRGB('./3_2Photo/1.jpg')",
"_____no_output_____"
],
[
"# 尝试直方图均衡化, \ndef CaculateHistogram(input_image):\n # 参数1. 单通道8位灰度图像\n # 输出1. 总像素值 \n # 输出2. 灰度值分布计数\n # 输出3. 灰度值分布前缀和(用来做后续直方图平局奴化)\n \n # 区分通道\n if len(np.shape(input_image)) == 3:\n height,width,level = np.shape(input_image)\n summ = height*width*level\n else :\n height,width = np.shape(input_image)\n summ = height*width\n\n \n# 第一种 通过访问指定位置像素 耗时executed in 7m 9s\n# 巨慢无比\n# for i in range(height):\n# for j in range(width):\n# caculate_num[input_image[i][j]] += 1\n \n# 第二种 直接拆分行,像素获取像素值 耗时executed in 14.4s\n# for line in input_image:\n# for px in line:\n# caculate_num[px] +=1 \n\n# 第三种 调用np.histogram()方法 耗时executed in 644ms\n caculate_num,index_x = GetHistogramArray(input_image)\n caculate_num = np.append(caculate_num,1)\n# print(np.shape(caculate_num))\n # 第四种 吕少推荐的方法(还没看懂)\n# caculate_num = cumsum(input_image,256)\n# print(np.shape(caculate_num))\n \n sum_num = np.copy(caculate_num)\n for i in range(1,256):\n# print(sum_num[i-1],caculate_num[i])\n sum_num[i] = sum_num[i-1] + sum_num[i]\n return summ,caculate_num,sum_num\n \ndef GetHistogramArray(image):\n return np.histogram(image,np.arange(0,256));\n\ndef cumsum(img, bins):\n \n histogram = np.zeros(bins)\n for pixel in np.arange(0, bins, 1):\n histogram[pixel] += len(img[img==pixel])\n \n return histogram\n \ndef HistogramEqualizationLUT(input_image):\n # 参数1, 单通道8位灰度图像\n # 输出1. 单通道8位灰度图像(直方图均衡化后)\n size,data,data_sum = CaculateHistogram(input_image)\n fxy = lambda x: (255*data_sum[x])//size\n table = np.array([fxy(i) for i in range(256)])\n# print(table)\n# 不可以这样\n# image_new = table[input_image]\n lut = lambda x: table[x]\n return lut(input_image),table\n\n\nred_channel_he,red_lut_table = HistogramEqualizationLUT(im[:,:,0])\nall_he,lut_table = HistogramEqualizationLUT(im)\n\n\nplt.figure(figsize=(20,10))\nax1 = plt.subplot(231)\nax1.set_title(\"Original\")\nax1.imshow(im)\n\nax2 = plt.subplot(232)\nax2.set_title(\"Red Channel HE\")\nax2.imshow(red_channel_he, cmap = 'gray')\n\n\nnew_im = np.copy(im)\nnew_im[:,:,0] = red_channel_he\nax3 = plt.subplot(233)\nax3.set_title(\"Image With Red Channel HE\")\nax3.imshow(new_im)\n\nax4 = plt.subplot(234)\nax4.set_title(\"Original\")\nax4.imshow(im)\n\nax5 = plt.subplot(235)\nax5.set_title(\"All Channel Together HE\")\nax5.imshow(all_he,cmap = \"gray\")\n\nax6 = plt.subplot(236)\nax6.set_title(\"All Channel Together HE\")\nax6.imshow(all_he)\n\nplt.show()\n",
"_____no_output_____"
],
[
"# OK, Do something interesting!\n# 写一个整合函数,对每个输入图像的三通道都画出其三通道的函数变换图像和\n\n\ndef DrawPerChannel(input_img,split_channel : int, channel_color ,plotline = None, histogram = None, pic = None):\n# 该通道的均衡曲线(分布函数曲线)\n# 获取分离的通道\n input_channel = input_img[:,:,split_channel]\n temp_he,temp_table = HistogramEqualizationLUT(input_channel)\n if plotline != None:\n plotline.set_title(\"The function of \"+channel_color+\" channel\")\n plotline.plot(np.arange(256),np.arange(256),color = \"black\",label = \"$r_{\" + channel_color +\"}$\")\n plotline.plot(np.arange(256),temp_table,color = channel_color ,label = \"$T(r_{\"+ channel_color +\"})$\")\n plotline.legend()\n# 该通道的灰度分布直方图\n if histogram != None:\n kwargs = dict(bins = 25, histtype='bar', edgecolor = \"white\",alpha=0.5, density = True)\n histogram.set_title(\"The histogram of \"+channel_color+\" channel\")\n histogram.hist(input_channel.flatten(), color = \"black\",**kwargs, label = channel_color +\" original\")\n histogram.hist(temp_he.flatten(), color = channel_color , **kwargs, label = channel_color +\" after he\")\n histogram.legend()\n# 该通道均匀后叠加到原图像的效果\n if pic != None:\n show_img = input_img.copy()\n show_img[:,:,split_channel] = temp_he\n pic.imshow(show_img)\n \n \n \n\ndef DrawChannelsEqualization(img_with_3channels):\n plt.figure(figsize=(30,30))\n# 调用库函数\n# r,g,b = cv2.split(im)\n# 直接分离 \n# r,g,b = img_with_3channels[:,:,0],img_with_3channels[:,:,1],img_with_3channels[:,:,2]\n DrawPerChannel(img_with_3channels,0,\"red\",plt.subplot(331),plt.subplot(334),plt.subplot(337))\n DrawPerChannel(img_with_3channels,1,\"green\",plt.subplot(332),plt.subplot(335),plt.subplot(338))\n DrawPerChannel(img_with_3channels,2,\"blue\",plt.subplot(333),plt.subplot(336),plt.subplot(339))\n plt.show()\n\nDrawChannelsEqualization(im)",
"_____no_output_____"
],
[
"# 测试三通道分别做均衡化后效果和采取统一函数做均衡化的效果\ndef DrawCompareEqualization(im_with_3_channel):\n together_img,together_table = HistogramEqualizationLUT(im_with_3_channel)\n seperate_img = im_with_3_channel.copy()\n seperate_table = np.array([together_table,together_table,together_table])\n for i in range(3):\n seperate_img[:,:,i],seperate_table[i] = HistogramEqualizationLUT(im_with_3_channel[:,:,i])\n \n plt.figure(figsize=(15,10))\n plotline0 = plt.subplot(231)\n plotline0.set_title(\"No Change\")\n plotline0.plot(np.arange(256),np.arange(256),color = \"black\",label = \"$r$\")\n plotline1 = plt.subplot(232)\n plotline1.set_title(\"Seperate Change\")\n plotline1.plot(np.arange(256),np.arange(256),color = \"black\",label = \"$r$\")\n plotline1.plot(np.arange(256),seperate_table[0],color = \"red\" ,label = \"$T(r_{red})$\") \n plotline1.plot(np.arange(256),seperate_table[1],color = \"green\" ,label = \"$T(r_{green})$\")\n plotline1.plot(np.arange(256),seperate_table[2],color = \"blue\" ,label = \"$T(r_{blue})$\")\n plotline1.legend()\n plotline2 = plt.subplot(233)\n plotline2.set_title(\"Together Change\")\n plotline2.plot(np.arange(256),np.arange(256),color = \"black\",label = \"$r$\")\n plotline2.plot(np.arange(256),together_table,color = \"orange\" ,label = \"$T(r_{together})$\")\n plotline2.legend()\n pic0 = plt.subplot(234)\n pic0.imshow(im_with_3_channel)\n pic1 = plt.subplot(235)\n pic1.imshow(seperate_img)\n pic2 = plt.subplot(236)\n pic2.imshow(together_img)\n plt.show()\n\nDrawCompareEqualization(im)",
"_____no_output_____"
],
[
"# 多来几组样例\n\nimtest1 = imageio.imread('imageio:chelsea.png')\nimtest2 = GetRGB('./3_2Photo/2.jpg')\nimtest3 = GetRGB('./3_2Photo/3.jpg')\nimtest4 = GetRGB('./3_2Photo/4.tif')\n\nDrawCompareEqualization(imtest1)\n# DrawCompareEqualization(imtest2)\nDrawCompareEqualization(imtest3)\nDrawCompareEqualization(imtest4)",
"_____no_output_____"
],
[
"# 展示直方图\ndef DrawHist(input_img,pic_handle,histogram_handle):\n kwargs = dict(bins = 25, histtype='bar', edgecolor = \"white\",alpha=0.5, density = True)\n pic_handle.set_title(\"The Image\")\n pic_handle.imshow(input_img,cmap = \"gray\")\n histogram_handle.set_title(\"The histogram_handle\")\n histogram_handle.hist(input_img.flatten(),**kwargs)\n\n\nplt.figure(figsize=(20,15))\nDrawHist(imtest1[:,:,0],plt.subplot(321),plt.subplot(322))\nDrawHist(imtest2[:,:,0],plt.subplot(323),plt.subplot(324))\nDrawHist(imtest3[:,:,0],plt.subplot(325),plt.subplot(326))\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f343d1810e35271bcf2ddffb07ced0bb3edcc9 | 15,189 | ipynb | Jupyter Notebook | Aula1/ResolucaoExercicios_Aula01.ipynb | anablima/CursoUSP_PythonNLP | 037d8fdcb0b8e103b2e89ce7f1db72bd79dc366d | [
"MIT"
] | 1 | 2022-02-04T10:36:31.000Z | 2022-02-04T10:36:31.000Z | Aula1/ResolucaoExercicios_Aula01.ipynb | anablima/CursoUSP_PythonNLP | 037d8fdcb0b8e103b2e89ce7f1db72bd79dc366d | [
"MIT"
] | null | null | null | Aula1/ResolucaoExercicios_Aula01.ipynb | anablima/CursoUSP_PythonNLP | 037d8fdcb0b8e103b2e89ce7f1db72bd79dc366d | [
"MIT"
] | null | null | null | 25.023064 | 795 | 0.497926 | [
[
[
"# Resolução dos Exercícios - Lista I",
"_____no_output_____"
],
[
"### 1. Crie três variáveis e atribua os valores a seguir: a=1, b=5.9 e c=‘teste’. A partir disso, retorne o tipo de cada uma das variáveis.",
"_____no_output_____"
]
],
[
[
"# Criando as variáveis\na=1\nb=5\nc='teste'\n\n# Retornando o tipo de cada variável\nprint(\"Tipos das variáveis:\\n>> Variável 'a' é do tipo {typea}.\"\n \"\\n>> Variável 'b' é do tipo {typeb}.\"\n \"\\n>> Variável 'c' é do tipo {typec}\".format(typea=type(a), \n typeb=type(b), \n typec=type(c)))",
"Tipos das variáveis:\n>> Variável 'a' é do tipo <class 'int'>.\n>> Variável 'b' é do tipo <class 'int'>.\n>> Variável 'c' é do tipo <class 'str'>\n"
]
],
[
[
"### 2. Troque o valor da variável a por ‘1’ e verifique se o tipo da variável mudou.",
"_____no_output_____"
]
],
[
[
"# Alterando a variável\na='1'\n\n# Retornando o novo tipo da variável\nprint(\"O tipo da variável 'a' mudou para \", type(a))",
"O tipo da variável 'a' mudou para <class 'str'>\n"
]
],
[
[
"### 3. Faça a soma da variável b com a variável c. Interprete a saída, tanto em caso de execução correta quanto em caso de execução com erro.",
"_____no_output_____"
]
],
[
[
"print(b+c)\n\n# Não podemos realizar operações aritméticas entre variáveis com tipos diferentes.\n# Para isso ambas as variáveis precisam ser do mesmo tipo ou retorna erro.",
"_____no_output_____"
]
],
[
[
"### 4. Crie uma lista com números de 0 a 9 (em qualquer ordem) e faça:\n* a) Adicione o número 6\n* b) Insira o número 7 na 3ª posição da lista\n* c) Remova o elemento 3 da lista\n* d) Adicione o número 4\n* e) Verifique o número de ocorrências do número 4 na lista",
"_____no_output_____"
]
],
[
[
"# Criando a lista\nl1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nl1",
"_____no_output_____"
],
[
"# a) Adicione o número 6\nl1.append(6)\nl1",
"_____no_output_____"
],
[
"# b) Insira o número 7 na 3ª posição da lista\nl1.insert(2,7)\nl1",
"_____no_output_____"
],
[
"# c) Remova o elemento 3 da lista\nl1.remove(3)\nl1",
"_____no_output_____"
],
[
"# d) Adicione o número 4\nl1.append(4)\nl1",
"_____no_output_____"
],
[
"# e) Verifique o número de ocorrências do número 4 na lista\nprint(l1.count(4))",
"2\n"
]
],
[
[
"### 5. Ainda com a lista criada na questão anterior, faça:\n* a) Retorne os primeiros 3 elementos da lista\n* b) Retorne os elementos que estão da 3ª posição até a 7ª posição da lista\n* c) Retorne os elementos da lista de 3 em 3 elementos\n* d) Retorne os 3 últimos elementos da lista\n* e) Retorne todos os elementos menos os 4 últimos da lista",
"_____no_output_____"
]
],
[
[
"# a) Retorne os primeiros 3 elementos da lista\nprint('Lista:', l1)\nprint('\\n3 primeiros elementos da lista:', l1[:3])",
"Lista: [0, 1, 7, 2, 4, 5, 6, 7, 8, 9, 6, 4]\n\n3 primeiros elementos da lista: [0, 1, 7]\n"
],
[
"# b) Retorne os elementos que estão da 3ª posição até a 7ª posição da lista\nprint('Lista:', l1)\nprint('\\nElementos da 3ª a 7ª posição da lista:', l1[2:7])",
"Lista: [0, 1, 7, 2, 4, 5, 6, 7, 8, 9, 6, 4]\n\nElementos da 3ª a 7ª posição da lista: [7, 2, 4, 5, 6]\n"
],
[
"# c) Retorne os elementos da lista de 3 em 3 elementos\nprint('Posições de 1 a 3: ', l1[:3])\nprint('Posições de 4 a 6: ', l1[3:6])\nprint('Posições de 7 a 9: ', l1[6:9])\nprint('Posições de 10 a 12: ', l1[9:12])",
"Posições de 1 a 3: [0, 1, 7]\nPosições de 4 a 6: [2, 4, 5]\nPosições de 7 a 9: [6, 7, 8]\nPosições de 10 a 12: [9, 6, 4]\n"
],
[
"# d) Retorne os 3 últimos elementos da lista\nprint('Lista:', l1)\nprint('\\n3 últimos elementos da lista:', l1[-3:])",
"Lista: [0, 1, 7, 2, 4, 5, 6, 7, 8, 9, 6, 4]\n\n3 últimos elementos da lista: [9, 6, 4]\n"
],
[
"# e) Retorne todos os elementos menos os 4 últimos da lista\nprint('Lista:', l1)\nprint('\\nTodos os elementos menos os 4 últimos da lista:', l1[:-4])",
"Lista: [0, 1, 7, 2, 4, 5, 6, 7, 8, 9, 6, 4]\n\nTodos os elementos menos os 4 últimos da lista: [0, 1, 7, 2, 4, 5, 6, 7]\n"
]
],
[
[
"### 6. Com a lista das questões anteriores, retorne o 6º elemento da lista.",
"_____no_output_____"
]
],
[
[
"print('Lista:', l1)\nprint('\\n6ª posição da lista:', l1[6])",
"Lista: [0, 1, 2, 4, 4, 5, 6, 7, 9, 12]\n\n6ª posição da lista: 7\n"
]
],
[
[
"### 7. Altere o valor do 7º elemento da lista para o valor 12.",
"_____no_output_____"
]
],
[
[
"print('Lista:', l1)",
"Lista: [0, 1, 7, 2, 4, 5, 6, 9, 6, 4]\n"
],
[
"l1[6] = 12\nprint('\\nLista com a alteração:', l1)",
"\nLista com a alteração: [0, 1, 7, 2, 4, 5, 12, 9, 6, 4]\n"
]
],
[
[
"### 8. Inverta a ordem dos elementos na lista.",
"_____no_output_____"
]
],
[
[
"print('Lista:', l1)",
"Lista: [0, 1, 2, 4, 4, 5, 6, 7, 9, 12]\n"
],
[
"l1.reverse()\nprint('\\nLista invertida:', l1)",
"\nLista invertida: [12, 9, 7, 6, 5, 4, 4, 2, 1, 0]\n"
]
],
[
[
"### 9. Ordene a lista",
"_____no_output_____"
]
],
[
[
"print('Lista:', l1)",
"Lista: [12, 9, 7, 6, 5, 4, 4, 2, 1, 0]\n"
],
[
"l1.sort()\nprint('\\nLista invertida:', l1)",
"\nLista invertida: [0, 1, 2, 4, 4, 5, 6, 7, 9, 12]\n"
]
],
[
[
"### 10. Crie uma tupla com números de 0 a 9 (em qualquer ordem) e tente:\n* a) Alterar o valor do 3º elemento da tupla para o valor 10\n* b) Verificar o índice (posição) do valor 5 na tupla",
"_____no_output_____"
]
],
[
[
"# Criando a tupla\nt1 = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\nt1",
"_____no_output_____"
],
[
"# a) Alterar o valor do 3º elemento da tupla para o valor 10\nt1[3] = 10\nt1\n\n# Tuplas não são alteráveis, somente as listas são.",
"_____no_output_____"
],
[
"# b) Verificar o índice (posição) do valor 5 na tupla\nprint('Tupla: ', t1)\nprint('\\nIndex do número 5 é:', t1.index(5))",
"Tupla: (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\n\nIndex do número 5 é: 5\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f358b16bf0e46a1705ce3afd4aa261907083b0 | 204,997 | ipynb | Jupyter Notebook | section_4/4-7.ipynb | PacktPublishing/Hands-On-Machine-Learning-with-Scikit-Learn-and-TensorFlow-2.0 | 8cfe9e6be7426ad2b59567f60f7436102f26a642 | [
"MIT"
] | 29 | 2020-02-20T09:52:18.000Z | 2020-05-31T18:06:57.000Z | section_4/4-7.ipynb | PacktPublishing/Practical-Machine-Learning-with-TensorFlow-2.0-and-Scikit-Learn | 8cfe9e6be7426ad2b59567f60f7436102f26a642 | [
"MIT"
] | null | null | null | section_4/4-7.ipynb | PacktPublishing/Practical-Machine-Learning-with-TensorFlow-2.0-and-Scikit-Learn | 8cfe9e6be7426ad2b59567f60f7436102f26a642 | [
"MIT"
] | 11 | 2020-07-15T19:51:50.000Z | 2022-01-27T23:17:21.000Z | 458.606264 | 99,520 | 0.922604 | [
[
[
"%config IPCompleter.greedy = True\n%config InlineBackend.figure_format = 'retina'\n%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\n\npd.set_option('mode.chained_assignment', None)\nsn.set(rc={'figure.figsize':(9,9)})\nsn.set(font_scale=1.4)",
"_____no_output_____"
]
],
[
[
"## Boltzmann Machines\n\nA Boltzmann machine is a type of stochastic recurrent neural network. It is a Markov random field (a undirected graphical model is a set of random variables that has the *Markov property* (the conditional probability distribution of future states of the process (conditional on both past and present states) depends only upon the present state, not on the sequence of events that preceded it)). They were one of the first neural networks capable of learning internal representations, and are able to represent and (given sufficient time) solve combinatoric problems.\n\nThey are named after the Boltzmann distribution in statistical mechanics, which is used in their sampling function. That's why they are called \"energy based models\" (EBM). They were invented in 1985 by Geoffrey Hinton, then a Professor at Carnegie Mellon University, and Terry Sejnowski, then a Professor at Johns Hopkins University.\n\n![Boltzmannexamplev1.png](attachment:Boltzmannexamplev1.png)\n[[1](https://en.wikipedia.org/wiki/File:Boltzmannexamplev1.png)]\n\n> A graphical representation of an example Boltzmann machine. Each undirected edge represents dependency. In this example there are 3 hidden units and 4 visible units. This is not a restricted Boltzmann machine.\n\nThe units in the Boltzmann machine are divided into 'visible' units, $\\mathbf{v}$, and 'hidden' units, $\\mathbf{h}$. The visible units are those that receive information from the 'environment', i.e. the training set is a set of binary vectors over the set $\\mathbf{v}$. The distribution over the training set is denoted $P^{+}(\\mathbf{v})$. Can see that all nodes form a complete graph (where all units are connected to all other units)\n\n# Restricted Boltzmann machine\n\nA restricted Boltzmann machine (RBM) is a generative stochastic artificial neural network that can learn a probability distribution over its set of inputs. RBMs are a variant of Boltzmann machines, with the restriction that their neurons must form a bipartite graph: a pair of nodes from each of the two groups of units (commonly referred to as the \"visible\" and \"hidden\" units respectively) may have a symmetric connection between them; and there are no connections between nodes within a group. By contrast, \"unrestricted\" Boltzmann machines may have connections between hidden units. This restriction allows for more efficient training algorithms than are available for the general class of Boltzmann machines, in particular the gradient-based contrastive divergence algorithm.\n\nRestricted Boltzmann machines can also be used in deep learning networks. In particular, deep belief networks can be formed by \"stacking\" RBMs and optionally fine-tuning the resulting deep network with gradient descent and backpropagation.\n\n![Restricted_Boltzmann_machine.svg.png](attachment:Restricted_Boltzmann_machine.svg.png)\n[[2](https://en.wikipedia.org/wiki/File:Restricted_Boltzmann_machine.svg)]\n\n> Diagram of a restricted Boltzmann machine with three visible units and four hidden units (no bias units).\n\n\nRestricted Boltzmann machines (RBM) are unsupervised nonlinear feature\nlearners based on a probabilistic model. The features extracted by an\nRBM or a hierarchy of RBMs often give good results when fed into a\nlinear classifier such as a linear SVM or a perceptron.\n\nThe model makes assumptions regarding the distribution of inputs. 
At the\nmoment, scikit-learn only provides `BernoulliRBM`\n, which assumes the inputs (and all units) are either binary values or\nvalues between 0 and 1, each encoding the probability that the specific\nfeature would be turned on.\n\nThe RBM tries to maximize the likelihood of the data using a particular\ngraphical model. The parameter learning algorithm used (`Stochastic Maximum Likelihood`) prevents the\nrepresentations from straying far from the input data, which makes them\ncapture interesting regularities, but makes the model less useful for\nsmall datasets, and usually not useful for density estimation.\n\nThe time complexity of this implementation is $O(d^2)$ assuming $d \\sim n_{features} \\sim n_{components}$.\n\nThe method gained popularity for initializing deep neural networks with\nthe weights of independent RBMs. This method is known as unsupervised\npre-training.\n\n#### Example : RBM features for digit classification\n\nFor greyscale image data where pixel values can be interpreted as degrees of blackness on a white background, like handwritten digit recognition, the Bernoulli Restricted Boltzmann machine model (`BernoulliRBM`) can perform effective non-linear feature extraction.\n\nIn order to learn good latent representations from a small dataset, we artificially generate more labeled data by perturbing the training data with linear shifts of 1 pixel in each direction.\n\nThis example shows how to build a classification pipeline with a BernoulliRBM feature extractor and a `LogisticRegression` classifier. The hyperparameters of the entire model (learning rate, hidden layer size, regularization) were optimized by grid search, but the search is not reproduced here because of runtime constraints.\n\nLogistic regression on raw pixel values is presented for comparison. The example shows that the features extracted by the BernoulliRBM help improve the classification accuracy.",
"_____no_output_____"
]
],
[
[
"from sklearn.neural_network import BernoulliRBM\n\nX = np.array([[0.5, 0, 0], [0, 0.7, 1], [1, 0, 1], [1, 0.2, 1]])\nrbm = BernoulliRBM(n_components=2)\nrbm.fit(X)\nprint('Shape of X: {}'.format(X.shape))\nX_r = rbm.transform(X)\nprint('Dimensionality reduced X : \\n{}'.format(X_r))",
"Shape of X: (4, 3)\nDimensionality reduced X : \n[[0.32772825 0.32760629]\n [0.29054342 0.28859006]\n [0.27822808 0.27568845]\n [0.26914416 0.26662256]]\n"
],
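[
"# Added sketch (not from the original notebook): the fitted RBM also\n# exposes sampling and scoring utilities, continuing from the toy\n# example above.\n# One block Gibbs step v -> h -> v' on the binarized inputs:\nv_sample = rbm.gibbs(X > 0.5)\nprint('Gibbs sample:\\n{}'.format(v_sample))\n# Pseudo-likelihood of the training points (higher is better):\nprint('Pseudo-likelihood: {}'.format(rbm.score_samples(X)))",
"_____no_output_____"
],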
[
"from scipy.ndimage import convolve\nfrom sklearn import linear_model, datasets, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.base import clone\n\n\n# #############################################################################\n# Setting up\n\ndef nudge_dataset(X, Y):\n \"\"\"\n This produces a dataset 5 times bigger than the original one,\n by moving the 8x8 images in X around by 1px to left, right, down, up\n \"\"\"\n direction_vectors = [\n [[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0]]]\n\n def shift(x, w):\n return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()\n\n X = np.concatenate([X] +\n [np.apply_along_axis(shift, 1, X, vector)\n for vector in direction_vectors])\n Y = np.concatenate([Y for _ in range(5)], axis=0)\n return X, Y\n\n\n# Load Data\nX, y = datasets.load_digits(return_X_y=True)\nX = np.asarray(X, 'float32')\nX, Y = nudge_dataset(X, y)\nX = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling\n\nX_train, X_test, Y_train, Y_test = train_test_split(\n X, Y, test_size=0.2, random_state=0)\n\n# Models we will use\nlogistic = linear_model.LogisticRegression(solver='newton-cg', tol=1)\nrbm = BernoulliRBM(random_state=0, verbose=True)\n\nrbm_features_classifier = Pipeline(\n steps=[('rbm', rbm), ('logistic', logistic)])\n\n# #############################################################################\n# Training\n\n# Hyper-parameters. These were set by cross-validation,\n# using a GridSearchCV. Here we are not performing cross-validation to\n# save time.\nrbm.learning_rate = 0.06\nrbm.n_iter = 10\n# More components tend to give better prediction performance, but larger\n# fitting time\nrbm.n_components = 100\nlogistic.C = 6000\n\n# Training RBM-Logistic Pipeline\nrbm_features_classifier.fit(X_train, Y_train)\n\n# Training the Logistic regression classifier directly on the pixel\nraw_pixel_classifier = clone(logistic)\nraw_pixel_classifier.C = 100.\nraw_pixel_classifier.fit(X_train, Y_train)\n\n# #############################################################################\n# Evaluation\n\nY_pred = rbm_features_classifier.predict(X_test)\nprint(\"Logistic regression using RBM features:\\n%s\\n\" % (\n metrics.classification_report(Y_test, Y_pred)))\n\nY_pred = raw_pixel_classifier.predict(X_test)\nprint(\"Logistic regression using raw pixel features:\\n%s\\n\" % (\n metrics.classification_report(Y_test, Y_pred)))\n\n# #############################################################################\n# Plotting\nscale = 3.25\nplt.figure(figsize=(4.2 * scale, 4 * scale))\nfor i, comp in enumerate(rbm.components_):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,\n interpolation='nearest')\n plt.xticks(())\n plt.yticks(())\nplt.suptitle('100 components extracted by RBM', fontsize=16)\nplt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)\n\nplt.show()",
"[BernoulliRBM] Iteration 1, pseudo-likelihood = -25.39, time = 0.13s\n[BernoulliRBM] Iteration 2, pseudo-likelihood = -23.77, time = 0.17s\n[BernoulliRBM] Iteration 3, pseudo-likelihood = -22.94, time = 0.18s\n[BernoulliRBM] Iteration 4, pseudo-likelihood = -21.91, time = 0.17s\n[BernoulliRBM] Iteration 5, pseudo-likelihood = -21.69, time = 0.17s\n[BernoulliRBM] Iteration 6, pseudo-likelihood = -21.06, time = 0.17s\n[BernoulliRBM] Iteration 7, pseudo-likelihood = -20.89, time = 0.17s\n[BernoulliRBM] Iteration 8, pseudo-likelihood = -20.64, time = 0.16s\n[BernoulliRBM] Iteration 9, pseudo-likelihood = -20.36, time = 0.17s\n[BernoulliRBM] Iteration 10, pseudo-likelihood = -20.09, time = 0.15s\nLogistic regression using RBM features:\n precision recall f1-score support\n\n 0 0.99 0.98 0.99 174\n 1 0.92 0.94 0.93 184\n 2 0.95 0.95 0.95 166\n 3 0.96 0.89 0.92 194\n 4 0.96 0.95 0.95 186\n 5 0.93 0.91 0.92 181\n 6 0.98 0.98 0.98 207\n 7 0.93 0.99 0.96 154\n 8 0.87 0.89 0.88 182\n 9 0.88 0.91 0.89 169\n\n accuracy 0.94 1797\n macro avg 0.94 0.94 0.94 1797\nweighted avg 0.94 0.94 0.94 1797\n\n\nLogistic regression using raw pixel features:\n precision recall f1-score support\n\n 0 0.90 0.92 0.91 174\n 1 0.60 0.58 0.59 184\n 2 0.75 0.85 0.80 166\n 3 0.78 0.78 0.78 194\n 4 0.81 0.84 0.82 186\n 5 0.76 0.77 0.77 181\n 6 0.91 0.87 0.89 207\n 7 0.85 0.88 0.87 154\n 8 0.67 0.58 0.62 182\n 9 0.75 0.77 0.76 169\n\n accuracy 0.78 1797\n macro avg 0.78 0.78 0.78 1797\nweighted avg 0.78 0.78 0.78 1797\n\n\n"
]
],
[
[
"## Graphical model and parametrization\n\n\nThe graphical model of an RBM is a fully-connected bipartite graph.\n\n![rbm_graph.png](attachment:rbm_graph.png)\n[[3](https://scikit-learn.org/stable/modules/neural_networks_unsupervised.html#rbm)]\n\nThe nodes are random variables whose states depend on the state of the\nother nodes they are connected to. The model is therefore parameterized\nby the weights of the connections, as well as one intercept (bias) term\nfor each visible and hidden unit, omitted from the image for simplicity.\n\nThe energy function measures the quality of a joint assignment:\n\n$$E(\\mathbf{v}, \\mathbf{h}) = -\\sum_i \\sum_j w_{ij}v_ih_j - \\sum_i b_iv_i\n - \\sum_j c_jh_j$$\n\nIn the formula above, $\\mathbf{b}$ and $\\mathbf{c}$ are the intercept\nvectors for the visible and hidden layers, respectively. The joint\nprobability of the model is defined in terms of the energy:\n\n$$P(\\mathbf{v}, \\mathbf{h}) = \\frac{e^{-E(\\mathbf{v}, \\mathbf{h})}}{Z}$$\n\nThe word *restricted* refers to the bipartite structure of the model,\nwhich prohibits direct interaction between hidden units, or between\nvisible units. This means that the following conditional independencies\nare assumed:\n\n$$\\begin{aligned}\nh_i \\bot h_j | \\mathbf{v} \\\\\nv_i \\bot v_j | \\mathbf{h}\n\\end{aligned}$$\n\nThe bipartite structure allows for the use of efficient block Gibbs\nsampling for inference.\n\n### Bernoulli Restricted Boltzmann machines\n\n\nIn the `BernoulliRBM` , all units are\nbinary stochastic units. This means that the input data should either be\nbinary, or real-valued between 0 and 1 signifying the probability that\nthe visible unit would turn on or off. This is a good model for\ncharacter recognition, where the interest is on which pixels are active\nand which aren\\'t. For images of natural scenes it no longer fits\nbecause of background, depth and the tendency of neighbouring pixels to\ntake the same values.\n\nThe conditional probability distribution of each unit is given by the\nlogistic sigmoid activation function of the input it receives:\n\n$$\\begin{aligned}\nP(v_i=1|\\mathbf{h}) = \\sigma(\\sum_j w_{ij}h_j + b_i) \\\\\nP(h_i=1|\\mathbf{v}) = \\sigma(\\sum_i w_{ij}v_i + c_j)\n\\end{aligned}$$\n\nwhere $\\sigma$ is the logistic sigmoid function:\n\n$$\\sigma(x) = \\frac{1}{1 + e^{-x}}$$\n\n### Stochastic Maximum Likelihood learning\n\nThe training algorithm implemented in `BernoulliRBM`\n is known as Stochastic Maximum Likelihood (SML) or\nPersistent Contrastive Divergence (PCD). Optimizing maximum likelihood\ndirectly is infeasible because of the form of the data likelihood:\n\n$$\\log P(v) = \\log \\sum_h e^{-E(v, h)} - \\log \\sum_{x, y} e^{-E(x, y)}$$\n\nFor simplicity the equation above is written for a single training\nexample. The gradient with respect to the weights is formed of two terms\ncorresponding to the ones above. They are usually known as the positive\ngradient and the negative gradient, because of their respective signs.\nIn this implementation, the gradients are estimated over mini-batches of\nsamples.\n\nIn maximizing the log-likelihood, the positive gradient makes the model\nprefer hidden states that are compatible with the observed training\ndata. Because of the bipartite structure of RBMs, it can be computed\nefficiently. The negative gradient, however, is intractable. Its goal is\nto lower the energy of joint states that the model prefers, therefore\nmaking it stay true to the data. 
It can be approximated by Markov chain\nMonte Carlo using block Gibbs sampling by iteratively sampling each of\n$v$ and $h$ given the other, until the chain mixes. Samples generated in\nthis way are sometimes referred as fantasy particles. This is\ninefficient and it is difficult to determine whether the Markov chain\nmixes.\n\nThe Contrastive Divergence method suggests to stop the chain after a\nsmall number of iterations, $k$, usually even 1. This method is fast and\nhas low variance, but the samples are far from the model distribution.\n\nPersistent Contrastive Divergence addresses this. Instead of starting a\nnew chain each time the gradient is needed, and performing only one\nGibbs sampling step, in PCD we keep a number of chains (fantasy\nparticles) that are updated $k$ Gibbs steps after each weight update.\nThis allows the particles to explore the space more thoroughly.\n",
"_____no_output_____"
]
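,
[
"To make the block Gibbs step above concrete, here is a minimal NumPy sketch of one sampling pass for a Bernoulli RBM. This is illustrative only -- the function and variable names are our own, and scikit-learn's `BernoulliRBM` exposes an equivalent `gibbs` method:\n\n```python\nimport numpy as np\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef gibbs_step(v, W, b, c, rng):\n    # W: (n_visible, n_hidden); b, c: visible and hidden bias vectors\n    p_h = sigmoid(v @ W + c)                     # P(h_j = 1 | v)\n    h = (rng.random(p_h.shape) < p_h) * 1.0      # sample the hidden layer\n    p_v = sigmoid(h @ W.T + b)                   # P(v_i = 1 | h)\n    v_new = (rng.random(p_v.shape) < p_v) * 1.0  # sample the visible layer\n    return v_new, h\n```",
"_____no_output_____"
]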
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7f35f036eddfcce308011f4480f4c8fa2a2c7a1 | 276,265 | ipynb | Jupyter Notebook | ctdr_toy_example.ipynb | Aarya-Create/PBL-Mesh | 978bcac47b9c925da4caba22d1f64fc254d13916 | [
"MIT"
] | 9 | 2021-02-23T07:00:27.000Z | 2022-03-04T14:32:24.000Z | ctdr_toy_example.ipynb | Aarya-Create/PBL-Mesh | 978bcac47b9c925da4caba22d1f64fc254d13916 | [
"MIT"
] | 1 | 2021-10-02T14:49:31.000Z | 2021-10-02T23:43:08.000Z | ctdr_toy_example.ipynb | Aarya-Create/PBL-Mesh | 978bcac47b9c925da4caba22d1f64fc254d13916 | [
"MIT"
] | 1 | 2021-02-23T07:00:27.000Z | 2021-02-23T07:00:27.000Z | 423.070444 | 21,482 | 0.925043 | [
[
[
"<a href=\"https://colab.research.google.com/github/jakeoung/ShapeFromProjections/blob/master/ctdr_toy_example.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"To run the code, you need to enable the CUDA in the setting. You can enable in the menu: `Runtime > Change runtime type` and choose GPU in the hardware accelerator item.",
"_____no_output_____"
]
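,
[
"Before running anything heavy, you can quickly verify that a GPU is actually available (a small sanity check; it assumes `torch` is importable in the Colab runtime):\n\n```python\nimport torch\nprint(torch.cuda.is_available())  # should print True on a GPU runtime\n```",
"_____no_output_____"
]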
],
[
[
"# install shapefromprojections package\n%cd /content\n!git clone https://github.com/jakeoung/ShapeFromProjections\n%cd ShapeFromProjections\n!pip install -e .\nimport sys\nimport os\nsys.path.append(os.getcwd())",
"/content\nCloning into 'ShapeFromProjections'...\nremote: Enumerating objects: 206, done.\u001b[K\nremote: Counting objects: 100% (206/206), done.\u001b[K\nremote: Compressing objects: 100% (199/199), done.\u001b[K\nremote: Total 206 (delta 88), reused 71 (delta 5), pack-reused 0\u001b[K\nReceiving objects: 100% (206/206), 12.69 MiB | 5.67 MiB/s, done.\nResolving deltas: 100% (88/88), done.\n/content/ShapeFromProjections\nObtaining file:///content/ShapeFromProjections\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from ctdrm==0.9.0) (1.19.5)\nCollecting trimesh\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/67/3d/9dc83ffe2bd043f1600a347753bb824adfd9547295424ae0fdd943a21293/trimesh-3.9.1-py3-none-any.whl (628kB)\n\u001b[K |████████████████████████████████| 634kB 3.9MB/s \n\u001b[?25hRequirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (from ctdrm==0.9.0) (1.7.0+cu101)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from ctdrm==0.9.0) (3.2.2)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from ctdrm==0.9.0) (2.10.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from trimesh->ctdrm==0.9.0) (51.1.2)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch->ctdrm==0.9.0) (0.16.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.6/dist-packages (from torch->ctdrm==0.9.0) (3.7.4.3)\nRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from torch->ctdrm==0.9.0) (0.8)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ctdrm==0.9.0) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ctdrm==0.9.0) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ctdrm==0.9.0) (1.3.1)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ctdrm==0.9.0) (2.8.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py->ctdrm==0.9.0) (1.15.0)\nInstalling collected packages: trimesh, ctdrm\n Running setup.py develop for ctdrm\nSuccessfully installed ctdrm trimesh-3.9.1\n"
],
[
"# install CUDA kernels\n%cd ctdr/cuda\n!python build.py build_ext --inplace\n%cd ../../run",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport torch\nimport h5py\nimport time\n\nimport ctdr\nfrom parse_args import args, update_args\nfrom ctdr.model.vanilla import Model\nfrom ctdr.dataset import init_mesh\nfrom ctdr.utils import util_mesh\nfrom ctdr import optimize\nimport subprocess\n\n#torch.backends.cudnn.benchmark=True\n\n#------------------------------------------------\n# load data\n#------------------------------------------------\nfrom ctdr.dataset import dataset\n\nargs.data='2starA'\n# args.niter=3000\nupdate_args(args)\n\nif args.data.find(\"tomop\") > 0:\n args.nmaterials = int(args.data[-3:-1])+1\n\nds = dataset.SinoDataset(args.ddata, args.nmaterials, args.eta)\nwidth_physical = ds.proj_geom['DetectorSpacingX']*ds.proj_geom['DetectorColCount']\nheight_physical = ds.proj_geom['DetectorSpacingY']*ds.proj_geom['DetectorRowCount']\nphysical_unit = min(width_physical, height_physical)\n\nfinit_obj = args.ddata+'/init.obj'\n\n# if os.path.exists(finit_obj) == False:\nif True:\n init_mesh.save_init_mesh(finit_obj, args.data, args.nmaterials, physical_unit, args.subdiv)\nelse:\n print(f\"Use existing init file {finit_obj}\")\n\nuse_center_param = False\nmus = np.arange(ds.nmaterials) / (ds.nmaterials-1)\n\nprint(finit_obj)\n# refine\nmodel = Model(finit_obj, ds.proj_geom, args.nmaterials,\n mus, args.nmu0, wlap=args.wlap, wflat=args.wflat).cuda()\n",
"Namespace(b=0, cuda=-1, data='2starA', dataroot='../data/', ddata='../data/2starA/', dresult='../result/2starA/ours_-b_0_-eta_0_-lr_0.01_-niter_500_-niter0_0_-nmu0_1_-subdiv_4_-wedge_2.0_-wflat_0.01_-wlap_10.0_/', eta=0, lr=0.01, niter=500, niter0=0, nmaterials=2, nmu0=1, resroot='../result', subdiv=4, verbose=1, wedge=2.0, wflat=0.01, wlap=10.0)\n../data/2starA//init.obj\n@statistics of mesh: # of v: 2562, f: 5120\nset mu as parameters\ninitialize flatten loss\n"
],
[
"def get_params(model, exclude_mus=False):\n return model.parameters()\n\ndef run_simple(model, ds, niter, args):\n print(\"@ model.mus\", model.mus)\n \n params = get_params(model)\n \n opt = torch.optim.Adam(params, args.lr, betas=(0.9, 0.99))\n \n idx_angles_full = torch.LongTensor(np.arange(ds.nangles))\n p_full = ds.p.cuda()\n ds_loader = [ [ idx_angles_full, p_full ] ]\n\n mask_bg = ds.p < 1e-5\n mask_bg = mask_bg.cuda()\n\n print(f\"@ statistics of mesh: {model.vertices.shape[0]}, {model.faces.shape[0]}\\n\")\n\n #mask_bg = 1\n ledge = 0\n llap = 0.\n lflat = 0.\n\n for epoch in range(niter):\n # if epoch % 20 == 0 or epoch == niter-1:\n for idx_angles, p_batch in ds_loader:\n displace_prev = model.displace.data.clone()\n if args.b > 0:\n p_batch = p_batch.cuda()\n\n opt.zero_grad()\n \n phat, mask_valid, edge_loss, lap_loss, flat_loss = model(idx_angles, args.wedge) # full angles\n # phat[~mask_valid] = 0.0\n # mask_valid = mask_valid + mask_bg\n \n # l2 loss\n data_loss = (p_batch - phat)[mask_valid].pow(2).mean()\n\n loss = data_loss + args.wedge * edge_loss + args.wlap * lap_loss + args.wflat * flat_loss\n \n \n loss.backward()\n opt.step()\n \n loss_now = loss.item()\n model.mus.data.clamp_(min=0.0)\n\n if epoch % 20 == 0 or epoch == niter-1: \n if args.wedge > 0.:\n ledge = edge_loss.item()\n \n if args.wlap > 0.:\n llap = lap_loss.item()\n \n if args.wflat > 0.:\n lflat = flat_loss.item()\n\n plt.imshow(phat.detach().cpu().numpy()[1,:,:]); plt.show()\n print(f'~ {epoch:03d} l2_loss: {data_loss.item():.8f} edge: {ledge:.6f} lap: {llap:.6f} flat: {lflat:.6f} mus: {str(model.mus.cpu().detach().numpy())}')\n\n return phat ",
"_____no_output_____"
],
[
"args.wlap = 10.0\nargs.wflat = 0.0\nargs.wedge = 1.0\nphat = run_simple(model, ds, 200, args)",
"@ model.mus Parameter containing:\ntensor([0., 1.], device='cuda:0', requires_grad=True)\n@ statistics of mesh: 2562, 5120\n\n"
],
[
"# Show the projection image of data and our estimation\nplt.imshow(ds.p[1,:,:]); plt.show()\nplt.imshow(phat.detach().cpu().numpy()[1,:,:]); plt.show()",
"_____no_output_____"
],
[
"# Optional: save the results\n# vv = model.vertices.cpu()+model.displace.detach().cpu()\n# ff = model.faces.cpu()\n\n# labels_v, labels_f = model.labels_v_np, model.labels.cpu().numpy()\n# # util_vis.save_vf_as_img_labels(args.dresult+f'{epoch:04d}_render.png', vv, ff, labels_v, labels_f)\n# util_vis.save_sino_as_img(args.dresult+f'{epoch:04d}_sino.png', phat.detach().cpu().numpy())\n# util_mesh.save_mesh(args.dresult+f'{epoch:04d}.obj', vv.numpy(), ff.numpy(), labels_v, labels_f)\n\n# util_mesh.save_mesh(args.dresult+'mesh.obj', vv.numpy(), ff.numpy(), labels_v, labels_f)\n# util_vis.save_sino_as_img(args.dresult+f'{epoch:04d}_data.png', ds.p.cuda())",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f3611d974b2126380a15682483909608575982 | 74,794 | ipynb | Jupyter Notebook | notebooks/01_Exploratory/1.3-rp-hcad-data-view-extra_features.ipynb | RafaelPinto/hcad_pred | ea795f7b4233484e1fa88225ff60dbfe2b98235b | [
"BSD-3-Clause"
] | 1 | 2021-01-08T18:57:47.000Z | 2021-01-08T18:57:47.000Z | notebooks/01_Exploratory/1.3-rp-hcad-data-view-extra_features.ipynb | RafaelPinto/hcad_pred | ea795f7b4233484e1fa88225ff60dbfe2b98235b | [
"BSD-3-Clause"
] | null | null | null | notebooks/01_Exploratory/1.3-rp-hcad-data-view-extra_features.ipynb | RafaelPinto/hcad_pred | ea795f7b4233484e1fa88225ff60dbfe2b98235b | [
"BSD-3-Clause"
] | null | null | null | 38.296979 | 881 | 0.372222 | [
[
[
"# Find the comparables: extra_features.txt\n\nThe file `extra_features.txt` contains important property information like number and quality of pools, detached garages, outbuildings, canopies, and more. Let's load this file and grab a subset with the important columns to continue our study.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from pathlib import Path\nimport pickle\n\nimport pandas as pd\n\nfrom src.definitions import ROOT_DIR\nfrom src.data.utils import Table, save_pickle",
"_____no_output_____"
],
[
"extra_features_fn = ROOT_DIR / 'data/external/2016/Real_building_land/extra_features.txt'\nassert extra_features_fn.exists()",
"_____no_output_____"
],
[
"extra_features = Table(extra_features_fn, '2016')",
"_____no_output_____"
],
[
"extra_features.get_header()",
"_____no_output_____"
]
],
[
[
"# Load accounts of interest\nLet's remove the account numbers that don't meet free-standing single-family home criteria that we found while processing the `building_res.txt` file.",
"_____no_output_____"
]
],
[
[
"skiprows = extra_features.get_skiprows()",
"_____no_output_____"
],
[
"extra_features_df = extra_features.get_df(skiprows=skiprows)",
"_____no_output_____"
],
[
"extra_features_df.head()",
"_____no_output_____"
],
[
"extra_features_df.l_dscr.value_counts().head(25)",
"_____no_output_____"
]
],
[
[
"# Grab slice of the extra features of interest\nWith the value counts on the extra feature description performed above we can see that the majority of the features land in the top 15 categories. Let's filter out the rests of the columns.",
"_____no_output_____"
]
],
[
[
"cols = extra_features_df.l_dscr.value_counts().head(15).index",
"_____no_output_____"
],
[
"cond0 = extra_features_df['l_dscr'].isin(cols)\nextra_features_df = extra_features_df.loc[cond0, :]",
"_____no_output_____"
]
],
[
[
"# Build pivot tables for count and grade\nThere appear to be two important values related to each extra feature: uts (units area in square feet) and grade. Since a property can have multiple features of the same class, e.g. frame utility shed, let's aggregate them by adding the uts values, and also by taking the mean of the same class feature grades.\n\nLet's build individual pivot tables for each and merge them before saving them out.",
"_____no_output_____"
]
],
[
[
"extra_features_pivot_uts = extra_features_df.pivot_table(index='acct',\n columns='l_dscr',\n values='uts',\n aggfunc='sum',\n fill_value=0)",
"_____no_output_____"
],
[
"extra_features_pivot_uts.head()",
"_____no_output_____"
],
[
"extra_features_pivot_grade = extra_features_df.pivot_table(index='acct',\n columns='l_dscr',\n values='grade',\n aggfunc='mean',\n )",
"_____no_output_____"
],
[
"extra_features_pivot_grade.head()",
"_____no_output_____"
],
[
"extra_features_uts_grade = extra_features_pivot_uts.merge(extra_features_pivot_grade,\n how='left',\n left_index=True,\n right_index=True,\n suffixes=('_uts', '_grade'),\n validate='one_to_one')",
"_____no_output_____"
],
[
"extra_features_uts_grade.head()",
"_____no_output_____"
],
[
"assert extra_features_uts_grade.index.is_unique",
"_____no_output_____"
]
],
[
[
"add `acct` column to make easier the merging process ahead",
"_____no_output_____"
]
],
[
[
"extra_features_uts_grade.reset_index(inplace=True)",
"_____no_output_____"
]
],
[
[
"# Fix column names\nWe would like the column names to be all lower case, with no spaces nor non-alphanumeric characters.",
"_____no_output_____"
]
],
[
[
"from src.data.utils import fix_column_names",
"_____no_output_____"
],
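[
"# Illustrative sketch only: the real fix_column_names is imported from src.data.utils\n# above. A minimal version matching the goal stated in the markdown might look like\n# this (hypothetical implementation, kept commented out for reference):\n#\n# def fix_column_names(df):\n#     df = df.copy()\n#     df.columns = (df.columns.str.lower()\n#                     .str.replace(r'[^0-9a-z]+', '_', regex=True)\n#                     .str.strip('_'))\n#     return df",
"_____no_output_____"
],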
[
"extra_features_uts_grade.columns",
"_____no_output_____"
],
[
"extra_features_uts_grade = fix_column_names(extra_features_uts_grade)",
"_____no_output_____"
],
[
"extra_features_uts_grade.columns",
"_____no_output_____"
]
],
[
[
"### Find duplicated rows",
"_____no_output_____"
]
],
[
[
"cond0 = extra_features_uts_grade.duplicated()\nextra_features_uts_grade.loc[cond0, :]",
"_____no_output_____"
]
],
[
[
"# Describe",
"_____no_output_____"
]
],
[
[
"extra_features_uts_grade.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 429701 entries, 0 to 429700\nData columns (total 31 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 acct 429701 non-null int64 \n 1 basic_outdoor_kitchen_uts 429701 non-null float64\n 2 brick_or_stone_detached_garage_uts 429701 non-null int64 \n 3 canopy_residential_uts 429701 non-null float64\n 4 carport_residential_uts 429701 non-null int64 \n 5 cracked_slab_uts 429701 non-null int64 \n 6 custom_outdoor_kitchen_uts 429701 non-null float64\n 7 foundation_repaired_uts 429701 non-null float64\n 8 frame_detached_garage_uts 429701 non-null int64 \n 9 frame_detached_garage_w_living_area_abov_uts 429701 non-null float64\n 10 frame_utility_shed_uts 429701 non-null float64\n 11 gunite_pool_uts 429701 non-null float64\n 12 metal_utility_shed_uts 429701 non-null int64 \n 13 pool_spa_with_heater_uts 429701 non-null float64\n 14 reinforced_concrete_pool_uts 429701 non-null int64 \n 15 residential_other_gross_value_uts 429701 non-null float64\n 16 basic_outdoor_kitchen_grade 2303 non-null float64\n 17 brick_or_stone_detached_garage_grade 13743 non-null float64\n 18 canopy_residential_grade 80708 non-null float64\n 19 carport_residential_grade 73048 non-null float64\n 20 cracked_slab_grade 16121 non-null float64\n 21 custom_outdoor_kitchen_grade 4294 non-null float64\n 22 foundation_repaired_grade 17653 non-null float64\n 23 frame_detached_garage_grade 180708 non-null float64\n 24 frame_detached_garage_w_living_area_abov_grade 7047 non-null float64\n 25 frame_utility_shed_grade 87819 non-null float64\n 26 gunite_pool_grade 87949 non-null float64\n 27 metal_utility_shed_grade 18365 non-null float64\n 28 pool_spa_with_heater_grade 37211 non-null float64\n 29 reinforced_concrete_pool_grade 3729 non-null float64\n 30 residential_other_gross_value_grade 14550 non-null float64\ndtypes: float64(24), int64(7)\nmemory usage: 101.6 MB\n"
],
[
"extra_features_uts_grade.describe()",
"_____no_output_____"
]
],
[
[
"# Export real_acct",
"_____no_output_____"
]
],
[
[
"save_fn = ROOT_DIR / 'data/raw/2016/extra_features_uts_grade_comps.pickle'\nsave_pickle(extra_features_uts_grade, save_fn)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f374963c43c1da1ce33d8ee8c8b1d6f5100aef | 218,846 | ipynb | Jupyter Notebook | dataproject/dataProject.ipynb | NumEconCopenhagen/projects-2019-tba | eea677b973b0205f293272027623ca3c13a3c23e | [
"MIT"
] | null | null | null | dataproject/dataProject.ipynb | NumEconCopenhagen/projects-2019-tba | eea677b973b0205f293272027623ca3c13a3c23e | [
"MIT"
] | 13 | 2019-04-08T17:01:11.000Z | 2019-05-14T18:47:37.000Z | dataproject/dataProject.ipynb | NumEconCopenhagen/projects-2019-tba | eea677b973b0205f293272027623ca3c13a3c23e | [
"MIT"
] | 2 | 2019-03-22T14:44:02.000Z | 2019-03-22T14:44:26.000Z | 59.679847 | 35,580 | 0.661118 | [
[
[
"# Data Analysis Project",
"_____no_output_____"
],
[
"In our data project, we use data directly imported from the World Data Bank. We have chosen to focus on nine different countries: Brazil, China, Denmark, India, Japan, Nigeria, Spain, Turkmenistan and the US. These countries are chosen because they are relatively different, which makes the analysis more interesting. The variables of interest are: GDP per Capita, GDP (current in US $), Total Population, Urban Population in %, Fertility Rate and Literacy Rate. \n\nThe notebook is organized as follows \n1.\tData Cleaning and Structuring\n - Setup\n - Download Data directly from World Bank\n - Overview of the Data and Adaption\n - Detection of Missing Data\n - Cleaned Data Set\n\n\n2.\tData Analysis and Visualisations \n - Interactive GDP per Capital Plot \n - World Map Displaying GDP per Capita \n - Data Visualization on Fertility Rate \n\n\n3.\tRegression\n\n\n \n",
"_____no_output_____"
],
[
"# Data Cleaning and Structuring",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"**We import the packages** we need. If we do not have the packages, we have to install them. Therefore, install:\n>`pip install pandas-datareader`\n>`pip install wbdata`",
"_____no_output_____"
]
],
[
[
"import pandas_datareader\nimport datetime",
"_____no_output_____"
]
],
[
[
"We import the setup to download data directly from world data bank:",
"_____no_output_____"
]
],
[
[
"from pandas_datareader import wb",
"_____no_output_____"
]
],
[
[
"## Download Data directly from the World Data Bank ",
"_____no_output_____"
],
[
"We define the countries for the download:\nChina, Japan, Brazil, U.S., Denmark, Spain, Turkmenistan, India, Nigeria.",
"_____no_output_____"
]
],
[
[
"countries = ['CN','JP','BR','US','DK','ES','TM','IN','NG']",
"_____no_output_____"
]
],
[
[
"We define the indicators for the download:\nGDP per capita, GDP (current US $), Population total, Urban Population in %, Fertility Rate, Literacy rate.",
"_____no_output_____"
]
],
[
[
"indicators = {\"NY.GDP.PCAP.KD\":\"GDP per capita\", \"NY.GDP.MKTP.CD\":\"GDP(current US $)\", \"SP.POP.TOTL\":\"Population total\", \n \"SP.URB.TOTL.IN.ZS\":\"Urban Population in %\", \"SP.DYN.TFRT.IN\":\"Fertility Rate\", \"SE.ADT.LITR.ZS\": \"Literacy rate, adult total in %\" }",
"_____no_output_____"
]
],
[
[
"We download the data and have a look at the table.",
"_____no_output_____"
]
],
[
[
"data_wb = wb.download(indicator= indicators, country= countries, start=1990, end=2017)\ndata_wb = data_wb.rename(columns = {\"NY.GDP.PCAP.KD\":\"gdp_pC\",\"NY.GDP.MKTP.CD\":\"gdp\", \"SP.POP.TOTL\":\"pop\", \"SP.URB.TOTL.IN.ZS\":\"urban_pop%\", \n \"SP.DYN.TFRT.IN\":\"frt\", \"SE.ADT.LITR.ZS\":\"litr\"})\ndata_wb = data_wb.reset_index()\ndata_wb.head(-5)",
"_____no_output_____"
]
],
[
[
"We save the data file as an excel sheet in the folder we saved the current file.",
"_____no_output_____"
]
],
[
[
"writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter')\ndata_wb.to_excel(r\"./data_wb1.xlsx\")",
"_____no_output_____"
]
],
[
[
"## Overview of the Data and Adaption",
"_____no_output_____"
]
],
[
[
"#Tonje \ndata_wb.dtypes",
"_____no_output_____"
]
],
[
[
"In order to ease the reading of the tables, we create a separation in all floats for the whole following file. Afterwards, we round the numbers with two decimals.",
"_____no_output_____"
]
],
[
[
"pd.options.display.float_format = '{:,}'.format\n\nround(data_wb.head(),2)",
"_____no_output_____"
]
],
[
[
"Since the gdp is inconvenient to work with, we create a new variable gdp_in_billions showing the gdp in billions US $ and add it to the dataset.\nWe have a look at the table to check whether it worked out.",
"_____no_output_____"
]
],
[
[
"data_wb['gdp_in_bil'] = data_wb['gdp']/1000000000\nround(data_wb.head(),2) #just to check",
"_____no_output_____"
]
],
[
[
"We delete the variable gdp since we will continue working exclusively with the variable gdp_in_bil.",
"_____no_output_____"
]
],
[
[
"del data_wb['gdp']\nround(data_wb.head(),2) #just to check",
"_____no_output_____"
]
],
[
[
"We have a look at the shape of the dataset in order to get an overview of the observations and variables.",
"_____no_output_____"
]
],
[
[
"data_wb.shape",
"_____no_output_____"
]
],
[
[
"We perform a summary statistics to get an overview of our dataset.",
"_____no_output_____"
]
],
[
[
"round(data_wb.describe(),2)",
"_____no_output_____"
]
],
[
[
"## Detection of Missing Data",
"_____no_output_____"
],
[
"We count the missing data:",
"_____no_output_____"
]
],
[
[
"data_wb.isnull().sum().sum()",
"_____no_output_____"
]
],
[
[
"We have a look at how many observations each variable has:",
"_____no_output_____"
]
],
[
[
"data_wb.count()",
"_____no_output_____"
]
],
[
[
"We search for the number of missing values of each variable. (Same step as before, only the other way around.)",
"_____no_output_____"
]
],
[
[
"data_wb.isnull().sum()",
"_____no_output_____"
]
],
[
[
"We drop the literacy rate, because this variable has nearly no data. ",
"_____no_output_____"
]
],
[
[
"data_wb.drop(['litr'], axis = 1, inplace = True)",
"_____no_output_____"
]
],
[
[
"We search for the nine missing values of fertility rate. It seems like there is no data for the fertility rate for the year 2017. ",
"_____no_output_____"
]
],
[
[
"round(data_wb.groupby('year').mean(),2)",
"_____no_output_____"
]
],
[
[
"We look whether every country misses the data for the fertility rate for the year 2017.",
"_____no_output_____"
]
],
[
[
"round(data_wb.loc[data_wb['year'] == '2017', :].head(-1),2)",
"_____no_output_____"
]
],
[
[
"We drop the year 2017.",
"_____no_output_____"
]
],
[
[
"I = data_wb['year'] == \"2017\"\ndata_wb.drop(data_wb[I].index, inplace = True)",
"_____no_output_____"
]
],
[
[
"## Cleaned data set",
"_____no_output_____"
],
[
"We perform a summary statistic of our cleaned dataset.",
"_____no_output_____"
]
],
[
[
"round(data_wb.describe(),2)",
"_____no_output_____"
]
],
[
[
"And we check the number of observations and variables.",
"_____no_output_____"
]
],
[
[
"data_wb.shape",
"_____no_output_____"
]
],
[
[
"We control whether the dataset is balanced.",
"_____no_output_____"
]
],
[
[
"data_wb.count()",
"_____no_output_____"
]
],
[
[
"The data set is balanced.",
"_____no_output_____"
],
[
"# Data Analysis and Visualisations ",
"_____no_output_____"
],
[
"We use the average level of every variable for each single country.\n\nThe overview shows that countries with a high gdp per capita have a low fertility rate. Countries with a high gdp per capita have a huge share of urban population. We can start to think about the relations between the variables. ",
"_____no_output_____"
]
],
[
[
"round(data_wb.groupby('country').mean(),2)",
"_____no_output_____"
]
],
[
[
"## Interactive plot",
"_____no_output_____"
],
[
"Now, we want to make an interactive plot which displays the development of GDP per capita over time\nfor the different countries. \n\nFirst, we import the necessary packages and tools: \n\n**Import the packages** we need. If we do not have the packages, we have to install them. Therefore, install:\n>`pip install matplotlib`\n>`pip install ipywidgets`",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline \nfrom ipywidgets import interact, interactive, fixed, interact_manual \nimport ipywidgets as widgets \n",
"_____no_output_____"
]
],
[
[
"Then, we define the relevant variables in a way which simplifies the coding: ",
"_____no_output_____"
]
],
[
[
"country=data_wb[\"country\"]\nyear=data_wb[\"year\"]\ngdp_pC=data_wb[\"gdp_pC\"]\n",
"_____no_output_____"
]
],
[
[
"We create a function constructing a figure: ",
"_____no_output_____"
]
],
[
[
"def interactive_figure(country, data_wb):\n \"\"\"define an interactive figure that uses countries and the dataframe as inputs \"\"\"\n \n data_country = data_wb[data_wb.country == country]\n year = data_country.year\n gdp_pC = data_country.gdp_pC\n fig = plt.figure(dpi=100)\n ax = fig.add_subplot(1,1,1)\n ax.plot(year, gdp_pC)\n ax.set_xlabel(\"Years\")\n ax.set_ylabel(\"GDP per Capita\")\n plt.xticks(rotation=90)\n plt.gca().invert_xaxis()\n\n",
"_____no_output_____"
]
],
[
[
"We make it interactive with a drop down menu:",
"_____no_output_____"
]
],
[
[
"widgets.interact(interactive_figure,\n year = widgets.fixed(year),\n data_wb = widgets.fixed(data_wb),\n country=widgets.Dropdown(description=\"Country\", options=data_wb.country.unique()),\n gdp_pC=widgets.fixed(gdp_pC)\n);",
"_____no_output_____"
]
],
[
[
"We can see that the overall trend for the selected countries is increasing GDP per capita.\nHowever, for the Western countries and Japan we can see the trace of the 2008 financial crisis. For Spain, \none of the countries that suffered most from this crisis, the dip is particularly visible. It is also worth noticing that China fared better than most industustrial nations during this crisis. This is partly due to Chinas closed nature, which made them less vulnerable to financial friction in the world economy. ",
"_____no_output_____"
],
[
"## World Map",
"_____no_output_____"
],
[
"After having a look at the first visualisations, we want to get an insight of the data by plotting it on a world map. This way we can easily compare and see whether countries in certain areas of the world have similar values in the variables we are interested in.\n\nFirst, we import the necessary package: \n\n**Import the package** we need. If we do not have the package, we have to install it. Therefore, install:\n>`pip install folium`",
"_____no_output_____"
]
],
[
[
"import folium",
"_____no_output_____"
]
],
[
[
"Our goal is to visualize the data on a world map using makers.\n\nIn order to define the location of the markers, we add the coordinates of the counries. Therefore, we add the variable 'Lat' for latitude and 'Lon' for longitude of the respecitve country to each observation in our data set.",
"_____no_output_____"
]
],
[
[
"row_indexes=data_wb[data_wb['country']== 'Brazil'].index\ndata_wb.loc[row_indexes,'Lat']= -14.2350\ndata_wb.loc[row_indexes,'Lon']= -51.9253\n\nrow_indexes=data_wb[data_wb['country']== 'China'].index\ndata_wb.loc[row_indexes,'Lat']= 33.5449\ndata_wb.loc[row_indexes,'Lon']= 103.149\n\nrow_indexes=data_wb[data_wb['country']== 'Denmark'].index\ndata_wb.loc[row_indexes,'Lat']= 56.2639\ndata_wb.loc[row_indexes,'Lon']= 9.5018\n\nrow_indexes=data_wb[data_wb['country']== 'Spain'].index\ndata_wb.loc[row_indexes,'Lat']= 40.4637\ndata_wb.loc[row_indexes,'Lon']= -3.7492\n\nrow_indexes=data_wb[data_wb['country']== 'India'].index\ndata_wb.loc[row_indexes,'Lat']= 20.5937\ndata_wb.loc[row_indexes,'Lon']= 78.9629\n\nrow_indexes=data_wb[data_wb['country']== 'Japan'].index\ndata_wb.loc[row_indexes,'Lat']= 36.2048\ndata_wb.loc[row_indexes,'Lon']= 138.2529\n\nrow_indexes=data_wb[data_wb['country']== 'Nigeria'].index\ndata_wb.loc[row_indexes,'Lat']= 9.0820\ndata_wb.loc[row_indexes,'Lon']= 8.6753\n\nrow_indexes=data_wb[data_wb['country']== 'Turkmenistan'].index\ndata_wb.loc[row_indexes,'Lat']= 38.9697\ndata_wb.loc[row_indexes,'Lon']= 59.5563\n\nrow_indexes=data_wb[data_wb['country']== 'United States'].index\ndata_wb.loc[row_indexes,'Lat']= 37.0902\ndata_wb.loc[row_indexes,'Lon']= -95.7129",
"_____no_output_____"
],
[
"round(data_wb.head(),4) #just to check",
"_____no_output_____"
]
],
[
[
"Now, we want to create the map.\n\n 1. We define the variables year (selectedyear) and variable (selectedvariable) we want to display.\n 2. We have to create an empty map. Since our countries are located all over the world, we have to display the whole world.\n 3. In order to run the loop later on, we create an overview of the data we are interested in based on the year and variable we defined in step 1. This overview is called year_overview.\n 4. Now, we run a for loop over every observation in our year_overview. In the loop, we:\n - create a marker on the map corresponding to the coordinates (location).\n - define the radius for the marker. It is important to adjust it depending on the variable chosen:\n - gdp_pC : 15\n - urban_pop% : 8000\n - frt : 200000\n - gdp_in_bil : 150\n - set the color to green.\n - decide on a filling for the circle.",
"_____no_output_____"
]
],
[
[
"# Definition of variables of interest\nselectedyear = 2010\n #select the year you are interested in\nselectedvariable = 'gdp_pC'\n ##select the variable you are interested in\n\n# Creation of an empty map\nmap = folium.Map(location=[0,0], tiles=\"Mapbox Bright\", zoom_start=2)\n\n#Creation of an overview data set displaying only the selected year\nyear_overview = data_wb.loc[data_wb['year']== str(selectedyear)]\n\n# Run of the for loop in order to add a marker one by one on the map\nfor i in range(0,len(year_overview)):\n folium.Circle(\n location=[year_overview.iloc[i]['Lat'], year_overview.iloc[i]['Lon']],\n radius=year_overview.iloc[i][selectedvariable]*15, #the smaller the original number, the higher the radius should be chosen\n color='green',\n fill=True\n ).add_to(map)\n\n#calling the map\nmap",
"_____no_output_____"
]
],
[
[
"Looking at the gdp per capita in the year 2010, we can see at one glance that developed countries have a substantially higher gdp per capita than emerging and developing countries. Mapping has the advantage of getting an overview and possible correlation of locations at one glance.",
"_____no_output_____"
],
[
"We save the map in the same folder as the file we are currently working on.",
"_____no_output_____"
]
],
[
[
"map.save('./map.py')",
"_____no_output_____"
]
],
[
[
"We drop the variables for the coordinates since they are no longer needed.",
"_____no_output_____"
]
],
[
[
"data_wb.drop(['Lat','Lon'], axis = 1, inplace = True)",
"_____no_output_____"
]
],
[
[
"## Fertility Rate per Country",
"_____no_output_____"
],
[
"The average annual fertility rate presents an overview of the fertility rate for the copuntries and shows that Japan and Spain have the lowest fertility rate, while Nigeria has the highest.",
"_____no_output_____"
]
],
[
[
"ax = data_wb.groupby('country').frt.mean().plot(kind='bar')\nax.set_ylabel('Avg. annual fertility rate')",
"_____no_output_____"
]
],
[
[
"The following graph presents annual growth rate of the fertility rate for each country. We observe that denmark is the only country with a negative growth rate. The leading country is India with a growth rate of 0.020 over the years. Surprisingly, Nigeria and the US have almost the same growth rate.",
"_____no_output_____"
]
],
[
[
"def annual_growth(x): \n x_last = x.values[-1]\n x_first = x.values[0]\n num_years = len(x)\n \n growth_annualized = (x_last/x_first)**(1/num_years) - 1.0\n return growth_annualized\n\nax = data_wb.groupby('country')['frt'].agg(annual_growth).plot(kind='bar')\nax.set_ylabel('Annual growth (fertility rate) from first to last year'); ",
"_____no_output_____"
]
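,
[
"# Quick sanity check of annual_growth on a toy series (values chosen for illustration):\n# going from 100 to 121 over three observations spans two annual steps,\n# so the annualized growth should be (121/100)**(1/2) - 1 = 0.10.\nprint(annual_growth(pd.Series([100, 110, 121])))  # expect 0.10",
"_____no_output_____"
]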
],
[
[
"We look what kind of variables we have. Years should be a numeric variable for the next grapph, but it is a objective (string).",
"_____no_output_____"
]
],
[
[
"data_wb.dtypes",
"_____no_output_____"
]
],
[
[
"We convert year into a float variable.",
"_____no_output_____"
]
],
[
[
"data_wb['year'] = data_wb.year.astype(float)",
"_____no_output_____"
]
],
[
[
"We prove what we have done.",
"_____no_output_____"
]
],
[
[
"data_wb.dtypes",
"_____no_output_____"
]
],
[
[
" ## Fertility Rate per Country from 1990 until 2016",
"_____no_output_____"
]
],
[
[
"data_wb = data_wb.set_index([\"year\", \"country\"])",
"_____no_output_____"
],
[
"#plot fertility rate over the years\ndata_wb.unstack('country')['frt'].plot()",
"_____no_output_____"
]
],
[
[
"The fertility rate declines continously in most countries. An exception is Turkmenistan. In this country the fertility rate seems to oszilliate. The US had a little peak in 2007, but since then the fertility rate is declining. ",
"_____no_output_____"
],
[
"## Correlation Table",
"_____no_output_____"
],
[
"Before we proceed with a regression, we want to have a look at the correlations between the variables. This can be done with a heatmap:",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\nfig = plt.subplots(figsize = (10,10))\nsns.set(font_scale=1.5)\nsns.heatmap(data_wb.corr(),square = True,cbar=True,annot=True,annot_kws={'size': 10})\nplt.show()",
"_____no_output_____"
]
],
[
[
"This gives a good indication for what to expect from the regression. \nIn the following regression we are interested in ferility rate, and we can see this table that fertility rate is negatively correlated with GDP, urban population and population in general (although the effect is small)",
"_____no_output_____"
],
[
"## Panel Regression",
"_____no_output_____"
],
[
"We want to perform a regression with fertility rate as dependent variable and gdp per capita, population and urban population as independent variables.\n\n**Import the packages** we need. If we do not have the packages, we have to intall them. Therefore, install\n>`pip install linearmodels`",
"_____no_output_____"
]
],
[
[
"from linearmodels.panel import PooledOLS\nfrom linearmodels.panel import RandomEffects\nfrom linearmodels import PanelOLS\nimport statsmodels.api as sm",
"_____no_output_____"
]
],
[
[
"For year and country, check whether these variables are set as index. ",
"_____no_output_____"
]
],
[
[
"print(data_wb.head())",
" gdp_pC pop urban_pop% frt \\\nyear country \n2,016.0 Brazil 10,868.6534435352 207652865 86.042 1.726 \n2,015.0 Brazil 11,351.5657481703 205962108 85.77 1.74 \n2,014.0 Brazil 11,870.1484076345 204213133 85.492 1.753 \n2,013.0 Brazil 11,915.4170541095 202408632 85.209 1.765 \n2,012.0 Brazil 11,673.7705356922 200560983 84.923 1.777 \n\n gdp_in_bil \nyear country \n2,016.0 Brazil 1,793.98904840929 \n2,015.0 Brazil 1,802.21437374132 \n2,014.0 Brazil 2,455.99362515937 \n2,013.0 Brazil 2,472.80691990167 \n2,012.0 Brazil 2,465.1886744150297 \n"
]
],
[
[
"We can se that they are set as indexes. \nFor the following regressions, we need \"years\" to be the second index for the regression to work. \n\nTherefore, temporarily reset the index:",
"_____no_output_____"
]
],
[
[
"data_wb.reset_index(inplace = True )",
"_____no_output_____"
],
[
"print(data_wb.head())",
" country year gdp_pC pop urban_pop% frt \\\n0 Brazil 2,016.0 10,868.6534435352 207652865 86.042 1.726 \n1 Brazil 2,015.0 11,351.5657481703 205962108 85.77 1.74 \n2 Brazil 2,014.0 11,870.1484076345 204213133 85.492 1.753 \n3 Brazil 2,013.0 11,915.4170541095 202408632 85.209 1.765 \n4 Brazil 2,012.0 11,673.7705356922 200560983 84.923 1.777 \n\n gdp_in_bil \n0 1,793.98904840929 \n1 1,802.21437374132 \n2 2,455.99362515937 \n3 2,472.80691990167 \n4 2,465.1886744150297 \n"
],
[
"data_wb = data_wb.set_index([\"country\",\"year\"], append=False)",
"_____no_output_____"
]
],
[
[
"### Pooled OLS-Regression ",
"_____no_output_____"
],
[
"For the first regression, we do a pooled-OLS. We have nine entities (countries) and 27 years. ",
"_____no_output_____"
]
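,
[
"Written out, the pooled model is\n\n$$frt_{it} = \beta_0 + \beta_1 \, gdpPC_{it} + \beta_2 \, pop_{it} + \beta_3 \, urban_{it} + \varepsilon_{it}$$\n\nwhere $i$ indexes the nine countries and $t$ the 27 years. Pooling means the coefficients are restricted to be identical across all countries and years.",
"_____no_output_____"
]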
],
[
[
"exog_vars = ['gdp_pC', 'pop', 'urban_pop%']\nexog = sm.add_constant(data_wb[exog_vars])\nmod = PooledOLS(data_wb.frt, exog)\npooled_res = mod.fit()\nprint(pooled_res)",
" PooledOLS Estimation Summary \n================================================================================\nDep. Variable: frt R-squared: 0.6796\nEstimator: PooledOLS R-squared (Between): 0.7154\nNo. Observations: 243 R-squared (Within): -0.0943\nDate: Thu, Apr 04 2019 R-squared (Overall): 0.6796\nTime: 09:25:38 Log-likelihood -292.85\nCov. Estimator: Unadjusted \n F-statistic: 168.98\nEntities: 9 P-value 0.0000\nAvg Obs: 27.000 Distribution: F(3,239)\nMin Obs: 27.000 \nMax Obs: 27.000 F-statistic (robust): 168.98\n P-value 0.0000\nTime periods: 27 Distribution: F(3,239)\nAvg Obs: 9.0000 \nMin Obs: 9.0000 \nMax Obs: 9.0000 \n \n Parameter Estimates \n==============================================================================\n Parameter Std. Err. T-stat P-value Lower CI Upper CI\n------------------------------------------------------------------------------\nconst 7.2719 0.2561 28.391 0.0000 6.7674 7.7765\ngdp_pC -2.143e-06 4.424e-06 -0.4843 0.6286 -1.086e-05 6.573e-06\npop -2.113e-09 1.417e-10 -14.914 0.0000 -2.392e-09 -1.834e-09\nurban_pop% -0.0638 0.0045 -14.023 0.0000 -0.0727 -0.0548\n==============================================================================\n"
]
],
[
[
"The results are questionable. For example gdp per capita seems to have no effect on fertility rate. Moreover, the effect of gdp per capita and population is unlikely small.\n\nTherefore, we have a look at our dependent variable. It seems that python takes the variable correctly and the indexes are altso correct. Therefore, we try to run another regression with the same data.",
"_____no_output_____"
],
[
"### Panel OLS-regression ",
"_____no_output_____"
]
],
[
[
"data_wb.frt",
"_____no_output_____"
]
],
[
[
"Now, we run a Panel OLS regression, where we control for entity effects and time effects.",
"_____no_output_____"
]
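,
[
"With entity and time effects, the specification becomes\n\n$$frt_{it} = \beta_1 \, gdpPC_{it} + \beta_2 \, pop_{it} + \beta_3 \, urban_{it} + \alpha_i + \gamma_t + \varepsilon_{it}$$\n\nwhere the country effect $\alpha_i$ absorbs time-invariant country characteristics and the time effect $\gamma_t$ absorbs shocks common to all countries in a given year.",
"_____no_output_____"
]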
],
[
[
"exog_vars = ['gdp_pC', 'pop', 'urban_pop%']\nexog = sm.add_constant(data_wb[exog_vars])\nmod = PanelOLS(data_wb.frt, exog, entity_effects=True, time_effects=True)\npooled_res = mod.fit()\nprint(pooled_res)",
" PanelOLS Estimation Summary \n================================================================================\nDep. Variable: frt R-squared: 0.6726\nEstimator: PanelOLS R-squared (Between): -5.3319\nNo. Observations: 243 R-squared (Within): -1.1795\nDate: Thu, Apr 04 2019 R-squared (Overall): -5.1484\nTime: 09:14:51 Log-likelihood 152.75\nCov. Estimator: Unadjusted \n F-statistic: 140.39\nEntities: 9 P-value 0.0000\nAvg Obs: 27.000 Distribution: F(3,205)\nMin Obs: 27.000 \nMax Obs: 27.000 F-statistic (robust): 140.39\n P-value 0.0000\nTime periods: 27 Distribution: F(3,205)\nAvg Obs: 9.0000 \nMin Obs: 9.0000 \nMax Obs: 9.0000 \n \n Parameter Estimates \n==============================================================================\n Parameter Std. Err. T-stat P-value Lower CI Upper CI\n------------------------------------------------------------------------------\nconst -0.3192 0.2853 -1.1191 0.2644 -0.8817 0.2432\ngdp_pC 8.134e-05 5.22e-06 15.581 0.0000 7.105e-05 9.163e-05\npop -1.577e-09 2.282e-10 -6.9131 0.0000 -2.027e-09 -1.128e-09\nurban_pop% 0.0266 0.0035 7.5169 0.0000 0.0196 0.0335\n==============================================================================\n\nF-test for Poolability: 230.04\nP-value: 0.0000\nDistribution: F(34,205)\n\nIncluded effects: Entity, Time\n"
]
],
[
[
"The panel data regressions has indeed an effect, but the results are still questionable. Population and GDP per capita are now both siginificant, but the effect is still unlikely low. Moreover, urban population seems to have a positive effect on the fertility rate. This is also unlikely. \n\nBased on our understanding, the the code the regression should be correct. However, these results are likely to be caused by omitted variables, as we have only included 3 variables in our regression. There are obviously also other factors affecting the fertility rate in a country, such as education, religion, use of contracetive methods, female labor force etc. ",
"_____no_output_____"
]
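,
[
"The direction of such omitted-variable bias follows the textbook formula: in the single-regressor case, if the true model also contains a variable $z$ that we leave out, the estimated coefficient on an included regressor $x$ converges to\n\n$$\hat{\beta}_x \rightarrow \beta_x + \beta_z \, \frac{Cov(x, z)}{Var(x)}$$\n\nso a regressor that is correlated with an omitted driver of fertility (for example, urban population with education) picks up part of that omitted effect.",
"_____no_output_____"
]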
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7f377ab0e2edaac4a75cb1d4c8516321a0f007e | 10,498 | ipynb | Jupyter Notebook | Exercises/Auto Encoder/Auto Encoder.ipynb | camilleAmaury/DeepLearningExercise | 5c328f871fa9db8fbeec951ea8e4df433b8b1c04 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | Exercises/Auto Encoder/Auto Encoder.ipynb | camilleAmaury/DeepLearningExercise | 5c328f871fa9db8fbeec951ea8e4df433b8b1c04 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | Exercises/Auto Encoder/Auto Encoder.ipynb | camilleAmaury/DeepLearningExercise | 5c328f871fa9db8fbeec951ea8e4df433b8b1c04 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | 29.488764 | 134 | 0.48133 | [
[
[
"# Auto Encoder\n\nThis notebook was created by Camille-Amaury JUGE, in order to better understand Auto Encoder principles and how they work.\n\n(it follows the exercices proposed by Hadelin de Ponteves on Udemy : https://www.udemy.com/course/le-deep-learning-de-a-a-z/)\n\n## Imports",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n# pytorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nimport sys\nimport csv",
"_____no_output_____"
]
],
[
[
"## Data preprocessing\n\nsame process as Boltzmann's machine (go there to see more details)",
"_____no_output_____"
]
],
[
[
"df_movies = pd.read_csv(\"ml-1m\\\\movies.dat\", sep=\"::\", header=None, engine=\"python\",\n encoding=\"latin-1\")\nusers = pd.read_csv(\"ml-1m\\\\users.dat\", sep=\"::\", header=None, engine=\"python\",\n encoding=\"latin-1\")\nratings = pd.read_csv(\"ml-1m\\\\ratings.dat\", sep=\"::\", header=None, engine=\"python\",\n encoding=\"latin-1\")",
"_____no_output_____"
],
[
"df_train = pd.read_csv(\"ml-100k\\\\u1.base\", delimiter=\"\\t\", header=None)\ndf_test = pd.read_csv(\"ml-100k\\\\u1.test\", delimiter=\"\\t\", header=None)",
"_____no_output_____"
],
[
"_users = list(set(np.concatenate((df_train[df_train.columns[0]].value_counts().index, \n df_test[df_test.columns[0]].value_counts().index), \n axis=0)))",
"_____no_output_____"
],
[
"_movies = list(set(np.concatenate((df_train[df_train.columns[1]].value_counts().index, \n df_test[df_test.columns[1]].value_counts().index), \n axis=0)))",
"_____no_output_____"
],
[
"def createMatrix(df, users, movies):\n matrix = []\n movies_nb = len(movies)\n user_nb = len(users)\n df_array = np.array(df, dtype=\"int\")\n for i,user in enumerate(users):\n filtered_movies = df_array[df_array[:,0] == user, 1]\n filtered_ratings = df_array[df_array[:,0] == user, 2]\n ratings = np.zeros(movies_nb)\n for j in range(len(filtered_movies)):\n ratings[filtered_movies[j] - 1] = filtered_ratings[j]\n matrix.append(ratings)\n \n sys.stdout.write(\"\\r Loading State : {} / {}\".format(i+1,user_nb))\n sys.stdout.flush()\n \n return matrix",
"_____no_output_____"
],
[
"matrix_train = createMatrix(df_train, _users, _movies)\nmatrix_test = createMatrix(df_test, _users, _movies)",
" Loading State : 943 / 943"
],
[
"train = torch.FloatTensor(matrix_train)\ntest = torch.FloatTensor(matrix_test) ",
"_____no_output_____"
],
[
"train.shape",
"_____no_output_____"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"class SparseAutoEncoder(nn.Module):\n def __init__(self, input_dim):\n super(SparseAutoEncoder, self).__init__()\n # creating input layer\n self.fully_connected_hidden_layer_1 = nn.Linear(input_dim, 20)\n self.fully_connected_hidden_layer_2 = nn.Linear(20, 10)\n self.fully_connected_hidden_layer_3 = nn.Linear(10, 20)\n self.fully_connected_hidden_layer_4 = nn.Linear(20, input_dim)\n self.activation = nn.Sigmoid()\n self.optimizer = optim.RMSprop(self.parameters(), lr=0.01, weight_decay=0.5)\n self.loss = nn.MSELoss()\n \n def forward(self, X):\n return self.fully_connected_hidden_layer_4(\n self.activation(self.fully_connected_hidden_layer_3(\n self.activation(self.fully_connected_hidden_layer_2(\n self.activation(self.fully_connected_hidden_layer_1(X)))))))\n \n def train_(self, X, epoch):\n self.X_train = X\n for i in range(epoch):\n print(\"Epoch => {}/{}\".format(i+1,epoch))\n train_loss = 0\n s = 0.\n for j in range(self.X_train.shape[0]):\n batch = Variable(self.X_train[j]).unsqueeze(0)\n target = batch.clone()\n if torch.sum(target.data > 0) > 0:\n output = self(batch)\n target.require_grad = False\n output[target == 0] = 0\n temp_loss = self.loss(output, target)\n \n mean_corrector = self.X_train.shape[1] / (float(torch.sum(target.data > 0)) + 1e-10)\n temp_loss.backward()\n train_loss += np.sqrt(temp_loss.item() * mean_corrector)\n s+=1.\n self.optimizer.step()\n print(\" => Loss : {}\".format((train_loss/s)))\n \n def test_(self, X):\n test_loss = 0\n s = 0.\n sys.stdout.write(\"\\r Processing\")\n sys.stdout.flush()\n \n for j in range(self.X_train.shape[0]):\n batch = Variable(self.X_train[j]).unsqueeze(0)\n target = Variable(X[j]).unsqueeze(0)\n if torch.sum(target.data > 0) > 0:\n output = self(batch)\n target.require_grad = False\n output[target == 0] = 0\n temp_loss = self.loss(output, target)\n \n mean_corrector = self.X_train.shape[1] / (float(torch.sum(target.data > 0)) + 1e-10)\n test_loss += np.sqrt(temp_loss.item() * mean_corrector)\n s+=1.\n sys.stdout.write(\"\\r Test Set => Loss : {}\".format((test_loss/s)))\n sys.stdout.flush()\n \n \n ",
"_____no_output_____"
],
[
"sae = SparseAutoEncoder(train.shape[1])",
"_____no_output_____"
],
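[
"# Optional sanity check before training (illustrative): the network compresses each\n# user's rating vector through a 20 -> 10 bottleneck and expands it back.\nprint(sae)  # layer-by-layer summary provided by nn.Module\nprint(sum(p.numel() for p in sae.parameters()))  # total number of trainable parameters",
"_____no_output_____"
],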
[
"sae.train_(train, 20)",
"Epoch => 1/20\n => Loss : 1.7715910420976406\nEpoch => 2/20\n => Loss : 1.0966187315622766\nEpoch => 3/20\n => Loss : 1.0534908873056288\nEpoch => 4/20\n => Loss : 1.0380864423484002\nEpoch => 5/20\n => Loss : 1.0311407656719527\nEpoch => 6/20\n => Loss : 1.0265132566564796\nEpoch => 7/20\n => Loss : 1.0239976540198936\nEpoch => 8/20\n => Loss : 1.0220266959738937\nEpoch => 9/20\n => Loss : 1.0209420041093658\nEpoch => 10/20\n => Loss : 1.0196439537372004\nEpoch => 11/20\n => Loss : 1.0189271599642897\nEpoch => 12/20\n => Loss : 1.0183032493250952\nEpoch => 13/20\n => Loss : 1.0178964247724989\nEpoch => 14/20\n => Loss : 1.0173872598783607\nEpoch => 15/20\n => Loss : 1.0172698467725836\nEpoch => 16/20\n => Loss : 1.0166608819642282\nEpoch => 17/20\n => Loss : 1.0168078470610282\nEpoch => 18/20\n => Loss : 1.0165371745710432\nEpoch => 19/20\n => Loss : 1.0163025495834048\nEpoch => 20/20\n => Loss : 1.015942291449781\n"
],
[
"sae.test_(test)",
" Test Set => Loss : 1.0229144248873956"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f37a9f50fc32ca8f6ab367911dc6fb760bedc4 | 2,136 | ipynb | Jupyter Notebook | K3D_Animations/Cube+Ball.ipynb | K3D-tools/experiments | e1a92a8ff4a16c80ece6fe2f13ccf20af41d16b0 | [
"MIT"
] | 3 | 2019-02-09T02:58:30.000Z | 2020-02-16T12:23:44.000Z | K3D_Animations/Cube+Ball.ipynb | K3D-tools/experiments | e1a92a8ff4a16c80ece6fe2f13ccf20af41d16b0 | [
"MIT"
] | null | null | null | K3D_Animations/Cube+Ball.ipynb | K3D-tools/experiments | e1a92a8ff4a16c80ece6fe2f13ccf20af41d16b0 | [
"MIT"
] | 3 | 2018-09-14T10:55:16.000Z | 2021-09-13T04:29:53.000Z | 21.148515 | 116 | 0.528558 | [
[
[
"import numpy as np\nimport k3d\nfrom itertools import product",
"_____no_output_____"
],
[
"points_in_edge = 30\nlinspace = np.arange(-points_in_edge//2, points_in_edge//2)/(points_in_edge/2)\ncube = np.array(list(product(linspace, linspace, linspace)))\nball = cube[np.sqrt(np.sum(cube**2, axis=1)) < 0.5]",
"_____no_output_____"
],
[
"plot = k3d.plot()\nCube = k3d.points(cube, point_size=0.005, shader='flat')\nBall = k3d.points(ball, point_size=0.01, shader='flat', color=0xff0000)\nplot += Cube + Ball\nplot.display()",
"_____no_output_____"
],
[
"spikes = k3d.mesh(ball, np.random.randint(0,np.shape(ball)[0], size=(np.shape(ball)[0],3)), color=0x00ffff)\nplot += spikes",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7f37f463dc1dde7164faf559a2eab3f0df83539 | 31,375 | ipynb | Jupyter Notebook | sentiment-rnn/Sentiment_RNN_Exercise.ipynb | MiniMarvin/pytorch-v2 | e3e07e2162c39626aa3c74249f0d5035d204ee4a | [
"MIT"
] | null | null | null | sentiment-rnn/Sentiment_RNN_Exercise.ipynb | MiniMarvin/pytorch-v2 | e3e07e2162c39626aa3c74249f0d5035d204ee4a | [
"MIT"
] | 4 | 2020-09-26T00:39:02.000Z | 2022-02-10T01:12:52.000Z | sentiment-rnn/Sentiment_RNN_Exercise.ipynb | MiniMarvin/pytorch-v2 | e3e07e2162c39626aa3c74249f0d5035d204ee4a | [
"MIT"
] | null | null | null | 36.313657 | 643 | 0.595219 | [
[
[
"# Sentiment Analysis with an RNN\n\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. \n>Using an RNN rather than a strictly feedforward network is more accurate since we can include information about the *sequence* of words. \n\nHere we'll use a dataset of movie reviews, accompanied by sentiment labels: positive or negative.\n\n<img src=\"assets/reviews_ex.png\" width=40%>\n\n### Network Architecture\n\nThe architecture for this network is shown below.\n\n<img src=\"assets/network_diagram.png\" width=40%>\n\n>**First, we'll pass in words to an embedding layer.** We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the Word2Vec lesson. You can actually train an embedding with the Skip-gram Word2Vec model and use those embeddings as input, here. However, it's good enough to just have an embedding layer and let the network learn a different embedding table on its own. *In this case, the embedding layer is for dimensionality reduction, rather than for learning semantic representations.*\n\n>**After input words are passed to an embedding layer, the new embeddings will be passed to LSTM cells.** The LSTM cells will add *recurrent* connections to the network and give us the ability to include information about the *sequence* of words in the movie review data. \n\n>**Finally, the LSTM outputs will go to a sigmoid output layer.** We're using a sigmoid function because positive and negative = 1 and 0, respectively, and a sigmoid will output predicted, sentiment values between 0-1. \n\nWe don't care about the sigmoid outputs except for the **very last one**; we can ignore the rest. We'll calculate the loss by comparing the output at the last time step and the training label (pos or neg).",
"_____no_output_____"
],
[
"---\n### Load in and visualize the data",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# read data from text files\nwith open('data/reviews.txt', 'r') as f:\n reviews = f.read()\nwith open('data/labels.txt', 'r') as f:\n labels = f.read()",
"_____no_output_____"
],
[
"print(reviews[:2000])\nprint()\nprint(labels[:20])",
"_____no_output_____"
]
],
[
[
"## Data pre-processing\n\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.\n\nYou can see an example of the reviews data above. Here are the processing steps, we'll want to take:\n>* We'll want to get rid of periods and extraneous punctuation.\n* Also, you might notice that the reviews are delimited with newline characters `\\n`. To deal with those, I'm going to split the text into each review using `\\n` as the delimiter. \n* Then I can combined all the reviews back together into one big string.\n\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.",
"_____no_output_____"
]
],
[
[
"from string import punctuation\n\nprint(punctuation)\n\n# get rid of punctuation\nreviews = reviews.lower() # lowercase, standardize\nall_text = ''.join([c for c in reviews if c not in punctuation])",
"_____no_output_____"
],
[
"# split by new lines and spaces\nreviews_split = all_text.split('\\n')\nall_text = ' '.join(reviews_split)\n\n# create a list of words\nwords = all_text.split()",
"_____no_output_____"
],
[
"words[:30]",
"_____no_output_____"
]
],
[
[
"### Encoding the words\n\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\n\n> **Exercise:** Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers **start at 1, not 0**.\n> Also, convert the reviews to integers and store the reviews in a new list called `reviews_ints`. ",
"_____no_output_____"
]
],
[
[
"# feel free to use this import \nfrom collections import Counter\n\n## Build a dictionary that maps words to integers\nvocab_to_int = None\n\n## use the dict to tokenize each review in reviews_split\n## store the tokenized reviews in reviews_ints\nreviews_ints = []\n\n",
"_____no_output_____"
]
],
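[
[
"# One possible solution sketch for the exercise above (kept separate so the\n# TODO cell stays intact): rank words by frequency and start the integer ids\n# at 1, reserving 0 for padding.\ncounts = Counter(words)\nvocab = sorted(counts, key=counts.get, reverse=True)\nvocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}\n\nreviews_ints = []\nfor review in reviews_split:\n    reviews_ints.append([vocab_to_int[word] for word in review.split()])",
"_____no_output_____"
]
],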
[
[
"**Test your code**\n\nAs a text that you've implemented the dictionary correctly, print out the number of unique words in your vocabulary and the contents of the first, tokenized review.",
"_____no_output_____"
]
],
[
[
"# stats about vocabulary\nprint('Unique words: ', len((vocab_to_int))) # should ~ 74000+\nprint()\n\n# print tokens in first review\nprint('Tokenized review: \\n', reviews_ints[:1])",
"_____no_output_____"
]
],
[
[
"### Encoding the labels\n\nOur labels are \"positive\" or \"negative\". To use these labels in our network, we need to convert them to 0 and 1.\n\n> **Exercise:** Convert labels from `positive` and `negative` to 1 and 0, respectively, and place those in a new list, `encoded_labels`.",
"_____no_output_____"
]
],
[
[
"# 1=positive, 0=negative label conversion\nencoded_labels = None",
"_____no_output_____"
]
],
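[
[
"# One possible solution sketch for the label-encoding exercise above:\n# 'positive' -> 1 and 'negative' -> 0, stored as a numpy array.\nlabels_split = labels.split('\\n')\nencoded_labels = np.array([1 if label == 'positive' else 0 for label in labels_split])",
"_____no_output_____"
]
],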
[
[
"### Removing Outliers\n\nAs an additional pre-processing step, we want to make sure that our reviews are in good shape for standard processing. That is, our network will expect a standard input text size, and so, we'll want to shape our reviews into a specific length. We'll approach this task in two main steps:\n\n1. Getting rid of extremely long or short reviews; the outliers\n2. Padding/truncating the remaining data so that we have reviews of the same length.\n\n<img src=\"assets/outliers_padding_ex.png\" width=40%>\n\nBefore we pad our review text, we should check for reviews of extremely short or long lengths; outliers that may mess with our training.",
"_____no_output_____"
]
],
[
[
"# outlier review stats\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))",
"_____no_output_____"
]
],
[
[
"Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. We'll have to remove any super short reviews and truncate super long reviews. This removes outliers and should allow our model to train more efficiently.\n\n> **Exercise:** First, remove *any* reviews with zero length from the `reviews_ints` list and their corresponding label in `encoded_labels`.",
"_____no_output_____"
]
],
[
[
"print('Number of reviews before removing outliers: ', len(reviews_ints))\n\n## remove any reviews/labels with zero length from the reviews_ints list.\n\nreviews_ints = \nencoded_labels = \n\nprint('Number of reviews after removing outliers: ', len(reviews_ints))",
"_____no_output_____"
]
],
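[
[
"# One possible solution sketch: keep only the indices of non-empty reviews\n# and filter both lists with them.\nnon_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]\nreviews_ints = [reviews_ints[ii] for ii in non_zero_idx]\nencoded_labels = np.array([encoded_labels[ii] for ii in non_zero_idx])",
"_____no_output_____"
]
],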
[
[
"---\n## Padding sequences\n\nTo deal with both short and very long reviews, we'll pad or truncate all our reviews to a specific length. For reviews shorter than some `seq_length`, we'll pad with 0s. For reviews longer than `seq_length`, we can truncate them to the first `seq_length` words. A good `seq_length`, in this case, is 200.\n\n> **Exercise:** Define a function that returns an array `features` that contains the padded data, of a standard size, that we'll pass to the network. \n* The data should come from `review_ints`, since we want to feed integers to the network. \n* Each row should be `seq_length` elements long. \n* For reviews shorter than `seq_length` words, **left pad** with 0s. That is, if the review is `['best', 'movie', 'ever']`, `[117, 18, 128]` as integers, the row will look like `[0, 0, 0, ..., 0, 117, 18, 128]`. \n* For reviews longer than `seq_length`, use only the first `seq_length` words as the feature vector.\n\nAs a small example, if the `seq_length=10` and an input review is: \n```\n[117, 18, 128]\n```\nThe resultant, padded sequence should be: \n\n```\n[0, 0, 0, 0, 0, 0, 0, 117, 18, 128]\n```\n\n**Your final `features` array should be a 2D array, with as many rows as there are reviews, and as many columns as the specified `seq_length`.**\n\nThis isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.",
"_____no_output_____"
]
],
[
[
"def pad_features(reviews_ints, seq_length):\n ''' Return features of review_ints, where each review is padded with 0's \n or truncated to the input seq_length.\n '''\n ## implement function\n \n features=None\n \n return features",
"_____no_output_____"
],
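[
"# One possible implementation sketch for `pad_features`: start from an\n# all-zeros matrix, then write each (possibly truncated) review into the\n# right-hand side of its row, which yields the required left padding.\ndef pad_features(reviews_ints, seq_length):\n    features = np.zeros((len(reviews_ints), seq_length), dtype=int)\n    for i, row in enumerate(reviews_ints):\n        features[i, -len(row):] = np.array(row)[:seq_length]\n    return features",
"_____no_output_____"
],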
[
"# Test your implementation!\n\nseq_length = 200\n\nfeatures = pad_features(reviews_ints, seq_length=seq_length)\n\n## test statements - do not change - ##\nassert len(features)==len(reviews_ints), \"Your features should have as many rows as reviews.\"\nassert len(features[0])==seq_length, \"Each feature row should contain seq_length values.\"\n\n# print first 10 values of the first 30 batches \nprint(features[:30,:10])",
"_____no_output_____"
]
],
[
[
"## Training, Validation, Test\n\nWith our data in nice shape, we'll split it into training, validation, and test sets.\n\n> **Exercise:** Create the training, validation, and test sets. \n* You'll need to create sets for the features and the labels, `train_x` and `train_y`, for example. \n* Define a split fraction, `split_frac` as the fraction of data to **keep** in the training set. Usually this is set to 0.8 or 0.9. \n* Whatever data is left will be split in half to create the validation and *testing* data.",
"_____no_output_____"
]
],
[
[
"split_frac = 0.8\n\n## split data into training, validation, and test data (features and labels, x and y)\n\n\n## print out the shapes of your resultant feature data\n\n",
"_____no_output_____"
]
],
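[
[
"# One possible split sketch: the first `split_frac` of the data becomes the\n# training set and the remainder is halved into validation and test sets.\nsplit_idx = int(len(features) * split_frac)\ntrain_x, remaining_x = features[:split_idx], features[split_idx:]\ntrain_y, remaining_y = encoded_labels[:split_idx], encoded_labels[split_idx:]\n\ntest_idx = int(len(remaining_x) * 0.5)\nval_x, test_x = remaining_x[:test_idx], remaining_x[test_idx:]\nval_y, test_y = remaining_y[:test_idx], remaining_y[test_idx:]\n\nprint('\\t\\t\\tFeature Shapes:')\nprint('Train set: \\t\\t{}'.format(train_x.shape),\n      '\\nValidation set: \\t{}'.format(val_x.shape),\n      '\\nTest set: \\t\\t{}'.format(test_x.shape))",
"_____no_output_____"
]
],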
[
[
"**Check your work**\n\nWith train, validation, and test fractions equal to 0.8, 0.1, 0.1, respectively, the final, feature data shapes should look like:\n```\n Feature Shapes:\nTrain set: \t\t (20000, 200) \nValidation set: \t(2500, 200) \nTest set: \t\t (2500, 200)\n```",
"_____no_output_____"
],
[
"---\n## DataLoaders and Batching\n\nAfter creating training, test, and validation data, we can create DataLoaders for this data by following two steps:\n1. Create a known format for accessing our data, using [TensorDataset](https://pytorch.org/docs/stable/data.html#) which takes in an input set of data and a target set of data with the same first dimension, and creates a dataset.\n2. Create DataLoaders and batch our training, validation, and test Tensor datasets.\n\n```\ntrain_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))\ntrain_loader = DataLoader(train_data, batch_size=batch_size)\n```\n\nThis is an alternative to creating a generator function for batching our data into full batches.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torch.utils.data import TensorDataset, DataLoader\n\n# create Tensor datasets\ntrain_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))\nvalid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))\ntest_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))\n\n# dataloaders\nbatch_size = 50\n\n# make sure to SHUFFLE your data\ntrain_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)\nvalid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)\ntest_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)",
"_____no_output_____"
],
[
"# obtain one batch of training data\ndataiter = iter(train_loader)\nsample_x, sample_y = dataiter.next()\n\nprint('Sample input size: ', sample_x.size()) # batch_size, seq_length\nprint('Sample input: \\n', sample_x)\nprint()\nprint('Sample label size: ', sample_y.size()) # batch_size\nprint('Sample label: \\n', sample_y)",
"_____no_output_____"
]
],
[
[
"---\n# Sentiment Network with PyTorch\n\nBelow is where you'll define the network.\n\n<img src=\"assets/network_diagram.png\" width=40%>\n\nThe layers are as follows:\n1. An [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) that converts our word tokens (integers) into embeddings of a specific size.\n2. An [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) defined by a hidden_state size and number of layers\n3. A fully-connected output layer that maps the LSTM layer outputs to a desired output_size\n4. A sigmoid activation layer which turns all outputs into a value 0-1; return **only the last sigmoid output** as the output of this network.\n\n### The Embedding Layer\n\nWe need to add an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) because there are 74000+ words in our vocabulary. It is massively inefficient to one-hot encode that many classes. So, instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using Word2Vec, then load it here. But, it's fine to just make a new layer, using it for only dimensionality reduction, and let the network learn the weights.\n\n\n### The LSTM Layer(s)\n\nWe'll create an [LSTM](https://pytorch.org/docs/stable/nn.html#lstm) to use in our recurrent network, which takes in an input_size, a hidden_dim, a number of layers, a dropout probability (for dropout between multiple layers), and a batch_first parameter.\n\nMost of the time, you're network will have better performance with more layers; between 2-3. Adding more layers allows the network to learn really complex relationships. \n\n> **Exercise:** Complete the `__init__`, `forward`, and `init_hidden` functions for the SentimentRNN model class.\n\nNote: `init_hidden` should initialize the hidden and cell state of an lstm layer to all zeros, and move those state to GPU, if available.",
"_____no_output_____"
]
],
[
[
"# First checking if GPU is available\ntrain_on_gpu=torch.cuda.is_available()\n\nif(train_on_gpu):\n print('Training on GPU.')\nelse:\n print('No GPU available, training on CPU.')",
"_____no_output_____"
],
[
"import torch.nn as nn\n\nclass SentimentRNN(nn.Module):\n \"\"\"\n The RNN model that will be used to perform Sentiment analysis.\n \"\"\"\n\n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):\n \"\"\"\n Initialize the model by setting up the layers.\n \"\"\"\n super(SentimentRNN, self).__init__()\n\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n \n # define all layers\n \n\n def forward(self, x, hidden):\n \"\"\"\n Perform a forward pass of our model on some input and hidden state.\n \"\"\"\n \n # return last sigmoid output and hidden state\n return sig_out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x hidden_dim,\n # initialized to zero, for hidden state and cell state of LSTM\n \n return hidden\n ",
"_____no_output_____"
]
],
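[
[
"# One possible model sketch for the exercise above, following the layer\n# description in the markdown (embedding -> LSTM -> dropout -> linear -> sigmoid):\nclass SentimentRNN(nn.Module):\n\n    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):\n        super(SentimentRNN, self).__init__()\n        self.output_size = output_size\n        self.n_layers = n_layers\n        self.hidden_dim = hidden_dim\n\n        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,\n                            dropout=drop_prob, batch_first=True)\n        self.dropout = nn.Dropout(0.3)\n        self.fc = nn.Linear(hidden_dim, output_size)\n        self.sig = nn.Sigmoid()\n\n    def forward(self, x, hidden):\n        batch_size = x.size(0)\n        embeds = self.embedding(x)\n        lstm_out, hidden = self.lstm(embeds, hidden)\n        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n        out = self.dropout(lstm_out)\n        out = self.fc(out)\n        sig_out = self.sig(out)\n        # reshape to (batch_size, seq_length) and keep only the last time step\n        sig_out = sig_out.view(batch_size, -1)\n        sig_out = sig_out[:, -1]\n        return sig_out, hidden\n\n    def init_hidden(self, batch_size):\n        # two zero tensors (h0, c0) of size n_layers x batch_size x hidden_dim\n        weight = next(self.parameters()).data\n        if train_on_gpu:\n            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),\n                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())\n        else:\n            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),\n                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())\n        return hidden",
"_____no_output_____"
]
],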
[
[
"## Instantiate the network\n\nHere, we'll instantiate the network. First up, defining the hyperparameters.\n\n* `vocab_size`: Size of our vocabulary or the range of values for our input, word tokens.\n* `output_size`: Size of our desired output; the number of class scores we want to output (pos/neg).\n* `embedding_dim`: Number of columns in the embedding lookup table; size of our embeddings.\n* `hidden_dim`: Number of units in the hidden layers of our LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.\n* `n_layers`: Number of LSTM layers in the network. Typically between 1-3\n\n> **Exercise:** Define the model hyperparameters.\n",
"_____no_output_____"
]
],
[
[
"# Instantiate the model w/ hyperparams\nvocab_size = \noutput_size = \nembedding_dim = \nhidden_dim = \nn_layers = \n\nnet = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)\n\nprint(net)",
"_____no_output_____"
]
],
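[
[
"# One possible set of hyperparameters for the exercise above (the +1 accounts\n# for the 0 padding token):\nvocab_size = len(vocab_to_int) + 1\noutput_size = 1\nembedding_dim = 400\nhidden_dim = 256\nn_layers = 2\n\nnet = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)\nprint(net)",
"_____no_output_____"
]
],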
[
[
"---\n## Training\n\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. You can also add code to save a model by name.\n\n>We'll also be using a new kind of cross entropy loss, which is designed to work with a single Sigmoid output. [BCELoss](https://pytorch.org/docs/stable/nn.html#bceloss), or **Binary Cross Entropy Loss**, applies cross entropy loss to a single value between 0 and 1.\n\nWe also have some data and training hyparameters:\n\n* `lr`: Learning rate for our optimizer.\n* `epochs`: Number of times to iterate through the training dataset.\n* `clip`: The maximum gradient value to clip at (to prevent exploding gradients).",
"_____no_output_____"
]
],
[
[
"# loss and optimization functions\nlr=0.001\n\ncriterion = nn.BCELoss()\noptimizer = torch.optim.Adam(net.parameters(), lr=lr)\n",
"_____no_output_____"
],
[
"# training params\n\nepochs = 4 # 3-4 is approx where I noticed the validation loss stop decreasing\n\ncounter = 0\nprint_every = 100\nclip=5 # gradient clipping\n\n# move model to GPU, if available\nif(train_on_gpu):\n net.cuda()\n\nnet.train()\n# train for some number of epochs\nfor e in range(epochs):\n # initialize hidden state\n h = net.init_hidden(batch_size)\n\n # batch loop\n for inputs, labels in train_loader:\n counter += 1\n\n if(train_on_gpu):\n inputs, labels = inputs.cuda(), labels.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n\n # get the output from the model\n output, h = net(inputs, h)\n\n # calculate the loss and perform backprop\n loss = criterion(output.squeeze(), labels.float())\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n optimizer.step()\n\n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = net.init_hidden(batch_size)\n val_losses = []\n net.eval()\n for inputs, labels in valid_loader:\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n\n if(train_on_gpu):\n inputs, labels = inputs.cuda(), labels.cuda()\n\n output, val_h = net(inputs, val_h)\n val_loss = criterion(output.squeeze(), labels.float())\n\n val_losses.append(val_loss.item())\n\n net.train()\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.6f}...\".format(loss.item()),\n \"Val Loss: {:.6f}\".format(np.mean(val_losses)))",
"_____no_output_____"
]
],
[
[
"---\n## Testing\n\nThere are a few ways to test your network.\n\n* **Test data performance:** First, we'll see how our trained model performs on all of our defined test_data, above. We'll calculate the average loss and accuracy over the test data.\n\n* **Inference on user-generated data:** Second, we'll see if we can input just one example review at a time (without a label), and see what the trained model predicts. Looking at new, user input data like this, and predicting an output label, is called **inference**.",
"_____no_output_____"
]
],
[
[
"# Get test data loss and accuracy\n\ntest_losses = [] # track loss\nnum_correct = 0\n\n# init hidden state\nh = net.init_hidden(batch_size)\n\nnet.eval()\n# iterate over test data\nfor inputs, labels in test_loader:\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n if(train_on_gpu):\n inputs, labels = inputs.cuda(), labels.cuda()\n \n # get predicted outputs\n output, h = net(inputs, h)\n \n # calculate loss\n test_loss = criterion(output.squeeze(), labels.float())\n test_losses.append(test_loss.item())\n \n # convert output probabilities to predicted class (0 or 1)\n pred = torch.round(output.squeeze()) # rounds to the nearest integer\n \n # compare predictions to true label\n correct_tensor = pred.eq(labels.float().view_as(pred))\n correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())\n num_correct += np.sum(correct)\n\n\n# -- stats! -- ##\n# avg test loss\nprint(\"Test loss: {:.3f}\".format(np.mean(test_losses)))\n\n# accuracy over all test data\ntest_acc = num_correct/len(test_loader.dataset)\nprint(\"Test accuracy: {:.3f}\".format(test_acc))",
"_____no_output_____"
]
],
[
[
"### Inference on a test review\n\nYou can change this test_review to any text that you want. Read it and think: is it pos or neg? Then see if your model predicts correctly!\n \n> **Exercise:** Write a `predict` function that takes in a trained net, a plain text_review, and a sequence length, and prints out a custom statement for a positive or negative review!\n* You can use any functions that you've already defined or define any helper functions you want to complete `predict`, but it should just take in a trained net, a text review, and a sequence length.\n",
"_____no_output_____"
]
],
[
[
"# negative test review\ntest_review_neg = 'The worst movie I have seen; acting was terrible and I want my money back. This movie had bad acting and the dialogue was slow.'\n",
"_____no_output_____"
],
[
"def predict(net, test_review, sequence_length=200):\n ''' Prints out whether a give review is predicted to be \n positive or negative in sentiment, using a trained model.\n \n params:\n net - A trained net \n test_review - a review made of normal text and punctuation\n sequence_length - the padded length of a review\n '''\n \n \n # print custom response based on whether test_review is pos/neg\n \n ",
"_____no_output_____"
],
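[
"# One possible `predict` sketch, reusing the tokenization and padding steps\n# from earlier in the notebook:\ndef predict(net, test_review, sequence_length=200):\n    net.eval()\n    # tokenize like the training data: lowercase, strip punctuation, map to ints\n    test_text = ''.join([c for c in test_review.lower() if c not in punctuation])\n    test_ints = [[vocab_to_int[word] for word in test_text.split() if word in vocab_to_int]]\n    features = pad_features(test_ints, sequence_length)\n    feature_tensor = torch.from_numpy(features)\n\n    batch_size = feature_tensor.size(0)\n    h = net.init_hidden(batch_size)\n    if train_on_gpu:\n        feature_tensor = feature_tensor.cuda()\n    output, h = net(feature_tensor, h)\n    pred = torch.round(output.squeeze())\n\n    if pred.item() == 1:\n        print('Positive review detected!')\n    else:\n        print('Negative review detected.')",
"_____no_output_____"
],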
[
"# positive test review\ntest_review_pos = 'This movie had the best acting and the dialogue was so good. I loved it.'\n",
"_____no_output_____"
],
[
"# call function\n# try negative and positive reviews!\nseq_length=200\npredict(net, test_review_neg, seq_length)",
"_____no_output_____"
]
],
[
[
"### Try out test_reviews of your own!\n\nNow that you have a trained model and a predict function, you can pass in _any_ kind of text and this model will predict whether the text has a positive or negative sentiment. Push this model to its limits and try to find what words it associates with positive or negative.\n\nLater, you'll learn how to deploy a model like this to a production environment so that it can respond to any kind of user data put into a web app!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7f38f4986effe6097cc557a4c8a648a26461ddb | 107,808 | ipynb | Jupyter Notebook | apple_games/rating_prediction.ipynb | URLoper/My-Work-Repository | ec81ce444dd72d6f5f5b1beff329525e3fe247d7 | [
"BSD-3-Clause"
] | null | null | null | apple_games/rating_prediction.ipynb | URLoper/My-Work-Repository | ec81ce444dd72d6f5f5b1beff329525e3fe247d7 | [
"BSD-3-Clause"
] | null | null | null | apple_games/rating_prediction.ipynb | URLoper/My-Work-Repository | ec81ce444dd72d6f5f5b1beff329525e3fe247d7 | [
"BSD-3-Clause"
] | null | null | null | 149.941586 | 41,388 | 0.833157 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nimport easydatascience as eds\nimport scipy.stats as stats\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\npd.options.mode.chained_assignment = None\n\noriginal_data = pd.read_csv('appstore_games.csv').drop(['URL', 'Icon URL', 'Description'], axis=1)\ndata = original_data.loc[~original_data['Average User Rating'].isnull()]\n\ndata[['Subtitle', 'In-app Purchases']] = data[['Subtitle', \n 'In-app Purchases']].fillna('NA')\ndata['Languages'] = data['Languages'].fillna('EN')\ndata.columns = data.columns.str.replace(' ', '_')\ndata.Age_Rating = data.Age_Rating.str.replace('+', '').astype('int32')\n\ndata['Size_MB'] = round(data['Size']/1024**2, 3)\ndata = data.drop('Size', axis=1)\n\n#eds.look(data).sort_values(by='nulls', ascending=False)",
"_____no_output_____"
],
[
"#Instance frequency within these attributes was examined\ndata['Has_Subtitle'] = np.where(data['Subtitle']=='NA', 0, 1)\ndata['Free_to_Play'] = np.where(data['Price']==0, 1, 0)\ndata['In-app_Purchases'] = np.where(data['In-app_Purchases']=='NA', 0, 1)\ndata['Multilingual'] = np.where(data['Languages']=='EN', 0, 1)\ndata = data.drop(['Subtitle', 'Primary_Genre', 'Price', 'Languages'], axis=1)\n\ndata['Original_Release_Date'] = data['Original_Release_Date'].str.replace('/', '-')\ndata['Current_Version_Release_Date'] = data['Current_Version_Release_Date'].str.replace('/', '-')\n\ndata['Release_Year'] = pd.to_datetime(data['Original_Release_Date']).dt.year\ndata['Release_Month'] = pd.to_datetime(data['Original_Release_Date']).dt.month\ndata['Days_Since_Last_Update'] = (pd.Timestamp.now().date() - pd.to_datetime(\n data['Current_Version_Release_Date']).dt.date).astype('timedelta64[D]')\n\ndata = data.drop(['Original_Release_Date', 'Current_Version_Release_Date'], axis=1)\n\n#names = pd.read_csv('usafirstnames.csv')['Names'].str.lower().values.tolist()\n#names = '|'.join(names)\n#data['Developer'] = data['Developer'].str.lower()\n#data['Indie'] = np.where(data['Developer'].str.contains(names), 1, 0)\ndata = data.drop('Developer', axis=1)\n\ndata = eds.one_hot_encode(data, 'Genres')\ndata = eds.degrade_columns(data, ', ', filter=True)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import KBinsDiscretizer\n\nkbdisc = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='quantile')\n\ncorr_drop_list = eds.get_abv_corr(data, data['Average_User_Rating'], threshold=0.03)\nnum_data = data.drop(corr_drop_list, axis=1)\n\n#eds.plot_spread(num_data, ['User_Rating_Count', 'Size_MB'])\nnum_data[['User_Rating_Count', 'Size_MB']] = kbdisc.fit_transform(num_data[['User_Rating_Count', \n 'Size_MB']])\n\n#eds.look(train, pred='Average_User_Rating').sort_values(by='corrAverage_User_Rating',\n# ascending=False)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\ntrain, test = train_test_split(num_data, test_size=0.3, random_state=13)\n\ntest_IDN = test[['ID', 'Name']]\ntest = test.drop(['ID', 'Name'], axis=1)\nX_test, y_test = test.drop('Average_User_Rating', axis=1), test['Average_User_Rating']\n\n#Outliers if there are some...\n\ntrain_IDN = train[['ID', 'Name']]\ntrain = train.drop(['ID', 'Name'], axis=1)\nX_train, y_train = train.drop('Average_User_Rating', axis=1), train['Average_User_Rating']",
"_____no_output_____"
],
[
"#eds.print_abv_ft_corr(X_train, 0.45)\nimport xgboost\nfrom sklearn.linear_model import LinearRegression\n\nxgb_reg = eds.simple_reg_model(xgboost.XGBRegressor(objective ='reg:squarederror'),\n X_train, y_train, return_model=True)",
"R2 score: 0.25159002649250417\nRMSE: 0.6520850038676677\nCross-Validation: \n\tScores: [0.69491947 0.66223936 0.70677167 0.6573875 ]\n\tMean: 0.6803294984574422\n\tStandard deviation: 0.021009781238676327\n"
],
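[
"# A cross-check sketch that avoids the custom `eds` helper and relies only on\n# scikit-learn and xgboost: RMSE from plain 4-fold cross-validation.\nfrom sklearn.model_selection import cross_val_score\n\ncv_scores = cross_val_score(\n    xgboost.XGBRegressor(objective='reg:squarederror'),\n    X_train, y_train, scoring='neg_root_mean_squared_error', cv=4)\nprint('CV RMSE: {:.4f} +/- {:.4f}'.format(-cv_scores.mean(), cv_scores.std()))",
"_____no_output_____"
],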
[
"#COMUNICATING INSIGHTS\n\n\"\"\"\nAs we see, predictions don't look very good, even if we don't drop statistically insignificant\nfeatures and leave the feature space at ~100 attributes, it still fails even to overfit. From \nthis, we can deduce that features are not good enough and we need more of them to make a\nprediction model (like the number of downloads, downloads in first few days, reported bugs, \nhow much did app show up on people's main page, did it get to top list, etc.).\nStill, that doesn't prevent us to do descriptive analytics on the features we have.\n\"\"\"",
"_____no_output_____"
],
[
"eds.look(a_data, 'Average_User_Rating').sort_values(by='corrAverage_User_Rating', ascending=False)",
"___________________________\nData types:\n int64 11\nint32 5\nfloat64 4\nName: types, dtype: int64\n___________________________\n"
],
[
"a_data = data.drop(['ID', 'Name'], axis=1)\neds.print_abv_corr(a_data, a_data['Average_User_Rating'], threshold=0.2)\n\nprint('\\nThese are features that correlate the best with the target.\\nStill not exceptional, but good enough to give us some intuition.')\n\nfig, ax = plt.subplots(1, 2, figsize=(16, 7))\n\nsns.catplot(x='Release_Year', y='Average_User_Rating', data=a_data, kind='point', ax=ax[0])\nsns.catplot(x='Average_User_Rating', y='Days_Since_Last_Update', data=a_data, kind='point', \n ax=ax[1])\nplt.close()\nplt.close()",
"Average_User_Rating 1.0\nRelease_Year 0.23170514868232348\nDays_Since_Last_Update -0.24744394567392528\n\nThese are features that correlate the best with the target.\nStill not exceptional, but good enough to give us some intuition.\n"
],
[
"print(\"The only other statistically significant value is Size_MB, but with only 0.06\\ncorrelation which, given stretched confidence intervals, isn't of much use.\")\n\nsns.catplot(x='Average_User_Rating', y='Size_MB', data=a_data, kind='point')\nplt.title('Point plot with app size in MB')",
"The only other statistically significant value is Size_MB, but with only 0.06\ncorrelation which, given stretched confidence intervals, isn't of much use.\n"
],
[
"print('We may argue that the genre affects the rating of the app but that is not the case.\\nWe can see that most of those features are statistically irrelevant.')\nprint('\\nCorrelation of genres with average user rating:\\n')\nprint(a_data[['Casual', 'Puzzle', 'Action', 'Role Playing', 'Card', 'Adventure', 'Simulation',\n 'Board', 'Entertainment']].corrwith(a_data['Average_User_Rating']))\n\nprint('\\nAlso, we might assume that features like age rating, number of ratings or the fact that the game if\\nfree to play but again, that is not the case.')\nprint('\\nCorrelation of Age_Rating, User_Rating_Count and Free_to_Play with the target:\\n')\nprint(a_data[['Age_Rating', 'User_Rating_Count', \n 'Free_to_Play']].corrwith(a_data['Average_User_Rating']))\n\nprint('\\nOn the other hand, subtitles and included in-app purchases seem to be important for overall user satisfaction.')\nprint('\\nCorrelation of Has_Subtitle and In-app_Purchases with the target:\\n')\nprint(a_data[['Has_Subtitle', 'In-app_Purchases']].corrwith(a_data['Average_User_Rating']))",
"We may argue that the genre affects the rating of the app but that is not the case.\nWe can see that most of those features are statistically irrelevant.\n\nCorrelation of genres with average user rating:\n\nCasual 0.052122\nPuzzle 0.046951\nAction 0.024461\nRole Playing 0.016357\nCard -0.002746\nAdventure -0.011440\nSimulation -0.043359\nBoard -0.086449\nEntertainment -0.037803\ndtype: float64\n\nAlso, we might assume that features like age rating, number of ratings or the fact that the game if\nfree to play but again, that is not the case.\n\nCorrelation of Age_Rating, User_Rating_Count and Free_to_Play with the target:\n\nAge_Rating 0.020307\nUser_Rating_Count 0.033025\nFree_to_Play 0.031072\ndtype: float64\n\nOn the other hand, subtitles and included in-app purchases seem to be important for overall user satisfaction.\n\nCorrelation of Has_Subtitle and In-app_Purchases with the target:\n\nHas_Subtitle 0.185376\nIn-app_Purchases 0.151692\ndtype: float64\n"
]
],
[
[
"__In the end, the average user rating can be a very biased metric of success since, for\nexample, most of the apps these days force users to give a 5-star rating for them to get something in return. This is just one of many examples but still, it is one of the better examples of user satisfaction. The best success metric of how well the app is doing is revenue,\nwhich was not examinable in this data set.__",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7f3947ef551b10d508960f51178d45c8cf8b3ba | 100,050 | ipynb | Jupyter Notebook | products/examples/nlp/bert_sst2_subset_finetuning.ipynb | pgagarinov/pytorch-hyperlight | dd0e291b4ebe0cb1538ac39d7f2046f9ec0fd3a1 | [
"Apache-2.0"
] | 10 | 2020-12-20T15:44:06.000Z | 2021-09-30T05:41:45.000Z | products/examples/nlp/bert_sst2_subset_finetuning.ipynb | pgagarinov/pytorch-hyperlight | dd0e291b4ebe0cb1538ac39d7f2046f9ec0fd3a1 | [
"Apache-2.0"
] | 6 | 2021-01-21T12:34:39.000Z | 2021-03-21T21:40:32.000Z | products/examples/nlp/bert_sst2_subset_finetuning.ipynb | pgagarinov/pytorch-hyperlight | dd0e291b4ebe0cb1538ac39d7f2046f9ec0fd3a1 | [
"Apache-2.0"
] | null | null | null | 134.838275 | 75,124 | 0.857601 | [
[
[
"import math\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytorch_hyperlight as pth\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport transformers as ppb\nfrom pytorch_hyperlight.tasks.classification import (\n AAutoClsHeadClassificationTaskWDAdamWWarmup,\n)\nfrom torch.utils.data import DataLoader, Dataset, Sampler, random_split\n\nimport torch.nn.functional as F\nimport random",
"_____no_output_____"
],
[
"warnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"EXPERIMENT_ID = 'bert_sst2_subset_finetuning'\nFAST_DEV_RUN = False\nSEED = 16",
"_____no_output_____"
],
[
"class ReviewsSampler(Sampler):\n def __init__(self, subset, batch_size):\n self.batch_size = batch_size\n self.subset = subset\n\n self.indices = subset.indices\n self.tokenized = np.array(subset.dataset.tokenized)[self.indices]\n\n def __iter__(self):\n\n batch_idx = []\n # index in sorted data\n for index in np.argsort(list(map(len, self.tokenized))):\n batch_idx.append(index)\n if len(batch_idx) == self.batch_size:\n yield batch_idx\n batch_idx = []\n\n if len(batch_idx) > 0:\n yield batch_idx\n\n def __len__(self):\n return math.ceil(len(self.subset) / self.batch_size)\n\n\ndef get_padded(values):\n max_len = 0\n for value in values:\n if len(value) > max_len:\n max_len = len(value)\n\n padded = np.array([value + [0] * (max_len - len(value)) for value in values])\n\n return padded\n\n\ndef collate_fn(batch):\n\n inputs = []\n labels = []\n for elem in batch:\n inputs.append(elem[\"tokenized\"])\n labels.append(elem[\"label\"])\n\n inputs = get_padded(inputs) # padded inputs\n attention_mask = np.where(inputs != 0, 1, 0)\n\n return {\n \"inputs\": torch.tensor(inputs),\n \"labels\": torch.FloatTensor(labels),\n \"attention_mask\": torch.tensor(attention_mask),\n }\n\n\nclass ReviewsDataset(Dataset):\n def __init__(self, reviews, tokenizer, labels, df_indices):\n self.labels = labels\n self.df_indices = df_indices\n # tokenized reviews\n self.tokenized = [tokenizer.encode(x, add_special_tokens=True) for x in reviews]\n\n def __getitem__(self, idx):\n return {\n \"tokenized\": self.tokenized[idx],\n \"label\": self.labels[idx],\n \"df_index\": self.df_indices[idx],\n }\n\n def __len__(self):\n return len(self.labels)",
"_____no_output_____"
],
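[
"# A small illustration of the padding/collate logic on a hypothetical toy\n# batch (the token ids below are made up): shorter sequences are right-padded\n# with zeros and the attention mask marks only the real tokens.\ntoy_batch = [\n    {'tokenized': [101, 7592, 2088, 102], 'label': 1},\n    {'tokenized': [101, 102], 'label': 0},\n]\nprint(collate_fn(toy_batch))",
"_____no_output_____"
],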
[
"def configure_dataloaders(batch_size, n_workers=4):\n pl.seed_everything(SEED)\n df = pd.read_csv(\n \"https://github.com/clairett/pytorch-sentiment-classification/raw/master/data/SST2/train.tsv\",\n delimiter=\"\\t\",\n header=None,\n )\n\n tokenizer = ppb.DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased\")\n\n dataset = ReviewsDataset(\n df[0].to_list(), tokenizer, df[1].to_list(), df.index.to_list()\n )\n\n train_size, val_size = int(0.8 * len(dataset)), int(0.1 * len(dataset))\n train_dataset, val_dataset, test_dataset = random_split(\n dataset, [train_size, val_size, len(dataset) - train_size - val_size]\n )\n\n train_loader = DataLoader(\n train_dataset,\n batch_sampler=ReviewsSampler(train_dataset, batch_size=batch_size),\n collate_fn=collate_fn,\n num_workers=n_workers,\n pin_memory=True,\n )\n val_loader = DataLoader(\n val_dataset,\n batch_sampler=ReviewsSampler(val_dataset, batch_size=batch_size),\n collate_fn=collate_fn,\n num_workers=n_workers,\n pin_memory=True,\n )\n test_loader = DataLoader(\n test_dataset,\n batch_sampler=ReviewsSampler(test_dataset, batch_size=batch_size),\n collate_fn=collate_fn,\n num_workers=n_workers,\n pin_memory=True,\n )\n loaders_dict = {\n \"df\": df,\n \"train_dataset\": train_dataset,\n \"val_dataset\": val_dataset,\n \"test_dataset\": test_dataset,\n \"train_loader\": train_loader,\n \"val_loader\": val_loader,\n \"test_loader\": test_loader,\n }\n return loaders_dict",
"_____no_output_____"
],
[
"loaders_dict = configure_dataloaders(batch_size=3)\nloaders_dict\nbatch = next(iter(loaders_dict[\"train_loader\"]))",
"Global seed set to 16\n"
],
[
"batch",
"_____no_output_____"
],
[
"test_dataset = loaders_dict[\"test_dataset\"]\norig_df = loaders_dict[\"df\"]",
"_____no_output_____"
],
[
"def get_label(prob):\n if prob > 0.5:\n label = \"positive\"\n else:\n label = \"negative\"\n return label",
"_____no_output_____"
],
[
"def show_dataset(test_dataset, orig_df, n_samples, f_get_pred_prob=None):\n for _ in range(n_samples):\n ind = random.randrange(len(test_dataset))\n input_data_dict = test_dataset[ind]\n\n df_index = input_data_dict[\"df_index\"]\n input_sentence = orig_df.iloc[df_index][0]\n if f_get_pred_prob is not None:\n pred_prob = f_get_pred_prob(input_data_dict)\n pred_label = get_label(pred_prob)\n prefix = f\"Pred. label: {pred_label}\\n\"\n else:\n prefix = \"\"\n\n true_label = get_label(input_data_dict[\"label\"])\n print(f\"{prefix}True label: {true_label}\\n\\t [{input_sentence}]\\n\")",
"_____no_output_____"
],
[
"show_dataset(test_dataset, orig_df, 5)",
"True label: positive\n\t [fudges fact and fancy with such confidence that we feel as if we 're seeing something purer than the real thing]\n\nTrue label: positive\n\t [lrb d rrb espite its familiar subject matter , ice age is consistently amusing and engrossing]\n\nTrue label: negative\n\t [adam sandler 's heart may be in the right place , but he needs to pull his head out of his butt]\n\nTrue label: negative\n\t [what jackson has done is proven that no amount of imagination , no creature , no fantasy story and no incredibly outlandish scenery]\n\nTrue label: positive\n\t [it is scott 's convincing portrayal of roger the sad cad that really gives the film its oomph]\n\n"
],
[
"N_CLASSES = 1\nIS_CUDA = torch.cuda.is_available()\nGPU_PER_TRIAL = 0.3 * IS_CUDA",
"_____no_output_____"
],
[
"CONFIG = {\n \"classifier_lr\": 1e-3, # Initial learning rate\n \"rest_lr\": 3e-5, # Initial learning rate\n \"warmup\": 500, # For LinearSchedulerWihtWarmup\n \"gradient_clip_val\": 0,\n \"max_epochs\": 6, # the actual number can be less due to early stopping\n \"batch_size\": 32,\n \"n_classes\": N_CLASSES,\n \"classifier_dropout\": 0.2,\n \"weight_decay\": 0.01,\n \"no_weight_decay_param_names\": [\"bias\", \"LayerNorm.weight\"],\n}\n\nTUNE_CONFIG = {\n \"seed\": SEED, # just remove this if you do not want determenistic behavior\n \"metric_to_optimize\": \"val_f1_epoch\", # Ray + PTL Trainer\n \"ray_metrics_to_show\": [\n \"val_loss_epoch\",\n \"val_f1_epoch\",\n \"val_acc_epoch\",\n ], # for Ray Tune\n \"metric_opt_mode\": \"max\", # Ray + PTL Trainer\n \"cpu_per_trial\": 3, # Ray + DataLoaders\n \"gpu_per_trial\": GPU_PER_TRIAL, # for Ray Tune\n \"n_checkpoints_to_keep\": 1, # for Ray Tune\n \"grace_period\": 6, # for both PTL Trainer and Ray Tune scheduler\n \"epoch_upper_limit\": 45, # for Ray Tune\n \"n_samples\": 3, # for Ray Tune\n \"ptl_early_stopping_patience\": 7, # for PTL Trainer\n \"ptl_precision\": 32, # or 16, for PTL Trainer\n \"train_loader_name\": \"train_loader\",\n \"val_loader_name\": \"val_loader\",\n \"test_loader_name\": \"test_loader\",\n \"batch_size_main\": CONFIG[\n \"batch_size\"\n ], # batch size for revalidation and test phases\n # that run in the main process after all Ray Tune child processes are finished\n \"gpus\": -1 * IS_CUDA, # -1 - use GPU if available, 0 - use CPU, 1 - use single GPU,\n # >=2 - use multiple GPUs\n}\n\nif FAST_DEV_RUN:\n CONFIG[\"max_epochs\"] = 2\n TUNE_CONFIG[\"n_samples\"] = 2",
"_____no_output_____"
],
[
"class BertClassifier(AAutoClsHeadClassificationTaskWDAdamWWarmup):\n def __init__(self, hparams):\n model = ppb.DistilBertForSequenceClassification.from_pretrained(\n \"distilbert-base-uncased\"\n )\n criterion = nn.BCEWithLogitsLoss()\n super().__init__(hparams, model, criterion)\n\n def _forward_batch(self, batch):\n inputs = batch[\"inputs\"]\n attention_mask = batch[\"attention_mask\"]\n return self(inputs, attention_mask=attention_mask)[\"logits\"].squeeze(1)\n\n @staticmethod\n def _get_target_from_batch(batch):\n return batch[\"labels\"]\n\n def _get_classifier_module_name(self):\n return \"classifier\"",
"_____no_output_____"
],
[
"runner = pth.Runner(\n configure_dataloaders,\n is_debug=FAST_DEV_RUN,\n experiment_id=EXPERIMENT_ID,\n log2wandb=False,\n)",
"_____no_output_____"
],
[
"best_result = runner.run_single_trial(BertClassifier, CONFIG, TUNE_CONFIG)",
"GPU available: True, used: True\nTPU available: None, using: 0 TPU cores\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\nGlobal seed set to 16\nGlobal seed set to 16\nSome weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias']\n- This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['pre_classifier.weight', 'pre_classifier.bias', 'classifier.weight', 'classifier.bias']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\nGlobal seed set to 16\n\n | Name | Type | Params\n--------------------------------------------------------------------------\n0 | model | DistilBertForSequenceClassification | 67.5 M\n1 | criterion | BCEWithLogitsLoss | 0 \n2 | train_metric_calc | LitMetricsCalc | 0 \n3 | val_metric_calc | LitMetricsCalc | 0 \n4 | test_metric_calc | LitMetricsCalc | 0 \n--------------------------------------------------------------------------\n67.5 M Trainable params\n0 Non-trainable params\n67.5 M Total params\n"
],
[
"runner.get_metrics()[\"run_x_last_metric_df\"]",
"_____no_output_____"
],
[
"lmodule = best_result[\"lmodule_best\"].eval()",
"_____no_output_____"
],
[
"def get_pred_prob(input_data_dict):\n input_tensor = torch.tensor(\n input_data_dict[\"tokenized\"], device=lmodule.device\n ).unsqueeze(0)\n with torch.no_grad():\n pred_prob = F.sigmoid(lmodule(input_tensor)[\"logits\"]).item()\n return pred_prob",
"_____no_output_____"
],
[
"show_dataset(test_dataset, orig_df, 10, get_pred_prob)",
"Pred. label: positive\nTrue label: positive\n\t [fudges fact and fancy with such confidence that we feel as if we 're seeing something purer than the real thing]\n\nPred. label: positive\nTrue label: positive\n\t [lrb d rrb espite its familiar subject matter , ice age is consistently amusing and engrossing]\n\nPred. label: negative\nTrue label: negative\n\t [adam sandler 's heart may be in the right place , but he needs to pull his head out of his butt]\n\nPred. label: negative\nTrue label: negative\n\t [what jackson has done is proven that no amount of imagination , no creature , no fantasy story and no incredibly outlandish scenery]\n\nPred. label: positive\nTrue label: positive\n\t [it is scott 's convincing portrayal of roger the sad cad that really gives the film its oomph]\n\nPred. label: positive\nTrue label: negative\n\t [more successful at relating history than in creating an emotionally complex , dramatically satisfying heroine]\n\nPred. label: positive\nTrue label: positive\n\t [a treat for its depiction on not giving up on dreams when you 're a struggling nobody]\n\nPred. label: negative\nTrue label: negative\n\t [a loud , witless mess that has none of the charm and little of the intrigue from the tv series]\n\nPred. label: positive\nTrue label: positive\n\t [turturro is fabulously funny and over the top as a ` very sneaky ' butler who excels in the art of impossible disappearing reappearing acts]\n\nPred. label: positive\nTrue label: positive\n\t [the stunt work is top notch the dialogue and drama often food spittingly funny]\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f3a42f6154e5a8de575d3dcdee27abd8c3cc24 | 3,395 | ipynb | Jupyter Notebook | code/experiments/loss.ipynb | juvian/Manga-Text-Segmentation | de8f148c78978d70ad0e0ae3242566da6d70f3a5 | [
"MIT"
] | 37 | 2020-09-10T03:15:51.000Z | 2022-03-24T06:11:21.000Z | code/experiments/loss.ipynb | KUR-creative/Manga-Text-Segmentation | de8f148c78978d70ad0e0ae3242566da6d70f3a5 | [
"MIT"
] | 2 | 2020-10-17T16:13:19.000Z | 2021-02-06T22:52:02.000Z | code/experiments/loss.ipynb | juvian/Manga-Text-Segmentation | de8f148c78978d70ad0e0ae3242566da6d70f3a5 | [
"MIT"
] | 6 | 2021-02-27T23:13:46.000Z | 2022-03-13T06:07:46.000Z | 31.146789 | 154 | 0.545803 | [
[
[
"from fastai.vision import unet_learner, imagenet_stats, torch, Path, os, load_learner, models\nfrom experiments import getDatasets, getData, random_seed\nfrom losses import BCELoss, MixedLoss\nfrom metrics import MetricsCallback, getDatasetMetrics\nfrom fastai.callbacks import CSVLogger\nfrom config import *\n\n%load_ext autoreload\n%autoreload 2\n\ntorch.cuda.set_device(0)",
"_____no_output_____"
],
[
"EXPERIMENT_PATH = Path(EXPERIMENTS_PATH) / 'loss'\nMODELS_PATH = EXPERIMENT_PATH / \"models\"\nos.makedirs(MODELS_PATH, exist_ok=True)",
"_____no_output_____"
],
[
"allData = getData()",
"_____no_output_____"
],
[
"props = {'bs': 4, 'val_bs': 2, 'num_workers': 0}\nlosses = {'bce0.5': BCELoss(0.5), 'bce1': BCELoss(1), 'bce5': BCELoss(5), 'bce10': BCELoss(10), 'bce30': BCELoss(30), \n 'mixed_10_2': MixedLoss(10.0, 2.0), 'mixed_10_1': MixedLoss(10.0, 1.0),\n 'mixed_5_2': MixedLoss(5.0, 2.0), 'mixed_5_1': MixedLoss(5.0, 1.0),\n 'mixed_5_2': MixedLoss(2.0, 2.0), 'mixed_5_1': MixedLoss(2.0, 1.0),\n 'dice': MixedLoss(0.0, 1.0)\n }",
"_____no_output_____"
],
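[
"# A smoke-test sketch, assuming these loss objects follow the usual PyTorch\n# convention of being called as loss(prediction, target): check that every\n# configured loss returns a finite scalar on random data.\npred = torch.randn(2, 1, 8, 8)\ntarget = (torch.rand(2, 1, 8, 8) > 0.5).float()\nfor name, loss_fn in losses.items():\n    print(name, float(loss_fn(pred, target)))",
"_____no_output_____"
],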
[
"for name, loss in losses.items():\n for index, dataset in enumerate(getDatasets(allData)):\n PATH = EXPERIMENT_PATH / name / str(index)\n if not (PATH / 'final model.pkl').exists():\n random_seed(42)\n data = dataset.databunch(**props).normalize(imagenet_stats)\n random_seed(42)\n learn = unet_learner(data, models.resnet18, callback_fns=[MetricsCallback, CSVLogger], model_dir='models', loss_func=loss, path=PATH)\n random_seed(42)\n learn.fit_one_cycle(10, 1e-4)\n learn.save('model')\n learn.export(file='final model.pkl')\n for index, dataset in enumerate(getDatasets(allData, crop=False, cutInHalf=False)): \n PATH = EXPERIMENT_PATH / name / str(index)\n if not (PATH / 'final predictions.csv').exists():\n learn = load_learner(PATH, 'final model.pkl')\n random_seed(42)\n m = getDatasetMetrics(dataset, learn)\n m.save(PATH / 'final predictions.csv')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7f3c5c5047b26461b0a3ff0656cf1c4e6087441 | 9,363 | ipynb | Jupyter Notebook | SceneClassification2017/1. Preprocess-KerasFolderClasses-Test_a.ipynb | StudyExchange/AIChallenger | e06cbccd6762bc4f7438cd8bdf2ca1fa54ab03ff | [
"MIT"
] | 1 | 2017-12-20T05:47:40.000Z | 2017-12-20T05:47:40.000Z | SceneClassification2017/1. Preprocess-KerasFolderClasses-Test_a.ipynb | StudyExchange/AIChallenger | e06cbccd6762bc4f7438cd8bdf2ca1fa54ab03ff | [
"MIT"
] | null | null | null | SceneClassification2017/1. Preprocess-KerasFolderClasses-Test_a.ipynb | StudyExchange/AIChallenger | e06cbccd6762bc4f7438cd8bdf2ca1fa54ab03ff | [
"MIT"
] | null | null | null | 24.835544 | 169 | 0.515967 | [
[
[
"# Scene Classification-Test_a\n## 1. Preprocess-KerasFolderClasses\n- Import pkg\n- Extract zip file\n- Preview \"scene_classes.csv\"\n- Preview \"scene_{0}_annotations_20170922.json\"\n- Test the image and pickle function\n- Split data into serval pickle file",
"_____no_output_____"
],
[
"This part need jupyter notebook start with \"jupyter notebook --NotebookApp.iopub_data_rate_limit=1000000000\" (https://github.com/jupyter/notebook/issues/2287)\n\nReference:\n- https://challenger.ai/competitions\n- https://github.com/jupyter/notebook/issues/2287",
"_____no_output_____"
],
[
"### Import pkg",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n# import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport seaborn as sns\n%matplotlib inline\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix",
"_____no_output_____"
],
[
"from keras.utils.np_utils import to_categorical # convert to one-hot-encoding\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import LearningRateScheduler, TensorBoard",
"Using TensorFlow backend.\n"
],
[
"# import zipfile\nimport os\nimport zipfile\nimport math\nfrom time import time\nfrom IPython.display import display\nimport pdb\nimport json\nfrom PIL import Image\nimport glob\nimport pickle",
"_____no_output_____"
]
],
[
[
"### Extract zip file",
"_____no_output_____"
]
],
[
[
"input_path = 'input'\ndatasetName = 'test_a'\ndate = '20170922'\n\ndatasetFolder = input_path + '\\\\data_{0}'.format(datasetName)\nzip_path = input_path + '\\\\ai_challenger_scene_{0}_{1}.zip'.format(datasetName, date)\nextract_path = input_path + '\\\\ai_challenger_scene_{0}_{1}'.format(datasetName, date)\nimage_path = extract_path + '\\\\scene_{0}_images_{1}'.format(datasetName, date)\nscene_classes_path = extract_path + '\\\\scene_classes.csv'\nscene_annotations_path = extract_path + '\\\\scene_{0}_annotations_{1}.json'.format(datasetName, date)\n\nprint(input_path)\nprint(datasetFolder)\nprint(zip_path)\nprint(extract_path)\nprint(image_path)\nprint(scene_classes_path)\nprint(scene_annotations_path)",
"input\ninput\\data_test_a\ninput\\ai_challenger_scene_test_a_20170922.zip\ninput\\ai_challenger_scene_test_a_20170922\ninput\\ai_challenger_scene_test_a_20170922\\scene_test_a_images_20170922\ninput\\ai_challenger_scene_test_a_20170922\\scene_classes.csv\ninput\\ai_challenger_scene_test_a_20170922\\scene_test_a_annotations_20170922.json\n"
],
[
"if not os.path.isdir(extract_path):\n with zipfile.ZipFile(zip_path) as file:\n for name in file.namelist():\n file.extract(name, input_path)",
"_____no_output_____"
]
],
[
[
"### Preview \"scene_classes.csv\"",
"_____no_output_____"
]
],
[
[
"scene_classes = pd.read_csv(scene_classes_path, header=None)\ndisplay(scene_classes.head())",
"_____no_output_____"
],
[
"def get_scene_name(lable_number, scene_classes_path):\n scene_classes = pd.read_csv(scene_classes_path, header=None)\n return scene_classes.loc[lable_number, 2]\nprint(get_scene_name(0, scene_classes_path))",
"airport_terminal\n"
]
],
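[
[
"# Preview the annotation JSON where available. For the AI Challenger scene\n# data this is assumed to be a list of {image_id, label_id} records, so it\n# loads directly into a DataFrame; the test_a split may ship without\n# annotations, hence the existence check.\nif os.path.isfile(scene_annotations_path):\n    scene_annotations = pd.read_json(scene_annotations_path)\n    display(scene_annotations.head())\nelse:\n    print('No annotation file found for this split.')",
"_____no_output_____"
]
],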
[
[
"### Copy images to ./input/data_test_a/test",
"_____no_output_____"
]
],
[
[
"from shutil import copy2",
"_____no_output_____"
],
[
"cwd = os.getcwd()\ntest_folder = os.path.join(cwd, datasetFolder)\ntest_sub_folder = os.path.join(test_folder, 'test')\nif not os.path.isdir(test_folder):\n os.mkdir(test_folder)\n os.mkdir(test_sub_folder)\nprint(test_folder)\nprint(test_sub_folder)",
"E:\\AIChallenger\\SceneClassification2017\\input\\data_test_a\nE:\\AIChallenger\\SceneClassification2017\\input\\data_test_a\\test\n"
],
[
"trainDir = test_sub_folder\nfor image_id in os.listdir(os.path.join(cwd, image_path)):\n fileName = image_path + '/' + image_id\n# print(fileName)\n# print(trainDir)\n copy2(fileName, trainDir)",
"_____no_output_____"
],
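[
"# Sanity check: confirm that every image made it into the Keras test folder.\nn_src = len(os.listdir(os.path.join(cwd, image_path)))\nn_dst = len(os.listdir(test_sub_folder))\nprint('Copied {} of {} images'.format(n_dst, n_src))",
"_____no_output_____"
],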
[
"print('Done!')",
"Done!\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f3db796ca8c15a24a78de58afc54d7e8c091be | 9,591 | ipynb | Jupyter Notebook | nbs/30_traceable_edit_in_flask.ipynb | raynardj/forgebox | 4ed057ecb1fd0e1b062e6ffb64cf2e4279cbf3ac | [
"MIT"
] | 3 | 2020-02-05T08:55:22.000Z | 2021-12-24T06:42:29.000Z | nbs/30_traceable_edit_in_flask.ipynb | raynardj/forgebox | 4ed057ecb1fd0e1b062e6ffb64cf2e4279cbf3ac | [
"MIT"
] | 8 | 2020-08-25T10:28:53.000Z | 2021-08-03T09:43:11.000Z | nbs/30_traceable_edit_in_flask.ipynb | raynardj/forgebox | 4ed057ecb1fd0e1b062e6ffb64cf2e4279cbf3ac | [
"MIT"
] | null | null | null | 29.601852 | 102 | 0.472839 | [
[
[
"# 01 A logged editable table\n> Traceable editable table in flask",
"_____no_output_____"
]
],
[
[
"from flask import Flask\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_world():\n return 'Hello, World!'",
"_____no_output_____"
]
],
[
[
"## Run a simple applitcation",
"_____no_output_____"
]
],
[
[
"# default_exp editable",
"_____no_output_____"
],
[
"# export\nimport pandas as pd\nfrom datetime import datetime\nimport json\nfrom sqlalchemy import create_engine as ce\nfrom sqlalchemy import text\nfrom jinja2 import Template",
"_____no_output_____"
],
[
"# export\nfrom pathlib import Path\ndef get_static():\n import forgebox\n return Path(forgebox.__path__[0])/\"static\"",
"_____no_output_____"
],
[
"# export\ndef edit_js():\n with open(get_static()/\"edit.js\",\"r\") as f:\n return f\"<script>{f.read()}</script>\"\n\n\nclass DefaultTemp(Template):\n \"\"\"\n Jinjia template with some default render config\n \"\"\"\n def render(self,dt):\n dt.update(dict(type=type,now = datetime.now()))\n return super().render(dt)",
"_____no_output_____"
]
],
[
[
"## Create sample data",
"_____no_output_____"
]
],
[
[
"con = ce(\"sqlite:///sample.db\")\n\nsample_df = pd.DataFrame(dict(name=[\"Darrow\",\"Virginia\",\"Sevro\",]*20,\n house =[\"Andromedus\",\"Augustus\",\"Barca\"]*20,\n age=[20,18,17]*20))\n\nsample_df.to_sql(\"sample_table\",index_label=\"id\",\n index=True,\n con = con, method='multi',\n if_exists=\"replace\")",
"_____no_output_____"
],
[
"# export\nfrom flask import request\nfrom flask import g\nfrom datetime import datetime\n\nclass Editable:\n def __init__(self,name,app,table_name,con,id_col,\n log_con,log_table=\"editable_log\",columns = None):\n \"\"\"\n name: route name for url path, \n also it will be the task title appearning on the frontend\n app:flask app\n table_name: table to edit\n con:sqlachemy connnection, created by : con = sqlalchemy.create_engine\n id_col: a column with unique value\n log_con:sqlachemy connnection, for storaging change log\n \"\"\"\n self.name = name\n self.app = app\n self.table_name = table_name\n self.con = con\n self.log_con = log_con\n self.columns = \",\".join(columns) if columns!=None else \"*\"\n self.id_col = id_col\n \n self.t_workspace = self.load_temp(get_static()/\"workspace.html\")\n self.t_table = self.load_temp(get_static()/\"table.html\")\n self.assign()\n \n def assign(self): \n self.app.route(f\"/{self.name}\")(self.workspace)\n self.app.route(f\"/{self.name}/df_api\")(self.read_df)\n self.app.route(f\"/{self.name}/save_api\",\n methods=[\"POST\"])(self.save_data)\n\n def workspace(self):\n return self.t_workspace.render(dict(title=self.name,\n pk=self.id_col,\n edit_js = edit_js()))\n\n def save_data(self):\n data = json.loads(request.data)\n # update change and save log\n changes = data[\"changes\"]\n log_df = pd.DataFrame(list(self.single_row(change) for change in changes))\n \n log_df[\"idx\"] = log_df.idx.apply(str)\n log_df[\"original\"] = log_df.original.apply(str)\n log_df[\"changed\"] = log_df.changed.apply(str)\n log_df.to_sql(f\"editable_log\",con = self.log_con,index=False, if_exists=\"append\")\n \n print(log_df)\n # return updated table\n query = data[\"query\"]\n page = query[\"page\"]\n where = query[\"where\"]\n return self.data_table(page,where)\n \n def settype(self,k):\n if k[:3] == \"int\": return int\n elif \"float\" in k: return float\n elif k==\"str\":return str\n elif k==\"list\":return list\n elif k==\"dict\":return dict\n else: return eval(k)\n \n def single_row(self,row):\n row[\"ip\"]= request.remote_addr\n row[\"table_name\"] = self.table_name\n row[\"ts\"] = datetime.now() \n if row[\"original\"]==row[\"changed\"]: \n row['sql'] = \"\"\n return row\n else:\n col = row[\"col\"]\n val = row[\"changed\"] \n val = f\"'{val}'\" if 'str' in row[\"valtype\"] else val\n idx = row[\"idx\"]\n idx = f\"'{idx}'\" if type(idx)==str else idx\n set_clause = f\"SET {col}={val}\"\n sql = f\"\"\"UPDATE {self.table_name} \n {set_clause} WHERE {self.id_col}={idx}\n \"\"\"\n row['sql'] = sql\n self.con.execute(sql)\n return row\n \n def read_df(self):\n page = request.args.get('page')\n where = request.args.get('where')\n return self.data_table(page,where)\n \n def data_table(self,page,where):\n where_clause = \"\" if where.strip() == \"\" else f\"WHERE {where} \"\n sql = f\"\"\"SELECT {self.columns} FROM {self.table_name} {where_clause}\n ORDER BY {self.id_col} ASC LIMIT {page},20\n \"\"\"\n print(sql)\n df = pd.read_sql(sql,self.con)\n df = df.set_index(self.id_col)\n return self.t_table.render(dict(df = df))\n \n def load_temp(self,path):\n with open(path, \"r\") as f:\n return DefaultTemp(f.read())",
"_____no_output_____"
]
],
[
[
"## Testing editable frontend",
"_____no_output_____"
]
],
[
[
"app = Flask(__name__)\n\n# Create Editable pages around sample_table\nEditable(\"table1\", # route/task name\n app, # flask app to wrap around\n table_name=\"sample_table\", # target table name\n id_col=\"id\", # unique column\n con = con,\n log_con=con\n )\n\napp.run(host=\"0.0.0.0\",port = 4242,debug=False)",
"_____no_output_____"
]
],
[
[
"### Retrieve the log",
"_____no_output_____"
]
],
[
[
"from forgebox.df import PandasDisplay",
"_____no_output_____"
],
[
"with PandasDisplay(max_colwidth = 0,max_rows=100):\n display(pd.read_sql('editable_log',con = con))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f3feb8cab073e088dcd25b74a5ca8d5708bedf | 100,396 | ipynb | Jupyter Notebook | SageMaker Project.ipynb | praveenbandaru/Sentiment-Analysis-Web-App | 338c072ee2bf83ad4de8e9129871b95b39c04efd | [
"MIT"
] | null | null | null | SageMaker Project.ipynb | praveenbandaru/Sentiment-Analysis-Web-App | 338c072ee2bf83ad4de8e9129871b95b39c04efd | [
"MIT"
] | null | null | null | SageMaker Project.ipynb | praveenbandaru/Sentiment-Analysis-Web-App | 338c072ee2bf83ad4de8e9129871b95b39c04efd | [
"MIT"
] | null | null | null | 50.807692 | 1,155 | 0.596259 | [
[
[
"# Creating a Sentiment Analysis Web App\n## Using PyTorch and SageMaker\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nNow that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.\n\n## Instructions\n\nSome template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.\n\n> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.\n\n## General Outline\n\nRecall the general outline for SageMaker projects using a notebook instance.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nFor this project, you will be following the steps in the general outline with some modifications. \n\nFirst, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.\n\nIn addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.",
"_____no_output_____"
],
[
"## Step 1: Downloading the data\n\nAs in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)\n\n> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.",
"_____no_output_____"
]
],
[
[
"%mkdir ../data\n!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data",
"mkdir: cannot create directory ‘../data’: File exists\n--2019-12-22 07:15:17-- http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\nResolving ai.stanford.edu (ai.stanford.edu)... 171.64.68.10\nConnecting to ai.stanford.edu (ai.stanford.edu)|171.64.68.10|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84125825 (80M) [application/x-gzip]\nSaving to: ‘../data/aclImdb_v1.tar.gz’\n\n../data/aclImdb_v1. 100%[===================>] 80.23M 22.1MB/s in 4.7s \n\n2019-12-22 07:15:22 (17.1 MB/s) - ‘../data/aclImdb_v1.tar.gz’ saved [84125825/84125825]\n\n"
]
],
[
[
"## Step 2: Preparing and Processing the data\n\nAlso, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.",
"_____no_output_____"
]
],
[
[
"import os\nimport glob\n\ndef read_imdb_data(data_dir='../data/aclImdb'):\n data = {}\n labels = {}\n \n for data_type in ['train', 'test']:\n data[data_type] = {}\n labels[data_type] = {}\n \n for sentiment in ['pos', 'neg']:\n data[data_type][sentiment] = []\n labels[data_type][sentiment] = []\n \n path = os.path.join(data_dir, data_type, sentiment, '*.txt')\n files = glob.glob(path)\n \n for f in files:\n with open(f) as review:\n data[data_type][sentiment].append(review.read())\n # Here we represent a positive review by '1' and a negative review by '0'\n labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)\n \n assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \\\n \"{}/{} data size does not match labels size\".format(data_type, sentiment)\n \n return data, labels",
"_____no_output_____"
],
[
"data, labels = read_imdb_data()\nprint(\"IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg\".format(\n len(data['train']['pos']), len(data['train']['neg']),\n len(data['test']['pos']), len(data['test']['neg'])))",
"IMDB reviews: train = 12500 pos / 12500 neg, test = 12500 pos / 12500 neg\n"
]
],
[
[
"Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.",
"_____no_output_____"
]
],
[
[
"from sklearn.utils import shuffle\n\ndef prepare_imdb_data(data, labels):\n \"\"\"Prepare training and test sets from IMDb movie reviews.\"\"\"\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test",
"_____no_output_____"
],
[
"train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)\nprint(\"IMDb reviews (combined): train = {}, test = {}\".format(len(train_X), len(test_X)))",
"IMDb reviews (combined): train = 25000, test = 25000\n"
]
],
[
[
"Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.",
"_____no_output_____"
]
],
[
[
"print(train_X[100])\nprint(train_y[100])",
"I was not expecting much going in to this, but still came away disappointed. This was my least favorite Halestorm production I have seen. I thought it was supposed to be a comedy, but I only snickered at 3 or 4 jokes. Is it really a funny gag to see a fat guy eating donuts and falling down over and over? What was up with the janitor in Heaven scene? Fred Willard has been hilarious with some of his Christopher Guest collaborations, but this did not work. They must have spent all the budget on getting \"known\" actors to appear in this because there was no lighting budget. It looked like it was filmed with a video camera and most scenes were very dark. Does it really take that much film to show someone actually shoot and make a basket, as opposed to cutting away and editing a ball swishing through a basket? I try not to be too critical of low budget comedies, but if you want to see something funny go to a real Church basketball game instead of this movie.\n0\n"
]
],
[
[
"The first step in processing the reviews is to make sure that any html tags that appear should be removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.",
"_____no_output_____"
]
],
[
[
"import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\n\nimport re\nfrom bs4 import BeautifulSoup\n\ndef review_to_words(review):\n nltk.download(\"stopwords\", quiet=True)\n stemmer = PorterStemmer()\n \n text = BeautifulSoup(review, \"html.parser\").get_text() # Remove HTML tags\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) # Convert to lower case\n words = text.split() # Split string into words\n words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stopwords\n words = [PorterStemmer().stem(w) for w in words] # stem\n \n return words",
"_____no_output_____"
]
],
[
[
"The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.",
"_____no_output_____"
]
],
[
[
"# TODO: Apply review_to_words to a review (train_X[100] or any other review)\nreview_to_words(train_X[100])",
"_____no_output_____"
]
],
[
[
"**Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?",
"_____no_output_____"
],
[
"**Answer:** The `review_to_words` method removes any non-alpha numeric characters that may appear in the input and converts it to lower case. It also splits the string into words and removes all the stop words.",
"_____no_output_____"
],
[
"The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.",
"_____no_output_____"
]
],
[
[
"import pickle\n\ncache_dir = os.path.join(\"../cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists\n\ndef preprocess_data(data_train, data_test, labels_train, labels_test,\n cache_dir=cache_dir, cache_file=\"preprocessed_data.pkl\"):\n \"\"\"Convert each review to words; read from cache if available.\"\"\"\n\n # If cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = pickle.load(f)\n print(\"Read preprocessed data from cache file:\", cache_file)\n except:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Preprocess training and test data to obtain words for each review\n #words_train = list(map(review_to_words, data_train))\n #words_test = list(map(review_to_words, data_test))\n words_train = [review_to_words(review) for review in data_train]\n words_test = [review_to_words(review) for review in data_test]\n \n # Write to cache file for future runs\n if cache_file is not None:\n cache_data = dict(words_train=words_train, words_test=words_test,\n labels_train=labels_train, labels_test=labels_test)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n pickle.dump(cache_data, f)\n print(\"Wrote preprocessed data to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n words_train, words_test, labels_train, labels_test = (cache_data['words_train'],\n cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])\n \n return words_train, words_test, labels_train, labels_test",
"_____no_output_____"
],
[
"# Preprocess data\ntrain_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)",
"Read preprocessed data from cache file: preprocessed_data.pkl\n"
]
],
[
[
"## Transform the data\n\nIn the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.\n\nSince we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.",
"_____no_output_____"
],
[
"### (TODO) Create a word dictionary\n\nTo begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.\n\n> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef build_dict(data, vocab_size = 5000):\n \"\"\"Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.\"\"\"\n \n # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a\n # sentence is a list of words.\n \n word_count = {} # A dict storing the words that appear in the reviews along with how often they occur\n for sentence in data: \n for word in sentence:\n if (word in word_count): \n word_count[word] += 1\n else: \n word_count[word] = 1\n \n # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and\n # sorted_words[-1] is the least frequently appearing word.\n sorted_words = [item[0] for item in sorted(word_count.items(), key=lambda x: x[1], reverse=True)]\n \n word_dict = {} # This is what we are building, a dictionary that translates words into integers\n for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'\n word_dict[word] = idx + 2 # 'infrequent' labels\n \n return word_dict",
"_____no_output_____"
],
[
"word_dict = build_dict(train_X)",
"_____no_output_____"
]
],
[
[
"**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it makes sense that these words appear frequently in the training set?",
"_____no_output_____"
],
[
"**Answer:**\nThe five most frequently appearing words in the training set are ['movi', 'film', 'one', 'like', 'time'].\nYes it makes sense that these words appear frequently in the training set, as they are often used in reviews.",
"_____no_output_____"
]
],
[
[
"# TODO: Use this space to determine the five most frequently appearing words in the training set.\nlist(word_dict.keys())[:5]",
"_____no_output_____"
]
],
[
[
"### Save `word_dict`\n\nLater on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use.",
"_____no_output_____"
]
],
[
[
"data_dir = '../data/pytorch' # The folder we will use for storing data\nif not os.path.exists(data_dir): # Make sure that the folder exists\n os.makedirs(data_dir)",
"_____no_output_____"
],
[
"with open(os.path.join(data_dir, 'word_dict.pkl'), \"wb\") as f:\n pickle.dump(word_dict, f)",
"_____no_output_____"
]
],
[
[
"### Transform the reviews\n\nNow that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.",
"_____no_output_____"
]
],
[
[
"def convert_and_pad(word_dict, sentence, pad=500):\n NOWORD = 0 # We will use 0 to represent the 'no word' category\n INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict\n \n working_sentence = [NOWORD] * pad\n \n for word_index, word in enumerate(sentence[:pad]):\n if word in word_dict:\n working_sentence[word_index] = word_dict[word]\n else:\n working_sentence[word_index] = INFREQ\n \n return working_sentence, min(len(sentence), pad)\n\ndef convert_and_pad_data(word_dict, data, pad=500):\n result = []\n lengths = []\n \n for sentence in data:\n converted, leng = convert_and_pad(word_dict, sentence, pad)\n result.append(converted)\n lengths.append(leng)\n \n return np.array(result), np.array(lengths)",
"_____no_output_____"
],
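[
"# A quick illustration (hypothetical toy input) of how convert_and_pad behaves with pad=5.\n# Per the answer above, 'movi' and 'film' are the two most frequent tokens, so they map to\n# the ids 2 and 3; an unseen token maps to INFREQ (1) and the remainder is NOWORD (0) padding.\nconvert_and_pad(word_dict, ['movi', 'film', 'notarealword'], pad=5)  # expected: ([2, 3, 1, 0, 0], 3)",
"_____no_output_____"
],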
[
"train_X, train_X_len = convert_and_pad_data(word_dict, train_X)\ntest_X, test_X_len = convert_and_pad_data(word_dict, test_X)",
"_____no_output_____"
]
],
[
[
"As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processeed. Does this look reasonable? What is the length of a review in the training set?",
"_____no_output_____"
]
],
[
[
"# Use this cell to examine one of the processed reviews to make sure everything is working as intended.\nprint(train_X[100])\nprint('Length of train_X[100]: {}'.format(len(train_X[100])))",
"[ 443 64 630 1167 254 153 1816 2 174 2 56 47 4 24\n 84 4 1968 219 24 62 2 14 4 2042 2 1685 60 43\n 787 150 13 3518 1902 315 1486 224 6 47 858 2 244 1\n 736 315 13 133 685 2 3643 1 537 68 3707 8 329 267\n 747 201 121 12 3086 1079 779 181 660 3 1311 2 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\nLength of train_X[100]: 500\n"
]
],
[
[
"**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?",
"_____no_output_____"
],
[
"**Answer:**\nI don't see any problem with the `preprocess_data` method as it cleanses and convert the data to a list of words. The `convert_and_pad_data` can be problematic as it will limit the review to 500 words and also strips out all the infrequent words which are not present in the word dictionary which we generated just based on the training set. The word dictionary contains just 4998 words and may not reflect everything that appears in the test dataset.",
"_____no_output_____"
],
[
"## Step 3: Upload the data to S3\n\nAs in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.\n\n### Save the processed training dataset locally\n\nIt is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n \npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \\\n .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Uploading the training data\n\n\nNext, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.",
"_____no_output_____"
]
],
[
[
"import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nbucket = sagemaker_session.default_bucket()\nprefix = 'sagemaker/sentiment_rnn'\n\nrole = sagemaker.get_execution_role()",
"_____no_output_____"
],
[
"input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.",
"_____no_output_____"
],
[
"## Step 4: Build and Train the PyTorch Model\n\nIn the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects\n\n - Model Artifacts,\n - Training Code, and\n - Inference Code,\n \neach of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.\n\nWe will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below.",
"_____no_output_____"
]
],
[
[
"!pygmentize train/model.py",
"\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.nn\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnn\u001b[39;49;00m\r\n\r\n\u001b[34mclass\u001b[39;49;00m \u001b[04m\u001b[32mLSTMClassifier\u001b[39;49;00m(nn.Module):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m This is the simple RNN model we will be using to perform Sentiment Analysis.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n\r\n \u001b[34mdef\u001b[39;49;00m \u001b[32m__init__\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m, embedding_dim, hidden_dim, vocab_size):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m Initialize the model by settingg up the various layers.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n \u001b[36msuper\u001b[39;49;00m(LSTMClassifier, \u001b[36mself\u001b[39;49;00m).\u001b[32m__init__\u001b[39;49;00m()\r\n\r\n \u001b[36mself\u001b[39;49;00m.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=\u001b[34m0\u001b[39;49;00m)\r\n \u001b[36mself\u001b[39;49;00m.lstm = nn.LSTM(embedding_dim, hidden_dim)\r\n \u001b[36mself\u001b[39;49;00m.dense = nn.Linear(in_features=hidden_dim, out_features=\u001b[34m1\u001b[39;49;00m)\r\n \u001b[36mself\u001b[39;49;00m.sig = nn.Sigmoid()\r\n \r\n \u001b[36mself\u001b[39;49;00m.word_dict = \u001b[36mNone\u001b[39;49;00m\r\n\r\n \u001b[34mdef\u001b[39;49;00m \u001b[32mforward\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m, x):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m Perform a forward pass of our model on some input.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n x = x.t()\r\n lengths = x[\u001b[34m0\u001b[39;49;00m,:]\r\n reviews = x[\u001b[34m1\u001b[39;49;00m:,:]\r\n embeds = \u001b[36mself\u001b[39;49;00m.embedding(reviews)\r\n lstm_out, _ = \u001b[36mself\u001b[39;49;00m.lstm(embeds)\r\n out = \u001b[36mself\u001b[39;49;00m.dense(lstm_out)\r\n out = out[lengths - \u001b[34m1\u001b[39;49;00m, \u001b[36mrange\u001b[39;49;00m(\u001b[36mlen\u001b[39;49;00m(lengths))]\r\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mself\u001b[39;49;00m.sig(out.squeeze())\r\n"
]
],
[
[
"The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.\n\nFirst we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving.",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.utils.data\n\n# Read in only the first 250 rows\ntrain_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)\n\n# Turn the input pandas dataframe into tensors\ntrain_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()\ntrain_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()\n\n# Build the dataset\ntrain_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)\n# Build the dataloader\ntrain_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)",
"_____no_output_____"
],
[
"train_sample_X[100]",
"_____no_output_____"
]
],
[
[
"### (TODO) Writing the training method\n\nNext we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.",
"_____no_output_____"
]
],
[
[
"def train(model, train_loader, epochs, optimizer, loss_fn, device):\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n # Sets model to TRAIN mode\n model.train()\n # Makes predictions\n yhat = model(batch_X)\n # Computes loss\n loss = loss_fn(yhat, batch_y)\n # Computes gradients\n loss.backward()\n # Updates parameters and zeroes gradients\n optimizer.step()\n optimizer.zero_grad()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))",
"_____no_output_____"
]
],
[
[
"Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.",
"_____no_output_____"
]
],
[
[
"import torch.optim as optim\nfrom train.model import LSTMClassifier\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = LSTMClassifier(32, 100, 5000).to(device)\noptimizer = optim.Adam(model.parameters())\nloss_fn = torch.nn.BCELoss()\n\ntrain(model, train_sample_dl, 5, optimizer, loss_fn, device)",
"Epoch: 1, BCELoss: 0.6940271496772766\nEpoch: 2, BCELoss: 0.6846091747283936\nEpoch: 3, BCELoss: 0.6763787031173706\nEpoch: 4, BCELoss: 0.6675016164779664\nEpoch: 5, BCELoss: 0.6570009350776672\n"
]
],
[
[
"In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.",
"_____no_output_____"
],
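[
"As a side note, the training log further below shows the container installing the libraries listed in `train/requirements.txt` (lines 1-5 of the pip output), so a plausible reconstruction of that file is simply:\n\n```text\npandas\nnumpy\nnltk\nbeautifulsoup4\nhtml5lib\n```",
"_____no_output_____"
],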
[
"### (TODO) Training the model\n\nWhen a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.\n\n**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.\n\nThe way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.",
"_____no_output_____"
]
],
[
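[
"# A minimal sketch (not the provided script itself) of how train.py might parse the\n# hyperparameters and SageMaker environment variables; the argument names mirror the\n# `hyperparameters` dict passed to the estimator below, while the default values and\n# `parse_known_args` usage are assumptions made for illustration only.\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\n# Hyperparameters arrive as command line arguments, e.g. --epochs 10 --hidden_dim 200\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--hidden_dim', type=int, default=200)\n# SageMaker exposes useful paths through environment variables (visible in the log below)\nparser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\nparser.add_argument('--data-dir', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))\nargs, _ = parser.parse_known_args()\nprint(args)",
"_____no_output_____"
],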
[
"from sagemaker.pytorch import PyTorch\n\nestimator = PyTorch(entry_point=\"train.py\",\n source_dir=\"train\",\n role=role,\n framework_version='0.4.0',\n train_instance_count=1,\n train_instance_type='ml.p2.xlarge',\n hyperparameters={\n 'epochs': 10,\n 'hidden_dim': 200,\n })",
"_____no_output_____"
],
[
"estimator.fit({'training': input_data})",
"2019-12-22 07:16:12 Starting - Starting the training job...\n2019-12-22 07:16:14 Starting - Launching requested ML instances.........\n2019-12-22 07:17:47 Starting - Preparing the instances for training......\n2019-12-22 07:18:51 Downloading - Downloading input data...\n2019-12-22 07:19:36 Training - Downloading the training image...\n2019-12-22 07:19:58 Training - Training image download completed. Training in progress.\u001b[34mbash: cannot set terminal process group (-1): Inappropriate ioctl for device\u001b[0m\n\u001b[34mbash: no job control in this shell\u001b[0m\n\u001b[34m2019-12-22 07:19:59,089 sagemaker-containers INFO Imported framework sagemaker_pytorch_container.training\u001b[0m\n\u001b[34m2019-12-22 07:19:59,117 sagemaker_pytorch_container.training INFO Block until all host DNS lookups succeed.\u001b[0m\n\u001b[34m2019-12-22 07:20:02,167 sagemaker_pytorch_container.training INFO Invoking user training script.\u001b[0m\n\u001b[34m2019-12-22 07:20:02,449 sagemaker-containers INFO Module train does not provide a setup.py. \u001b[0m\n\u001b[34mGenerating setup.py\u001b[0m\n\u001b[34m2019-12-22 07:20:02,449 sagemaker-containers INFO Generating setup.cfg\u001b[0m\n\u001b[34m2019-12-22 07:20:02,449 sagemaker-containers INFO Generating MANIFEST.in\u001b[0m\n\u001b[34m2019-12-22 07:20:02,449 sagemaker-containers INFO Installing module with the following command:\u001b[0m\n\u001b[34m/usr/bin/python -m pip install -U . -r requirements.txt\u001b[0m\n\u001b[34mProcessing /opt/ml/code\u001b[0m\n\u001b[34mCollecting pandas (from -r requirements.txt (line 1))\n Downloading https://files.pythonhosted.org/packages/74/24/0cdbf8907e1e3bc5a8da03345c23cbed7044330bb8f73bb12e711a640a00/pandas-0.24.2-cp35-cp35m-manylinux1_x86_64.whl (10.0MB)\u001b[0m\n\u001b[34mCollecting numpy (from -r requirements.txt (line 2))\u001b[0m\n\u001b[34m Downloading https://files.pythonhosted.org/packages/ab/e9/2561dbfbc05146bffa02167e09b9902e273decb2dc4cd5c43314ede20312/numpy-1.17.4-cp35-cp35m-manylinux1_x86_64.whl (19.8MB)\u001b[0m\n\u001b[34mCollecting nltk (from -r requirements.txt (line 3))\n Downloading https://files.pythonhosted.org/packages/f6/1d/d925cfb4f324ede997f6d47bea4d9babba51b49e87a767c170b77005889d/nltk-3.4.5.zip (1.5MB)\u001b[0m\n\u001b[34mCollecting beautifulsoup4 (from -r requirements.txt (line 4))\n Downloading https://files.pythonhosted.org/packages/3b/c8/a55eb6ea11cd7e5ac4bacdf92bac4693b90d3ba79268be16527555e186f0/beautifulsoup4-4.8.1-py3-none-any.whl (101kB)\u001b[0m\n\u001b[34mCollecting html5lib (from -r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/a5/62/bbd2be0e7943ec8504b517e62bab011b4946e1258842bc159e5dfde15b96/html5lib-1.0.1-py2.py3-none-any.whl (117kB)\u001b[0m\n\u001b[34mRequirement already satisfied, skipping upgrade: python-dateutil>=2.5.0 in /usr/local/lib/python3.5/dist-packages (from pandas->-r requirements.txt (line 1)) (2.7.5)\u001b[0m\n\u001b[34mCollecting pytz>=2011k (from pandas->-r requirements.txt (line 1))\n Downloading https://files.pythonhosted.org/packages/e7/f9/f0b53f88060247251bf481fa6ea62cd0d25bf1b11a87888e53ce5b7c8ad2/pytz-2019.3-py2.py3-none-any.whl (509kB)\u001b[0m\n\u001b[34mRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.5/dist-packages (from nltk->-r requirements.txt (line 3)) (1.11.0)\u001b[0m\n\u001b[34mCollecting soupsieve>=1.2 (from beautifulsoup4->-r requirements.txt (line 4))\n Downloading 
https://files.pythonhosted.org/packages/81/94/03c0f04471fc245d08d0a99f7946ac228ca98da4fa75796c507f61e688c2/soupsieve-1.9.5-py2.py3-none-any.whl\u001b[0m\n\u001b[34mCollecting webencodings (from html5lib->-r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl\u001b[0m\n\u001b[34mBuilding wheels for collected packages: nltk, train\n Running setup.py bdist_wheel for nltk: started\u001b[0m\n\u001b[34m Running setup.py bdist_wheel for nltk: finished with status 'done'\n Stored in directory: /root/.cache/pip/wheels/96/86/f6/68ab24c23f207c0077381a5e3904b2815136b879538a24b483\n Running setup.py bdist_wheel for train: started\u001b[0m\n\u001b[34m Running setup.py bdist_wheel for train: finished with status 'done'\n Stored in directory: /tmp/pip-ephem-wheel-cache-exzv230_/wheels/35/24/16/37574d11bf9bde50616c67372a334f94fa8356bc7164af8ca3\u001b[0m\n\u001b[34mSuccessfully built nltk train\u001b[0m\n\u001b[34mInstalling collected packages: numpy, pytz, pandas, nltk, soupsieve, beautifulsoup4, webencodings, html5lib, train\n Found existing installation: numpy 1.15.4\n Uninstalling numpy-1.15.4:\u001b[0m\n\u001b[34m Successfully uninstalled numpy-1.15.4\u001b[0m\n\u001b[34mSuccessfully installed beautifulsoup4-4.8.1 html5lib-1.0.1 nltk-3.4.5 numpy-1.17.4 pandas-0.24.2 pytz-2019.3 soupsieve-1.9.5 train-1.0.0 webencodings-0.5.1\u001b[0m\n\u001b[34mYou are using pip version 18.1, however version 19.3.1 is available.\u001b[0m\n\u001b[34mYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n\u001b[34m2019-12-22 07:20:14,310 sagemaker-containers INFO Invoking user script\n\u001b[0m\n\u001b[34mTraining Env:\n\u001b[0m\n\u001b[34m{\n \"log_level\": 20,\n \"input_dir\": \"/opt/ml/input\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"output_data_dir\": \"/opt/ml/output/data\",\n \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n \"job_name\": \"sagemaker-pytorch-2019-12-22-07-16-12-421\",\n \"num_cpus\": 4,\n \"channel_input_dirs\": {\n \"training\": \"/opt/ml/input/data/training\"\n },\n \"input_config_dir\": \"/opt/ml/input/config\",\n \"network_interface_name\": \"eth0\",\n \"current_host\": \"algo-1\",\n \"additional_framework_parameters\": {},\n \"module_name\": \"train\",\n \"input_data_config\": {\n \"training\": {\n \"S3DistributionType\": \"FullyReplicated\",\n \"TrainingInputMode\": \"File\",\n \"RecordWrapperType\": \"None\"\n }\n },\n \"num_gpus\": 1,\n \"framework_module\": \"sagemaker_pytorch_container.training:main\",\n \"output_dir\": \"/opt/ml/output\",\n \"module_dir\": \"s3://sagemaker-us-east-1-448461451902/sagemaker-pytorch-2019-12-22-07-16-12-421/source/sourcedir.tar.gz\",\n \"resource_config\": {\n \"hosts\": [\n \"algo-1\"\n ],\n \"network_interface_name\": \"eth0\",\n \"current_host\": \"algo-1\"\n },\n \"user_entry_point\": \"train.py\",\n \"model_dir\": \"/opt/ml/model\",\n \"hyperparameters\": {\n \"hidden_dim\": 200,\n \"epochs\": 10\n }\u001b[0m\n\u001b[34m}\n\u001b[0m\n\u001b[34mEnvironment 
variables:\n\u001b[0m\n\u001b[34mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"training\":\"/opt/ml/input/data/training\"},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_pytorch_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"epochs\":10,\"hidden_dim\":200},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"job_name\":\"sagemaker-pytorch-2019-12-22-07-16-12-421\",\"log_level\":20,\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-448461451902/sagemaker-pytorch-2019-12-22-07-16-12-421/source/sourcedir.tar.gz\",\"module_name\":\"train\",\"network_interface_name\":\"eth0\",\"num_cpus\":4,\"num_gpus\":1,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"train.py\"}\u001b[0m\n\u001b[34mSM_FRAMEWORK_PARAMS={}\u001b[0m\n\u001b[34mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n\u001b[34mSM_USER_ARGS=[\"--epochs\",\"10\",\"--hidden_dim\",\"200\"]\u001b[0m\n\u001b[34mSM_MODULE_NAME=train\u001b[0m\n\u001b[34mPYTHONPATH=/usr/local/bin:/usr/lib/python35.zip:/usr/lib/python3.5:/usr/lib/python3.5/plat-x86_64-linux-gnu:/usr/lib/python3.5/lib-dynload:/usr/local/lib/python3.5/dist-packages:/usr/lib/python3/dist-packages\u001b[0m\n\u001b[34mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n\u001b[34mSM_HP_EPOCHS=10\u001b[0m\n\u001b[34mSM_CHANNELS=[\"training\"]\u001b[0m\n\u001b[34mSM_HPS={\"epochs\":10,\"hidden_dim\":200}\u001b[0m\n\u001b[34mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n\u001b[34mSM_INPUT_DATA_CONFIG={\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}}\u001b[0m\n\u001b[34mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n\u001b[34mSM_CURRENT_HOST=algo-1\u001b[0m\n\u001b[34mSM_HOSTS=[\"algo-1\"]\u001b[0m\n\u001b[34mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n\u001b[34mSM_LOG_LEVEL=20\u001b[0m\n\u001b[34mSM_FRAMEWORK_MODULE=sagemaker_pytorch_container.training:main\u001b[0m\n\u001b[34mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n\u001b[34mSM_MODULE_DIR=s3://sagemaker-us-east-1-448461451902/sagemaker-pytorch-2019-12-22-07-16-12-421/source/sourcedir.tar.gz\u001b[0m\n\u001b[34mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n\u001b[34mSM_HP_HIDDEN_DIM=200\u001b[0m\n\u001b[34mSM_USER_ENTRY_POINT=train.py\u001b[0m\n\u001b[34mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n\u001b[34mSM_NUM_CPUS=4\u001b[0m\n\u001b[34mSM_CHANNEL_TRAINING=/opt/ml/input/data/training\u001b[0m\n\u001b[34mSM_NUM_GPUS=1\n\u001b[0m\n\u001b[34mInvoking script with the following command:\n\u001b[0m\n\u001b[34m/usr/bin/python -m train --epochs 10 --hidden_dim 200\n\n\u001b[0m\n\u001b[34mUsing device cuda.\u001b[0m\n\u001b[34mGet train data loader.\u001b[0m\n"
]
],
[
[
"## Step 5: Testing the model\n\nAs mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.\n\n## Step 6: Deploy the model for testing\n\nNow that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.\n\nThere is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.\n\n**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )\n\nSince we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words **If you are no longer using a deployed endpoint, shut it down!**\n\n**TODO:** Deploy the trained model.",
"_____no_output_____"
]
],
[
[
"# TODO: Deploy the trained model\npredictor = estimator.deploy(initial_instance_count = 1, instance_type = 'ml.p2.xlarge')",
"---------------------------------------------------------------------------------------------------------------!"
]
],
[
[
"## Step 7 - Use the model for testing\n\nOnce deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.",
"_____no_output_____"
]
],
[
[
"test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)",
"_____no_output_____"
],
[
"# We split the data into chunks and send each chunk seperately, accumulating the results.\n\ndef predict(data, rows=512):\n split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))\n predictions = np.array([])\n for array in split_array:\n predictions = np.append(predictions, predictor.predict(array))\n \n return predictions",
"_____no_output_____"
],
[
"predictions = predict(test_X.values)\npredictions = [round(num) for num in predictions]",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(test_y, predictions)",
"_____no_output_____"
]
],
[
[
"**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?",
"_____no_output_____"
],
[
"**Answer:**\n- Both the models performed similarly on the IMDB dataset. The XGBoost (eXtreme Gradient Boosting) model we have created earlier in the `IMDB Sentiment Analysis - XGBoost - Web App` tutorial had an accuracy of 0.8574 while the new LSTM (Long short-term memory) model we created here has an accuracy of 0.85152. We can definitely increase the performance of the new LSTM model by adding some parameter tuning. \n- XGBoost is a scalable and accurate implementation of gradient boosting machines and was developed to efficiently reduce computing time and allocate an optimal usage of memory resources and can perform better on small datasets without requirng a GPU, while LSTM is a Recurrent Neural Network and a state of the art algorithm for sequential data and performs better on very large datasets.\n- LSTM is better for sentiment analysis as it can remembers its previous inputs, due to an internal memory, which makes it perfectly suited for Machine Learning problems that involve sequential data.",
"_____no_output_____"
],
[
"### (TODO) More testing\n\nWe now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.",
"_____no_output_____"
]
],
[
[
"test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'",
"_____no_output_____"
]
],
[
[
"The question we now need to answer is, how do we send this review to our model?\n\nRecall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.\n - Removed any html tags and stemmed the input\n - Encoded the review as a sequence of integers using `word_dict`\n \nIn order process the review we will need to repeat these two steps.\n\n**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.",
"_____no_output_____"
]
],
[
[
"# TODO: Convert test_review into a form usable by the model and save the results in test_data\nconverted, length = convert_and_pad(word_dict, review_to_words(test_review))\ntest_data = [[length, *converted]]",
"_____no_output_____"
]
],
[
[
"Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.",
"_____no_output_____"
]
],
[
[
"predictor.predict(test_data)",
"_____no_output_____"
]
],
[
[
"Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.",
"_____no_output_____"
],
[
"### Delete the endpoint\n\nOf course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.",
"_____no_output_____"
]
],
[
[
"estimator.delete_endpoint()",
"_____no_output_____"
]
],
[
[
"## Step 6 (again) - Deploy the model for the web app\n\nNow that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.\n\nAs we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.\n\nWe will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.\n\nWhen deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.\n - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.\n - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.\n - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.\n - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.\n\nFor the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.\n\n### (TODO) Writing inference code\n\nBefore writing our custom inference code, we will begin by taking a look at the code which has been provided.",
"_____no_output_____"
]
],
[
[
"!pygmentize serve/predict.py",
"\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mjson\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpickle\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msagemaker_containers\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpandas\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mpd\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mnumpy\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnp\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.nn\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnn\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.optim\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36moptim\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.utils.data\u001b[39;49;00m\r\n\r\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mmodel\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m LSTMClassifier\r\n\r\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mutils\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m review_to_words, convert_and_pad\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32mmodel_fn\u001b[39;49;00m(model_dir):\r\n \u001b[33m\"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\u001b[39;49;00m\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mLoading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n\r\n \u001b[37m# First, load the parameters used to create the model.\u001b[39;49;00m\r\n model_info = {}\r\n model_info_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel_info.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_info_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\r\n model_info = torch.load(f)\r\n\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mmodel_info: {}\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m.format(model_info))\r\n\r\n \u001b[37m# Determine the device and construct the model.\u001b[39;49;00m\r\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n model = LSTMClassifier(model_info[\u001b[33m'\u001b[39;49;00m\u001b[33membedding_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mhidden_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mvocab_size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\r\n\r\n \u001b[37m# Load the store model parameters.\u001b[39;49;00m\r\n model_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) 
\u001b[34mas\u001b[39;49;00m f:\r\n model.load_state_dict(torch.load(f))\r\n\r\n \u001b[37m# Load the saved word_dict.\u001b[39;49;00m\r\n word_dict_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mword_dict.pkl\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(word_dict_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\r\n model.word_dict = pickle.load(f)\r\n\r\n model.to(device).eval()\r\n\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mDone loading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m model\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32minput_fn\u001b[39;49;00m(serialized_input_data, content_type):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mDeserializing the input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mif\u001b[39;49;00m content_type == \u001b[33m'\u001b[39;49;00m\u001b[33mtext/plain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\r\n data = serialized_input_data.decode(\u001b[33m'\u001b[39;49;00m\u001b[33mutf-8\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m data\r\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mRequested unsupported ContentType in content_type: \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m + content_type)\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32moutput_fn\u001b[39;49;00m(prediction_output, accept):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mSerializing the generated output.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mstr\u001b[39;49;00m(prediction_output)\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32mpredict_fn\u001b[39;49;00m(input_data, model):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mInferring sentiment of input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n\r\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n \r\n \u001b[34mif\u001b[39;49;00m model.word_dict \u001b[35mis\u001b[39;49;00m \u001b[36mNone\u001b[39;49;00m:\r\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mModel has not been loaded properly, no word_dict.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \r\n \u001b[37m# TODO: Process input_data so that it is ready to be sent to our model.\u001b[39;49;00m\r\n \u001b[37m# You should produce two variables:\u001b[39;49;00m\r\n \u001b[37m# data_X - A sequence of length 500 which represents the converted review\u001b[39;49;00m\r\n \u001b[37m# data_len - The length of the review\u001b[39;49;00m\r\n \r\n data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))\r\n\r\n \u001b[37m# Using data_X and data_len we construct an appropriate input tensor. 
Remember\u001b[39;49;00m\r\n \u001b[37m# that our model expects input data of the form 'len, review[500]'.\u001b[39;49;00m\r\n data_pack = np.hstack((data_len, data_X))\r\n data_pack = data_pack.reshape(\u001b[34m1\u001b[39;49;00m, -\u001b[34m1\u001b[39;49;00m)\r\n \r\n data = torch.from_numpy(data_pack)\r\n data = data.to(device)\r\n\r\n \u001b[37m# Make sure to put the model into evaluation mode\u001b[39;49;00m\r\n model.eval()\r\n\r\n \u001b[37m# TODO: Compute the result of applying the model to the input data. The variable `result` should\u001b[39;49;00m\r\n \u001b[37m# be a numpy array which contains a single integer which is either 1 or 0\u001b[39;49;00m\r\n \r\n \u001b[37m# get the output from the model\u001b[39;49;00m\r\n output = model(data).detach().cpu().numpy()\r\n\r\n \u001b[37m# convert output probabilities to predicted class (0 or 1)\u001b[39;49;00m\r\n result = np.round(output).astype(np.int)\r\n \r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mPrediction value, pre-rounding: {:.6f}\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m.format(output.item()))\r\n\r\n \u001b[34mreturn\u001b[39;49;00m result\r\n"
]
],
[
[
"As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.\n\n**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.",
"_____no_output_____"
],
[
"### Deploying the model\n\nNow that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.\n\n**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accomodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to sent image data.",
"_____no_output_____"
]
],
[
[
"from sagemaker.predictor import RealTimePredictor\nfrom sagemaker.pytorch import PyTorchModel\n\nclass StringPredictor(RealTimePredictor):\n def __init__(self, endpoint_name, sagemaker_session):\n super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')\n\nmodel = PyTorchModel(model_data=estimator.model_data,\n role = role,\n framework_version='0.4.0',\n entry_point='predict.py',\n source_dir='serve',\n predictor_cls=StringPredictor)\npredictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')",
"---------------------------------------------------------------------------------------------------------------!"
]
],
[
[
"### Testing the model\n\nNow that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.",
"_____no_output_____"
]
],
[
[
"import glob\n\ndef test_reviews(data_dir='../data/aclImdb', stop=250):\n \n results = []\n ground = []\n \n # We make sure to test both positive and negative reviews \n for sentiment in ['pos', 'neg']:\n \n path = os.path.join(data_dir, 'test', sentiment, '*.txt')\n files = glob.glob(path)\n \n files_read = 0\n \n print('Starting ', sentiment, ' files')\n \n # Iterate through the files and send them to the predictor\n for f in files:\n with open(f) as review:\n # First, we store the ground truth (was the review positive or negative)\n if sentiment == 'pos':\n ground.append(1)\n else:\n ground.append(0)\n # Read in the review and convert to 'utf-8' for transmission via HTTP\n review_input = review.read().encode('utf-8')\n # Send the review to the predictor and store the results\n results.append(int(predictor.predict(review_input)))\n \n # Sending reviews to our endpoint one at a time takes a while so we\n # only send a small number of reviews\n files_read += 1\n if files_read == stop:\n break\n \n return ground, results",
"_____no_output_____"
],
[
"ground, results = test_reviews()",
"Starting pos files\nStarting neg files\n"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(ground, results)",
"_____no_output_____"
]
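,
[
"# Beyond a single accuracy number, a confusion matrix shows whether the\n# endpoint's errors skew toward false positives or false negatives. This is\n# an optional extra check (not part of the original project tasks), reusing\n# the ground and results lists collected above.\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(ground, results)",
"_____no_output_____"
]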
],
[
[
"As an additional test, we can try sending the `test_review` that we looked at earlier.",
"_____no_output_____"
]
],
[
[
"predictor.predict(test_review)",
"_____no_output_____"
]
],
[
[
"Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.",
"_____no_output_____"
],
[
"## Step 7 (again): Use the model for the web app\n\n> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.\n\nSo far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.\n\n<img src=\"Web App Diagram.svg\">\n\nThe diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.\n\nIn the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and recieve data from a SageMaker endpoint.\n\nLastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.\n\n### Setting up a Lambda function\n\nThe first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.\n\n#### Part A: Create an IAM Role for the Lambda function\n\nSince we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.\n\nUsing the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.\n\nIn the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.\n\nLastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.\n\n#### Part B: Create a Lambda function\n\nNow it is time to actually create the Lambda function.\n\nUsing the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. 
Then, click on **Create Function**.\n\nOn the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below. \n\n```python\n# We need to use the low-level library to interact with SageMaker since the SageMaker API\n# is not available natively through Lambda.\nimport boto3\n\ndef lambda_handler(event, context):\n\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given\n response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created\n ContentType = 'text/plain', # The data format that is expected\n Body = event['body']) # The actual review\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n return {\n 'statusCode' : 200,\n 'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },\n 'body' : result\n }\n```\n\nOnce you have copy and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.",
"_____no_output_____"
]
],
[
[
"predictor.endpoint",
"_____no_output_____"
]
],
[
[
"Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.\n\n### Setting up API Gateway\n\nNow that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.\n\nUsing AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.\n\nOn the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.\n\nNow we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.\n\nSelect the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.\n\nFor the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.\n\nType the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.\n\nThe last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.\n\nYou have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.",
"_____no_output_____"
],
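[
"Before wiring up the web page, it is worth smoke-testing the new public API directly from the notebook. Below is a minimal sketch using the `requests` library (this cell is illustrative: the URL is a placeholder for your own Invoke URL, and `requests` may need to be installed first):\n\n```python\nimport requests\n\n# Placeholder: paste the Invoke URL copied from the API Gateway console here.\napi_url = 'https://EXAMPLE.execute-api.us-east-1.amazonaws.com/prod'\n\nreview = 'One of the most heartfelt and well acted films I have seen in years.'\nresponse = requests.post(api_url, data=review.encode('utf-8'))\nprint(response.text)  # expected to be the sentiment returned by the Lambda function, e.g. '1'\n```",
"_____no_output_____"
],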
[
"## Step 4: Deploying our web app\n\nNow that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.\n\nIn the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\\*\\*REPLACE WITH PUBLIC API URL\\*\\***. Replace this string with the url that you wrote down in the last step and then save the file.\n\nNow, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.\n\nIf you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!\n\n> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.\n\n**TODO:** Make sure that you include the edited `index.html` file in your project submission.",
"_____no_output_____"
],
[
"Now that your web app is working, trying playing around with it and see how well it works.\n\n**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?",
"_____no_output_____"
],
[
"**Answer:**\nMy review: A lot of notes were hit by Ford v Ferrari. The characters are fleshed out very well and give you the emotional attachment you're looking for in a movie. Bale and Damon's performances are great; they pull you into the story and completely disappear into their roles. James Mangold proves, once again, to be a master behind the camera. The action scenes are shot to perfection and will have you biting your nails.\n\nThe film's technical aspects were top-notch as well. Wait for this film to be nominated for sound design and editing. These technical aspects, during the climax of the film, will blow your hair back and give you goosebumps.\n\nAll in all, this film provides a pleasing experience for the crowd that not many films can even come close to delivering. I loved about every second of it, despite its long runtime.\n\nPrediction: Your review was POSITIVE!",
"_____no_output_____"
],
[
"### Delete the endpoint\n\nRemember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e7f3fffc10a7b276a261a97c7e5e5ddaa387eab2 | 6,502 | ipynb | Jupyter Notebook | notebooks/v4_color_calibration_lightcone.ipynb | ArgonneCPAC/skysim | f271debe3439efd1ae5230c6020b2dbc5f79d824 | [
"BSD-2-Clause"
] | 4 | 2020-08-08T10:01:49.000Z | 2022-02-27T07:21:00.000Z | notebooks/v4_color_calibration_lightcone.ipynb | ArgonneCPAC/skysim | f271debe3439efd1ae5230c6020b2dbc5f79d824 | [
"BSD-2-Clause"
] | 67 | 2018-07-16T22:12:16.000Z | 2020-07-02T01:12:48.000Z | notebooks/v4_color_calibration_lightcone.ipynb | aphearin/cosmodc2 | 5bc2abebd7123f29b424efc11c3ef374a51cd6c1 | [
"BSD-3-Clause"
] | null | null | null | 31.110048 | 401 | 0.593971 | [
[
[
"%matplotlib inline\n\nmorange = u'#ff7f0e'\nmblue = u'#1f77b4'\nmgreen = u'#2ca02c'\nmred = u'#d62728'\nmpurple = u'#9467bd'",
"_____no_output_____"
],
[
"brightest_rband = -24.64\nbrightest_rband = -np.inf\ndef apply_um_galacticus_matching_error(um_rest_old, rest_old, um_rest_new):\n \"\"\"\n \"\"\"\n mag_r_error = rest_old - um_rest_old\n rest_new = um_rest_new + mag_r_error\n return np.where(rest_new < brightest_rband, brightest_rband, rest_new)\n\ndef update_observed_rband(r_obs_old, r_rest_old, r_rest_new):\n \"\"\"\n \"\"\"\n dr = r_obs_old - r_rest_old\n return r_rest_new + dr\n\ndef update_mag_i_obs(ri_rest_old, mag_i_rest_old, mag_i_obs_old, mag_r_rest_new):\n mag_i_rest_new = mag_r_rest_new - ri_rest_old\n kcorrection_i = mag_i_obs_old - mag_i_rest_old\n mag_i_obs_new = kcorrection_i + mag_i_rest_new\n return mag_i_rest_new, mag_i_obs_new\n",
"_____no_output_____"
],
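[
"A note on the helpers above (a reading of the code, added for clarity): `apply_um_galacticus_matching_error` transfers the old UniverseMachine-to-Galacticus residual onto the remapped magnitude, rest_new = um_rest_new + (rest_old - um_rest_old), then clips anything brighter than `brightest_rband`; `update_observed_rband` preserves the original observed-minus-rest offset when the rest-frame magnitude changes; and `update_mag_i_obs` does the same for the i-band via the stored k-correction-like term.",
"_____no_output_____"
],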
[
"import os\ndirname = \"/Volumes/simulation_data_2018/protoDC2_v4\"\nbasename = \"v4.1.dust136.1substep.all.aph_reduced.hdf5\"\n\nfname = os.path.join(dirname, basename)\n\nfrom astropy.table import Table\nmock = Table.read(fname, path='data')\n\nmock.rename_column('um_restframe_extincted_sdss_abs_magr', 'um_mag_r_rest')\nmock.rename_column('um_restframe_extincted_sdss_gr', 'um_gr_rest')\nmock.rename_column('um_restframe_extincted_sdss_ri', 'um_ri_rest')\nmock['um_mag_g_rest'] = mock['um_gr_rest'] + mock['um_mag_r_rest']\nmock['um_mag_i_rest'] = mock['um_mag_r_rest'] - mock['um_ri_rest']\nprint(mock.keys())",
"/Users/aphearin/anaconda/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n"
],
[
"from cosmodc2.stellar_mass_remapping import lift_high_mass_mstar\n\nnew_mstar = lift_high_mass_mstar(\n mock['um_mpeak'], mock['um_obs_sm'], mock['um_upid'], mock['redshift'])\nmock['new_um_mstar'] = new_mstar\n \nfrom cosmodc2.sdss_colors import magr_monte_carlo\nmock['new_um_mag_r_rest'] = magr_monte_carlo(\n mock['new_um_mstar'], mock['um_upid'], mock['redshift'])\nmock['new2_um_mag_r_rest'] = magr_monte_carlo(\n mock['new_um_mstar'], mock['um_upid'], mock['redshift'])\n\n# Estimate the new restframe Galacticus r-band\nmock['new_mag_r_rest'] = apply_um_galacticus_matching_error(\n mock['um_mag_r_rest'], mock['mag_r_rest'], mock['new_um_mag_r_rest'])\nmock['new2_mag_r_rest'] = apply_um_galacticus_matching_error(\n mock['um_mag_r_rest'], mock['mag_r_rest'], mock['new2_um_mag_r_rest'])\n\n# # Estimate the new observed Galacticus r-band\nmock['new_mag_r_obs'] = update_observed_rband(\n mock['mag_r_obs'], mock['mag_r_rest'], mock['new_um_mag_r_rest'])\nmock['new2_mag_r_obs'] = update_observed_rband(\n mock['mag_r_obs'], mock['mag_r_rest'], mock['new2_um_mag_r_rest'])\n",
"_____no_output_____"
],
[
"\n# # Estimate the new observed UniverseMachine i-band\n_mag_i_rest_new, _mag_i_obs_new = update_mag_i_obs(\n mock['um_ri_rest'], mock['um_mag_i_rest'], \n mock['mag_i_obs'], mock['new_um_mag_r_rest'])\nmock['new_mag_i_obs'] = _mag_i_obs_new\nmock['new_mag_i_rest'] = _mag_i_rest_new\n\n_mag_i_rest_new2, _mag_i_obs_new2 = update_mag_i_obs(\n mock['um_ri_rest'], mock['um_mag_i_rest'], \n mock['mag_i_obs'], mock['new2_um_mag_r_rest'])\nmock['new2_mag_i_obs'] = _mag_i_obs_new2\nmock['new2_mag_i_rest'] = _mag_i_rest_new2",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7f421cd66d7cf94938bf193557eaec298842567 | 1,783 | ipynb | Jupyter Notebook | monty_hall/Untitled2.ipynb | JacobJWalker/python_and_ai | c2ee70b14402d1b787bf73e56edcd242f89fb36b | [
"MIT"
] | null | null | null | monty_hall/Untitled2.ipynb | JacobJWalker/python_and_ai | c2ee70b14402d1b787bf73e56edcd242f89fb36b | [
"MIT"
] | null | null | null | monty_hall/Untitled2.ipynb | JacobJWalker/python_and_ai | c2ee70b14402d1b787bf73e56edcd242f89fb36b | [
"MIT"
] | null | null | null | 27.430769 | 129 | 0.540662 | [
[
[
"# Name: Monty Hall Game and Monte Carlo Simulation\n# Version: 0.2a3\n# Summary: A simple game that replicates the Monty Hall problem, and a Monte Carlo simulator to determine probabilities\n# Keywords: Monty Hall, Monte Carlo\n# Author: Jacob J. Walker\n#\n# Header comments based on meta-data specs at https://packaging.python.org/specifications/core-metadata/\n\nimport random\n\ncar_location = random.randint(1,3)\n\n# print(car_location)\n\n# Initialize doors\ndoor = {}dfdjjkj\nfor i in (1,2,3):\n door[i] = \"Goat\"\n# print(str(i) + \": \" + door[i])\n\ndoor[car_location] = \"Car\"\n\n# Get guess and tell player whether they won or not\nguess = input(\"Which door do you choose? \")\n\nif door[int(guess)] == \"Goat\":\n print(\"Sorry, you got a goat. If you chose door number \" + str(car_location) + \" you would have won the car...\")\nelif door[int(guess)] == \"Car\":\n print(\"You won a car!!!\")\nelse:\n print(\"Sorry, I did not understand.\")",
"_____no_output_____"
]
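,
[
"# The header comments above promise a Monte Carlo simulator alongside the\n# interactive game, but only the game is implemented in the cell above.\n# Below is a minimal, self-contained sketch of that simulation (the names\n# play_once/trials are illustrative, not from the original file); it should\n# estimate win rates near 1/3 for staying and 2/3 for switching.\nimport random\n\ndef play_once(switch):\n    car = random.randint(1, 3)\n    guess = random.randint(1, 3)\n    # Monty opens a goat door that is neither the contestant's pick nor the car\n    opened = next(d for d in (1, 2, 3) if d != guess and d != car)\n    if switch:\n        guess = next(d for d in (1, 2, 3) if d != guess and d != opened)\n    return guess == car\n\ntrials = 100000\nstay_wins = sum(play_once(False) for _ in range(trials))\nswitch_wins = sum(play_once(True) for _ in range(trials))\nprint('Stay win rate: ' + str(stay_wins / trials))\nprint('Switch win rate: ' + str(switch_wins / trials))",
"_____no_output_____"
]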
]
] | [
"code"
] | [
[
"code"
]
] |
e7f42d3544b7f3e3f75a2f72ac4deee2e749cde8 | 10,088 | ipynb | Jupyter Notebook | site/en-snapshot/hub/tutorials/tweening_conv3d.ipynb | phoenix-fork-tensorflow/docs-l10n | 2287738c22e3e67177555e8a41a0904edfcf1544 | [
"Apache-2.0"
] | 491 | 2020-01-27T19:05:32.000Z | 2022-03-31T08:50:44.000Z | site/en-snapshot/hub/tutorials/tweening_conv3d.ipynb | phoenix-fork-tensorflow/docs-l10n | 2287738c22e3e67177555e8a41a0904edfcf1544 | [
"Apache-2.0"
] | 511 | 2020-01-27T22:40:05.000Z | 2022-03-21T08:40:55.000Z | site/en-snapshot/hub/tutorials/tweening_conv3d.ipynb | phoenix-fork-tensorflow/docs-l10n | 2287738c22e3e67177555e8a41a0904edfcf1544 | [
"Apache-2.0"
] | 627 | 2020-01-27T21:49:52.000Z | 2022-03-28T18:11:50.000Z | 33.852349 | 339 | 0.53073 | [
[
[
"##### Copyright 2019 The TensorFlow Hub Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"_____no_output_____"
]
],
[
[
"# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================",
"_____no_output_____"
]
],
[
[
"# Video Inbetweening using 3D Convolutions\n",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/hub/tutorials/tweening_conv3d\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tweening_conv3d.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/hub/blob/master/examples/colab/tweening_conv3d.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/tweening_conv3d.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n <a href=\"https://tfhub.dev/google/tweening_conv3d_bair/1\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"Yunpeng Li, Dominik Roblek, and Marco Tagliasacchi. From Here to There: Video Inbetweening Using Direct 3D Convolutions, 2019.\n\nhttps://arxiv.org/abs/1905.10240\n\n\nCurrent Hub characteristics:\n- has models for BAIR Robot pushing videos and KTH action video dataset (though this colab uses only BAIR)\n- BAIR dataset already available in Hub. However, KTH videos need to be supplied by the users themselves.\n- only evaluation (video generation) for now\n- batch size and frame size are hard-coded\n",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"Since `tfds.load('bair_robot_pushing_small', split='test')` would download a 30GB archive that also contains the training data, we download a separated archive that only contains the 190MB test data. The used dataset has been published by [this paper](https://arxiv.org/abs/1710.05268) and is licensed as Creative Commons BY 4.0.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow_hub as hub\nimport tensorflow_datasets as tfds\n\nfrom tensorflow_datasets.core import SplitGenerator\nfrom tensorflow_datasets.video.bair_robot_pushing import BairRobotPushingSmall\n\nimport tempfile\nimport pathlib\n\nTEST_DIR = pathlib.Path(tempfile.mkdtemp()) / \"bair_robot_pushing_small/softmotion30_44k/test/\"",
"_____no_output_____"
],
[
"# Download the test split to $TEST_DIR\n!mkdir -p $TEST_DIR\n!wget -nv https://storage.googleapis.com/download.tensorflow.org/data/bair_test_traj_0_to_255.tfrecords -O $TEST_DIR/traj_0_to_255.tfrecords",
"_____no_output_____"
],
[
"# Since the dataset builder expects the train and test split to be downloaded,\n# patch it so it only expects the test data to be available\nbuilder = BairRobotPushingSmall()\ntest_generator = SplitGenerator(name='test', gen_kwargs={\"filedir\": str(TEST_DIR)})\nbuilder._split_generators = lambda _: [test_generator]\nbuilder.download_and_prepare()",
"_____no_output_____"
]
],
[
[
"## BAIR: Demo based on numpy array inputs",
"_____no_output_____"
]
],
[
[
"# @title Load some example data (BAIR).\nbatch_size = 16\n\n# If unable to download the dataset automatically due to \"not enough disk space\", please download manually to Google Drive and\n# load using tf.data.TFRecordDataset.\nds = builder.as_dataset(split=\"test\")\ntest_videos = ds.batch(batch_size)\nfirst_batch = next(iter(test_videos))\ninput_frames = first_batch['image_aux1'][:, ::15]\ninput_frames = tf.cast(input_frames, tf.float32)",
"_____no_output_____"
],
[
"# @title Visualize loaded videos start and end frames.\n\nprint('Test videos shape [batch_size, start/end frame, height, width, num_channels]: ', input_frames.shape)\nsns.set_style('white')\nplt.figure(figsize=(4, 2*batch_size))\n\nfor i in range(batch_size)[:4]:\n plt.subplot(batch_size, 2, 1 + 2*i)\n plt.imshow(input_frames[i, 0] / 255.0)\n plt.title('Video {}: First frame'.format(i))\n plt.axis('off')\n plt.subplot(batch_size, 2, 2 + 2*i)\n plt.imshow(input_frames[i, 1] / 255.0)\n plt.title('Video {}: Last frame'.format(i))\n plt.axis('off')",
"_____no_output_____"
]
],
[
[
"### Load Hub Module",
"_____no_output_____"
]
],
[
[
"hub_handle = 'https://tfhub.dev/google/tweening_conv3d_bair/1'\nmodule = hub.load(hub_handle).signatures['default']",
"_____no_output_____"
]
],
[
[
"### Generate and show the videos",
"_____no_output_____"
]
],
[
[
"filled_frames = module(input_frames)['default'] / 255.0",
"_____no_output_____"
],
[
"# Show sequences of generated video frames.\n\n# Concatenate start/end frames and the generated filled frames for the new videos.\ngenerated_videos = np.concatenate([input_frames[:, :1] / 255.0, filled_frames, input_frames[:, 1:] / 255.0], axis=1)\n\nfor video_id in range(4):\n fig = plt.figure(figsize=(10 * 2, 2))\n for frame_id in range(1, 16):\n ax = fig.add_axes([frame_id * 1 / 16., 0, (frame_id + 1) * 1 / 16., 1],\n xmargin=0, ymargin=0)\n ax.imshow(generated_videos[video_id, frame_id])\n ax.axis('off')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f432e747cddf4396b94cb85d67f122843ec4ee | 9,171 | ipynb | Jupyter Notebook | hello_world_binder[1].ipynb | paradise110302/Detect_dep | 8e1e58564506362822222be9585131d2cdc1a552 | [
"Apache-2.0"
] | null | null | null | hello_world_binder[1].ipynb | paradise110302/Detect_dep | 8e1e58564506362822222be9585131d2cdc1a552 | [
"Apache-2.0"
] | null | null | null | hello_world_binder[1].ipynb | paradise110302/Detect_dep | 8e1e58564506362822222be9585131d2cdc1a552 | [
"Apache-2.0"
] | null | null | null | 32.292254 | 153 | 0.578999 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f433a1d2cd6db007171c9d1431f6cb9dff02a7 | 41,685 | ipynb | Jupyter Notebook | sums/forward-precise.ipynb | danja/webplot | 8fc1cf03fb80ffeecd5bb7f17daaa7b6348cb7d0 | [
"MIT"
] | null | null | null | sums/forward-precise.ipynb | danja/webplot | 8fc1cf03fb80ffeecd5bb7f17daaa7b6348cb7d0 | [
"MIT"
] | null | null | null | sums/forward-precise.ipynb | danja/webplot | 8fc1cf03fb80ffeecd5bb7f17daaa7b6348cb7d0 | [
"MIT"
] | null | null | null | 304.270073 | 37,596 | 0.925921 | [
[
[
"# 5-Bar Linkage : Forward Kinematics\n#\n# from Study and Development of Parallel Robots Based On 5-Bar Linkage\n# Manh Tuong Hoang, Trung Tin Vuong, Cong Bang Pham 2015\n# https://www.researchgate.net/publication/283356024_Study_and_Development_of_Parallel_Robots_Based_On_5-Bar_Linkage\n\nimport math\nimport numpy as np\nimport mpmath\nfrom mpmath import mp, mpf, fp\nimport matplotlib.pyplot as plt\n\nmpmath.mp.dps = 1000 # decimal places\n\nnPoints = 100\n\nbase = mpf(10) # distance between servos\nhumerus = mpf(10) # from each servo to elbow\nradius = mpf(10) # elbow to pen\n\nangleMin = mpf(60 * mpmath.pi/180)\nangleMax = mpf(120 * mpmath.pi/180)\n\nanglesL = mpmath.linspace(angleMin, angleMax, nPoints)\nanglesR = mpmath.linspace(angleMin, angleMax, nPoints)\n\nbaselineX = mpmath.linspace(0, base, 100)\nbaselineY = mpmath.zeros(100, 1)\n\nplt.style.use('seaborn-whitegrid')\nfig, ax = plt.subplots()\nplt.grid(True)\n\ntwo = mpf(2)\nprint(type(two))\n\nfor angleL in anglesL:\n for angleR in anglesR:\n E = two*radius*(base+humerus*(mpmath.cos(angleR)-mpmath.cos(angleL)))\n F = two*humerus*radius*(mpmath.sin(angleR)-mpmath.sin(angleL))\n G = base*base+two*humerus*humerus+two*base*humerus*mpmath.cos(angleR)-two*humerus*humerus*mpmath.cos(angleR-angleL)\n\n if G-E != 0 and E*E+F*F-G*G > 0: # avoid div by zero, sqrt of negative\n\n lumpXplus = (-F+mpmath.sqrt(E*E+F*F-G*G)) / (G-E)\n lumpXminus = (-F-mpmath.sqrt(E*E+F*F-G*G)) / (G-E)\n lumpYplus = (-F+mpmath.sqrt(E*E+F*F-G*G)) / (G-E)\n lumpYminus = (-F-mpmath.sqrt(E*E+F*F-G*G)) / (G-E)\n \n xPlus = base+humerus*mpmath.cos(angleR)+radius*mpmath.cos(two*mpmath.atan(lumpXplus))\n xMinus = base+humerus*mpmath.cos(angleR)+radius*mpmath.cos(two*mpmath.atan(lumpXminus))\n \n yPlus = humerus*mpmath.sin(angleR)+radius*mpmath.sin(two*mpmath.atan(lumpYplus))\n yMinus = humerus*mpmath.sin(angleR)+radius*mpmath.sin(two*mpmath.atan(lumpYminus))\n \n ax.plot(xMinus, yMinus, 'o', color='blue')\n ax.plot(xMinus, yPlus, 'o', color='green')\n ax.plot(xPlus, yMinus, 'o', color='red')\n ax.plot(xPlus, yPlus, 'o', color='yellow')\n \n \nax.plot(baselineX, baselineY, color='black')\nplt.show()\n\n",
"<class 'mpmath.ctx_mp_python.mpf'>\n"
]
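,
[
"A note on the formulas in the loop above (a sketch of the derivation as implemented, following the referenced paper's notation): the passive joint angle $\\theta$ satisfies a constraint of the form $E\\cos\\theta + F\\sin\\theta + G = 0$. Substituting $t = \\tan(\\theta/2)$ turns this into a quadratic in $t$ with roots\n\n$$t_{\\pm} = \\frac{-F \\pm \\sqrt{E^2 + F^2 - G^2}}{G - E},$$\n\nwhich is exactly what the `lump` expressions compute, and the guard `E*E+F*F-G*G > 0` rejects configurations with no real solution. The pen position then follows as $x = b + h\\cos\\theta_R + r\\cos(2\\arctan t)$ and $y = h\\sin\\theta_R + r\\sin(2\\arctan t)$, with $b$ = base, $h$ = humerus, $r$ = radius.",
"_____no_output_____"
]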
]
] | [
"code"
] | [
[
"code"
]
] |
e7f436c3fa94b6bd15b926f7ac65d78bc5138966 | 78,896 | ipynb | Jupyter Notebook | Backdoor_attack.ipynb | Junjie-Chu/ML_Leak_and_Badnet | 4a04fb85a020d0216066f7ea7eee9b7d227677af | [
"MIT"
] | 1 | 2022-03-17T10:01:03.000Z | 2022-03-17T10:01:03.000Z | Backdoor_attack.ipynb | Junjie-Chu/CISPA_codetest | 4a04fb85a020d0216066f7ea7eee9b7d227677af | [
"MIT"
] | null | null | null | Backdoor_attack.ipynb | Junjie-Chu/CISPA_codetest | 4a04fb85a020d0216066f7ea7eee9b7d227677af | [
"MIT"
] | null | null | null | 78,896 | 78,896 | 0.784083 | [
[
[
"# ***This is a simple implement of basic backdoor attack on MNIST and CIFAR10***",
"_____no_output_____"
],
[
"## ***Install all independency here***",
"_____no_output_____"
]
],
[
[
"!pip3 install torch==1.10.2+cpu torchvision==0.11.3+cpu torchaudio==0.10.2+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html\n!pip3 install ipywidgets\n!jupyter nbextension enable --py widgetsnbextension",
"_____no_output_____"
]
],
[
[
"## ***Import all dependency here***",
"_____no_output_____"
]
],
[
[
"# to monitor the progress\nfrom tqdm import tqdm\nimport time\n# basic dependency\nimport numpy as np\nimport random\n# pytorch related\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\n# for visulization\nimport matplotlib.pyplot as plt\n# for mount from Colab to my drive\nfrom google.colab import drive\ndrive.mount('/content/drive')\nimport os\nos.chdir('/content/drive/My Drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
],
[
[
"##***Define a custom dataset class***",
"_____no_output_____"
]
],
[
[
"# to use dataloader, a custom class is needed, since the 'MNIST' or 'CIFAR10' object does not support item assignment\n# this class has some mandotory functions\n# another way is to use TensorDataset(x,y), to make code more clear, I use custom dataset\nclass poisoned_dataset(Dataset):\n\n def __init__(self, dataset, fakelabel, portion = 0.01,transform = None, device=torch.device(\"cpu\"),dataname='MNIST'):\n self.dataset, self.poison_index = self.add_trigger(dataset, fakelabel, portion)\n self.device = device\n self.transform = transform\n self.dataname = dataname\n\n def __getitem__(self, item):\n # extract img, img should be an array\n img = self.dataset[item][0]\n if self.transform:\n img = self.transform(img)\n #print(type(img))\n # if MNIST dataset, we need to add one dimension\n if self.dataname == 'MNIST':\n img = img[..., np.newaxis]\n else:\n pass\n #print(img.shape)\n # img shoud be like (channel, row, col)\n img = torch.Tensor(img).permute(2, 0, 1)\n # extract label\n # one hot encoding so that MSELoss are used\n # 10 classes thus 10 dimension\n label = np.zeros(10)\n label[self.dataset[item][1]] = 1\n label = torch.Tensor(label)\n # send img,label to device\n img = img.to(self.device)\n label = label.to(self.device)\n \n return img, label\n\n def __len__(self):\n return len(self.dataset)\n\n def add_trigger(self, dataset, fakelabel, portion):\n print(\"Generating Backdoor Images:\")\n random.seed(19260817)\n length = len(dataset)\n count = int(length*portion)\n sample_index = np.array(random.sample(range(length), int(length * portion)))\n # to change the dataset, convert type:dataset to type:list\n poisoned_dataset = list(dataset)\n # convert type:img to type:array\n for i in range(length):\n poisoned_dataset[i] = (np.array(poisoned_dataset[i][0]),poisoned_dataset[i][1])\n # create poisoned images \n for i in tqdm(sample_index):\n # extract the sampled data(image,label)\n data = poisoned_dataset[i]\n # create a poisoned image\n # img is like (row, col, channels)\n img = data[0]\n row = img.shape[0]\n col = img.shape[1]\n img[row - 1][col - 6] = 255\n img[row - 3][col - 6] = 255\n img[row - 2][col - 3] = 255\n img[row - 3][col - 3] = 255\n img[row - 4][col - 3] = 255\n img[row - 5][col - 6] = 255\n img[row - 1][col - 2] = 255\n img[row - 1][col - 3] = 255\n img[row - 1][col - 4] = 255\n img[row - 5][col - 2] = 255\n img[row - 5][col - 3] = 255\n img[row - 5][col - 4] = 255\n\n # give the poisoned image and fake label to original dataset\n poisoned_dataset[i] = (img,fakelabel)\n\n print(str(count) + \" Backdoor Images, \" + str(length - count) + \" Clean Images\")\n return poisoned_dataset, sample_index",
"_____no_output_____"
]
],
[
[
"## ***Define a Neural Network***",
"_____no_output_____"
]
],
[
[
"class BadNet(nn.Module):\n\n def __init__(self,inputchannels,outputclasses):\n super().__init__()\n self.conv1 = nn.Conv2d(inputchannels, 16, 5)\n self.conv2 = nn.Conv2d(16, 32, 5)\n self.pool = nn.AvgPool2d(2)\n if inputchannels == 3:\n inputfeatures = 800\n else:\n inputfeatures = 512\n self.fc1 = nn.Linear(inputfeatures, 512)\n self.fc2 = nn.Linear(512, outputclasses)\n\n def forward(self, x):\n # conv block1\n x = self.conv1(x)\n x = F.relu(x)\n x = self.pool(x)\n # conv block2\n x = self.conv2(x)\n x = F.relu(x)\n x = self.pool(x)\n # reshape(flat) the feature to be the input of full connect layer\n x = x.view(-1, self.num_features(x))\n # fc block1\n x = self.fc1(x)\n x = F.relu(x)\n # fc block2\n x = self.fc2(x)\n x = F.softmax(x,dim=-1)\n return x\n\n def num_features(self, x):\n # size of different dimensions\n size_D = x.size()[1:]\n total = 1\n for i in size_D:\n total = total*i\n return total",
"_____no_output_____"
]
],
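[
[
"A quick sanity check of the hard-coded `inputfeatures` values above (worked arithmetic added for reference, not text from the original notebook): with 5x5 kernels, no padding, and 2x2 average pooling, a 28x28 MNIST image becomes 24x24 after `conv1`, 12x12 after pooling, 8x8 after `conv2`, and 4x4 after the second pooling, so the flattened size is 32\\*4\\*4 = 512; a 32x32 CIFAR10 image goes 32 -> 28 -> 14 -> 10 -> 5, giving 32\\*5\\*5 = 800.",
"_____no_output_____"
]
],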
[
[
"## ***Functions for training and evaluating***",
"_____no_output_____"
]
],
[
[
"def train(model, dataloader, criterion, opt):\n running_loss = 0\n # switch to model:train\n # no difference here since no dropout and BN\n model.train()\n count = 0\n for i, data in tqdm(enumerate(dataloader)):\n opt.zero_grad()\n imgs, labels = data\n predict = model(imgs)\n loss = criterion(predict, labels)\n loss.backward()\n opt.step()\n count = i\n running_loss += loss\n return running_loss / count\n\n\ndef evaluation(model, dataloader, batch_size=64):\n # switch to model:eval\n # no difference here since no dropout and BN \n model.eval()\n # y_tensorlist is a list consists of some tensors\n y_true_tensorlist = []\n y_predict_tensorlist = []\n for step, (batch_x, batch_y) in enumerate(dataloader):\n batch_y_predict = model(batch_x)\n \n batch_y_predict = torch.argmax(batch_y_predict, dim=1)\n #print(batch_y_predict)\n y_predict_tensorlist.append(batch_y_predict)\n #one hot code of label asks for this argmax\n batch_y = torch.argmax(batch_y, dim=1)\n y_true_tensorlist.append(batch_y)\n \n # combine the tensors in the list into one\n y_true = torch.cat(y_true_tensorlist,0)\n y_predict = torch.cat(y_predict_tensorlist,0)\n\n # compute accuracy\n length = len(y_true)\n right_length = torch.sum(y_true == y_predict)\n #print(right_length/length)\n \n return right_length/length",
"_____no_output_____"
]
],
[
[
"## ***Main part***",
"_____no_output_____"
]
],
[
[
"# prepare original data\ntrain_data_MNIST = datasets.MNIST(root=\"./data_1/\", train=True,download=True)\ntest_data_MNIST = datasets.MNIST(root=\"./data_1/\",train=False,download=True)\n\ntrain_data_CIFAR10 = datasets.CIFAR10(root=\"./data_1/\", train=True,download=True)\ntest_data_CIFAR10 = datasets.CIFAR10(root=\"./data_1/\",train=False,download=True)",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
],
[
"# prepare poisoned data\n# no gpu, thus device is only cpu\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# if needed, more transform could be put here,\n# for example, normalize could speed up\n# e.g.transforms = T.Compose([T.Normalize()])\ntransforms = None\n\n# MNIST\npoisoned_train_data_MNIST = poisoned_dataset(train_data_MNIST, fakelabel=0, portion=0.1, transform=transforms, device=device,dataname='MNIST')\nclean_test_data_MNIST = poisoned_dataset(test_data_MNIST, fakelabel=0, portion=0, transform=transforms, device=device,dataname='MNIST')\npoisoned_test_data_MNIST = poisoned_dataset(test_data_MNIST, fakelabel=0, portion=1, transform=transforms, device=device,dataname='MNIST')\n\n#CIFAR10\npoisoned_train_data_CIFAR10 = poisoned_dataset(train_data_CIFAR10, fakelabel=0, portion=0.1, device=device,dataname='CIFAR10')\nclean_test_data_CIFAR10 = poisoned_dataset(test_data_CIFAR10, 0, portion=0, device=device,dataname='CIFAR10')\npoisoned_test_data_CIFAR10 = poisoned_dataset(test_data_CIFAR10, 0, portion=1, device=device,dataname='CIFAR10')",
"Generating Backdoor Images:\n"
],
[
"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n# check if really poisoned\n# step 1. image in train data\ntrain_poisoned_sample_index_MNIST = poisoned_train_data_MNIST.poison_index\ntrain_poisoned_sample_index_CIFAR10 = poisoned_train_data_CIFAR10.poison_index\n\nindex_MNIST = train_poisoned_sample_index_MNIST[7]\nindex_CIFAR10 = train_poisoned_sample_index_CIFAR10[7]\n\n%matplotlib inline\n#%matplotlib notebook\nprint('check MNIST:')\nprint('clean:')\nplt.figure(1)\nplt.imshow(np.array(train_data_MNIST[index_MNIST][0]),cmap = 'gray')\nprint('poisoned')\nplt.figure(2)\nplt.imshow(poisoned_train_data_MNIST.dataset[index_MNIST][0],cmap = 'gray')\nprint('check MNIST:')\nprint('clean:')\nplt.figure(3)\nplt.imshow(np.array(train_data_CIFAR10[index_CIFAR10][0]))\nprint('poisoned')\nplt.figure(4)\nplt.imshow(poisoned_train_data_CIFAR10.dataset[index_CIFAR10][0])",
"check MNIST:\nclean:\n"
],
[
"# step 2. label in test data\n# the label should all be 0\ntest_poisoned_sample_index_MNIST = poisoned_test_data_MNIST.poison_index\ntest_poisoned_sample_index_CIFAR10 = poisoned_test_data_CIFAR10.poison_index\n\nfor i in test_poisoned_sample_index_MNIST:\n if poisoned_test_data_MNIST.dataset[i][1] != 0:\n print('Error! In: ',i)\nprint('Finish!')\n \nfor i in test_poisoned_sample_index_CIFAR10: \n if poisoned_test_data_CIFAR10.dataset[i][1] != 0:\n print('Error! In: ',i)\nprint('Finish!')",
"Finish!\nFinish!\n"
],
[
"# training on MNIST\n# no gpu, thus device is only cpu\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# load data\ntrain_poisoned_data_loader = DataLoader(dataset=poisoned_train_data_MNIST,\n batch_size=64,\n shuffle=True)\ntest_data_clean_loader = DataLoader(dataset=clean_test_data_MNIST,\n batch_size=64,\n shuffle=True)\ntest_data_poisoned_loader = DataLoader(dataset=poisoned_test_data_MNIST,\n batch_size=64,\n shuffle=True)\n\n# load model\n# if use MNIST, inputchannels=1, if use CIFAR10, inputchannels=3, both output classes = 10\n# related info could be get by using input_channels=train_data_loader.dataset.channels, output_num=train_data_loader.dataset.class_num\nbadnet = BadNet(inputchannels=1, outputclasses=10).to(device)\n# settings\ncriterion = nn.MSELoss()\nsgd = optim.SGD(badnet.parameters(), lr=0.001, momentum=0.9)\nepoch = 50\n\n# train\nprint(\"start training: \")\nfor i in range(epoch):\n loss_train = train(badnet, train_poisoned_data_loader, criterion, sgd)\n acc_train = evaluation(badnet, train_poisoned_data_loader, batch_size=64)\n acc_test_clean = evaluation(badnet, test_data_clean_loader, batch_size=64)\n acc_test_poisoned = evaluation(badnet, test_data_poisoned_loader, batch_size=64)\n print(\"epoch%d loss: %.5f training poisoned accuracy: %.5f testing clean accuracy: %.5f testing poisoned accuracy: %.5f\"\\\n % (i + 1, loss_train, acc_train, acc_test_clean, acc_test_poisoned))\n torch.save(badnet.state_dict(), \"./models/badnet_MNIST.pth\")",
"start training: \n"
],
[
"# training on CIFAR10\n# no gpu, thus device is only cpu\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# load data\ntrain_poisoned_data_loader = DataLoader(dataset=poisoned_train_data_CIFAR10,\n batch_size=64,\n shuffle=True)\ntest_data_clean_loader = DataLoader(dataset=clean_test_data_CIFAR10,\n batch_size=64,\n shuffle=True)\ntest_data_poisoned_loader = DataLoader(dataset=poisoned_test_data_CIFAR10,\n batch_size=64,\n shuffle=True)\n\n# load model\n# if use MNIST, inputchannels=1, if use CIFAR10, inputchannels=3, both output classes = 10\nbadnet = BadNet(inputchannels=3, outputclasses=10).to(device)\n# settings\ncriterion = nn.MSELoss()\nsgd = optim.SGD(badnet.parameters(), lr=0.001, momentum=0.9)\nepoch = 50\n\n# train\nprint(\"start training: \")\nfor i in range(epoch):\n loss_train = train(badnet, train_poisoned_data_loader, criterion, sgd)\n acc_train = evaluation(badnet, train_poisoned_data_loader)\n acc_test_clean = evaluation(badnet, test_data_clean_loader, batch_size=64)\n acc_test_poisoned = evaluation(badnet, test_data_poisoned_loader, batch_size=64)\n \n print(\"epoch%d loss: %.5f training poisoned accuracy: %.5f testing clean accuracy: %.5f testing poisoned accuracy: %.5f\"\\\n % (i + 1, loss_train, acc_train, acc_test_clean, acc_test_poisoned))\n torch.save(badnet.state_dict(), \"./models/badnet_CIFAR10.pth\")",
"start training: \n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f43de925ec80eda7ba54e0cddfa558c65aa64a | 2,535 | ipynb | Jupyter Notebook | working introduction/05 - e) APL Errors.ipynb | RojerGS/dyalog-jupyter-notebooks | 21b8cd5a52ce5d561214398e0c60118dc9d0c635 | [
"MIT"
] | 26 | 2018-07-24T07:42:17.000Z | 2022-03-25T01:24:45.000Z | working introduction/05 - e) APL Errors.ipynb | RojerGS/dyalog-jupyter-notebooks | 21b8cd5a52ce5d561214398e0c60118dc9d0c635 | [
"MIT"
] | 6 | 2018-07-23T14:42:49.000Z | 2022-02-28T13:51:49.000Z | working introduction/05 - e) APL Errors.ipynb | RojerGS/dyalog-jupyter-notebooks | 21b8cd5a52ce5d561214398e0c60118dc9d0c635 | [
"MIT"
] | 15 | 2018-09-08T06:26:48.000Z | 2022-03-06T00:02:46.000Z | 23.045455 | 300 | 0.576331 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f43fb3d695de00ee25e7743bfe930a867325ac | 80,923 | ipynb | Jupyter Notebook | Visualization.ipynb | jeongwhanchoi/CarND-Behavioral-Cloning | 2e8251aca7e42fd46c84b38fd1964f4b87306cf7 | [
"MIT"
] | null | null | null | Visualization.ipynb | jeongwhanchoi/CarND-Behavioral-Cloning | 2e8251aca7e42fd46c84b38fd1964f4b87306cf7 | [
"MIT"
] | null | null | null | Visualization.ipynb | jeongwhanchoi/CarND-Behavioral-Cloning | 2e8251aca7e42fd46c84b38fd1964f4b87306cf7 | [
"MIT"
] | null | null | null | 249.762346 | 38,572 | 0.88882 | [
[
[
"from IPython.display import display, Image, SVG\nfrom keras.models import load_model\n\ndef loadModel(modelPath):\n \"\"\"\n Loads the model `modelPath`.\n \"\"\"\n model = load_model(modelPath)\n return model",
"Using TensorFlow backend.\n"
],
[
"def getLayerConfig(layer):\n \"\"\"\n Extract configuration from `layer`.\n \"\"\"\n layerType = layer.__class__.__name__\n output = { 'type': layerType }\n config = layer.get_config()\n if layerType == 'Lambda':\n _, x, y, d = config['batch_input_shape']\n output['input'] = (x, y, d)\n if layerType == 'Cropping2D':\n output['cropping'] = config['cropping']\n if layerType == 'Convolution2D':\n output['activation'] = config['activation']\n output['strides'] = config['subsample']\n output['filters'] = config['nb_filter']\n output['kernel'] = ( config['nb_col'], config['nb_row'] )\n if layerType == 'Dense':\n output['activation'] = config['activation']\n output['output'] = config['output_dim']\n output['input'] = config['input_dim']\n return output\n\nfrom functools import reduce\ndef compressLayers(layers):\n \"\"\"\n Compress the common layers into a single structure for visualization.\n \"\"\"\n def reductor(acc, layer):\n if len(acc) == 0:\n acc.append(layer)\n return acc\n \n last = acc[-1]\n if last['type'] == layer['type']:\n try:\n last['items'].append(layer)\n except KeyError:\n acc[-1] = { 'type': layer['type'], 'items': [last, layer]}\n else:\n acc.append(layer)\n return acc\n \n return reduce(reductor, layers, [])",
"_____no_output_____"
],
[
"import graphviz as gv\nimport functools\nimport json",
"_____no_output_____"
],
[
"def createNode(nodeName, layer, g, style='filled', fillcolor='white', fontcolor='black'):\n \"\"\"\n Creates a node with the information from `layer` in the `g`.\n \"\"\"\n type = layer['type']\n label = type + '\\n'\n for key, value in layer.items():\n if (key != 'type'):\n label += '{}: {}\\n'.format(key, value)\n g.node(nodeName, label=label, style=style, fillcolor=fillcolor, fontcolor=fontcolor)\n\ndef visualizeLayers(layers, outputPath):\n \"\"\"\n Visualize `layers` and store the image at `outputPath`.\n \"\"\"\n fillcolors = { 'Convolution2D':'#AAAAAA', 'Dense':'#006699' }\n g = gv.Digraph(format='png')\n for index, layer in enumerate(layers):\n nodeName = str(index)\n try:\n items = layer['items']\n subGraphType = layer['type']\n fillcolor = fillcolors[subGraphType]\n g.node(nodeName, label=subGraphType, style='filled', fillcolor=fillcolor, fontcolor='white')\n subG = gv.Digraph(format='png')\n for i, subLayer in enumerate(items):\n subNodeName = nodeName + str(i)\n createNode(subNodeName, subLayer, subG, fillcolor=fillcolor, fontcolor='white')\n if i != 0:\n subG.edge(nodeName + str(i - 1), subNodeName)\n \n g.subgraph(subG)\n \n except KeyError:\n createNode(nodeName, layer, g)\n if index != 0:\n g.edge(str(index - 1), nodeName)\n \n styles = {\n 'graph': {\n },\n 'nodes': {\n 'fontname': 'Helvetica',\n 'shape': 'rectangle'\n },\n 'edges': {\n 'arrowhead': 'open'\n }\n }\n g.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n g.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n g.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n \n g.render(outputPath)\n figure = Image(outputPath + '.png')\n display(figure)",
"_____no_output_____"
],
[
"def visualizeModel(modelPath, imagePath):\n \"\"\"\n Visualize the model found at `modelPath` to a SVG at `imagePath`\n \"\"\"\n model = loadModel(modelPath)\n layerData = list(map(getLayerConfig, model.layers))\n compressedLayers = compressLayers(layerData)\n visualizeLayers(compressedLayers, imagePath)",
"_____no_output_____"
],
[
"visualizeModel('models/model.h5', 'img/model')",
"_____no_output_____"
],
[
"loadModel('models/model.h5').summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\nlambda_1 (Lambda) (None, 160, 320, 3) 0 lambda_input_2[0][0] \n____________________________________________________________________________________________________\ncropping2d_1 (Cropping2D) (None, 85, 320, 3) 0 lambda_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_1 (Convolution2D) (None, 41, 158, 24) 1824 cropping2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_2 (Convolution2D) (None, 19, 77, 36) 21636 convolution2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_3 (Convolution2D) (None, 8, 37, 48) 43248 convolution2d_2[0][0] \n____________________________________________________________________________________________________\nconvolution2d_4 (Convolution2D) (None, 6, 35, 64) 27712 convolution2d_3[0][0] \n____________________________________________________________________________________________________\nconvolution2d_5 (Convolution2D) (None, 4, 33, 64) 36928 convolution2d_4[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 8448) 0 convolution2d_5[0][0] \n____________________________________________________________________________________________________\ndense_1 (Dense) (None, 1164) 9834636 flatten_1[0][0] \n____________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 1164) 0 dense_1[0][0] \n____________________________________________________________________________________________________\ndense_2 (Dense) (None, 100) 116500 dropout_1[0][0] \n____________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 100) 0 dense_2[0][0] \n____________________________________________________________________________________________________\ndense_3 (Dense) (None, 50) 5050 dropout_2[0][0] \n____________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 50) 0 dense_3[0][0] \n____________________________________________________________________________________________________\ndense_4 (Dense) (None, 10) 510 dropout_3[0][0] \n____________________________________________________________________________________________________\ndense_5 (Dense) (None, 1) 11 dense_4[0][0] \n====================================================================================================\nTotal params: 10,088,055\nTrainable params: 10,088,055\nNon-trainable params: 0\n____________________________________________________________________________________________________\n"
],
[
"from keras.models import Model\nimport matplotlib.pyplot as plt\n\n# dict_keys(['loss', 'val_loss'])\n# Loss\nloss = [0.0488, 0.0309, 0.0281, 0.0260, 0.0242, 0.0226, 0.0218, 0.0206, 0.0201, 0.0191]\n# Validation Loss\nvalid_loss = [0.0332, 0.0256, 0.0253, 0.0232, 0.0249, 0.0226, 0.0228, 0.0214, 0.0252, 0.0186]\n\n### plot the training and validation loss for each epoch\nplt.plot(loss)\nplt.plot(valid_loss)\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.grid(color='black', linestyle='--', linewidth=1)\nplt.show()",
"_____no_output_____"
]
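,
[
"The hard-coded `loss` and `valid_loss` lists above were evidently copied out of a Keras `History` object (its `history` dict has the keys `loss` and `val_loss`, as the comment notes). If the `History` object returned by `model.fit()` is still in memory, the same figure can be produced directly; a sketch, assuming a variable `history` holds that object:\n\n```python\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n```",
"_____no_output_____"
]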
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f440dc3ba2983cc6c1b0d549087eb8b5370a22 | 14,118 | ipynb | Jupyter Notebook | isaid/iSAID - Build Graph - ORCID.ipynb | skybristol/pylinkedcmd | bc3a416b6d5abead7b62706f1c47762daf22bb78 | [
"Unlicense"
] | 5 | 2020-08-05T20:48:21.000Z | 2021-07-21T23:10:59.000Z | isaid/iSAID - Build Graph - ORCID.ipynb | skybristol/pylinkedcmd | bc3a416b6d5abead7b62706f1c47762daf22bb78 | [
"Unlicense"
] | 1 | 2020-08-20T13:40:17.000Z | 2021-08-11T18:04:40.000Z | isaid/iSAID - Build Graph - ORCID.ipynb | skybristol/pylinkedcmd | bc3a416b6d5abead7b62706f1c47762daf22bb78 | [
"Unlicense"
] | 2 | 2020-09-03T03:48:42.000Z | 2021-03-03T21:13:24.000Z | 39.216667 | 915 | 0.463805 | [
[
[
"# ORCID People\n\nMost USGS staff who are publishing authors, data creators, or otherwise contributors to some published works now have ORCID identifiers as a matter of policy. Much more than just a convenient globally unique and persistent identifier, the ORCID system and its evolving schema provides a way for us to get at a wealth of additional useful details and linkages on people. In our metadata harvesting process, we regularly identify ORCIDs of interest from across various systems, queue those up for processing, and then retrieve ORCID details into a cache. Content negotiation against orcid.org is pretty reliable, but we still encounter a number of error conditions that are useful to pre-process through and have the need for occasional re-processing of information into our graph or other forms of this information. This makes caching the ORCID data for those identities we care about a reasonable practice.\n\nWe split the process up here just a bit; first pulling in anything new or updated in terms of basic identifying information. In many cases, we are already going to have encountered a person and included their ORCID identifier in properties.\n\n# Note\nI need to come back to this one and break out entity creation from relationship creation.",
"_____no_output_____"
]
],
[
[
"import isaid_helpers\nimport pandas as pd",
"_____no_output_____"
],
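[
"# Hedged sketch (not the project's actual harvester): one way to retrieve an\n# ORCID record via content negotiation, with the kind of error handling the\n# caching process described above needs. The Accept header value here is an\n# assumption, not a project convention.\nimport requests\n\ndef fetch_orcid_record(orcid_id, timeout=10):\n    url = f\"https://orcid.org/{orcid_id}\"\n    try:\n        r = requests.get(url, headers={\"Accept\": \"application/ld+json\"}, timeout=timeout)\n        r.raise_for_status()\n        return r.json()\n    except (requests.RequestException, ValueError) as err:\n        # in the real pipeline, failures would be logged and the id re-queued\n        print(f\"failed to resolve {orcid_id}: {err}\")\n        return None",
"_____no_output_____"
],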
[
"pd.read_csv(isaid_helpers.f_graphable_orcid).head()",
"_____no_output_____"
],
[
"%%time\nwith isaid_helpers.graph_driver.session(database=isaid_helpers.graphdb) as session:\n session.run(\"\"\"\n LOAD CSV WITH HEADERS FROM '%(source_path)s/%(source_file)s' AS row\n WITH row\n MATCH (p:Person {orcid: row.orcid})\n \n WITH p, row\n WHERE row.entity_type = \"Organization\"\n MERGE (o:Organization {name: row.name})\n ON CREATE\n SET o.alternate_name = row.alternate_name,\n o.grid_id = row.grid_id,\n o.url = row.url,\n o.doi = row.doi,\n o.ringgold_id = row.ringgold_id\n MERGE (p)-[rel:AFFILIATED_WITH]->(o)\n SET rel.date_qualifier = row.date_qualifier,\n rel.reference = row.reference\n \"\"\" % {\n \"source_path\": isaid_helpers.local_cache_path,\n \"source_file\": isaid_helpers.f_graphable_orcid\n })",
"CPU times: user 2.46 ms, sys: 1.76 ms, total: 4.22 ms\nWall time: 1min 10s\n"
],
[
"%%time\nwith isaid_helpers.graph_driver.session(database=isaid_helpers.graphdb) as session:\n session.run(\"\"\"\n LOAD CSV WITH HEADERS FROM '%(source_path)s/%(source_file)s' AS row\n WITH row\n MATCH (p:Person {orcid: row.orcid})\n \n WITH p, row\n WHERE row.entity_type = \"CreativeWork\" AND NOT row.doi IS NULL\n MERGE (w:CreativeWork {doi: row.doi})\n ON CREATE\n SET w.url = row.url,\n w.name = row.name,\n w.source = \"ORCID\"\n ON MATCH\n SET w.url = row.url,\n w.name = row.name\n\n WITH p, w, row\n WHERE row.rel_type = \"AUTHOR_OF\"\n MERGE (p)-[rel:AUTHOR_OF]->(w)\n SET rel.date_qualifier = row.date_qualifier,\n rel.reference = row.reference\n\n WITH p, w, row\n WHERE row.rel_type = \"FUNDER_OF\"\n MERGE (p)-[rel:FUNDER_OF]->(w)\n SET rel.date_qualifier = row.date_qualifier,\n rel.reference = row.reference\n \"\"\" % {\n \"source_path\": isaid_helpers.local_cache_path,\n \"source_file\": isaid_helpers.f_graphable_orcid\n })",
"CPU times: user 5.03 ms, sys: 3.48 ms, total: 8.51 ms\nWall time: 3min 54s\n"
],
[
"%%time\nwith isaid_helpers.graph_driver.session(database=isaid_helpers.graphdb) as session:\n session.run(\"\"\"\n LOAD CSV WITH HEADERS FROM '%(source_path)s/%(source_file)s' AS row\n WITH row\n MATCH (p:Person {orcid: row.orcid})\n \n WITH p, row\n WHERE row.entity_type = \"CreativeWork\" AND row.doi IS NULL\n MERGE (w:CreativeWork {name: row.name})\n ON CREATE\n SET w.url = row.url,\n w.source = \"ORCID\"\n\n WITH p, w, row\n WHERE row.rel_type = \"AUTHOR_OF\"\n MERGE (p)-[rel:AUTHOR_OF]->(w)\n SET rel.date_qualifier = row.date_qualifier,\n rel.reference = row.reference\n\n WITH p, w, row\n WHERE row.rel_type = \"FUNDER_OF\"\n MERGE (p)-[rel:FUNDER_OF]->(w)\n SET rel.date_qualifier = row.date_qualifier,\n rel.reference = row.reference\n \"\"\" % {\n \"source_path\": isaid_helpers.local_cache_path,\n \"source_file\": isaid_helpers.f_graphable_orcid\n })",
"CPU times: user 7.56 ms, sys: 4.46 ms, total: 12 ms\nWall time: 5min 44s\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4450b3a311be90d5476cb2c0895ccee454d29 | 31,334 | ipynb | Jupyter Notebook | DCRI/Staff/hsm/Baseline.ipynb | Kao-PMP/Pilot_Project | 79451e75b64d832644a4967f828bf5d685cd5a2d | [
"Apache-2.0"
] | null | null | null | DCRI/Staff/hsm/Baseline.ipynb | Kao-PMP/Pilot_Project | 79451e75b64d832644a4967f828bf5d685cd5a2d | [
"Apache-2.0"
] | null | null | null | DCRI/Staff/hsm/Baseline.ipynb | Kao-PMP/Pilot_Project | 79451e75b64d832644a4967f828bf5d685cd5a2d | [
"Apache-2.0"
] | null | null | null | 88.764873 | 7,991 | 0.529138 | [
[
[
"setwd('/mnt/workspace')\nlibrary(dplyr)\nlibrary(boot)\ninstall.packages('table1')\nlibrary(table1)\n#library(gmodels)\n#getwd()\nall=read.csv('combined-5.csv')",
"Installing package into '/usr/lib64/R/library'\n(as 'lib' is unspecified)\nUpdating HTML index of packages in '.Library'\nMaking 'packages.html' ... done\n\nAttaching package: 'table1'\n\nThe following objects are masked from 'package:Hmisc':\n\n label, label<-, units\n\nThe following objects are masked from 'package:base':\n\n units, units<-\n\n"
],
[
"table1(~BP.s + BP.d + factor(HxDM) | study, data=all)",
"_____no_output_____"
],
[
"#Mean = function(x) base::mean(x, na.rm=TRUE)\n#SD = function(x) stats::sd(x, na.rm=TRUE)\n\ncont_mean = function(x){\n Meanx=round(mean(x, na.rm=TRUE), digits=1)\n SDx=round(sd(x, na.rm=TRUE), digits=1)\n val = paste(Meanx, \" (\", SDx, \")\" , sep=\"\")\n return(val)\n}\ncont_med = function(x){\n med=quantile(x, c(0.25, 0.5, 0.75), na.rm=TRUE)\n val=paste(med[2], \"(\", med[1],\", \",med[3],\")\" , sep=\"\")\n return(val)\n}\ncat_func = function(x){\n tab=CrossTable(all$study, x, na.rm=TRUE)\n n = tab$t\n prop=round((tab$prop.row)*100, digits=2)\n val = paste(n[,2], \" (\", prop[,2],\"%)\", sep=\"\")\n val1= t(data.frame(study=rownames(n), label=val))\n return(val1)\n}\ncat_func(all$HxDM)\n\n#tab=CrossTable(all$study, all$HxDM, na.rm=TRUE)\ntabular(Heading(\"Systolic BP\")*(BP.s)~ (study + 1)*Heading()*(cont_med), data=all)\n\n",
"\n \n Cell Contents\n|-------------------------|\n| N |\n| Chi-square contribution |\n| N / Row Total |\n| N / Col Total |\n| N / Table Total |\n|-------------------------|\n\n \nTotal Observations in Table: 17107 \n\n \n | x \n all$study | 0 | 1 | Row Total | \n-------------|-----------|-----------|-----------|\n ACCORD | 0 | 10251 | 10251 | \n | 2744.466 | 1003.405 | | \n | 0.000 | 1.000 | 0.599 | \n | 0.000 | 0.818 | | \n | 0.000 | 0.599 | | \n-------------|-----------|-----------|-----------|\n AIMHIGH | 2256 | 1158 | 3414 | \n | 1970.324 | 720.371 | | \n | 0.661 | 0.339 | 0.200 | \n | 0.493 | 0.092 | | \n | 0.132 | 0.068 | | \n-------------|-----------|-----------|-----------|\n ALLHAT | 0 | 0 | 0 | \n | NaN | NaN | | \n | NaN | NaN | 0.000 | \n | 0.000 | 0.000 | | \n | 0.000 | 0.000 | | \n-------------|-----------|-----------|-----------|\n BARI2D | 0 | 0 | 0 | \n | NaN | NaN | | \n | NaN | NaN | 0.000 | \n | 0.000 | 0.000 | | \n | 0.000 | 0.000 | | \n-------------|-----------|-----------|-----------|\n TOPCAT | 2324 | 1118 | 3442 | \n | 2134.489 | 780.391 | | \n | 0.675 | 0.325 | 0.201 | \n | 0.507 | 0.089 | | \n | 0.136 | 0.065 | | \n-------------|-----------|-----------|-----------|\nColumn Total | 4580 | 12527 | 17107 | \n | 0.268 | 0.732 | | \n-------------|-----------|-----------|-----------|\n\n \n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7f44addbefb845a627efe626283932d85b29102 | 255,698 | ipynb | Jupyter Notebook | frozenYoghourt/Qbraid - Implementing Improved Multiple Controlled Toffoli.ipynb | mehilagarwal/qchack | 35d08b64f8441e642aa588f750b88df5f1bb4fc5 | [
"Apache-2.0"
] | 7 | 2021-04-10T14:26:36.000Z | 2022-02-02T17:11:17.000Z | frozenYoghourt/Qbraid - Implementing Improved Multiple Controlled Toffoli.ipynb | Parv-01/qchack | 951b038e035db3aa4074e724ff52c0911e68139a | [
"Apache-2.0"
] | 4 | 2021-04-11T03:29:12.000Z | 2021-04-11T14:13:06.000Z | frozenYoghourt/Qbraid - Implementing Improved Multiple Controlled Toffoli.ipynb | Parv-01/qchack | 951b038e035db3aa4074e724ff52c0911e68139a | [
"Apache-2.0"
] | 41 | 2021-04-10T14:43:08.000Z | 2021-11-01T05:40:05.000Z | 163.280971 | 100,249 | 0.864328 | [
[
[
"### Tables of Content\n\n#### Linear Algebra Tools",
"_____no_output_____"
],
[
"1. Operator Matrices\n - Pauli: I, X, Y, Z\n - Hadamard: H\n - Phase: P\n - Sqrt(X): SX\n - Sqrt(Z): S\n - Sqrt(H): SH\n - 4rt (Z): T\n - X root: Xrt(s)\n - H root: Hrt(s)\n - Rotation Matrices: Rx($\\theta$), Ry($\\theta$), Rz($\\theta$)\n - U3 Matrix: U3($\\theta, \\phi, \\lambda$)\n - Controlled-Not: CX\n \n</br>\n\n2. Common Statevectors\n - $|0\\rangle$: zero\n - $|1\\rangle$: one\n - $|+\\rangle$: plus\n - $|-\\rangle$: minus\n - $| \\uparrow \\rangle$: up\n - $| \\downarrow \\rangle$: down\n - Bell States: B00, B01, B10, B11\n \n</br>\n\n3. Lambda Methods\n - ndarray to list: to_list(array)\n - tensor: *****initial_state\n - matmul: *****initial_state\n \n</br>\n \n4. Full Methods\n - Calculate Hermitian Conjugate: dagger(mat)\n - Build CU matrix: cu_matrix(no_qubits, control, target, U, little_edian)\n - Find RX, RY for arbitrary U3: angles_from_state_vectors(output_statevector)\n \n</br>\n\n5. Visualizations\n - view(mat, rounding = 10)\n",
"_____no_output_____"
],
[
"#### Qiskit Tools",
"_____no_output_____"
],
[
"1. Linear Algebra\n - Short-hand QC: q(*****regs, name=None, global_phase=0)\n - Multi-controlled Unitary: control_unitary(circ, unitary, *****controls, target)\n - Control Phase: control_phase(circ, angle, control_bit, target_bit, recip=True, pi_on=True)\n\n</br>\n\n2. Visualizations\n - Draw Circuit: milk(circ)\n - Draw Transpiled Circuit: dtp(circ, print_details = True, visual = True, return_values = False)\n - Get Unitary / Statevector Function: get(circ, types = 'unitary', nice = True)\n - Displaying Histogram / Bloch / Counts: sim(circ, visual = 'hist')\n \n</br>\n\n3. Toffoli Optimizaton Specific\n - Unitary Checker: unitary_check(test_unitary)\n - Multi-Hadamard Composition: h_relief(n, no_h)",
"_____no_output_____"
],
[
"### Import",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport sympy as sp\nfrom sympy.solvers.solveset import linsolve\n\nimport matplotlib\nimport matplotlib.pyplot as plt \nmatplotlib.use('Agg')\n\nfrom sympy import Matrix, init_printing\n\nimport qiskit\nfrom qiskit import *\nfrom qiskit.aqua.circuits import *\n\n# Representing Data\nfrom qiskit.providers.aer import QasmSimulator, StatevectorSimulator, UnitarySimulator\nfrom qiskit.tools.visualization import plot_histogram, plot_state_city, plot_bloch_multivector\n\n# Monitor Job on Real Machine\nfrom qiskit.tools.monitor import job_monitor\n\nfrom functools import reduce # perform sucessive tensor product\n\n# Calculating cost\nfrom sklearn.metrics import mean_squared_error\n\n# Generating random unitary matrix\nfrom scipy.stats import unitary_group\n\n# Measure run time\nimport time\n\n# Almost Equal\nfrom numpy.testing import assert_almost_equal as aae",
"Duplicate key in file '/Users/minhpham/.matplotlib/matplotlibrc' line #2.\nDuplicate key in file '/Users/minhpham/.matplotlib/matplotlibrc' line #3.\n"
]
],
[
[
"### Linear Algebra Tools",
"_____no_output_____"
]
],
[
[
"# Matrices\nI = np.array([[1, 0], [0, 1]])\nX = np.array([[0, 1], [1, 0]])\nY = np.array([[0, -1j], [1j, 0]])\nZ = np.array([[1, 0], [0, -1]])\nH = 1/np.sqrt(2)*np.array([[1, 1], [1, -1]])\nP = lambda theta: np.array([[1, 0], [0, np.exp(1j*theta)]])\n\n# sqrt(X)\nSX = 1/2 * np.array([[1+1j, 1-1j], [1-1j, 1+1j]])\n\n# sqrt(Z)\nS = np.array([[1, 0], [0, 1j]])\n\n# sqrt(H)\nSH = (1j/4-1/4)*np.array([[np.sqrt(2) + 2j, np.sqrt(2)], [np.sqrt(2), -np.sqrt(2)+2j]])\n\n# 4th root of Z\nT = np.array([[1, 0], [0, 1/np.sqrt(2) + 1/np.sqrt(2)*1j]])\n\n# X power\nXp = lambda t: 1/2 * np.array([[1, 1], [1, 1]]) + np.exp(1j*np.pi*t)/(2) * np.array([[1, -1], [-1, 1]])\n\n# H power\nHp = lambda t: np.exp(-1j*np.pi*t/2) * np.array([[np.cos(np.pi*t/2) + 1j/np.sqrt(2)* np.sin(np.pi*t/2), 1j/np.sqrt(2) * np.sin(np.pi*t/2)], \n [1j/np.sqrt(2) * np.sin(np.pi*t/2), np.cos(np.pi*t/2)-1j/np.sqrt(2)* np.sin(np.pi*t/2)]])\n\nCX = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n\n# Rn Matrix Function\nRx = lambda theta: np.array([[np.cos(theta/2), -1j*np.sin(theta/2)], [-1j*np.sin(theta/2), np.cos(theta/2)]])\nRy = lambda theta: np.array([[np.cos(theta/2), -np.sin(theta/2)], [np.sin(theta/2), np.cos(theta/2)]])\nRz = lambda theta: np.array([[np.exp(-1j*theta/2), 0], [0, np.exp(1j*theta/2)]])\n\n# U3 Matrix\nU3 = lambda theta, phi, lam: np.array([[np.cos(theta/2), -np.exp(1j*lam)*np.sin(theta/2)], \n [np.exp(1j*phi)*np.sin(theta/2), np.exp(1j*lam + 1j*phi)*np.cos(theta/2)]])\n\n# Eigenvectors of Pauli Matrices\nzero = np.array([[1], [0]]) # Z plus basis state\none = np.array([[0], [1]]) # Z plus basis state\n\nplus = np.array([[1], [1]])/np.sqrt(2) # X plus basis state\nminus = np.array([[1], [-1]])/np.sqrt(2) # X minus basis state\n\nup = np.array([[1], [1j]])/np.sqrt(2) # Y plus basis state\ndown = np.array([[1], [-1j]])/np.sqrt(2) # Y plus basis state\n\n# Bell States\nB00 = np.array([[1], [0], [0], [1]])/np.sqrt(2) # Bell of 00\nB01 = np.array([[1], [0], [0], [-1]])/np.sqrt(2) # Bell of 01\nB10 = np.array([[0], [1], [1], [0]])/np.sqrt(2) # Bell of 10\nB11 = np.array([[0], [-1], [1], [0]])/np.sqrt(2) # Bell of 11\n\n# ndarray to list\nto_list = lambda array: list(np.squeeze(array))\n\n# Tensor Product of 2+ matrices/ vectors\ntensor = lambda *initial_state: reduce(lambda x, y: np.kron(x, y), initial_state)\n\n# Matrix Multiplicaton of 2+ matrices / vectors\nmat_mul = lambda *initial_state: reduce(lambda x, y: np.dot(x, y), initial_state)",
"_____no_output_____"
]
],
[
[
"###### Calculate Hermitian Conjugate",
"_____no_output_____"
]
],
[
[
"def dagger(mat):\n \n # Calculate Hermitian conjugate\n mat_dagger = np.conj(mat.T)\n \n # Assert Hermitian identity\n aae(np.dot(mat_dagger, mat), np.identity(mat.shape[0]))\n \n return mat_dagger",
"_____no_output_____"
]
],
[
[
"###### CU Matrix",
"_____no_output_____"
]
],
[
[
"def cu_matrix(no_qubits, control, target, U, little_edian = True):\n \n \"\"\"\n Manually build the unitary matrix for non-adjacent CX gates\n \n Parameters:\n -----------\n no_qubits: int\n Number of qubits in the circuit\n control: int\n Index of the control qubit (1st qubit is index 0)\n target: int\n Index of the target qubit (1st qubit is index 0)\n U: ndarray\n Target unitary matrix\n edian: bool (True: qiskit convention)\n Qubits order convention\n \n Returns:\n --------\n cx_out:\n Unitary matrix for CU gate\n \"\"\"\n \n left = [I]*no_qubits\n right = [I]*no_qubits\n \n left[control] = np.dot(zero, zero.T)\n right[control] = np.dot(one, one.T)\n\n right[target] = U\n\n if little_edian:\n cx_out = tensor(*reversed(left)) + tensor(*reversed(right))\n else:\n cx_out = tensor(*left) + tensor(*right)\n \n # This returns a unitary in qiskit 'little eddian', to switch back, simply switch the target for control\n \n return cx_out",
"_____no_output_____"
]
],
[
[
"###### Angles from Statevector",
"_____no_output_____"
]
],
[
[
"def angles_from_statevectors(output_statevector):\n \n \"\"\"\n Calculate correct x, y rotation angles from an arbitrary output statevector\n \n Paramters:\n ----------\n output_statevector: ndarray\n Desired output state\n \n Returns:\n --------\n phi: float\n Angle to rotate about the y-axis [0, 2pi)\n theta: float\n Angle to rotate about the x-axis [0, 2pi)\n \n \"\"\"\n \n # Extract the components\n x, z = output_statevector.real\n y, w = output_statevector.imag\n \n # Calculate the correct angles\n phi = 2*np.arctan2(z,x)[0]\n theta = 2*np.arctan2(y,z)[0]\n \n print(f'phi: {phi}')\n print(f'theta: {theta}')\n \n return phi, theta",
"_____no_output_____"
]
],
[
[
"###### View Matrix",
"_____no_output_____"
]
],
[
[
"def view(mat, rounding = 10):\n display(Matrix(np.round(mat, rounding)))",
"_____no_output_____"
]
],
[
[
"### Qiskit Tools",
"_____no_output_____"
],
[
"###### Short-hand Qiskit Circuit",
"_____no_output_____"
]
],
[
[
"q = lambda *regs, name=None, global_phase=0: QuantumCircuit(*regs, name=None, global_phase=0)",
"_____no_output_____"
]
],
[
[
"###### Controlled Unitary",
"_____no_output_____"
]
],
[
[
"def control_unitary(circ, unitary, controls, target):\n \n \"\"\"\n Composed a multi-controlled single unitary target gate\n \n Parameters:\n -----------\n circ: QuantumCircuit\n Qiskit circuit of appropriate size, no less qubit than the size of the controlled gate\n unitary: ndarray of (2, 2)\n Unitary operator for the target qubit\n controls: list\n Indices of controlled qubit on the original circuit\n target: int\n Index of target bit\n \n \n Returns:\n --------\n new_circ: QuantumCircuit\n Composed circuit with unitary target\n \"\"\"\n\n # Get info about circuit parameters\n \n no_controls = len(controls)\n unitary_size = np.log2(len(unitary))\n\n\n # Build unitary circuit\n\n qc = QuantumCircuit(unitary_size)\n qc.unitary(unitary, range(int(unitary_size)))\n qc = qc.control(no_controls)\n\n # Composed the control part in the circuit\n\n new_circ = circ.compose(qc, (*controls, target))\n \n return new_circ",
"_____no_output_____"
]
],
[
[
"##### Controlled Phase",
"_____no_output_____"
]
],
[
[
"def control_phase(circ, angle, control_bit, target_bit, recip = True, pi_on = True):\n \n \"\"\"\n Add a controlled-phase gate\n \n Parameters:\n -----------\n circ: QuantumCircuit\n Inputted circuit\n \n angle: float\n Phase Angle\n \n control_bit: int\n Index of control bit\n \n target_bit: int\n Index of target bit\n \n recip: bool (True)\n Take the reciprocal of the angle\n \n pi_on: bool (True)\n Multiply pi to the angle\n \n Returns:\n --------\n circ: QuantumCircuit\n Circuit with built-in CP\n \n \"\"\"\n \n if recip:\n angle = 1/angle\n if pi_on:\n angle *=np.pi\n \n \n circ.cp(angle, control_bit, target_bit)\n \n return circ",
"_____no_output_____"
]
],
[
[
"###### Draw Circuit",
"_____no_output_____"
]
],
[
[
"def milk(circ):\n return circ.draw('mpl')",
"_____no_output_____"
]
],
[
[
"###### Draw Transpiled Circuit",
"_____no_output_____"
]
],
[
[
"def dtp(circ, print_details = True, nice = True, return_values = False):\n \n \"\"\"\n Draw and/or return information about the transpiled circuit\n \n Parameters:\n -----------\n circ: QuantumCircuit\n QuantumCircuit to br transpiled\n print_details: bool (True)\n Print the number of u3 and cx gates used\n nice: bool (True)\n Show the transpiled circuit\n return_values: bool (True)\n Return the number of u3 and cx gates used\n \n Returns:\n --------\n no_cx: int\n Number of cx gates used\n no_u3: int\n Number of u3 gates used\n \n \"\"\"\n \n # Transpile Circuit\n circ = transpile(circ, basis_gates= ['u3', 'cx'], optimization_level=3)\n\n # Count operations\n gates = circ.count_ops()\n\n # Compute cost\n try:\n no_u3 = gates['u3']\n except:\n no_u3 = 0\n \n try:\n no_cx = gates['cx']\n except:\n no_cx = 0\n \n cost = no_u3 + 10*no_cx\n\n if print_details:\n # Print Circuit Details\n print(f'cx: {no_cx}')\n\n print(f'u3: {no_u3}')\n print(f'Total cost: {cost}')\n \n if nice:\n return circ.draw('mpl')\n \n if return_values:\n return no_cx, no_u3",
"_____no_output_____"
]
],
[
[
"###### Get Unitary/StateVector Function",
"_____no_output_____"
]
],
[
[
"def get(circ, types = 'unitary', nice = True):\n \n \"\"\"\n This function return the statevector or the unitary of the inputted circuit\n \n Parameters:\n -----------\n circ: QuantumCircuit\n Inputted circuit without measurement gate\n types: str ('unitary')\n Get 'unitary' or 'statevector' option\n nice: bool\n Display the result nicely option or just return unitary/statevector as ndarray\n \n Returns:\n --------\n out: ndarray\n Outputted unitary of statevector\n \n \"\"\"\n \n if types == 'statevector':\n backend = BasicAer.get_backend('statevector_simulator')\n out = execute(circ, backend).result().get_statevector()\n else: \n backend = BasicAer.get_backend('unitary_simulator')\n out = execute(circ, backend).result().get_unitary()\n \n if nice:\n display(Matrix(np.round(out, 10))) \n else:\n return out",
"_____no_output_____"
]
],
[
[
"###### Displaying Histogram / Bloch / Counts",
"_____no_output_____"
]
],
[
[
"def sim(circ, visual = 'hist'):\n \n \"\"\"\n Displaying output of quantum circuit\n \n Parameters:\n -----------\n circ: QuantumCircuit\n QuantumCircuit with or without measurement gates\n visual: str ('hist')\n 'hist' (counts on histogram) or 'bloch' (statevectors on Bloch sphere) or None (get counts only)\n \n Returns:\n --------\n counts: dict\n Counts of each CBS state\n \"\"\"\n \n # Simulate circuit and display counts on a histogram\n if visual == 'hist':\n simulator = Aer.get_backend('qasm_simulator')\n results = execute(circ, simulator).result()\n counts = results.get_counts(circ)\n plot_histogram(counts)\n \n return counts\n \n # Get the statevector and display on a Bloch sphere\n elif visual == 'bloch':\n backend = BasicAer.get_backend('statevector_simulator')\n statevector = execute(circ, backend).result().get_statevector()\n get(circ)\n plot_bloch_multivector(statevector)\n \n # Just get counts\n else:\n simulator = Aer.get_backend('qasm_simulator')\n results = execute(circ, simulator).result()\n counts = results.get_counts(circ)\n \n return counts",
"_____no_output_____"
]
],
[
[
"###### Unitary Checker",
"_____no_output_____"
]
],
[
[
"def unitary_check(test_unitary, perfect = False):\n \n \"\"\"\n Check if the CnX unitary is correct\n \n Parameters:\n -----------\n test_unitary: ndarray\n Unitary generated by the circuit\n perfect: ndarray\n Account for phase difference\n \n \"\"\"\n \n # Get length of unitary\n\n if not perfect:\n test_unitary = np.abs(test_unitary)\n \n size = test_unitary.shape[0]\n \n cx_theory = np.identity(size)\n\n # Change all the difference\n cx_theory[int(size/2) - 1, size - 1] = 1\n cx_theory[size - 1, int(size/2) - 1] = 1\n cx_theory[int(size/2) -1, int(size/2) -1] = 0\n cx_theory[size - 1, size - 1] = 0\n\n # Assert Similarity\n aae(cx_theory, test_unitary)\n \n print('Unitary is correct')",
"_____no_output_____"
]
],
[
[
"# Task: Implementing Improved Multiple Controlled Toffoli",
"_____no_output_____"
],
[
"### Abstract",
"_____no_output_____"
],
[
"Multiple controlled Toffoli gates are crucial in the implementation of modular exponentiation [4], like that used in Shor's algorithm. In today's practical realm of small number of qubits devices, there is a real need for efficient realization of multiple controlled Toffoli gate for 6 to 10 controls.\n\nShende and Markov proved that the implementation of the $n$-qubit analogue of the $TOFFOLI$ requires at least $2n \\ CNOT$ gates [1]. Currently, the best known upper bound is outlined by Maslov stands at $6n-12$ with the used of $\\lceil \\frac{n-3}{2} \\rceil$ ancilla bits [2]. For implementaion without ancillae, we look at the technique outlined in Corollary 7.6 which has $\\Theta(n^2)$ complexity [3]. The aboved mention technique however, still has a high implementation cost for relatively low number of controls. This is due to the high coefficient of the $n^2$ term. ",
"_____no_output_____"
],
[
"Note that in this notebook, $n$ qubits Toffli gates will simply be referred to as $CnX$ gate where $n$ is the number of control bits.",
"_____no_output_____"
],
[
"For this project, we outline a technique for building $CnX$ gate with modulo phase shift whose unitary satisfies $UU = I$. For a few examples from $n = 2$ to $n = 15$, we provided some values to compare and contrast our circuit cost versus that of qiskit. We then postulated with high confidence the complexity of the technique to be $O(2^{\\frac{n}{2}})$. Comparing this to the quadratic technique in Corollary 7.6 of [3], we found that our circuits are superior for $n = 7, 8, ..., 11$ . At the end, we offers some possible implementation cases for our technique.",
"_____no_output_____"
],
[
"### Motivating the General Circuit",
"_____no_output_____"
],
[
"The general $CnX$ gate takes in $n+1$ qubits as inputs ($n$ controls, $1$ target). It's action on a set of qubits $\\{q_i\\}_{i = 0}^{n}$ is defined as followed.\n\n$$CnX(\\{q_i\\}_{i = 0}^{n}) = \\big{(} \\bigwedge_{i = 0}^{n-1} q_i \\big{)} \\oplus q_n$$\n\nSimply stated, the gate flips the target bit if all the controls are $1$s. For example, for $n = 2$, we have the well-known Toffoli gate",
"_____no_output_____"
]
],
[
[
"circ = q(3)\ncirc.ccx(0, 1, 2)\nmilk(circ)",
"_____no_output_____"
]
],
[
[
"And for higher $n$, $6$ for example, the circuit would take this form.",
"_____no_output_____"
]
],
[
[
"circ = q(7)\ncirc.mct(list(range(6)), 6)\nmilk(circ)",
"_____no_output_____"
]
],
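[
[
"# Sketch: recompute the \"General Cost\" column of the table below from the\n# quoted gate counts, using the convention general cost = 10*CX + U3.\ncx_counts = [6, 20, 44, 92, 188, 380, 764, 1532, 3068, 6140]\nu3_counts = [8, 22, 46, 94, 190, 382, 766, 1534, 3070, 6142]\nprint([10*c + u for c, u in zip(cx_counts, u3_counts)])",
"_____no_output_____"
]
],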
[
[
"The cost for the Qiskit implementation of $CnX$ gate from $n = 2$ to $n = 11$ are listed above in terms of the basic operations ($CX$ and $U3$). Note that the general cost is defined as $10CX + U3$.",
"_____no_output_____"
],
[
"n | CX | U3 | General Cost \n--- | --- | --- | --- \n2 | 6 | 8 | 68 \n3 | 20 | 22 | 222\n4 | 44 | 46 | 486\n5 | 92 | 94 | 1014\n6 | 188 | 190 | 2070\n7 | 380 | 382 | 4182\n8 | 764 | 766 | 8406\n9 | 1532 | 1534 | 16854\n10 | 3068 | 3070 | 33750\n11 | 6140 | 6142 | 67542",
"_____no_output_____"
],
[
"As outlined in Corolllary 7.1 [3]. The number of $CX$ grows by $3\\cdot 2^{n-1} - 4$, and $U3$ grows by $3\\cdot 2^{n-1} - 2$. Overall, we see an $O(2^n)$ complexity of the general cost.",
"_____no_output_____"
],
[
"Our technique takes advantage of the superposition identity that\n\n$$H Z H = X$$\n\nFor an arbitrary $CnX$, we split the control into two groups (one controlled by $H$, and one controlled by $Z$). If we defined the number of control bits on the $H$ gates as $a$, we have the circuit $C(a)H - C(n-a)Z - C(a)H$. An example of $n = 7, a = 3$ is shown below.",
"_____no_output_____"
]
],
[
[
"circ = q(8)\ncirc = control_unitary(circ, H, [0, 1, 2], 7)\ncirc = control_unitary(circ, Z, [3, 4, 5, 6], 7)\ncirc = control_unitary(circ, H, [0, 1, 2], 7)\n\nmilk(circ)",
"_____no_output_____"
]
],
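[
[
"# Sanity check of the identity behind this construction: H Z H = X.\n# (Uses the numpy operators and helpers defined at the top of the notebook.)\naae(mat_mul(H, Z, H), X)\nprint('H Z H = X verified')",
"_____no_output_____"
]
],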
[
[
"The two outer most gates are $C3H$, and the middle gate is $C4Z$. Together they create $C7X$ with a negative phase in 7 columns of the unitary. In general, the number of negative phase in the unitary has the form $2^a - 1$. Although $a$ can be varied, for each $n$, there exists a unique value of $a$ that is optimal for the respective circuit. We run and tested out all the different combination of $n$s and $a$s. And we generate the set of opimal combinations shown below.",
"_____no_output_____"
],
[
"n | H-a | CX | U3 | General Cost \n--- | --- | --- | --- | --- \n2 | 1 | 3 | 4 | 34\n3 | 1 | 6 | 7 | 67\n4 | 1 | 20 | 25 | 225\n5 | 2 | 34 | 53 | 393\n6 | 2 | 50 | 72 | 572\n7 | 3 | 70 | 101 | 801\n8 | 4 | 102 | 143 | 1163\n9 | 4 | 146 | 196 | 1656\n10 | 4 | 222 | 286 | 2506\n11 | 5 | 310 | 395 | 3495",
"_____no_output_____"
],
[
"### Implementing the General Circuit",
"_____no_output_____"
],
[
"The circuit will be implemented recursively using three base cases. When $n = 1$, when have the $CX$ gate. When $n = 2$, we have the below structure.",
"_____no_output_____"
]
],
[
[
"milk(CnX(2))",
"_____no_output_____"
]
],
[
[
"$n = 3$",
"_____no_output_____"
]
],
[
[
"dtp(CnX(3))",
"cx: 6\nu3: 7\nTotal cost: 67\n"
]
],
[
[
"We sketch the following for the general circuit of $CnX$\n\n![image0.jpg](attachment:image0.jpg)",
"_____no_output_____"
],
[
"We also provide the qiskit code implementation of for the general $CnX$ below. At the end is the list of the best implementation for each CnX gate. To use, simply assign ```best[n] ``` to an object and use like a normal QuantumCircuit. Note that $n$ represents the number of controls in the desired $CnX$.",
"_____no_output_____"
],
[
"###### CnX/CnP (Multiple-controlled Not modulo phase shift circuit)",
"_____no_output_____"
]
],
[
[
"def CnX(n, control_list = None, target = None, circ = None, theta = 1):\n \n \"\"\"\n Create a CnX modulo phase shift gate\n \n Parameters:\n -----------\n n: int\n Number of control bits\n control_list: list\n Index of control bits on inputted circuit (if any)\n target: int\n Index of control bits on inputted circuit (if any)\n circ: QuantumCircuit\n Inputted circuit to compose CnX on\n theta: int\n 1/theta power X n-bit controlled circuit\n \n Returns:\n --------\n circ: QuantumCircuit\n CnX modulo phase shift gate\n \n \"\"\"\n \n # Build New Circuit\n if circ == None:\n circ = q(n+1)\n control_list = list(range(n))\n target = n\n \n # Base Case\n if n == 1:\n \n circ.cx(*control_list, target)\n \n return circ\n \n if n==2:\n circ.ch(control_list[0], target)\n circ.cz(control_list[1], target)\n circ.ch(control_list[0], target)\n \n return circ\n \n if n == 3:\n circ.rcccx(*control_list, target)\n \n return circ\n \n # New Case\n \n # CH\n circ.ch(control_list[0], target)\n \n # CP2\n circ = control_phase(circ, theta*2, control_list[-1], target)\n \n # C(n-2)X\n circ = CnX(n-2, control_list[1:-1], control_list[-1], circ)\n \n # -CP2\n circ = control_phase(circ, -theta*2, control_list[-1], target)\n \n # C(n-2)X\n circ = CnX(n-2, control_list[1:-1], control_list[-1], circ)\n \n # CnP\n circ = CnP(n-2, control_list[1:-1], target, circ, theta*2)\n \n # CH\n circ.ch(control_list[0], target)\n \n return circ\n\ndef CnP(n, control_list = None, target = None, circ = None, theta = 1):\n \n \"\"\"\n Create a CnP modulo phase shift gate\n \n Parameters:\n -----------\n n: int\n Number of control bits\n control_list: list\n Index of control bits on inputted circuit (if any)\n target: int\n Index of control bits on inputted circuit (if any)\n circ: QuantumCircuit\n Inputted circuit to compose CnP on\n theta: int\n 1/theta power Z n-bit controlled circuit\n \n Returns:\n --------\n circ: QuantumCircuit\n CnP modulo phase shift gate\n \n \"\"\"\n \n # Build New Circuit\n if circ == None:\n circ = q(n+1)\n control_list = list(range(n))\n target = n\n \n # Base Case\n if n ==1:\n circ = control_phase(circ, theta, control_list, target)\n \n return circ \n \n # New Case\n \n # CP\n circ = control_phase(circ, theta*2, control_list[-1], target)\n \n # C(n-1)X\n circ = CnX(n-1, control_list[:-1], control_list[-1], circ)\n \n # -CP\n circ = control_phase(circ, -theta*2, control_list[-1], target)\n \n # C(n-1)X\n circ = CnX(n-1, control_list[:-1], control_list[-1], circ)\n \n # C(n-1)P\n circ = CnP(n-1, control_list[:-1], target, circ, theta*2)\n \n return circ",
"_____no_output_____"
]
],
[
[
"###### CnH / Multi-Hadamard Composition",
"_____no_output_____"
]
],
[
[
"def CnH(n, control_list = None, target = None, circ = None, theta = 1):\n \n \"\"\"\n Create a CnH modulo phase shift gate\n \n Parameters:\n -----------\n n: int\n Number of control bits\n control_list: list\n Index of control bits on inputted circuit (if any)\n target: int\n Index of control bits on inputted circuit (if any)\n circ: QuantumCircuit\n Inputted circuit to compose CnH on\n theta: int\n 1/theta power H n-bit controlled circuit\n \n Returns:\n --------\n circ: QuantumCircuit\n CnH modulo phase shift gate\n \n \"\"\"\n \n # Build New Circuit\n if circ == None:\n circ = q(n+1)\n control_list = list(range(n))\n target = n\n \n # Base Case\n \n if n ==1 and theta ==1:\n circ.ch(control_list, target)\n \n return circ\n \n if n ==1:\n circ.unitary(cu_matrix(2, 0, 1, Hp(1/theta)), [control_list, target])\n \n return circ \n \n # New Case\n \n # CH\n circ.unitary(cu_matrix(2, 0, 1, Hp(1/(theta*2))), [control_list[-1], target])\n \n # C(n-1)X\n circ = CnX(n-1, control_list[:-1], control_list[-1], circ)\n \n # CH\n circ.unitary(cu_matrix(2, 0, 1, Hp(-1/(theta*2))), [control_list[-1], target])\n \n # C(n-1)X\n circ = CnX(n-1, control_list[:-1], control_list[-1], circ)\n \n # C(n-1)P\n circ = CnH(n-1, control_list[:-1], target, circ, theta*2)\n \n return circ\n\ndef h_relief(n, no_h, return_circ = False):\n \n \"\"\"\n Implementing the general CaH-C(n-a)Z-CaH architecture\n \n Paramters:\n ----------\n n: int\n Total number of control bits\n no_h: int\n Total number of control bits for the CnH gate\n return_circ: bool\n Return circuit as a QuantumCircuit object\n \n Returns:\n --------\n circ: QuantumCircuit\n Circuit with CnX and Hadamard Relief\n \n \"\"\"\n \n # n is the number of control qubit\n # no_h is the number of control qubit on the side hadamard\n circ = q(n+1)\n circ= CnH(no_h, list(range(no_h)), n, circ)\n\n circ = CnP(n-no_h, list(range(no_h, n)), n, circ)\n circ= CnH(no_h, list(range(no_h)), n, circ)\n\n '''# Test for accuracy\n test = get(circ, nice = False)\n unitary_check(test)'''\n \n if return_circ:\n return circ\n \n dtp(circ, nice = False)",
"_____no_output_____"
],
[
"### List of opimal combinations\n\nbest = [None, None, CnX(2), CnX(3), CnX(4), h_relief(5, 2, return_circ = True), h_relief(6, 2, return_circ = True), \n h_relief(7, 3, return_circ = True), h_relief(8, 4, return_circ = True), h_relief(9, 4, return_circ = True), \n h_relief(10, 4, return_circ = True), h_relief(11, 5, return_circ = True), h_relief(12, 6, return_circ = True)]",
"_____no_output_____"
]
],
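[
[
"# Usage sketch: best[n] is a ready-made CnX-modulo-phase circuit; verify one\n# of them against the expected permutation (up to phase) with unitary_check.\nunitary_check(get(best[5], nice = False))",
"_____no_output_____"
]
],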
[
[
"### Postulate for Complexity of the General Cost",
"_____no_output_____"
],
[
"We have two lists below showing the number of $U3$ and $CX$ used for the qiskit technique and our technique",
"_____no_output_____"
]
],
[
[
"## Qiskit\n\ncx_q = np.array([6, 20, 44, 92, 188, 380, 764, 1532, 3068, 6140])\nu3_q = np.array([8, 22, 46, 94, 190, 382, 766, 1534, 3070, 6142])\n\n## Our\n\ncx_o = np.array([3, 6, 20, 34, 50, 70, 102, 146, 222, 310])\nu3_o = np.array([4, 7, 25, 53, 72, 101, 143, 196, 286, 395])",
"_____no_output_____"
]
],
[
[
"We find the common ratios by taking $a_{n+1}/a_n$, and taking the average of these ratio when $n > 3$ to mitigate the impact of the additive factor.",
"_____no_output_____"
]
],
[
[
"## Qiskit\n\nrat_1 = cx_q[1:] / cx_q[:-1]\nrat_1 = np.mean(rat_1[3:])\n\nrat_2 = u3_q[1:] / u3_q[:-1]\nrat_2 = np.mean(rat_2[3:])\n\n## Our\n\nrat_3 = cx_o[1:] / cx_o[:-1]\nrat_3 = np.mean(rat_3[3:])\n\nrat_4 = u3_o[1:] / u3_o[:-1]\nrat_4 = np.mean(rat_4[3:])",
"_____no_output_____"
],
[
"rat_1, rat_2, rat_3, rat_4",
"_____no_output_____"
]
],
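[
[
"# Sketch: express the mean growth ratios as powers of two, motivating the\n# O(2**(n/2)) postulate below (an exponent near 0.5 means roughly 2**(n/2)).\nprint(np.log2([rat_1, rat_2, rat_3, rat_4]))",
"_____no_output_____"
]
],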
[
[
"We see that the geometric ratio of our technique is superior to that of qiskit. In base $2$, we can roughly see the following complexity.\n\n$$CX \\approx O(1.446^n) \\approx O(2^{\\frac{n}{2}})$$\n\n$$U3 \\approx O(1.380^n) \\approx O(2^{\\frac{n}{2}})$$",
"_____no_output_____"
],
[
"### Compare and Contrast with the $O(n^2)$ technique in Corollary 7.6 of [3]",
"_____no_output_____"
],
[
"Lemma 7.5 shows an example of $C8X$ built using 2 $C7X$ and 1 $C7V$. For our purposes, we can assume that the cost of $C7V$ is equal to that of $C7X$. In actuality, the cost of any CnU gate is much greater than that of $CnX$ gates so therefore this assumption gives us a lower bound of the cost of the circuit.\n\n![Picture1.png](attachment:Picture1.png)\n\nPrevious lemmas and corollaries show that these can gates can be broken down further into smaller $C2X$ and $C3X$ gates.\n\n$$\\begin{align}C5X &= 12 \\ C2X = 12\\cdot34 = 408 \\\\ C7X &= 2 \\ C5X + 2 \\ C3X = 2\\cdot408 + 2\\cdot67 = 950 \\\\ C8X &= 3 \\ C7X \\end{align}$$\n\nIf we let use our implementation of $C2X$ and $C3X$. Then we would have the general cost of $C8X = 2850$. However, as our circuit allow for the use of phase differences, we would also allow this circuit to be used to built bigger examples like shown below.",
"_____no_output_____"
]
],
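[
[
"# Sketch: the cost arithmetic from the paragraph above, substituting our\n# C2X/C3X general costs (34 and 67 from the earlier table).\nc2x_cost, c3x_cost = 34, 67\nc5x_cost = 12 * c2x_cost\nc7x_cost = 2 * c5x_cost + 2 * c3x_cost\nprint(c5x_cost, c7x_cost, 3 * c7x_cost)  # 408 950 2850",
"_____no_output_____"
]
],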
[
[
"circ = q(10)\ncirc = control_unitary(circ, H, [0, 1], 9)\ncirc.h(9)\ncirc.mct([2, 3, 4, 5, 6, 7, 8], 9)\ncirc.h(9)\ncirc = control_unitary(circ, H, [0, 1], 9)\n\nmilk(circ)",
"_____no_output_____"
]
],
[
[
"The $3$ middle gates will have the effect of $C8Z$, and the two gate outside are $C2Z$. This will leads to $C10X$ with phase difference. Now we made one last modification to the implementation of Lemma 7.5. If we look back to the table from before, we can see that our implementation of $C7X$ has a lower than $950$. Because the phase difference does not affect the control operation, we can replace the paper's $C7X$ with ours.",
"_____no_output_____"
]
],
[
[
"print(1)\ndtp(CnH(1), nice = False)\nprint('\\n')\nprint(2)\ndtp(CnH(2), nice = False)\nprint('\\n')\nprint(3)\ndtp(CnH(3), nice = False)",
"1\ncx: 1\nu3: 2\nTotal cost: 12\n\n\n2\ncx: 8\nu3: 16\nTotal cost: 96\n\n\n3\ncx: 18\nu3: 31\nTotal cost: 211\n"
]
],
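[
[
"# Sketch: reproduce the \"Lower Bound\" column of the next table. C8X is taken\n# as 3 copies of our cost-801 C7X; C9X-C11X wrap that in two CnH gates with\n# the costs printed above (12, 96, 211 for a = 1, 2, 3).\nour_c7x = 801\ncnh_cost = {1: 12, 2: 96, 3: 211}\nlower = {8: 3 * our_c7x}\nfor a in (1, 2, 3):\n    lower[8 + a] = 3 * our_c7x + 2 * cnh_cost[a]\nprint(lower)",
"_____no_output_____"
]
],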
[
[
"Using the $CnH$ implementation cost of $96$. The lower bound of the general cost of $C8X, C9X, C10X$ with and without phase difference are:\n\nn | H-a | Our Cost | Lower Bound\n--- | --- | --- | ---\n8 | 0 | 1163 | 2403\n9 | 1 | 1656 | 2427\n10 | 2 | 2506 | 2595\n11 | 3 | 3495 | 2825",
"_____no_output_____"
],
[
"This conclusively shows that our technique is superior for $n = 8, 9, 10, 11$. It's easily provable that this supremacy holds for $n = 5, 6, 7$.",
"_____no_output_____"
],
[
"### References",
"_____no_output_____"
],
[
"1. https://arxiv.org/pdf/0803.2316.pdf\n2. https://arxiv.org/pdf/1508.03273.pdf\n3. https://arxiv.org/pdf/quant-ph/9503016.pdf\n4. https://arxiv.org/abs/quant-ph/9508027",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7f45ce6ae868cd1a1c2145505d680d1817c1d31 | 2,381 | ipynb | Jupyter Notebook | nbs/.ipynb_checkpoints/01_rmath-checkpoint.ipynb | perceptualrobots/pct | 9690d64fa89c2802299229d4f4b01baea7c1a881 | [
"Apache-2.0"
] | 6 | 2021-03-26T22:15:15.000Z | 2021-11-17T14:33:13.000Z | nbs/.ipynb_checkpoints/01_rmath-checkpoint.ipynb | perceptualrobots/pct | 9690d64fa89c2802299229d4f4b01baea7c1a881 | [
"Apache-2.0"
] | null | null | null | nbs/.ipynb_checkpoints/01_rmath-checkpoint.ipynb | perceptualrobots/pct | 9690d64fa89c2802299229d4f4b01baea7c1a881 | [
"Apache-2.0"
] | null | null | null | 19.048 | 112 | 0.50693 | [
[
[
"#hide\nfrom nbdev import *\n%nbdev_default_export utilities.rmath",
"Cells will be exported to pct.utilities.rmath,\nunless a different module is specified after an export flag: `%nbdev_export special.module`\n"
],
[
"#%nbdev_default_class_level 3",
"_____no_output_____"
]
],
[
[
"# Math\n\n Some extra math functions.\n ",
"_____no_output_____"
]
],
[
[
"%nbdev_export\ndef smooth( newVal, oldVal, weight) :\n \"An exponential smoothing function. The weight is the smoothing factor applied to the old value.\"\n return newVal * (1 - weight) + oldVal * weight;",
"_____no_output_____"
],
[
"smooth(2, 10, 0.9)",
"_____no_output_____"
],
[
"assert smooth(2, 10, 0.9)==9.2",
"_____no_output_____"
],
[
"#hide\nfrom nbdev import *\nnotebook2script()",
"Converted 00_core.ipynb.\nConverted 01_rmath.ipynb.\nConverted 02_functions.ipynb.\nConverted 03_nodes.ipynb.\nConverted 04_hierarchy.ipynb.\nConverted index.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f46b842b2a1635ef0312298a5524054773a72f | 53,573 | ipynb | Jupyter Notebook | MetacartelVentures/MetacartelVentures DAO Analysis.ipynb | Xqua/dao-research | f45943d95fb57bbf597d2743e0f66980c3f8705a | [
"MIT"
] | null | null | null | MetacartelVentures/MetacartelVentures DAO Analysis.ipynb | Xqua/dao-research | f45943d95fb57bbf597d2743e0f66980c3f8705a | [
"MIT"
] | 1 | 2021-12-01T13:39:25.000Z | 2021-12-01T13:39:25.000Z | MetacartelVentures/MetacartelVentures DAO Analysis.ipynb | Xqua/dao-research | f45943d95fb57bbf597d2743e0f66980c3f8705a | [
"MIT"
] | 3 | 2021-11-19T01:45:52.000Z | 2022-03-08T02:01:21.000Z | 37.229326 | 241 | 0.392716 | [
[
[
"from datetime import datetime\nimport pandas as pd\nimport networkx as nx\nimport json",
"_____no_output_____"
]
],
[
[
"Metacartel Ventures\n\nFirst block: 9484668 (Feb-15-2020 01:32:52 AM +UTC)\nhttps://etherscan.io/block/9484668\n\nOther blocks:\n10884668 (Sep-18-2020 06:57:11 AM +UTC)\n\nRecent block (for reference): 13316507 (Sep-28-2021 08:58:06 PM +UTC)\n\nsummoner: 0x8c8b237bea3c23317d08b73d7137e90cafdf68e6",
"_____no_output_____"
]
],
[
[
"# Approximate seconds per block\n\nsec_per_block = (datetime(2021, 9, 28, 8, 58, 6) - datetime(2020, 2, 15, 1, 32, 52)).seconds / (13316507 - 9484668)\n\nprint(\"approx. this many blocks per days:\", sec_per_block * 86400)\n# 86400 seconds per day\n\n# >>> from datetime import datetime\n# >>> a = datetime(2011,11,24,0,0,0)\n# >>> b = datetime(2011,11,17,23,59,59)\n# >>> a-b\n# datetime.timedelta(6, 1)\n# >>> (a-b).days\n# 6",
"approx. this many blocks per days: 602.3451402838167\n"
],
[
"9484668 + 602\n",
"_____no_output_____"
],
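[
"# Hedged alternative (not run here): with a web3 provider configured, block\n# timestamps can be read directly instead of interpolated. The endpoint is\n# hypothetical.\n# from web3 import Web3\n# w3 = Web3(Web3.HTTPProvider('https://<your-node-endpoint>'))\n# w3.eth.get_block(13316507)['timestamp']",
"_____no_output_____"
],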
[
"# Load data\nwith open(\"./data/10884668-results.json\", \"r\") as f:\n results_09182020 = json.load(f)\nf.close()\n\nwith open(\"./data/13316507-results.json\", \"r\") as f:\n results_09282021 = json.load(f)\nf.close()",
"_____no_output_____"
],
[
"df_09182020_members = pd.DataFrame.from_dict(results_09182020[\"data\"][\"moloches\"][0][\"members\"])\ndf_09182020_proposals = pd.DataFrame.from_dict(results_09182020[\"data\"][\"moloches\"][0][\"proposals\"])",
"_____no_output_____"
],
[
"df_09182020_members[\"id\"][0].split('-')[2]",
"_____no_output_____"
],
[
"df_09182020_members[\"id\"] = df_09182020_members.apply(lambda row: row[\"id\"].split('-')[2], axis=1)",
"_____no_output_____"
],
[
"df_09182020_members",
"_____no_output_____"
],
[
"print(\"Number of members:\", df_09182020_members[\"id\"].nunique())",
"Number of members: 110\n"
],
[
"df_09182020_proposals",
"_____no_output_____"
],
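[
"# Quick sketch: share of proposals that passed; didPass is one of the\n# proposal fields selected in cols_proposal below.\ndf_09182020_proposals['didPass'].value_counts(normalize=True)",
"_____no_output_____"
],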
[
"cols_proposal = [\"applicant\", \"details\", \"didPass\", \"lootRequested\", \"sharesRequested\", \"aborted\", \"cancelled\", \"createdAt\", \"yesShares\", \"yesVotes\", \"noShares\", \"noVotes\", \"maxTotalSharesAndLootAtYesVote\"]\ndf_09182020_proposals[cols_proposal]\n# df_09182020_proposals.columns",
"_____no_output_____"
],
[
"df_09182020_members.columns",
"_____no_output_____"
],
[
"df_09182020_member_proposals = pd.merge(df_09182020_members, df_09182020_proposals[cols_proposal], how=\"left\", left_on=[\"id\"], right_on = [\"applicant\"]).sort_values([\"createdAt_y\", \"applicant\"])\ndf_09182020_member_proposals.head()",
"_____no_output_____"
],
[
"\ndf_09182020_member_proposals[[\"id\", \"applicant\"]].groupby(\"id\").count()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4796ba86177a92f64d21ed5ecf9a60d5fce57 | 265,277 | ipynb | Jupyter Notebook | indexing_part1_introduction.ipynb | derekmahar/JuliaCon2020-DataFrames-Tutorial | 1a2511946c6323386d45f23fe6ac43dfd29004f3 | [
"MIT"
] | null | null | null | indexing_part1_introduction.ipynb | derekmahar/JuliaCon2020-DataFrames-Tutorial | 1a2511946c6323386d45f23fe6ac43dfd29004f3 | [
"MIT"
] | null | null | null | indexing_part1_introduction.ipynb | derekmahar/JuliaCon2020-DataFrames-Tutorial | 1a2511946c6323386d45f23fe6ac43dfd29004f3 | [
"MIT"
] | null | null | null | 58.251427 | 6,282 | 0.51283 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f4797932c62bd2a87327d55e446404a8c0feb5 | 5,972 | ipynb | Jupyter Notebook | ionic_liquids/datasets/.ipynb_checkpoints/DataCleaning-checkpoint.ipynb | qize/ionic_liquids | 64eebaf505b1d89b2e2ef82f8fa8959b0a0c3956 | [
"MIT"
] | 2 | 2020-10-21T08:41:41.000Z | 2021-08-05T10:20:24.000Z | ionic_liquids/datasets/.ipynb_checkpoints/DataCleaning-checkpoint.ipynb | qize/ionic_liquids | 64eebaf505b1d89b2e2ef82f8fa8959b0a0c3956 | [
"MIT"
] | null | null | null | ionic_liquids/datasets/.ipynb_checkpoints/DataCleaning-checkpoint.ipynb | qize/ionic_liquids | 64eebaf505b1d89b2e2ef82f8fa8959b0a0c3956 | [
"MIT"
] | 3 | 2017-11-08T23:03:10.000Z | 2020-10-21T08:41:48.000Z | 27.145455 | 105 | 0.395177 | [
[
[
"import numpy as np\nimport pandas as pd\n",
"_____no_output_____"
],
[
"data = pd.read_excel(\"inputdata.xlsx\")",
"_____no_output_____"
],
[
"data['EC_value'], data['EC_error'] = zip(*data['ELE_COD'].map(lambda x: x.split('±')))",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom __future__ import print_function\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Descriptors\nfrom rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator as Calculator\n\n#Setting up for molecular descriptors\n\n\n#the number of atoms\nnum_atoms = m.GetNumAtoms()\nmol_wt = MolWt\nexact_mol_wt = ExactMolWt\nNO_Count = NOCount\nNum_H_Donors = NumHDonors\nRing_count = RingCount\nNum_Arom_Sat_Ali = Num{Aromatic,Saturated,Aliphatic}Rings\n\n\n#Neural network\n#gridsearch for paramaters in scikit",
"_____no_output_____"
]
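,
[
"# Hedged sketch: batch-computing several descriptors with the Calculator\n# imported above; the descriptor names follow rdkit.Chem.Descriptors\n# conventions, and the molecule is a hypothetical stand-in for dataset rows.\ncalc = Calculator(['MolWt', 'NumHDonors', 'RingCount'])\nprint(calc.CalcDescriptors(Chem.MolFromSmiles('CCO')))",
"_____no_output_____"
]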
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7f47aa0ff51ab4205c4a705215754dd70279e58 | 409,254 | ipynb | Jupyter Notebook | P1_Trading_with_Momentum/project_notebook.ipynb | hemang-75/AI_for_Trading | e975dd9588840554f7fd10956d1f872ac3b03e3a | [
"Apache-2.0"
] | null | null | null | P1_Trading_with_Momentum/project_notebook.ipynb | hemang-75/AI_for_Trading | e975dd9588840554f7fd10956d1f872ac3b03e3a | [
"Apache-2.0"
] | null | null | null | P1_Trading_with_Momentum/project_notebook.ipynb | hemang-75/AI_for_Trading | e975dd9588840554f7fd10956d1f872ac3b03e3a | [
"Apache-2.0"
] | null | null | null | 56.100617 | 38,129 | 0.558489 | [
[
[
"# Project 1: Trading with Momentum\n## Instructions\nEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.\n\n## Packages\nWhen you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.\n\nThe other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.\n\n### Install Packages",
"_____no_output_____"
]
],
[
[
"import sys\n!{sys.executable} -m pip install -r requirements.txt",
"Requirement already satisfied: colour==0.1.5 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 1)) (0.1.5)\nCollecting cvxpy==1.0.3 (from -r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a1/59/2613468ffbbe3a818934d06b81b9f4877fe054afbf4f99d2f43f398a0b34/cvxpy-1.0.3.tar.gz (880kB)\n\u001b[K 100% |████████████████████████████████| 880kB 8.6MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: cycler==0.10.0 in /opt/conda/lib/python3.6/site-packages/cycler-0.10.0-py3.6.egg (from -r requirements.txt (line 3)) (0.10.0)\nCollecting numpy==1.13.3 (from -r requirements.txt (line 4))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/57/a7/e3e6bd9d595125e1abbe162e323fd2d06f6f6683185294b79cd2cdb190d5/numpy-1.13.3-cp36-cp36m-manylinux1_x86_64.whl (17.0MB)\n\u001b[K 100% |████████████████████████████████| 17.0MB 2.2MB/s eta 0:00:01 35% |███████████▏ | 5.9MB 32.4MB/s eta 0:00:01 78% |█████████████████████████▏ | 13.3MB 25.2MB/s eta 0:00:01\n\u001b[?25hCollecting pandas==0.21.1 (from -r requirements.txt (line 5))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/3a/e1/6c514df670b887c77838ab856f57783c07e8760f2e3d5939203a39735e0e/pandas-0.21.1-cp36-cp36m-manylinux1_x86_64.whl (26.2MB)\n\u001b[K 100% |████████████████████████████████| 26.2MB 1.6MB/s eta 0:00:01 10% |███▍ | 2.8MB 32.4MB/s eta 0:00:01 32% |██████████▍ | 8.5MB 29.6MB/s eta 0:00:01 53% |█████████████████▏ | 14.1MB 28.9MB/s eta 0:00:01 69% |██████████████████████▏ | 18.1MB 27.8MB/s eta 0:00:01 84% |███████████████████████████ | 22.1MB 25.6MB/s eta 0:00:01\n\u001b[?25hCollecting plotly==2.2.3 (from -r requirements.txt (line 6))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/99/a6/8214b6564bf4ace9bec8a26e7f89832792be582c042c47c912d3201328a0/plotly-2.2.3.tar.gz (1.1MB)\n\u001b[K 100% |████████████████████████████████| 1.1MB 16.2MB/s ta 0:00:01\n\u001b[?25hRequirement already satisfied: pyparsing==2.2.0 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 7)) (2.2.0)\nRequirement already satisfied: python-dateutil==2.6.1 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 8)) (2.6.1)\nRequirement already satisfied: pytz==2017.3 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 9)) (2017.3)\nRequirement already satisfied: requests==2.18.4 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 10)) (2.18.4)\nCollecting scipy==1.0.0 (from -r requirements.txt (line 11))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d8/5e/caa01ba7be11600b6a9d39265440d7b3be3d69206da887c42bef049521f2/scipy-1.0.0-cp36-cp36m-manylinux1_x86_64.whl (50.0MB)\n\u001b[K 100% |████████████████████████████████| 50.0MB 704kB/s eta 0:00:01 7% |██▍ | 3.7MB 25.1MB/s eta 0:00:02 9% |███ | 4.8MB 21.7MB/s eta 0:00:03 14% |████▌ | 7.1MB 24.1MB/s eta 0:00:02 24% |███████▉ | 12.3MB 23.3MB/s eta 0:00:02 30% |█████████▉ | 15.4MB 23.1MB/s eta 0:00:02 37% |████████████ | 18.7MB 21.1MB/s eta 0:00:02 39% |████████████▋ | 19.7MB 22.3MB/s eta 0:00:02 41% |█████████████▍ | 20.9MB 24.5MB/s eta 0:00:02 43% |██████████████ | 21.9MB 21.0MB/s eta 0:00:02 53% |█████████████████ | 26.5MB 23.9MB/s eta 0:00:01 57% |██████████████████▎ | 28.6MB 22.2MB/s eta 0:00:01 59% |███████████████████ | 29.8MB 35.9MB/s eta 0:00:01 63% |████████████████████▍ | 31.9MB 21.3MB/s eta 0:00:01 68% |█████████████████████▉ | 34.1MB 22.0MB/s eta 0:00:01 70% |██████████████████████▋ | 35.4MB 24.3MB/s eta 
0:00:01 75% |████████████████████████ | 37.6MB 26.2MB/s eta 0:00:01 77% |████████████████████████▉ | 38.8MB 25.6MB/s eta 0:00:01 86% |███████████████████████████▋ | 43.2MB 22.5MB/s eta 0:00:01 88% |████████████████████████████▍ | 44.4MB 20.1MB/s eta 0:00:01 91% |█████████████████████████████▎ | 45.7MB 24.1MB/s eta 0:00:01 98% |███████████████████████████████▌| 49.2MB 26.7MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: scikit-learn==0.19.1 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 12)) (0.19.1)\nRequirement already satisfied: six==1.11.0 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 13)) (1.11.0)\nCollecting tqdm==4.19.5 (from -r requirements.txt (line 14))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/3c/341b4fa23cb3abc335207dba057c790f3bb329f6757e1fcd5d347bcf8308/tqdm-4.19.5-py2.py3-none-any.whl (51kB)\n\u001b[K 100% |████████████████████████████████| 61kB 7.6MB/s ta 0:00:01\n\u001b[?25hCollecting osqp (from cvxpy==1.0.3->-r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/6c/59/2b80e881be227eecef3f2b257339d182167b55d22a1315ff4303ddcfd42f/osqp-0.6.1-cp36-cp36m-manylinux1_x86_64.whl (208kB)\n\u001b[K 100% |████████████████████████████████| 215kB 16.9MB/s ta 0:00:01\n\u001b[?25hCollecting ecos>=2 (from cvxpy==1.0.3->-r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/55/ed/d131ff51f3a8f73420eb1191345eb49f269f23cadef515172e356018cde3/ecos-2.0.7.post1-cp36-cp36m-manylinux1_x86_64.whl (147kB)\n\u001b[K 100% |████████████████████████████████| 153kB 15.2MB/s ta 0:00:01\n\u001b[?25hCollecting scs>=1.1.3 (from cvxpy==1.0.3->-r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/1a/72/33be87cce255d4e9dbbfef547e9fd6ec7ee94d0d0910bb2b13badea3fbbe/scs-2.1.2.tar.gz (3.5MB)\n\u001b[K 100% |████████████████████████████████| 3.6MB 9.7MB/s eta 0:00:01 43% |█████████████▉ | 1.5MB 22.3MB/s eta 0:00:01\n\u001b[?25hCollecting multiprocess (from cvxpy==1.0.3->-r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/58/17/5151b6ac2ac9b6276d46c33369ff814b0901872b2a0871771252f02e9192/multiprocess-0.70.9.tar.gz (1.6MB)\n\u001b[K 100% |████████████████████████████████| 1.6MB 8.1MB/s eta 0:00:01 98% |███████████████████████████████▍| 1.5MB 28.1MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: fastcache in /opt/conda/lib/python3.6/site-packages (from cvxpy==1.0.3->-r requirements.txt (line 2)) (1.0.2)\nRequirement already satisfied: toolz in /opt/conda/lib/python3.6/site-packages (from cvxpy==1.0.3->-r requirements.txt (line 2)) (0.8.2)\nRequirement already satisfied: decorator>=4.0.6 in /opt/conda/lib/python3.6/site-packages (from plotly==2.2.3->-r requirements.txt (line 6)) (4.0.11)\nRequirement already satisfied: nbformat>=4.2 in /opt/conda/lib/python3.6/site-packages (from plotly==2.2.3->-r requirements.txt (line 6)) (4.4.0)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10)) (3.0.4)\nRequirement already satisfied: idna<2.7,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10)) (2.6)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10)) (1.22)\nRequirement already satisfied: certifi>=2017.4.17 in 
/opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10)) (2019.11.28)\nRequirement already satisfied: future in /opt/conda/lib/python3.6/site-packages (from osqp->cvxpy==1.0.3->-r requirements.txt (line 2)) (0.16.0)\nCollecting dill>=0.3.1 (from multiprocess->cvxpy==1.0.3->-r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz (151kB)\n\u001b[K 100% |████████████████████████████████| 153kB 16.7MB/s ta 0:00:01\n\u001b[?25hRequirement already satisfied: ipython-genutils in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6)) (0.2.0)\nRequirement already satisfied: traitlets>=4.1 in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6)) (4.3.2)\nRequirement already satisfied: jupyter-core in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6)) (4.4.0)\n"
]
],
[
[
"### Load Packages",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport helper\nimport project_helper\nimport project_tests",
"_____no_output_____"
]
],
[
[
"## Market Data\n### Load Data\nThe data we use for most of the projects is end of day data. This contains data for many stocks, but we'll be looking at stocks in the S&P 500. We also made things a little easier to run by narrowing down our range of time period instead of using all of the data.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../../data/project_1/eod-quotemedia.csv', parse_dates=['date'], index_col=False)\n\nclose = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')\n\nprint('Loaded Data')",
"Loaded Data\n"
]
],
[
[
"### View Data\nRun the cell below to see what the data looks like for `close`.",
"_____no_output_____"
]
],
[
[
"project_helper.print_dataframe(close)",
"_____no_output_____"
]
],
[
[
"### Stock Example\nLet's see what a single stock looks like from the closing prices. For this example and future display examples in this project, we'll use Apple's stock (AAPL). If we tried to graph all the stocks, it would be too much information.",
"_____no_output_____"
]
],
[
[
"apple_ticker = 'AAPL'\nproject_helper.plot_stock(close[apple_ticker], '{} Stock'.format(apple_ticker))",
"_____no_output_____"
]
],
[
[
"## Resample Adjusted Prices\n\nThe trading signal you'll develop in this project does not need to be based on daily prices, for instance, you can use month-end prices to perform trading once a month. To do this, you must first resample the daily adjusted closing prices into monthly buckets, and select the last observation of each month.\n\nImplement the `resample_prices` to resample `close_prices` at the sampling frequency of `freq`.",
"_____no_output_____"
]
],
[
[
"def resample_prices(close_prices, freq='M'):\n \"\"\"\n Resample close prices for each ticker at specified frequency.\n \n Parameters\n ----------\n close_prices : DataFrame\n Close prices for each ticker and date\n freq : str\n What frequency to sample at\n For valid freq choices, see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \n Returns\n -------\n prices_resampled : DataFrame\n Resampled prices for each ticker and date\n \"\"\"\n # TODO: Implement Function\n \n# print(close_prices)\n prices_resampled = close_prices.resample(freq).last()\n# print(prices_resampled)\n \n return prices_resampled\n\nproject_tests.test_resample_prices(resample_prices)",
"Tests Passed\n"
]
],
[
[
"### View Data\nLet's apply this function to `close` and view the results.",
"_____no_output_____"
]
],
[
[
"monthly_close = resample_prices(close)\nproject_helper.plot_resampled_prices(\n monthly_close.loc[:, apple_ticker],\n close.loc[:, apple_ticker],\n '{} Stock - Close Vs Monthly Close'.format(apple_ticker))",
"_____no_output_____"
]
],
[
[
"## Compute Log Returns\n\nCompute log returns ($R_t$) from prices ($P_t$) as your primary momentum indicator:\n\n$$R_t = log_e(P_t) - log_e(P_{t-1})$$\n\nImplement the `compute_log_returns` function below, such that it accepts a dataframe (like one returned by `resample_prices`), and produces a similar dataframe of log returns. Use Numpy's [log function](https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html) to help you calculate the log returns.",
"_____no_output_____"
]
],
[
[
"def compute_log_returns(prices):\n \"\"\"\n Compute log returns for each ticker.\n \n Parameters\n ----------\n prices : DataFrame\n Prices for each ticker and date\n \n Returns\n -------\n log_returns : DataFrame\n Log returns for each ticker and date\n \"\"\"\n # TODO: Implement Function\n log_returns = np.log(prices) - np.log(prices.shift(1))\n \n return log_returns\n\nproject_tests.test_compute_log_returns(compute_log_returns)",
"Tests Passed\n"
]
],
[
[
"### View Data\nUsing the same data returned from `resample_prices`, we'll generate the log returns.",
"_____no_output_____"
]
],
[
[
"monthly_close_returns = compute_log_returns(monthly_close)\nproject_helper.plot_returns(\n monthly_close_returns.loc[:, apple_ticker],\n 'Log Returns of {} Stock (Monthly)'.format(apple_ticker))",
"_____no_output_____"
]
],
[
[
"## Shift Returns\nImplement the `shift_returns` function to shift the log returns to the previous or future returns in the time series. For example, the parameter `shift_n` is 2 and `returns` is the following:\n\n```\n Returns\n A B C D\n2013-07-08 0.015 0.082 0.096 0.020 ...\n2013-07-09 0.037 0.095 0.027 0.063 ...\n2013-07-10 0.094 0.001 0.093 0.019 ...\n2013-07-11 0.092 0.057 0.069 0.087 ...\n... ... ... ... ...\n```\n\nthe output of the `shift_returns` function would be:\n```\n Shift Returns\n A B C D\n2013-07-08 NaN NaN NaN NaN ...\n2013-07-09 NaN NaN NaN NaN ...\n2013-07-10 0.015 0.082 0.096 0.020 ...\n2013-07-11 0.037 0.095 0.027 0.063 ...\n... ... ... ... ...\n```\nUsing the same `returns` data as above, the `shift_returns` function should generate the following with `shift_n` as -2:\n```\n Shift Returns\n A B C D\n2013-07-08 0.094 0.001 0.093 0.019 ...\n2013-07-09 0.092 0.057 0.069 0.087 ...\n... ... ... ... ... ...\n... ... ... ... ... ...\n... NaN NaN NaN NaN ...\n... NaN NaN NaN NaN ...\n```\n_Note: The \"...\" represents data points we're not showing._",
"_____no_output_____"
]
],
[
[
"def shift_returns(returns, shift_n):\n \"\"\"\n Generate shifted returns\n \n Parameters\n ----------\n returns : DataFrame\n Returns for each ticker and date\n shift_n : int\n Number of periods to move, can be positive or negative\n \n Returns\n -------\n shifted_returns : DataFrame\n Shifted returns for each ticker and date\n \"\"\"\n # TODO: Implement Function\n \n return returns.shift(shift_n)\n\nproject_tests.test_shift_returns(shift_returns)",
"Tests Passed\n"
]
],
[
[
"### View Data\nLet's get the previous month's and next month's returns.",
"_____no_output_____"
]
],
[
[
"monthly_close_returns",
"_____no_output_____"
],
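[
"# Tiny illustration (added sketch, not part of the original project): shifting\n# a small slice by 1 pads the first row with NaN, exactly as described in the\n# Shift Returns section above.\nshift_returns(monthly_close_returns.iloc[:3, :2], 1)",
"_____no_output_____"
],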
[
"prev_returns = shift_returns(monthly_close_returns, 1)\nlookahead_returns = shift_returns(monthly_close_returns, -1)\n\nproject_helper.plot_shifted_returns(\n prev_returns.loc[:, apple_ticker],\n monthly_close_returns.loc[:, apple_ticker],\n 'Previous Returns of {} Stock'.format(apple_ticker))\nproject_helper.plot_shifted_returns(\n lookahead_returns.loc[:, apple_ticker],\n monthly_close_returns.loc[:, apple_ticker],\n 'Lookahead Returns of {} Stock'.format(apple_ticker))",
"_____no_output_____"
]
],
[
[
"## Generate Trading Signal\n\nA trading signal is a sequence of trading actions, or results that can be used to take trading actions. A common form is to produce a \"long\" and \"short\" portfolio of stocks on each date (e.g. end of each month, or whatever frequency you desire to trade at). This signal can be interpreted as rebalancing your portfolio on each of those dates, entering long (\"buy\") and short (\"sell\") positions as indicated.\n\nHere's a strategy that we will try:\n> For each month-end observation period, rank the stocks by _previous_ returns, from the highest to the lowest. Select the top performing stocks for the long portfolio, and the bottom performing stocks for the short portfolio.\n\nImplement the `get_top_n` function to get the top performing stock for each month. Get the top performing stocks from `prev_returns` by assigning them a value of 1. For all other stocks, give them a value of 0. For example, using the following `prev_returns`:\n\n```\n Previous Returns\n A B C D E F G\n2013-07-08 0.015 0.082 0.096 0.020 0.075 0.043 0.074\n2013-07-09 0.037 0.095 0.027 0.063 0.024 0.086 0.025\n... ... ... ... ... ... ... ...\n```\n\nThe function `get_top_n` with `top_n` set to 3 should return the following:\n```\n Previous Returns\n A B C D E F G\n2013-07-08 0 1 1 0 1 0 0\n2013-07-09 0 1 0 1 0 1 0\n... ... ... ... ... ... ... ...\n```\n*Note: You may have to use Panda's [`DataFrame.iterrows`](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.iterrows.html) with [`Series.nlargest`](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.Series.nlargest.html) in order to implement the function. This is one of those cases where creating a vecorization solution is too difficult.*",
"_____no_output_____"
]
],
[
[
"def get_top_n(prev_returns, top_n):\n \"\"\"\n Select the top performing stocks\n \n Parameters\n ----------\n prev_returns : DataFrame\n Previous shifted returns for each ticker and date\n top_n : int\n The number of top performing stocks to get\n \n Returns\n -------\n top_stocks : DataFrame\n Top stocks for each ticker and date marked with a 1\n \"\"\"\n # TODO: Implement Function\n \n top_stocks = pd.DataFrame(0,index=prev_returns.index,columns=prev_returns.columns)\n for date,row in prev_returns.iterrows():\n top_idx = row.nlargest(top_n).index \n top_stocks.loc[date,top_idx]=1\n \n return top_stocks\n\nproject_tests.test_get_top_n(get_top_n)",
"Tests Passed\n"
]
],
[
[
"### View Data\nWe want to get the best performing and worst performing stocks. To get the best performing stocks, we'll use the `get_top_n` function. To get the worst performing stocks, we'll also use the `get_top_n` function. However, we pass in `-1*prev_returns` instead of just `prev_returns`. Multiplying by negative one will flip all the positive returns to negative and negative returns to positive. Thus, it will return the worst performing stocks.",
"_____no_output_____"
]
],
[
[
"top_bottom_n = 50\ndf_long = get_top_n(prev_returns, top_bottom_n)\ndf_short = get_top_n(-1*prev_returns, top_bottom_n)\nproject_helper.print_top(df_long, 'Longed Stocks')\nproject_helper.print_top(df_short, 'Shorted Stocks')",
"10 Most Longed Stocks:\nINCY, AMD, AVGO, NFX, SWKS, NFLX, ILMN, UAL, NVDA, MU\n10 Most Shorted Stocks:\nRRC, FCX, CHK, MRO, GPS, WYNN, DVN, FTI, SPLS, TRIP\n"
]
],
[
[
"## Projected Returns\nIt's now time to check if your trading signal has the potential to become profitable!\n\nWe'll start by computing the net returns this portfolio would return. For simplicity, we'll assume every stock gets an equal dollar amount of investment. This makes it easier to compute a portfolio's returns as the simple arithmetic average of the individual stock returns.\n\nImplement the `portfolio_returns` function to compute the expected portfolio returns. Using `df_long` to indicate which stocks to long and `df_short` to indicate which stocks to short, calculate the returns using `lookahead_returns`. To help with calculation, we've provided you with `n_stocks` as the number of stocks we're investing in a single period.",
"_____no_output_____"
]
],
[
[
"def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks):\n \"\"\"\n Compute expected returns for the portfolio, assuming equal investment in each long/short stock.\n \n Parameters\n ----------\n df_long : DataFrame\n Top stocks for each ticker and date marked with a 1\n df_short : DataFrame\n Bottom stocks for each ticker and date marked with a 1\n lookahead_returns : DataFrame\n Lookahead returns for each ticker and date\n n_stocks: int\n The number number of stocks chosen for each month\n \n Returns\n -------\n portfolio_returns : DataFrame\n Expected portfolio returns for each ticker and date\n \"\"\"\n # TODO: Implement Function\n \n# print(lookahead_returns)\n# print(df_long)\n# print(df_short)\n returns = (lookahead_returns*df_long - lookahead_returns*df_short)/3\n# print(returns)\n return returns\n\nproject_tests.test_portfolio_returns(portfolio_returns)",
"Tests Passed\n"
]
],
[
[
"### View Data\nTime to see how the portfolio did.",
"_____no_output_____"
]
],
[
[
"expected_portfolio_returns",
"_____no_output_____"
],
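[
"# Sanity check (illustrative, not in the original notebook): every month with\n# signals should flag exactly `top_bottom_n` long and short names.\nprint(df_long.sum(axis=1).max(), df_short.sum(axis=1).max())",
"_____no_output_____"
],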
[
"expected_portfolio_returns = portfolio_returns(df_long, df_short, lookahead_returns, 2*top_bottom_n)\nproject_helper.plot_returns(expected_portfolio_returns.T.sum(), 'Portfolio Returns')",
"_____no_output_____"
]
],
[
[
"## Statistical Tests\n### Annualized Rate of Return",
"_____no_output_____"
]
],
[
[
"expected_portfolio_returns_by_date = expected_portfolio_returns.T.sum().dropna()\nportfolio_ret_mean = expected_portfolio_returns_by_date.mean()\nportfolio_ret_ste = expected_portfolio_returns_by_date.sem()\nportfolio_ret_annual_rate = (np.exp(portfolio_ret_mean * 12) - 1) * 100\n\nprint(\"\"\"\nMean: {:.6f}\nStandard Error: {:.6f}\nAnnualized Rate of Return: {:.2f}%\n\"\"\".format(portfolio_ret_mean, portfolio_ret_ste, portfolio_ret_annual_rate))",
"\nMean: 0.106159\nStandard Error: 0.071935\nAnnualized Rate of Return: 257.48%\n\n"
]
],
[
[
"The annualized rate of return allows you to compare the rate of return from this strategy to other quoted rates of return, which are usually quoted on an annual basis. \n\n### T-Test\nOur null hypothesis ($H_0$) is that the actual mean return from the signal is zero. We'll perform a one-sample, one-sided t-test on the observed mean return, to see if we can reject $H_0$.\n\nWe'll need to first compute the t-statistic, and then find its corresponding p-value. The p-value will indicate the probability of observing a t-statistic equally or more extreme than the one we observed if the null hypothesis were true. A small p-value means that the chance of observing the t-statistic we observed under the null hypothesis is small, and thus casts doubt on the null hypothesis. It's good practice to set a desired level of significance or alpha ($\\alpha$) _before_ computing the p-value, and then reject the null hypothesis if $p < \\alpha$.\n\nFor this project, we'll use $\\alpha = 0.05$, since it's a common value to use.\n\nImplement the `analyze_alpha` function to perform a t-test on the sample of portfolio returns. We've imported the `scipy.stats` module for you to perform the t-test.\n\nNote: [`scipy.stats.ttest_1samp`](https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.ttest_1samp.html) performs a two-sided test, so divide the p-value by 2 to get 1-sided p-value",
"_____no_output_____"
]
],
[
[
"from scipy import stats\n\ndef analyze_alpha(expected_portfolio_returns_by_date):\n \"\"\"\n Perform a t-test with the null hypothesis being that the expected mean return is zero.\n \n Parameters\n ----------\n expected_portfolio_returns_by_date : Pandas Series\n Expected portfolio returns for each date\n \n Returns\n -------\n t_value\n T-statistic from t-test\n p_value\n Corresponding p-value\n \"\"\"\n # TODO: Implement Function\n\n (t_value,p_value) = stats.ttest_1samp(expected_portfolio_returns_by_date,0) \n return t_value,p_value*0.5\n\nproject_tests.test_analyze_alpha(analyze_alpha)",
"Tests Passed\n"
]
],
[
[
"### View Data\nLet's see what values we get with our portfolio. After you run this, make sure to answer the question below.",
"_____no_output_____"
]
],
[
[
"t_value, p_value = analyze_alpha(expected_portfolio_returns_by_date)\nprint(\"\"\"\nAlpha analysis:\n t-value: {:.3f}\n p-value: {:.6f}\n\"\"\".format(t_value, p_value))",
"\nAlpha analysis:\n t-value: 1.476\n p-value: 0.073339\n\n"
]
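,
[
"# Cross-check (illustrative, not in the original notebook): recover the same\n# one-sided p-value directly from the t-statistic via the t-distribution's\n# survival function.\nn = len(expected_portfolio_returns_by_date)\nprint(stats.t.sf(t_value, df=n - 1))",
"_____no_output_____"
]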
],
[
[
"### Question: What p-value did you observe? And what does that indicate about your signal?",
"_____no_output_____"
],
[
"*#TODO: Put Answer In this Cell*\n\n- The p-value coming from the Alpha analysis is 0.0733 , since the alpha we have set is 0.05 that means our p-value > alpha and so the true mean of these returns might actually be zero or less than zero.\n- So based on this T- statistic test, we can say that mean return which we got i.e 0.106 is might be due to some fluctuation and so our signal is not good to generate some positive returns and we need to make improvements in our strategy to generate some alpha.",
"_____no_output_____"
],
[
"## Submission\nNow that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7f48e75e68278a0464462ce2434255125a839e5 | 25,209 | ipynb | Jupyter Notebook | tri-results/how-to-web-scrape.ipynb | kbridge14/how2py | 21cbfde70906d820d3d98e8580c20f4e183275e9 | [
"MIT"
] | null | null | null | tri-results/how-to-web-scrape.ipynb | kbridge14/how2py | 21cbfde70906d820d3d98e8580c20f4e183275e9 | [
"MIT"
] | null | null | null | tri-results/how-to-web-scrape.ipynb | kbridge14/how2py | 21cbfde70906d820d3d98e8580c20f4e183275e9 | [
"MIT"
] | null | null | null | 39.266355 | 1,960 | 0.518981 | [
[
[
"<!doctype html>\n<html id=\"ng-app\" ng-app=\"results\" class=\"ng-scope\">\n <body>\n <div class=\"results-app ng-isolate-scope\" results-app>\n <div class=\"page\" ng-show=\"eventconfig.schema\" aria-hidden=\"false\" style>\n <md-content class=\"xact-contact _md\">\n <div ui-view=\"content\" class=\"ng-scope\" style>\n <div class=\"xact-search ng-scope layout-column flex\" layout=\"column\" flex>\n <md-content flex class=\"_md flex\">\n <md-table-container ng-show=\"!loading\" aria-hidden=\"false\" class style>\n <table md-table md-progress=\"promise\" class=\"md-table ng-isolate-scope\">\n <tbody md-body class=\"md-body\">\n <tr md-row md-select=\"entrant\" md-select-id=\"name\" md-auto-select ng-repeat=\"entrant in entrants\" ng-click=\"showEntrantInfo(entrant)\" class=\"md-row ng-scope ng-isolate-scope\" role=\"button\" tabindex=\"0\" style>\n <td md-cell class=\"md-cell ng-binding\">4445</td>\n <td md-cell class=\"md-cell\">\n <b class=\"ng-binding\">AARON FIGURA</b>\n <br>\n <small class=\"ng-binding\">LONG, M/34</small>\n </td>\n <td md-cell ng-show=\"show_net\" class=\"md-cell\" aria-hidden=\"false\">\n <span ng-show=\"entrant.chiptime\" class=\"ng-binding\" aria-hidden=\"false\">5:25:36</span>\n </td>",
"_____no_output_____"
]
],
[
[
"import pandas\nimport requests\nimport time\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup",
"_____no_output_____"
],
[
"url = \"http://results2.xacte.com/#/e/2306/searchable\"\nresponse = requests.get(url)",
"_____no_output_____"
],
[
"if response.status_code==200:\n print(response.text)",
"<!DOCTYPE html><html id=ng-app ng-app=results><head><title>Xact Results</title><meta charset=utf-8><meta http-equiv=X-UA-Compatible content=\"IE=edge\"><meta name=viewport content=\"width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no\"><meta name=format-detection content=\"telephone=no\"><meta name=apple-mobile-web-app-capable content=yes><meta name=apple-mobile-web-app-status-bar-style content=black-translucent><link href=\"https://fonts.googleapis.com/css?family=Unica+One|Roboto:300,400,700\" rel=stylesheet type=text/css><link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=stylesheet><link rel=stylesheet href=\"css/ng-quick-date-default-theme.css?v=1552475396\"><link rel=stylesheet href=\"css/ng-quick-date.css?v=1552475396\"><link rel=stylesheet href=\"css/md-data-table.css?v=1552475396\"><link rel=stylesheet href=\"css/main.css?v=1552475396\"><link rel=stylesheet href=\"css/angular-material.min.css?v=1552475396\"></head><body><div class=results-app results-app></div><script src=\"https://maps.google.com/maps/api/js?libraries=geometry&key=AIzaSyBVif8cack80Pf-QQMdSe43lPs4DNqbyKk\"></script><script>\n\t\t (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n\t\t (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n\t\t m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n\t\t\t})(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n\t\tga('create', 'UA-48980153-1', { 'cookieDomain': 'none' });\n\t\t</script><script src=\"app.js?v=1552475396\"></script></body></html>\n"
],
[
"# https://www.freecodecamp.org/news/how-to-scrape-websites-with-python-and-beautifulsoup-5946935d93fe/\n# https://codeburst.io/web-scraping-101-with-python-beautiful-soup-bb617be1f486\n\nsoup = BeautifulSoup(response.content, 'html.parser')",
"_____no_output_____"
],
[
"print(soup.prettify())",
"<!DOCTYPE html>\n<html id=\"ng-app\" ng-app=\"results\">\n <head>\n <title>\n Xact Results\n </title>\n <meta charset=\"utf-8\"/>\n <meta content=\"IE=edge\" http-equiv=\"X-UA-Compatible\"/>\n <meta content=\"width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no\" name=\"viewport\"/>\n <meta content=\"telephone=no\" name=\"format-detection\"/>\n <meta content=\"yes\" name=\"apple-mobile-web-app-capable\"/>\n <meta content=\"black-translucent\" name=\"apple-mobile-web-app-status-bar-style\"/>\n <link href=\"https://fonts.googleapis.com/css?family=Unica+One|Roboto:300,400,700\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\"/>\n <link href=\"css/ng-quick-date-default-theme.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/ng-quick-date.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/md-data-table.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/main.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/angular-material.min.css?v=1552475396\" rel=\"stylesheet\"/>\n </head>\n <body>\n <div class=\"results-app\" results-app=\"\">\n </div>\n <script src=\"https://maps.google.com/maps/api/js?libraries=geometry&key=AIzaSyBVif8cack80Pf-QQMdSe43lPs4DNqbyKk\">\n </script>\n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n\t\t (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n\t\t m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n\t\t\t})(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n\t\tga('create', 'UA-48980153-1', { 'cookieDomain': 'none' });\n </script>\n <script src=\"app.js?v=1552475396\">\n </script>\n </body>\n</html>\n"
],
[
"soup.find_all(class_=\"results-app\")",
"_____no_output_____"
]
],
[
[
"<tr md-row=\"\" md-select=\"entrant\" md-select-id=\"name\" md-auto-select=\"\" ng-repeat=\"entrant in entrants\" ng-click=\"showEntrantInfo(entrant)\" class=\"md-row ng-scope ng-isolate-scope\" role=\"button\" tabindex=\"0\" style=\"\"><td md-cell=\"\" class=\"md-cell ng-binding\">4445</td><td md-cell=\"\" class=\"md-cell\"><b class=\"ng-binding\">AARON FIGURA</b><br><small class=\"ng-binding\">LONG, M/34</small></td><td md-cell=\"\" hide-xs=\"\" class=\"hide-xs md-cell ng-binding\">MANHATTAN BEACH, CA</td><td md-cell=\"\" ng-show=\"show_net\" class=\"md-cell\" aria-hidden=\"false\"><span ng-show=\"entrant.chiptime\" class=\"ng-binding\" aria-hidden=\"false\">5:25:36</span></td><td md-cell=\"\" ng-show=\"show_clock\" class=\"md-cell ng-hide\" aria-hidden=\"true\"><span ng-show=\"entrant.clocktime\" class=\"ng-binding\" aria-hidden=\"false\">5:25:36</span></td></tr>",
"_____no_output_____"
]
],
[
[
"# aria-hidden=false when the box with info is closed; true when you open up the box.\n# you'll want to set it to true when viewing all the information per individual\n\n\"\"\"\n<md-backdrop class=\"md-dialog-backdrop md-opaque ng-scope\" style=\"position: fixed;\" aria-hidden=\"true\"></md-backdrop>\n\"\"\"",
"_____no_output_____"
],
[
"soup.find_all(name = \"md-row\")#, class_=\"md-select\")",
"_____no_output_____"
],
[
"stuff = []\nfor i in range(36, len(soup.findAll('a')) + 1): #'a' tags are for links\n one_a_tag = soup.findAll('a')[i]\n link = one_a_tag['href']\n download_url = url + link\n stuff.append(urllib.request.urlretrieve(download_url,'./'+link[link.find('/turnstile_')+1:]))\n time.sleep(1) #pause the code for a sec",
"_____no_output_____"
],
[
"stuff",
"_____no_output_____"
],
[
"soup.find_all('head')",
"_____no_output_____"
],
[
"soup.script",
"_____no_output_____"
],
[
"# https://www.geeksforgeeks.org/implementing-web-scraping-python-beautiful-soup/\n\nsoup2 = BeautifulSoup(response.content, 'html5lib')",
"_____no_output_____"
],
[
"print(soup2.prettify())",
"<!DOCTYPE html>\n<html id=\"ng-app\" ng-app=\"results\">\n <head>\n <title>\n Xact Results\n </title>\n <meta charset=\"utf-8\"/>\n <meta content=\"IE=edge\" http-equiv=\"X-UA-Compatible\"/>\n <meta content=\"width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no\" name=\"viewport\"/>\n <meta content=\"telephone=no\" name=\"format-detection\"/>\n <meta content=\"yes\" name=\"apple-mobile-web-app-capable\"/>\n <meta content=\"black-translucent\" name=\"apple-mobile-web-app-status-bar-style\"/>\n <link href=\"https://fonts.googleapis.com/css?family=Unica+One|Roboto:300,400,700\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\"/>\n <link href=\"css/ng-quick-date-default-theme.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/ng-quick-date.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/md-data-table.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/main.css?v=1552475396\" rel=\"stylesheet\"/>\n <link href=\"css/angular-material.min.css?v=1552475396\" rel=\"stylesheet\"/>\n </head>\n <body>\n <div class=\"results-app\" results-app=\"\">\n </div>\n <script src=\"https://maps.google.com/maps/api/js?libraries=geometry&key=AIzaSyBVif8cack80Pf-QQMdSe43lPs4DNqbyKk\">\n </script>\n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n\t\t (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n\t\t m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n\t\t\t})(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n\t\tga('create', 'UA-48980153-1', { 'cookieDomain': 'none' });\n </script>\n <script src=\"app.js?v=1552475396\">\n </script>\n </body>\n</html>\n"
],
[
"soup.prettify() == soup2.prettify()",
"_____no_output_____"
],
[
"# https://pythonprogramming.net/introduction-scraping-parsing-beautiful-soup-tutorial/\n\nimport urllib.request\n\nsource = urllib.request.urlopen(url).read()",
"_____no_output_____"
],
[
"soup3 = BeautifulSoup(source,'lxml')",
"_____no_output_____"
],
[
"# title of the page\nprint(soup3.title)\n\n# get attributes:\nprint(soup3.title.name)\n\n# get values:\nprint(soup3.title.string)\n\n# beginning navigation:\nprint(soup3.title.parent.name)\n\n# getting specific values:\nprint(soup3.p)\n\nprint(soup3.div)",
"<title>Xact Results</title>\ntitle\nXact Results\nhead\nNone\n<div class=\"results-app\" results-app=\"\"></div>\n"
],
[
"print(soup3.get_text())",
"Xact Results\n\t\t (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n\t\t (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n\t\t m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n\t\t\t})(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n\t\tga('create', 'UA-48980153-1', { 'cookieDomain': 'none' });\n\t\t\n"
],
[
"browser = webdriver.Chrome()\nbrowser.get('https://google.com')",
"_____no_output_____"
],
[
"# https://sites.google.com/a/chromium.org/chromedriver/downloads for most recent version\n# At the time of writing, I downloaded Version 78:\n# https://chromedriver.storage.googleapis.com/index.html?path=78.0.3904.70/\n# Mac: once downloaded, move the driver from Downloads to /usr/local/bin\n# Windows: once downloaded, move somewhere relevant and then add to PATH\n\ndriver = webdriver.Chrome()\n",
"_____no_output_____"
],
[
"driver.get(url)",
"_____no_output_____"
],
[
"# get web page\ndriver.get(url)\n# execute script to scroll down the page\ndriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n# sleep for 30s\ntime.sleep(30)\n# driver.quit()",
"_____no_output_____"
],
[
"\"\"\"\n<!doctype html>\n<html id=\"ng-app\" ng-app=\"results\" class=\"ng-scope\">\n <body>\n <div class=\"results-app ng-isolate-scope\" results-app>\n <div class=\"page\" ng-show=\"eventconfig.schema\" aria-hidden=\"false\" style>\n <md-content class=\"xact-contact _md\">\n <div ui-view=\"content\" class=\"ng-scope\" style>\n <div class=\"xact-search ng-scope layout-column flex\" layout=\"column\" flex>\n <md-content flex class=\"_md flex\">\n <md-table-container ng-show=\"!loading\" aria-hidden=\"false\" class style>\n <table md-table md-progress=\"promise\" class=\"md-table ng-isolate-scope\">\n <tbody md-body class=\"md-body\">\n <tr md-row md-select=\"entrant\" md-select-id=\"name\" md-auto-select ng-repeat=\"entrant in entrants\" ng-click=\"showEntrantInfo(entrant)\" class=\"md-row ng-scope ng-isolate-scope\" role=\"button\" tabindex=\"0\" style>\n <td md-cell class=\"md-cell ng-binding\">4445</td>\n <td md-cell class=\"md-cell\">\n <b class=\"ng-binding\">AARON FIGURA</b>\n <br>\n <small class=\"ng-binding\">LONG, M/34</small>\n </td>\n <td md-cell ng-show=\"show_net\" class=\"md-cell\" aria-hidden=\"false\">\n <span ng-show=\"entrant.chiptime\" class=\"ng-binding\" aria-hidden=\"false\">5:25:36</span>\n </td>\n\"\"\"",
"_____no_output_____"
],
[
"# find elements by xpath\n#results = driver.find_elements_by_xpath(\"//*[@id='componentsContainer']//*[contains(@id,'listingsContainer')]//*[@class='product active']//*[@class='title productTitle']\")\n#results = driver.find_elements_by_xpath(\"//[@b class='ng-binding']\")\nresults = driver.find_elements_by_xpath(\"//div[contains(@class, 'xact-search ng-scope layout-column flex')]\")\nprint('Number of results', len(results))\n\n\n",
"Number of results 1\n"
],
[
"results = driver.find_elements_by_xpath(\"//div[contains(@class, 'xact-search ng-scope layout-column flex')]\")\nprint('Number of results', len(results))\n\n",
"Number of results 1\n"
],
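[
"# Hedged sketch (not from the original notebook): once the Angular table has\n# rendered, entrant rows could be pulled via the md-row class seen in the HTML\n# snippets above; the selector is an assumption about the rendered page.\nrows = driver.find_elements_by_xpath(\"//tr[contains(@class, 'md-row')]\")\nfor row in rows[:5]:\n    print(row.text)",
"_____no_output_____"
],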
[
"browser.quit()",
"_____no_output_____"
],
[
"driver.quit()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4913177f74ea909d65449bda67aea1ecf8a68 | 27,491 | ipynb | Jupyter Notebook | ShanKwanCho-NaiveBayesian-2/test/test (1).ipynb | ShanKwanCho/Data_Mining | 32bba958774b23b03c624a3fd294d6d628e6d10c | [
"MIT"
] | null | null | null | ShanKwanCho-NaiveBayesian-2/test/test (1).ipynb | ShanKwanCho/Data_Mining | 32bba958774b23b03c624a3fd294d6d628e6d10c | [
"MIT"
] | null | null | null | ShanKwanCho-NaiveBayesian-2/test/test (1).ipynb | ShanKwanCho/Data_Mining | 32bba958774b23b03c624a3fd294d6d628e6d10c | [
"MIT"
] | null | null | null | 42.228879 | 1,714 | 0.393656 | [
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"# Load the Drive helper and mount.\nfrom google.colab import drive \n\n# This will prompt for authorization.\ndrive.mount('/content/drive', force_remount=True)",
"Mounted at /content/drive\n"
],
[
"df = pd.read_csv(('adult.test.csv'), header = None)",
"_____no_output_____"
],
[
"df.columns = ['age', 'workclass', 'fnlwgt', 'education','education-num', 'marital-status','occupation', 'relationship','race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']",
"_____no_output_____"
],
[
"df['workclass'].mode",
"_____no_output_____"
],
[
"df['occupation'].mode()",
"_____no_output_____"
],
[
"df['native-country'].mode()",
"_____no_output_____"
],
[
"df['workclass'] = df['workclass'].replace(' ?', ' Private')\ndf['occupation'] = df['occupation'].replace(' ?',' Prof-specialty')\ndf['native-country'] = df['native-country'].replace(' ?',' United-States')\ndf['income'] = df['income'].replace(' <=50K.',' <=50K')\ndf['income'] = df['income'].replace(' >50K.',' >50K')",
"_____no_output_____"
],
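[
"# Quick check (illustrative, not in the original notebook): confirm that no\n# ' ?' placeholders remain after the replacements above.\n(df[['workclass', 'occupation', 'native-country']] == ' ?').sum()",
"_____no_output_____"
],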
[
"df.head() ",
"_____no_output_____"
],
[
"df = df.drop(['fnlwgt', 'education-num'] , 1)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.to_csv('replaceData_test1.csv', sep = ',', index = False)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f496658ea023b5cfcf2cbf1690b00bc6d37628 | 53,005 | ipynb | Jupyter Notebook | 04_train_phydnet.ipynb | shawnwang-tech/moving_mnist | d7cb4703e76440865e93caf214ffa2afe57dbd69 | [
"Apache-2.0"
] | null | null | null | 04_train_phydnet.ipynb | shawnwang-tech/moving_mnist | d7cb4703e76440865e93caf214ffa2afe57dbd69 | [
"Apache-2.0"
] | null | null | null | 04_train_phydnet.ipynb | shawnwang-tech/moving_mnist | d7cb4703e76440865e93caf214ffa2afe57dbd69 | [
"Apache-2.0"
] | null | null | null | 90.606838 | 17,048 | 0.804415 | [
[
[
"from fastai.vision.all import *\nfrom moving_mnist.models.phy_original import *\nfrom moving_mnist.models.seq2seq import TeacherForcing\nfrom moving_mnist.models.conv_rnn import StackUnstack, StackLoss, MultiImageDice\nfrom moving_mnist.data import *",
"_____no_output_____"
],
[
"if torch.cuda.is_available():\n torch.cuda.set_device(0)\n print(torch.cuda.get_device_name())\n device = 0",
"GeForce RTX 2080 Ti\n"
]
],
[
[
"# Train PhyDNet:",
"_____no_output_____"
],
[
"We wil predict:\n- `n_in`: 5 images\n- `n_out`: 5 images \n- `n_obj`: up to 3 objects",
"_____no_output_____"
]
],
[
[
"Path.cwd()",
"_____no_output_____"
],
[
"DATA_PATH = Path.cwd()/'data'",
"_____no_output_____"
],
[
"ds = MovingMNIST(DATA_PATH, n_in=5, n_out=5, n_obj=[1,2], th=None)",
"_____no_output_____"
],
[
"train_tl = TfmdLists(range(120), ImageTupleTransform(ds))\nvalid_tl = TfmdLists(range(120), ImageTupleTransform(ds))",
"_____no_output_____"
],
[
"# i=0\n# fat_tensor = torch.stack([torch.cat(train_tl[i][0], 0) for i in range(100)])\n\n# m,s = fat_tensor.mean(), fat_tensor.std()",
"_____no_output_____"
],
[
"dls = DataLoaders.from_dsets(train_tl, valid_tl, bs=64,#).cuda()\n after_batch=[Normalize.from_stats(*mnist_stats)]).cuda()",
"_____no_output_____"
],
[
"mse_loss = StackLoss(MSELossFlat(axis=1))\nmetrics = []",
"_____no_output_____"
]
],
[
[
"Left: Input, Right: Target",
"_____no_output_____"
]
],
[
[
"dls.show_batch()",
"_____no_output_____"
],
[
"b = dls.one_batch()",
"_____no_output_____"
],
[
"explode_types(b)",
"_____no_output_____"
]
],
[
[
"## PhyDNet",
"_____no_output_____"
]
],
[
[
"phycell = PhyCell(input_shape=(16,16), input_dim=64, F_hidden_dims=[49], n_layers=1, kernel_size=(7,7)) \nconvlstm = ConvLSTM(input_shape=(16,16), input_dim=64, hidden_dims=[128,128,64], n_layers=3, kernel_size=(3,3)) \nencoder = EncoderRNN(phycell, convlstm)",
"_____no_output_____"
],
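[
"# Illustrative check (not in the original notebook): rough trainable-parameter\n# count of the encoder before it is wrapped below.\nsum(p.numel() for p in encoder.parameters() if p.requires_grad)",
"_____no_output_____"
],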
[
"model = StackUnstack(PhyDNet(encoder, sigmoid=False, moment=True), dim=1).cuda()",
"_____no_output_____"
]
],
[
[
"A handy callback to include the loss computed inside the model to the target loss",
"_____no_output_____"
]
],
[
[
"#export\nclass PHyCallback(Callback):\n def after_pred(self):\n self.learn.pred, self.loss_phy = self.pred\n def after_loss(self):\n self.learn.loss += self.loss_phy",
"_____no_output_____"
],
[
"learn = Learner(dls, model, loss_func=mse_loss, metrics=metrics, \n cbs=[TeacherForcing(10), PHyCallback()], opt_func=ranger)",
"_____no_output_____"
],
[
"learn.lr_find()",
"_____no_output_____"
],
[
"learn.fit_flat_cos(25, 3e-3)",
"_____no_output_____"
],
[
"p,t = learn.get_preds(1)",
"_____no_output_____"
],
[
"len(p), p[0].shape",
"_____no_output_____"
],
[
"def show_res(t, idx, argmax=False):\n if argmax:\n im_seq = ImageSeq.create([t[i][idx].argmax(0).unsqueeze(0) for i in range(5)], TensorMask)\n else:\n im_seq = ImageSeq.create([t[i][idx] for i in range(5)])\n im_seq.show(figsize=(8,4));",
"_____no_output_____"
],
[
"k = random.randint(0,99)\nshow_res(t,k)\nshow_res(p,k)",
"_____no_output_____"
],
[
"learn.save('phydnet')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4a19d43e34636eabda8b82108158a7fcb61cd | 6,822 | ipynb | Jupyter Notebook | dataAnalysis/ETCClassifier.ipynb | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | [
"MIT"
] | null | null | null | dataAnalysis/ETCClassifier.ipynb | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | [
"MIT"
] | null | null | null | dataAnalysis/ETCClassifier.ipynb | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | [
"MIT"
] | null | null | null | 27.959016 | 334 | 0.578423 | [
[
[
"## Project led by Nikolas Papastavrou\n## Code developed by Varun Bopardikar\n## Data Analysis conducted by Selina Ho, Hana Ahmed",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np \nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom datetime import datetime\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression",
"_____no_output_____"
]
],
[
[
"# Load Data",
"_____no_output_____"
]
],
[
[
"def gsev(val): \n \"\"\"\n Records whether or not a number is greater than 7. \n \"\"\"\n if val <= 7: \n return 0\n else: \n return 1\n\ndf = pd.read_csv('../../fservice.csv')\ndf['Just Date'] = df['Just Date'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d'))\ndf['Seven'] = df['ElapsedDays'].apply(gsev, 0)",
"/Users/varunbopardikar/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3057: DtypeWarning: Columns (10,33) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
]
],
[
[
"# Parameters ",
"_____no_output_____"
]
],
[
[
"c = ['Anonymous','AssignTo', 'RequestType', 'RequestSource','CD','Direction', 'ActionTaken', 'APC' ,'AddressVerified']\nd = ['Latitude', 'Longitude']",
"_____no_output_____"
]
],
[
[
"# Feature Cleaning ",
"_____no_output_____"
]
],
[
[
"#Put desired columns into dataframe, drop nulls. \ndfn = df.filter(items = c + d + ['ElapsedDays'] + ['Seven'])\ndfn = dfn.dropna()\n \n#Separate data into explanatory and response variables\nXCAT = dfn.filter(items = c).values\nXNUM = dfn.filter(items = d).values\n \ny = dfn['ElapsedDays'] <= 7\n \n#Encode cateogrical data and merge with numerical data\nlabelencoder_X = LabelEncoder()\nfor num in range(len(c)): \n XCAT[:, num] = labelencoder_X.fit_transform(XCAT[:, num])\n \nonehotencoder = OneHotEncoder()\nXCAT = onehotencoder.fit_transform(XCAT).toarray()\n \nX = np.concatenate((XCAT, XNUM), axis=1)",
"/Users/varunbopardikar/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n"
]
],
[
[
"# Algorithms and Hyperparameters",
"_____no_output_____"
]
],
[
[
"##Used Random Forest in Final Model \n\ngnb = GaussianNB()\ndc = tree.DecisionTreeClassifier(criterion = 'entropy', max_depth = 20)\nrf = RandomForestClassifier(n_estimators = 50, max_depth = 20)\nlr = LogisticRegression()",
"_____no_output_____"
]
],
[
[
"# Validation Set",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.2, random_state = 0)\n\n#Train Model\nclassifier = rf\n\nclassifier.fit(X_train, y_train)\n\n#Test model\ny_vpred = classifier.predict(X_val)\n\n#Print Accuracy Function results\nprint(\"Accuracy:\",metrics.accuracy_score(y_val, y_vpred))\nprint(\"Precision, Recall, F1Score:\",metrics.precision_recall_fscore_support(y_val, y_vpred, average = 'binary'))",
"Accuracy: 0.9385983549336814\nPrecision, Recall, F1Score: (0.946896616482519, 0.9893259382317161, 0.9676463908853341, None)\n"
]
],
[
[
"# Test Set",
"_____no_output_____"
]
],
[
[
"#Train Model\n\n#Test model\ny_tpred = classifier.predict(X_test)\n\n#Print Accuracy Function results\n\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_tpred))\nprint(\"Precision, Recall, F1Score:\",metrics.precision_recall_fscore_support(y_test, y_tpred, average = 'binary'))",
"Accuracy: 0.9387186223709323\nPrecision, Recall, F1Score: (0.9468199376863904, 0.9895874917412928, 0.9677314319565967, None)\n"
]
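,
[
"# Illustrative extension (not in the original notebook): a fitted random\n# forest exposes per-feature importances; the last two entries of X are the\n# numerical Latitude and Longitude columns appended after the one-hot block.\nprint(classifier.feature_importances_[-2:])",
"_____no_output_____"
]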
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f4c61182f135fdc715da98d95ca2362059c24e | 87,069 | ipynb | Jupyter Notebook | Image_Classification_with_TensorBoard_Application.ipynb | shivtejshete/Computer-Vision | 284ab7ca93a0284de65d66b073b706131dedbbe9 | [
"MIT"
] | null | null | null | Image_Classification_with_TensorBoard_Application.ipynb | shivtejshete/Computer-Vision | 284ab7ca93a0284de65d66b073b706131dedbbe9 | [
"MIT"
] | null | null | null | Image_Classification_with_TensorBoard_Application.ipynb | shivtejshete/Computer-Vision | 284ab7ca93a0284de65d66b073b706131dedbbe9 | [
"MIT"
] | null | null | null | 144.153974 | 45,394 | 0.841918 | [
[
[
"<a href=\"https://colab.research.google.com/github/shivtejshete/Computer-Vision/blob/master/Image_Classification_with_TensorBoard_Application.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Deep Learning : Simple DNN to Classify Images, and application of TensorBoard.dev",
"_____no_output_____"
]
],
[
[
"#Importing the necessary libraries\nimport tensorflow as tf\nimport keras\nimport tensorflow.keras.datasets.fashion_mnist as data\nimport numpy as np\nfrom time import time \nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"###1. Loading Data",
"_____no_output_____"
]
],
[
[
"#Assigning the raw datafrom Keras dataset - Fashion MNIST\nraw_data = data",
"_____no_output_____"
],
[
"#Loading the dataset into training and validation dataset\n(train_image, train_label), (test_image, test_label) = raw_data.load_data( )",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\n"
]
],
[
[
"###2. Data Inspection",
"_____no_output_____"
]
],
[
[
"#checking the input volume shape \nprint(\"Total Training Images :{}\".format(train_image.shape[0]))\nprint(\"Training Images Shape (ht,wd) :{} X {}\".format(train_image.shape[1],train_image.shape[2]))\nprint(\"Total Testing Images :{}\".format(test_image.shape[0]))\nprint(\"Testing Images Shape (ht,wd) :{} X {}\".format(test_image.shape[1],test_image.shape[2]))\n",
"Total Training Images :60000\nTraining Images Shape (ht,wd) :28 X 28\nTotal Testing Images :10000\nTesting Images Shape (ht,wd) :28 X 28\n"
]
],
[
[
"###3. Rescaling Data",
"_____no_output_____"
]
],
[
[
"#rescaling the images for better training of Neural Network\ntrain_image = train_image/255.0 \ntest_image = test_image/255.0",
"_____no_output_____"
],
[
"#Existing Image classes from Fashion MNIST - in original Order\nclass_labels= ['T-shirt/top',\n 'Trouser',\n 'Pullover',\n 'Dress',\n 'Coat',\n 'Sandal',\n 'Shirt',\n 'Sneaker',\n 'Bag',\n 'Ankle boot']",
"_____no_output_____"
]
],
[
[
"###4. Sample Images Visualization",
"_____no_output_____"
]
],
[
[
"#Visualizing some of the training images\n\nfig, ax= plt.subplots(3,3, figsize=(10,10) )\nfor i,img in enumerate(ax.flatten()):\n img.pcolor(train_image[i])\n img.set_title(class_labels[train_label[i]])\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"###5. Building the Model Architecture",
"_____no_output_____"
]
],
[
[
"#Defining a very Simple Deep Neural Network with Softmax as activation function of the top layer for multi-class classification\nmodel = keras.Sequential()\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(256, activation= 'relu', use_bias= True))\nmodel.add(keras.layers.Dropout(rate= .2))\nmodel.add(keras.layers.Dense(64, activation='relu', use_bias=True))\nmodel.add(keras.layers.Dropout(rate= .2))\nmodel.add(keras.layers.Dense(10, activation='softmax' ))",
"_____no_output_____"
]
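,
[
"# Hedged sketch (assumption, not part of the original run): the same\n# architecture with L2 weight regularization, one option for narrowing the\n# train/validation gap noted in the summary below.\nreg_model = keras.Sequential()\nreg_model.add(keras.layers.Flatten())\nreg_model.add(keras.layers.Dense(256, activation='relu', use_bias=True, kernel_regularizer=keras.regularizers.l2(1e-4)))\nreg_model.add(keras.layers.Dropout(rate=.2))\nreg_model.add(keras.layers.Dense(64, activation='relu', use_bias=True, kernel_regularizer=keras.regularizers.l2(1e-4)))\nreg_model.add(keras.layers.Dropout(rate=.2))\nreg_model.add(keras.layers.Dense(10, activation='softmax'))",
"_____no_output_____"
]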
],
[
[
"###6. Defining TensorBoard for Training visualization",
"_____no_output_____"
]
],
[
[
"#creating a tensorboard object to be called while training the model\ntensorboard = keras.callbacks.TensorBoard(log_dir='.../logs', histogram_freq=1, batch_size=1000, write_grads=True, write_images=True )",
"/usr/local/lib/python3.6/dist-packages/keras/callbacks/tensorboard_v2.py:92: UserWarning: The TensorBoard callback `batch_size` argument (for histogram computation) is deprecated with TensorFlow 2.0. It will be ignored.\n warnings.warn('The TensorBoard callback `batch_size` argument '\n/usr/local/lib/python3.6/dist-packages/keras/callbacks/tensorboard_v2.py:97: UserWarning: The TensorBoard callback does not support gradients display when using TensorFlow 2.0. The `write_grads` argument is ignored.\n warnings.warn('The TensorBoard callback does not support '\n"
]
],
[
[
"###7. Model Training",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics= ['accuracy'])",
"_____no_output_____"
],
[
"# Fitting the model with tensorboard object as callbacks\nmodel.fit(train_image, train_label, batch_size=1000, epochs = 24, validation_data=(test_image, test_label), callbacks=[tensorboard] )",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/24\n60000/60000 [==============================] - 2s 37us/step - loss: 0.8993 - accuracy: 0.6824 - val_loss: 0.5451 - val_accuracy: 0.8151\nEpoch 2/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.5324 - accuracy: 0.8156 - val_loss: 0.4557 - val_accuracy: 0.8432\nEpoch 3/24\n60000/60000 [==============================] - 2s 26us/step - loss: 0.4532 - accuracy: 0.8406 - val_loss: 0.4259 - val_accuracy: 0.8447\nEpoch 4/24\n60000/60000 [==============================] - 2s 28us/step - loss: 0.4132 - accuracy: 0.8542 - val_loss: 0.3961 - val_accuracy: 0.8590\nEpoch 5/24\n60000/60000 [==============================] - 2s 28us/step - loss: 0.3872 - accuracy: 0.8636 - val_loss: 0.3810 - val_accuracy: 0.8628\nEpoch 6/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.3673 - accuracy: 0.8701 - val_loss: 0.3669 - val_accuracy: 0.8680\nEpoch 7/24\n60000/60000 [==============================] - 2s 28us/step - loss: 0.3517 - accuracy: 0.8750 - val_loss: 0.3575 - val_accuracy: 0.8681\nEpoch 8/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.3392 - accuracy: 0.8778 - val_loss: 0.3513 - val_accuracy: 0.8729\nEpoch 9/24\n60000/60000 [==============================] - 2s 26us/step - loss: 0.3301 - accuracy: 0.8800 - val_loss: 0.3450 - val_accuracy: 0.8747\nEpoch 10/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.3190 - accuracy: 0.8842 - val_loss: 0.3410 - val_accuracy: 0.8765\nEpoch 11/24\n60000/60000 [==============================] - 2s 26us/step - loss: 0.3123 - accuracy: 0.8869 - val_loss: 0.3368 - val_accuracy: 0.8775\nEpoch 12/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.3043 - accuracy: 0.8896 - val_loss: 0.3425 - val_accuracy: 0.8769\nEpoch 13/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2983 - accuracy: 0.8914 - val_loss: 0.3262 - val_accuracy: 0.8835\nEpoch 14/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2939 - accuracy: 0.8934 - val_loss: 0.3279 - val_accuracy: 0.8811\nEpoch 15/24\n60000/60000 [==============================] - 2s 28us/step - loss: 0.2864 - accuracy: 0.8959 - val_loss: 0.3195 - val_accuracy: 0.8841\nEpoch 16/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2774 - accuracy: 0.8983 - val_loss: 0.3211 - val_accuracy: 0.8818\nEpoch 17/24\n60000/60000 [==============================] - 2s 26us/step - loss: 0.2751 - accuracy: 0.9001 - val_loss: 0.3280 - val_accuracy: 0.8816\nEpoch 18/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2687 - accuracy: 0.9020 - val_loss: 0.3195 - val_accuracy: 0.8856\nEpoch 19/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2663 - accuracy: 0.9020 - val_loss: 0.3277 - val_accuracy: 0.8799\nEpoch 20/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2593 - accuracy: 0.9040 - val_loss: 0.3197 - val_accuracy: 0.8858\nEpoch 21/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2552 - accuracy: 0.9062 - val_loss: 0.3189 - val_accuracy: 0.8855\nEpoch 22/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2515 - accuracy: 0.9073 - val_loss: 0.3191 - val_accuracy: 0.8870\nEpoch 23/24\n60000/60000 [==============================] - 2s 27us/step - loss: 0.2473 - accuracy: 0.9101 - val_loss: 0.3091 - val_accuracy: 0.8893\nEpoch 24/24\n60000/60000 [==============================] - 
2s 27us/step - loss: 0.2403 - accuracy: 0.9111 - val_loss: 0.3104 - val_accuracy: 0.8908\n"
],
[
"model.summary()",
"Model: \"sequential_9\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_8 (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense_20 (Dense) (None, 256) 200960 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_21 (Dense) (None, 64) 16448 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_22 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 218,058\nTrainable params: 218,058\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"###8. Uploading the logs to TensorBoard.dev",
"_____no_output_____"
]
],
[
[
"# #checking out the TensorBoard dashboard to analyze training and validation performance with other statistics during the training of model\n%reload_ext tensorboard\n!tensorboard dev upload --logdir '.../logs' --name \"Deep Learning : Tensorboard\" --description \"Modeling a very simple Image Classifier based on Fashion MNIST dataset \" ",
"_____no_output_____"
]
],
[
[
"Live Link : https://tensorboard.dev/experiment/u6hGU2LaQqKn1b1udgL1RA/",
"_____no_output_____"
],
[
"###9. Making a Sample Prediction",
"_____no_output_____"
]
],
[
[
"#selection of an image\nsample = test_image[6]\nplt.imshow(sample)\nplt.xlabel(class_labels[test_label[6]])\nplt.title(test_label[6])\nplt.tight_layout()",
"_____no_output_____"
],
[
"#Prediction using trained model\nresults= model.predict(test_image[6].reshape(1,28,28))\nplt.bar(np.arange(0,10),results[0], tick_label=class_labels, )\nplt.xticks(rotation=45)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"###Summary\n1. This Simple Deep learning Model performance shows it achieved an accuracy of 91% at the end of 24th Epochs, and validation accuracy of 89%.\n2. The divergence between the accuracies or loss function values indicates a potential overfitting. Model can be generalized using other regularization methodologies such as weight regularization, bias regularization, dropout and others.\n3. TensorBoard offered us high understanding of online-training performance and more details in the form of Graphs, Histograms and Distributions.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7f4c6ae7f9d2204e04dc3e99040fcb0cd2d0ac1 | 195,708 | ipynb | Jupyter Notebook | CMD_Star_Cluster.ipynb | tiseever/Adv.-Astro.-Lab-Code | 4f7dd87b8604f5236193d1abe4b28d1f0bc92b63 | [
"MIT"
] | null | null | null | CMD_Star_Cluster.ipynb | tiseever/Adv.-Astro.-Lab-Code | 4f7dd87b8604f5236193d1abe4b28d1f0bc92b63 | [
"MIT"
] | null | null | null | CMD_Star_Cluster.ipynb | tiseever/Adv.-Astro.-Lab-Code | 4f7dd87b8604f5236193d1abe4b28d1f0bc92b63 | [
"MIT"
] | null | null | null | 172.582011 | 75,612 | 0.856015 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as sp",
"_____no_output_____"
],
[
"Objects = pd.read_excel('/Users/hitom/ASTR136_Code/Reduced_CSV/All_Objects.xlsx')",
"_____no_output_____"
],
[
"Objects",
"_____no_output_____"
],
[
"Magnitude_Blue = Objects['Magnitude Blue:']\nMagnitude_Visible = Objects['Magnitude Visible:']\nMagnitude_Blue_Uncertainty = Objects['MB Unc:']\nMagnitude_Visible_Uncertainty = Objects['MV Unc:']\nColor = Objects['Color:']\nColor_Uncertainty = Objects['Color Unc:']",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,7))\nplt.errorbar(Color,Magnitude_Visible,xerr=200*Color_Uncertainty,yerr=2000*Magnitude_Visible_Uncertainty,fmt='ro',ecolor='k',\\\n markersize=4.5)\nplt.xlabel('Color Index(B-V)',fontsize='15')\nplt.ylabel('Apparent Visual Magnitude',fontsize='15')\nplt.xticks(np.arange(-1,2,step=0.5),fontsize='12')\nplt.yticks(np.arange(5,16),fontsize='12')\nplt.ylim(15,5)\nplt.title('CMD of IC 4665',fontsize='17')\nplt.savefig('CMD',bbox='tight');",
"_____no_output_____"
],
[
"Isochrone = pd.ExcelFile('/Users/hitom/Documents/isocrone.xlsx',sheet=0)\nIsochrone.sheet_names\nI1 = Isochrone.parse('7.634-8')\nI2 = Isochrone.parse('7-7.634')\nI3 = Isochrone.parse('7.434-7.834')\nI4 = Isochrone.parse('8-8.5')\nI5 = Isochrone.parse('6.5-7')",
"_____no_output_____"
],
[
"Bmag1 = I1['Bmag']\nVmag1 = I1['Vmag']\nColorI1 = I1['Color']\nBmag2 = I2['Bmag']\nVmag2 = I2['Vmag']\nColorI2 = I2['Color']\nBmag3 = I3['Bmag']\nVmag3 = I3['Vmag']\nColorI3 = I3['Color']\nBmag4 = I4['Bmag']\nVmag4 = I4['Vmag']\nColorI4 = I4['Color']\nBmag5 = I5['Bmag']\nVmag5 = I5['Vmag']\nColorI5 = I5['Color']",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,7))\nplt.plot(ColorI1,Vmag1,label='Logage = 7.634-8.000 age/year')\nplt.plot(ColorI2,Vmag2,label='Logage = 7.000-7.634 age/year')\nplt.plot(ColorI3,Vmag3,label='Logage = 7.434-7.834 age/year')\nplt.xlabel('Color Index(B-V)',fontsize='15')\nplt.ylabel('Absolute Visual Magnitude',fontsize='15')\nplt.title('Isotrope Overlay of Cluster Objects using Color vs. Magnitude',fontsize='17')\nplt.yticks(np.arange(-6,18,step=2))\nplt.ylim(16,-6)\nplt.xticks(np.arange(-0.5,2.5,step=.5));",
"_____no_output_____"
],
[
"MV = Magnitude_Visible.values\nMV",
"_____no_output_____"
],
[
"Absolute_MV = np.empty(MV.shape)\nd = 352\nfor i in range(len(MV)):\n Absolute_MV[i] = MV[i] + 5 - 5*np.log10(d)\nAbsolute_MV",
"_____no_output_____"
],
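[
"# Equivalent vectorized form (illustrative): the distance-modulus relation\n# M = m + 5 - 5*log10(d) applies to the whole array at once.\nnp.allclose(Absolute_MV, MV + 5 - 5*np.log10(d))",
"_____no_output_____"
],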
[
"plt.figure(figsize=(12,7))\nplt.errorbar(Color,Absolute_MV,xerr=200*Color_Uncertainty,yerr=2000*Magnitude_Visible_Uncertainty,fmt='ro',ecolor='k',\\\n markersize=4.5,label = 'Measured Objects around Cluster')\nplt.plot(ColorI1,Vmag1,label='Logage = 7.634-8.000 age/year')\nplt.plot(ColorI2,Vmag2,label='Logage = 7.000-7.634 age/year')\nplt.plot(ColorI3,Vmag3,label='Logage = 7.434-7.834 age/year')\nplt.plot(ColorI4,Vmag4,label='Logage = 8.000-8.500 age/year')\nplt.plot(ColorI5,Vmag5,label='Logage = 6.500-7.000 age/year')\nplt.xlabel('Color Index(B-V)',fontsize='15')\nplt.ylabel('Absolute Visual Magnitude',fontsize='15')\nplt.xticks(np.arange(-1,2,step=0.5),fontsize='12')\nplt.yticks(np.arange(-2,6),fontsize='12')\nplt.xlim(-1,1.5)\nplt.ylim(6,-2)\nplt.title('Isochrones Over IC 4665 Objects',fontsize='17')\nplt.legend(fontsize=10)\nplt.savefig('CMDOverlayed',bbox='tight');",
"_____no_output_____"
],
[
"def cs(x,y):\n Matched_Array = np.zeros(Absolute_MV.shape)\n for i in range(len(Absolute_MV)):\n Flag = 0\n for j in range(len(y.values)):\n if(Flag==1):\n continue\n elif(np.abs(x.values[j]-Absolute_MV[i])<1):\n Matched_Array[i] = y.values[j]\n Flag = 1\n else:\n continue\n cs = sp.chisquare(Absolute_MV,Matched_Array)\n return cs",
"_____no_output_____"
],
[
"CS1 = cs(Vmag1,ColorI1)[0]/len(Absolute_MV)\nCS2 = cs(Vmag2,ColorI2)[0]/len(Absolute_MV)\nCS3 = cs(Vmag3,ColorI3)[0]/len(Absolute_MV)\nCS4 = cs(Vmag4,ColorI4)[0]/len(Absolute_MV)\nCS5 = cs(Vmag5,ColorI5)[0]/len(Absolute_MV)",
"_____no_output_____"
],
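[
"# Added sketch: collect the five normalized chi-square values (statistic / N) from the\n# cell above so the best-matching isochrone age range is easy to read off; smaller is\n# a closer match under this matching scheme. Labels mirror the isochrone sheet names.\nfor label, value in zip(['7.634-8', '7-7.634', '7.434-7.834', '8-8.5', '6.5-7'],\n                        [CS1, CS2, CS3, CS4, CS5]):\n    print(label, value)",
"_____no_output_____"
],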
[
"CS1",
"_____no_output_____"
],
[
"CS2",
"_____no_output_____"
],
[
"CS3",
"_____no_output_____"
],
[
"CS4",
"_____no_output_____"
],
[
"CS5",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4ca1013356b5c454dcd5c1b0e324e2b788dbd | 10,222 | ipynb | Jupyter Notebook | DataWrangling.ipynb | niharikabalachandra/Data-Wrangling-Example | d0ed9906f45fd54e2f4499c892a679e5454873e6 | [
"MIT"
] | null | null | null | DataWrangling.ipynb | niharikabalachandra/Data-Wrangling-Example | d0ed9906f45fd54e2f4499c892a679e5454873e6 | [
"MIT"
] | null | null | null | DataWrangling.ipynb | niharikabalachandra/Data-Wrangling-Example | d0ed9906f45fd54e2f4499c892a679e5454873e6 | [
"MIT"
] | null | null | null | 44.060345 | 479 | 0.611818 | [
[
[
"# Dimensional Mechanics Coding Challenge\n\n## Problem Statement\n\n“You are given a dictionary (dictionary.txt), containing a list of words, one per line. Imagine you have seven tiles. Each tile is either blank or contains a single lowercase letter (a-z).
Please list all the words from the dictionary that can be produced by using some or all of the seven tiles, in any order. A blank tile is a wildcard, and can be used in place of any letter.\n\n1. Find all of the words that can be formed if you don't have to deal with blank tiles. \n2. Find all of the words that can be formed, including those where blank tiles are used as wildcards.\n3. Please bear in mind you will need to process several hundred of 7-tile sets with the same dictionary.",
"_____no_output_____"
],
[
"## Solution\n\nConsider the 7 tiles, each tile can either be a blank or contains a single lowercase letter from a to z. We have the option of using some or all of the given 7 tiles. Therefore, each tile can be filled in 27 ways (a-z or blank). The word that we generate can either be a 1, 2, 3, 4, 5, 6, or 7 letter word. The sample space for all the possible tile combinations as per the above requirements will have 10862674479 (27^1 + 27^2 + 27^3 + 27^4 + 27^5 + 27^6 +27^7) words. \n\nHowever, since we only have to worry about the tile combinations (of characters a-z or blank) that form words which match with the ones in the dictionary, our new sample space is all the words in the given dictionary.\n\nSince we can only form words that are 7 letter or smaller, we eliminated all word that have more than 7 letter from the given dictionary. This would also be all of the words that can be formed if you don't have to deal with blank tiles. In this code, 'valid_words' stores this list. You can view this list of words in the 'wordlist.csv' file.\n\nTo find all of the words that can be formed, including those where blank tiles are used as wildcards we have to realize all the possible combination of words that are formed if the blank is replaced by any letter. For example, the word 'b-girl' represents the following set of combinations:\n\n['bagirl', 'bbgirl', 'bcgirl', 'bdgirl', 'begirl', 'bfgirl', 'bggirl', 'bhgirl', 'bigirl', 'bjgirl', 'bkgirl', 'blgirl', 'bmgirl', 'bngirl', 'bogirl', 'bpgirl', 'bqgirl', 'brgirl', 'bsgirl', 'btgirl', 'bugirl', 'bvgirl', 'bwgirl', 'bxgirl', 'bygirl', 'bzgirl']\n\nThe function replace() executes this idea and we apply this function to all the words that contain a blank in the list 'valid_words' ('valid_words' houses all the words of interest from the given dictionary). The combination of words that the blank wildcard represents is stored in the 'new_dictionary' list. \n \nThe 'new_dictionary' list is added to the 'valid_words' list to form all of the words that can be formed, including those where blank tiles are used as wildcards. This combined list is new_dictionary_final. You can view this combined list of words in the 'wordlistforwildcards.csv' file. \n\n## Big O Analysis\n\n1.\tSection 1 of the code has a for loop that iterates over the list 'dictionary' and thus a O(n) time complexity for n iterations of the loop.\n2.\tSection 2 of the code has a for loop nested within another for loop and thus a O(nxn)= O(n^2) time complexity, where n is the number of iterations of the for loop.\n3.\tSection 3 of the code has a for loop that iterates over the list 'valid_words' and thus a O(n) time complexity for n iterations of the loop. \n\nThe overall complexity is thus O(n + n^2 + n^2), since n<< n^2 the complexity can be approximated to O(n^2)\n",
"_____no_output_____"
]
],
[
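[
"# Added check: verify the sample-space count quoted in the Solution above --\n# 27**1 + 27**2 + ... + 27**7 tile combinations (letters a-z plus blank per tile).\nprint(sum(27**k for k in range(1, 8)))  # 10862674479",
"_____no_output_____"
],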
[
"#section 1\nimport csv\nimport pandas as pd\n\nf = open(\"dictionary.txt\",\"r\")\ntext_file= f.read()\ndictionary= text_file.split(\"\\n\")\n\n#'valid_words' stores this list. You can view this list of words in the 'wordlist.csv' file present \n#in the root directory (read instruction on how to access 'wordlist.csv')\nvalid_words=[]\nfor i in dictionary:\n if len(i)<=7:\n valid_words.append(i)\n \nprint(\"The number of 7 letter words in given dictionary\")\nprint(len(valid_words)) \nprint(\"This step has a for loop that iterates over the list 'dictionary' and thus a O(n) time complexity for n iterations of the loop\")\n\n#read instruction on how to access 'wordlist.csv'\ndf = pd.DataFrame(valid_words, columns=[\"Possible Matches\"])\ndf.to_csv('wordlist.csv', index=False)",
"The number of 7 letter words in given dictionary\n26387\nThis step has a for loop that iterates over the list 'dictionary' and thus a O(n) time complexity for n iterations of the loop\n"
],
[
"#section 2\ndef replace(word, alphabets=[\"a\",\"b\",\"c\",\"d\", \"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n):\n word= list(word)\n expanded=[]\n copy=word\n for i,j in enumerate(word):\n if j==\"-\":\n for k in alphabets:\n copy[i]=k\n wildcard_option= ''.join(copy)\n expanded.append(wildcard_option)\n return(expanded) \n\nprint(\"The replace function illustrated:\")\nword_expanded= replace('b-girl')\nprint(word_expanded)\nprint(\"The function has a for loop nested within another for loop and thus a O(n^2) time complexity, where n is the number of iterations of the for loop\")",
"The replace function illustrated:\n['bagirl', 'bbgirl', 'bcgirl', 'bdgirl', 'begirl', 'bfgirl', 'bggirl', 'bhgirl', 'bigirl', 'bjgirl', 'bkgirl', 'blgirl', 'bmgirl', 'bngirl', 'bogirl', 'bpgirl', 'bqgirl', 'brgirl', 'bsgirl', 'btgirl', 'bugirl', 'bvgirl', 'bwgirl', 'bxgirl', 'bygirl', 'bzgirl']\nThe function has a for loop nested within another for loop and thus a O(n^2) time complexity, where n is the number of iterations of the for loop\n"
],
[
"#section 3\nnew_dictionary=[]\ncount=0 \n#The combination of words that the blank wildcard represents is stored in the 'new_dictionary' list. \nfor l in valid_words:\n if \"-\" in l:\n count= count+1\n word_expanded= replace(l)\n new_dictionary= new_dictionary + word_expanded\n \nprint(\"This step has a for loop that iterates over the list 'valid_words' and thus a O(n) time complexity for n iterations of the loop\") \nprint(\"The number of wildcard words in the above subset\")\nprint(count)\n\nprint(\"The number of possible representations the wildcard words correspond to in the above subset\")\nprint(len(new_dictionary))",
"This step has a for loop that iterates over the list 'valid_words' and thus a O(n) time complexity for n iterations of the loop\nThe number of wildcard words in the above subset\n118\nThe number of possible representations the wildcard words correspond to in the above subset\n3068\n"
],
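[
"#section 3b (added sketch) -- handling several hundred 7-tile sets with the same dictionary.\n# Hedged illustration only: 'tiles' and the '_' blank marker below are hypothetical\n# conventions, not part of the original challenge code. The idea is to precompute letter\n# counts for the (<= 7 letter) words once, then score each tile set per word, letting\n# blanks stand in for any letters the rack is missing.\nfrom collections import Counter\n\nword_counts = [(w, Counter(w)) for w in valid_words if '-' not in w]\n\ndef playable(tiles, word_counts=word_counts):\n    rack = Counter(t for t in tiles if t != '_')\n    blanks = tiles.count('_')\n    out = []\n    for w, wc in word_counts:\n        missing = sum((wc - rack).values())  # letters the rack cannot supply\n        if missing <= blanks:\n            out.append(w)\n    return out\n\nprint(len(playable(['c', 'a', 't', '_', 's', 'd', 'o'])))  # hypothetical tile set",
"_____no_output_____"
],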
[
"#The 'new_dictionary' list is added to the 'valid_words' list to form all of the words that can be formed, \n#including those where blank tiles are used as wildcards. This combined list is new_dictionary_final. \n#You can view this combined list of words in the 'wordlistforwildcards.csv' file present in the root directory \n\nnew_dictionary_final= new_dictionary + valid_words\n\nprint(\"The number of words that can be formed, including those where blank tiles are used as wildcards\")\nprint(len(new_dictionary_final))\n\n#read instruction on how to access 'wordlistforwildcards.csv'\ndf = pd.DataFrame(new_dictionary_final, columns=[\"Possible Matches\"])\ndf.to_csv('wordlistforwildcards.csv', index=False)\n\n ",
"The number of words that can be formed, including those where blank tiles are used as wildcards\n29455\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f4e8d2452d836bd92a1877306122c953a65e36 | 17,368 | ipynb | Jupyter Notebook | GRCh37/GenomeSpecific/HG005_GRCh37_CNV_exclusion_bed_generation.ipynb | nate-d-olson/genome-stratifications | d145a47b0c5ac0ee8fc58f4db0a18e0faeedcef1 | [
"Unlicense"
] | null | null | null | GRCh37/GenomeSpecific/HG005_GRCh37_CNV_exclusion_bed_generation.ipynb | nate-d-olson/genome-stratifications | d145a47b0c5ac0ee8fc58f4db0a18e0faeedcef1 | [
"Unlicense"
] | null | null | null | GRCh37/GenomeSpecific/HG005_GRCh37_CNV_exclusion_bed_generation.ipynb | nate-d-olson/genome-stratifications | d145a47b0c5ac0ee8fc58f4db0a18e0faeedcef1 | [
"Unlicense"
] | null | null | null | 50.783626 | 327 | 0.694093 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7f4e941af32f67444c1abb2b20cbd67b7cdac78 | 425,435 | ipynb | Jupyter Notebook | s17/Gaussian.ipynb | mraje16/cs181-demos | aacee9553a5e3d96b41abfdb50c95f98565ee9b8 | [
"MIT"
] | 5 | 2018-06-13T06:59:11.000Z | 2021-04-27T03:33:48.000Z | s17/Gaussian.ipynb | harvard-ml-courses/cs181-demos | aacee9553a5e3d96b41abfdb50c95f98565ee9b8 | [
"MIT"
] | null | null | null | s17/Gaussian.ipynb | harvard-ml-courses/cs181-demos | aacee9553a5e3d96b41abfdb50c95f98565ee9b8 | [
"MIT"
] | 18 | 2017-01-30T20:22:24.000Z | 2020-01-27T17:09:06.000Z | 244.784235 | 172,892 | 0.87595 | [
[
[
"import sklearn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats\nimport math\nimport seaborn\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10, 10)\nseaborn.set_context(\"talk\")",
"_____no_output_____"
],
[
"from scipy.stats import multivariate_normal\n",
"_____no_output_____"
],
[
"%matplotlib notebook\nfrom mpl_toolkits.mplot3d import Axes3D\ndef show(data):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot(data[:, 0], data[:, 1], data[:, 2])",
"_____no_output_____"
],
[
"def countour(data):\n d = data[:, 2]\n CS = plt.contour(np.linspace(-3,3), np.linspace(-3,3), d.reshape(math.sqrt(d.shape[0]), -1 ).transpose())",
"_____no_output_____"
],
[
"def compute(mu, sigma):\n var = multivariate_normal(mean=mu, cov=sigma)\n data = [(x, y, var.pdf([x,y])) for x in np.linspace(-3, 3) for y in np.linspace(-3, 3)]\n return np.array(data)",
"_____no_output_____"
],
[
"show(compute([0,0], [[1,1.5],\n [1,3]]))",
"_____no_output_____"
],
[
"countour(compute([0,0], [[1,1.5],\n [1, 3]]))",
"/usr/local/lib/python3.4/dist-packages/ipykernel/__main__.py:3: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n app.launch_new_instance()\n"
],
[
"#plt.ylim([-3,5])\n#plt.xlim([-3,5])\ncountour(compute([0.,0.], [[2.,0.],\n [0., 2.]]))",
"/usr/local/lib/python3.4/dist-packages/ipykernel/__main__.py:3: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n app.launch_new_instance()\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4f1ce018ca2f8e0198b903e9edfeea74b6499 | 135,724 | ipynb | Jupyter Notebook | notebooks/Video 1 - Linear regression.ipynb | undark-lab/swyft | 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | [
"MIT"
] | 104 | 2020-11-26T09:46:03.000Z | 2022-03-18T06:22:03.000Z | notebooks/Video 1 - Linear regression.ipynb | undark-lab/swyft | 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | [
"MIT"
] | 83 | 2021-03-02T15:54:26.000Z | 2022-03-10T08:09:05.000Z | notebooks/Video 1 - Linear regression.ipynb | undark-lab/swyft | 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | [
"MIT"
] | 10 | 2021-02-04T14:27:36.000Z | 2022-03-31T17:39:34.000Z | 329.427184 | 44,408 | 0.933667 | [
[
[
"# Video 1 - Linear regression with swyft",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pylab as plt\nfrom scipy.linalg import inv\nfrom scipy import stats",
"_____no_output_____"
]
],
[
[
"## Linear regression for a second order polynomial",
"_____no_output_____"
],
[
"$$y(x) = v_0 + v_1\\cdot x + v_2 \\cdot x^2$$\n\n$$\nd_i \\sim \\mathcal{N}(y(x_i), \\sigma = 0.05)\\;, \\quad \\text{with}\\quad x_i = 0,\\; 0.1,\\; 0.2, \\;\\dots,\\; 1.0\n$$",
"_____no_output_____"
]
],
[
[
"# Model and reference parameters\nN = 11\nx = np.linspace(0, 1, N)\nT = np.array([x**0, x**1, x**2]).T\nv_true = np.array([-0.2, 0., 0.2])\n\n# Mock data\nSIGMA = 0.05\nnp.random.seed(42)\nDATA = T.dot(v_true) + np.random.randn(N)*SIGMA",
"_____no_output_____"
],
[
"# Linear regression\nv_lr = inv(T.T.dot(T)).dot(T.T.dot(DATA))\ny_lr = T.dot(v_lr)\n\n# Fisher estimation of errors\nI = np.array([[(T[:,i]*T[:,j]).sum()/SIGMA**2 for i in range(3)] for j in range(3)])\nSigma = inv(I)\nv_fisher_err = np.diag(Sigma)**0.5\n\n# Plot\nplt.plot(x, DATA, ls='', marker='x', label = 'Data')\nplt.plot(x, T.dot(v_true), 'r:', label='Ground truth')\nplt.plot(x, y_lr, 'k', label = 'Linear regression')\nplt.legend()\nplt.xlabel(\"x\")\nplt.ylabel('y');\n\nfor i in range(3):\n print(\"v_%i = %.3f +- %.3f (%.3f)\"%(i, v_lr[i], v_fisher_err[i], v_true[i]))",
"v_0 = -0.188 +- 0.038 (-0.200)\nv_1 = 0.098 +- 0.177 (0.000)\nv_2 = 0.079 +- 0.171 (0.200)\n"
]
],
[
[
"## SWYFT!",
"_____no_output_____"
]
],
[
[
"import swyft",
"_____no_output_____"
],
[
"def model(v):\n y = T.dot(v)\n return dict(y=y)\n\nsim = swyft.Simulator(model, ['v0', 'v1', 'v2'], dict(y=(11,)))",
"_____no_output_____"
],
[
"def noise(sim, v):\n d = sim['y'] + np.random.randn(11)*SIGMA\n return dict(d=d)",
"_____no_output_____"
],
[
"store = swyft.Store.memory_store(sim)",
"Creating new store.\n"
],
[
"prior = swyft.Prior(lambda u: u*2 - 1, 3) # Uniform(-1, 1)",
"_____no_output_____"
],
[
"store.add(20000, prior)",
"Store: Adding 19918 new samples to simulator store.\n"
],
[
"store.simulate()",
"_____no_output_____"
],
[
"dataset = swyft.Dataset(20000, prior, store, simhook = noise)",
"_____no_output_____"
],
[
"post = swyft.Posteriors(dataset)",
"_____no_output_____"
],
[
"%%time\nmarginals = [0, 1, 2]\npost.add(marginals, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))\npost.train(marginals)",
"Training: lr=0.001, Epoch=15, VL=1.865\nCPU times: user 37.2 s, sys: 5.05 s, total: 42.3 s\nWall time: 55.8 s\n"
],
[
"%%time\nobs = dict(d=DATA)\nsamples = post.sample(1000000, obs)",
"CPU times: user 9.49 s, sys: 1.45 s, total: 10.9 s\nWall time: 10.9 s\n"
],
[
"fig, diag = swyft.plot_1d(samples, [0, 1, 2], bins = 50, figsize=(15,4))\nfor i in range(3):\n x = np.linspace(-1, 1, 100)\n fig.axes[i].plot(x, stats.norm.pdf(x, v_lr[i], v_fisher_err[i]))",
"_____no_output_____"
],
[
"swyft.plot_corner(samples, [0, 1, 2])",
"_____no_output_____"
],
[
"%%time\nmarginals = [(0, 1), (0, 2)]\npost.add(marginals, device =torch.device('cuda' if torch.cuda.is_available() else 'cpu'))\npost.train(marginals)",
"Training: lr=0.001, Epoch=17, VL=0.3455\nCPU times: user 39.6 s, sys: 4.81 s, total: 44.4 s\nWall time: 1min 2s\n"
],
[
"samples = post.sample(1000000, obs)\nswyft.plot_corner(samples, [0, 1, 2]);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f4f4d61499d40bbde943668f02e838fd176623 | 217,483 | ipynb | Jupyter Notebook | docs/source2/examples/notebooks/generated/discrete_choice_example.ipynb | GreatWei/pythonStates | c4a9b326bfa312e2ae44a70f4dfaaf91f2d47a37 | [
"BSD-3-Clause"
] | 76 | 2019-12-28T08:37:10.000Z | 2022-03-29T02:19:41.000Z | docs/source2/examples/notebooks/generated/discrete_choice_example.ipynb | GreatWei/pythonStates | c4a9b326bfa312e2ae44a70f4dfaaf91f2d47a37 | [
"BSD-3-Clause"
] | null | null | null | docs/source2/examples/notebooks/generated/discrete_choice_example.ipynb | GreatWei/pythonStates | c4a9b326bfa312e2ae44a70f4dfaaf91f2d47a37 | [
"BSD-3-Clause"
] | 35 | 2020-02-04T14:46:25.000Z | 2022-03-24T03:56:17.000Z | 153.589689 | 36,308 | 0.858177 | [
[
[
"# Discrete Choice Models",
"_____no_output_____"
],
[
"## Fair's Affair data",
"_____no_output_____"
],
[
"A survey of women only was conducted in 1974 by *Redbook* asking about extramarital affairs.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import logit",
"_____no_output_____"
],
[
"print(sm.datasets.fair.SOURCE)",
"\nFair, Ray. 1978. \"A Theory of Extramarital Affairs,\" `Journal of Political\nEconomy`, February, 45-61.\n\nThe data is available at http://fairmodel.econ.yale.edu/rayfair/pdf/2011b.htm\n\n"
],
[
"print( sm.datasets.fair.NOTE)",
"::\n\n Number of observations: 6366\n Number of variables: 9\n Variable name definitions:\n\n rate_marriage : How rate marriage, 1 = very poor, 2 = poor, 3 = fair,\n 4 = good, 5 = very good\n age : Age\n yrs_married : No. years married. Interval approximations. See\n original paper for detailed explanation.\n children : No. children\n religious : How relgious, 1 = not, 2 = mildly, 3 = fairly,\n 4 = strongly\n educ : Level of education, 9 = grade school, 12 = high\n school, 14 = some college, 16 = college graduate,\n 17 = some graduate school, 20 = advanced degree\n occupation : 1 = student, 2 = farming, agriculture; semi-skilled,\n or unskilled worker; 3 = white-colloar; 4 = teacher\n counselor social worker, nurse; artist, writers;\n technician, skilled worker, 5 = managerial,\n administrative, business, 6 = professional with\n advanced degree\n occupation_husb : Husband's occupation. Same as occupation.\n affairs : measure of time spent in extramarital affairs\n\n See the original paper for more details.\n\n"
],
[
"dta = sm.datasets.fair.load_pandas().data",
"_____no_output_____"
],
[
"dta['affair'] = (dta['affairs'] > 0).astype(float)\nprint(dta.head(10))",
" rate_marriage age yrs_married children religious educ occupation \\\n0 3.0 32.0 9.0 3.0 3.0 17.0 2.0 \n1 3.0 27.0 13.0 3.0 1.0 14.0 3.0 \n2 4.0 22.0 2.5 0.0 1.0 16.0 3.0 \n3 4.0 37.0 16.5 4.0 3.0 16.0 5.0 \n4 5.0 27.0 9.0 1.0 1.0 14.0 3.0 \n5 4.0 27.0 9.0 0.0 2.0 14.0 3.0 \n6 5.0 37.0 23.0 5.5 2.0 12.0 5.0 \n7 5.0 37.0 23.0 5.5 2.0 12.0 2.0 \n8 3.0 22.0 2.5 0.0 2.0 12.0 3.0 \n9 3.0 27.0 6.0 0.0 1.0 16.0 3.0 \n\n occupation_husb affairs affair \n0 5.0 0.111111 1.0 \n1 4.0 3.230769 1.0 \n2 5.0 1.400000 1.0 \n3 5.0 0.727273 1.0 \n4 4.0 4.666666 1.0 \n5 4.0 4.666666 1.0 \n6 4.0 0.852174 1.0 \n7 3.0 1.826086 1.0 \n8 3.0 4.799999 1.0 \n9 5.0 1.333333 1.0 \n"
],
[
"print(dta.describe())",
" rate_marriage age yrs_married children religious \\\ncount 6366.000000 6366.000000 6366.000000 6366.000000 6366.000000 \nmean 4.109645 29.082862 9.009425 1.396874 2.426170 \nstd 0.961430 6.847882 7.280120 1.433471 0.878369 \nmin 1.000000 17.500000 0.500000 0.000000 1.000000 \n25% 4.000000 22.000000 2.500000 0.000000 2.000000 \n50% 4.000000 27.000000 6.000000 1.000000 2.000000 \n75% 5.000000 32.000000 16.500000 2.000000 3.000000 \nmax 5.000000 42.000000 23.000000 5.500000 4.000000 \n\n educ occupation occupation_husb affairs affair \ncount 6366.000000 6366.000000 6366.000000 6366.000000 6366.000000 \nmean 14.209865 3.424128 3.850141 0.705374 0.322495 \nstd 2.178003 0.942399 1.346435 2.203374 0.467468 \nmin 9.000000 1.000000 1.000000 0.000000 0.000000 \n25% 12.000000 3.000000 3.000000 0.000000 0.000000 \n50% 14.000000 3.000000 4.000000 0.000000 0.000000 \n75% 16.000000 4.000000 5.000000 0.484848 1.000000 \nmax 20.000000 6.000000 6.000000 57.599991 1.000000 \n"
],
[
"affair_mod = logit(\"affair ~ occupation + educ + occupation_husb\"\n \"+ rate_marriage + age + yrs_married + children\"\n \" + religious\", dta).fit()",
"Optimization terminated successfully.\n Current function value: 0.545314\n Iterations 6\n"
],
[
"print(affair_mod.summary())",
" Logit Regression Results \n==============================================================================\nDep. Variable: affair No. Observations: 6366\nModel: Logit Df Residuals: 6357\nMethod: MLE Df Model: 8\nDate: Tue, 24 Dec 2019 Pseudo R-squ.: 0.1327\nTime: 14:49:03 Log-Likelihood: -3471.5\nconverged: True LL-Null: -4002.5\nCovariance Type: nonrobust LLR p-value: 5.807e-224\n===================================================================================\n coef std err z P>|z| [0.025 0.975]\n-----------------------------------------------------------------------------------\nIntercept 3.7257 0.299 12.470 0.000 3.140 4.311\noccupation 0.1602 0.034 4.717 0.000 0.094 0.227\neduc -0.0392 0.015 -2.533 0.011 -0.070 -0.009\noccupation_husb 0.0124 0.023 0.541 0.589 -0.033 0.057\nrate_marriage -0.7161 0.031 -22.784 0.000 -0.778 -0.655\nage -0.0605 0.010 -5.885 0.000 -0.081 -0.040\nyrs_married 0.1100 0.011 10.054 0.000 0.089 0.131\nchildren -0.0042 0.032 -0.134 0.893 -0.066 0.058\nreligious -0.3752 0.035 -10.792 0.000 -0.443 -0.307\n===================================================================================\n"
]
],
[
[
"How well are we predicting?",
"_____no_output_____"
]
],
[
[
"affair_mod.pred_table()",
"_____no_output_____"
]
],
[
[
"The coefficients of the discrete choice model do not tell us much. What we're after is marginal effects.",
"_____no_output_____"
]
],
[
[
"mfx = affair_mod.get_margeff()\nprint(mfx.summary())",
" Logit Marginal Effects \n=====================================\nDep. Variable: affair\nMethod: dydx\nAt: overall\n===================================================================================\n dy/dx std err z P>|z| [0.025 0.975]\n-----------------------------------------------------------------------------------\noccupation 0.0293 0.006 4.744 0.000 0.017 0.041\neduc -0.0072 0.003 -2.538 0.011 -0.013 -0.002\noccupation_husb 0.0023 0.004 0.541 0.589 -0.006 0.010\nrate_marriage -0.1308 0.005 -26.891 0.000 -0.140 -0.121\nage -0.0110 0.002 -5.937 0.000 -0.015 -0.007\nyrs_married 0.0201 0.002 10.327 0.000 0.016 0.024\nchildren -0.0008 0.006 -0.134 0.893 -0.012 0.011\nreligious -0.0685 0.006 -11.119 0.000 -0.081 -0.056\n===================================================================================\n"
],
[
"respondent1000 = dta.iloc[1000]\nprint(respondent1000)",
"rate_marriage 4.000000\nage 37.000000\nyrs_married 23.000000\nchildren 3.000000\nreligious 3.000000\neduc 12.000000\noccupation 3.000000\noccupation_husb 4.000000\naffairs 0.521739\naffair 1.000000\nName: 1000, dtype: float64\n"
],
[
"resp = dict(zip(range(1,9), respondent1000[[\"occupation\", \"educ\",\n \"occupation_husb\", \"rate_marriage\",\n \"age\", \"yrs_married\", \"children\",\n \"religious\"]].tolist()))\nresp.update({0 : 1})\nprint(resp)",
"{1: 3.0, 2: 12.0, 3: 4.0, 4: 4.0, 5: 37.0, 6: 23.0, 7: 3.0, 8: 3.0, 0: 1}\n"
],
[
"mfx = affair_mod.get_margeff(atexog=resp)\nprint(mfx.summary())",
" Logit Marginal Effects \n=====================================\nDep. Variable: affair\nMethod: dydx\nAt: overall\n===================================================================================\n dy/dx std err z P>|z| [0.025 0.975]\n-----------------------------------------------------------------------------------\noccupation 0.0400 0.008 4.711 0.000 0.023 0.057\neduc -0.0098 0.004 -2.537 0.011 -0.017 -0.002\noccupation_husb 0.0031 0.006 0.541 0.589 -0.008 0.014\nrate_marriage -0.1788 0.008 -22.743 0.000 -0.194 -0.163\nage -0.0151 0.003 -5.928 0.000 -0.020 -0.010\nyrs_married 0.0275 0.003 10.256 0.000 0.022 0.033\nchildren -0.0011 0.008 -0.134 0.893 -0.017 0.014\nreligious -0.0937 0.009 -10.722 0.000 -0.111 -0.077\n===================================================================================\n"
]
],
[
[
"`predict` expects a `DataFrame` since `patsy` is used to select columns.",
"_____no_output_____"
]
],
[
[
"respondent1000 = dta.iloc[[1000]]\naffair_mod.predict(respondent1000)",
"_____no_output_____"
],
[
"affair_mod.fittedvalues[1000]",
"_____no_output_____"
],
[
"affair_mod.model.cdf(affair_mod.fittedvalues[1000])",
"_____no_output_____"
]
],
[
[
"The \"correct\" model here is likely the Tobit model. We have an work in progress branch \"tobit-model\" on github, if anyone is interested in censored regression models.",
"_____no_output_____"
],
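[
"# Hedged sketch (added), not the statsmodels \"tobit-model\" branch itself: a\n# left-censored-at-zero Tobit log-likelihood written against GenericLikelihoodModel.\n# The class name and the log-sigma parameterization are illustrative assumptions.\nfrom statsmodels.base.model import GenericLikelihoodModel\n\nclass Tobit(GenericLikelihoodModel):\n    def loglike(self, params):\n        beta, log_sigma = params[:-1], params[-1]\n        sigma = np.exp(log_sigma)  # keeps the scale positive\n        xb = self.exog.dot(beta)\n        y = self.endog\n        censored = y <= 0\n        # censored obs: P(y* <= 0); uncensored obs: normal density of the residual\n        ll = np.where(censored,\n                      stats.norm.logcdf(-xb / sigma),\n                      stats.norm.logpdf((y - xb) / sigma) - np.log(sigma))\n        return ll.sum()",
"_____no_output_____"
],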
[
"### Exercise: Logit vs Probit",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nsupport = np.linspace(-6, 6, 1000)\nax.plot(support, stats.logistic.cdf(support), 'r-', label='Logistic')\nax.plot(support, stats.norm.cdf(support), label='Probit')\nax.legend();",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nsupport = np.linspace(-6, 6, 1000)\nax.plot(support, stats.logistic.pdf(support), 'r-', label='Logistic')\nax.plot(support, stats.norm.pdf(support), label='Probit')\nax.legend();",
"_____no_output_____"
]
],
[
[
"Compare the estimates of the Logit Fair model above to a Probit model. Does the prediction table look better? Much difference in marginal effects?",
"_____no_output_____"
],
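[
"# Hedged sketch (added) for the exercise above: fit the same specification with a\n# probit link and compare the prediction table and marginal effects. Only 'logit'\n# was imported at the top, so 'probit' is imported here; 'dta' is still the affairs\n# DataFrame at this point in the notebook.\nfrom statsmodels.formula.api import probit\n\naffair_probit = probit(\"affair ~ occupation + educ + occupation_husb\"\n                       \"+ rate_marriage + age + yrs_married + children\"\n                       \" + religious\", dta).fit()\nprint(affair_probit.pred_table())\nprint(affair_probit.get_margeff().summary())",
"_____no_output_____"
],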
[
"### Generalized Linear Model Example",
"_____no_output_____"
]
],
[
[
"print(sm.datasets.star98.SOURCE)",
"\nJeff Gill's `Generalized Linear Models: A Unified Approach`\n\nhttp://jgill.wustl.edu/research/books.html\n\n"
],
[
"print(sm.datasets.star98.DESCRLONG)",
"\nThis data is on the California education policy and outcomes (STAR program\nresults for 1998. The data measured standardized testing by the California\nDepartment of Education that required evaluation of 2nd - 11th grade students\nby the the Stanford 9 test on a variety of subjects. This dataset is at\nthe level of the unified school district and consists of 303 cases. The\nbinary response variable represents the number of 9th graders scoring\nover the national median value on the mathematics exam.\n\nThe data used in this example is only a subset of the original source.\n\n"
],
[
"print(sm.datasets.star98.NOTE)",
"::\n\n Number of Observations - 303 (counties in California).\n\n Number of Variables - 13 and 8 interaction terms.\n\n Definition of variables names::\n\n NABOVE - Total number of students above the national median for the\n math section.\n NBELOW - Total number of students below the national median for the\n math section.\n LOWINC - Percentage of low income students\n PERASIAN - Percentage of Asian student\n PERBLACK - Percentage of black students\n PERHISP - Percentage of Hispanic students\n PERMINTE - Percentage of minority teachers\n AVYRSEXP - Sum of teachers' years in educational service divided by the\n number of teachers.\n AVSALK - Total salary budget including benefits divided by the number\n of full-time teachers (in thousands)\n PERSPENK - Per-pupil spending (in thousands)\n PTRATIO - Pupil-teacher ratio.\n PCTAF - Percentage of students taking UC/CSU prep courses\n PCTCHRT - Percentage of charter schools\n PCTYRRND - Percentage of year-round schools\n\n The below variables are interaction terms of the variables defined\n above.\n\n PERMINTE_AVYRSEXP\n PEMINTE_AVSAL\n AVYRSEXP_AVSAL\n PERSPEN_PTRATIO\n PERSPEN_PCTAF\n PTRATIO_PCTAF\n PERMINTE_AVTRSEXP_AVSAL\n PERSPEN_PTRATIO_PCTAF\n\n"
],
[
"dta = sm.datasets.star98.load_pandas().data\nprint(dta.columns)",
"Index(['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',\n 'PERMINTE', 'AVYRSEXP', 'AVSALK', 'PERSPENK', 'PTRATIO', 'PCTAF',\n 'PCTCHRT', 'PCTYRRND', 'PERMINTE_AVYRSEXP', 'PERMINTE_AVSAL',\n 'AVYRSEXP_AVSAL', 'PERSPEN_PTRATIO', 'PERSPEN_PCTAF', 'PTRATIO_PCTAF',\n 'PERMINTE_AVYRSEXP_AVSAL', 'PERSPEN_PTRATIO_PCTAF'],\n dtype='object')\n"
],
[
"print(dta[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP', 'PERMINTE']].head(10))",
" NABOVE NBELOW LOWINC PERASIAN PERBLACK PERHISP PERMINTE\n0 452.0 355.0 34.39730 23.299300 14.235280 11.411120 15.918370\n1 144.0 40.0 17.36507 29.328380 8.234897 9.314884 13.636360\n2 337.0 234.0 32.64324 9.226386 42.406310 13.543720 28.834360\n3 395.0 178.0 11.90953 13.883090 3.796973 11.443110 11.111110\n4 8.0 57.0 36.88889 12.187500 76.875000 7.604167 43.589740\n5 1348.0 899.0 20.93149 28.023510 4.643221 13.808160 15.378490\n6 477.0 887.0 53.26898 8.447858 19.374830 37.905330 25.525530\n7 565.0 347.0 15.19009 3.665781 2.649680 13.092070 6.203008\n8 205.0 320.0 28.21582 10.430420 6.786374 32.334300 13.461540\n9 469.0 598.0 32.77897 17.178310 12.484930 28.323290 27.259890\n"
],
[
"print(dta[['AVYRSEXP', 'AVSALK', 'PERSPENK', 'PTRATIO', 'PCTAF', 'PCTCHRT', 'PCTYRRND']].head(10))",
" AVYRSEXP AVSALK PERSPENK PTRATIO PCTAF PCTCHRT PCTYRRND\n0 14.70646 59.15732 4.445207 21.71025 57.03276 0.0 22.222220\n1 16.08324 59.50397 5.267598 20.44278 64.62264 0.0 0.000000\n2 14.59559 60.56992 5.482922 18.95419 53.94191 0.0 0.000000\n3 14.38939 58.33411 4.165093 21.63539 49.06103 0.0 7.142857\n4 13.90568 63.15364 4.324902 18.77984 52.38095 0.0 0.000000\n5 14.97755 66.97055 3.916104 24.51914 44.91578 0.0 2.380952\n6 14.67829 57.62195 4.270903 22.21278 32.28916 0.0 12.121210\n7 13.66197 63.44740 4.309734 24.59026 30.45267 0.0 0.000000\n8 16.41760 57.84564 4.527603 21.74138 22.64574 0.0 0.000000\n9 12.51864 57.80141 4.648917 20.26010 26.07099 0.0 0.000000\n"
],
[
"formula = 'NABOVE + NBELOW ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT '\nformula += '+ PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'",
"_____no_output_____"
]
],
[
[
"#### Aside: Binomial distribution",
"_____no_output_____"
],
[
"Toss a six-sided die 5 times, what's the probability of exactly 2 fours?",
"_____no_output_____"
]
],
[
[
"stats.binom(5, 1./6).pmf(2)",
"_____no_output_____"
],
[
"from scipy.special import comb\ncomb(5,2) * (1/6.)**2 * (5/6.)**3",
"_____no_output_____"
],
[
"from statsmodels.formula.api import glm\nglm_mod = glm(formula, dta, family=sm.families.Binomial()).fit()",
"_____no_output_____"
],
[
"print(glm_mod.summary())",
" Generalized Linear Model Regression Results \n================================================================================\nDep. Variable: ['NABOVE', 'NBELOW'] No. Observations: 303\nModel: GLM Df Residuals: 282\nModel Family: Binomial Df Model: 20\nLink Function: logit Scale: 1.0000\nMethod: IRLS Log-Likelihood: -2998.6\nDate: Tue, 24 Dec 2019 Deviance: 4078.8\nTime: 14:50:24 Pearson chi2: 4.05e+03\nNo. Iterations: 5 \nCovariance Type: nonrobust \n============================================================================================\n coef std err z P>|z| [0.025 0.975]\n--------------------------------------------------------------------------------------------\nIntercept 2.9589 1.547 1.913 0.056 -0.073 5.990\nLOWINC -0.0168 0.000 -38.749 0.000 -0.018 -0.016\nPERASIAN 0.0099 0.001 16.505 0.000 0.009 0.011\nPERBLACK -0.0187 0.001 -25.182 0.000 -0.020 -0.017\nPERHISP -0.0142 0.000 -32.818 0.000 -0.015 -0.013\nPCTCHRT 0.0049 0.001 3.921 0.000 0.002 0.007\nPCTYRRND -0.0036 0.000 -15.878 0.000 -0.004 -0.003\nPERMINTE 0.2545 0.030 8.498 0.000 0.196 0.313\nAVYRSEXP 0.2407 0.057 4.212 0.000 0.129 0.353\nPERMINTE:AVYRSEXP -0.0141 0.002 -7.391 0.000 -0.018 -0.010\nAVSALK 0.0804 0.014 5.775 0.000 0.053 0.108\nPERMINTE:AVSALK -0.0040 0.000 -8.450 0.000 -0.005 -0.003\nAVYRSEXP:AVSALK -0.0039 0.001 -4.059 0.000 -0.006 -0.002\nPERMINTE:AVYRSEXP:AVSALK 0.0002 2.99e-05 7.428 0.000 0.000 0.000\nPERSPENK -1.9522 0.317 -6.162 0.000 -2.573 -1.331\nPTRATIO -0.3341 0.061 -5.453 0.000 -0.454 -0.214\nPERSPENK:PTRATIO 0.0917 0.015 6.321 0.000 0.063 0.120\nPCTAF -0.1690 0.033 -5.169 0.000 -0.233 -0.105\nPERSPENK:PCTAF 0.0490 0.007 6.574 0.000 0.034 0.064\nPTRATIO:PCTAF 0.0080 0.001 5.362 0.000 0.005 0.011\nPERSPENK:PTRATIO:PCTAF -0.0022 0.000 -6.445 0.000 -0.003 -0.002\n============================================================================================\n"
]
],
[
[
"The number of trials ",
"_____no_output_____"
]
],
[
[
"glm_mod.model.data.orig_endog.sum(1)",
"_____no_output_____"
],
[
"glm_mod.fittedvalues * glm_mod.model.data.orig_endog.sum(1)",
"_____no_output_____"
]
],
[
[
"First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact\non the response variables:",
"_____no_output_____"
]
],
[
[
"exog = glm_mod.model.data.orig_exog # get the dataframe",
"_____no_output_____"
],
[
"means25 = exog.mean()\nprint(means25)",
"Intercept 1.000000\nLOWINC 41.409877\nPERASIAN 5.896335\nPERBLACK 5.636808\nPERHISP 34.398080\nPCTCHRT 1.175909\nPCTYRRND 11.611905\nPERMINTE 14.694747\nAVYRSEXP 14.253875\nPERMINTE:AVYRSEXP 209.018700\nAVSALK 58.640258\nPERMINTE:AVSALK 879.979883\nAVYRSEXP:AVSALK 839.718173\nPERMINTE:AVYRSEXP:AVSALK 12585.266464\nPERSPENK 4.320310\nPTRATIO 22.464250\nPERSPENK:PTRATIO 96.295756\nPCTAF 33.630593\nPERSPENK:PCTAF 147.235740\nPTRATIO:PCTAF 747.445536\nPERSPENK:PTRATIO:PCTAF 3243.607568\ndtype: float64\n"
],
[
"means25['LOWINC'] = exog['LOWINC'].quantile(.25)\nprint(means25)",
"Intercept 1.000000\nLOWINC 26.683040\nPERASIAN 5.896335\nPERBLACK 5.636808\nPERHISP 34.398080\nPCTCHRT 1.175909\nPCTYRRND 11.611905\nPERMINTE 14.694747\nAVYRSEXP 14.253875\nPERMINTE:AVYRSEXP 209.018700\nAVSALK 58.640258\nPERMINTE:AVSALK 879.979883\nAVYRSEXP:AVSALK 839.718173\nPERMINTE:AVYRSEXP:AVSALK 12585.266464\nPERSPENK 4.320310\nPTRATIO 22.464250\nPERSPENK:PTRATIO 96.295756\nPCTAF 33.630593\nPERSPENK:PCTAF 147.235740\nPTRATIO:PCTAF 747.445536\nPERSPENK:PTRATIO:PCTAF 3243.607568\ndtype: float64\n"
],
[
"means75 = exog.mean()\nmeans75['LOWINC'] = exog['LOWINC'].quantile(.75)\nprint(means75)",
"Intercept 1.000000\nLOWINC 55.460075\nPERASIAN 5.896335\nPERBLACK 5.636808\nPERHISP 34.398080\nPCTCHRT 1.175909\nPCTYRRND 11.611905\nPERMINTE 14.694747\nAVYRSEXP 14.253875\nPERMINTE:AVYRSEXP 209.018700\nAVSALK 58.640258\nPERMINTE:AVSALK 879.979883\nAVYRSEXP:AVSALK 839.718173\nPERMINTE:AVYRSEXP:AVSALK 12585.266464\nPERSPENK 4.320310\nPTRATIO 22.464250\nPERSPENK:PTRATIO 96.295756\nPCTAF 33.630593\nPERSPENK:PCTAF 147.235740\nPTRATIO:PCTAF 747.445536\nPERSPENK:PTRATIO:PCTAF 3243.607568\ndtype: float64\n"
]
],
[
[
"Again, `predict` expects a `DataFrame` since `patsy` is used to select columns.",
"_____no_output_____"
]
],
[
[
"resp25 = glm_mod.predict(pd.DataFrame(means25).T)\nresp75 = glm_mod.predict(pd.DataFrame(means75).T)\ndiff = resp75 - resp25",
"_____no_output_____"
]
],
[
[
"The interquartile first difference for the percentage of low income households in a school district is:",
"_____no_output_____"
]
],
[
[
"print(\"%2.4f%%\" % (diff[0]*100))",
"-11.8863%\n"
],
[
"nobs = glm_mod.nobs\ny = glm_mod.model.endog\nyhat = glm_mod.mu",
"_____no_output_____"
],
[
"from statsmodels.graphics.api import abline_plot\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, ylabel='Observed Values', xlabel='Fitted Values')\nax.scatter(yhat, y)\ny_vs_yhat = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()\nfig = abline_plot(model_results=y_vs_yhat, ax=ax)",
"_____no_output_____"
]
],
[
[
"#### Plot fitted values vs Pearson residuals",
"_____no_output_____"
],
[
"Pearson residuals are defined to be\n\n$$\\frac{(y - \\mu)}{\\sqrt{(var(\\mu))}}$$\n\nwhere var is typically determined by the family. E.g., binomial variance is $np(1 - p)$",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, title='Residual Dependence Plot', xlabel='Fitted Values',\n ylabel='Pearson Residuals')\nax.scatter(yhat, stats.zscore(glm_mod.resid_pearson))\nax.axis('tight')\nax.plot([0.0, 1.0],[0.0, 0.0], 'k-');",
"_____no_output_____"
]
],
[
[
"#### Histogram of standardized deviance residuals with Kernel Density Estimate overlaid",
"_____no_output_____"
],
[
"The definition of the deviance residuals depends on the family. For the Binomial distribution this is\n\n$$r_{dev} = sign\\left(Y-\\mu\\right)*\\sqrt{2n(Y\\log\\frac{Y}{\\mu}+(1-Y)\\log\\frac{(1-Y)}{(1-\\mu)}}$$\n\nThey can be used to detect ill-fitting covariates",
"_____no_output_____"
]
],
[
[
"resid = glm_mod.resid_deviance\nresid_std = stats.zscore(resid)\nkde_resid = sm.nonparametric.KDEUnivariate(resid_std)\nkde_resid.fit()",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, title=\"Standardized Deviance Residuals\")\nax.hist(resid_std, bins=25, density=True);\nax.plot(kde_resid.support, kde_resid.density, 'r');",
"_____no_output_____"
]
],
[
[
"#### QQ-plot of deviance residuals",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nfig = sm.graphics.qqplot(resid, line='r', ax=ax)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f50d83c2d20d8acdedb4ce5a24e42a84b0e65c | 79,747 | ipynb | Jupyter Notebook | Model/3-NeuarlNetwork7-Copy1.ipynb | skawns0724/KOSA-Big-Data_Vision | af123dfe0a82a82795bb6732285c390be86e83b7 | [
"MIT"
] | 1 | 2021-09-04T02:42:12.000Z | 2021-09-04T02:42:12.000Z | Model/3-NeuarlNetwork7-Copy1.ipynb | skawns0724/KOSA-Big-Data_Vision | af123dfe0a82a82795bb6732285c390be86e83b7 | [
"MIT"
] | null | null | null | Model/3-NeuarlNetwork7-Copy1.ipynb | skawns0724/KOSA-Big-Data_Vision | af123dfe0a82a82795bb6732285c390be86e83b7 | [
"MIT"
] | 7 | 2021-09-13T02:13:30.000Z | 2021-09-23T01:26:38.000Z | 100.437028 | 49,220 | 0.783365 | [
[
[
"# Import Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom keras.initializers import glorot_uniform\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom sklearn.metrics import confusion_matrix",
"Using TensorFlow backend.\n"
],
[
"#!pip show tensorflow",
"_____no_output_____"
]
],
[
[
"# Background\n\n_Credit default_ can defined as the failure to repay a debt including interest or principal on a loan or security on the due date.This can cause losses for lenders so that preventive measures is a must, in which early detection for potential default can be one of those. This case study can be categorized as the binary classification.\n\nArtifical Neural Network (ANN) is one of models for classification problems, having the ability to capture the linier and also the non-linear model trends from data so that it can give predictions for the new data (having the same distributions).\n\nIn jupyter notebook, the effectiveness of ANN model will be tried to classify the _credit default customer_ and hope that it can reach 95% accuracy.",
"_____no_output_____"
],
[
"# Data Understanding\n\nThe data used in this task is a public dataset from UCI Machine Learning entitled \"Default of Credit Card Clients Dataset\" containing information on default payments, demographic factors, credit data, history of payment, and bill statements of credit card clients in Taiwan from April 2005 to September 2005. \n\nThis dataset contains 30,000 data observations with 25 variables consisting of 1 ID, 23 predictor variables, and 1 response variable as the default payment next month.\n\nHere are some samples of the data.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('credit_cards_dataset.csv')\ndf.head()",
"_____no_output_____"
]
],
[
[
"The description of each column/variable can be seen below :\n- ID: ID of each client\n- LIMIT_BAL: Amount of given credit in NT dollars (includes individual and family/supplementary credit\n- SEX: Gender (1=male, 2=female)\n- EDUCATION: (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown)\n- MARRIAGE: Marital status (1=married, 2=single, 3=others)\n- AGE: Age in years\n- PAY_0: Repayment status in September, 2005 (-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, … 8=payment delay for eight months, 9=payment delay for nine months and above)\n- PAY_2: Repayment status in August, 2005 (scale same as above)\n- PAY_3: Repayment status in July, 2005 (scale same as above)\n- PAY_4: Repayment status in June, 2005 (scale same as above)\n- PAY_5: Repayment status in May, 2005 (scale same as above)\n- PAY_6: Repayment status in April, 2005 (scale same as above)\n- BILL_AMT1: Amount of bill statement in September, 2005 (NT dollar)\n- BILL_AMT2: Amount of bill statement in August, 2005 (NT dollar)\n- BILL_AMT3: Amount of bill statement in July, 2005 (NT dollar)\n- BILL_AMT4: Amount of bill statement in June, 2005 (NT dollar)\n- BILL_AMT5: Amount of bill statement in May, 2005 (NT dollar)\n- BILL_AMT6: Amount of bill statement in April, 2005 (NT dollar)\n- PAY_AMT1: Amount of previous payment in September, 2005 (NT dollar)\n- PAY_AMT2: Amount of previous payment in August, 2005 (NT dollar)\n- PAY_AMT3: Amount of previous payment in July, 2005 (NT dollar)\n- PAY_AMT4: Amount of previous payment in June, 2005 (NT dollar)\n- PAY_AMT5: Amount of previous payment in May, 2005 (NT dollar)\n- PAY_AMT6: Amount of previous payment in April, 2005 (NT dollar)\n- default.payment.next.month: Default payment (1=yes, 0=no)",
"_____no_output_____"
],
[
"## Data Exploratory\nAs we can see the description of each column/variable, those are the numerical data so that the data summary are all based on basic statistics in mean, median, minimum and maximum etc which detailed below.",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"Next, we want see the correlation between all of features and label in the dataset by using the Pearson correlation formula below. <br>\n$$Covarian (S_{xy}) =\\frac{\\sum(x_{i}-\\bar{x})(y_{i}-\\bar{y})}{n-1}$$\n\n\nThe plot below is the correlation between all features (predictor variables) toward label.",
"_____no_output_____"
]
],
[
[
"# Using Pearson Correlation\nplt.figure(figsize=(14,14))\ncor = df.iloc[:,1:].corr()\nx = cor [['default.payment.next.month']]\nsns.heatmap(x, annot=True, cmap=plt.cm.Reds)\nplt.show()",
"_____no_output_____"
]
],
[
[
"As we can see in the plot above, the repayment status of customers (PAY_0 - PAY_6) have the higher correlation towards the label (default.payment.next.month) in compared to other features.",
"_____no_output_____"
],
[
"# Data Preparation\n## Data Cleansing\nBefore implementing the ANN to predict the \"credit default customer\", we have to check the data, whether it needs cleaning or not.",
"_____no_output_____"
]
],
[
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"After checking the summary of missing value in the dataset, the result shows that the data has no missing values so that the data is ready to the next stage.",
"_____no_output_____"
],
[
"## Splitting Data to Training and Test Data\nIn this stage, the clean data will be splitted into 2 categories, train data and test data. The train data will be utilized in the training ANN model, and the data test will be used to test the trained model whether the model has good generalization or not in predicting the future data. In this stage, 80% data will be used as the train data and the rest as the test data.\n\nBefore splitting, the dataset will be grouped into 2 variables, the data from 2nd to 24rd column as the predictor features (the first columns is not included as predictor) will be groped as X, and the data from 25th columns (label) will be renamed as y.",
"_____no_output_____"
]
],
[
[
"X = df.iloc[:, 1:24].values\ny = df.iloc[:, 24].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)",
"_____no_output_____"
]
],
[
[
"## Data Standardization\n\nAfter splitting data, the numeric data will be standardized by scaling the data to have mean of 0 and variance of 1. \n$$X_{stand} = \\frac{X - \\mu}{\\sigma}$$",
"_____no_output_____"
]
],
[
[
"sc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)",
"_____no_output_____"
]
],
[
[
"# Modelling\n\nOn the Modeling phase, we create the ANN model with 5 hidden layer (with 50,40,30,20, and 10 neurons respectively) with _relu_ activation function, and 1 output layer with 1 neuron with _sigmoid_ activation function. Furthermore, we choose the 'Adam' optimizer to optimize the parameter in the created model.",
"_____no_output_____"
]
],
[
[
"hl = 6 # number of hidden layer\nnohl = [50, 60, 40, 30, 20, 10] # number of neurons in each hidden layer\n\nclassifier = Sequential()\n\n# Hidden Layer\nfor i in range(hl):\n if i==0:\n classifier.add(Dense(units=nohl[i], input_dim=X_train.shape[1], kernel_initializer='uniform', activation='relu'))\n else :\n classifier.add(Dense(units=nohl[i], kernel_initializer=glorot_uniform(seed=0), activation='relu'))\n\n# Output Layer\nclassifier.add(Dense(units=1, kernel_initializer=glorot_uniform(seed=0), activation='sigmoid'))\n\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"Here below the summary of created model architecture by ANN with the parameters needed.",
"_____no_output_____"
]
],
[
[
"classifier.summary()",
"Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 50) 1200 \n_________________________________________________________________\ndense_2 (Dense) (None, 60) 3060 \n_________________________________________________________________\ndense_3 (Dense) (None, 40) 2440 \n_________________________________________________________________\ndense_4 (Dense) (None, 30) 1230 \n_________________________________________________________________\ndense_5 (Dense) (None, 20) 620 \n_________________________________________________________________\ndense_6 (Dense) (None, 10) 210 \n_________________________________________________________________\ndense_7 (Dense) (None, 1) 11 \n=================================================================\nTotal params: 8,771\nTrainable params: 8,771\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"After create the model architecture by ANN, we train the model by a certain number of epoch and batch.",
"_____no_output_____"
]
],
[
[
"classifier.fit(X_train, y_train, epochs=50, batch_size=512)",
"Epoch 1/50\n21000/21000 [==============================] - 1s 50us/step - loss: 0.5517 - accuracy: 0.7773\nEpoch 2/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4703 - accuracy: 0.7833\nEpoch 3/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4506 - accuracy: 0.8121 0s - loss: 0.4488 - accuracy: 0.81\nEpoch 4/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4441 - accuracy: 0.8132\nEpoch 5/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4386 - accuracy: 0.8149\nEpoch 6/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4305 - accuracy: 0.8203\nEpoch 7/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4298 - accuracy: 0.8207\nEpoch 8/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4286 - accuracy: 0.8214\nEpoch 9/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4262 - accuracy: 0.8207 0s - loss: 0.4281 - accuracy: 0.\nEpoch 10/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4253 - accuracy: 0.8220\nEpoch 11/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4253 - accuracy: 0.8227\nEpoch 12/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4293 - accuracy: 0.8202\nEpoch 13/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4229 - accuracy: 0.8228\nEpoch 14/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4235 - accuracy: 0.8220\nEpoch 15/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4207 - accuracy: 0.8222\nEpoch 16/50\n21000/21000 [==============================] - 0s 9us/step - loss: 0.4230 - accuracy: 0.8200\nEpoch 17/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4189 - accuracy: 0.8229\nEpoch 18/50\n21000/21000 [==============================] - 0s 9us/step - loss: 0.4182 - accuracy: 0.8242\nEpoch 19/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4191 - accuracy: 0.8226\nEpoch 20/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4215 - accuracy: 0.8234\nEpoch 21/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4278 - accuracy: 0.8227\nEpoch 22/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4159 - accuracy: 0.8240\nEpoch 23/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4157 - accuracy: 0.8249\nEpoch 24/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4146 - accuracy: 0.8255\nEpoch 25/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4155 - accuracy: 0.8233\nEpoch 26/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4182 - accuracy: 0.8245\nEpoch 27/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4140 - accuracy: 0.8261\nEpoch 28/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4131 - accuracy: 0.8264\nEpoch 29/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4119 - accuracy: 0.8251\nEpoch 30/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4125 - accuracy: 0.8242\nEpoch 31/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4126 - accuracy: 0.8258\nEpoch 32/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4122 - accuracy: 0.8249\nEpoch 33/50\n21000/21000 
[==============================] - 0s 12us/step - loss: 0.4123 - accuracy: 0.8246\nEpoch 34/50\n21000/21000 [==============================] - 0s 13us/step - loss: 0.4110 - accuracy: 0.8255\nEpoch 35/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4101 - accuracy: 0.8255\nEpoch 36/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4079 - accuracy: 0.8267\nEpoch 37/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4123 - accuracy: 0.8263\nEpoch 38/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4068 - accuracy: 0.8275\nEpoch 39/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4064 - accuracy: 0.8274\nEpoch 40/50\n21000/21000 [==============================] - 0s 9us/step - loss: 0.4080 - accuracy: 0.8276\nEpoch 41/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4128 - accuracy: 0.8270\nEpoch 42/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4097 - accuracy: 0.8286\nEpoch 43/50\n21000/21000 [==============================] - 0s 10us/step - loss: 0.4053 - accuracy: 0.8278\nEpoch 44/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4051 - accuracy: 0.8291\nEpoch 45/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4025 - accuracy: 0.8291\nEpoch 46/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4076 - accuracy: 0.8282\nEpoch 47/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4013 - accuracy: 0.8302\nEpoch 48/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4110 - accuracy: 0.8256\nEpoch 49/50\n21000/21000 [==============================] - 0s 11us/step - loss: 0.4025 - accuracy: 0.8296\nEpoch 50/50\n21000/21000 [==============================] - 0s 12us/step - loss: 0.4019 - accuracy: 0.8290\n"
]
],
[
[
"# Evaluation\nIn this classification problem, we evaluate model by looking at how many of their predictions are correct in which the threshold is 50%. This can be plotted into Confusion Matrix.\n\nHere is the confusion matrix from the ANN model after doing prediction to the dataset :",
"_____no_output_____"
]
],
[
[
"y_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\nconf_matr = confusion_matrix(y_test, y_pred)\n\nTP = conf_matr[0,0]; FP = conf_matr[0,1]; TN = conf_matr[1,1]; FN = conf_matr[1,0]\nprint('Confusion Matrix : ')\nprint(conf_matr)\nprint()\nprint('True Positive (TP) : ',TP)\nprint('False Positive (FP) : ',FP)\nprint('True Negative (TN) : ',TN)\nprint('False Negative (FN) : ',FN)",
"Confusion Matrix : \n[[6695 345]\n [1332 628]]\n\nTrue Positive (TP) : 6695\nFalse Positive (FP) : 345\nTrue Negative (TN) : 628\nFalse Negative (FN) : 1332\n"
]
],
[
[
"in which \n- True Positive (TP) means the model predict customer will pay the credit and the prediction is correct.\n- False Positive (FP) means the model predict customer will will pay the credit and the prediction is incorrect.\n- True Negative (TN) means the model predict customer will not will pay the credit and the prediction is correct.\n- False Negative (FN) means the model predict customer will not will pay the credit and the prediction is incorrect.\n\nBased of the result above, then we can start doing evaluation using 3 different metrics: accuracy, recall, and precision.",
"_____no_output_____"
],
[
"### Accuracy\nAccuracy means how many prediction is true compared to the total data. The metric will be calculated by following formula.\n\n$$Accuray = \\frac{TP+TN}{TP+TN+FP+FN}$$",
"_____no_output_____"
]
],
[
[
"acc = (TP+TN)/(TP+TN+FP+FN)\nprint('By this metric, only '+ str(round(acc*100)) + '% of them are correctly predicted.')",
"_____no_output_____"
]
],
[
[
"### Precision\nIn this metric (precision), it only concern on how many positive prediction that are actually correct and this will be calculated by formula below.\n \n$$Precision = \\frac{TP}{TP+FP}$$",
"_____no_output_____"
]
],
[
[
"pre = TP/(TP+FP)\nprint('From those classification result, by calculating the precision, there are '+ str(round(pre*100)) + '% of them who are actually pay the credit.')",
"_____no_output_____"
]
],
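[
[
"# Added sketch: recall was listed among the three metrics above but never computed.\n# Recall = TP / (TP + FN): of the customers who actually pay (the positive class in\n# the cells above), the share the model correctly identifies as payers.\nrec = TP/(TP+FN)\nprint('By calculating the recall, '+ str(round(rec*100)) + '% of the customers who actually pay are correctly identified.')",
"_____no_output_____"
]
],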
[
[
"After reviewing the model performance by the accurary and precision metric, it seems that the created model included the hyper-parameter used has not be able the reach the 95% accuray so that there are some possible actions which can be taken, such as :\n- tuning the hyper-parameter tp get the better performance before releasing the model into the real use\n- release the model while also developing a better model\n- trying another classfication model (such as decision-tree, Naive-bayes)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7f52691e426df7f9695669e8d794640d9ad5e26 | 37,426 | ipynb | Jupyter Notebook | hurricane_ike_water_levels.ipynb | ocefpaf/hurricane-ike-water-levels | 07da9cf51c4f4ffbcbad9f4699359a1f92552462 | [
"BSD-3-Clause"
] | null | null | null | hurricane_ike_water_levels.ipynb | ocefpaf/hurricane-ike-water-levels | 07da9cf51c4f4ffbcbad9f4699359a1f92552462 | [
"BSD-3-Clause"
] | null | null | null | hurricane_ike_water_levels.ipynb | ocefpaf/hurricane-ike-water-levels | 07da9cf51c4f4ffbcbad9f4699359a1f92552462 | [
"BSD-3-Clause"
] | null | null | null | 31.033167 | 365 | 0.599316 | [
[
[
"# Hurricane Ike Maximum Water Levels\nCompute the maximum water level during Hurricane Ike on a 9 million node triangular mesh storm surge model. Plot the results with Datashader. ",
"_____no_output_____"
]
],
[
[
"import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport fsspec\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom dask.distributed import Client, progress, performance_report\nfrom dask_kubernetes import KubeCluster",
"_____no_output_____"
]
],
[
[
"### Start a dask cluster to crunch the data",
"_____no_output_____"
]
],
[
[
"cluster = KubeCluster()",
"_____no_output_____"
],
[
"cluster.scale(15);",
"_____no_output_____"
],
[
"cluster",
"_____no_output_____"
],
[
"import dask; print(dask.__version__)",
"_____no_output_____"
]
],
[
[
"For demos, I often click in this cell and do \"Cell=>Run All Above\", then wait until the workers appear. This can take several minutes (up to 6!) for instances to spin up and Docker containers to be downloaded. Then I shutdown the notebook and run again from the beginning, and the workers will fire up quickly because the instances have not spun down yet. ",
"_____no_output_____"
]
],
[
[
"#cluster.adapt(maximum=15);",
"_____no_output_____"
],
[
"client = Client(cluster)",
"_____no_output_____"
]
],
[
[
"### Read the data using the cloud-friendly zarr data format",
"_____no_output_____"
]
],
[
[
"ds = xr.open_zarr(fsspec.get_mapper('s3://pangeo-data-uswest2/esip/adcirc/ike', anon=False, requester_pays=True))",
"_____no_output_____"
],
[
"#ds = xr.open_zarr(fsspec.get_mapper('gcs://pangeo-data/rsignell/adcirc_test01'))",
"_____no_output_____"
],
[
"ds['zeta']",
"_____no_output_____"
]
],
[
[
"How many GB of sea surface height data do we have?",
"_____no_output_____"
]
],
[
[
"ds['zeta'].nbytes/1.e9",
"_____no_output_____"
]
],
[
[
"Take the maximum over the time dimension and persist the data on the workers in case we would like to use it later. This is the computationally intensive step.",
"_____no_output_____"
]
],
[
[
"%%time\nwith performance_report(filename=\"dask-zarr-report.html\"):\n max_var = ds['zeta'].max(dim='time').compute()",
"_____no_output_____"
]
],
[
[
"### Visualize data on mesh using HoloViz.org tools",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport datashader as dshade\nimport holoviews as hv\nimport geoviews as gv\nimport cartopy.crs as ccrs\nimport hvplot.xarray\nimport holoviews.operation.datashader as dshade\n\ndshade.datashade.precompute = True\nhv.extension('bokeh')",
"_____no_output_____"
],
[
"v = np.vstack((ds['x'], ds['y'], max_var)).T\nverts = pd.DataFrame(v, columns=['x','y','vmax'])",
"_____no_output_____"
],
[
"points = gv.operation.project_points(gv.Points(verts, vdims=['vmax']))",
"_____no_output_____"
],
[
"tris = pd.DataFrame(ds['element'].values.astype('int')-1, columns=['v0','v1','v2'])",
"_____no_output_____"
],
[
"tiles = gv.tile_sources.OSM",
"_____no_output_____"
],
[
"value = 'max water level'\nlabel = '{} (m)'.format(value)\ntrimesh = gv.TriMesh((tris, points), label=label)\nmesh = dshade.rasterize(trimesh).opts(\n cmap='rainbow', colorbar=True, width=600, height=400)",
"_____no_output_____"
],
[
"tiles * mesh",
"_____no_output_____"
]
],
[
[
"### Extract a time series at a specified lon, lat location",
"_____no_output_____"
],
[
"Because Xarray does not yet understand that `x` and `y` are coordinate variables on this triangular mesh, we create our own simple function to find the closest point. If we had a lot of these, we could use a more fancy tree algorithm.",
"_____no_output_____"
]
],
[
[
"# find the indices of the points in (x,y) closest to the points in (xi,yi)\ndef nearxy(x,y,xi,yi):\n ind = np.ones(len(xi),dtype=int)\n for i in range(len(xi)):\n dist = np.sqrt((x-xi[i])**2+(y-yi[i])**2)\n ind[i] = dist.argmin()\n return ind",
"_____no_output_____"
],
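[
"# A spatial index scales much better than the brute-force loop above when there\n# are many query points. Minimal sketch using scipy's cKDTree (assumes scipy is\n# available in this environment):\nfrom scipy.spatial import cKDTree\n\ndef nearxy_tree(x, y, xi, yi):\n # build the index over all mesh nodes once, then query all points at once\n tree = cKDTree(np.c_[x, y])\n _, ind = tree.query(np.c_[xi, yi], k=1)\n return ind",
"_____no_output_____"
],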
[
"#just offshore of Galveston\nlat = 29.2329856\nlon = -95.1535041",
"_____no_output_____"
],
[
"ind = nearxy(ds['x'].values,ds['y'].values,[lon], [lat])",
"_____no_output_____"
],
[
"ds['zeta'][:,ind].hvplot(x='time', grid=True)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f5283e64dcb8b0a7ca4ea518bd3377edf8e507 | 3,414 | ipynb | Jupyter Notebook | Section 2/#2.1 List comprehension.ipynb | PacktPublishing/Getting-Productive-with-Modern-Python | ae4970df5461a4a17dff2ca02d07728bbbf66aac | [
"MIT"
] | 4 | 2019-09-20T12:05:10.000Z | 2021-10-11T17:54:34.000Z | Section 2/#2.1 List comprehension.ipynb | PacktPublishing/Getting-Productive-with-Modern-Python | ae4970df5461a4a17dff2ca02d07728bbbf66aac | [
"MIT"
] | null | null | null | Section 2/#2.1 List comprehension.ipynb | PacktPublishing/Getting-Productive-with-Modern-Python | ae4970df5461a4a17dff2ca02d07728bbbf66aac | [
"MIT"
] | 2 | 2019-11-15T17:29:47.000Z | 2020-09-23T16:11:54.000Z | 22.76 | 85 | 0.491213 | [
[
[
"# List comprehension simply means to be pythonic-\n\n# Here is a list of 8 numbers-\nno_list = [98,76,55,45,35,22,32,12]\n\n# Now we'll double all the evens-\nTwice_Evens = []\n\n# Using For loop to iterate over elements, scan and double the even numbers-\nfor n in no_list:\n if n % 2 == 0:\n Twice_Evens.append(n * 2)\n# Also, after the loop the final value of n becomes to '12'\n# Which is the last number \n\n# We may check the value of n by-\nprint(\"n =\", no_list)\n",
"n = [98, 76, 55, 45, 35, 22, 32, 12]\n"
],
[
"# After using list comprehension\n# Our code is only left with a single line-\nTwice_Evens_2 = [n * 2 for n in no_list if n % 2 == 0]\n\n# Here we can check if the results are same or not-\nprint(Twice_Evens==Twice_Evens_2)\n# As we can see the True boolean value of our result!\n",
"True\n"
],
[
"# Some advanced list comprehension-\n\n# Here these complex big loops are making a logic to \neven_odd = []\nfor i in range(2):\n for j in range(10):\n if j%2==i:\n even_odd.append(j)\n\n\n# We have done the same thing with a single line of code-\neven_odd_2 = [j for j in range(10) for i in range(2) if j%2==i]\n\n\n\n",
"_____no_output_____"
],
[
"# We may check the value of n by-\nprint(\"n Ist one =\", even_odd)\nprint(\"n IInd one =\", even_odd_2)\n\n",
"n Ist one = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]\nn IInd one = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"# Here we can check if the results are same or not-\nprint(even_odd==even_odd_2)\n\n\n\n# Containers in Python\n# These data types store data in a container-\n# Examples of built-in containers are- list, dict, tuple, set\n",
"False\n"
]
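,
[
"# The comparison above is False because for-clauses in a comprehension nest\n# left to right, so the outer loop must come first. A sketch that reproduces\n# the nested-loop order exactly:\neven_odd_3 = [j for i in range(2) for j in range(10) if j%2==i]\nprint(even_odd==even_odd_3)\n# prints True\n",
"_____no_output_____"
]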
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7f52a845e43f15a24444c8d3d0adcfc42df5867 | 411,853 | ipynb | Jupyter Notebook | week01-pytorch_intro/seminar_pytorch.ipynb | mikita-zhuryk/deep_vision_and_graphics | 87c51f60eb19b65665b3bd6b196beb3c572f0267 | [
"MIT"
] | null | null | null | week01-pytorch_intro/seminar_pytorch.ipynb | mikita-zhuryk/deep_vision_and_graphics | 87c51f60eb19b65665b3bd6b196beb3c572f0267 | [
"MIT"
] | null | null | null | week01-pytorch_intro/seminar_pytorch.ipynb | mikita-zhuryk/deep_vision_and_graphics | 87c51f60eb19b65665b3bd6b196beb3c572f0267 | [
"MIT"
] | 1 | 2021-12-10T15:26:48.000Z | 2021-12-10T15:26:48.000Z | 123.828322 | 121,016 | 0.811702 | [
[
[
"# Hello, PyTorch\n\n![img](https://pytorch.org/tutorials/_static/pytorch-logo-dark.svg)\n\n__This notebook__ will teach you to use PyTorch low-level core. If you're running this notebook outside the course environment, you can install it [here](https://pytorch.org).\n\n__PyTorch feels__ differently than tensorflow/theano on almost every level. TensorFlow makes your code live in two \"worlds\" simultaneously: symbolic graphs and actual tensors. First you declare a symbolic \"recipe\" of how to get from inputs to outputs, then feed it with actual minibatches of data. In PyTorch, __there's only one world__: all tensors have a numeric value.\n\nYou compute outputs on the fly without pre-declaring anything. The code looks exactly as in pure numpy with one exception: PyTorch computes gradients for you. And can run stuff on GPU. And has a number of pre-implemented building blocks for your neural nets. [And a few more things.](https://medium.com/towards-data-science/pytorch-vs-tensorflow-spotting-the-difference-25c75777377b)\n\nAnd now we finally shut up and let PyTorch do the talking.",
"_____no_output_____"
]
],
[
[
"import sys, os\nif 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):\n !wget -q https://raw.githubusercontent.com/yandexdataschool/deep_vision_and_graphics/fall21/week01-pytorch_intro/notmnist.py\n !touch .setup_complete",
"_____no_output_____"
],
[
"import numpy as np\nimport torch\nprint(torch.__version__)",
"1.10.0+cu113\n"
],
[
"# numpy world\n\nx = np.arange(16).reshape(4, 4)\n\nprint(\"X:\\n%s\\n\" % x)\nprint(\"X.shape: %s\\n\" % (x.shape,))\nprint(\"add 5:\\n%s\\n\" % (x + 5))\nprint(\"X*X^T:\\n%s\\n\" % np.dot(x, x.T))\nprint(\"mean over rows:\\n%s\\n\" % (x.mean(axis=-1)))\nprint(\"cumsum of cols:\\n%s\\n\" % (np.cumsum(x, axis=0)))",
"X:\n[[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]\n [12 13 14 15]]\n\nX.shape: (4, 4)\n\nadd 5:\n[[ 5 6 7 8]\n [ 9 10 11 12]\n [13 14 15 16]\n [17 18 19 20]]\n\nX*X^T:\n[[ 14 38 62 86]\n [ 38 126 214 302]\n [ 62 214 366 518]\n [ 86 302 518 734]]\n\nmean over rows:\n[ 1.5 5.5 9.5 13.5]\n\ncumsum of cols:\n[[ 0 1 2 3]\n [ 4 6 8 10]\n [12 15 18 21]\n [24 28 32 36]]\n\n"
],
[
"# PyTorch world\n\nx = np.arange(16).reshape(4, 4)\n\nx = torch.tensor(x, dtype=torch.float32) # or torch.arange(0, 16).view(4, 4)\n\nprint(\"X:\\n%s\" % x)\nprint(\"X.shape: %s\\n\" % (x.shape,))\nprint(\"add 5:\\n%s\" % (x + 5))\nprint(\"X*X^T:\\n%s\" % torch.matmul(x, x.transpose(1, 0))) # short: x.mm(x.t())\nprint(\"mean over rows:\\n%s\" % torch.mean(x, dim=-1))\nprint(\"cumsum of cols:\\n%s\" % torch.cumsum(x, dim=0))",
"X:\ntensor([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\nX.shape: torch.Size([4, 4])\n\nadd 5:\ntensor([[ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.],\n [13., 14., 15., 16.],\n [17., 18., 19., 20.]])\nX*X^T:\ntensor([[ 14., 38., 62., 86.],\n [ 38., 126., 214., 302.],\n [ 62., 214., 366., 518.],\n [ 86., 302., 518., 734.]])\nmean over rows:\ntensor([ 1.5000, 5.5000, 9.5000, 13.5000])\ncumsum of cols:\ntensor([[ 0., 1., 2., 3.],\n [ 4., 6., 8., 10.],\n [12., 15., 18., 21.],\n [24., 28., 32., 36.]])\n"
]
],
[
[
"## NumPy and PyTorch\n\nAs you can notice, PyTorch allows you to hack stuff much the same way you did with NumPy. No graph declaration, no placeholders, no sessions. This means that you can _see the numeric value of any tensor at any moment of time_. Debugging such code can be done with by printing tensors or using any debug tool you want (e.g. [PyCharm debugger](https://www.jetbrains.com/help/pycharm/part-1-debugging-python-code.html) or [gdb](https://wiki.python.org/moin/DebuggingWithGdb)).\n\nYou could also notice the a few new method names and a different API. So no, there's no compatibility with NumPy [yet](https://github.com/pytorch/pytorch/issues/2228) and yes, you'll have to memorize all the names again. Get excited!\n\n![img](http://i0.kym-cdn.com/entries/icons/original/000/017/886/download.jpg)\n\nFor example, \n* If something takes a list/tuple of axes in NumPy, you can expect it to take `*args` in PyTorch\n * `x.reshape([1,2,8]) -> x.view(1,2,8)`\n* You should swap `axis` for `dim` in operations like `mean` or `cumsum`\n * `x.sum(axis=-1) -> x.sum(dim=-1)`\n* Most mathematical operations are the same, but types an shaping is different\n * `x.astype('int64') -> x.type(torch.LongTensor)`\n\nTo help you acclimatize, there's a [table](https://github.com/torch/torch7/wiki/Torch-for-NumPy-users) covering most new things. There's also a neat [documentation page](http://pytorch.org/docs/master/).\n\nFinally, if you're stuck with a technical problem, we recommend searching [PyTorch forums](https://discuss.pytorch.org/). Or just googling, which usually works just as efficiently. \n\nIf you feel like you almost give up, remember two things: __GPU__ and __free gradients__. Besides you can always jump back to NumPy with `x.numpy()`.",
"_____no_output_____"
],
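[
"For instance, a minimal side-by-side sketch of those renamings (all standard PyTorch calls):\n\n```python\nimport numpy as np\nimport torch\n\nx_np = np.arange(8).reshape([2, 4]) # numpy\nx = torch.as_tensor(x_np, dtype=torch.float32) # torch\n\nx.view(4, 2) # x_np.reshape([4, 2])\nx.sum(dim=-1) # x_np.sum(axis=-1)\nx.type(torch.LongTensor) # x_np.astype('int64')\nx.numpy() # back to numpy\n```",
"_____no_output_____"
],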
[
"### Warmup: trigonometric knotwork\n_inspired by [this post](https://www.quora.com/What-are-the-most-interesting-equation-plots)_\n\nThere are some simple mathematical functions with cool plots. For one, consider this:\n\n$$ x(t) = t - 1.5 * cos(15 t) $$\n$$ y(t) = t - 1.5 * sin(16 t) $$",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\nt = torch.linspace(-10, 10, steps=10000)\n\n# compute x(t) and y(t) as defined above\nx = t - 1.5 * torch.cos(15 * t)\ny = t - 1.5 * torch.sin(0.5 * t)\n\nplt.plot(x.numpy(), y.numpy())",
"_____no_output_____"
]
],
[
[
"If you're done early, try adjusting the formula and seeing how it affects the function.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## Automatic gradients\n\nAny self-respecting DL framework must do your backprop for you. Torch handles this with the `autograd` module.\n\nThe general pipeline looks like this:\n* When creating a tensor, you mark it as `requires_grad`:\n * `torch.zeros(5, requires_grad=True)`\n * `torch.tensor(np.arange(5), dtype=torch.float32, requires_grad=True)`\n* Define some differentiable `loss = arbitrary_function(a)`\n* Call `loss.backward()`\n* Gradients are now available as ```a.grad```\n\n__Here's an example:__ let's fit a linear regression on Boston house prices.",
"_____no_output_____"
]
],
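[
[
"A minimal end-to-end sketch of that pipeline before the regression example:\n\n```python\na = torch.zeros(5, requires_grad=True) # mark the tensor\nloss = ((a - 1) ** 2).sum() # any differentiable function\nloss.backward() # propagate gradients\nprint(a.grad) # d(loss)/da = 2*(a-1) -> a tensor of -2.0s\n```",
"_____no_output_____"
]
],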
[
[
"from sklearn.datasets import load_boston\nboston = load_boston()\nplt.scatter(boston.data[:, -1], boston.target)",
"/home/mzhuryk/shad/deep_vision_and_graphics/env/lib/python3.8/site-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function load_boston is deprecated; `load_boston` is deprecated in 1.0 and will be removed in 1.2.\n\n The Boston housing prices dataset has an ethical problem. You can refer to\n the documentation of this function for further details.\n\n The scikit-learn maintainers therefore strongly discourage the use of this\n dataset unless the purpose of the code is to study and educate about\n ethical issues in data science and machine learning.\n\n In this special case, you can fetch the dataset from the original\n source::\n\n import pandas as pd\n import numpy as np\n\n\n data_url = \"http://lib.stat.cmu.edu/datasets/boston\"\n raw_df = pd.read_csv(data_url, sep=\"\\s+\", skiprows=22, header=None)\n data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])\n target = raw_df.values[1::2, 2]\n\n Alternative datasets include the California housing dataset (i.e.\n :func:`~sklearn.datasets.fetch_california_housing`) and the Ames housing\n dataset. You can load the datasets as follows::\n\n from sklearn.datasets import fetch_california_housing\n housing = fetch_california_housing()\n\n for the California housing dataset and::\n\n from sklearn.datasets import fetch_openml\n housing = fetch_openml(name=\"house_prices\", as_frame=True)\n\n for the Ames housing dataset.\n \n warnings.warn(msg, category=FutureWarning)\n"
],
[
"NLR_DEGREE = 3\nLR = 1e-2\n\nw = torch.rand(NLR_DEGREE + 1, requires_grad=True)\n\nx = torch.tensor(boston.data[:, -1] / 10, dtype=torch.float32)\nx = x.unsqueeze(-1) ** torch.arange(NLR_DEGREE + 1)\ny = torch.tensor(boston.target, dtype=torch.float32)",
"_____no_output_____"
],
[
"y_pred = x @ w.T\nloss = torch.mean((y_pred - y)**2)\n\n# propagate gradients\nloss.backward()",
"_____no_output_____"
]
],
[
[
"The gradients are now stored in `.grad` of those variables that require them.",
"_____no_output_____"
]
],
[
[
"print(\"dL/dw = \\n\", w.grad)\n# print(\"dL/db = \\n\", b.grad)",
"dL/dw = \n tensor([ -43.1492, -43.7833, -60.1212, -103.8557])\n"
]
],
[
[
"If you compute gradient from multiple losses, the gradients will add up at variables, therefore it's useful to __zero the gradients__ between iteratons.",
"_____no_output_____"
]
],
[
[
"from IPython.display import clear_output\n\nfor i in range(int(1e5)):\n y_pred = x @ w.T # + b\n loss = torch.mean((y_pred - y)**2)\n loss.backward()\n\n w.data -= LR * w.grad.data\n# b.data -= LR * b.grad.data\n\n # zero gradients\n w.grad.data.zero_()\n# b.grad.data.zero_()\n\n # the rest of code is just bells and whistles\n with torch.no_grad():\n if (i + 1) % int(1e4) == 0:\n clear_output(True)\n plt.scatter(x[:, 1].numpy(), y.numpy())\n plt.scatter(x[:, 1].numpy(), y_pred.numpy(), color='orange', linewidth=5)\n plt.show()\n\n print(\"loss = \", loss.numpy())\n if loss.numpy() < 0.5:\n print(\"Done!\")\n break",
"_____no_output_____"
]
],
[
[
"__Bonus quest__: try implementing and writing some nonlinear regression. You can try quadratic features or some trigonometry, or a simple neural network. The only difference is that now you have more variables and a more complicated `y_pred`. ",
"_____no_output_____"
],
[
"# High-level PyTorch\n\nSo far we've been dealing with low-level PyTorch API. While it's absolutely vital for any custom losses or layers, building large neural nets in it is a bit clumsy.\n\nLuckily, there's also a high-level PyTorch interface with pre-defined layers, activations and training algorithms. \n\nWe'll cover them as we go through a simple image recognition problem: classifying letters into __\"A\"__ vs __\"B\"__.\n",
"_____no_output_____"
]
],
[
[
"from notmnist import load_notmnist\nX_train, y_train, X_test, y_test = load_notmnist(letters='AB')\nX_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784])\n\nprint(\"Train size = %i, test_size = %i\" % (len(X_train), len(X_test)))",
"Parsing...\nfound broken img: ./notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png [it's ok if <10 images are broken]\nDone\nTrain size = 2808, test_size = 937\n"
],
[
"for i in [0, 1]:\n plt.subplot(1, 2, i + 1)\n plt.imshow(X_train[i].reshape([28, 28]))\n plt.title(str(y_train[i]))",
"_____no_output_____"
]
],
[
[
"Let's start with layers. The main abstraction here is __`torch.nn.Module`__:",
"_____no_output_____"
]
],
[
[
"from torch import nn\nimport torch.nn.functional as F\n\nprint(nn.Module.__doc__)",
"Base class for all neural network modules.\n\n Your models should also subclass this class.\n\n Modules can also contain other Modules, allowing to nest them in\n a tree structure. You can assign the submodules as regular attributes::\n\n import torch.nn as nn\n import torch.nn.functional as F\n\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\n\n Submodules assigned in this way will be registered, and will have their\n parameters converted too when you call :meth:`to`, etc.\n\n :ivar training: Boolean represents whether this module is in training or\n evaluation mode.\n :vartype training: bool\n \n"
]
],
[
[
"There's a vast library of popular layers and architectures already built for ya'.\n\nThis is a binary classification problem, so we'll train __Logistic Regression__.\n$$P(y_i | X_i) = \\sigma(W \\cdot X_i + b) ={ 1 \\over {1+e^{- [W \\cdot X_i + b]}} }$$\n",
"_____no_output_____"
]
],
[
[
"# create a network that stacks layers on top of each other\nmodel = nn.Sequential()\n\n# add first \"dense\" layer with 784 input units and 1 output unit.\nmodel.add_module('l1', nn.Linear(784, 1))\n\n# add softmax activation for probabilities. Normalize over axis 1\n# note: layer names must be unique\nmodel.add_module('l2', nn.Sigmoid())",
"_____no_output_____"
],
[
"print(\"Weight shapes:\", [w.shape for w in model.parameters()])",
"Weight shapes: [torch.Size([1, 784]), torch.Size([1])]\n"
],
[
"# create dummy data with 3 samples and 784 features\nx = torch.tensor(X_train[:3], dtype=torch.float32)\ny = torch.tensor(y_train[:3], dtype=torch.float32)\n\n# compute outputs given inputs, both are variables\ny_predicted = model(x)[:, 0]\n\ny_predicted # display what we've got",
"_____no_output_____"
]
],
[
[
"Let's now define a loss function for our model.\n\nThe natural choice is to use binary crossentropy (aka logloss, negative llh):\n$$ L = {1 \\over N} \\underset{X_i,y_i} \\sum - [ y_i \\cdot log P(y_i=1 | X_i) + (1-y_i) \\cdot log (1-P(y_i=1 | X_i)) ]$$\n\n",
"_____no_output_____"
]
],
[
[
"crossentropy_lambda = lambda input, target: -(target * torch.log(input) + (1 - target) * torch.log(1 - input))\ncrossentropy = crossentropy_lambda(y_predicted, y)\n\nloss = crossentropy.mean()\n\nassert tuple(crossentropy.size()) == (\n 3,), \"Crossentropy must be a vector with element per sample\"\nassert tuple(loss.size()) == tuple(\n), \"Loss must be scalar. Did you forget the mean/sum?\"\nassert loss.data.numpy() > 0, \"Crossentropy must non-negative, zero only for perfect prediction\"\nassert loss.data.numpy() <= np.log(\n 3), \"Loss is too large even for untrained model. Please double-check it.\"",
"_____no_output_____"
]
],
[
[
"__Note:__ you can also find many such functions in `torch.nn.functional`, just type __`F.<tab>`__.",
"_____no_output_____"
],
[
"__Torch optimizers__\n\nWhen we trained Linear Regression above, we had to manually `.zero_()` gradients on both our variables. Imagine that code for a 50-layer network.\n\nAgain, to keep it from getting dirty, there's `torch.optim` module with pre-implemented algorithms:",
"_____no_output_____"
]
],
[
[
"opt = torch.optim.RMSprop(model.parameters(), lr=0.01)\n\n# here's how it's used:\nopt.zero_grad() # clear gradients\nloss.backward() # add new gradients\nopt.step() # change weights",
"_____no_output_____"
],
[
"# dispose of old variables to avoid bugs later\ndel x, y, y_predicted, loss, y_pred",
"_____no_output_____"
]
],
[
[
"### Putting it all together",
"_____no_output_____"
]
],
[
[
"# create network again just in case\nmodel = nn.Sequential()\nmodel.add_module('first', nn.Linear(784, 1))\nmodel.add_module('second', nn.Sigmoid())\n\nopt = torch.optim.Adam(model.parameters(), lr=1e-3)",
"_____no_output_____"
],
[
"history = []\n\nfor i in range(100):\n\n # sample 256 random images\n ix = np.random.randint(0, len(X_train), 256)\n x_batch = torch.tensor(X_train[ix], dtype=torch.float32)\n y_batch = torch.tensor(y_train[ix], dtype=torch.float32)\n\n # predict probabilities\n y_predicted = model(x_batch).squeeze()\n\n assert y_predicted.dim(\n ) == 1, \"did you forget to select first column with [:, 0]\"\n\n # compute loss, just like before\n loss = crossentropy_lambda(y_predicted, y_batch).mean()\n\n # compute gradients\n loss.backward()\n\n # Adam step\n opt.step()\n\n # clear gradients\n opt.zero_grad()\n\n history.append(loss.data.numpy())\n\n if i % 10 == 0:\n print(\"step #%i | mean loss = %.3f\" % (i, np.mean(history[-10:])))",
"step #0 | mean loss = 0.682\nstep #10 | mean loss = 0.371\nstep #20 | mean loss = 0.233\nstep #30 | mean loss = 0.168\nstep #40 | mean loss = 0.143\nstep #50 | mean loss = 0.126\nstep #60 | mean loss = 0.121\nstep #70 | mean loss = 0.114\nstep #80 | mean loss = 0.108\nstep #90 | mean loss = 0.109\n"
]
],
[
[
"__Debugging tips:__\n* Make sure your model predicts probabilities correctly. Just print them and see what's inside.\n* Don't forget the _minus_ sign in the loss function! It's a mistake 99% people do at some point.\n* Make sure you zero-out gradients after each step. Seriously:)\n* In general, PyTorch's error messages are quite helpful, read 'em before you google 'em.\n* if you see nan/inf, print what happens at each iteration to find our where exactly it occurs.\n * If loss goes down and then turns nan midway through, try smaller learning rate. (Our current loss formula is unstable).",
"_____no_output_____"
],
[
"### Evaluation\n\nLet's see how our model performs on test data",
"_____no_output_____"
]
],
[
[
"# use your model to predict classes (0 or 1) for all test samples\nwith torch.no_grad():\n predicted_y_test = (model(torch.from_numpy(X_test)) > 0.5).squeeze().numpy()\n\nassert isinstance(predicted_y_test, np.ndarray), \"please return np array, not %s\" % type(\n predicted_y_test)\nassert predicted_y_test.shape == y_test.shape, \"please predict one class for each test sample\"\nassert np.in1d(predicted_y_test, y_test).all(), \"please predict class indexes\"\n\naccuracy = np.mean(predicted_y_test == y_test)\n\nprint(\"Test accuracy: %.5f\" % accuracy)\nassert accuracy > 0.95, \"try training longer\"",
"Test accuracy: 0.96051\n"
]
],
[
[
"## More about PyTorch:\n* Using torch on GPU and multi-GPU - [link](http://pytorch.org/docs/master/notes/cuda.html)\n* More tutorials on PyTorch - [link](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html)\n* PyTorch examples - a repo that implements many cool DL models in PyTorch - [link](https://github.com/pytorch/examples)\n* Practical PyTorch - a repo that implements some... other cool DL models... yes, in PyTorch - [link](https://github.com/spro/practical-pytorch)\n* And some more - [link](https://www.reddit.com/r/pytorch/comments/6z0yeo/pytorch_and_pytorch_tricks_for_kaggle/)\n\n---",
"_____no_output_____"
],
[
"# Homework tasks\n\nThere will be three tasks worth 2, 3 and 5 points respectively. \nIf you get stuck with no progress, try switching to the next task and returning later.",
"_____no_output_____"
],
[
"### Task I (2 points) - tensormancy\n\n![img](https://media.giphy.com/media/3o751UMCYtSrRAFRFC/giphy.gif)\n\nWhen dealing with more complex stuff like neural network, it's best if you use tensors the way samurai uses his sword. \n\n\n__1.1 The Cannabola__ \n[(_disclaimer_)](https://gist.githubusercontent.com/justheuristic/e2c1fa28ca02670cabc42cacf3902796/raw/fd3d935cef63a01b85ed2790b5c11c370245cbd7/stddisclaimer.h)\n\nLet's write another function, this time in polar coordinates:\n$$\\rho(\\theta) = (1 + 0.9 \\cdot cos (8 \\cdot \\theta) ) \\cdot (1 + 0.1 \\cdot cos(24 \\cdot \\theta)) \\cdot (0.9 + 0.05 \\cdot cos(200 \\cdot \\theta)) \\cdot (1 + sin(\\theta))$$\n\n\nThen convert it into cartesian coordinates ([howto](http://www.mathsisfun.com/polar-cartesian-coordinates.html)) and plot the results.\n\nUse torch tensors only: no lists, loops, numpy arrays, etc.",
"_____no_output_____"
]
],
[
[
"theta = torch.linspace(- np.pi, np.pi, steps=1000)\n\n# compute rho(theta) as per formula above\nrho = (1 + 0.9 * torch.cos(8 * theta)) * (1 + 0.1 * torch.cos(24 * theta)) * (0.9 + 0.05 * torch.cos(200 * theta)) * (1 + torch.sin(theta))\n\n# Now convert polar (rho, theta) pairs into cartesian (x,y) to plot them.\nx = rho * torch.cos(theta)\ny = rho * torch.sin(theta)\n\n\nplt.figure(figsize=[6, 6])\nplt.fill(x.numpy(), y.numpy(), color='green')\nplt.grid()",
"_____no_output_____"
]
],
[
[
"### Task II: The Game of Life (3 points)\n\nNow it's time for you to make something more challenging. We'll implement Conway's [Game of Life](http://web.stanford.edu/~cdebs/GameOfLife/) in _pure PyTorch_. \n\nWhile this is still a toy task, implementing game of life this way has one cool benefit: __you'll be able to run it on GPU!__ Indeed, what could be a better use of your GPU than simulating Game of Life on 1M/1M grids?\n\n![img](https://cdn.tutsplus.com/gamedev/authors/legacy/Stephane%20Beniak/2012/09/11/Preview_Image.png)\nIf you've skipped the URL above out of sloth, here's the Game of Life:\n* You have a 2D grid of cells, where each cell is \"alive\"(1) or \"dead\"(0)\n* Any living cell that has 2 or 3 neighbors survives, else it dies [0,1 or 4+ neighbors]\n* Any cell with exactly 3 neighbors becomes alive (if it was dead)\n\nFor this task, you are given a reference NumPy implementation that you must convert to PyTorch.\n_[NumPy code inspired by: https://github.com/rougier/numpy-100]_\n\n\n__Note:__ You can find convolution in `torch.nn.functional.conv2d(Z,filters)`. Note that it has a different input format.\n\n__Note 2:__ From the mathematical standpoint, PyTorch convolution is actually cross-correlation. Those two are very similar operations. More info: [video tutorial](https://www.youtube.com/watch?v=C3EEy8adxvc), [scipy functions review](http://programmerz.ru/questions/26903/2d-convolution-in-python-similar-to-matlabs-conv2-question), [stack overflow source](https://stackoverflow.com/questions/31139977/comparing-matlabs-conv2-with-scipys-convolve2d).",
"_____no_output_____"
]
],
[
[
"from scipy.signal import correlate2d\n\ndef np_update(Z):\n # Count neighbours with convolution\n filters = np.array([[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n N = correlate2d(Z, filters, mode='same')\n\n # Apply rules\n birth = (N == 3) & (Z == 0)\n survive = ((N == 2) | (N == 3)) & (Z == 1)\n\n Z[:] = birth | survive\n return Z",
"_____no_output_____"
],
[
"def torch_update(Z):\n \"\"\"\n Implement an update function that does to Z exactly the same as np_update.\n :param Z: torch.FloatTensor of shape [height,width] containing 0s(dead) an 1s(alive)\n :returns: torch.FloatTensor Z after updates.\n\n You can opt to create a new tensor or change Z inplace.\n \"\"\"\n\n filters = torch.FloatTensor([[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n \n N = F.conv2d(Z.view(1, 1, *Z.shape), filters.view(1, 1, *filters.shape), padding=1).squeeze()\n \n birth = (N == 3) & (Z == 0)\n survive = ((N == 2) | (N == 3)) & (Z == 1)\n\n Z[:] = birth | survive\n return Z",
"_____no_output_____"
],
[
"# initial frame\nZ_numpy = np.random.choice([0, 1], p=(0.5, 0.5), size=(100, 100))\nZ = torch.from_numpy(Z_numpy).type(torch.FloatTensor)\n\n# your debug polygon :)\nZ_new = torch_update(Z.clone())\n\n# tests\nZ_reference = np_update(Z_numpy.copy())\nassert np.all(Z_new.numpy() == Z_reference), \\\n \"your PyTorch implementation doesn't match np_update. Look into Z and np_update(ZZ) to investigate.\"\nprint(\"Well done!\")",
"Well done!\n"
],
[
"%matplotlib notebook\nplt.ion()\n\n# initialize game field\nZ = np.random.choice([0, 1], size=(100, 100))\nZ = torch.from_numpy(Z).type(torch.FloatTensor)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.show()\n\nfor _ in range(100):\n # update\n Z = torch_update(Z)\n\n # re-draw image\n ax.clear()\n ax.imshow(Z.numpy(), cmap='gray')\n fig.canvas.draw()",
"_____no_output_____"
],
[
"# Some fun setups for your amusement\n\n# parallel stripes\nZ = np.arange(100) % 2 + np.zeros([100, 100])\n# with a small imperfection\nZ[48:52, 50] = 1\n\nZ = torch.from_numpy(Z).type(torch.FloatTensor)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.show()\n\nfor _ in range(100):\n Z = torch_update(Z)\n ax.clear()\n ax.imshow(Z.numpy(), cmap='gray')\n fig.canvas.draw()",
"_____no_output_____"
]
],
[
[
"More fun with Game of Life: [video](https://www.youtube.com/watch?v=C2vgICfQawE)",
"_____no_output_____"
],
[
"### Task III: Going deeper (5 points)\n<img src=\"http://download.gamezone.com/uploads/image/data/1190338/article_post_width_a88.jpg\" width=360>\nYour ultimate task for this week is to build your first neural network [almost] from scratch and pure PyTorch.\n\nThis time you will solve the same digit recognition problem, but at a larger scale\n\n* 10 different letters\n* 20k samples\n\nWe want you to build a network that reaches at least 80% accuracy and has at least 2 linear layers in it. Naturally, it should be nonlinear to beat logistic regression.\n\nWith 10 classes you will need to use __Softmax__ at the top instead of sigmoid and train using __categorical crossentropy__ (see [here](http://wiki.fast.ai/index.php/Log_Loss)). Write your own loss or use `torch.nn.functional.nll_loss`. Just make sure you understand what it accepts as input.\n\nNote that you are not required to build 152-layer monsters here. A 2-layer (one hidden, one output) neural network should already give you an edge over logistic regression.\n\n\n__[bonus kudos]__\nIf you've already beaten logistic regression with a two-layer net, but enthusiasm still ain't gone, you can try improving the test accuracy even further! It should be possible to reach 90% without convnets.\n\n__SPOILERS!__\nAt the end of the notebook you will find a few tips and frequent errors. \nIf you feel confident enough, just start coding right away and get there ~~if~~ once you need to untangle yourself.",
"_____no_output_____"
]
],
[
[
"from notmnist import load_notmnist\nX_train, y_train, X_test, y_test = load_notmnist(letters='ABCDEFGHIJ')\nX_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784])",
"Parsing...\nfound broken img: ./notMNIST_small/F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png [it's ok if <10 images are broken]\nfound broken img: ./notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png [it's ok if <10 images are broken]\nDone\n"
],
[
"%matplotlib inline\nplt.figure(figsize=[12, 4])\nfor i in range(20):\n plt.subplot(2, 10, i+1)\n plt.imshow(X_train[i].reshape([28, 28]))\n plt.title(str(y_train[i]))",
"_____no_output_____"
],
[
"from tqdm.notebook import tqdm\nfrom torch.utils.data import TensorDataset, DataLoader\n\nINPUT_SHAPE = 784\nEPOCHS = 15\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nmodel = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Linear(256, 128),\n nn.ReLU(),\n nn.Linear(128, 10),\n nn.Softmax(dim=-1))\n\nmodel.to(DEVICE)\n\ntrain_dataset = TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))\ntest_dataset = TensorDataset(torch.from_numpy(X_test), torch.from_numpy(y_test))\ntrain_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=2, prefetch_factor=2)\ntest_dataloader = DataLoader(test_dataset, batch_size=16, shuffle=False, num_workers=2, prefetch_factor=2)\n\nloss = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters())\n\n\ndef train_for_epoch():\n \n def measure_accuracy(y_pred, y):\n return (y_pred == y).sum() / len(y)\n \n train_outputs = []\n train_gt = []\n test_outputs = []\n test_gt = []\n for x, y in train_dataloader:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n optimizer.zero_grad()\n y_pred = model(x)\n loss_value = loss(y_pred, y)\n loss_value.backward()\n optimizer.step()\n train_outputs.extend(y_pred.detach().argmax(dim=-1).cpu().tolist())\n train_gt.extend(y.tolist())\n \n train_accuracy = measure_accuracy(np.array(train_outputs), train_gt)\n print(f'Train accuracy: {train_accuracy:.3f}')\n \n with torch.no_grad():\n for x, y in test_dataloader:\n y_pred = model(x.to(DEVICE))\n test_outputs.extend(y_pred.detach().argmax(dim=-1).cpu().tolist())\n test_gt.extend(y.tolist())\n \n test_accuracy = measure_accuracy(np.array(test_outputs), test_gt)\n print(f'Test accuracy: {test_accuracy:.3f}')\n\nfor ep in tqdm(range(EPOCHS)):\n train_for_epoch()",
"_____no_output_____"
]
],
[
[
"<br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>",
"_____no_output_____"
],
[
"# SPOILERS!\n\nRecommended pipeline:\n\n* Adapt logistic regression from previous assignment to classify one letter against others (e.g. A vs the rest)\n* Generalize it to multiclass logistic regression.\n - Either try to remember lecture 0 or google it.\n - Instead of weight vector you'll have to use matrix (feature_id x class_id)\n - Softmax (exp over sum of exps) can be implemented manually or as `nn.Softmax` (layer) or `F.softmax` (function)\n - Probably better to use STOCHASTIC gradient descent (minibatch) for greater speed\n - You can also try momentum/rmsprop/adawhatever\n - in which case the dataset should probably be shuffled (or use random subsamples on each iteration)\n* Add a hidden layer. Now your logistic regression uses hidden neurons instead of inputs.\n - Hidden layer uses the same math as output layer (ex-logistic regression), but uses some nonlinearity (e.g. sigmoid) instead of softmax\n - You need to train both layers, not just the output layer :)\n - 50 hidden neurons and a sigmoid nonlinearity will do for a start. Many ways to improve. \n - In ideal case this totals to 2 `torch.matmul`'s, 1 softmax and 1 ReLU/sigmoid\n - __Make sure this neural network works better than logistic regression!__\n \n* Now's the time to try improving the network. Consider layers (size, neuron count), nonlinearities, optimization methods, initialization — whatever you want, but please avoid convolutions for now.\n \n* If anything seems wrong, try going through one step of training and printing everything you compute.\n* If you see NaNs midway through optimization, you can estimate $\\log P(y \\mid x)$ as `F.log_softmax(layer_before_softmax)`.",
"_____no_output_____"
]
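,
[
"For the numerical-stability tip above, a minimal sketch of the stable loss (both functions are standard PyTorch):\n\n```python\nimport torch.nn.functional as F\n\nlogits = model_without_softmax(x_batch) # raw scores, shape [batch, 10]\nloss = F.nll_loss(F.log_softmax(logits, dim=-1), y_batch)\n# equivalent to F.cross_entropy(logits, y_batch), with the log-softmax explicit\n```\n\nHere `model_without_softmax` is a hypothetical copy of the network with the final softmax layer removed.",
"_____no_output_____"
]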
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7f532d4d3066da912f043c766ccde24992a0abf | 13,138 | ipynb | Jupyter Notebook | recipes/frame_dicom_data.ipynb | ASonay/kili-playground | 9624073703b5e6151cf496f44f17f531576875b7 | [
"Apache-2.0"
] | 1 | 2021-12-14T20:03:57.000Z | 2021-12-14T20:03:57.000Z | recipes/frame_dicom_data.ipynb | x213212/kili-playground | dfb94c2d54bedfd7fec452b91f811587a2156c13 | [
"Apache-2.0"
] | null | null | null | recipes/frame_dicom_data.ipynb | x213212/kili-playground | dfb94c2d54bedfd7fec452b91f811587a2156c13 | [
"Apache-2.0"
] | null | null | null | 24.788679 | 351 | 0.533719 | [
[
[
"# Kili Tutorial: Importing medical data into a frame project ",
"_____no_output_____"
],
[
"In this tutorial, we will show you how to import dicom data into a [Frame Kili project](https://cloud.kili-technology.com/docs/video-interfaces/multi-frames-classification/#docsNav). Such projects allow you to annotate volumes of image data.\n\nThe data we use comes from [The Cancer Genome Atlas Lung Adenocarcinoma (TCGA-LUAD) data collection](https://wiki.cancerimagingarchive.net/display/Public/TCGA-LUAD). We selected 3 scans out of this dataset.",
"_____no_output_____"
],
[
"## Downloading data",
"_____no_output_____"
],
[
"Let's first import the scans. We host these files in a .zip on GDrive.",
"_____no_output_____"
]
],
[
[
"import os\nimport subprocess\n\nimport tqdm",
"_____no_output_____"
],
[
"if 'recipes' in os.getcwd():\n os.chdir('..')",
"_____no_output_____"
],
[
"os.makedirs(os.path.expanduser('~/Downloads'), exist_ok=True)",
"_____no_output_____"
]
],
[
[
"We will use a small package to help downloading the file hosted on Google Drive",
"_____no_output_____"
]
],
[
[
"%%bash\npip install gdown\ngdown https://drive.google.com/uc?id=1q3qswXthFh3xMtAAnePph6vav3N7UtOF -O ~/Downloads/TCGA-LUAD.zip",
"fatal: destination path 'download_google_drive' already exists and is not an empty directory.\n\r0.00B [00:00, ?B/s]\r32.0kB [00:00, 145MB/s]\n"
],
[
"!apt-get install unzip",
"/bin/sh: apt-get: command not found\r\n"
],
[
"!unzip -o ~/Downloads/TCGA-LUAD.zip -d ~/Downloads/ > /dev/null",
"_____no_output_____"
]
],
[
[
"## Reading data",
"_____no_output_____"
],
[
"We can then read the dicom files with [pydicom](https://pydicom.github.io/pydicom/stable/).",
"_____no_output_____"
]
],
[
[
"ASSET_ROOT = os.path.expanduser('~/Downloads/TCGA-LUAD')\n\nsorted_files = {}\nasset_number = 0\nfor root, dirs, files in os.walk(ASSET_ROOT):\n if len(files) > 0:\n file_paths = list(map(lambda path: os.path.join(root, path), files))\n sorted_files[f'asset-{asset_number+1}'] = sorted(file_paths,\n key=lambda path: int(path.split('/')[-1].split('-')[1].split('.')[0]))\n asset_number += 1",
"_____no_output_____"
]
],
[
[
"Let's see what is inside the dataset :",
"_____no_output_____"
]
],
[
[
"!pip install Pillow pydicom\nfrom PIL import Image\nimport pydicom\n\ndef read_dcm_image(path):\n dicom = pydicom.dcmread(path)\n image = dicom.pixel_array\n # Currently, Kili does not support windowing in the application.\n # This will soon change, but until then we advise you to reduce the range to 256 values.\n image = (image - image.min()) / (image.max() - image.min()) * 256\n return Image.fromarray(image).convert('RGB')\n\nfor asset_key in sorted_files.keys():\n print(asset_key)\n im = read_dcm_image(sorted_files[asset_key][20])\n im.save(f'./recipes/img/frame_dicom_data_{asset_key}.png')",
"Requirement already satisfied: Pillow in /opt/anaconda3/lib/python3.7/site-packages (8.4.0)\nRequirement already satisfied: pydicom in /opt/anaconda3/lib/python3.7/site-packages (2.0.0)\nasset-1\nasset-2\nasset-3\n"
]
],
[
[
"![asset-1](./img/frame_dicom_data_asset-1.png)",
"_____no_output_____"
],
[
"![asset-2](./img/frame_dicom_data_asset-2.png)",
"_____no_output_____"
],
[
"![asset-3](./img/frame_dicom_data_asset-3.png)",
"_____no_output_____"
],
[
"## Extracting and serving images",
"_____no_output_____"
],
[
"For each of the dicom `.dcm` files, let's extract its content (image) and save it into a `.jpeg` image.",
"_____no_output_____"
]
],
[
[
"sorted_images = {}\nfor asset_key, files in sorted_files.items():\n images = []\n for file in tqdm.tqdm(files):\n print(file)\n im = read_dcm_image(file)\n im_file = file.replace('.dcm', '.jpeg')\n im.save(im_file, format='JPEG')\n images.append(im_file)\n sorted_images[asset_key] = images",
"100%|██████████| 201/201 [00:02<00:00, 85.82it/s]\n100%|██████████| 227/227 [00:02<00:00, 105.77it/s]\n100%|██████████| 329/329 [00:02<00:00, 112.38it/s]\n"
]
],
[
[
"We now have extracted jpeg images processable by Kili.",
"_____no_output_____"
],
[
"## Creating the project",
"_____no_output_____"
],
[
"We can now import those assets into a FRAME project !\n\nLet's begin by creating a project",
"_____no_output_____"
]
],
[
[
"## You can also directly create the interface on the application.\ninterface = {\n\t\"jobRendererWidth\": 0.17,\n\t\"jobs\": {\n\t\t\"JOB_0\": {\n\t\t\t\"mlTask\": \"OBJECT_DETECTION\",\n\t\t\t\"tools\": [\n\t\t\t\t\"semantic\"\n\t\t\t],\n\t\t\t\"instruction\": \"Segment the right class\",\n\t\t\t\"required\": 1,\n\t\t\t\"isChild\": False,\n\t\t\t\"content\": {\n\t\t\t\t\"categories\": {\n\t\t\t\t\t\"BONE\": {\n\t\t\t\t\t\t\"name\": \"Bone\",\n\t\t\t\t\t\t\"children\": [],\n\t\t\t\t\t\t\"color\": \"#0755FF\"\n\t\t\t\t\t},\n\t\t\t\t\t\"LUNG\": {\n\t\t\t\t\t\t\"name\": \"Lung\",\n\t\t\t\t\t\t\"children\": [],\n\t\t\t\t\t\t\"color\": \"#EEBA00\"\n\t\t\t\t\t},\n\t\t\t\t\t\"TISSUE_0\": {\n\t\t\t\t\t\t\"name\": \"Tissue\",\n\t\t\t\t\t\t\"children\": [],\n\t\t\t\t\t\t\"color\": \"#941100\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"input\": \"radio\"\n\t\t\t}\n\t\t}\n\t}\n}",
"_____no_output_____"
],
[
"## Authentication\nfrom kili.client import Kili\n\napi_key = os.getenv('KILI_USER_API_KEY')\napi_endpoint = os.getenv('KILI_API_ENDPOINT') # If you use Kili SaaS, use the url 'https://cloud.kili-technology.com/api/label/v2/graphql'\nkili = Kili(api_key=api_key, api_endpoint=api_endpoint)\n\n## Project creation\nproject = kili.create_project(\n description='Demo FRAME project',\n input_type='FRAME',\n json_interface=interface,\n title='Lungs from TCGA-LUAD'\n)\nproject_id = project['id']",
"/Users/maximeduval/Documents/kili-playground/kili/authentication.py:97: UserWarning: Kili Playground version should match with Kili API version.\nPlease install version: \"pip install kili==2.100.0\"\n warnings.warn(message, UserWarning)\n"
]
],
[
[
"## Importing images",
"_____no_output_____"
],
[
"Finally, let's import the volumes using `appendManyToDataset` (see [link](https://staging.cloud.kili-technology.com/docs/python-graphql-api/python-api/#append_many_to_dataset)). The key argument is `json_content_array`, which is a list of list of strings. Each element is the list of urls or paths pointing to images of the volume considered.\n - Let's host these images locally to demonstrate how we would do it with cloud URLs for example :",
"_____no_output_____"
]
],
[
[
"subprocess.Popen(f'python -m http.server 8001 --directory {ASSET_ROOT}',\n shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)\nROOT_URL = 'http://localhost:8001/'",
"_____no_output_____"
],
[
"def files_to_urls(files):\n return list(map(lambda file: ROOT_URL + file.split('TCGA-LUAD')[1], files))",
"_____no_output_____"
],
[
"kili.append_many_to_dataset(\n project_id=project_id,\n external_id_array=list(sorted_images.keys()),\n json_content_array=list(map(files_to_urls, sorted_images.values()))\n)",
"_____no_output_____"
]
],
[
[
"Or, as mentionned, you can simply provide the paths to your images, and call the function like below : ",
"_____no_output_____"
]
],
[
[
"kili.append_many_to_dataset(\n project_id=project_id,\n external_id_array=list(map(lambda key: f'local-path-{key}',sorted_images.keys())),\n json_content_array=list(sorted_images.values())\n)",
"_____no_output_____"
]
],
[
[
"## Back to the interface",
"_____no_output_____"
],
[
"We can see our assets were imported...",
"_____no_output_____"
]
],
[
[
"ds_size = kili.count_assets(project_id=project_id)\nprint(ds_size)\nassert ds_size == 6",
"6\n"
]
],
[
[
"![assets_inserted](img/assets_inserted.png)",
"_____no_output_____"
],
[
"...we can now annotate those assets !",
"_____no_output_____"
],
[
"![frame_annotation](img/frame_annotation.png)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7f53e4b671cdd892d23b26cb50eeee61a6a25f8 | 460,208 | ipynb | Jupyter Notebook | notebooks_workflow_complete/0.0_PEST_parameterization.ipynb | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | notebooks_workflow_complete/0.0_PEST_parameterization.ipynb | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | notebooks_workflow_complete/0.0_PEST_parameterization.ipynb | usgs/neversink_workflow | acd61435b8553e38d4a903c8cd7a3afc612446f9 | [
"CC0-1.0"
] | null | null | null | 131.864756 | 46,944 | 0.81645 | [
[
[
"import sys\nsys.path.append('../python_packages_static/')\nimport flopy as fp\nimport pyemu\nimport re\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, shutil, glob, sys\nimport json",
"_____no_output_____"
]
],
[
[
"# PEST setup\nThis notebook reads in the existing MF6 model built using modflow-setup with the script `../scripts/setup_model.py`. This notebook makes extensive use of the `PstFrom` functionality in `pyemu` to set up multipliers on parameters. There are a few custom parameterization steps as well. \n\nObservations are also defined, assigned initial values, and weights based on preliminary assumptions about error.",
"_____no_output_____"
]
],
[
[
"pyemu.__version__",
"_____no_output_____"
]
],
[
[
"#### define locations and other global variables",
"_____no_output_____"
]
],
[
[
"sim_ws = '../neversink_mf6/' # folder containing the MODFLOW6 files\ntemplate_ws = '../run_data' # folder to create and write the PEST setup to\nnoptmax0_dir = '../noptmax0_testing/' # folder in which to write noptmax=0 test run version of PST file",
"_____no_output_____"
]
],
[
[
"#### kill the `original` folder (a relic from the mfsetup process)",
"_____no_output_____"
]
],
[
[
"if os.path.exists(os.path.join(sim_ws,'original')):\n shutil.rmtree(os.path.join(sim_ws,'original'))",
"_____no_output_____"
],
[
"run_MF6 = True # option to run MF6 to generate output but not needed if already been run in sim_ws\ncdir = os.getcwd()\n\n\n# optionally run MF6 to generate model output\nif run_MF6:\n os.chdir(sim_ws)\n os.system('mf6')\n os.chdir(cdir)",
"_____no_output_____"
]
],
[
[
"### create land surface observations we will need at the end\nThese will be used as inequality observations (less than) to enforce that heads should not exceed the model top. Option for spatial frequency is set below.",
"_____no_output_____"
]
],
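[
[
"A note on the mechanics: PEST++ treats observations as one-sided (inequality) constraints when their observation group name begins with `less_than` or `greater_than`. A minimal sketch of how these land-surface observations could be flagged once the control file exists later in this workflow (the group name here is illustrative):\n\n```python\n# groups named 'less_than*' are interpreted by PEST++ as one-sided\n# (less-than) constraints rather than exact-match targets\nobs = pst.observation_data\nls = obs.obsnme.str.startswith('land_surf_obs')\nobs.loc[ls, 'obgnme'] = 'less_than_land_surface'\n```",
"_____no_output_____"
]
],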
[
[
"irch_file = f'{sim_ws}/irch.dat' # file with the highest active layer identified\nid3_file = f'{sim_ws}/idomain_003.dat' # deepest layer idomain - gives the maximum lateral footprint\ntop_file = f'{sim_ws}/top.dat' # the model top",
"_____no_output_____"
],
[
"top = np.loadtxt(top_file)\ntop[top<-8000] = np.nan\nplt.imshow(top)\n\nplt.colorbar()",
"_____no_output_____"
],
[
"id3 = np.loadtxt(id3_file, dtype=int)\nplt.imshow(id3)",
"_____no_output_____"
],
[
"irch = np.loadtxt(irch_file, dtype=int) \nirch -= 1 # note that this is 1-based, not 0-based because it's a MF6 file\nplt.imshow(irch)\nplt.colorbar()",
"_____no_output_____"
],
[
"# set frequency for land surface observations lateralls, in model cells\nlsobs_every_n_cells = 50 ",
"_____no_output_____"
],
[
"# make a grid of cells spaced at the spacing suggested above\nnrow, ncol = id3.shape\nj = list(range(ncol))[0:ncol:lsobs_every_n_cells]\ni = list(range(nrow))[0:nrow:lsobs_every_n_cells]\nJ,I = np.meshgrid(j,i)\npoints = list(zip(I.ravel(),J.ravel()))",
"_____no_output_____"
],
[
"# now keep only those that are in active cells (using ibound of layer4 as the basis) and drop a few others",
"_____no_output_____"
],
[
"keep_points = [(irch[i,j],i,j) for i,j in points if id3[i,j]==1]\ndrop_points = [(0, 150, 50),(3, 150, 100),(3, 100, 50)]\nkeep_points = [i for i in keep_points if i not in drop_points]",
"_____no_output_____"
]
],
[
[
"### make list of indices",
"_____no_output_____"
]
],
[
[
"with open(os.path.join(sim_ws,'land_surf_obs-indices.csv'), 'w') as ofp:\n ofp.write('k,i,j,obsname\\n')\n [ofp.write('{0},{1},{2},land_surf_obs_{1}_{2}\\n'.format(*i)) for i in keep_points]",
"_____no_output_____"
]
],
[
[
"### make an observations file",
"_____no_output_____"
]
],
[
[
"with open(os.path.join(sim_ws,'land_surf_obs-observations.csv'), 'w') as ofp:\n ofp.write('obsname,obsval\\n')\n [ofp.write('land_surf_obs_{1}_{2},{3}\\n'.format(*i, top[i[1],i[2]])) for i in keep_points]",
"_____no_output_____"
]
],
[
[
"# Start setting up the `PstFrom` object to create PEST inputs\n### load up the simulation",
"_____no_output_____"
]
],
[
[
"sim = fp.mf6.MFSimulation.load(sim_ws=sim_ws)",
"loading simulation...\n loading simulation name file...\n loading tdis package...\n loading model gwf6...\n loading package dis...\n loading package ic...\n loading package npf...\n loading package rch...\n loading package oc...\n loading package wel...\n loading package obs...\n loading package chd...\nINFORMATION: maxbound in ('gwf6', 'chd', 'dimensions') changed to 176 based on size of stress_period_data\n loading package sfr...\n loading ims package neversink...\n"
],
[
"m = sim.get_model()",
"_____no_output_____"
],
[
"# manually create a spatial reference object from the grid.json metadata\n# this file created by modflow-setup\ngrid_data = json.load(open(os.path.join(sim_ws,'neversink_grid.json')))\nsr_model = pyemu.helpers.SpatialReference(delr=grid_data['delr'],\n delc=grid_data['delc'],\n rotation= grid_data['angrot'],\n epsg = grid_data['epsg'],\n xul = grid_data['xul'],\n yul = grid_data['yul'],\n units='meters',\n lenuni=grid_data['lenuni'])",
"_____no_output_____"
],
[
"# create the PstFrom object \npf = pyemu.utils.PstFrom(original_d=sim_ws, new_d=template_ws,\n remove_existing=True,\n longnames=True, spatial_reference=sr_model,\n zero_based=False)",
"2021-03-26 16:08:56.834646 starting: opening PstFrom.log for logging\n2021-03-26 16:08:56.835640 starting PstFrom process\n2021-03-26 16:08:56.868554 starting: setting up dirs\n2021-03-26 16:08:56.869551 starting: removing existing new_d '..\\run_data'\n2021-03-26 16:08:57.058048 finished: removing existing new_d '..\\run_data' took: 0:00:00.188497\n2021-03-26 16:08:57.058048 starting: copying original_d '..\\neversink_mf6' to new_d '..\\run_data'\n2021-03-26 16:08:58.243337 finished: copying original_d '..\\neversink_mf6' to new_d '..\\run_data' took: 0:00:01.185289\n2021-03-26 16:08:58.245331 finished: setting up dirs took: 0:00:01.376777\n"
]
],
[
[
"## we will parameterize....\n- pilot points for k, k33, r\n- zones for l, k33, r\n- constant for R\n- sfr conductance by reach\n- well pumping \n- CHDs",
"_____no_output_____"
],
[
"## parameterize list-directed well and chd packages\n",
"_____no_output_____"
]
],
[
[
"list_tags = {'wel_':[.8,1.2], 'chd_':[.8,1.2]}",
"_____no_output_____"
],
[
"for tag,bnd in list_tags.items():\n lb,ub = bnd\n filename = os.path.basename(glob.glob(os.path.join(template_ws, '*{}*'.format(tag)))[0])\n pf.add_parameters(filenames=filename, par_type = 'grid',\n upper_bound=ub, lower_bound=lb, par_name_base=tag,\n index_cols=[0,1,2], use_cols=[3],pargp=tag[:-1],alt_inst_str='',\n comment_char='#')",
"2021-03-26 16:08:58.270602 starting: adding grid type multiplier style parameters for file(s) ['wel_000.dat']\n2021-03-26 16:08:58.271600 starting: loading list ..\\run_data\\wel_000.dat\n2021-03-26 16:08:58.272597 starting: reading list ..\\run_data\\wel_000.dat\n2021-03-26 16:08:58.279579 finished: reading list ..\\run_data\\wel_000.dat took: 0:00:00.006982\n2021-03-26 16:08:58.279579 loaded list '..\\run_data\\wel_000.dat' of shape (34, 5)\n2021-03-26 16:08:58.284565 finished: loading list ..\\run_data\\wel_000.dat took: 0:00:00.012965\n2021-03-26 16:08:58.285562 starting: writing list-based template file '..\\run_data\\wel__0_grid.csv.tpl'\n2021-03-26 16:08:58.336024 finished: writing list-based template file '..\\run_data\\wel__0_grid.csv.tpl' took: 0:00:00.050462\n2021-03-26 16:08:58.369854 finished: adding grid type multiplier style parameters for file(s) ['wel_000.dat'] took: 0:00:00.099252\n2021-03-26 16:08:58.371847 starting: adding grid type multiplier style parameters for file(s) ['chd_000.dat']\n2021-03-26 16:08:58.371847 starting: loading list ..\\run_data\\chd_000.dat\n2021-03-26 16:08:58.372844 starting: reading list ..\\run_data\\chd_000.dat\n2021-03-26 16:08:58.376835 finished: reading list ..\\run_data\\chd_000.dat took: 0:00:00.003991\n2021-03-26 16:08:58.377831 loaded list '..\\run_data\\chd_000.dat' of shape (176, 4)\n2021-03-26 16:08:58.381821 finished: loading list ..\\run_data\\chd_000.dat took: 0:00:00.009974\n2021-03-26 16:08:58.382818 starting: writing list-based template file '..\\run_data\\chd__0_grid.csv.tpl'\n2021-03-26 16:08:58.418722 finished: writing list-based template file '..\\run_data\\chd__0_grid.csv.tpl' took: 0:00:00.035904\n2021-03-26 16:08:58.441661 finished: adding grid type multiplier style parameters for file(s) ['chd_000.dat'] took: 0:00:00.069814\n"
]
],
[
[
"## now set up pilot points",
"_____no_output_____"
]
],
[
[
"k_ub = 152 # ultimate upper bound on K\n# set up pilot points\npp_tags = {'k':[.01,10.,k_ub], 'k33':[.01,10.,k_ub/10]}",
"_____no_output_____"
]
],
[
[
"### we will use idomain to define zones for pilot points as going in active areas of each layer.",
"_____no_output_____"
]
],
[
[
"idm = m.dis.idomain.array",
"_____no_output_____"
],
[
"plt.imshow(idm[2])\nplt.colorbar()",
"_____no_output_____"
],
[
"idm[idm==-1]=0 # make pass through cells (e.g. idomain==-1) the same as inactive (e.g. idomain == 0)",
"_____no_output_____"
],
[
"for i in range(4):\n plt.figure()\n plt.imshow(idm[i])\n plt.colorbar()",
"_____no_output_____"
]
],
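[
[
"A quick optional check that the pass-through edit above behaved as expected: count the active cells per layer in the `idm` array (a sketch).",
"_____no_output_____"
],
[
"# optional sanity check: active cells per layer after zeroing pass-through cells\nfor lay in range(idm.shape[0]):\n    print('layer {}: {} active cells'.format(lay, int((idm[lay] == 1).sum())))",
"_____no_output_____"
]
],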
[
[
"#### before setting up K, need to edit the zone files to only have nonzero values in active cells",
"_____no_output_____"
]
],
[
[
"kzonefile = '../processed_data/padded_L{}_K_Zone_50mGrid.dat'\nzonearrs = {}\nfor i in range(m.dis.nlay.data):\n kz = np.loadtxt(kzonefile.format(i)).astype(int)\n kz[idm[i] != 1] = 0 \n zonearrs[i] = kz\n",
"_____no_output_____"
],
[
"for i in range(4):\n plt.figure()\n plt.imshow(zonearrs[i])\n plt.colorbar()",
"_____no_output_____"
],
[
"# quick take a look at unique zones present in each layer\n[np.unique(kz) for _,kz in zonearrs.items()]",
"_____no_output_____"
],
[
"## set up for K\nfor tag,bnd in pp_tags.items():\n lb, ub, ultub = bnd\n if tag == 'k':\n arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag) & ('k33' not in f)])\n else:\n arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag)])\n \n for arr_file in arrfiles: \n currlay = int(re.findall('\\d+',arr_file.replace('k33',''))[-1])\n \n # pilot points\n # set pilot point spacing: NB every 5 cells in the smaller-zone layers, and every 20 cells in others\n if currlay in [1,2]:\n pp_space = 5 \n else: \n pp_space = 20\n v = pyemu.utils.geostats.ExpVario(a=sr_model.delr[0]*3*pp_space,contribution=1.0)\n gs = pyemu.utils.geostats.GeoStruct(variograms=v,nugget=0.0, transform='log')\n\n print('pps for layer {} --- filename: {}: idomain_sum: {}'.format(currlay, arr_file, idm[currlay].sum()))\n pf.add_parameters(filenames=arr_file, par_type='pilotpoints', pp_space=pp_space,\n upper_bound=ub, lower_bound=lb, geostruct=gs,\n par_name_base='{}_pp'.format(tag),alt_inst_str='',\n zone_array=idm[currlay], pargp='pp_{}'.format(tag),\n ult_ubound=ultub)\n # zones\n print('zones for layer {} --- filename: {}: idomain_sum: {}'.format(currlay, arr_file, idm[currlay].sum()))\n pf.add_parameters(filenames=arr_file, par_type='zone',alt_inst_str='',\n zone_array = zonearrs[currlay],lower_bound=lb,upper_bound=ub,\n pargp='zn_{}'.format(tag), par_name_base='{}_{}'.format(tag,currlay),\n ult_ubound=ultub)\n ",
"pps for layer 0 --- filename: k0.dat: idomain_sum: 85051\n2021-03-26 16:09:05.525010 starting: adding pilotpoints type multiplier style parameters for file(s) ['k0.dat']\n2021-03-26 16:09:05.525010 starting: loading array ..\\run_data\\k0.dat\n2021-03-26 16:09:06.059836 finished: loading array ..\\run_data\\k0.dat took: 0:00:00.534826\n2021-03-26 16:09:06.059836 loaded array '..\\neversink_mf6\\k0.dat' of shape (680, 619)\n2021-03-26 16:09:06.626456 starting: writing array-based template file '..\\run_data\\k_pp_0_pilotpoints.csv.tpl'\n2021-03-26 16:09:06.627454 starting: setting up pilot point parameters\n2021-03-26 16:09:06.627454 No spatial reference (containing cell spacing) passed.\n2021-03-26 16:09:06.628450 OK - using spatial reference in parent object.\n2021-03-26 16:09:07.724942 202 pilot point parameters created\n2021-03-26 16:09:07.726936 pilot point 'pargp':k_pp_:0\n2021-03-26 16:09:07.726936 finished: setting up pilot point parameters took: 0:00:01.099482\n2021-03-26 16:09:07.847615 starting: calculating factors for pargp=k_pp_:0\n2021-03-26 16:09:07.847615 saving krige variance file:..\\run_data\\k_pp_0pp.var.dat\n2021-03-26 16:09:07.848612 saving krige factors file:..\\run_data\\k_pp_0pp.fac\nstarting interp point loop for 420920 points\nstarting 0\nstarting 1\nstarting 2\nstarting 3\nstarting 4\nstarting 5\nstarting 6\nstarting 7\nstarting 8\nstarting 9\n"
],
[
"# recharge as special case because no idomain for R\nrtags= {'rch':[0.8,1.2, np.max(m.rch.recharge.array)*1.2]}",
"_____no_output_____"
],
[
"for tag,bnd in rtags.items():\n lb, ub, ultub = bnd\n if tag == 'k':\n arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag) & ('k33' not in f)])\n else:\n arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag)])\n \n for arr_file in arrfiles:\n # pilot points\n pf.add_parameters(filenames=arr_file, par_type='pilotpoints', pp_space=pp_space,\n upper_bound=ub, lower_bound=lb, geostruct=gs,\n par_name_base='{}_pp'.format(tag),\n zone_array=idm[3],alt_inst_str='',\n pargp='pp_{}'.format(tag),\n ult_ubound=ultub)\n # constant\n pf.add_parameters(filenames=arr_file, par_type='constant',\n upper_bound=ub-0.1, lower_bound=lb+0.1, \n par_name_base='{}_const'.format(tag),\n zone_array=idm[3],alt_inst_str='',\n pargp='pp_{}'.format(tag),\n ult_ubound=ultub)\n",
"2021-03-26 16:43:15.342865 starting: adding pilotpoints type multiplier style parameters for file(s) ['rch_000.dat']\n2021-03-26 16:43:15.343727 starting: loading array ..\\run_data\\rch_000.dat\n2021-03-26 16:43:15.704764 finished: loading array ..\\run_data\\rch_000.dat took: 0:00:00.361037\n2021-03-26 16:43:15.705800 loaded array '..\\neversink_mf6\\rch_000.dat' of shape (680, 619)\n2021-03-26 16:43:16.246920 starting: writing array-based template file '..\\run_data\\rch_pp_0_pilotpoints.csv.tpl'\n2021-03-26 16:43:16.246920 starting: setting up pilot point parameters\n2021-03-26 16:43:16.246920 No spatial reference (containing cell spacing) passed.\n2021-03-26 16:43:16.246920 OK - using spatial reference in parent object.\n2021-03-26 16:43:17.495149 470 pilot point parameters created\n2021-03-26 16:43:17.496147 pilot point 'pargp':rch_pp_:0\n2021-03-26 16:43:17.496147 finished: setting up pilot point parameters took: 0:00:01.249227\n2021-03-26 16:43:17.692599 starting: writing array-based template file '..\\run_data\\rch_pp_0pp.dat.tpl'\n2021-03-26 16:43:17.692599 saving zone array ..\\run_data\\rch_pp_0pp.dat.zone for tpl file ..\\run_data\\rch_pp_0pp.dat.tpl\n2021-03-26 16:43:17.854377 finished: adding pilotpoints type multiplier style parameters for file(s) ['rch_000.dat'] took: 0:00:02.511512\n2021-03-26 16:43:17.855375 starting: adding constant type multiplier style parameters for file(s) ['rch_000.dat']\n2021-03-26 16:43:17.856372 starting: loading array ..\\run_data\\rch_000.dat\n2021-03-26 16:43:18.242922 finished: loading array ..\\run_data\\rch_000.dat took: 0:00:00.386550\n2021-03-26 16:43:18.242922 loaded array '..\\neversink_mf6\\rch_000.dat' of shape (680, 619)\n2021-03-26 16:43:18.963246 starting: writing array-based template file '..\\run_data\\rch_const_0_constant.csv.tpl'\n2021-03-26 16:43:18.963246 starting: writing template file ..\\run_data\\rch_const_0_constant.csv.tpl for ['rch_const_:0']\n2021-03-26 16:43:18.990173 WARNING: get_xy() warning: position of i and j in index_cols not specified, assume (i,j) are final two entries in index_cols.\n2021-03-26 16:43:21.603479 finished: writing template file ..\\run_data\\rch_const_0_constant.csv.tpl for ['rch_const_:0'] took: 0:00:02.640233\n2021-03-26 16:43:21.603479 finished: writing array-based template file '..\\run_data\\rch_const_0_constant.csv.tpl' took: 0:00:02.640233\n2021-03-26 16:43:21.603479 saving zone array ..\\run_data\\rch_const_0_constant.csv.zone for tpl file ..\\run_data\\rch_const_0_constant.csv.tpl\n2021-03-26 16:43:22.372912 finished: adding constant type multiplier style parameters for file(s) ['rch_000.dat'] took: 0:00:04.517537\n"
]
],
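[
[
"A worked example of the variogram range used above: the correlation length is tied to the pilot-point spacing through `a = delr * 3 * pp_space`. Assuming the 50 m cell size implied by the zone file names, that gives:",
"_____no_output_____"
],
[
"# worked example, assuming 50 m cells (as implied by the '50mGrid' file names)\ndelr = 50.0\nfor pp_space in (5, 20):\n    print(pp_space, 'cell spacing ->', delr * 3 * pp_space, 'm correlation range')",
"_____no_output_____"
]
],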
[
[
"## the `build_pst` method compiles all the parameters we've added and makes a `Pst` object",
"_____no_output_____"
]
],
[
[
"pst = pf.build_pst('tmp.pst')",
"noptmax:0, npar_adj:4172, nnz_obs:0\n"
]
],
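[
[
"It can help to look at what `build_pst` assembled before moving on; this sketch uses only standard `Pst` attributes:",
"_____no_output_____"
],
[
"# quick look at the assembled control data\nprint(pst.npar, 'parameters,', pst.nobs, 'observations')\nprint(pst.parameter_data.pargp.value_counts())",
"_____no_output_____"
]
],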
[
[
"### Make a TPL file for SFR and add it to the `pst` object",
"_____no_output_____"
]
],
[
[
"sfrfilename = 'neversink_packagedata.dat'\n\nprint('working on {}'.format(sfrfilename))\n# read in and strip and split the input lines\ninsfr = [line.strip().split() for line in open(os.path.join(template_ws,sfrfilename), 'r').readlines() if '#' not in line]\nheaderlines = [line.strip() for line in open(os.path.join(template_ws,sfrfilename), 'r').readlines() if '#' in line]\n\n# set up the template line strings by segment\ntpl_char = ['~ sfrk_{} ~'.format(line[-1]) for line in insfr]\n\n# stick the tpl text in the K column. NB -> gotta count from the end because of \n# the possibility of NONE or i,j,k as indexing\nfor line,tpl in zip(insfr,tpl_char):\n line[-6] = tpl\n\n# revert back to a space delimited file\ninsfr = [' '.join(line) for line in insfr]\n\n# write out the TPL file\nwith open(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)), 'w') as ofp:\n ofp.write('ptf ~\\n')\n [ofp.write('{}\\n'.format(line)) for line in headerlines]\n [ofp.write('{}\\n'.format(line)) for line in insfr]",
"working on neversink_packagedata.dat\n"
],
[
"pst.add_parameters(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)), pst_path='.')",
"error trying to read input file with tpl file:different values 1.7662957:1.0 for par sfrk_700039914 on in line 15\n915 pars added from template file .\\neversink_packagedata.dat.tpl\n"
],
[
"parval1 = pyemu.pst_utils.try_read_input_file_with_tpl(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)),\n os.path.join(template_ws,sfrfilename))",
"error trying to read input file with tpl file:different values 1.7662957:1.0 for par sfrk_700039914 on in line 15\n"
],
[
"pst.parameter_data.loc[pst.parameter_data.parnme.str.startswith('sfr'),'pargp'] = 'sfrk'",
"_____no_output_____"
],
[
"pst.parameter_data.loc[pst.parameter_data.parnme == 'sfrk_700039914']",
"_____no_output_____"
]
],
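[
[
"For reference, the template-file convention used above: the first line `ptf ~` declares the marker character and each `~ parname ~` token is replaced with a parameter value at run time. A minimal, self-contained illustration (the parameter name and data values are made up):",
"_____no_output_____"
],
[
"# minimal illustration of a PEST template file (hypothetical parameter name)\ndemo = 'ptf ~\\n1 1 10 20 55.0 ~ sfrk_demo ~\\n'\nwith open('demo.tpl', 'w') as ofp:\n    ofp.write(demo)\nprint(open('demo.tpl').read())",
"_____no_output_____"
]
],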
[
[
"# Add in the observations",
"_____no_output_____"
],
[
"## Assign meaningful observation values and prepare to run `noptmax=0` test run prior to reweighting",
"_____no_output_____"
]
],
[
[
"update_forward_run=True\nrun_local=True\nupdate_all_obs = True",
"_____no_output_____"
]
],
[
[
"### if `update_all_obs` is True, run the get_observations.py script to get a new INS file and reset all observations in the PEST object",
"_____no_output_____"
]
],
[
[
"if update_all_obs is True:\n shutil.copy2('../scripts/get_observations.py',os.path.join(template_ws,'get_observations.py'))\n shutil.copy2('../scripts/get_observations.py',os.path.join(sim_ws,'get_observations.py'))\n \n os.system('python {} {} True'.format(os.path.join(sim_ws,'get_observations.py'), sim_ws))\n [shutil.copy2(cf, os.path.join(template_ws, os.path.basename(cf))) \n for cf in glob.glob(os.path.join(sim_ws, '*.ins'))]\n [shutil.copy2(cf, os.path.join(template_ws, os.path.basename(cf))) \n for cf in glob.glob(os.path.join(sim_ws, 'land_*.csv'))]\n \n pst.observation_data.loc[:,:] = np.nan\n pst.observation_data.dropna(inplace=True)\n pst.add_observations(os.path.join(template_ws,'obs_mf6.dat.ins'), pst_path='.')",
"526 obs added from instruction file ../run_data\\.\\obs_mf6.dat.ins\n"
]
],
[
[
"### set the observation groups",
"_____no_output_____"
]
],
[
[
"obs = pst.observation_data",
"_____no_output_____"
],
[
"obs.obgnme = 'head'",
"_____no_output_____"
],
[
"obs.loc[obs.index.str.startswith('q_'), 'obgnme'] = 'flux'",
"_____no_output_____"
],
[
"obs.loc[obs.index.str.startswith('perc'), 'obgnme'] = 'budget'\nobs.loc[obs.index.str.startswith('land'), 'obgnme'] = 'land_surface'",
"_____no_output_____"
]
],
[
[
"### Set observation values\n",
"_____no_output_____"
]
],
[
[
"set_obs = True",
"_____no_output_____"
],
[
"if set_obs:\n # read in sfr; make sfr obsnme/obsval dict to map to pst observation_data\n sfr_df = pd.read_csv('../processed_data/NWIS_DV_STREAMSTATS_SITES.csv')\n sfr_df['obsnme'] = 'q_' + sfr_df['site_id'].astype(str)\n sfr_df['obsval'] = (sfr_df['Mean_Annual_Flow_cfs'] * sfr_df['Average_BFI_value']) * 2446.5755455 # convert from cfs to m^3/day\n sfr_df[['obsnme', 'obsval']]\n sfr_dict = pd.Series(sfr_df['obsval'].values,index=sfr_df['obsnme']).to_dict()\n \n # read in nwis heads; make nwis head obsnme/obsval dict\n nwis_gw_df = pd.read_csv('../processed_data/NWIS_GW_DV_data.csv')\n nwis_gw_df['obsnme'] = 'h_' + nwis_gw_df['site_no'].astype(str)\n nwis_gw_df['obsval'] = nwis_gw_df['gw_elev_m']\n nwis_gw_dict = pd.Series(nwis_gw_df['obsval'].values,index=nwis_gw_df['obsnme']).to_dict()\n \n # read in DEC heads; make DEC heads obsnme/obsval dict\n DEC_gw_df = pd.read_csv('../processed_data/NY_DEC_GW_sites.csv')\n DEC_gw_df['obsnme'] = ('h_' + DEC_gw_df['WellNO'].astype(str)).str.lower()\n DEC_gw_df['obsval'] = DEC_gw_df['gw_elev_m']\n DEC_gw_dict = pd.Series(DEC_gw_df['obsval'].values,index=DEC_gw_df['obsnme']).to_dict()\n \n # map SFR values to observation_data\n obs.loc[obs.obsnme.isin(sfr_dict.keys()), 'obsval'] = obs.obsnme.map(sfr_dict)\n \n # map nwis heads to observation_data\n obs.loc[obs.obsnme.isin(nwis_gw_dict.keys()), 'obsval'] = obs.obsnme.map(nwis_gw_dict)\n \n # map DEC heads to SRF observation_data\n obs.loc[obs.obsnme.isin(DEC_gw_dict.keys()), 'obsval'] = obs.obsnme.map(DEC_gw_dict)\n \n # set up percent discrepancy as dummy value\n obs.loc[obs.obgnme=='budget', 'obsval'] = -99999\n \n # get the land surface obs\n lsobs_df = pd.read_csv('../neversink_mf6/land_surf_obs-observations.csv', index_col=0)\n \n obs.loc[obs.obgnme=='land_surface', 'obsval'] = lsobs_df.obsval",
"_____no_output_____"
]
],
[
[
"### first cut at weights",
"_____no_output_____"
]
],
[
[
"# weights based on coefficient of variation of 3.33 and 10, respecively\nobs.loc[obs.obsnme=='q_1436500', 'weight'] = 3.33/obs.loc[obs.obsnme=='q_1436500'].obsval\nobs.loc[obs.obsnme=='q_1366650', 'weight'] = 10/obs.loc[obs.obsnme=='q_1366650'].obsval\n",
"_____no_output_____"
],
[
"# these initial weights assume that heads within 5m for measured heads or 10m for land-surface obs is acceptable\nobs.loc[obs.obgnme=='head', 'weight'] = 1/5\nobs.loc[obs.obgnme=='land_surface', 'weight'] = 1/10",
"_____no_output_____"
],
[
"obs.loc[obs.obgnme=='budget', 'weight'] = 0.0",
"_____no_output_____"
]
],
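[
[
"To make the weighting above explicit: PEST weights are 1/sigma, and sigma is expressed here as a coefficient of variation (CV) times the observed value, so `weight = (1/CV)/obsval`. A worked example with a hypothetical flux:",
"_____no_output_____"
],
[
"# worked example with hypothetical numbers\nobsval = 100000.0 # m^3/day, made up\ncv = 0.30 # 30% coefficient of variation (i.e. 1/3.33)\nsigma = cv * obsval\nprint('weight =', 1.0 / sigma) # same as 3.33/obsval, as coded above",
"_____no_output_____"
]
],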
[
[
"## update some parameter bounds",
"_____no_output_____"
]
],
[
[
"pars = pst.parameter_data",
"_____no_output_____"
]
],
[
[
"### K-zones set to not get too crazy high",
"_____no_output_____"
]
],
[
[
"# read in k value lookup table to df\n\n# original table\n\nk_df_original = pd.read_excel(\n '../processed_data/Rondout_Neversink_GeologyLookupTable.xlsx',\n sheet_name='Sheet2'\n)\nk_df_original.index = k_df_original.Lookup_Code\n\nk_df = pd.read_excel(\n '../processed_data/Rondout_Neversink_GeologyLookupTable_jhw.xlsx',\n sheet_name='Sheet2'\n)\n\nk_df.index = k_df.Lookup_Code\n\nprint('Using mean K value')\nk_df['Kh_ft_d_mean'] = (k_df['Kh_ft_d_lower'] + k_df['Kh_ft_d_upper']) / 2\nk_df['Kh_m_d'] = k_df['Kh_ft_d_mean'] * 0.3048\n \nk_df['Kh_m_d_lower'] = k_df['Kh_ft_d_lower'] * .3048\nk_df['Kh_m_d_upper'] = k_df['Kh_ft_d_upper'] * .3048\n\nk_df['K_upper_mult'] = k_df['Kh_m_d_upper'] / k_df['Kh_m_d']\nk_df['K_lower_mult'] = k_df['Kh_m_d_lower'] / k_df['Kh_m_d']\n\n\nk_df",
"Using mean K value\n"
],
[
"k_mult_zones = [int(i.split(':')[-1]) for i in pars.loc[pars.parnme.str.startswith('multiplier_k')].index]\nnp.unique(k_mult_zones)",
"_____no_output_____"
],
[
"upper_mults = [k_df.loc[i].K_upper_mult for i in k_mult_zones]\nlower_mults = [k_df.loc[i].K_lower_mult for i in k_mult_zones]",
"_____no_output_____"
],
[
"pars.loc[pars.parnme.str.startswith('multiplier_k'), 'parlbnd'] = lower_mults\npars.loc[pars.parnme.str.startswith('multiplier_k'), 'parubnd'] = upper_mults\n",
"_____no_output_____"
]
],
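[
[
"The zone multiplier bounds derived above simply rescale the lookup-table K range by the mean K. A worked example with made-up numbers:",
"_____no_output_____"
],
[
"# worked example (hypothetical K values in m/d)\nkh_lower, kh_mean, kh_upper = 0.3, 3.0, 30.0\nprint('lower mult =', kh_lower / kh_mean, ' upper mult =', kh_upper / kh_mean)",
"_____no_output_____"
]
],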
[
[
"### pilot points set to mean upper and lower bounds diffs",
"_____no_output_____"
]
],
[
[
"mean_lower = k_df.K_lower_mult.mean()\nmean_upper = k_df.K_upper_mult.mean()\nmean_lower,mean_upper",
"_____no_output_____"
],
[
"pars.loc[pars.pargp.str.startswith('k'), 'parlbnd'] = mean_lower + 0.01\npars.loc[pars.pargp.str.startswith('k'), 'parubnd'] = mean_upper - 0.01\npars.loc[pars.pargp.str.startswith('sfrk'), 'parlbnd'] = 0.1\npars.loc[pars.pargp.str.startswith('sfrk'), 'parubnd'] = 10.0",
"_____no_output_____"
]
],
[
[
"### Set CHD parameters to 'fixed'. They will not be estimated, but are present to evaluate in global sensitivity analysis which means we will free them only for that purpose",
"_____no_output_____"
]
],
[
[
"pars.loc[pars.pargp=='chd', 'partrans'] = 'fixed'",
"_____no_output_____"
]
],
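[
[
"When the global sensitivity analysis is actually run, the CHD parameters can be freed again by resetting the transform ('log' is the usual adjustable transform). The sketch below shows the toggle; its net effect here is a no-op:",
"_____no_output_____"
],
[
"pars.loc[pars.pargp=='chd', 'partrans'] = 'log' # free for the GSA run\npars.loc[pars.pargp=='chd', 'partrans'] = 'fixed' # ...and re-fix for history matching",
"_____no_output_____"
]
],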
[
[
"#### `pyemu` can write out a summary file with summary of the parameterization - note that all parameters are multipliers",
"_____no_output_____"
]
],
[
[
"parsum = pst.write_par_summary_table('../figures/initial_parsum.xlsx', report_in_linear_space=True)\nparsum",
"Warning: because log-transformed values being reported in linear space, stdev NOT reported\n"
]
],
[
[
"## update the forward run to run",
"_____no_output_____"
]
],
[
[
"# note there are ways to do this within PstFrom but we were unaware of that when we set this up\n# note also we are putting the model run and postprocessing lines just above if __name__ == \"__main__\" line\nfrunlines = open(os.path.join(template_ws, 'forward_run.py'), 'r').readlines()\nif update_forward_run is True and './mf6' not in ' '.join([i.strip() for i in frunlines]):\n print('updating forward_run.py')\n with open(os.path.join(template_ws, 'forward_run.py'), 'w') as ofp:\n for line in frunlines:\n if '__main__' in line:\n ofp.write(\" os.system('./mf6')\\n\")\n ofp.write(\" os.system('python get_observations.py . false')\\n\")\n ofp.write('{}\\n'.format(line)) \n elif 'import os' in line:\n ofp.write('import os, sys\\n')\n ofp.write(\"sys.path.append('../python_packages_static/')\\n\")\n else:\n ofp.write(line) ",
"updating forward_run.py\n"
]
],
[
[
"### set noptmax = 0 and a couple ++ options and write out PST file",
"_____no_output_____"
]
],
[
[
"pst.pestpp_options[\"ies_num_reals\"] = 500 \npst.pestpp_options[\"ies_bad_phi_sigma\"] = 2\npst.pestpp_options[\"overdue_giveup_fac\"] = 4\npst.pestpp_options[\"ies_save_rescov\"] = True\npst.pestpp_options[\"ies_no_noise\"] = True\npst.pestpp_options[\"ies_drop_conflicts\"] = True\npst.pestpp_options[\"ies_pdc_sigma_distance\"] = 2.0\npst.control_data.noptmax = 0",
"_____no_output_____"
]
],
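[
[
"As a reminder, `noptmax = 0` tells PEST++ to make a single forward run and stop, which is what we want for interface testing before reweighting. A quick check of the settings (a sketch):",
"_____no_output_____"
],
[
"# confirm the test-run setting and the ies options before writing the control file\nprint('noptmax =', pst.control_data.noptmax)\nprint({k: v for k, v in pst.pestpp_options.items() if k.startswith('ies')})",
"_____no_output_____"
]
],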
[
[
"### write out the PST file",
"_____no_output_____"
]
],
[
[
"pst.write(os.path.join(template_ws,'prior_mc.pst'))",
"noptmax:0, npar_adj:4911, nnz_obs:525\n"
]
],
[
[
"# copy over the entire pest directory to a separate folder identified by the `noptmax0_dir` variable. This is to keep `template_ws` clean and allow for various testing to take place in the `noptmax0_dir` location",
"_____no_output_____"
]
],
[
[
"if os.path.exists(noptmax0_dir):\n shutil.rmtree(noptmax0_dir)\nshutil.copytree(template_ws, noptmax0_dir)",
"_____no_output_____"
],
[
"pst.write_obs_summary_table('../figures/obs_initial.xlsx')",
"_____no_output_____"
]
],
[
[
"### If running on Windows, remove backslashes from `mult2model_info.csv` for running on linux cluster",
"_____no_output_____"
]
],
[
[
"if sys.platform == 'win32':\n f = open(os.path.join(template_ws, 'mult2model_info.csv'), \"r\")\n lines = f.readlines()\n f.close()\n\n output_lines = []\n for line in lines:\n output_lines.append(line.replace('\\\\', \"/\"))\n\n f = open(os.path.join(template_ws, 'mult2model_info.csv'), \"w\")\n f.write(''.join(output_lines))\n f.close()",
"_____no_output_____"
]
],
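[
[
"The same backslash-to-slash replacement is repeated for the control file and the forward run script below; it could be factored into a small helper -- a sketch:",
"_____no_output_____"
],
[
"# reusable sketch of the replacement applied below\ndef posixify(path):\n    text = open(path, 'r').read()\n    with open(path, 'w') as ofp:\n        ofp.write(text.replace('\\\\', '/'))",
"_____no_output_____"
]
],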
[
[
"### and the pest file",
"_____no_output_____"
]
],
[
[
"if sys.platform == 'win32':\n f = open(os.path.join(template_ws, 'prior_mc.pst'), \"r\")\n lines = f.readlines()\n f.close()\n\n output_lines = []\n for line in lines:\n output_lines.append(line.replace('\\\\', \"/\"))\n\n f = open(os.path.join(template_ws, 'prior_mc.pst'), \"w\")\n f.write(''.join(output_lines))\n f.close()",
"_____no_output_____"
]
],
[
[
"### and update the forward run command",
"_____no_output_____"
]
],
[
[
"if sys.platform == 'win32':\n f = open(os.path.join(template_ws, 'forward_run.py'), \"r\")\n lines = f.readlines()\n f.close()\n\n output_lines = []\n for line in lines:\n output_lines.append(line.replace('./mf6', \"mf6\"))\n\n # fix in run_dir \n f = open(os.path.join(template_ws, 'forward_run.py'), \"w\")\n f.write(''.join(output_lines))\n f.close()\n \n # fix in noptmax_0_testing\n f = open(os.path.join(noptmax0_dir, 'forward_run.py'), \"w\")\n f.write(''.join(output_lines))\n f.close()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f5521be79ba59ecfba78c6fe7ac27b08bb70ec | 98,204 | ipynb | Jupyter Notebook | Kmeans-k=3,4,5,10.ipynb | ahmetemrekilic/Jupyter-Machine-Learning | 57e6bd1dad14a9df027d07ee81da7db1fa7b1b75 | [
"MIT"
] | null | null | null | Kmeans-k=3,4,5,10.ipynb | ahmetemrekilic/Jupyter-Machine-Learning | 57e6bd1dad14a9df027d07ee81da7db1fa7b1b75 | [
"MIT"
] | null | null | null | Kmeans-k=3,4,5,10.ipynb | ahmetemrekilic/Jupyter-Machine-Learning | 57e6bd1dad14a9df027d07ee81da7db1fa7b1b75 | [
"MIT"
] | null | null | null | 363.718519 | 18,752 | 0.938322 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %% create dataset\n\n# class1\nx1 = np.random.normal(25,5,1000)\ny1 = np.random.normal(25,5,1000)\n\n# class2\nx2 = np.random.normal(55,5,1000)\ny2 = np.random.normal(60,5,1000)\n\n# class3\nx3 = np.random.normal(55,5,1000)\ny3 = np.random.normal(15,5,1000)",
"_____no_output_____"
],
[
"x = np.concatenate((x1,x2,x3),axis = 0)\ny = np.concatenate((y1,y2,y3),axis = 0)",
"_____no_output_____"
],
[
"dictionary = {\"x\":x,\"y\":y}\n\ndata = pd.DataFrame(dictionary)\n\nplt.scatter(x1,y1)\nplt.scatter(x2,y2)\nplt.scatter(x3,y3)\nplt.show()\n\n## %% kmeans algoritması bunu gorecek\n#plt.scatter(x1,y1,color = \"black\")\n#plt.scatter(x2,y2,color = \"black\")\n#plt.scatter(x3,y3,color = \"black\")\n#plt.show()",
"_____no_output_____"
],
[
"# %% KMEANS\n\nfrom sklearn.cluster import KMeans\nwcss = []\n\nfor k in range(1,15):\n kmeans = KMeans(n_clusters=k)\n kmeans.fit(data)\n wcss.append(kmeans.inertia_)\n \nplt.plot(range(1,15),wcss)\nplt.xlabel(\"number of k (cluster) value\")\nplt.ylabel(\"wcss\")\nplt.show()",
"_____no_output_____"
],
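[
"#%% aside: a more automatic alternative to eyeballing the elbow plot above.\n# the silhouette score peaks for the best-separated clustering -- a sketch\n# (scikit-learn assumed available, as above); fit on the x/y features only\nfrom sklearn.metrics import silhouette_score\nfor k in range(2, 8):\n    labels = KMeans(n_clusters=k).fit_predict(data[['x', 'y']])\n    print(k, silhouette_score(data[['x', 'y']], labels))",
"_____no_output_____"
],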
[
"#%% k = 3 icin modelim\n\nkmeans2 = KMeans(n_clusters=3)\nclusters = kmeans2.fit_predict(data)\n\ndata[\"label\"] = clusters\n\nplt.scatter(data.x[data.label == 0 ],data.y[data.label == 0],color = \"red\")\nplt.scatter(data.x[data.label == 1 ],data.y[data.label == 1],color = \"green\")\nplt.scatter(data.x[data.label == 2 ],data.y[data.label == 2],color = \"blue\")\nplt.scatter(kmeans2.cluster_centers_[:,0],kmeans2.cluster_centers_[:,1],color = \"yellow\")\nplt.show()",
"_____no_output_____"
],
[
"#%% k = 4 icin modelim\n\nkmeans2 = KMeans(n_clusters=4)\nclusters = kmeans2.fit_predict(data)\n\ndata[\"label\"] = clusters\n\nplt.scatter(data.x[data.label == 0 ],data.y[data.label == 0],color = \"red\")\nplt.scatter(data.x[data.label == 1 ],data.y[data.label == 1],color = \"green\")\nplt.scatter(data.x[data.label == 2 ],data.y[data.label == 2],color = \"blue\")\nplt.scatter(kmeans2.cluster_centers_[:,0],kmeans2.cluster_centers_[:,1],color = \"yellow\")\nplt.show()",
"_____no_output_____"
],
[
"#%% k = 5 icin modelim\n\nkmeans2 = KMeans(n_clusters=5)\nclusters = kmeans2.fit_predict(data)\n\ndata[\"label\"] = clusters\n\nplt.scatter(data.x[data.label == 0 ],data.y[data.label == 0],color = \"red\")\nplt.scatter(data.x[data.label == 1 ],data.y[data.label == 1],color = \"green\")\nplt.scatter(data.x[data.label == 2 ],data.y[data.label == 2],color = \"blue\")\nplt.scatter(kmeans2.cluster_centers_[:,0],kmeans2.cluster_centers_[:,1],color = \"yellow\")\nplt.show()",
"_____no_output_____"
],
[
"#%% k = 10 icin modelim\n\nkmeans2 = KMeans(n_clusters=5)\nclusters = kmeans2.fit_predict(data)\n\ndata[\"label\"] = clusters\n\nplt.scatter(data.x[data.label == 0 ],data.y[data.label == 0],color = \"red\")\nplt.scatter(data.x[data.label == 1 ],data.y[data.label == 1],color = \"green\")\nplt.scatter(data.x[data.label == 2 ],data.y[data.label == 2],color = \"blue\")\nplt.scatter(kmeans2.cluster_centers_[:,0],kmeans2.cluster_centers_[:,1],color = \"yellow\")\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f5552b92cba551be01c4b37c6e7f1211c666c1 | 6,238 | ipynb | Jupyter Notebook | notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | null | null | null | notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | null | null | null | notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb | syeehyn/spug | 216976e0171bbc14042377fbbb535180bd2efaf3 | [
"Apache-2.0"
] | 1 | 2021-12-05T22:54:28.000Z | 2021-12-05T22:54:28.000Z | 27.240175 | 289 | 0.494069 | [
[
[
"## imports",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append('../')\nfrom glob import glob\nimport torch\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## get and split data",
"_____no_output_____"
]
],
[
[
"from spug.dataset import DatasetGenerator\n\ndata_root = '../data'\n\nsotck_path = os.path.join(\n data_root, 'raw', 'stock', 'raw.csv'\n)\n\nsec_path = os.path.join(\n data_root, 'raw', 'sec'\n)\n\noutput_path = os.path.join(\n data_root, 'processed'\n)\n\ndata_list = sorted(glob(\n os.path.join(\n data_root, 'raw', 'twitter', '*q*.npy'\n )\n))\ndg = DatasetGenerator(\n data_list = data_list,\n stock_path=sotck_path,\n sec_path=sec_path,\n freq='quarter'\n)",
"_____no_output_____"
]
],
[
[
"## model definition",
"_____no_output_____"
]
],
[
[
"from spug.model import GCN",
"_____no_output_____"
]
],
[
[
"## model training",
"_____no_output_____"
]
],
[
[
"import argparse\nfrom torch_geometric_temporal.signal import temporal_signal_split\nfrom spug.utils import Trainer\n\ndataset = dg.process()\ntrain_dataset, test_dataset = temporal_signal_split(dataset, train_ratio=0.8)\n\n\nINPUT_SHAPE = next(iter(train_dataset)).x.shape[1]\nmodel = GCN(input_size = INPUT_SHAPE, hidden_dims=64)\nargs = argparse.Namespace(\n num_epochs = 500,\n learning_rate = 1e-3,\n device = \"cpu\",\n val_size = .1,\n verbose = False\n)",
"_____no_output_____"
],
[
"trainer = Trainer(model, train_dataset, args, test_dataset)",
"_____no_output_____"
],
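[
"# quick sanity check on one training snapshot before fitting -- a sketch;\n# snapshot attributes follow the torch_geometric_temporal signal convention\nsnapshot = next(iter(train_dataset))\nprint(snapshot.x.shape, snapshot.edge_index.shape, snapshot.y.shape)",
"_____no_output_____"
],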
[
"model = trainer.train()",
"100%|███████████████████████████████████████████████████████████████████████████| 500/500 [00:08<00:00, 56.69it/s]"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f563f59ab02c592a09943e7273478383dafabd | 340,570 | ipynb | Jupyter Notebook | notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb | SalishSeaCast/SalishSeaNowcast | 947ba6fbb8952c7ae989a3aa96614b900748f55d | [
"Apache-2.0"
] | 4 | 2020-02-06T01:10:13.000Z | 2021-12-11T01:06:10.000Z | notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb | SalishSeaCast/SalishSeaNowcast | 947ba6fbb8952c7ae989a3aa96614b900748f55d | [
"Apache-2.0"
] | 30 | 2020-02-03T23:54:10.000Z | 2022-03-18T18:50:31.000Z | notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb | SalishSeaCast/SalishSeaNowcast | 947ba6fbb8952c7ae989a3aa96614b900748f55d | [
"Apache-2.0"
] | null | null | null | 801.341176 | 118,004 | 0.948413 | [
[
[
"# Test `second_narrows_current` Module\n\nRender figure object produced by the `nowcast.figures.fvcom.second_narrows_current` module.\nProvides data for visual testing to confirm that refactoring has not adversely changed figure for web page.\n\nSet-up and function call replicates as nearly as possible what is done in the `nowcast.workers.make_plots` worker.",
"_____no_output_____"
],
[
"Notebooks like this should be developed in a\n[Nowcast Figures Development Environment](https://salishsea-nowcast.readthedocs.io/en/latest/figures/fig_dev_env.html)\nso that all of the necessary dependency packages are installed.\nThe development has to be done on a workstation that has the Vancouver Harbour & Fraser River FVCOM model results `/opp/` parition mounted.",
"_____no_output_____"
]
],
[
[
"import io\nfrom pathlib import Path\nimport shlex\nimport subprocess\n\nimport arrow\nimport xarray\nimport yaml\n\nfrom nowcast.figures import website_theme\nfrom nowcast.figures.fvcom.publish import second_narrows_current",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"# Supress arrow.get() parser warnings re: changes coming in v0.15.0\n# See https://github.com/crsmithdev/arrow/issues/612\n# We don't use date strings that aren't included in the supported date tokens set mentioned in issue #612\n\nimport warnings\nfrom arrow.factory import ArrowParseWarning\n\nwarnings.simplefilter(\"ignore\", ArrowParseWarning)",
"_____no_output_____"
]
],
[
[
"The bits of `config/nowcast.yaml` that are required:",
"_____no_output_____"
]
],
[
[
"config = '''\n vhfr fvcom runs:\n stations dataset filename:\n x2: vh_x2_station_timeseries.nc\n r12: vh_r12_station_timeseries.nc\n results archive:\n nowcast x2: /opp/fvcom/nowcast-x2/\n forecast x2: /opp/fvcom/forecast-x2/\n nowcast r12: /opp/fvcom/nowcast-r12/\n'''\n\nconfig = yaml.safe_load(io.StringIO(config))",
"_____no_output_____"
]
],
[
[
"The bits that the `make_plots` worker must provide:",
"_____no_output_____"
],
[
"Rename FVCOM dataset layer and leval variables because `xarray` won't accept\nvariables and coordinates that have the same name.",
"_____no_output_____"
]
],
[
[
"def _rename_fvcom_vars(fvcom_dataset_path):\n cmd = (\n f'ncrename -O -v siglay,sigma_layer -v siglev,sigma_level '\n f'{fvcom_dataset_path} /tmp/{fvcom_dataset_path.name}')\n subprocess.check_output(shlex.split(cmd))",
"_____no_output_____"
]
],
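[
[
"An alternative to shelling out to `ncrename` (a sketch): `xarray` can skip the conflicting coordinate variables on open via `drop_variables`. Note this discards the sigma coordinate values rather than renaming them, so it is only suitable when they are not needed downstream.",
"_____no_output_____"
],
[
"def open_fvcom_stations(path):\n    # drop the conflicting sigma coordinate variables instead of renaming them\n    return xarray.open_dataset(path, drop_variables=['siglay', 'siglev'])",
"_____no_output_____"
]
],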
[
[
"### Nowcast `X2` Figure",
"_____no_output_____"
]
],
[
[
"run_date = arrow.get('2019-07-23')\nmodel_config = \"x2\"\nrun_type = 'nowcast'\n\nddmmmyy = run_date.format('DDMMMYY').lower()\n\nfvcom_stns_datasets = {}\nif run_type == 'nowcast':\n model_configs = (\"x2\", \"r12\") if model_config == \"r12\" else (\"x2\",)\n for mdl_cfg in model_configs:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg]\n results_dir = Path(\n config['vhfr fvcom runs']['results archive'][f\"{run_type} {mdl_cfg}\"], ddmmmyy\n )\n fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}')\nelse:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][\"x2\"]\n nowcast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy\n )\n nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename)\n forecast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy\n )\n forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename)\n fvcom_stns_dataset_path = Path(\"/tmp\", fvcom_stns_dataset_filename)\n cmd = (\n f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} '\n f'-o {fvcom_stns_dataset_path}'\n )\n subprocess.check_output(shlex.split(cmd))\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[model_config] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}')\n\nobs_dataset = xarray.open_dataset(\n \"https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1\"\n)",
"_____no_output_____"
],
[
"%%timeit -n1 -r1\n\nfrom importlib import reload\nreload(website_theme)\nreload(second_narrows_current)\n\nfig = second_narrows_current.make_figure(\n '2nd Narrows', fvcom_stns_datasets, obs_dataset\n)",
"1.41 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)\n"
]
],
[
[
"### Nowcast `R12` Figure",
"_____no_output_____"
]
],
[
[
"run_date = arrow.get('2019-07-23')\nmodel_config = \"r12\"\nrun_type = 'nowcast'\n\nddmmmyy = run_date.format('DDMMMYY').lower()\n\nfvcom_stns_datasets = {}\nif run_type == 'nowcast':\n model_configs = (\"x2\", \"r12\") if model_config == \"r12\" else (\"x2\",)\n for mdl_cfg in model_configs:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg]\n results_dir = Path(\n config['vhfr fvcom runs']['results archive'][f\"{run_type} {mdl_cfg}\"], ddmmmyy\n )\n fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}')\nelse:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][\"x2\"]\n nowcast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy\n )\n nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename)\n forecast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy\n )\n forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename)\n fvcom_stns_dataset_path = Path(\"/tmp\", fvcom_stns_dataset_filename)\n cmd = (\n f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} '\n f'-o {fvcom_stns_dataset_path}'\n )\n subprocess.check_output(shlex.split(cmd))\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[model_config] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}')\n\nobs_dataset = xarray.open_dataset(\n \"https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1\"\n)",
"_____no_output_____"
],
[
"%%timeit -n1 -r1\n\nfrom importlib import reload\nreload(website_theme)\nreload(second_narrows_current)\n\nfig = second_narrows_current.make_figure(\n '2nd Narrows', fvcom_stns_datasets, obs_dataset\n)",
"522 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)\n"
]
],
[
[
"### Forecast `X2` Figure",
"_____no_output_____"
]
],
[
[
"run_date = arrow.get('2019-07-23')\nmodel_config = \"x2\"\nrun_type = 'forecast'\n\nddmmmyy = run_date.format('DDMMMYY').lower()\n\nfvcom_stns_datasets = {}\nif run_type == 'nowcast':\n model_configs = (\"x2\", \"r12\") if model_config == \"r12\" else (\"x2\",)\n for mdl_cfg in model_configs:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg]\n results_dir = Path(\n config['vhfr fvcom runs']['results archive'][f\"{run_type} {mdl_cfg}\"], ddmmmyy\n )\n fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}')\nelse:\n fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][\"x2\"]\n nowcast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy\n )\n nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename)\n forecast_results_dir = Path(\n config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy\n )\n forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename)\n fvcom_stns_dataset_path = Path(\"/tmp\", fvcom_stns_dataset_filename)\n cmd = (\n f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} '\n f'-o {fvcom_stns_dataset_path}'\n )\n subprocess.check_output(shlex.split(cmd))\n _rename_fvcom_vars(fvcom_stns_dataset_path)\n fvcom_stns_datasets[model_config] = xarray.open_dataset(fvcom_stns_dataset_path)\n\nobs_dataset = xarray.open_dataset(\n \"https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1\"\n)",
"_____no_output_____"
],
[
"%%timeit -n1 -r1\n\nfrom importlib import reload\nreload(second_narrows_current)\n\nfig = second_narrows_current.make_figure(\n '2nd Narrows', fvcom_stns_datasets, obs_dataset\n)",
"565 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |