{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Data Preparation " ] }, { "cell_type": "code", "execution_count": 44, "metadata": {}, "outputs": [], "source": [ "!poetry add -qqq python-dotenv datasets wandb didkit\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [], "source": [ "import os\n", "from dotenv import load_dotenv, find_dotenv\n", "if os.path.exists('../env'):\n", " load_dotenv(find_dotenv())\n", "import wandb" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m Calling wandb.login() after wandb.init() has no effect.\n" ] }, { "data": { "text/html": [ "Finishing last run (ID:pnvhnkh8) before initializing another..." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "19afd929911c42f6ab4e9604948ade90", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(Label(value='0.001 MB of 0.001 MB uploaded\\r'), FloatProgress(value=1.0, max=1.0)))" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run verida_data_raw at: https://wandb.ai/orion-agents/verida-pii/runs/pnvhnkh8
View project at: https://wandb.ai/orion-agents/verida-pii
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Find logs at: ./wandb/run-20240825_035519-pnvhnkh8/logs" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "The new W&B backend becomes opt-out in version 0.18.0; try it out with `wandb.require(\"core\")`! See https://wandb.me/wandb-core for more information." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Successfully finished last run (ID:pnvhnkh8). Initializing new run:
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "76495a6b8c1843b09162b9ec31d99dfe", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(Label(value='Waiting for wandb.init()...\\r'), FloatProgress(value=0.01112003148947325, max=1.0)…" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Tracking run with wandb version 0.17.7" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Run data is saved locally in /Users/nullzero/Documents/repos/github.com/privacy-identity/vda-simulation-medical/vda-sim-medical/wandb/run-20240825_035604-69f6mbdr" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Syncing run verida_data_raw to Weights & Biases (docs)
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View project at https://wandb.ai/orion-agents/verida-pii" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run at https://wandb.ai/orion-agents/verida-pii/runs/69f6mbdr" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 1:13:41.968146, resuming normal operation.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 0:42:49.841123, resuming normal operation.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 0:18:54.049113, resuming normal operation.\n" ] } ], "source": [ "wandb.login(key=os.getenv('WANDB_API_KEY'))\n", "run = wandb.init(project=\"verida-pii\", name=\"verida_data_raw\")" ] }, { "cell_type": "code", "execution_count": 62, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "539\n" ] } ], "source": [ "from datasets import load_dataset\n", "import pandas as pd\n", "data_name=\"Ezi/medical_and_legislators_synthetic\"\n", "data = load_dataset(path=data_name, split='train')\n", "data_df = data.to_pandas()\n", "data_df.head()\n", "print(len(data_df))\n", "\n" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'mps'" ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n", "device" ] }, { "cell_type": "code", "execution_count": 63, "metadata": {}, "outputs": [], "source": [ "# DiD Generator\n", "import didkit\n", "\n", "def generate_did():\n", " key = didkit.generate_ed25519_key()\n", " did = didkit.key_to_did(\"key\", key)\n", " return did, key" ] }, { "cell_type": "code", "execution_count": 64, "metadata": {}, "outputs": [], "source": [ "from tqdm import tqdm, tqdm_notebook, tqdm_pandas\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": 70, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Index(['last_name', 'first_name', 'middle_name', 'suffix', 'nickname',\n", " 'full_name', 'birthday', 'gender', 'type', 'state', 'district',\n", " 'senate_class', 'party', 'url', 'address', 'phone', 'contact_form',\n", " 'rss_url', 'twitter', 'facebook', 'youtube', 'youtube_id',\n", " 'bioguide_id', 'thomas_id', 'opensecrets_id', 'lis_id', 'fec_ids',\n", " 'cspan_id', 'govtrack_id', 'votesmart_id', 'ballotpedia_id',\n", " 'washington_post_id', 'icpsr_id', 'wikipedia_id', 'last_name.1',\n", " 'first_name.1', 'middle_name.1', 'suffix.1', 'nickname.1',\n", " 'full_name.1', 'birthday.1', 'gender.1', 'type.1', 'state.1',\n", " 'district.1', 'senate_class.1', 'party.1', 'url.1', 'address.1',\n", " 'phone.1', 'contact_form.1', 'rss_url.1', 'twitter.1', 'facebook.1',\n", " 'youtube.1', 'youtube_id.1', 'bioguide_id.1', 'thomas_id.1',\n", " 'opensecrets_id.1', 'lis_id.1', 'fec_ids.1', 'cspan_id.1',\n", " 'govtrack_id.1', 'votesmart_id.1', 'ballotpedia_id.1',\n", " 'washington_post_id.1', 'icpsr_id.1', 'wikipedia_id.1'],\n", " dtype='object')" ] }, "execution_count": 70, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#data_df['did'] = data_df.apply(lambda x: generate_did()[0], axis=1)\n", "#data_df['key'] = data_df.apply(lambda x: generate_did()[1], axis=1)\n", "cleaned_df = data_df.copy()\n", "cleaned_df.head()\n", 
"cleaned_df.isna().sum()\n", "cleaned_df.isna().dropna()\n", "cleaned_df.describe()\n", "cleaned_df.shape\n", "cleaned_df.columns" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [], "source": [ "data_did = data_df.copy()\n", "data_did.to_csv(\"data_did.csv\")\n", "data_did" ] }, { "cell_type": "code", "execution_count": 76, "metadata": {}, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'DatasetDict'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[76], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Back to dataset\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mDatasetDict\u001b[39;00m \n\u001b[1;32m 3\u001b[0m secure_mode \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 4\u001b[0m train_split \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0.8\u001b[39m\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'DatasetDict'" ] } ], "source": [ "# Back to dataset\n", "\n", "secure_mode = False\n", "train_split = 0.8\n", "test_every = 5\n", "batch_size = 800\n", "\n", "ds = data_did\n", "train_len = int(train_split * len(ds))\n", "test_len = len(ds) - train_len\n", "\n", "print(f\"{train_len} samples for training, {test_len} for testing\")\n", "\n", "train_ds, test_ds = torch.utils.data.random_split(ds, [train_len, test_len])\n" ] }, { "cell_type": "code", "execution_count": 78, "metadata": {}, "outputs": [ { "ename": "TypeError", "evalue": "expected str, bytes or os.PathLike object, not Subset", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[78], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdatasets\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_dataset\n\u001b[0;32m----> 2\u001b[0m ds \u001b[38;5;241m=\u001b[39m \u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_ds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest_ds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msplit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtest\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m~/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages/datasets/load.py:2588\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[1;32m 2586\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m data_files \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m 
data_files:\n\u001b[1;32m 2587\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEmpty \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mdata_files\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdata_files\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m. It should be either non-empty or None (default).\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m-> 2588\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mPath\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mDATASET_STATE_JSON_FILENAME\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mexists():\n\u001b[1;32m 2589\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 2590\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are trying to load a dataset that was saved using `save_to_disk`. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 2591\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPlease use `load_from_disk` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 2592\u001b[0m )\n\u001b[1;32m 2594\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m streaming \u001b[38;5;129;01mand\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:871\u001b[0m, in \u001b[0;36mPath.__new__\u001b[0;34m(cls, *args, **kwargs)\u001b[0m\n\u001b[1;32m 869\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m Path:\n\u001b[1;32m 870\u001b[0m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;241m=\u001b[39m WindowsPath \u001b[38;5;28;01mif\u001b[39;00m os\u001b[38;5;241m.\u001b[39mname \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mnt\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m PosixPath\n\u001b[0;32m--> 871\u001b[0m \u001b[38;5;28mself\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_from_parts\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 872\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_flavour\u001b[38;5;241m.\u001b[39mis_supported:\n\u001b[1;32m 873\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcannot instantiate \u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m on your system\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 874\u001b[0m \u001b[38;5;241m%\u001b[39m (\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m,))\n", "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:509\u001b[0m, in \u001b[0;36mPurePath._from_parts\u001b[0;34m(cls, args)\u001b[0m\n\u001b[1;32m 504\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 505\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m 
\u001b[38;5;21m_from_parts\u001b[39m(\u001b[38;5;28mcls\u001b[39m, args):\n\u001b[1;32m 506\u001b[0m \u001b[38;5;66;03m# We need to call _parse_args on the instance, so as to get the\u001b[39;00m\n\u001b[1;32m 507\u001b[0m \u001b[38;5;66;03m# right flavour.\u001b[39;00m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;28mself\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mobject\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__new__\u001b[39m(\u001b[38;5;28mcls\u001b[39m)\n\u001b[0;32m--> 509\u001b[0m drv, root, parts \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_args\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 510\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_drv \u001b[38;5;241m=\u001b[39m drv\n\u001b[1;32m 511\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_root \u001b[38;5;241m=\u001b[39m root\n", "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:493\u001b[0m, in \u001b[0;36mPurePath._parse_args\u001b[0;34m(cls, args)\u001b[0m\n\u001b[1;32m 491\u001b[0m parts \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m a\u001b[38;5;241m.\u001b[39m_parts\n\u001b[1;32m 492\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 493\u001b[0m a \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mfspath(a)\n\u001b[1;32m 494\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(a, \u001b[38;5;28mstr\u001b[39m):\n\u001b[1;32m 495\u001b[0m \u001b[38;5;66;03m# Force-cast str subclasses to str (issue #21127)\u001b[39;00m\n\u001b[1;32m 496\u001b[0m parts\u001b[38;5;241m.\u001b[39mappend(\u001b[38;5;28mstr\u001b[39m(a))\n", "\u001b[0;31mTypeError\u001b[0m: expected str, bytes or os.PathLike object, not Subset" ] } ], "source": [ "from datasets import load_dataset\n", "ds = load_dataset(train_ds, test_ds, split=[\"train\", \"test\"])\n" ] }, { "cell_type": "code", "execution_count": 79, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "\n", "class CharByteEncoder(nn.Module):\n", " \"\"\"\n", " This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also\n", " perform the opposite operation to check a result.\n", " Examples:\n", " >>> encoder = CharByteEncoder()\n", " >>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])\n", " >>> encoder.decode(t) # returns \"Ślusàrski\"\n", " \"\"\"\n", "\n", " def __init__(self):\n", " super().__init__()\n", " self.start_token = \"\"\n", " self.end_token = \"\"\n", " self.pad_token = \"\"\n", "\n", " self.start_idx = 256\n", " self.end_idx = 257\n", " self.pad_idx = 258\n", "\n", " def forward(self, s: str, pad_to=0) -> torch.LongTensor:\n", " \"\"\"\n", " Encodes a string. 
It will append a start token (id=self.start_idx) and an end token \n", " (id=self.end_idx).\n", " Args:\n", " s: The string to encode.\n", " pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.\n", " Defaults to 0.\n", " Returns:\n", " The encoded LongTensor of indices.\n", " \"\"\"\n", " encoded = s.encode()\n", " n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0\n", " return torch.LongTensor(\n", " [self.start_idx]\n", " + [c for c in encoded] # noqa\n", " + [self.end_idx]\n", " + [self.pad_idx for _ in range(n_pad)]\n", " )\n", "\n", " def decode(self, char_ids_tensor: torch.LongTensor) -> str:\n", " \"\"\"\n", " The inverse of `forward`. Keeps the start, end, and pad indices.\n", " \"\"\"\n", " char_ids = char_ids_tensor.cpu().detach().tolist()\n", "\n", " out = []\n", " buf = []\n", " for c in char_ids:\n", " if c < 256:\n", " buf.append(c)\n", " else:\n", " if buf:\n", " out.append(bytes(buf).decode())\n", " buf = []\n", " if c == self.start_idx:\n", " out.append(self.start_token)\n", " elif c == self.end_idx:\n", " out.append(self.end_token)\n", " elif c == self.pad_idx:\n", " out.append(self.pad_token)\n", "\n", " if buf: # in case some are left\n", " out.append(bytes(buf).decode())\n", " return \"\".join(out)\n", "\n", " def __len__(self):\n", " \"\"\"\n", " The length of our encoder space. This is fixed to 256 (one byte) + 3 special chars\n", " (start, end, pad).\n", " Returns:\n", " 259\n", " \"\"\"\n", " return 259" ] }, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [], "source": [ "from torch.nn.utils.rnn import pad_sequence\n", "\n", "def padded_collate(batch, padding_idx=0):\n", " x = pad_sequence(\n", " [elem[0] for elem in batch], batch_first=True, padding_value=padding_idx\n", " )\n", " y = torch.stack([elem[1] for elem in batch]).long()\n", "\n", " return x, y" ] }, { "cell_type": "code", "execution_count": 74, "metadata": {}, "outputs": [], "source": [ "from torch.utils.data import Dataset\n", "from pathlib import Path\n", "\n", "\n", "class NamesDataset(Dataset):\n", " def __init__(self, root):\n", " self.root = Path(root)\n", "\n", " self.labels = list({langfile.stem for langfile in self.root.iterdir()})\n", " self.labels_dict = {label: i for i, label in enumerate(self.labels)}\n", " self.encoder = CharByteEncoder()\n", " self.samples = self.construct_samples()\n", "\n", " def __getitem__(self, i):\n", " return self.samples[i]\n", "\n", " def __len__(self):\n", " return len(self.samples)\n", "\n", " def construct_samples(self):\n", " samples = []\n", " for langfile in self.root.iterdir():\n", " label_name = langfile.stem\n", " label_id = self.labels_dict[label_name]\n", " with open(langfile, \"r\") as fin:\n", " for row in fin:\n", " samples.append(\n", " (self.encoder(row.strip()), torch.tensor(label_id).long())\n", " )\n", " return samples\n", "\n", " def label_count(self):\n", " cnt = Counter()\n", " for _x, y in self.samples:\n", " label = self.labels[int(y)]\n", " cnt[label] += 1\n", " return cnt\n", "\n", "\n", "VOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters." 
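 ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "A minimal sanity check (sketch) for the byte-level encoder defined above: encode an arbitrary sample string with `CharByteEncoder`, confirm that `decode` round-trips it with the `<s>`/`</s>`/`<pad>` markers, and check that the encoder size matches `VOCAB_SIZE`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hypothetical round-trip check; \"Jane Doe\" is just an arbitrary sample string\n", "enc = CharByteEncoder()\n", "ids = enc(\"Jane Doe\", pad_to=16)\n", "print(ids)\n", "print(enc.decode(ids)) # expected: '<s>Jane Doe</s>' followed by '<pad>' markers\n", "assert len(enc) == VOCAB_SIZE"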
] }, { "cell_type": "code", "execution_count": 81, "metadata": {}, "outputs": [], "source": [ "# Data Loaders\n", "from torch.utils.data import DataLoader\n", "\n", "train_loader = DataLoader(\n", "    train_ds,\n", "    batch_size=batch_size,\n", "    pin_memory=True,\n", "    collate_fn=padded_collate,\n", ")\n", "\n", "test_loader = DataLoader(\n", "    test_ds,\n", "    batch_size=2 * batch_size,\n", "    shuffle=False,\n", "    pin_memory=True,\n", "    collate_fn=padded_collate,\n", ")" ] },
{ "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "\n", "df = pd.read_csv(\"hf://datasets/synavate/medical_records_did/data_did.csv\")" ] },
{ "cell_type": "code", "execution_count": 93, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Index(['last_name', 'first_name', 'full_name', 'birthday', 'gender', 'type',\n", " 'state', 'district', 'senate_class', 'party', 'url', 'address', 'phone',\n", " 'contact_form', 'rss_url', 'twitter', 'facebook', 'youtube',\n", " 'youtube_id', 'bioguide_id', 'thomas_id', 'opensecrets_id', 'lis_id',\n", " 'fec_ids', 'cspan_id', 'govtrack_id', 'votesmart_id', 'ballotpedia_id',\n", " 'washington_post_id', 'icpsr_id', 'wikipedia_id', 'last_name.1',\n", " 'first_name.1', 'middle_name.1', 'suffix.1', 'nickname.1',\n", " 'full_name.1', 'birthday.1', 'gender.1', 'type.1', 'state.1',\n", " 'district.1', 'senate_class.1', 'party.1', 'url.1', 'address.1',\n", " 'phone.1', 'contact_form.1', 'rss_url.1', 'twitter.1', 'facebook.1',\n", " 'youtube.1', 'youtube_id.1', 'bioguide_id.1', 'thomas_id.1',\n", " 'opensecrets_id.1', 'lis_id.1', 'fec_ids.1', 'cspan_id.1',\n", " 'govtrack_id.1', 'votesmart_id.1', 'ballotpedia_id.1',\n", " 'washington_post_id.1', 'icpsr_id.1', 'wikipedia_id.1'],\n", " dtype='object')" ] }, "execution_count": 93, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Drop the CSV index column and the sparsely populated name fields\n", "df_drop = df.copy()\n", "df_drop.isna().sum()\n", "df_drop.drop(columns=['Unnamed: 0', 'middle_name', 'suffix', 'nickname'], inplace=True)\n", "df_drop.columns" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install scikit-learn\n", "from sklearn.model_selection import train_test_split\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.9" } }, "nbformat": 4, "nbformat_minor": 2 }