{ "cells": [ { "cell_type": "markdown", "id": "11353711-2a7e-47a3-b2f0-287a6b5d2e99", "metadata": {}, "source": [ "## Imports" ] }, { "cell_type": "code", "execution_count": null, "id": "d217c9c8-a9be-4920-9196-48e20a98db56", "metadata": {}, "outputs": [], "source": [ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n", "from torch.utils.data import TensorDataset, DataLoader\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import label_binarize\n", "from sklearn.metrics import roc_curve, auc\n", "from imblearn.over_sampling import SMOTE\n", "from sklearn.decomposition import PCA\n", "from collections import Counter\n", "\n", "import torch.nn.functional as F\n", "import matplotlib.pyplot as plt\n", "import torch.optim as optim\n", "import torch.nn as nn\n", "import seaborn as sns\n", "import numpy as np\n", "\n", "import imblearn\n", "import optuna\n", "import torch\n", "import json\n", "import os\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ] }, { "cell_type": "markdown", "id": "6270ebec-f9d2-4d5e-a146-7d16a149445e", "metadata": {}, "source": [ "## Load CLS-tokens and map 'incomplete-classes' to their respective full classes" ] }, { "cell_type": "code", "execution_count": 6, "id": "ae95e2b9-b741-4582-b36c-e47a05c56d4c", "metadata": {}, "outputs": [], "source": [ "X = torch.load('/home/evan/D1/project/code/sorted_cls_tokens_features.pt', map_location=device)\n", "#X = torch.load('/home/evan/D1/project/code/stretched_cls_tokens.pt', map_location=device)\n", "#X = torch.load('/home/evan/D1/project/code/reflected_cls_tokens.pt', map_location=device)\n", "\n", "y = np.load('/home/evan/D1/project/code/sorted_cls_tokens_labels.npy')\n", "frame_counts = np.load('/home/evan/D1/project/code/frame_counts.npy')\n", "\n", "\n", "class_mapping = {0:0, 1: 1, 2: 2, 3: 1, 4: 2}\n", "\n", "for i, label in enumerate(y):\n", " y[i] = class_mapping[label]\n", "print('Done')" ] }, { "cell_type": "markdown", "id": "2114fe95-f71c-47fc-a954-975cbfec28d4", "metadata": {}, "source": [ "## Split into train, val on games" ] }, { "cell_type": "code", "execution_count": 7, "id": "458096f5-39fb-4dda-9c26-b580213cc22b", "metadata": {}, "outputs": [], "source": [ "# Calculate cumulative start indices of each video in the concatenated tensor\n", "cumulative_starts = np.insert(np.cumsum(frame_counts), 0, 0)[:-1]\n", "\n", "\n", "split_ratio = (0.7, 0.15, 0.15) #train, validation, test\n", "\n", "num_videos = len(frame_counts)\n", "num_train_videos = int(num_videos * split_ratio[0])\n", "num_val_videos = int(num_videos * split_ratio[1])\n", "\n", "# Ensure total does not exceed the number of videos\n", "num_test_videos = num_videos - num_train_videos - num_val_videos\n", "\n", "# Shuffle video indices to split into training, validation, and test sets\n", "video_indices = np.arange(num_videos)\n", "np.random.seed(42)\n", "np.random.shuffle(video_indices)\n", "\n", "train_video_indices = video_indices[:num_train_videos]\n", "val_video_indices = video_indices[num_train_videos:num_train_videos + num_val_videos]\n", "test_video_indices = video_indices[num_train_videos + num_val_videos:]\n", "\n", "# Initialize lists for indices\n", "train_indices, val_indices, test_indices = [], [], []\n", "\n", "# Populate the index lists\n", "for idx in train_video_indices:\n", " start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n", " train_indices.extend(range(start, end))\n", "\n", "for idx 
"for idx in val_video_indices:\n", "    start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n", "    val_indices.extend(range(start, end))\n", "\n", "for idx in test_video_indices:\n", "    start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n", "    test_indices.extend(range(start, end))\n", "\n", "# Convert indices to tensors and extract corresponding subsets\n", "train_indices = torch.tensor(train_indices)\n", "val_indices = torch.tensor(val_indices)\n", "test_indices = torch.tensor(test_indices)\n", "\n", "X_train, y_train = X[train_indices], y[train_indices]\n", "X_val, y_val = X[val_indices], y[val_indices]\n", "X_test, y_test = X[test_indices], y[test_indices]" ] },
{ "cell_type": "markdown", "id": "b97efc93-d7e0-4a2a-aa36-ef2ade8a60e3", "metadata": {}, "source": [ "## Undersample and create undersampled train, val, test datasets" ] },
{ "cell_type": "code", "execution_count": 8, "id": "3af2b8d2-014b-439f-bf68-5d59d64c2812", "metadata": {}, "outputs": [], "source": [ "def undersample_data(X, y, target_counts):\n", "    unique_classes, counts = np.unique(y, return_counts=True)\n", "    print('Class counts before undersampling:', counts)\n", "    undersampled_indices = []\n", "\n", "    for cls in unique_classes:\n", "        cls_indices = np.where(y == cls)[0]\n", "        # Sample the requested number of frames without replacement;\n", "        # classes without an explicit target keep all their frames\n", "        n_keep = target_counts.get(int(cls), len(cls_indices))\n", "        undersampled_cls_indices = np.random.choice(cls_indices, n_keep, replace=False)\n", "        undersampled_indices.extend(undersampled_cls_indices)\n", "\n", "    np.random.shuffle(undersampled_indices) # Shuffle to mix classes\n", "    X_undersampled = X[undersampled_indices]\n", "    y_undersampled = y[undersampled_indices]\n", "\n", "    return X_undersampled, y_undersampled\n", "\n", "np.random.seed(42) # Set a specific random seed for reproducibility\n", "\n", "#X_train, y_train = undersample_data(X_train, y_train, {0: 2750, 1: 2750, 2: 5500})\n", "X_train, y_train = undersample_data(X_train, y_train, {0: 4000, 1: 4000, 2: 4000})\n", "\n", "#X_val, y_val = undersample_data(X_val, y_val, {0: 688, 1: 688, 2: 688})\n", "#X_val, y_val = undersample_data(X_val, y_val, {0: 500, 1: 500, 2: 500})\n", "X_val, y_val = undersample_data(X_val, y_val, {0: 1000, 1: 1000, 2: 1000})\n", "\n", "#X_test, y_test = undersample_data(X_test, y_test, {0: 688, 1: 688, 2: 688})\n", "#X_test, y_test = undersample_data(X_test, y_test, {0: 500, 1: 500, 2: 500})\n", "X_test, y_test = undersample_data(X_test, y_test, {0: 1000, 1: 1000, 2: 1000})" ] },
{ "cell_type": "markdown", "id": "d0902a93-f7e4-4b00-8e1c-50e860f045cc", "metadata": {}, "source": [ "## Check counts" ] },
{ "cell_type": "code", "execution_count": 9, "id": "c4ffc18f-b5c6-4932-98d4-38e926c3ca4f", "metadata": {}, "outputs": [], "source": [ "print(np.unique(y_train, return_counts=True))\n", "print(np.unique(y_val, return_counts=True))\n", "print(np.unique(y_test, return_counts=True))" ] },
{ "cell_type": "markdown", "id": "7be26503-cbdf-490c-bac7-9df3604b10fc", "metadata": {}, "source": [ "## Move all to device" ] },
{ "cell_type": "code", "execution_count": 10, "id": "cd691b16-56a1-4bbb-bb66-e39b292263c1", "metadata": {}, "outputs": [], "source": [ "# X_* are already tensors and y_* are numpy arrays, so move/convert accordingly\n", "X_train = X_train.to(device=device, dtype=torch.float32)\n", "y_train = torch.as_tensor(y_train, dtype=torch.long, device=device)\n",
"\n", "X_val = X_val.to(device=device, dtype=torch.float32)\n", "y_val = torch.as_tensor(y_val, dtype=torch.long, device=device)\n", "\n", "X_test = X_test.to(device=device, dtype=torch.float32)\n", "y_test = torch.as_tensor(y_test, dtype=torch.long, device=device)" ] },
{ "cell_type": "markdown", "id": "e4c6da28-32d2-4979-baa3-274d8304eb89", "metadata": {}, "source": [ "## Create dataloaders for train, val, test" ] },
{ "cell_type": "code", "execution_count": 11, "id": "7084da3b-f3ff-42be-afcc-8dc30d4ef3e2", "metadata": {}, "outputs": [], "source": [ "batch_size = 128\n", "\n", "train_dataset = TensorDataset(X_train, y_train)\n", "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n", "\n", "val_dataset = TensorDataset(X_val, y_val)\n", "val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n", "\n", "test_dataset = TensorDataset(X_test, y_test)\n", "test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)" ] },
{ "cell_type": "markdown", "id": "9121e433-74be-4391-a60a-f30f18005259", "metadata": { "tags": [] }, "source": [ "## Set folder-name to save data" ] },
{ "cell_type": "code", "execution_count": 12, "id": "0552af31-7025-4afd-b639-069e6704ca0b", "metadata": {}, "outputs": [], "source": [ "folder = '/home/evan/D1/project/code/smaller_model/raw/'\n", "\n", "os.makedirs(folder, exist_ok=True) # exist_ok makes a prior existence check unnecessary" ] },
{ "cell_type": "markdown", "id": "d4bdb1c0-4bac-4092-aee3-bd7833fc0036", "metadata": {}, "source": [ "## Create and initialize model" ] },
{ "cell_type": "code", "execution_count": 13, "id": "6f34a0e4-1fe8-45e9-a459-07feee5cde69", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", "class BaseModel(nn.Module):\n", "    def __init__(self):\n", "        super(BaseModel, self).__init__()\n", "\n", "        self.fc1 = nn.Linear(1024, 64)\n", "        self.dropout1 = nn.Dropout(0.4)\n", "        self.bn1 = nn.BatchNorm1d(64)\n", "\n", "        self.fc2 = nn.Linear(64, 32)\n", "        self.dropout2 = nn.Dropout(0.3)\n", "        self.bn2 = nn.BatchNorm1d(32)\n", "\n", "        self.fc3 = nn.Linear(32, 16)\n", "        self.dropout3 = nn.Dropout(0.2)\n", "        self.bn3 = nn.BatchNorm1d(16)\n", "\n", "        self.fc4 = nn.Linear(16, 3)\n", "\n", "    def forward(self, x):\n", "        x = F.relu(self.bn1(self.fc1(x)))\n", "        x = self.dropout1(x)\n", "\n", "        x = F.relu(self.bn2(self.fc2(x)))\n", "        x = self.dropout2(x)\n", "\n", "        x = F.relu(self.bn3(self.fc3(x)))\n", "        x = self.dropout3(x)\n", "\n", "        x = self.fc4(x)\n", "        return x\n", "\n", "model = BaseModel()\n", "print(model)" ] },
{ "cell_type": "markdown", "id": "c4e9289a-d03b-40ce-961f-07b4d5e61bce", "metadata": {}, "source": [ "## Check model parameters" ] },
{ "cell_type": "code", "execution_count": 14, "id": "7142cdef-fa50-4c21-aab2-39b7d54b4e17", "metadata": {}, "outputs": [], "source": [ "def count_parameters(model):\n", "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n", "\n", "#model = EnhancedMultiLayerClassifier(1024, 3)\n", "print(\"Number of trainable parameters:\", count_parameters(model))" ] },
{ "cell_type": "markdown", "id": "2b9c4905-3eae-44be-8aec-968bd0ed735e", "metadata": { "tags": [] }, "source": [ "## L1-regularization\n", "\n", "Adds the penalty $\\lambda_{L1} \\sum_i |\\theta_i|$ over all model parameters to the training loss." ] },
{ "cell_type": "code", "execution_count": 15, "id": "8b6b7352-6e2b-4a74-8289-122d8d521382", "metadata": {}, "outputs": [], "source": [ "def l1_regularization(model, lambda_l1):\n", "    l1_penalty = torch.tensor(0., device=device)\n",
"    for param in model.parameters():\n", "        l1_penalty += torch.norm(param, 1)\n", "    return lambda_l1 * l1_penalty" ] },
{ "cell_type": "markdown", "id": "430e52ef-e84c-4c4b-8567-2a4376bc3bf6", "metadata": { "tags": [] }, "source": [ "## Training loop" ] },
{ "cell_type": "code", "execution_count": 542, "id": "8bb58a7d-66d2-46e5-9281-e1476d6f9da3", "metadata": {}, "outputs": [], "source": [ "config = [\n", "{\n", "'lr': 0.00023189475417053056, 'weight_decay': 0.06013631013820486, 'lambda_l1': 7.530339626757409e-05,\n", "'epochs': 80,\n", "'break_margin': 2,\n", "'loss_function': 'CrossEntropy', # or 'FocalLoss'\n", "'alpha': 0.9186381075849595, # Only relevant if using FocalLoss\n", "'gamma': 0.2157540954710035 # Only relevant if using FocalLoss\n", "}]\n", "\n", "def run_experiment(config):\n", "    #model = EnhancedMultiLayerClassifier(1024, 3).to(device)\n", "    model = BaseModel().to(device)\n", "\n", "    if config['loss_function'] == 'CrossEntropy':\n", "        criterion = nn.CrossEntropyLoss().to(device)\n", "\n", "    elif config['loss_function'] == 'FocalLoss':\n", "        # FocalLoss is not defined in this notebook; see the sketch in the meta-learner section below\n", "        criterion = FocalLoss(alpha=config['alpha'], gamma=config['gamma'], reduction='mean').to(device)\n", "\n", "    optimizer = optim.Adam(model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n", "    epochs = config['epochs']\n", "    break_margin = config['break_margin']\n", "    best_f1 = 0.0\n", "    time_to_break = 0\n", "    best_loss = float('inf')\n", "    # Fallback values in case no epoch improves on best_f1\n", "    best_model_state_dict = model.state_dict()\n", "    best_all_targets, best_all_preds = [], []\n", "    train_losses, val_losses = [], []\n", "    output_file_path = os.path.join(folder, 'training_output_base.txt')\n", "\n", "    with open(output_file_path, 'w') as f:\n", "        for epoch in range(epochs):\n", "            model.train()\n", "            train_loss = 0\n", "            for X_batch, y_batch in train_loader:\n", "                X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "                optimizer.zero_grad()\n", "                outputs = model(X_batch)\n", "                loss = criterion(outputs, y_batch)\n", "\n", "                # Add the L1 regularization penalty to discourage overfitting\n", "                l1_penalty = l1_regularization(model, config['lambda_l1'])\n", "                loss += l1_penalty\n", "\n", "                loss.backward()\n", "                optimizer.step()\n", "                train_loss += loss.item()\n", "            train_losses.append(train_loss / len(train_loader))\n", "\n", "            model.eval()\n", "            val_loss = 0\n", "            all_preds, all_targets, all_outputs = [], [], []\n", "            with torch.no_grad():\n", "                for X_batch, y_batch in val_loader:\n", "                    X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "                    outputs = model(X_batch)\n", "                    loss = criterion(outputs, y_batch)\n", "                    val_loss += loss.item()\n", "                    _, predicted = torch.max(outputs.data, 1)\n", "                    all_preds.extend(predicted.cpu().numpy())\n", "                    all_targets.extend(y_batch.cpu().numpy())\n", "                    all_outputs.extend(outputs.cpu().numpy())\n", "            val_losses.append(val_loss / len(val_loader))\n", "\n", "            #precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n", "            # Track metrics for class 2 (tackle-replay) only\n", "            precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, labels=[2], average='macro', zero_division=0)\n", "            accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "            output_str = f'Epoch {epoch+1}: Train Loss: {train_losses[-1]:.4f}, Val Loss: {val_losses[-1]:.4f}, Precision: {precision_0:.4f}, Recall: {recall_0:.4f}, F1: {f1_0:.4f}, Accuracy: {accuracy:.4f}\\n'\n", "            f.write(output_str)\n", "            print(output_str, end='')\n", "\n",
"            # Save the model if the F1 of the current epoch is the best so far\n", "            if f1_0 > best_f1:\n", "                best_f1 = f1_0\n", "                best_epoch = epoch\n", "                best_model_state_dict = model.state_dict()\n", "                best_all_targets = all_targets\n", "                best_all_preds = all_preds\n", "                # Save the best model so far to disk\n", "                best_model_path = os.path.join(folder, 'best_model_for_class_test.pt')\n", "                torch.save(best_model_state_dict, best_model_path)\n", "\n", "            if val_loss < best_loss:\n", "                best_loss = val_loss\n", "                time_to_break = 0\n", "            else:\n", "                time_to_break += 1\n", "                if time_to_break == break_margin:\n", "                    print('Break margin hit')\n", "                    break\n", "\n", "    return best_model_state_dict, best_all_targets, best_all_preds\n", "\n", "for config_index, config_ in enumerate(config):\n", "    print(f'Running configuration {config_index + 1}/{len(config)}')\n", "    best_model_state_dict, all_targets, all_preds = run_experiment(config_)\n", "model.load_state_dict(best_model_state_dict)" ] },
{ "cell_type": "markdown", "id": "fc553e10-5f2b-465d-be1f-db80c2463272", "metadata": {}, "source": [ "## Check entropy/mutual information in features" ] },
{ "cell_type": "code", "execution_count": 69, "id": "f109848e-7ec5-43f1-9c2f-74b5598aeba6", "metadata": {}, "outputs": [], "source": [ "from sklearn.feature_selection import mutual_info_classif\n", "\n", "mi_scores = mutual_info_classif(X_val.cpu(), y_val.cpu())\n", "\n", "# Calculate the average mutual information per feature\n", "average_mi = np.mean(mi_scores)\n", "print(\"Average Mutual Information per feature:\", average_mi)\n", "\n", "plt.bar(range(len(mi_scores)), mi_scores, edgecolor='none')\n", "plt.xlabel('Features')\n", "plt.ylabel('Mutual Information Score')\n", "plt.title('MI Scores for Zero Padded Frame Features')\n", "#plt.savefig(\"padded_mutual_information_feature.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "plt.show()" ] },
{ "cell_type": "markdown", "id": "fbb1330f-3288-43fe-9f0e-873fe9d10bae", "metadata": {}, "source": [ "## Optuna optimization" ] },
{ "cell_type": "code", "execution_count": 36, "id": "51110ed4-0ee8-4642-b177-0b1343c021dd", "metadata": {}, "outputs": [], "source": [ "import logging\n", "import sys\n", "import time\n", "\n", "SEED = 13\n", "torch.manual_seed(SEED)\n", "\n", "def objective(trial):\n", "    lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)\n", "    weight_decay = trial.suggest_float(\"weight_decay\", 0, 0.1)\n", "    lambda_l1 = trial.suggest_float('lambda_l1', 0, 1e-2)\n", "    #gamma = trial.suggest_float('gamma', 0, 2)\n", "    #alpha = trial.suggest_float('alpha', 0, 1)\n", "\n", "    model = BaseModel().to(device)\n", "\n", "    criterion = nn.CrossEntropyLoss().to(device)\n", "\n", "    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n", "    epochs = 40\n", "    best_val_f1 = 0\n", "    epochs_no_improve = 0\n", "\n", "    # Early stopping: a patience of two epochs works well with the smaller model\n",
"    early_stop_threshold = 2\n", "\n", "    train_losses, val_losses = [], []\n", "    #output_file_path = os.path.join(folder, 'base_training_output.txt')\n", "\n", "    for epoch in range(epochs):\n", "        model.train()\n", "        train_loss = 0\n", "        for X_batch, y_batch in train_loader:\n", "            X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "            optimizer.zero_grad()\n", "            outputs = model(X_batch)\n", "            loss = criterion(outputs, y_batch)\n", "\n", "            # Add the L1 regularization penalty to discourage overfitting\n", "            l1_penalty = l1_regularization(model, lambda_l1)\n", "            loss += l1_penalty\n", "\n", "            loss.backward()\n", "            optimizer.step()\n", "            train_loss += loss.item()\n", "        train_losses.append(train_loss / len(train_loader))\n", "\n", "        model.eval()\n", "        val_loss = 0\n", "        all_preds, all_targets, all_outputs = [], [], []\n", "        with torch.no_grad():\n", "            for X_batch, y_batch in val_loader:\n", "                X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "                outputs = model(X_batch)\n", "                loss = criterion(outputs, y_batch)\n", "                val_loss += loss.item()\n", "                _, predicted = torch.max(outputs.data, 1)\n", "                all_preds.extend(predicted.cpu().numpy())\n", "                all_targets.extend(y_batch.cpu().numpy())\n", "                all_outputs.extend(outputs.cpu().numpy())\n", "        val_losses.append(val_loss / len(val_loader))\n", "\n", "        precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n", "        #precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, labels=[2], average='macro', zero_division=0)\n", "        accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "        if f1_0 > best_val_f1:\n", "            best_val_f1 = f1_0\n", "            epochs_no_improve = 0\n", "        else:\n", "            epochs_no_improve += 1\n", "\n", "        if epochs_no_improve >= early_stop_threshold:\n", "            print(\"Stopping early due to no improvement\")\n", "            break\n", "        trial.report(f1_0, epoch)\n", "        if trial.should_prune():\n", "            raise optuna.TrialPruned()\n", "    return best_val_f1 # report the best F1 seen, not the last epoch's\n", "\n", "#optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n", "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler(seed=SEED))\n", "\n", "start_time = time.time()\n", "study.optimize(objective, n_trials=150)\n", "end_time = time.time()\n", "elapsed_time = end_time - start_time\n", "\n", "print(f\"Optimization took {elapsed_time:.2f} seconds.\")" ] },
{ "cell_type": "markdown", "id": "4e81ee90-ddc9-430a-b526-02ce5fc8ed49", "metadata": { "tags": [] }, "source": [ "### Evaluate best model on test set" ] },
{ "cell_type": "code", "execution_count": 202, "id": "ece25538-1719-45b8-b53a-077b99370761", "metadata": {}, "outputs": [], "source": [ "#model.load_state_dict(torch.load(os.path.join(folder, 'best_model_for_class_test.pt')))\n", "#model.to(device)\n", "\n", "criterion = nn.CrossEntropyLoss().to(device) # defined locally so the cell does not rely on a leftover global\n", "\n", "model.eval()\n", "test_loss = 0\n", "all_preds = []\n", "all_targets = []\n", "\n", "with torch.no_grad():\n", "    for X_batch, y_batch in test_loader:\n", "        X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "        outputs = model(X_batch)\n", "        loss = criterion(outputs, y_batch)\n", "        test_loss += loss.item()\n", "        _, predicted = torch.max(outputs.data, 1)\n", "        all_preds.extend(predicted.cpu().numpy())\n", "        all_targets.extend(y_batch.cpu().numpy())\n", "\n", "test_loss /= len(test_loader)\n", "precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
"accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "test_output_str = f'Test Loss: {test_loss:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}\\n'\n", "print(test_output_str)\n", "\n", "cm = showConfMatrix(all_targets, all_preds)\n", "showClassWiseAcc(cm)" ] },
{ "cell_type": "markdown", "id": "e925a5cf-4961-4f8a-b3df-5a2876a19e4f", "metadata": { "tags": [] }, "source": [ "# Plots and metrics" ] },
{ "cell_type": "markdown", "id": "e4c8321e-674a-4328-aedd-d2b6c7b1c0cc", "metadata": { "tags": [] }, "source": [ "## Plot imports" ] },
{ "cell_type": "code", "execution_count": 24, "id": "cf291c8d-d70c-4daf-bd74-483a5b071897", "metadata": {}, "outputs": [], "source": [ "from sklearn.metrics import precision_recall_curve, roc_curve, auc, classification_report\n", "from sklearn.preprocessing import label_binarize\n", "from itertools import cycle" ] },
{ "cell_type": "markdown", "id": "7da930eb-005d-4d22-9295-5b3f38143ccd", "metadata": { "tags": [] }, "source": [ "### Metric helpers" ] },
{ "cell_type": "code", "execution_count": 278, "id": "58460399-1a01-4353-9198-f142a492d19a", "metadata": {}, "outputs": [], "source": [ "def showConfMatrix(all_targets, all_preds):\n", "    conf_matrix = confusion_matrix(all_targets, all_preds)\n", "    labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n", "    #labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n", "\n", "    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n", "    # plt.title('Confusion Matrix')\n", "    plt.xlabel('Predicted Label')\n", "    plt.ylabel('True Label')\n", "    #plt.savefig(f\"{folder}/confusionMatrixSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "    plt.show()\n", "    return conf_matrix" ] },
{ "cell_type": "code", "execution_count": 279, "id": "4f658992-815e-48e0-b2b5-f8d049b306f2", "metadata": {}, "outputs": [], "source": [ "def showClassWiseAcc(conf_matrix):\n", "    # Calculate accuracy per class\n", "    class_accuracies = conf_matrix.diagonal() / conf_matrix.sum(axis=1)\n", "\n", "    # Prepare accuracy data for writing to file\n", "    accuracy_data = \"\\n\".join([f\"Accuracy for class {i}: {class_accuracies[i]:.4f}\" for i in range(len(class_accuracies))])\n", "\n", "    # Print accuracy per class and write it to a file\n", "    print(accuracy_data)\n", "\n", "    accuracy_file_path = os.path.join(folder, \"class_accuracies.txt\")\n", "    with open(accuracy_file_path, 'w') as f:\n", "        f.write(accuracy_data)" ] },
{ "cell_type": "markdown", "id": "dcad796c-110f-49ea-8346-37f065f0da63", "metadata": { "tags": [] }, "source": [ "## Confusion Matrix" ] },
{ "cell_type": "code", "execution_count": 40, "id": "619c80b5-7740-48af-b12e-bff5a310475e", "metadata": {}, "outputs": [], "source": [ "cm = showConfMatrix(all_targets, all_preds)" ] },
{ "cell_type": "markdown", "id": "45e7bf15-8100-4659-a51e-5a1b3d02f303", "metadata": { "tags": [] }, "source": [ "## Accuracy per class" ] },
{ "cell_type": "code", "execution_count": 545, "id": "a81ccac7-98d4-42af-ae0a-26327cdd4483", "metadata": {}, "outputs": [], "source": [ "cm = showConfMatrix(all_targets, all_preds)\n", "showClassWiseAcc(cm)\n", "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n", "\n", "print(classification_report(all_targets, all_preds, target_names=labels))\n",
"#torch.save(model.state_dict(), f'{folder}/class_2_74_93_82.pt')\n" ] },
{ "cell_type": "markdown", "id": "f9bd689a-50eb-45d0-b150-0cb2c2fad1c1", "metadata": { "jp-MarkdownHeadingCollapsed": true, "tags": [] }, "source": [ "## ROC Curve" ] },
{ "cell_type": "code", "execution_count": 74, "id": "698d1d4c-d02a-4fd9-899b-42afabccc652", "metadata": {}, "outputs": [], "source": [ "y_score = np.array(all_outputs) # validation outputs collected during training\n", "fpr = dict()\n", "tpr = dict()\n", "roc_auc = dict()\n", "n_classes = len(labels)\n", "\n", "y_test_one_hot = np.eye(n_classes)[y_val.cpu()]\n", "\n", "for i in range(n_classes):\n", "    fpr[i], tpr[i], _ = roc_curve(y_test_one_hot[:, i], y_score[:, i])\n", "    roc_auc[i] = auc(fpr[i], tpr[i])\n", "\n", "# Plot all ROC curves\n", "plt.figure()\n", "colors = ['blue', 'red', 'green', 'darkorange', 'purple']\n", "for i, color in zip(range(n_classes), colors):\n", "    plt.plot(fpr[i], tpr[i], color=color, lw=2,\n", "             label='ROC curve of class {0} (area = {1:0.2f})'\n", "             ''.format(labels[i], roc_auc[i]))\n", "\n", "plt.plot([0, 1], [0, 1], 'k--', lw=2)\n", "plt.xlim([0.0, 1.0])\n", "plt.ylim([0.0, 1.05])\n", "plt.xlabel('False Positive Rate')\n", "plt.ylabel('True Positive Rate')\n", "print('Receiver operating characteristic for multi-class')\n", "plt.legend(loc=\"lower right\")\n", "#plt.savefig(f\"{folder}/ROCCurveSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\") # epochs_ran is not defined in this notebook\n", "plt.show()" ] },
{ "cell_type": "markdown", "id": "3b7c80c8-6fad-4b70-b8e3-b584639fce97", "metadata": { "tags": [] }, "source": [ "## Multi-Class Precision-Recall Curve" ] },
{ "cell_type": "code", "execution_count": 75, "id": "71c9b34d-46be-4be2-b77a-f9734a6f5683", "metadata": {}, "outputs": [], "source": [ "y_test_bin = label_binarize(y_val.cpu(), classes=range(n_classes))\n", "\n", "precision_recall = {}\n", "\n", "for i in range(n_classes):\n", "    precision, recall, _ = precision_recall_curve(y_test_bin[:, i], y_score[:, i])\n", "    precision_recall[i] = (precision, recall)\n", "\n", "colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])\n", "\n", "plt.figure(figsize=(6, 4))\n", "\n", "for i, color in zip(range(n_classes), colors):\n", "    precision, recall = precision_recall[i]\n", "    plt.plot(recall, precision, color=color, lw=2, label=f'{labels[i]}')\n", "\n", "plt.xlabel('Recall')\n", "plt.ylabel('Precision')\n", "print('Multi-Class Precision-Recall Curve')\n", "plt.legend(loc='best')\n", "#plt.savefig(f\"{folder}/MultiClassPRCurveSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\") # epochs_ran is not defined in this notebook\n", "plt.show()" ] },
{ "cell_type": "markdown", "id": "bbf3db37-57cd-4e32-80ba-0a29047d2015", "metadata": { "tags": [] }, "source": [ "# Meta Learner" ] },
{ "cell_type": "markdown", "id": "6e9d5d36-1a7d-4b8e-8c16-04aa416ff1a0", "metadata": {}, "source": [ "We train a meta-model (meta-learner) by stacking the outputs of three base models, each specialising in one class. The meta-learner is fed all three base models' outputs together with the correct label, so it can learn where each base model is strong and where it is weak. This is done by stacking the base models' outputs and using them as the input data for training the meta-learner."
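, "\n", "\n", "As a worked example of the stacked input: each base model $m$ emits a softmax vector $p^{(m)} \\in \\mathbb{R}^{3}$, so the meta-feature for one frame is the concatenation $x = [p^{(0)}, p^{(1)}, p^{(2)}] \\in \\mathbb{R}^{9}$, which matches the 9-dimensional meta input size computed below."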
] },
{ "cell_type": "code", "execution_count": 16, "id": "51db8fbd-067f-4f67-8871-591338657714", "metadata": {}, "outputs": [], "source": [ "folder1 = '/home/evan/D1/project/code/smaller_model/raw_training/'" ] },
{ "cell_type": "markdown", "id": "ee8bb02e-fb57-416d-b056-021e6e53a843", "metadata": {}, "source": [ "## Generate base-model outputs" ] },
{ "cell_type": "code", "execution_count": 18, "id": "6f766c76-c779-4007-b299-324106fae0ce", "metadata": {}, "outputs": [], "source": [ "model0 = BaseModel().to(device)\n", "model1 = BaseModel().to(device)\n", "model2 = BaseModel().to(device)\n", "\n", "model0.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_0/class_0_65_79_71.pt')))\n", "model0.eval() # inference mode: freeze dropout and batch-norm statistics\n", "\n", "model1.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_1/class_1_74_93_83.pt')))\n", "model1.eval()\n", "\n", "model2.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_2/class_2_84_90_87.pt')))\n", "model2.eval()\n", "\n", "base_model_outputs = []\n", "\n", "with torch.no_grad():\n", "    for X_batch, _ in val_loader:\n", "        X_batch = X_batch.to(device)\n", "        # Store the probabilities, not the class predictions\n", "        probs0 = torch.softmax(model0(X_batch), dim=1)\n", "        probs1 = torch.softmax(model1(X_batch), dim=1)\n", "        probs2 = torch.softmax(model2(X_batch), dim=1)\n", "\n", "        # Concatenate the model outputs along the feature dimension\n", "        model_output = torch.cat((probs0, probs1, probs2), dim=1)\n", "\n", "        base_model_outputs.append(model_output)\n", "\n", "# Stack all batches to form the complete set of base model outputs\n", "base_model_outputs = torch.cat(base_model_outputs, dim=0)" ] },
{ "cell_type": "markdown", "id": "342dd954-803b-4d4d-bcec-56bdd2fd3166", "metadata": {}, "source": [ "### Meta-Learner class" ] },
{ "cell_type": "code", "execution_count": 19, "id": "6ec860ca-ff28-402b-b1c2-263283057b27", "metadata": {}, "outputs": [], "source": [ "import torch.nn as nn\n", "import torch.nn.init as init\n", "\n", "class MetaModel(nn.Module):\n", "    def __init__(self, input_size, num_classes=3):\n", "        super(MetaModel, self).__init__()\n", "\n", "        self.network = nn.Sequential(\n", "            nn.Linear(input_size, 32),\n", "            nn.BatchNorm1d(32),\n", "            nn.ReLU(),\n", "            nn.Dropout(0.3),\n", "\n", "            nn.Linear(32, num_classes),\n", "            # NOTE: the cells below train with CrossEntropyLoss, which applies\n", "            # log-softmax internally; LogSoftmax outputs would conventionally\n", "            # be paired with nn.NLLLoss instead\n", "            nn.LogSoftmax(dim=1)\n", "        )\n", "\n", "        # Apply Kaiming initialization to all linear layers\n", "        self.apply(self.initialize_weights)\n", "\n", "    def forward(self, x):\n", "        x = self.network(x)\n", "        return x\n", "\n", "    def initialize_weights(self, m):\n", "        if isinstance(m, nn.Linear):\n", "            init.kaiming_uniform_(m.weight, nonlinearity='relu')\n", "            if m.bias is not None:\n", "                init.constant_(m.bias, 0)" ] },
{ "cell_type": "markdown", "id": "580ad04e-8ad0-40a9-b8ce-94006dd7d114", "metadata": {}, "source": [ "## Split base-model outputs into train, val" ] },
{ "cell_type": "code", "execution_count": 20, "id": "7c935eb9-daba-45e4-807d-17b7e47d57ed", "metadata": {}, "outputs": [], "source": [ "y_val = torch.cat([y for _, y in val_loader], dim=0) # Rebuild y_val in loader order so the labels align with base_model_outputs\n", "\n", "print(np.unique(y_val.cpu().numpy(), return_counts=True))\n", "\n", "X_meta_train, X_meta_val, y_meta_train, y_meta_val = train_test_split(\n", "    base_model_outputs.cpu().numpy(),\n", "    y_val.cpu().numpy(),\n", "    test_size=0.2,\n", "    random_state=42\n", ")\n", "\n", "input_size = base_model_outputs.size(1) # 3 models * num_classes = 9" ] },
"79008d19-0814-4b21-beaf-b2851d6d5616", "metadata": {}, "source": [ "## Create dataloaders" ] }, { "cell_type": "code", "execution_count": 21, "id": "772650c2-90ba-40d6-b0e5-1be0c60b664e", "metadata": {}, "outputs": [], "source": [ "\n", "# Convert numpy arrays back to tensors for training\n", "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n", "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n", "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n", "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n", "\n", "train_meta_dataset = TensorDataset(X_meta_train, y_meta_train)\n", "train_meta_loader = DataLoader(train_meta_dataset, batch_size=64, shuffle=True)\n", "\n", "val_meta_dataset = TensorDataset(X_meta_val, y_meta_val)\n", "val_meta_loader = DataLoader(val_meta_dataset, batch_size=64, shuffle=False)\n" ] }, { "cell_type": "markdown", "id": "7341faac-bfb6-4aff-be2b-2b98a398d55e", "metadata": {}, "source": [ "## Optimize meta-learner" ] }, { "cell_type": "code", "execution_count": 181, "id": "bbb99bb4-875f-4df2-8173-70ad2679f9a8", "metadata": {}, "outputs": [], "source": [ "import logging\n", "import sys\n", "\n", "SEED = 13\n", "torch.manual_seed(SEED)\n", "\n", "#criterion = FocalLoss(alpha=1, gamma=2, reduction='mean')\n", "\n", "# Convert numpy arrays back to tensors for training\n", "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n", "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n", "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n", "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n", "\n", "train_meta_dataset = TensorDataset(X_meta_train, y_meta_train)\n", "train_meta_loader = DataLoader(train_meta_dataset, batch_size=64, shuffle=True)\n", "\n", "val_meta_dataset = TensorDataset(X_meta_val, y_meta_val)\n", "val_meta_loader = DataLoader(val_meta_dataset, batch_size=64, shuffle=False)\n", "\n", "\n", "\n", "def objective(trial):\n", " lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)\n", " weight_decay = trial.suggest_float(\"weight_decay\", 0, 0.1)\n", " lambda_l1 = trial.suggest_float('lambda_l1', 0, 1e-2)\n", " #gamma = trial.suggest_float('gamma', 0, 2)\n", " #alpha = trial.suggest_float('alpha', 0, 1)\n", " \n", " meta_model = MetaModel(input_size=input_size).to(device) \n", " optimizer = torch.optim.Adam(meta_model.parameters(), lr=lr, weight_decay=weight_decay)\n", " criterion = nn.CrossEntropyLoss()\n", "\n", " #model0, model1, model2 = get_models()\n", "\n", " \n", " \n", " epochs = 400\n", " best_val_f1 = 0\n", " epochs_no_improve = 0\n", " early_stop_threshold = 2\n", " \n", " train_losses, val_losses = [], []\n", " #output_file_path = os.path.join(folder, 'training_output.txt')\n", "\n", " for epoch in range(epochs):\n", " model.train()\n", " train_loss = 0\n", " for X_batch, y_batch in train_meta_loader:\n", " \n", " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", " optimizer.zero_grad()\n", " \n", " outputs = meta_model(X_batch)\n", " loss = criterion(outputs, y_batch)\n", "\n", " # Calculate L1 regularization penalty to prevent overfitting\n", " l1_penalty = l1_regularization(model, lambda_l1)\n", "\n", " # Add L1 penalty to the loss\n", " loss += l1_penalty\n", "\n", " loss.backward()\n", " optimizer.step()\n", " train_loss += loss.item()\n", " train_losses.append(train_loss / len(train_loader))\n", "\n", " model.eval()\n", " val_loss = 0\n", " all_preds, all_targets, all_outputs 
"        with torch.no_grad():\n", "            for X_batch, y_batch in val_meta_loader:\n", "                X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "                outputs = meta_model(X_batch)\n", "                loss = criterion(outputs, y_batch)\n", "                val_loss += loss.item()\n", "                _, predicted = torch.max(outputs.data, 1)\n", "                all_preds.extend(predicted.cpu().numpy())\n", "                all_targets.extend(y_batch.cpu().numpy())\n", "                all_outputs.extend(outputs.cpu().numpy())\n", "        val_losses.append(val_loss / len(val_meta_loader))\n", "\n", "        precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n", "        accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "        if f1_0 > best_val_f1:\n", "            best_val_f1 = f1_0\n", "            epochs_no_improve = 0\n", "        else:\n", "            epochs_no_improve += 1\n", "\n", "        if epochs_no_improve >= early_stop_threshold:\n", "            print(\"Stopping early due to no improvement\")\n", "            break\n", "        trial.report(f1_0, epoch)\n", "        if trial.should_prune():\n", "            raise optuna.TrialPruned()\n", "    return best_val_f1 # report the best F1 seen, not the last epoch's\n", "\n", "#optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n", "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler(seed=SEED))\n", "study.optimize(objective, n_trials=150)" ] },
{ "cell_type": "markdown", "id": "c8bd0c66-1a1f-41aa-80d1-b921f8dfb4c1", "metadata": {}, "source": [ "## Print optuna-stats" ] },
{ "cell_type": "code", "execution_count": 182, "id": "0b0a7fd5-d1e0-4466-8a57-f435ba68e1d2", "metadata": {}, "outputs": [], "source": [ "# Get and print the best parameters\n", "best_params = study.best_params\n", "print(\"Best parameters:\", best_params)\n", "\n", "# Get and print the best trial\n", "best_trial = study.best_trial\n", "print(\"Best trial:\", best_trial)" ] },
{ "cell_type": "markdown", "id": "c9b28dbc-4266-4485-a814-548ed9e01c3c", "metadata": {}, "source": [ "### Meta-learner training" ] },
{ "cell_type": "markdown", "id": "2ad3c476-178b-4ad6-9eba-19a5727ad48b", "metadata": {}, "source": [ "Cross-entropy turned out to give more stable accuracies across classes than focal loss."
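, "\n", "\n", "For reference: `FocalLoss` is used in the training loops above but never defined in this notebook. A minimal multi-class focal-loss sketch, consistent with the `alpha`/`gamma`/`reduction` arguments used there, might look like the following. This is an assumed implementation for illustration, not the notebook's original one:\n", "\n", "```python\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", "class FocalLoss(nn.Module):\n", "    def __init__(self, alpha=1.0, gamma=2.0, reduction='mean'):\n", "        super().__init__()\n", "        self.alpha = alpha\n", "        self.gamma = gamma\n", "        self.reduction = reduction\n", "\n", "    def forward(self, logits, targets):\n", "        # Per-sample cross entropy, then down-weight easy examples\n", "        ce = F.cross_entropy(logits, targets, reduction='none')\n", "        pt = torch.exp(-ce)  # probability assigned to the true class\n", "        loss = self.alpha * (1 - pt) ** self.gamma * ce\n", "        if self.reduction == 'mean':\n", "            return loss.mean()\n", "        if self.reduction == 'sum':\n", "            return loss.sum()\n", "        return loss\n", "```"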
] },
{ "cell_type": "code", "execution_count": 26, "id": "4eb430e4-1dee-455f-9279-7e71ac9005cb", "metadata": {}, "outputs": [], "source": [ "y_val = torch.cat([y for _, y in val_loader], dim=0) # Rebuild y_val in loader order so the labels align with base_model_outputs\n", "\n", "print(np.unique(y_val.cpu().numpy(), return_counts=True))\n", "# Split the data into train, validation, and test sets (60/20/20)\n", "X_meta_train, X_meta_temp, y_meta_train, y_meta_temp = train_test_split(\n", "    base_model_outputs.cpu().numpy(),\n", "    y_val.cpu().numpy(),\n", "    test_size=0.4,\n", "    random_state=42\n", ")\n", "\n", "X_meta_val, X_meta_test, y_meta_val, y_meta_test = train_test_split(\n", "    X_meta_temp,\n", "    y_meta_temp,\n", "    test_size=0.5,\n", "    random_state=42\n", ")\n", "\n", "input_size = base_model_outputs.size(1) # 3 models * num_classes = 9\n", "\n", "# Hyperparameters taken from the Optuna search above\n", "config = {\n", "    'lr': 0.007728103291008411,\n", "    'weight_decay': 0.003503652410143732,\n", "    'lambda_l1': 0.002984494708891794\n", "}\n", "\n", "meta_model = MetaModel(input_size=input_size).to(device)\n", "optimizer = torch.optim.Adam(meta_model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n", "lambda_l1 = config['lambda_l1']\n", "\n", "criterion = nn.CrossEntropyLoss()\n", "\n", "# Convert numpy arrays back to tensors for training\n", "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n", "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n", "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n", "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n", "X_meta_test = torch.tensor(X_meta_test, dtype=torch.float).to(device)\n", "y_meta_test = torch.tensor(y_meta_test, dtype=torch.long).to(device)\n", "\n", "# Rebuild the meta dataloaders from THIS split (the loaders created earlier\n", "# used the 80/20 split); test_meta_loader is needed by the inference cell below\n", "train_meta_dataset = TensorDataset(X_meta_train, y_meta_train)\n", "train_meta_loader = DataLoader(train_meta_dataset, batch_size=64, shuffle=True)\n", "\n", "val_meta_dataset = TensorDataset(X_meta_val, y_meta_val)\n", "val_meta_loader = DataLoader(val_meta_dataset, batch_size=64, shuffle=False)\n", "\n", "test_meta_dataset = TensorDataset(X_meta_test, y_meta_test)\n", "test_meta_loader = DataLoader(test_meta_dataset, batch_size=64, shuffle=False)\n", "\n", "all_preds, all_targets, all_outputs = [], [], []\n", "train_losses, val_losses = [], []\n", "\n", "best_val_f1 = 0\n", "epochs_no_improve = 0\n", "early_stop_threshold = 2\n", "\n", "best_f1 = 0.0\n", "\n", "# Training loop for the meta-model\n", "epochs = 80\n", "\n", "for epoch in range(epochs):\n", "    meta_model.train()\n", "    train_loss = 0\n", "    for X_batch, y_batch in train_meta_loader:\n", "        X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "        optimizer.zero_grad()\n", "        outputs = meta_model(X_batch)\n", "        loss = criterion(outputs, y_batch)\n", "        l1_penalty = l1_regularization(meta_model, lambda_l1)\n", "        loss += l1_penalty\n", "        loss.backward()\n", "        optimizer.step()\n", "        train_loss += loss.item()\n", "    train_losses.append(train_loss / len(train_meta_loader))\n", "\n", "    meta_model.eval()\n", "    val_loss = 0\n", "    all_preds, all_targets, all_outputs = [], [], []\n", "    with torch.no_grad():\n", "        for X_batch, y_batch in val_meta_loader:\n", "            X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "            outputs = meta_model(X_batch)\n", "            loss = criterion(outputs, y_batch)\n", "            val_loss += loss.item()\n", "            _, predicted = torch.max(outputs.data, 1)\n", "            all_preds.extend(predicted.cpu().numpy())\n", "            all_targets.extend(y_batch.cpu().numpy())\n", "            all_outputs.extend(outputs.cpu().numpy())\n", "    val_losses.append(val_loss / len(val_meta_loader))\n", "\n", "    precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n", "    accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "    output_str = f'Epoch {epoch+1}: Train Loss: {train_losses[-1]:.4f}, Val Loss: {val_losses[-1]:.4f}, Precision: {precision_0:.4f}, Recall: {recall_0:.4f}, F1: {f1_0:.4f}, Accuracy: {accuracy:.4f}\\n'\n",
"    print(output_str)\n", "\n", "    if f1_0 > best_f1:\n", "        best_f1 = f1_0\n", "        best_epoch = epoch\n", "        best_model_state_dict = meta_model.state_dict()\n", "        best_all_targets = all_targets\n", "        best_all_preds = all_preds\n", "        epochs_no_improve = 0\n", "    else:\n", "        epochs_no_improve += 1\n", "\n", "    if epochs_no_improve >= early_stop_threshold:\n", "        print(\"Stopping early due to no improvement\")\n", "        break\n", "\n", "meta_model.load_state_dict(best_model_state_dict)" ] },
{ "cell_type": "markdown", "id": "041c315b-068b-4ebb-94d7-629b75450d71", "metadata": {}, "source": [ "## Inference on test set" ] },
{ "cell_type": "code", "execution_count": null, "id": "6384288c-63d7-4bca-88fb-5bca1df47d7c", "metadata": {}, "outputs": [], "source": [ "# Evaluate on the test set\n", "meta_model.eval()\n", "test_loss = 0\n", "all_test_preds, all_test_targets = [], []\n", "with torch.no_grad():\n", "    for X_batch, y_batch in test_meta_loader:\n", "        X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n", "        outputs = meta_model(X_batch)\n", "        loss = criterion(outputs, y_batch)\n", "        test_loss += loss.item()\n", "        _, predicted = torch.max(outputs.data, 1)\n", "        all_test_preds.extend(predicted.cpu().numpy())\n", "        all_test_targets.extend(y_batch.cpu().numpy())\n", "\n", "test_loss /= len(test_meta_loader)\n", "precision_test, recall_test, f1_test, _ = precision_recall_fscore_support(all_test_targets, all_test_preds, average='weighted', zero_division=0)\n", "accuracy_test = accuracy_score(all_test_targets, all_test_preds)\n", "\n", "print(f'Test Loss: {test_loss:.4f}, Test Precision: {precision_test:.4f}, Test Recall: {recall_test:.4f}, Test F1: {f1_test:.4f}, Test Accuracy: {accuracy_test:.4f}')" ] },
{ "cell_type": "markdown", "id": "a17da765-d795-49f3-abbb-06850b7f9264", "metadata": { "tags": [] }, "source": [ "### Conf-matrix and class-wise acc for meta-learner" ] },
{ "cell_type": "code", "execution_count": 27, "id": "eb41e5f6-d038-4696-ae55-3d2e3ebe7c2e", "metadata": {}, "outputs": [], "source": [ "# all_targets / all_preds hold the last validation epoch; best_all_targets /\n", "# best_all_preds hold the best epoch whose weights were loaded above\n", "conf_matrix = confusion_matrix(all_targets, all_preds)\n", "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n", "#labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n", "\n", "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n", "# plt.title('Confusion Matrix')\n", "plt.xlabel('Predicted Label')\n", "plt.ylabel('True Label')\n", "#plt.savefig(f\"{folder1}/meta/MetaModel_stretched.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "plt.show()\n", "\n", "# Calculate accuracy per class\n", "class_accuracies = conf_matrix.diagonal() / conf_matrix.sum(axis=1)\n", "\n", "# Prepare accuracy data for writing to file\n", "accuracy_data = \"\\n\".join([f\"Accuracy for class {i}: {class_accuracies[i]:.4f}\" for i in range(len(class_accuracies))])\n", "print(classification_report(all_targets, all_preds, target_names=labels))\n", "\n", "# Print accuracy per class\n", "print(accuracy_data)\n", "\n", "#torch.save(meta_model.state_dict(), f'{folder1}/meta/meta_model.pt')\n", "\n", "# Optionally write the accuracies to a file:\n", "#accuracy_file_path = os.path.join(folder, \"class_accuracies.txt\")\n", "#with open(accuracy_file_path, 'w') as f:\n", "#    f.write(f\"Samples: {len(all_preds)}\\n\") # Write the number of samples\n",
"#    f.write(accuracy_data) # Write the accuracy data" ] },
{ "cell_type": "markdown", "id": "67a4e623-b813-4d1c-bcc5-45cca41140f9", "metadata": {}, "source": [ "## ROC curve" ] },
{ "cell_type": "code", "execution_count": 64, "id": "894c35a1-a52f-438b-93ce-044429bdaf2f", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from sklearn.metrics import roc_curve, auc\n", "from sklearn.preprocessing import label_binarize\n", "import matplotlib.pyplot as plt\n", "\n", "# Class names and count\n", "class_names = ['background', 'tackle-live', 'tackle-replay']\n", "n_classes = len(class_names)\n", "\n", "# Binarize the test-set targets and predictions from the inference cell above.\n", "# Note: binarizing hard predictions yields a coarse three-point ROC curve;\n", "# using the softmax scores instead would give a smoother one.\n", "test_targets_bin = label_binarize(all_test_targets, classes=[0, 1, 2])\n", "test_predictions_bin = label_binarize(all_test_preds, classes=[0, 1, 2])\n", "\n", "# ROC curve and AUC for each class\n", "fpr = {}\n", "tpr = {}\n", "roc_auc = {}\n", "\n", "for i in range(n_classes):\n", "    fpr[i], tpr[i], _ = roc_curve(test_targets_bin[:, i], test_predictions_bin[:, i])\n", "    roc_auc[i] = auc(fpr[i], tpr[i])\n", "\n", "# Plot ROC curves for each class\n", "plt.figure(figsize=(8, 6))\n", "for i in range(n_classes):\n", "    plt.plot(fpr[i], tpr[i], label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})')\n", "plt.plot([0, 1], [0, 1], 'k--')\n", "plt.xlim([0.0, 1.0])\n", "plt.grid(visible=True)\n", "plt.ylim([0.0, 1.05])\n", "plt.xlabel('False Positive Rate')\n", "plt.ylabel('True Positive Rate')\n", "plt.title('Multi-class ROC Curve')\n", "plt.legend(loc='lower right')\n", "plt.savefig(\"baseline-roc.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "plt.show()" ] },
{ "cell_type": "markdown", "id": "580a616d-5b01-4725-9b5b-71c71537bf22", "metadata": {}, "source": [ "## Check model agreement" ] },
{ "cell_type": "code", "execution_count": 241, "id": "bbe695f7-458d-4559-bc58-0b14dba6785d", "metadata": {}, "outputs": [], "source": [ "import torch\n", "from sklearn.metrics import precision_recall_fscore_support, accuracy_score\n", "\n", "def evaluate_model_agreement(meta_model, base_models, val_loader, device, criterion):\n", "    meta_model.eval()\n", "    agreement_counts = [0] * len(base_models) # Agreement count for each base model\n", "    total_predictions = 0 # Total predictions made\n", "\n", "    all_preds = []\n", "    all_targets = []\n", "\n", "    with torch.no_grad():\n", "        for X_batch, y_meta_val in val_loader:\n", "            X_batch = X_batch.to(device)\n", "            y_meta_val = y_meta_val.to(device)\n", "\n", "            # Get predictions from each base model and the MetaModel\n", "            base_probs = [torch.softmax(model(X_batch), dim=1) for model in base_models]\n", "            base_predictions = [torch.max(probs, 1)[1] for probs in base_probs]\n", "\n", "            # Concatenate the model outputs along the feature dimension for MetaModel input\n", "            meta_input = torch.cat(base_probs, dim=1)\n", "            meta_outputs = meta_model(meta_input)\n", "            meta_predictions = torch.max(meta_outputs, 1)[1]\n", "\n", "            # Compare MetaModel predictions with each base model's predictions\n", "            for i, base_preds in enumerate(base_predictions):\n", "                agreement_counts[i] += (base_preds == meta_predictions).sum().item()\n", "\n", "            total_predictions += y_meta_val.size(0)\n", "\n", "            # Collect predictions for evaluation\n", "            all_preds.extend(meta_predictions.cpu().numpy())\n", "            all_targets.extend(y_meta_val.cpu().numpy())\n", "\n", "            # Compute loss on the final batch (optional)\n", "            val_loss = criterion(meta_outputs, y_meta_val)\n", "\n", "    # Calculate precision, recall, f1-score, and accuracy\n", "    precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
"    accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "    # Calculate agreement percentages\n", "    agreement_percentages = [count / total_predictions * 100 for count in agreement_counts]\n", "\n", "    print(f\"Loss: {val_loss.item()}\")\n", "    print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}')\n", "    # Also return targets and predictions so they can be inspected below\n", "    return agreement_percentages, all_targets, all_preds\n", "\n", "# Usage\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "#meta_model = best_meta_model # Your loaded MetaModel\n", "base_models = [model0, model1, model2] # List of your base models\n", "criterion = torch.nn.CrossEntropyLoss() # Your loss function\n", "\n", "agreement_percentages, all_targets, all_preds = evaluate_model_agreement(meta_model, base_models, val_loader, device, criterion)\n", "for i, pct in enumerate(agreement_percentages):\n", "    print(f\"Model {i} Agreement Percentage: {pct:.2f}%\")\n", "\n", "print(classification_report(all_targets, all_preds, target_names=labels))" ] },
{ "cell_type": "markdown", "id": "b4a9897e-6a1c-4f13-8789-435ba42308c9", "metadata": {}, "source": [ "## Inference" ] },
{ "cell_type": "code", "execution_count": 28, "id": "80e2b209-0a62-45dd-a7c5-a655d3653648", "metadata": {}, "outputs": [], "source": [ "import time\n", "\n", "all_preds, all_targets, all_outputs = [], [], []\n", "batch_times = []\n", "\n", "meta_model.eval()\n", "\n", "with torch.no_grad():\n", "    for batch_idx, (X_batch, y_batch) in enumerate(test_loader):\n", "        start_time = time.time() # reset per batch so each timing is independent\n", "        X_batch = X_batch.to(device)\n", "        # Store the probabilities, not the class predictions\n", "        probs0 = torch.softmax(model0(X_batch), dim=1)\n", "        probs1 = torch.softmax(model1(X_batch), dim=1)\n", "        probs2 = torch.softmax(model2(X_batch), dim=1)\n", "\n", "        # Concatenate the model outputs along the feature dimension\n", "        model_output = torch.cat((probs0, probs1, probs2), dim=1)\n", "\n", "        val_outputs = meta_model(model_output)\n", "\n", "        val_loss = criterion(val_outputs, y_batch)\n", "\n", "        _, predicted = torch.max(val_outputs.data, 1)\n", "        all_preds.extend(predicted.cpu().numpy())\n", "        all_targets.extend(y_batch.cpu().numpy())\n", "        all_outputs.extend(val_outputs.cpu().numpy())\n", "        batch_time = time.time() - start_time\n", "        batch_times.append(batch_time)\n", "        print(f\"Batch {batch_idx + 1}: Time = {batch_time:.7f} seconds\")\n", "\n", "    precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n", "    accuracy = accuracy_score(all_targets, all_preds)\n", "\n", "    print(f\"Val Loss (last batch): {val_loss.item():.4f}\")\n", "\n", "    print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}')\n", "\n", "average_batch_time = sum(batch_times) / len(batch_times)\n", "\n", "print(f\"Average Batch Time: {average_batch_time:.7f} seconds\")\n", "\n", "#torch.save(meta_model.state_dict(), '/home/evan/D1/project/code/meta_model/meta_model_3')" ] } ], "metadata": { "kernelspec": { "display_name": "Python (evan31818)", "language": "python", "name": "evan31818" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.19" } }, "nbformat": 4, "nbformat_minor": 5 }