{ "cells": [ { "cell_type": "markdown", "id": "7033b456-1f53-4201-bb2d-64a02e01ffa8", "metadata": {}, "source": [ "## Imports" ] }, { "cell_type": "code", "execution_count": 1, "id": "33731b76-6b28-4fd4-b193-036a317f3f28", "metadata": {}, "outputs": [], "source": [ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n", "from sklearn.utils.class_weight import compute_class_weight\n", "\n", "from torch.utils.data import TensorDataset, DataLoader\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import MinMaxScaler\n", "from collections import Counter\n", "from tqdm.notebook import tqdm\n", "\n", "import torch.nn.functional as F\n", "import matplotlib.pyplot as plt\n", "import torch.optim as optim\n", "import torch.nn as nn\n", "import seaborn as sns\n", "import pandas as pd\n", "import numpy as np\n", "import warnings\n", "import imblearn\n", "import optuna\n", "import torch\n", "import copy\n", "import json\n", "import os\n", "\n", "warnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ] }, { "cell_type": "markdown", "id": "5ade242d-aabb-4b16-b10a-0572f4198a95", "metadata": {}, "source": [ "## Load CLS-tokens and map 'incomplete-classes' to their respective full classes\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "b994e92d-effd-4575-b1db-27154b903ad2", "metadata": {}, "outputs": [], "source": [ "#X = torch.load('/home/evan/D1/project/code/raw_cls_tokens_features.pt', map_location=device)\n", "#X = torch.load('/home/evan/D1/project/code/stretched_cls_tokens.pt', map_location=device)\n", "#X = torch.load('/home/evan/D1/project/code/reflected_cls_tokens.pt', map_location=device)\n", "\n", "y = np.load('/home/evan/D1/project/code/cls_tokens_labels.npy')\n", "frame_counts = np.load('/home/evan/D1/project/code/frame_counts.npy')\n", "\n", "\n", "class_mapping = {0:0, 1: 1, 2: 2, 3: 1, 4: 2}\n", "\n", "for i, label in enumerate(y):\n", " y[i] = class_mapping[label]\n", "print('Done')" ] }, { "cell_type": "markdown", "id": "f1d19317-da86-4539-bafb-1d0b35f6e46a", "metadata": {}, "source": [ "## Helper functions to split sequence, extract and (add context frames - inactive)" ] }, { "cell_type": "code", "execution_count": 3, "id": "14c04222-3270-4adf-95b0-d92be6f771ed", "metadata": {}, "outputs": [], "source": [ "def split_sequences_np(arr):\n", " \"\"\"Find the difference between consecutive elements\"\"\"\n", " \n", " diffs = np.diff(arr)\n", " # Identify where the difference is not 1 (i.e., breaks in consecutive sequences)\n", " breaks = np.where(diffs != 1)[0] + 1\n", " \n", " # Use numpy split to divide the array at every break point\n", " return np.split(arr, breaks)\n", "\n", "def extract_data(games, boundaries, X, y, indices):\n", " X_split, y_split, idx_split = [], [], []\n", " for g in games:\n", " start = 0 if g == 0 else boundaries[g-1]\n", " end = boundaries[g]\n", " X_split.append(X[start:end])\n", " y_split.append(y[start:end])\n", " idx_split.extend(indices[start:end])\n", " return torch.cat(X_split), torch.cat(y_split), idx_split\n", "\n", "def add_context_frames(seq, total_frames, context_size=0, last_context_end=0):\n", " \"\"\"Adds context frames to the sequence, ensuring no overlap with other sequences or video boundaries\"\"\"\n", " seq_start_idx, seq_end_idx = seq[0], seq[-1]\n", " \n", " # Calculate start and end of context\n", " start_with_context = max(seq_start_idx - context_size, last_context_end + 1, 0)\n", " end_with_context = min(seq_end_idx + context_size, total_frames - 1)\n", " \n", " return start_with_context, end_with_context\n" ] }, { "cell_type": "markdown", "id": "7e946392-df5b-4773-9808-55cc58c57840", "metadata": {}, "source": [ "## Undersample tackle-sequences" ] }, { "cell_type": "code", "execution_count": null, "id": "b2f72e9f-9755-4f59-bf06-242d3e0696fc", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import torch\n", "\n", "def extract_tackle_sequences_and_undersample(X, y, frame_counts, device='cuda', max_tackles=500):\n", " new_X, new_y, kept_indices = [], [], []\n", " all_seq = []\n", " class_lengths = {} # Stores sequence lengths and counts by class\n", " total_sequences = 0 # Total count of all sequences\n", " tackle_count = 0 # Counter for class 0 sequences\n", " tackle_count2 = 0 # Counter for class 2 sequences\n", "\n", " start_idx = 0\n", " last_context_end = -1\n", "\n", " for z, count in enumerate(frame_counts):\n", " end_idx = start_idx + count\n", "\n", " key_frame_indices = np.where(y[start_idx:end_idx] != 0)[0] + start_idx\n", " seq_splitted = split_sequences_np(key_frame_indices)\n", "\n", " background_frame_indices = np.where(y[start_idx:end_idx] == 0)[0] + start_idx\n", " bg_seq_splitted = split_sequences_np(background_frame_indices)\n", "\n", " for bg_seq in bg_seq_splitted:\n", " if len(bg_seq) >= 70:\n", " if y[bg_seq[0]] == 0: # Check if the sequence is of class 0\n", " if tackle_count >= max_tackles:\n", " continue # Skip if maximum number of tackles has been reached\n", " else:\n", " tackle_count += 1\n", " start_random = np.random.randint(35, len(bg_seq) - 34)\n", " random_seq = bg_seq[start_random:start_random + 25]\n", " all_seq.append(random_seq)\n", " total_sequences += 1\n", "\n", " class_id = y[random_seq[0]]\n", " if class_id not in class_lengths:\n", " class_lengths[class_id] = {'lengths': [], 'count': 0}\n", " class_lengths[class_id]['lengths'].append(len(random_seq))\n", " class_lengths[class_id]['count'] += 1\n", "\n", " new_bg_seq_x = X[random_seq[0]:random_seq[-1] + 1]\n", " new_bg_seq_y = y[random_seq[0]:random_seq[-1] + 1]\n", "\n", " new_X.extend(new_bg_seq_x)\n", " new_y.extend(new_bg_seq_y)\n", "\n", " for seq in seq_splitted:\n", " if seq.size > 0:\n", " if y[seq[0]] == 2: # Check if the sequence is of class 0\n", " if tackle_count2 >= 280:\n", " continue # Skip if maximum number of tackles has been reached\n", " else:\n", " tackle_count2 += 1\n", "\n", " all_seq.append(seq)\n", " total_sequences += 1\n", "\n", " class_id = y[seq[0]]\n", " if class_id not in class_lengths:\n", " class_lengths[class_id] = {'lengths': [], 'count': 0}\n", " class_lengths[class_id]['lengths'].append(len(seq))\n", " class_lengths[class_id]['count'] += 1\n", "\n", " \n", " # This line will return the same values for start and end, as 0 context is added. background is sampled randomly instead in loop before this.\n", " context_start, context_end = add_context_frames(seq, end_idx, last_context_end=last_context_end)\n", " \n", " new_X.extend(X[context_start:context_end + 1])\n", " new_y.extend(y[context_start:context_end + 1])\n", "\n", " last_context_end = context_end\n", "\n", " start_idx = end_idx\n", "\n", " new_X = torch.stack(new_X)\n", " new_X = torch.tensor(new_X, dtype=torch.float32, device=device)\n", " new_y = torch.tensor(new_y, dtype=torch.long, device=device)\n", "\n", " return new_X, new_y, kept_indices, all_seq, class_lengths, total_sequences\n", "\n", "new_X, new_y, kept_indices, all_seq, class_lengths, total_sequences = extract_tackle_sequences_and_undersample(X, y, frame_counts)\n", "\n", "# Compute and print average lengths and counts for each class\n", "for class_id, info in class_lengths.items():\n", " average_length = np.mean(info['lengths'])\n", " print(f'Class {class_id} - Average Sequence Length: {average_length:.2f}, Count: {info[\"count\"]}')\n", "\n", "print(f'Total sequences processed: {total_sequences}')\n", "print(new_X.shape)\n", "print(new_y.shape)\n", "print(np.unique(new_y.cpu(), return_counts=True))\n" ] }, { "cell_type": "markdown", "id": "efe164b4-4541-4c9e-8ec4-af08d33062db", "metadata": {}, "source": [ "## Split games into either train, val, test to ensure no data leakage" ] }, { "cell_type": "code", "execution_count": 7, "id": "0fd784d3-b7d5-41d7-bc18-9429fa07f2f4", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import torch\n", "\n", "def split_data_by_game(X, y, kept_indices, frame_counts, split_ratio=(0.7, 0.15, 0.15), seed=None):\n", " # Set the random seed for reproducibility\n", " np.random.seed(seed)\n", " \n", " # Calculate the cumulative sum of frame counts to determine game boundaries\n", " boundaries = np.cumsum(frame_counts)\n", " \n", " # Create lists to hold data for train, validation, and test sets\n", " X_train, y_train, X_val, y_val, X_test, y_test = [], [], [], [], [], []\n", " idx_train, idx_val, idx_test = [], [], []\n", " \n", " # Shuffle the indices to randomize which games go into which set\n", " game_indices = np.arange(len(frame_counts))\n", " np.random.shuffle(game_indices)\n", " \n", " total_games = len(game_indices)\n", " num_train = int(total_games * split_ratio[0])\n", " num_val = int(total_games * split_ratio[1])\n", " \n", " print('Number of total games: ', total_games)\n", " print('Number of train games: ', num_train)\n", " print('Number of val games: ', num_val)\n", " \n", " \n", " # Assign games to train, validation, and test sets\n", " train_games = game_indices[:num_train]\n", " val_games = game_indices[num_train:num_train + num_val]\n", " test_games = game_indices[num_train + num_val:]\n", "\n", "\n", " # Extract data for each split\n", " X_train, y_train, idx_train = extract_data(train_games, boundaries, X, y, kept_indices)\n", " X_val, y_val, idx_val = extract_data(val_games, boundaries, X, y, kept_indices)\n", " X_test, y_test, idx_test = extract_data(test_games, boundaries, X, y, kept_indices)\n", " \n", " return (X_train, y_train, idx_train), (X_val, y_val, idx_val), (X_test, y_test, idx_test)\n", "\n", "# Set the random seed for reproducibility\n", "seed = 42\n", "\n", "split_ratio = (0.7, 0.15, 0.15)\n", "(X_train, y_train, idx_train), (X_val, y_val, idx_val), (X_test, y_test, idx_test) = split_data_by_game(new_X, new_y, kept_indices, frame_counts, split_ratio, seed)\n", "\n", "print(np.unique(y_train.cpu(), return_counts=True)[1])\n", "print(np.unique(y_val.cpu(), return_counts=True)[1])\n", "print(np.unique(y_test.cpu(), return_counts=True)[1])\n" ] }, { "cell_type": "markdown", "id": "894031df-d48f-4f99-ab80-1ad37cd021e3", "metadata": {}, "source": [ "## Create tensors" ] }, { "cell_type": "code", "execution_count": 8, "id": "40f2921a-5ea6-40fc-898b-58816a1887ab", "metadata": {}, "outputs": [], "source": [ "X_train = torch.tensor(X_train, dtype=torch.float32, device=device)\n", "y_train = torch.tensor(y_train, dtype=torch.long, device=device)\n", "\n", "X_val = torch.tensor(X_val, dtype=torch.float32, device=device)\n", "y_val = torch.tensor(y_val, dtype=torch.long, device=device)\n", "\n", "X_test = torch.tensor(X_test, dtype=torch.float32, device=device)\n", "y_test = torch.tensor(y_test, dtype=torch.long, device=device)" ] }, { "cell_type": "markdown", "id": "69eb8af6-0455-4f01-b7ca-19faf2027041", "metadata": {}, "source": [ "## Shifting Window - Dataset + DataLoaders " ] }, { "cell_type": "code", "execution_count": 143, "id": "e0f71c66-506d-428d-a4e5-008f3eb04fa4", "metadata": {}, "outputs": [], "source": [ "class FrameSequenceDataset(TensorDataset):\n", " def __init__(self, X, y, window_size):\n", " self.X = X\n", " self.y = y\n", " self.window_size = window_size\n", " \n", " def __len__(self):\n", " # Calculate how many complete non-overlapping windows fit into the dataset\n", " return (len(self.y) // self.window_size)\n", " \n", " def __getitem__(self, index):\n", " # Calculate the start and end of the sequence based on the window size and index\n", " start = index * self.window_size\n", " end = start + self.window_size\n", " X_seq = self.X[start:end]\n", " \n", " y_seq = self.y[start:end]\n", " return X_seq, y_seq\n", "\n", "window_size = 75 # 25 fps\n", "train_dataset = FrameSequenceDataset(X_train, y_train, window_size)\n", "val_dataset = FrameSequenceDataset(X_val, y_val, window_size)\n", "test_dataset = FrameSequenceDataset(X_test, y_test, window_size)\n", "\n", "\n", "train_loader = DataLoader(train_dataset, batch_size=256, shuffle=False)\n", "val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False)\n", "test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)" ] }, { "cell_type": "markdown", "id": "ac9b8966-22a8-45d1-8991-170ea1311b1d", "metadata": {}, "source": [ "## Sliding Window - Dataset + DataLoaders " ] }, { "cell_type": "code", "execution_count": 152, "id": "07938ef3-2bac-4ac3-805b-f01e0920540e", "metadata": {}, "outputs": [], "source": [ "class FrameSequenceDataset(TensorDataset):\n", " def __init__(self, X, y, window_size):\n", " self.X = X\n", " self.y = y\n", " self.window_size = window_size\n", " \n", " def __len__(self):\n", " # Adjust the length to allow for complete windows only\n", " return len(self.y) - self.window_size + 1\n", " \n", " def __getitem__(self, index):\n", " # Extract a window of data starting at `index`\n", " X_seq = self.X[index:index + self.window_size]\n", " y_seq = self.y[index:index + self.window_size]\n", " return X_seq, y_seq\n", "\n", "\n", "window_size = 25 # 25 fps\n", "train_dataset = FrameSequenceDataset(X_train, y_train, window_size)\n", "val_dataset = FrameSequenceDataset(X_val, y_val, window_size)\n", "test_dataset = FrameSequenceDataset(X_test, y_test, window_size)\n", "\n", "\n", "train_loader = DataLoader(train_dataset, batch_size=128, shuffle=False)\n", "val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)\n", "test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)" ] }, { "cell_type": "markdown", "id": "41cfd268-5fc0-40bd-84c6-6a2ce89abdf4", "metadata": { "tags": [] }, "source": [ "## TempTAC and Attention-module" ] }, { "cell_type": "code", "execution_count": 35, "id": "2005f77b-9722-41bf-ac86-ceaef2912bde", "metadata": {}, "outputs": [], "source": [ "class AttentionModule(nn.Module):\n", " def __init__(self, input_size, heads):\n", " super().__init__()\n", " self.self_attention = nn.MultiheadAttention(embed_dim=input_size, num_heads=heads)\n", " \n", " def forward(self, x):\n", " # x: [seq_len, batch_size, features]\n", " x = x.permute(1, 0, 2) # Adjusting for MultiheadAttention\n", " attn_output, _ = self.self_attention(x, x, x)\n", " return attn_output.permute(1, 0, 2) # Return same shape\n", "\n", "\n", "\n", "class TempTAC(nn.Module):\n", " def __init__(self, input_size, hidden_size, output_dim, num_layers, device, dropout_prob=0.5):\n", " super().__init__()\n", " self.lstm = nn.LSTM(input_size=input_size + output_dim, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, dropout=dropout_prob, bidirectional=True)\n", " self.linear = nn.Linear(hidden_size * 2, output_dim)\n", " self.dropout = nn.Dropout(dropout_prob)\n", " self.device = device\n", " self.attention = AttentionModule(input_size, heads=8) # Attention layer after LSTM\n", " self.prev_output_weight = nn.Parameter(torch.tensor(1.0)) # Initialize learnable weight for previous output influence\n", " self.hidden_size = hidden_size\n", " self.num_layers = num_layers\n", " self.output_dim = output_dim\n", " \n", " \n", " def init_hidden(self, batch_size):\n", " return (torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(self.device),\n", " torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(self.device))\n", "\n", " def forward(self, x, hidden, current_batch_size):\n", " batch_size, seq_len, _ = x.size()\n", " prev_output = torch.zeros(batch_size, 1, self.output_dim, device=self.device)\n", " \n", " # Apply attention to the output of the current timestep\n", " x = self.attention(x) # Remove sequence length dimension for attention\n", " \n", " outputs = []\n", " for t in range(seq_len):\n", " weighted_prev_output = prev_output * self.prev_output_weight # Apply learned weight to previous output\n", " \n", " lstm_input = torch.cat((x[:, t:t+1, :], weighted_prev_output), dim=-1)\n", " \n", " lstm_out, hidden = self.lstm(lstm_input, hidden)\n", " lstm_out = self.dropout(lstm_out)\n", " \n", " prev_output = self.linear(lstm_out) # Linear layer and unsqueeze to match dimensions\n", " outputs.append(prev_output)\n", "\n", " return torch.cat(outputs, dim=1), hidden" ] }, { "cell_type": "markdown", "id": "f08e28c7-2082-42e6-97dc-9144f44f5227", "metadata": { "tags": [] }, "source": [ "### Models parameter counter" ] }, { "cell_type": "code", "execution_count": 317, "id": "0616329c-5aaf-4681-a820-ea078f80042a", "metadata": {}, "outputs": [], "source": [ "def count_parameters(model):\n", " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n", "\n", "#model = EnhancedMultiLayerClassifier(1024, 3)\n", "print(\"Number of trainable parameters:\", count_parameters(model))\n", "print(\"Number of training instances:\", sum(np.unique(y_train.cpu(), return_counts=True)[1]))" ] }, { "cell_type": "markdown", "id": "cd58d059-8f30-4562-aac3-82762f4afbe7", "metadata": { "tags": [] }, "source": [ "## Training Loop" ] }, { "cell_type": "code", "execution_count": 133, "id": "aba45731-cd21-41bc-8812-a3c7f30f1b06", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "from sklearn.metrics import classification_report\n", "from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\n", "\n", "\n", "config = {'l1_lambda': 3.326004484093452e-05,\n", "'lr': 0.0004256783934105,\n", "'weight_decay': 1.4334181994254526e-05}\n", "\n", "\n", "def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10):\n", " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", " model.to(device)\n", " \n", " training_losses = []\n", " validation_losses = []\n", " \n", " best_val = 100\n", " break_margin = 2\n", " \n", " best_f1 = 0.0\n", "\n", " l1_lambda = config['l1_lambda']\n", "\n", " batch_size = 256\n", " current_lr = optimizer.param_groups[0]['lr']\n", " \n", " \n", " for epoch in range(num_epochs):\n", " model.train()\n", " total_train_loss = 0\n", " \n", " # Initialize hidden state for the first batch size\n", " initial_batch = next(iter(train_loader))\n", " \n", " initial_batch_size = initial_batch[0].size(0)\n", " \n", " hidden = model.init_hidden(initial_batch_size)\n", " clip_value = 1\n", " for inputs, labels in train_loader:\n", " inputs, labels = inputs.to(device), labels.to(device)\n", " current_batch_size = inputs.size(0)\n", " \n", " # Adjust hidden state size if current batch size differs from the initial batch size\n", " if current_batch_size != initial_batch_size:\n", " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n", " hidden[1][:, :current_batch_size, :].contiguous())\n", "\n", " else:\n", " adjusted_hidden = hidden\n", " \n", " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n", " \n", " hidden = tuple([h.data for h in adjusted_hidden]) # Detach hidden state from graph to prevent backprop through entire dataset\n", " \n", " optimizer.zero_grad()\n", " \n", " \n", " # Calculate loss for the entire sequence at once\n", " loss = criterion(outputs.transpose(1, 2), labels)\n", " \n", " #l1_norm = sum(torch.linalg.norm(p, 1) for p in model.parameters())\n", " l1_norm = sum(p.abs().sum() for p in model.parameters())\n", "\n", " loss = loss + l1_lambda * l1_norm\n", " \n", " loss.backward()\n", " torch.nn.utils.clip_grad_norm_(model.parameters(), clip_value)\n", " optimizer.step()\n", " total_train_loss += loss.item()\n", "\n", " average_train_loss = total_train_loss / len(train_loader)\n", " training_losses.append(average_train_loss) \n", " \n", " \n", " model.eval()\n", " total_val_loss = 0\n", " all_preds = []\n", " all_targets = []\n", " total, correct = 0, 0\n", " with torch.no_grad():\n", " initial_batch = next(iter(val_loader))\n", " initial_batch_size = initial_batch[0].size(0)\n", " hidden = model.init_hidden(initial_batch_size)\n", " \n", " for inputs, labels in val_loader:\n", " inputs, labels = inputs.to(device), labels.to(device)\n", " current_batch_size = inputs.size(0)\n", " \n", " # Adjust hidden state size if current batch size differs from the initial batch size\n", " if current_batch_size != initial_batch_size:\n", " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n", " hidden[1][:, :current_batch_size, :].contiguous())\n", " else:\n", " adjusted_hidden = hidden\n", " \n", " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n", " val_loss = criterion(outputs.transpose(1, 2), labels)\n", " total_val_loss += val_loss.item()\n", "\n", " for i in range(outputs.shape[1]):\n", " _, predicted = torch.max(outputs[:, i, :].data, 1)\n", " total += labels[:, i].size(0)\n", " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n", " \n", " all_preds.extend(predicted.cpu().numpy())\n", " all_targets.extend(labels[:, i].cpu().numpy())\n", "\n", " average_val_loss = total_val_loss / len(val_loader)\n", " validation_losses.append(average_val_loss) \n", " accuracy = 100 * correct / total\n", " scheduler.step(val_loss)\n", " \n", " precision, recall, f1, _ = precision_recall_fscore_support(np.array(all_targets).flatten(), np.array(all_preds).flatten(), average='weighted', zero_division=0)\n", " \n", " if f1 > best_f1:\n", " best_f1 = f1\n", " best_epoch = epoch\n", " best_model_state_dict = model.state_dict()\n", " best_all_targets = all_targets\n", " best_all_preds = all_preds\n", " \n", " if (int(epoch) % 5) == 0:\n", " print(f'Epoch [{epoch+1}/{num_epochs}], Training Loss: {average_train_loss:.4f}, Validation Loss: {average_val_loss:.4f}, Accuracy: {accuracy:.2f}%')\n", " print(f'Epoch [{epoch+1}/{num_epochs}] Current Learning Rate: {current_lr}')\n", " \n", " \n", " current_lr = optimizer.param_groups[0]['lr']\n", " \n", " if average_val_loss < best_val:\n", " best_val = average_val_loss\n", " time_to_break = 0\n", " print('New best val loss: ', average_val_loss)\n", " else:\n", " time_to_break += 1\n", " \n", " if time_to_break > break_margin:\n", " print('Break margin hit!')\n", " break\n", "\n", " \n", " return best_model_state_dict, best_all_targets, best_all_preds, validation_losses, training_losses\n", "\n", "\n", "model = TempTAC(input_size=1024, hidden_size=256, output_dim=3, num_layers=2, device=device, dropout_prob=0.5)\n", "model\n", "\n", "classes = np.unique(y_train.cpu())\n", "weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train.cpu().numpy())\n", "class_weights = torch.tensor(weights, dtype=torch.float32).to(device)\n", "\n", "optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n", "\n", "criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)\n", "scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=4, verbose=True)\n", "\n", "best_model_state_dict, all_targets, all_preds, validation_losses, training_losses = train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=400)\n", "\n", "all_targets = np.array(all_targets).flatten()\n", "all_preds = np.array(all_preds).flatten()\n", "\n", "model.load_state_dict(best_model_state_dict)\n", "\n", "print(classification_report(all_targets, all_preds, target_names=['background', 'tackle-live', 'tackle-replay']))\n" ] }, { "cell_type": "markdown", "id": "9d90b440-75a7-40d0-92f7-770973c52c48", "metadata": {}, "source": [ "## Optimize hyperparams" ] }, { "cell_type": "code", "execution_count": null, "id": "79d2914f-7397-42a8-9ba3-180b7aeba5cd", "metadata": {}, "outputs": [], "source": [ "import logging\n", "import sys\n", "import torch\n", "import pandas as pd\n", "import torch.nn as nn\n", "from sklearn.metrics import classification_report\n", "from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\n", "\n", "\n", "def objective(trial, train_loader, val_loader, num_epochs=10, device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")):\n", " \n", " model = TempTAC(input_size=1024, hidden_size=256, output_dim=3, num_layers=2, device=device, dropout_prob=0.5)\n", "\n", " \n", " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", " model.to(device)\n", " \n", " training_losses, validation_losses = [], []\n", " \n", " best_val = float('inf')\n", " break_margin = 3\n", " best_f1 = 0.0\n", " \n", " # Updated to use suggest_float with log=True\n", " lr = trial.suggest_float('lr', 1e-6, 1e-2, log=True)\n", " weight_decay = trial.suggest_float('weight_decay', 1e-6, 1e-3, log=True)\n", " l1_lambda = trial.suggest_float('l1_lambda', 0, 0.001)\n", " \n", " classes = np.unique(y_train.cpu())\n", " weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train.cpu().numpy())\n", " class_weights = torch.tensor(weights, dtype=torch.float32).to(device)\n", "\n", "\n", " optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n", " criterion = nn.CrossEntropyLoss(weight=class_weights).to(device) \n", " scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=4, verbose=True)\n", " \n", " batch_size = 256\n", " \n", " current_lr = optimizer.param_groups[0]['lr']\n", " \n", " \n", " for epoch in range(num_epochs):\n", " model.train()\n", " total_train_loss = 0\n", "\n", " # Initialize hidden state for the first batch size\n", " initial_batch = next(iter(train_loader))\n", " \n", " initial_batch_size = initial_batch[0].size(0)\n", " \n", " hidden = model.init_hidden(initial_batch_size)\n", " clip_value = 1\n", " for inputs, labels in train_loader:\n", " inputs, labels = inputs.to(device), labels.to(device)\n", " current_batch_size = inputs.size(0)\n", " \n", " # Adjust hidden state size if current batch size differs from the initial batch size\n", " if current_batch_size != initial_batch_size:\n", " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n", " hidden[1][:, :current_batch_size, :].contiguous())\n", "\n", " else:\n", " adjusted_hidden = hidden\n", " \n", " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n", " \n", " hidden = tuple([h.data for h in adjusted_hidden]) # Detach hidden state from graph to prevent backprop through entire dataset\n", " \n", " optimizer.zero_grad()\n", " \n", " # Calculate loss for the entire sequence at once\n", " loss = criterion(outputs.transpose(1, 2), labels)\n", " \n", " #l1_norm = sum(torch.linalg.norm(p, 1) for p in model.parameters())\n", " l1_norm = sum(p.abs().sum() for p in model.parameters())\n", "\n", " loss = loss + l1_lambda * l1_norm\n", " \n", " loss.backward()\n", " torch.nn.utils.clip_grad_norm_(model.parameters(), clip_value) # gradient clipping\n", " optimizer.step()\n", " total_train_loss += loss.item()\n", "\n", " average_train_loss = total_train_loss / len(train_loader)\n", " training_losses.append(average_train_loss) \n", " \n", " \n", " model.eval()\n", " total_val_loss = 0\n", " all_preds = []\n", " all_targets = []\n", " total, correct = 0, 0\n", " with torch.no_grad():\n", " initial_batch = next(iter(val_loader))\n", " initial_batch_size = initial_batch[0].size(0)\n", " hidden = model.init_hidden(initial_batch_size)\n", " \n", " for inputs, labels in val_loader:\n", " inputs, labels = inputs.to(device), labels.to(device)\n", " current_batch_size = inputs.size(0)\n", " \n", " # Adjust hidden state size if current batch size differs from the initial batch size\n", " if current_batch_size != initial_batch_size:\n", " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n", " hidden[1][:, :current_batch_size, :].contiguous())\n", " else:\n", " adjusted_hidden = hidden\n", " \n", " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n", " val_loss = criterion(outputs.transpose(1, 2), labels)\n", " total_val_loss += val_loss.item()\n", "\n", " for i in range(outputs.shape[1]):\n", " _, predicted = torch.max(outputs[:, i, :].data, 1)\n", " total += labels[:, i].size(0)\n", " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n", " \n", " all_preds.extend(predicted.cpu().numpy())\n", " all_targets.extend(labels[:, i].cpu().numpy())\n", "\n", " average_val_loss = total_val_loss / len(val_loader)\n", " validation_losses.append(average_val_loss) \n", " accuracy = 100 * correct / total\n", " scheduler.step(average_val_loss)\n", " \n", " precision, recall, f1, _ = precision_recall_fscore_support(np.array(all_targets).flatten(), np.array(all_preds).flatten(), average='weighted', zero_division=0)\n", " \n", " if f1 > best_f1:\n", " best_f1 = f1\n", " best_epoch = epoch\n", " best_model_state_dict = model.state_dict()\n", " best_all_targets = all_targets\n", " best_all_preds = all_preds\n", " \n", " current_lr = optimizer.param_groups[0]['lr']\n", " \n", " if val_loss < best_val:\n", " best_val = val_loss\n", " time_to_break = 0\n", " else:\n", " time_to_break += 1\n", " \n", " if time_to_break > break_margin:\n", " #print('Break margin hit!')\n", " break\n", " \n", " trial.report(f1, epoch)\n", " if trial.should_prune():\n", " raise optuna.TrialPruned()\n", " \n", " \n", "\n", " \n", " return f1\n", "\n", "\n", "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler())\n", "\n", "optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n", "\n", "study.optimize(lambda trial: objective(trial, train_loader, val_loader, num_epochs=1000, device=device), n_trials=100)\n", "\n", "\n", "# Save study data to a df\n", "study_data = study.trials_dataframe()\n", "\n", "# Save the df to a csv\n", "study_data.to_csv('study_data.csv', index=False)\n", "\n", "print(\"Best trial:\")\n", "trial = study.best_trial\n", "print(f\" Value: {trial.value}\")\n", "print(\" Params: \")\n", "for key, value in trial.params.items():\n", " print(f\" {key}: {value}\")" ] }, { "cell_type": "markdown", "id": "07e04c1d-2a4c-43f5-a0c8-1fb20f84959f", "metadata": {}, "source": [ "## Read saved optuna-study" ] }, { "cell_type": "code", "execution_count": 27, "id": "ce47bc92-5b33-4561-bbc9-edd10f6a86d7", "metadata": {}, "outputs": [], "source": [ "# Load the csv saved above\n", "study_data = pd.read_csv('study_data.csv')\n", "\n", "# Find the best trial (higher value is better = idxmax, min use idxmin)\n", "best_trial = study_data.loc[study_data['value'].idxmax()]\n", "\n", "# Print info about best trial\n", "print(\"Best Trial:\")\n", "print(best_trial)\n", "\n", "# Print parameters of the best trial\n", "print(\"\\nBest Trial Parameters:\")\n", "for param in [col for col in study_data.columns if col.startswith('params_')]:\n", " print(f\"{param.replace('params_', '')}: {best_trial[param]}\")\n" ] }, { "cell_type": "code", "execution_count": 1250, "id": "afae28e4-417f-48a5-baf9-7e1be9aa3e15", "metadata": {}, "outputs": [], "source": [ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n", "conf_matrix = confusion_matrix(np.array(all_targets).flatten(), np.array(all_preds).flatten())\n", "# conf_matrix = confusion_matrix(all_preds, all_targets)\n", "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n", "#labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n", "\n", " \n", "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n", "# plt.title('Confusion Matrix')\n", "plt.xlabel('Predicted Label')\n", "plt.ylabel('True Label')\n", "#plt.savefig(\"new_best_8_context_frames_window_25.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "fa1b431a-63e7-4eb1-bca0-1d1c33ba0101", "metadata": { "tags": [] }, "source": [ "## Plot loss" ] }, { "cell_type": "code", "execution_count": 134, "id": "1f6c292f-e5d9-4aad-a4df-fa0be0bb34d8", "metadata": {}, "outputs": [], "source": [ "scaler = MinMaxScaler()\n", "\n", "length=len(training_losses)\n", "\n", "# For better plotting, we normalize\n", "training_losses_normalized = scaler.fit_transform(np.array(training_losses).reshape(-1, 1)).flatten()\n", "validation_losses_normalized = scaler.fit_transform(np.array(validation_losses).reshape(-1, 1)).flatten()\n", "\n", "# Update df with normalized values\n", "df = pd.DataFrame({\n", " 'Epoch': np.arange(length),\n", " 'Train Loss': training_losses_normalized,\n", " 'Validation Loss': validation_losses_normalized\n", "})\n", "\n", "# Plot normalized losses\n", "plt.figure(figsize=(10, 6))\n", "plt.plot(df['Epoch'], df['Train Loss'], label='Train Loss (Normalized)', marker='o')\n", "plt.plot(df['Epoch'], df['Validation Loss'], label='Validation Loss (Normalized)', marker='o')\n", "plt.title('Training vs Validation Loss (Normalized)')\n", "plt.xlabel('Epoch')\n", "plt.ylabel('Normalized Loss')\n", "plt.legend()\n", "plt.grid(True)\n", "plt.tight_layout()\n", "\n", "# Save loss\n", "#np.save('Optimized-reflected-run/training_losses-undersampled-window-75-sliding-window.npy', training_losses)\n", "#np.save('Optimized-reflected-run/validation_losses-undersampled-window-75-sliding-window.npy', validation_losses)\n", "#plt.savefig(f'Optimized-reflected-run/train_val_loss-undersampled-window-75-sliding-window.pdf', format='pdf')\n", "\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "6d39fcce-8d36-42ed-ab48-2721e4d6dc20", "metadata": {}, "source": [ "## Run inference on test-set" ] }, { "cell_type": "code", "execution_count": 153, "id": "a03c40bf-6f14-4f48-a09a-5736ed2ce083", "metadata": {}, "outputs": [], "source": [ "# For loading prev models\n", "# model.load_state_dict(torch.load('Optimized-reflected-run/sliding-window/best_lstm-undersampled-window-25-sliding-window.pt'))\n", "\n", "def run_inference_test_set(): \n", " all_targets = []\n", " all_preds = []\n", "\n", " initial_batch_size = 256\n", "\n", " initial_batch = next(iter(test_loader))\n", "\n", " initial_batch_size = initial_batch[0].size(0)\n", "\n", " hidden = model.init_hidden(initial_batch_size)\n", "\n", " total_val_loss = 0\n", " total = 0\n", " correct = 0\n", "\n", " for i, (inputs, labels) in enumerate(test_loader):\n", " inputs, labels = inputs.to(device), labels.to(device)\n", " current_batch_size = inputs.size(0)\n", "\n", " # Adjust hidden state size if current batch size differs from the initial batch size\n", " if current_batch_size != initial_batch_size:\n", " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n", " hidden[1][:, :current_batch_size, :].contiguous())\n", " else:\n", " adjusted_hidden = hidden\n", "\n", "\n", "\n", " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n", "\n", " val_loss = criterion(outputs.transpose(1, 2), labels)\n", " total_val_loss += val_loss.item()\n", "\n", " for i in range(outputs.shape[1]):\n", " _, predicted = torch.max(outputs[:, i, :].data, 1)\n", " total += labels[:, i].size(0)\n", " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n", "\n", " all_preds.extend(predicted.cpu().numpy())\n", " all_targets.extend(labels[:, i].cpu().numpy())\n", " return all_preds, all_targets \n", "\n", "\n", "test_predictions, test_targets = run_inference_test_set()\n", "\n", "\n", "# Plot classification report\n", "print(classification_report(test_targets, test_predictions, target_names=['background', 'tackle-live', 'tackle-replay']))\n", "\n", "# Create CM\n", "conf_matrix = confusion_matrix(test_targets, test_predictions)\n", "\n", "\n", "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n", "\n", " \n", "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n", "# plt.title('Confusion Matrix')\n", "plt.xlabel('Predicted Label')\n", "plt.ylabel('True Label')\n", "#plt.savefig(\"Optimized-reflected-run/best_lstm-undersampled-window-75-sliding-window.pdf\", format=\"pdf\", bbox_inches=\"tight\") \n", "plt.show()\n", "\n", "#torch.save(model.state_dict(), 'Optimized-reflected-run/best_lstm-undersampled-window-75-sliding-window.pt')" ] }, { "cell_type": "markdown", "id": "e850e4dd-72e2-4c94-8938-483787968faf", "metadata": {}, "source": [ "## Print ROC-curves" ] }, { "cell_type": "code", "execution_count": 154, "id": "bc0464b9-c3d7-4e4f-b821-7cca0b824b97", "metadata": {}, "outputs": [], "source": [ "from sklearn.metrics import roc_curve, auc\n", "from sklearn.preprocessing import label_binarize\n", "\n", "class_names = ['background', 'tackle-live', 'tackle-replay']\n", "n_classes = len(class_names)\n", "\n", "# binarize the targets and predictions for roc curve computation\n", "test_targets_bin = label_binarize(test_targets, classes=[0, 1, 2])\n", "test_predictions_bin = label_binarize(test_predictions, classes=[0, 1, 2])\n", "\n", "# roc curve and auc for each class\n", "fpr = {}\n", "tpr = {}\n", "roc_auc = {}\n", "\n", "for i in range(n_classes):\n", " fpr[i], tpr[i], _ = roc_curve(test_targets_bin[:, i], test_predictions_bin[:, i])\n", " roc_auc[i] = auc(fpr[i], tpr[i])\n", "\n", "# plot rpc curves for each class\n", "plt.figure(figsize=(8, 6))\n", "for i in range(n_classes):\n", " plt.plot(fpr[i], tpr[i], label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})')\n", " \n", " \n", "plt.plot([0, 1], [0, 1], 'k--')\n", "plt.xlim([0.0, 1.0])\n", "plt.grid(visible=True)\n", "plt.ylim([0.0, 1.05])\n", "plt.xlabel('False Positive Rate')\n", "plt.ylabel('True Positive Rate')\n", "plt.title('Multi-class ROC Curve')\n", "plt.legend(loc='lower right')\n", "#plt.savefig(\"Optimized-reflected-run/roc-curve-25-sliding-window.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "796b2578-bc9d-40f9-80be-0dafa79f5920", "metadata": {}, "source": [ "## Baseline class" ] }, { "cell_type": "code", "execution_count": 166, "id": "b1802e66", "metadata": {}, "outputs": [], "source": [ "class Simple1DCNN(nn.Module):\n", " def __init__(self, num_channels, num_classes):\n", " super(Simple1DCNN, self).__init__()\n", " self.conv1 = nn.Conv1d(in_channels=num_channels, out_channels=32, kernel_size=3, padding=1)\n", " self.fc1 = nn.Linear(32 * (sequence_length // 2), num_classes) # adjust based on pooling and input length\n", "\n", " def forward(self, x):\n", " # x.shape = (batch, channels, sequence_length)\n", " x = F.relu(self.conv1(x))\n", " x = F.max_pool1d(x, kernel_size=2)\n", " \n", " # flatten to fully connected layer\n", " x = x.view(x.size(0), -1)\n", " \n", " x = self.fc1(x)\n", " return x" ] }, { "cell_type": "markdown", "id": "2b6936c0-3ced-4a97-b7d7-f4c86afdc5d9", "metadata": {}, "source": [ "## Loss plotting" ] }, { "cell_type": "code", "execution_count": 245, "id": "73975c4d-ca4c-454f-9a62-9849037c7426", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import seaborn as sns\n", "import matplotlib.pyplot as plt\n", "\n", "epochs = len(training_losses) # number of epochs\n", "\n", "df = pd.DataFrame({\n", " 'Epoch': [i for i in range(epochs)],\n", " 'Train Loss': training_losses,\n", " 'Validation Loss': validation_losses\n", "})\n", "\n", "plt.figure(figsize=(10, 6))\n", "plt.plot(df['Epoch'], df['Train Loss'], label='Train Loss', marker='o')\n", "plt.plot(df['Epoch'], df['Validation Loss'], label='Validation Loss', marker='o')\n", "plt.title('Training vs Validation Loss')\n", "plt.xlabel('Epoch')\n", "plt.ylabel('Loss')\n", "plt.legend()\n", "plt.grid(True)\n", "plt.tight_layout()\n", "\n", "plt.savefig(f'baseline_cnn_training_validation_loss_plot.pdf', format='pdf')\n", "plt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python (evan31818)", "language": "python", "name": "evan31818" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.19" } }, "nbformat": 4, "nbformat_minor": 5 }