Classification with 3D Trajectories

Note

Last updated: 5 PM, 9/27/2020

import pickle as pkl
import numpy as np
import pandas as pd

import torch
from torch import nn, optim
from torch.utils.data import Dataset, TensorDataset, DataLoader
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix

import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
pio.renderers.default = 'notebook'

from IPython.display import display, clear_output
from scipy import interpolate
from sympy import lambdify, bspline_basis_set, symbols
from sympy.abc import t
import warnings
warnings.filterwarnings('ignore')
# Load the preprocessed trajectory DataFrames produced by an earlier step.
# Columns used below include: pid, clip, clip_name, clip_len, x, y, z.
with open('trajectories_updated.pkl', 'rb') as f:
    data = pkl.load(f)
traj_df = data['traj_df']        # per-point trajectory rows
mean_df = data['mean_df']        # not used in this section
clipdata_df = data['clipdata_df']  # one row per clip; provides 'clip_name'

Instead of using B-spline basis function coefficients, we can also try to perform classification on the original 3D trajectories, with the (x, y, z) coordinates as inputs. Since this uses the original data, it should achieve higher classification accuracy than using coefficients, at the expense of longer training time.

We want the inputs to be the same length. Since the trajectories are all of different lengths, we’ll pad the trajectories with zeros at the beginning.

# find smoothed
num_coeff = 50
max_clip_len = max(traj_df['clip_len'])+1
labels_np = traj_df.drop_duplicates(subset=['pid','clip'], keep='first')['clip'].to_numpy()

# Collect one flattened, zero-padded row per (clip, pid) trajectory and stack
# once at the end -- avoids the quadratic cost of np.append/np.vstack in a loop.
rows = []
for clip_name in clipdata_df['clip_name']:
    print(clip_name)
    temp_df = traj_df[traj_df.clip_name==clip_name]
    clip_len = temp_df['clip_len'].iloc[0]
    for pid in range(1, max(temp_df.pid)+1):
        coords = temp_df[temp_df.pid==pid][['x','y','z']].to_numpy()
        # Least-squares cubic B-spline fit with fixed knots (task=-1), then
        # resample the smooth curve at the original parameter values.
        tck, u = interpolate.splprep(
            coords.T, k=3, u=np.linspace(0,1,clip_len),
            t=np.concatenate((np.array([0,0,0]), np.linspace(0,1,num_coeff-2), np.array([1,1,1]))),
            task=-1)
        smooth = np.asarray(interpolate.splev(u, tck, der=0))  # shape (3, clip_len)
        flat = smooth.T.ravel()  # interleave as [x0,y0,z0, x1,y1,z1, ...]
        # left-pad with zeros so every row has length max_clip_len*3
        rows.append(np.concatenate((np.zeros((max_clip_len-clip_len)*3), flat)))

data_np = np.vstack(rows) if rows else np.empty((0, max_clip_len*3))

clear_output()

# save numpy arrays
with open('trajectories_smoothed.pkl', 'wb') as f:
    pkl.dump({'data':data_np, 'labels':labels_np}, f, protocol=4)
    


# find unsmoothed
num_coeff = 50
max_clip_len = max(traj_df['clip_len'])+1
labels_np = traj_df.drop_duplicates(subset=['pid','clip'], keep='first')['clip'].to_numpy()

# Same padding layout as the smoothed variant, but using the raw coordinates.
# Rows are collected in a list and stacked once -- avoids quadratic
# np.append/np.vstack growth inside the loop.
rows = []
for clip_name in clipdata_df['clip_name']:
    print(clip_name)
    temp_df = traj_df[traj_df.clip_name==clip_name]
    clip_len = temp_df['clip_len'].iloc[0]
    for pid in range(1, max(temp_df.pid)+1):
        coords = temp_df[temp_df.pid==pid][['x','y','z']].to_numpy()  # (n, 3)
        flat = coords[:clip_len].ravel()  # interleave as [x0,y0,z0, x1,y1,z1, ...]
        # left-pad with zeros so every row has length max_clip_len*3
        rows.append(np.concatenate((np.zeros((max_clip_len-clip_len)*3), flat)))

data_np = np.vstack(rows) if rows else np.empty((0, max_clip_len*3))

clear_output()

# save numpy arrays
with open('trajectories_unsmoothed.pkl', 'wb') as f:
    pkl.dump({'data':data_np, 'labels':labels_np}, f, protocol=4)
# Load the padded trajectory arrays back in and wrap them as PyTorch datasets.
# np.int was removed in NumPy 1.24; use explicit np.int64, which is also the
# dtype nn.CrossEntropyLoss expects for class targets.
with open('trajectories_smoothed.pkl', 'rb') as f:
    data = pkl.load(f)
smoothed_data = data['data']
labels = data['labels']
smoothed_dataset = TensorDataset(torch.from_numpy(smoothed_data.astype(np.float32)),
                                 torch.from_numpy(labels.astype(np.int64)))

with open('trajectories_unsmoothed.pkl', 'rb') as f:
    data = pkl.load(f)
unsmoothed_data = data['data']
labels = data['labels']
unsmoothed_dataset = TensorDataset(torch.from_numpy(unsmoothed_data.astype(np.float32)),
                                   torch.from_numpy(labels.astype(np.int64)))

Multilayer Perceptron (MLP)

class MLP(nn.Module):
    """Fully-connected classifier.

    Architecture: Linear(k_input, k_hidden), then k_layers
    Linear(k_hidden, k_hidden) layers -- each hidden layer is followed by
    ReLU and Dropout -- and a final Linear(k_hidden, k_output) producing
    raw logits (intended for nn.CrossEntropyLoss).
    """

    def __init__(self, k_input, k_hidden, k_layers, k_output, dropout=0):
        super().__init__()
        self.k_layers = k_layers
        layers = [nn.Linear(k_input, k_hidden)]
        layers.extend(nn.Linear(k_hidden, k_hidden) for _ in range(k_layers))
        layers.append(nn.Linear(k_hidden, k_output))
        self.fc = nn.ModuleList(layers)  # k_layers + 2 Linear layers in total
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # all but the last layer: ReLU activation then dropout
        for fc in self.fc[:-1]:
            x = self.dropout(F.relu(fc(x)))
        # final layer: plain logits, no activation
        return self.fc[-1](x)
def train(model, criterion, optimizer, dataloader, device):
    """Run one training epoch and return (mean loss, accuracy) over the dataset."""
    model.train()
    model = model.to(device)
    criterion = criterion.to(device)

    loss_sum = 0.0
    correct_sum = 0.0

    for batch_x, batch_y in dataloader:
        batch_x = batch_x.to(device)
        batch_y = batch_y.to(device)

        logits = model(batch_x)
        batch_loss = criterion(logits, batch_y)

        # standard step: clear grads, backprop, update
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        _, predicted = torch.max(logits, 1)
        # weight the batch-mean loss by batch size so the epoch mean is exact
        loss_sum += batch_loss.detach() * batch_x.size(0)
        correct_sum += torch.sum(predicted == batch_y.data)

    n = len(dataloader.dataset)
    return loss_sum / n, correct_sum.float() / n



def evaluate(model, criterion, dataloader, device):
    """Compute (mean loss, accuracy) of `model` over `dataloader` without gradients."""
    model.eval()
    model = model.to(device)
    criterion = criterion.to(device)

    loss_sum = 0.0
    correct_sum = 0.0

    with torch.no_grad():
        for batch_x, batch_y in dataloader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            logits = model(batch_x)
            batch_loss = criterion(logits, batch_y)

            _, predicted = torch.max(logits, 1)
            # weight the batch-mean loss by batch size so the epoch mean is exact
            loss_sum += batch_loss.detach() * batch_x.size(0)
            correct_sum += torch.sum(predicted == batch_y.data)

    n = len(dataloader.dataset)
    return loss_sum / n, correct_sum.float() / n



# evaluate and return a confusion matrix
def evaluate_cm(model, criterion, dataloader, device):
    """Evaluate `model` on `dataloader` and also build a confusion matrix.

    Returns (mean loss, accuracy, confusion matrix); the matrix follows the
    sklearn convention (rows = true labels, columns = predictions).
    """
    model.eval()
    model = model.to(device)
    criterion = criterion.to(device)
    
    running_loss = 0.0
    running_corrects = 0.0
    # np.int was removed in NumPy 1.24 -- use the builtin int dtype instead
    y_true = np.zeros(0, dtype=int)
    y_pred = np.zeros(0, dtype=int)
    
    with torch.no_grad():
        for inputs, labels in dataloader:

            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            _, preds = torch.max(outputs, 1)
            # weight the batch-mean loss by batch size so the epoch mean is exact
            running_loss += loss.detach() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            y_true = np.append(y_true, labels.cpu().numpy())
            y_pred = np.append(y_pred, preds.cpu().numpy())
        
    epoch_loss = running_loss / len(dataloader.dataset)
    epoch_acc = running_corrects.float() / len(dataloader.dataset)
    
    # confusion matrix over the full evaluation set
    cm = confusion_matrix(y_true, y_pred)
    
    return epoch_loss, epoch_acc, cm
def grid_search_mlp(train_dl, val_dl, num_epochs, k_hidden, k_layers, scoring, criterion, device, dropout=0):
    """Grid search over (k_hidden, k_layers) for the MLP.

    Each cell is scored by the mean validation accuracy over the last 5
    training epochs. The 'scoring' argument is intentionally ignored here
    (accuracy only) for speed.
    """
    val_scores = np.zeros((len(k_hidden), len(k_layers)))

    for row, hidden in enumerate(k_hidden):
        for col, layers in enumerate(k_layers):
            model = MLP(783, hidden, layers, 15, dropout)
            optimizer = optim.Adam(model.parameters())

            # burn-in epochs: train without evaluating
            for _ in range(num_epochs - 5):
                train(model, criterion, optimizer, train_dl, device)

            # score over the final 5 epochs
            score_sum = 0
            for _ in range(5):
                train(model, criterion, optimizer, train_dl, device)
                _, val_acc = evaluate(model, criterion, val_dl, device)
                score_sum += val_acc

            val_scores[row, col] = score_sum / 5

    return val_scores
def nested_cv_mlp(dataset, outer_kfold, num_outer_epochs, inner_kfold, num_inner_epochs, batch_size, k_hidden, k_layers, k_seed, scoring, device, opt_title, opt_caption, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0):
    """Nested cross-validation for the MLP, followed by three plotly figures.

    Outer CV: trainval/test split; the best (k_hidden, k_layers) pair found
    by the inner-CV grid search is retrained on trainval and scored on test.
    Inner CV: train/val split driving grid_search_mlp.
    Plots: (1) hyperparameter heatmap with a val-acc / selection-frequency
    toggle, (2) mean loss/accuracy curves across outer folds, (3) row-normalized
    mean confusion matrix.

    NOTE(review): the MLP input width 783 and output size 15 are hard-coded
    to this dataset (presumably 261 timesteps * 3 coords and 15 clips) --
    confirm before reusing elsewhere.
    """
    
    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
    # NESTED CV
    loss_acc_df = pd.DataFrame(columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])
    x, y = dataset[:]
    # NOTE(review): np.int was removed in NumPy 1.24; newer NumPy needs int/np.int64 here
    cm = np.zeros((torch.max(y).item()+1,torch.max(y).item()+1), dtype=np.int)
    total_val_score = np.zeros((len(k_hidden),len(k_layers)))
    total_freq = np.zeros((len(k_hidden),len(k_layers)), dtype=np.int)
    # names are rebound from fold counts to KFold objects here
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    inner_kfold = KFold(n_splits=inner_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()
    
    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):
        
        print(f'Outer fold {current_outer_fold}/{outer_kfold.n_splits}')
        
        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)

        
        
        # Inner CV (train/val split)
        current_inner_fold = 1
        inner_val_score = np.zeros((len(k_hidden),len(k_layers)))

        for train_index, val_index in inner_kfold.split(x_trainval, y_trainval):
            
            print(f'  Inner fold {current_inner_fold}/{inner_kfold.n_splits}')
            
            train_data = TensorDataset(x_trainval[train_index], y_trainval[train_index])
            val_data = TensorDataset(x_trainval[val_index], y_trainval[val_index])
            train_dl = DataLoader(train_data, batch_size=batch_size, shuffle=True)
            val_dl = DataLoader(val_data, batch_size=batch_size, shuffle=True)

            # accumulate grid-search scores across inner folds
            fold_val_score = grid_search_mlp(train_dl, val_dl, num_inner_epochs, k_hidden, k_layers, scoring, criterion, device, dropout)
            inner_val_score = np.add(inner_val_score, fold_val_score)

            current_inner_fold += 1

        # pick the (k_hidden, k_layers) cell with the best summed inner score
        if (scoring=='acc' or scoring=='accuracy'):
            best_params = np.unravel_index(np.argmax(inner_val_score, axis=None), inner_val_score.shape)
        elif (scoring=='loss'):
            best_params = np.unravel_index(np.argmin(inner_val_score, axis=None), inner_val_score.shape)
        total_freq[best_params[0],best_params[1]] += 1
        
    
    
        # retrain the selected model on the full trainval split, score on test
        model = MLP(783,k_hidden[best_params[0]],k_layers[best_params[1]],15,dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)
        
        for epoch in range(1,num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            # only the final epoch contributes to the confusion matrix
            if (epoch==num_outer_epochs):
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            # NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
            loss_acc_df = loss_acc_df.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()},
                ignore_index=True)
                
        current_outer_fold += 1
        total_val_score = np.add(total_val_score, inner_val_score)

    
    
    # PLOT HYPERPARAMETERS
    clear_output()
    avg_val_score = total_val_score / (outer_kfold.n_splits * inner_kfold.n_splits)
    avg_freq = total_freq / outer_kfold.n_splits
    plotly_data = []
    # trace 0: mean validation accuracy heatmap (visible by default)
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_val_score,axis=0), y=np.flip(np.array(k_hidden)), x=k_layers, colorscale='blues',
                   zmin=np.amin(avg_val_score), zmax=np.amax(avg_val_score),
                   name='Validation<br>Accuracy',
                   customdata=np.flip(avg_freq,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_layers: %{x}<br>'+scoring+': %{z:.5f}<br>rel freq: %{customdata:.3f}',
                   visible=True))
    # trace 1: selection-frequency heatmap (toggled via the buttons below)
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_freq,axis=0), y=np.flip(np.array(k_hidden)), x=k_layers, colorscale='blues',
                   zmin=np.amin(avg_freq), zmax=np.amax(avg_freq),
                   name='Relative<br>Frequency',
                   customdata=np.flip(avg_val_score,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_layers: %{x}<br>'+scoring+': %{customdata:.5f}<br>rel freq: %{z:.3f}',
                   visible=False))
    
    # cell-value annotations; text flips to white on dark (above-midpoint) cells
    acc_annotations = []
    mid = (np.amin(avg_val_score)+np.amax(avg_val_score))/2
    for i, row in enumerate(avg_val_score.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            acc_annotations.append({
                'x': k_layers[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,5)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    acc_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    freq_annotations = []
    mid = (np.amin(avg_freq)+np.amax(avg_freq))/2
    for i, row in enumerate(avg_freq.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            freq_annotations.append({
                'x': k_layers[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    freq_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':100},
        xaxis={'title': 'k_layers', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'k_hidden', 'fixedrange':True, 'type':'category'},
        title={'text':opt_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=acc_annotations,
        updatemenus=[{'type':'buttons',
                      'direction':'left',
                      'pad':{'l':0, 'r':0, 't':0, 'b':0},
                      'xanchor':'left',
                      'yanchor':'top',
                      'x':0,
                      'y':1.055,
                      'buttons':[
                          {'label':'Val. Acc.',
                           'method': 'update',
                           'args':[{'visible': [True,False]},
                                   {'annotations': acc_annotations}]},
                          {'label':'Rel. Freq.',
                           'method': 'update',
                           'args':[{'visible': [False,True]},
                                   {'annotations': freq_annotations}]}
                      ]}])

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
    
    
    
    # PLOT LOSS / ACCURACY
    # per-epoch mean/std across outer folds; after the transforms the frame is
    # reduced to fold 1 since the aggregate columns repeat across folds
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]
    
    fig = make_subplots(rows=2, cols=1, 
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'), 
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])
    
    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'
    
        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std) -- shaded +/- 1 sd band drawn as a closed polygon
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std) -- shaded +/- 1 sd band
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800, 
        height=800, 
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)
    
    
    
    
    
    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1
    
    # reads clipdata_df from the enclosing (module) scope for class names
    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')
    
    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Run the full nested-CV experiment on the raw (unsmoothed) padded trajectories.
nested_cv_mlp(
    dataset=unsmoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[75,100,150],
    k_layers=[1,2,3],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    device=torch.device('cpu'), #torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='MLP Hyperparameter Optimization (Unsmoothed)',
    opt_caption='<b>Fig. 1.</b> MLP hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='MLP Loss and Accuracy (Unsmoothed)',
    lossacc_caption='<b>Fig. 2.</b> MLP mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='MLP Confusion Matrix (Unsmoothed)',
    cm_caption='<b>Fig. 3.</b> Mean confusion matrix for trained MLP model across outer folds of 10-fold nested cross validation.'
)

We can look at how smoothing trajectories using B-splines affects classification.

# Repeat the same nested-CV experiment on the B-spline-smoothed trajectories.
nested_cv_mlp(
    dataset=smoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[75,100,150],
    k_layers=[1,2,3],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    device=torch.device('cpu'), #torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='MLP Hyperparameter Optimization (Smoothed)',
    opt_caption='<b>Fig. 4.</b> MLP hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='MLP Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 5.</b> MLP mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='MLP Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 6.</b> Mean confusion matrix for trained MLP model across outer folds of 10-fold nested cross validation.'
)

The MLP learned quickly and accuracy remained fairly constant after only 20 epochs. The smoothed trajectories yielded slightly higher accuracy, but the difference is not significant.

Compared to other models, the MLP performed surprisingly well. Using trajectory coordinates, it also outperformed the basis spline coefficients by about 4%. This is significant, but basis spline coefficients still seem like a plausible input to classification that maintains most of the important information.

Long Short-Term Memory (LSTM)

class LSTM(nn.Module):
    """LSTM classifier over flattened coordinate trajectories.

    The input is a flat vector of interleaved coordinates; it is reshaped to
    (batch, seq_len, k_input) before the recurrence. The final hidden state
    of the last layer feeds a linear layer producing raw logits.
    """

    def __init__(self, k_input, k_hidden, k_layers, k_output, dropout=0):
        super().__init__()
        # NOTE: nn.LSTM applies dropout only between layers, so it is a no-op
        # (and warns) when k_layers == 1
        self.lstm = nn.LSTM(k_input, k_hidden, batch_first=True, num_layers=k_layers, dropout=dropout)
        self.fc = nn.Linear(k_hidden, k_output)

    def forward(self, x):
        batch_size = x.shape[0]
        # infer seq_len from the input width instead of hard-coding 261
        # (previously fixed to max_clip_len for this dataset)
        x = x.view(batch_size, -1, self.lstm.input_size)  # (batch, seq_len, input_size)
        out, (hidden, cell) = self.lstm(x)
        # hidden[-1] is the last layer's final hidden state, shape (batch, k_hidden)
        return self.fc(hidden[-1])
def grid_search_lstm(train_dl, val_dl, num_epochs, k_hidden, k_layers, scoring, criterion, device, dropout):
    """Grid search over (k_hidden, k_layers) for the LSTM.

    Each cell is scored by the mean validation accuracy over the last 5
    training epochs. The 'scoring' argument is intentionally ignored here
    (accuracy only) for speed.
    """
    val_scores = np.zeros((len(k_hidden), len(k_layers)))

    for row, hidden in enumerate(k_hidden):
        for col, layers in enumerate(k_layers):
            model = LSTM(3, hidden, layers, 15, dropout=dropout)
            optimizer = optim.Adam(model.parameters())

            # burn-in epochs: train without evaluating
            for _ in range(num_epochs - 5):
                train(model, criterion, optimizer, train_dl, device)

            # score over the final 5 epochs
            score_sum = 0
            for _ in range(5):
                train(model, criterion, optimizer, train_dl, device)
                _, val_acc = evaluate(model, criterion, val_dl, device)
                score_sum += val_acc

            val_scores[row, col] = score_sum / 5

    return val_scores
def nested_cv_lstm(dataset, outer_kfold, num_outer_epochs, inner_kfold, num_inner_epochs, batch_size, k_hidden, k_layers, k_seed, scoring, device, opt_title, opt_caption, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0):
    """Nested cross-validation for the LSTM classifier, with Plotly reporting.

    The inner CV (grid_search_lstm) selects k_hidden/k_layers per outer fold;
    the winning configuration is retrained on the outer trainval split and
    scored on the outer test split. Produces three figures: (1) heatmaps of
    mean validation accuracy and selection frequency over the hyperparameter
    grid, (2) mean train/test loss and accuracy per epoch across outer folds,
    (3) a row-normalized confusion matrix accumulated at the final epoch of
    each outer fold. Relies on module-level train/evaluate/evaluate_cm
    helpers, the LSTM class, and clipdata_df for class labels.
    """
    
    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
    # NESTED CV
    loss_acc_df = pd.DataFrame(columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])
    x, y = dataset[:]
    cm = np.zeros((torch.max(y).item()+1,torch.max(y).item()+1), dtype=np.int) # NOTE(review): np.int was removed in NumPy>=1.24; use int
    total_val_score = np.zeros((len(k_hidden),len(k_layers)))
    total_freq = np.zeros((len(k_hidden),len(k_layers)), dtype=np.int) # NOTE(review): np.int removed in NumPy>=1.24
    # NOTE: the integer outer_kfold/inner_kfold parameters are rebound here
    # to sklearn KFold splitter objects.
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    inner_kfold = KFold(n_splits=inner_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()
    
    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):
        
        print(f'Outer fold {current_outer_fold}/{outer_kfold.n_splits}')
        
        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)

        
        
        # Inner CV (train/val split)
        current_inner_fold = 1
        inner_val_score = np.zeros((len(k_hidden),len(k_layers)))

        for train_index, val_index in inner_kfold.split(x_trainval, y_trainval):
            
            print(f'  Inner fold {current_inner_fold}/{inner_kfold.n_splits}')
            
            train_data = TensorDataset(x_trainval[train_index], y_trainval[train_index])
            val_data = TensorDataset(x_trainval[val_index], y_trainval[val_index])
            train_dl = DataLoader(train_data, batch_size=batch_size, shuffle=True)
            val_dl = DataLoader(val_data, batch_size=batch_size, shuffle=True)

            # Accumulate per-fold grid-search scores across inner folds.
            fold_val_score = grid_search_lstm(train_dl, val_dl, num_inner_epochs, k_hidden, k_layers, scoring, criterion, device, dropout)
            inner_val_score = np.add(inner_val_score, fold_val_score)

            current_inner_fold += 1

        # Pick the best (k_hidden, k_layers) cell from the summed inner scores.
        if (scoring=='acc' or scoring=='accuracy'):
            best_params = np.unravel_index(np.argmax(inner_val_score, axis=None), inner_val_score.shape)
        elif (scoring=='loss'):
            best_params = np.unravel_index(np.argmin(inner_val_score, axis=None), inner_val_score.shape)
        total_freq[best_params[0],best_params[1]] += 1
        
    
    
        # Retrain the selected configuration on the full trainval split.
        model = LSTM(3,k_hidden[best_params[0]],k_layers[best_params[1]],15,dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)
        
        for epoch in range(1,num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            # Only collect the confusion matrix at the final epoch.
            if (epoch==num_outer_epochs):
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            # NOTE(review): DataFrame.append was removed in pandas>=2.0; use pd.concat
            loss_acc_df = loss_acc_df.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()},
                ignore_index=True)
                
        current_outer_fold += 1
        total_val_score = np.add(total_val_score, inner_val_score)

    
    
    # PLOT HYPERPARAMETERS
    clear_output()
    avg_val_score = total_val_score / (outer_kfold.n_splits * inner_kfold.n_splits)
    avg_freq = total_freq / outer_kfold.n_splits
    plotly_data = []
    # Two stacked heatmaps toggled by buttons: validation accuracy vs the
    # relative frequency each cell was chosen as best.
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_val_score,axis=0), y=np.flip(np.array(k_hidden)), x=k_layers, colorscale='blues',
                   zmin=np.amin(avg_val_score), zmax=np.amax(avg_val_score),
                   name='Validation<br>Accuracy',
                   customdata=np.flip(avg_freq,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_layers: %{x}<br>'+scoring+': %{z:.5f}<br>rel freq: %{customdata:.3f}',
                   visible=True))
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_freq,axis=0), y=np.flip(np.array(k_hidden)), x=k_layers, colorscale='blues',
                   zmin=np.amin(avg_freq), zmax=np.amax(avg_freq),
                   name='Relative<br>Frequency',
                   customdata=np.flip(avg_val_score,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_layers: %{x}<br>'+scoring+': %{customdata:.5f}<br>rel freq: %{z:.3f}',
                   visible=False))
    
    # Cell-value annotations; white text on dark cells, black on light.
    acc_annotations = []
    mid = (np.amin(avg_val_score)+np.amax(avg_val_score))/2
    for i, row in enumerate(avg_val_score.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            acc_annotations.append({
                'x': k_layers[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,5)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    acc_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    freq_annotations = []
    mid = (np.amin(avg_freq)+np.amax(avg_freq))/2
    for i, row in enumerate(avg_freq.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            freq_annotations.append({
                'x': k_layers[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    freq_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':100},
        xaxis={'title': 'k_layers', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'k_hidden', 'fixedrange':True, 'type':'category'},
        title={'text':opt_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=acc_annotations,
        updatemenus=[{'type':'buttons',
                      'direction':'left',
                      'pad':{'l':0, 'r':0, 't':0, 'b':0},
                      'xanchor':'left',
                      'yanchor':'top',
                      'x':0,
                      'y':1.055,
                      'buttons':[
                          {'label':'Val. Acc.',
                           'method': 'update',
                           'args':[{'visible': [True,False]},
                                   {'annotations': acc_annotations}]},
                          {'label':'Rel. Freq.',
                           'method': 'update',
                           'args':[{'visible': [False,True]},
                                   {'annotations': freq_annotations}]}
                      ]}])

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
    
    
    
    # PLOT LOSS / ACCURACY
    # Mean/std across outer folds at each epoch; then keep a single fold's
    # rows so each epoch appears once.
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]
    
    fig = make_subplots(rows=2, cols=1, 
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'), 
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])
    
    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'
    
        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std): filled mean+/-std band drawn as a closed polygon
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std)
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800, 
        height=800, 
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)
    
    
    
    
    
    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1
    
    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')
    
    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Nested CV for the LSTM on the original (unsmoothed) trajectory dataset.
nested_cv_lstm(
    dataset=unsmoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[30,50,150],
    k_layers=[1,2,3],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    device=torch.device('cuda:0'), #torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='LSTM Hyperparameter Optimization (Unsmoothed)',
    opt_caption='<b>Fig. 7.</b> LSTM hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='LSTM Loss and Accuracy (Unsmoothed)',
    lossacc_caption='<b>Fig. 8.</b> LSTM mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='LSTM Confusion Matrix (Unsmoothed)',
    cm_caption='<b>Fig. 9.</b> Mean confusion matrix for trained LSTM model across outer folds of 10-fold nested cross validation.'
)
# Nested CV for the LSTM on the B-spline-smoothed trajectory dataset.
nested_cv_lstm(
    dataset=smoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[30,50,150],
    k_layers=[1,2,3],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    device=torch.device('cuda:0'), #torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='LSTM Hyperparameter Optimization (Smoothed)',
    opt_caption='<b>Fig. 10.</b> LSTM hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='LSTM Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 11.</b> LSTM mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='LSTM Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 12.</b> Mean confusion matrix for trained LSTM model across outer folds of 10-fold nested cross validation.'
)

Similar to the MLPs, the LSTM models saw a slightly higher classification accuracy for smoothed trajectories. LSTMs with coordinate inputs also outperformed basis spline coefficients by about 3%.

Unlike the MLPs, the LSTMs seem to continue to learn past epoch 50. Let’s take a look at more epochs, using just an outer CV with the best hyperparameters to save computational time.

def cv_lstm(dataset, outer_kfold, num_outer_epochs, batch_size, k_hidden, k_layers, k_seed, scoring, device, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0):
    """Plain (non-nested) k-fold CV for an LSTM with fixed hyperparameters.

    Same training and plotting pipeline as nested_cv_lstm but without the
    inner hyperparameter search: k_hidden and k_layers are scalars. Plots
    mean train/test loss/accuracy per epoch across folds and a
    row-normalized confusion matrix accumulated at the final epoch of each
    fold. Relies on module-level train/evaluate/evaluate_cm helpers, the
    LSTM class, and clipdata_df for class labels.
    """
    
    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
    # NESTED CV
    loss_acc_df = pd.DataFrame(columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])
    x, y = dataset[:]
    cm = np.zeros((torch.max(y).item()+1,torch.max(y).item()+1), dtype=np.int) # NOTE(review): np.int was removed in NumPy>=1.24; use int
    # NOTE: the integer outer_kfold parameter is rebound to a KFold splitter.
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()
    
    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):
        
        print(f'Fold {current_outer_fold}/{outer_kfold.n_splits}')
        
        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)
    
        model = LSTM(3,k_hidden,k_layers,15,dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)
        
        for epoch in range(1,num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            # Only collect the confusion matrix at the final epoch.
            if (epoch==num_outer_epochs):
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            # NOTE(review): DataFrame.append was removed in pandas>=2.0; use pd.concat
            loss_acc_df = loss_acc_df.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()},
                ignore_index=True)
                
        current_outer_fold += 1
    clear_output()
    
    
    
    # PLOT LOSS / ACCURACY
    # Mean/std across folds at each epoch; then keep a single fold's rows so
    # each epoch appears once.
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]
    
    fig = make_subplots(rows=2, cols=1, 
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'), 
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])
    
    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'
    
        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std): filled mean+/-std band drawn as a closed polygon
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std)
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800, 
        height=800, 
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)
    
    
    
    
    
    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1
    
    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')
    
    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Longer training run (500 epochs) on the smoothed dataset using the best
# hyperparameters found by the nested CV (k_hidden=150, k_layers=3).
cv_lstm(
    dataset=smoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=500,
    batch_size=16,
    k_hidden=150,
    k_layers=3,
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    device=torch.device('cuda:0'), #torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    lossacc_title='LSTM Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 13.</b> LSTM mean loss and accuracy for train and test sets across folds of 10-fold cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='LSTM Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 14.</b> Mean confusion matrix for trained LSTM model across folds of 10-fold cross validation.'
)

We can see that the LSTM actually continues to learn until about 300 epochs. It outperforms the MLP, though at the cost of significantly greater computation.

Temporal Convolutional Network (TCN)

class TCN(nn.Module):
    """Single-layer temporal convolutional network for trajectory classification.

    Input is a flattened (batch_size, 261*3) trajectory tensor; it is
    reshaped to (batch, channels=3, time=261), left-padded so the causal
    convolution preserves the sequence length, and classified with one
    linear layer over the flattened feature map.

    Args:
        k_input: number of input channels (3 for x/y/z).
        k_hidden: number of convolution output channels.
        k_window: convolution kernel (window) size.
        k_output: number of output classes.
        dropout: dropout probability applied before the linear layer.
    """

    def __init__(self, k_input, k_hidden, k_window, k_output, dropout=0):
        nn.Module.__init__(self) #super().__init__()
        self.conv = nn.Conv1d(in_channels=k_input, out_channels=k_hidden, kernel_size=k_window, stride=1)
        # Left-pad the time axis with k_window-1 zeros so the conv output
        # keeps length 261. Fix: ConstantPad1d takes a 2-tuple (left, right);
        # the original 4-tuple only worked incidentally via F.pad, where the
        # extra (0, 0) no-op padded the channel axis.
        self.pad = nn.ConstantPad1d((k_window-1, 0), 0)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(k_hidden*261, k_output)

    def forward(self, x):
        batch_size = x.shape[0]
        x = x.view(batch_size,3,261) # (batch_size, in_channels, seq_len)
        x = F.relu(self.conv(self.pad(x)))
        x = x.flatten(1) # (batch_size, k_hidden*261)
        x = self.dropout(x)
        y = self.fc(x)
        return y
def grid_search_tcn(train_dl, val_dl, num_epochs, k_hidden, k_window, scoring, criterion, device, dropout=0):
    """Grid search over (k_hidden, k_window) combinations for the TCN.

    Each candidate is trained for num_epochs; its score is the validation
    accuracy averaged over the final 5 epochs. Returns an array of scores
    shaped (len(k_hidden), len(k_window)).
    """
    scores = np.zeros((len(k_hidden), len(k_window)))

    for row, n_hidden in enumerate(k_hidden):
        for col, window in enumerate(k_window):
            candidate = TCN(3, n_hidden, window, 15, dropout)
            opt = optim.Adam(candidate.parameters())

            # Warm-up training epochs with no evaluation.
            for _ in range(num_epochs - 5):
                train(candidate, criterion, opt, train_dl, device)

            # Score: mean validation accuracy over the last 5 epochs.
            # The 'scoring' argument is intentionally ignored here for speed
            # (accuracy only).
            running = 0
            for _ in range(5):
                train(candidate, criterion, opt, train_dl, device)
                _, val_acc = evaluate(candidate, criterion, val_dl, device)
                running += val_acc

            scores[row, col] = running / 5

    return scores
def nested_cv_tcn(dataset, outer_kfold, num_outer_epochs, inner_kfold, num_inner_epochs, batch_size, k_hidden, k_window, k_seed, scoring, device, opt_title, opt_caption, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0.5):
    """Nested cross validation for the TCN with interactive plotly reporting.

    The inner CV grid-searches (k_hidden, k_window) via ``grid_search_tcn``;
    the best combination per outer fold is retrained on the train+val split and
    evaluated on the held-out test fold. Produces three figures: the
    hyperparameter heatmaps (validation accuracy / selection frequency), the
    per-epoch loss/accuracy curves averaged over outer folds, and the mean
    confusion matrix.

    Args:
        dataset: TensorDataset of (inputs, integer labels).
        outer_kfold, inner_kfold: number of outer / inner folds.
        num_outer_epochs, num_inner_epochs: training epochs per stage.
        batch_size: DataLoader batch size.
        k_hidden, k_window: hyperparameter grids.
        k_seed: torch manual seed for reproducibility.
        scoring: 'acc'/'accuracy' (maximize) or 'loss' (minimize) for model selection.
        device: torch device to train on.
        *_title, *_caption: figure titles/captions (HTML allowed).
        dropout: dropout probability for every TCN trained.
    """

    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # NESTED CV
    # Rows are collected in a plain list and turned into a DataFrame once after
    # the loops: DataFrame.append was removed in pandas 2.0 and was quadratic.
    loss_acc_records = []
    x, y = dataset[:]
    # np.int was removed in NumPy 1.24; the builtin int is the supported dtype spelling
    cm = np.zeros((torch.max(y).item()+1, torch.max(y).item()+1), dtype=int)
    total_val_score = np.zeros((len(k_hidden), len(k_window)))
    total_freq = np.zeros((len(k_hidden), len(k_window)), dtype=int)
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    inner_kfold = KFold(n_splits=inner_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()

    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):

        print(f'Outer fold {current_outer_fold}/{outer_kfold.n_splits}')

        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)

        # Inner CV (train/val split): accumulate grid-search scores across folds
        current_inner_fold = 1
        inner_val_score = np.zeros((len(k_hidden), len(k_window)))

        for train_index, val_index in inner_kfold.split(x_trainval, y_trainval):

            print(f'  Inner fold {current_inner_fold}/{inner_kfold.n_splits}')

            train_data = TensorDataset(x_trainval[train_index], y_trainval[train_index])
            val_data = TensorDataset(x_trainval[val_index], y_trainval[val_index])
            train_dl = DataLoader(train_data, batch_size=batch_size, shuffle=True)
            val_dl = DataLoader(val_data, batch_size=batch_size, shuffle=True)

            fold_val_score = grid_search_tcn(train_dl, val_dl, num_inner_epochs, k_hidden, k_window, scoring, criterion, device, dropout)
            inner_val_score = np.add(inner_val_score, fold_val_score)

            current_inner_fold += 1

        # pick the best hyperparameter cell: max for accuracy, min for loss
        if (scoring=='acc' or scoring=='accuracy'):
            best_params = np.unravel_index(np.argmax(inner_val_score, axis=None), inner_val_score.shape)
        elif (scoring=='loss'):
            best_params = np.unravel_index(np.argmin(inner_val_score, axis=None), inner_val_score.shape)
        total_freq[best_params[0], best_params[1]] += 1

        # retrain the selected model on the full trainval split
        model = TCN(3, k_hidden[best_params[0]], k_window[best_params[1]], 15, dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)

        for epoch in range(1, num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            if (epoch==num_outer_epochs):
                # last epoch: also accumulate this fold's confusion matrix
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            loss_acc_records.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()})

        current_outer_fold += 1
        total_val_score = np.add(total_val_score, inner_val_score)

    loss_acc_df = pd.DataFrame(
        loss_acc_records,
        columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])

    # PLOT HYPERPARAMETERS
    clear_output()
    avg_val_score = total_val_score / (outer_kfold.n_splits * inner_kfold.n_splits)
    avg_freq = total_freq / outer_kfold.n_splits
    plotly_data = []
    # two stacked heatmaps toggled by buttons: validation accuracy / selection frequency
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_val_score,axis=0), y=np.flip(np.array(k_hidden)), x=k_window, colorscale='blues',
                   zmin=np.amin(avg_val_score), zmax=np.amax(avg_val_score),
                   name='Validation<br>Accuracy',
                   customdata=np.flip(avg_freq,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_window: %{x}<br>'+scoring+': %{z:.5f}<br>rel freq: %{customdata:.3f}',
                   visible=True))
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_freq,axis=0), y=np.flip(np.array(k_hidden)), x=k_window, colorscale='blues',
                   zmin=np.amin(avg_freq), zmax=np.amax(avg_freq),
                   name='Relative<br>Frequency',
                   customdata=np.flip(avg_val_score,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_window %{x}<br>'+scoring+': %{customdata:.5f}<br>rel freq: %{z:.3f}',
                   visible=False))

    # per-cell text annotations; white text on dark cells, black on light
    acc_annotations = []
    mid = (np.amin(avg_val_score)+np.amax(avg_val_score))/2
    for i, row in enumerate(avg_val_score.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            acc_annotations.append({
                'x': k_window[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,5)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    acc_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })

    freq_annotations = []
    mid = (np.amin(avg_freq)+np.amax(avg_freq))/2
    for i, row in enumerate(avg_freq.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            freq_annotations.append({
                'x': k_window[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    freq_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })

    plotly_layout = go.Layout(
        autosize=False,
        width=800,
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':100},
        xaxis={'title': 'k_window', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'k_hidden', 'fixedrange':True, 'type':'category'},
        title={'text':opt_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=acc_annotations,
        updatemenus=[{'type':'buttons',
                      'direction':'left',
                      'pad':{'l':0, 'r':0, 't':0, 'b':0},
                      'xanchor':'left',
                      'yanchor':'top',
                      'x':0,
                      'y':1.055,
                      'buttons':[
                          {'label':'Val. Acc.',
                           'method': 'update',
                           'args':[{'visible': [True,False]},
                                   {'annotations': acc_annotations}]},
                          {'label':'Rel. Freq.',
                           'method': 'update',
                           'args':[{'visible': [False,True]},
                                   {'annotations': freq_annotations}]}
                      ]}])

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}

    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)

    # PLOT LOSS / ACCURACY
    # per-epoch mean/std across outer folds, then keep one row per epoch (fold 1)
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]

    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'),
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])

    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'

        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std): filled band between mean +/- std
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std)
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800,
        height=800,
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)

    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1

    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')

    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })

    plotly_layout = go.Layout(
        autosize=False,
        width=800,
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}

    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Run nested CV for the TCN on both the unsmoothed and smoothed datasets.
nested_cv_tcn(
    dataset=unsmoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[50,100,150],
    k_window=[5,10,20,30],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    # fall back to CPU when no GPU is available instead of crashing
    device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='TCN Hyperparameter Optimization (Unsmoothed)',
    opt_caption='<b>Fig. 15.</b> TCN hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='TCN Loss and Accuracy (Unsmoothed)',
    lossacc_caption='<b>Fig. 16.</b> TCN mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='TCN Confusion Matrix (Unsmoothed)',
    cm_caption='<b>Fig. 17.</b> Mean confusion matrix for trained TCN model across outer folds of 10-fold nested cross validation.'
)
nested_cv_tcn(
    dataset=smoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=50,
    inner_kfold=10,
    num_inner_epochs=50,
    batch_size=16,
    k_hidden=[50,100,150],
    k_window=[5,10,20,30],
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    # fall back to CPU when no GPU is available instead of crashing
    device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    opt_title='TCN Hyperparameter Optimization (Smoothed)',
    opt_caption='<b>Fig. 18.</b> TCN hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='TCN Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 19.</b> TCN mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='TCN Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 20.</b> Mean confusion matrix for trained TCN model across outer folds of 10-fold nested cross validation.'
)

The TCN is the best performing model. Classification accuracy is actually higher for unsmoothed trajectories, though the difference remains insignificant. Overall, it seems that smoothing trajectories does not have a significant impact on classification accuracy.

Similar to the LSTM, the TCN also seems to continue learning past 50 epochs.

def cv_tcn(dataset, outer_kfold, num_outer_epochs, batch_size, k_hidden, k_window, k_seed, scoring, device, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0):
    """Plain k-fold cross validation for a single fixed TCN configuration.

    Unlike ``nested_cv_tcn`` there is no inner hyperparameter search:
    ``k_hidden`` and ``k_window`` are scalars. For each fold a TCN is trained
    for ``num_outer_epochs`` epochs and evaluated on the held-out fold; the
    per-epoch loss/accuracy curves and the mean confusion matrix are plotted.

    Args:
        dataset: TensorDataset of (inputs, integer labels).
        outer_kfold: number of folds.
        num_outer_epochs: training epochs per fold.
        batch_size: DataLoader batch size.
        k_hidden, k_window: the fixed TCN hyperparameters (ints).
        k_seed: torch manual seed for reproducibility.
        scoring: accepted for interface symmetry with nested_cv_tcn; unused here.
        device: torch device to train on.
        *_title, *_caption: figure titles/captions (HTML allowed).
        dropout: dropout probability for the TCN.
    """

    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # CV (not nested -- single fixed hyperparameter configuration)
    # Rows are collected in a list: DataFrame.append was removed in pandas 2.0.
    loss_acc_records = []
    x, y = dataset[:]
    # np.int was removed in NumPy 1.24; the builtin int is the supported dtype spelling
    cm = np.zeros((torch.max(y).item()+1, torch.max(y).item()+1), dtype=int)
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()

    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):

        print(f'Fold {current_outer_fold}/{outer_kfold.n_splits}')

        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)

        model = TCN(3, k_hidden, k_window, 15, dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)

        for epoch in range(1, num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            if (epoch==num_outer_epochs):
                # last epoch: also accumulate this fold's confusion matrix
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            loss_acc_records.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()})

        current_outer_fold += 1

    loss_acc_df = pd.DataFrame(
        loss_acc_records,
        columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])
    clear_output()

    # PLOT LOSS / ACCURACY
    # per-epoch mean/std across folds, then keep one row per epoch (fold 1)
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]

    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'),
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])

    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'

        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std): filled band between mean +/- std
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std)
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800,
        height=800,
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)

    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1

    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')

    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })

    plotly_layout = go.Layout(
        autosize=False,
        width=800,
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}

    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Train the best TCN configuration from the nested CV for longer (200 epochs).
cv_tcn(
    dataset=smoothed_dataset,
    outer_kfold=10,
    num_outer_epochs=200,
    batch_size=16,
    k_hidden=100,
    k_window=30,
    k_seed=330,
    scoring='acc',
    dropout=0.5,
    # fall back to CPU when no GPU is available instead of crashing
    device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    lossacc_title='TCN Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 21.</b> TCN mean loss and accuracy for train and test sets across folds of 10-fold cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='TCN Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 22.</b> Mean confusion matrix for trained TCN model across folds of 10-fold cross validation.'
)

The TCN stops learning after about 150 epochs, which is more than the MLP but less than the LSTM. Its classification accuracy remains the highest among all models.

Convolutional Neural Network (CNN)

We can also remove all temporal information by converting trajectories into 3-dimensional images, in which cubic voxels of side length 1 are assigned a value of one when the trajectory passes through them, and a value of zero otherwise. To reduce the input size, we’ll limit the volume to the minimum possible volume that encompasses all trajectories. These boundaries are shown below.

# Show the trajectory table, then per-axis coordinate bounds: first the exact
# min/max, then the integer bounds widened by 1 used to size the voxel grid.
display(traj_df)
print('    Min       Max')
for axis in ['x','y','z']:
    lo, hi = min(traj_df[axis]), max(traj_df[axis])
    print(f'{axis}   {lo:.3f}   {hi:.3f}')

print('\n    Min   Max')
for axis in ['x','y','z']:
    print(f'{axis}   {int(min(traj_df[axis]))-1}   {int(max(traj_df[axis]))+1}')
    Min       Max
x   -24.312   24.291
y   -29.889   30.656
z   -24.226   26.534
# find conv
# Rasterize every trajectory into a 3D occupancy grid (voxel "image") for the CNN:
# a voxel of side `eps` is set to 1 wherever a trajectory sample falls inside it.
smoothed = True          # toggle: smoothed (B-spline resampled) vs raw trajectories
num_coeff = 50           # number of B-spline coefficients for smoothing
eps = 0.5                # voxel edge length
# integer bounds chosen from the min/max printed above (widened by 1)
xmin, xmax, ymin, ymax, zmin, zmax = -25, 25, -30, 31, -25, 27
# accumulator: (n_trajectories, nx, ny, nz) occupancy volumes, grown by vstack
data_np = np.empty((0,int((xmax-xmin)/eps),int((ymax-ymin)/eps),int((zmax-zmin)/eps)))
labels_np = traj_df.drop_duplicates(subset=['pid','clip'], keep='first')['clip'].to_numpy()

if smoothed:
    # NOTE(review): `smoothed` is rebound from bool to str here (used in the
    # output filename below) — confusing but intentional.
    smoothed = 'smoothed'
    for clip_name in clipdata_df['clip_name']:
        print(clip_name)
        temp_df = traj_df[(traj_df.clip_name==clip_name)]
        for pid in range(max(temp_df.pid)):
            data = temp_df[temp_df.pid==pid+1][['x','y','z']].to_numpy()
            # fit a cubic B-spline with num_coeff coefficients (task=-1: fixed knots)
            tck, u = interpolate.splprep(data.T, k=3, u=np.linspace(0,1,temp_df['clip_len'].iloc[0]), t=np.concatenate((np.array([0,0,0]),np.linspace(0,1,num_coeff-2),np.array([1,1,1]))), task=-1)
            # resample the smoothed curve at 10x temporal resolution
            data = interpolate.splev(np.linspace(0,1,(temp_df['clip_len'].iloc[0]-1)*10+1), tck, der=0)
            pid_np = np.zeros((1,int((xmax-xmin)/eps),int((ymax-ymin)/eps),int((zmax-zmin)/eps)))

            # NOTE(review): loop variable `t` shadows the sympy symbol `t`
            # imported at the top of the notebook.
            for t in range(len(np.linspace(0,1,(temp_df['clip_len'].iloc[0]-1)*10+1))):
                # floor-divide each coordinate into its voxel index and mark occupied
                pid_np[0][int((data[0][t]-xmin)//eps)][int((data[1][t]-ymin)//eps)][int((data[2][t]-zmin)//eps)] = 1

            data_np = np.vstack((data_np, pid_np))
else:
    smoothed = 'unsmoothed'
    for clip_name in clipdata_df['clip_name']:
        print(clip_name)
        temp_df = traj_df[(traj_df.clip_name==clip_name)]
        clip_len = temp_df['clip_len'].iloc[0]
        for pid in range(max(temp_df.pid)):
            # raw coordinates, no spline smoothing or upsampling
            data = temp_df[temp_df.pid==pid+1][['x','y','z']].to_numpy().T
            pid_np = np.zeros((1,int((xmax-xmin)/eps),int((ymax-ymin)/eps),int((zmax-zmin)/eps)))
                
            for t in range(len(np.linspace(0,1,temp_df['clip_len'].iloc[0]))):
                pid_np[0][int((data[0][t]-xmin)//eps)][int((data[1][t]-ymin)//eps)][int((data[2][t]-zmin)//eps)] = 1
            
            data_np = np.vstack((data_np, pid_np))
        
clear_output()

# save numpy arrays (protocol=4 supports the >4GB pickle these volumes can reach)
with open('conv_'+str(eps)+'_'+smoothed+'.pkl', 'wb') as f:
    pkl.dump({'data':data_np, 'labels':labels_np}, f, protocol=4)
def _load_conv_dataset(path):
    """Load a pickled voxel dataset into a TensorDataset.

    The pickle holds {'data': occupancy volumes, 'labels': integer class ids};
    volumes are cast to float32 for the CNN and labels to int64 (the dtype
    CrossEntropyLoss expects; np.int was removed in NumPy 1.24).
    """
    with open(path, 'rb') as f:
        payload = pkl.load(f)
    return TensorDataset(torch.from_numpy(payload['data'].astype(np.float32)),
                         torch.from_numpy(payload['labels'].astype(np.int64)))

# Unsmoothed variants can be loaded the same way when needed:
# conv_10_unsmoothed_dataset = _load_conv_dataset('conv_1.0_unsmoothed.pkl')
# conv_05_unsmoothed_dataset = _load_conv_dataset('conv_0.5_unsmoothed.pkl')

conv_10_smoothed_dataset = _load_conv_dataset('conv_1.0_smoothed.pkl')
conv_05_smoothed_dataset = _load_conv_dataset('conv_0.5_smoothed.pkl')

Let’s look at a trajectory when converted to a convolutional input.

# calculate
# Visualize one trajectory ("oceans", participant 57) next to its voxelized
# CNN input: smooth the trajectory, rasterize it into the voxel grid, then
# draw each occupied voxel as a translucent cube plus the trajectory itself.
num_coeff = 50   # B-spline coefficients for smoothing
eps = 1.0        # voxel edge length
xmin, xmax, ymin, ymax, zmin, zmax = -25, 25, -30, 31, -25, 27

# NOTE(review): chained boolean indexing — the second mask is computed on the
# full traj_df and aligned by index; `traj_df[(traj_df.clip_name=='oceans') & (traj_df.pid==57)]`
# would be the canonical form (warnings are suppressed notebook-wide).
temp_df = traj_df[traj_df.clip_name=='oceans'][traj_df.pid==57]
data = temp_df[['x','y','z']].to_numpy()
# cubic B-spline fit with fixed knots (task=-1), then resample at 10x resolution
tck, u = interpolate.splprep(data.T, k=3, u=np.linspace(0,1,temp_df['clip_len'].iloc[0]), t=np.concatenate((np.array([0,0,0]),np.linspace(0,1,num_coeff-2),np.array([1,1,1]))), task=-1)
data = interpolate.splev(np.linspace(0,1,(temp_df['clip_len'].iloc[0]-1)*10+1), tck, der=0)
data_np = np.zeros((int((xmax-xmin)/eps),int((ymax-ymin)/eps),int((zmax-zmin)/eps)))
# NOTE(review): loop variable `t` shadows the sympy symbol `t` imported above.
for t in range(len(np.linspace(0,1,(temp_df['clip_len'].iloc[0]-1)*10+1))):
    # floor-divide each sample into its voxel index and mark it occupied
    data_np[int((data[0][t]-xmin)//eps)][int((data[1][t]-ymin)//eps)][int((data[2][t]-zmin)//eps)] = 1

    
    
# plot
plotly_data = []
    
# one Mesh3d cube per occupied voxel
for x in np.arange(xmin,xmax,eps):
    for y in np.arange(ymin,ymax,eps):
        for z in np.arange(zmin,zmax,eps):
            if (data_np[int((x-xmin)//eps)][int((y-ymin)//eps)][int((z-zmin)//eps)] == 1):
                conv = go.Mesh3d(
                    # 8 vertices of a cube
                    x=[x, x, x+eps, x+eps, x, x, x+eps, x+eps],
                    y=[y, y+eps, y+eps, y, y, y+eps, y+eps, y],
                    z=[z, z, z, z, z+eps, z+eps, z+eps, z+eps],
                    colorscale=['mediumblue','mediumblue'],
                    intensity = [0],
                    # i/j/k triangulate the 12 faces (2 triangles per cube side)
                    i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
                    j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
                    k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
                    name='image',
                    opacity=0.2,
                    showscale=False,
                    hoverlabel={'bgcolor':'mediumblue'}
                )
                plotly_data.append(conv)

# the smoothed trajectory itself, appended last so the buttons below can toggle it
traj = go.Scatter3d(
    x=data[0],
    y=data[1],
    z=data[2],
    name='trajectory',
    mode='markers+lines',
    line={'width':5, 'color':'mediumblue'},
    marker={'size':1, 'color':'mediumblue'},
    opacity=1,
    # customdata carries (current time step, final time step) for the hover label
    customdata=pd.DataFrame(data={'a':np.linspace(0,temp_df['clip_len'].iloc[0]-1,len(data[0])), 'b':np.ones(len(data[0]))*(temp_df['clip_len'].iloc[0]-1)}),
    hovertemplate='x: %{x:.3f}<br>y: %{y:.3f}<br>z: %{z:.3f}<br>t: %{customdata[0]:.1f}/%{customdata[1]}'
)
plotly_data.append(traj)

# formatting
plotly_layout = go.Layout(
    showlegend=False, 
    autosize=False,
    width=800, 
    height=600,
    margin={'l':0, 'r':0, 't':35, 'b':60},
    title={'text':'Trajectory Input for CNN',
           'xanchor':'center',
           'yanchor':'top',
           'x':0.5,
           'y':0.98},
    annotations=[{'xref':'paper',
                  'yref':'paper',
                  'xanchor':'center',
                  'yanchor':'bottom',
                  'x':0.5,
                  'y':-0.105,
                  'showarrow':False,
                  'text':'<b>Fig. 23.</b> Individual trajectory for "oceans" participant 57 as an image. 3-space is split into a grid of cubes, each of length '+str(eps)+'.<br> Cubes that the trajectory passes through are given a value of 1 and colored in. All other cubes have value 0 and are empty.'}],
    # buttons toggle visibility: all cubes, only the trajectory trace, or both
    updatemenus=[{'type':'buttons',
                  'active':2,
                  'direction':'left',
                  'pad':{'l':0, 'r':0, 't':0, 'b':0},
                  'xanchor':'left',
                  'yanchor':'top',
                  'x':0,
                  'y':1.08,
                  'buttons':[
                      {'label':'Image',
                       'method': 'update',
                       'args':[{'visible': ([True]*(len(plotly_data)-1))+[False]}]},
                      {'label':'Trajectory',
                       'method': 'update',
                       'args':[{'visible': ([False]*(len(plotly_data)-1))+[True]}]},
                      {'label':'Both',
                       'method': 'update',
                       'args':[{'visible': [True]*len(plotly_data)}]}
                  ]}])

plotly_config = {'displaylogo':False,
                 'modeBarButtonsToRemove': ['resetCameraLastSave3d','orbitRotation','hoverClosest3d']}

fig = go.Figure(data=plotly_data, layout=plotly_layout)
fig.show(config=plotly_config)
class CNN(nn.Module):
    """3D convolutional classifier for voxelized trajectories.

    forward() takes a (batch, kx, ky, kz) tensor of single-channel voxel
    grids and returns (batch, k_output) raw class scores (logits).

    Args:
        kx, ky, kz: spatial dimensions of the voxel grid.
        k_input: number of input channels for the conv layer. forward()
            inserts a single channel axis, so callers pass 1 here.
        k_hidden: number of conv output channels.
        k_kernel: cubic convolution kernel size.
        k_output: number of output classes.
        dropout: dropout probability applied before the final linear layer.
    """

    def __init__(self, kx, ky, kz, k_input, k_hidden, k_kernel, k_output, dropout=0):
        super().__init__()
        self.k_output = k_output
        self.conv = nn.Conv3d(in_channels=k_input, out_channels=k_hidden, kernel_size=k_kernel, stride=1)
        # One-sided zero padding of k_kernel-1 on each spatial axis keeps the
        # output of the valid convolution at exactly (kx, ky, kz), matching
        # the in_features of self.fc below.
        self.pad = nn.ConstantPad3d((k_kernel-1, 0, k_kernel-1, 0, k_kernel-1, 0), 0)
        self.fc = nn.Linear(k_hidden*kx*ky*kz, k_output)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # unsqueeze(1) instead of view(): view() raises on non-contiguous
        # input (e.g. a permuted tensor); unsqueeze is equivalent for the
        # contiguous case and always safe.
        x = x.unsqueeze(1)
        x = F.relu(self.conv(self.pad(x)))
        x = x.flatten(1)
        x = self.dropout(x)
        return self.fc(x)
def grid_search_cnn(kx, ky, kz, train_dl, val_dl, num_epochs, k_hidden, k_kernel, scoring, criterion, device, dropout=0):
    """Grid-search CNN hyperparameters over k_hidden x k_kernel.

    For each (hidden, kernel) pair, trains a fresh CNN for num_epochs and
    scores it as the mean validation metric over the final 5 epochs.

    Args:
        kx, ky, kz: voxel-grid dimensions passed to CNN.
        train_dl, val_dl: DataLoaders for training and validation splits.
        num_epochs: total training epochs (the last 5 are scored).
        k_hidden, k_kernel: hyperparameter value lists defining the grid.
        scoring: 'acc'/'accuracy' to record validation accuracy, otherwise
            ('loss') validation loss.
        criterion: loss function passed to train/evaluate.
        device: torch device passed to train/evaluate.
        dropout: dropout probability for the CNN.

    Returns:
        ndarray of shape (len(k_hidden), len(k_kernel)) with the mean
        validation score for each grid cell.
    """
    val_scores = np.zeros((len(k_hidden), len(k_kernel)))

    # Resolve the metric ONCE, outside all loops. The per-epoch `if` had been
    # commented out "for speed", which silently accumulated accuracy even for
    # scoring='loss' — the caller's argmin then picked the WORST grid cell.
    use_acc = (scoring == 'acc' or scoring == 'accuracy')

    for i, hidden in enumerate(k_hidden):
        for j, kernel in enumerate(k_kernel):
            model = CNN(kx, ky, kz, 1, hidden, kernel, 15, dropout)
            optimizer = optim.Adam(model.parameters())
            avg_val_score = 0

            # Burn-in epochs: train without evaluating.
            for epoch in range(num_epochs-5):
                train(model, criterion, optimizer, train_dl, device)

            # Score the average over the final 5 epochs.
            for epoch in range(5):
                train(model, criterion, optimizer, train_dl, device)
                val_loss, val_acc = evaluate(model, criterion, val_dl, device)
                avg_val_score += val_acc if use_acc else val_loss

            avg_val_score /= 5
            val_scores[i][j] = avg_val_score

    return val_scores
def nested_cv_cnn(dataset, outer_kfold, num_outer_epochs, inner_kfold, num_inner_epochs, batch_size, k_hidden, k_kernel, k_seed, scoring, device, opt_title, opt_caption, lossacc_title, lossacc_caption, cm_title, cm_caption, dropout=0.5):
    """Nested cross-validation for the CNN classifier, with Plotly reports.

    The inner CV grid-searches (k_hidden x k_kernel) via grid_search_cnn;
    the winning configuration of each outer fold is retrained on the outer
    trainval split and evaluated on the held-out test split. Afterwards three
    figures are displayed: (1) hyperparameter heatmaps (mean validation score
    and relative selection frequency), (2) mean train/test loss and accuracy
    per epoch across outer folds, (3) a row-normalized mean confusion matrix.

    Args:
        dataset: slicable dataset; dataset[:] yields (x, y) with x of shape
            (N, kx, ky, kz) — assumed from the kx,ky,kz unpack below, TODO confirm.
        outer_kfold, inner_kfold: fold counts (ints; rebound to KFold objects below).
        num_outer_epochs, num_inner_epochs: epochs for the outer retrain / inner search.
        batch_size: DataLoader batch size.
        k_hidden, k_kernel: hyperparameter grids.
        k_seed: torch manual seed for reproducibility.
        scoring: 'acc'/'accuracy' (select by argmax) or 'loss' (argmin).
        device: torch device handed to train/evaluate (model placement is
            presumably handled inside train — verify).
        opt_title, opt_caption, lossacc_title, lossacc_caption, cm_title,
            cm_caption: titles/captions for the three figures.
        dropout: dropout probability for the CNN.

    Returns:
        None; figures are shown as a side effect.
    """
    
    # set seed for reproducibility
    torch.manual_seed(k_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
    # NESTED CV
    loss_acc_df = pd.DataFrame(columns=['fold','epoch','train_loss','train_acc','test_loss','test_acc'])
    x, y = dataset[:]
    kx,ky,kz = list(x.shape[1:4])
    # NOTE(review): np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use plain int (or np.int64) when upgrading.
    cm = np.zeros((torch.max(y).item()+1,torch.max(y).item()+1), dtype=np.int)
    total_val_score = np.zeros((len(k_hidden),len(k_kernel)))
    total_freq = np.zeros((len(k_hidden),len(k_kernel)), dtype=np.int)
    # The int parameters are shadowed here by the KFold splitter objects.
    outer_kfold = KFold(n_splits=outer_kfold, shuffle=True, random_state=0)
    inner_kfold = KFold(n_splits=inner_kfold, shuffle=True, random_state=0)
    criterion = nn.CrossEntropyLoss()
    
    # Outer CV (trainval/test split)
    current_outer_fold = 1
    for trainval_index, test_index in outer_kfold.split(x, y):
        
        print(f'Outer fold {current_outer_fold}/{outer_kfold.n_splits}')
        
        x_trainval = x[trainval_index]
        y_trainval = y[trainval_index]
        x_test = x[test_index]
        y_test = y[test_index]
        trainval_data = TensorDataset(x_trainval, y_trainval)
        test_data = TensorDataset(x_test, y_test)

        
        
        # Inner CV (train/val split)
        current_inner_fold = 1
        inner_val_score = np.zeros((len(k_hidden),len(k_kernel)))

        for train_index, val_index in inner_kfold.split(x_trainval, y_trainval):
            
            print(f'  Inner fold {current_inner_fold}/{inner_kfold.n_splits}')
            
            train_data = TensorDataset(x_trainval[train_index], y_trainval[train_index])
            val_data = TensorDataset(x_trainval[val_index], y_trainval[val_index])
            train_dl = DataLoader(train_data, batch_size=batch_size, shuffle=True)
            val_dl = DataLoader(val_data, batch_size=batch_size, shuffle=True)

            # Accumulate the grid scores across inner folds.
            fold_val_score = grid_search_cnn(kx, ky, kz, train_dl, val_dl, num_inner_epochs, k_hidden, k_kernel, scoring, criterion, device, dropout)
            inner_val_score = np.add(inner_val_score, fold_val_score)

            current_inner_fold += 1

        # Model selection: maximize accuracy or minimize loss over the grid.
        if (scoring=='acc' or scoring=='accuracy'):
            best_params = np.unravel_index(np.argmax(inner_val_score, axis=None), inner_val_score.shape)
        elif (scoring=='loss'):
            best_params = np.unravel_index(np.argmin(inner_val_score, axis=None), inner_val_score.shape)
        total_freq[best_params[0],best_params[1]] += 1
        
    
    
        # Retrain the selected configuration on the full trainval split.
        model = CNN(kx,ky,kz,1,k_hidden[best_params[0]],k_kernel[best_params[1]],15,dropout)
        optimizer = optim.Adam(model.parameters())
        trainval_dl = DataLoader(trainval_data, batch_size=batch_size, shuffle=True)
        test_dl = DataLoader(test_data, batch_size=batch_size, shuffle=True)
        
        for epoch in range(1,num_outer_epochs+1):
            trainval_loss, trainval_acc = train(model, criterion, optimizer, trainval_dl, device)
            # Only the final epoch contributes this fold's confusion matrix.
            if (epoch==num_outer_epochs):
                test_loss, test_acc, fold_cm = evaluate_cm(model, criterion, test_dl, device)
                cm += fold_cm
            else:
                test_loss, test_acc = evaluate(model, criterion, test_dl, device)
            # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0; collect rows and pd.concat once when upgrading.
            loss_acc_df = loss_acc_df.append({
                'fold':current_outer_fold,
                'epoch':epoch,
                'train_loss':trainval_loss.item(),
                'train_acc':trainval_acc.item(),
                'test_loss':test_loss.item(),
                'test_acc':test_acc.item()},
                ignore_index=True)
                
        current_outer_fold += 1
        total_val_score = np.add(total_val_score, inner_val_score)

    
    
    # PLOT HYPERPARAMETERS
    # Wipe the per-fold progress prints before rendering the figures.
    clear_output()
    avg_val_score = total_val_score / (outer_kfold.n_splits * inner_kfold.n_splits)
    avg_freq = total_freq / outer_kfold.n_splits
    plotly_data = []
    # Two stacked heatmaps toggled by buttons: validation score and selection frequency.
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_val_score,axis=0), y=np.flip(np.array(k_hidden)), x=k_kernel, colorscale='blues',
                   zmin=np.amin(avg_val_score), zmax=np.amax(avg_val_score),
                   name='Validation<br>Accuracy',
                   customdata=np.flip(avg_freq,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_kernel: %{x}<br>'+scoring+': %{z:.5f}<br>rel freq: %{customdata:.3f}',
                   visible=True))
    plotly_data.append(
        go.Heatmap(z=np.flip(avg_freq,axis=0), y=np.flip(np.array(k_hidden)), x=k_kernel, colorscale='blues',
                   zmin=np.amin(avg_freq), zmax=np.amax(avg_freq),
                   name='Relative<br>Frequency',
                   customdata=np.flip(avg_val_score,axis=0),
                   hovertemplate='k_hidden: %{y}<br>k_kernel %{x}<br>'+scoring+': %{customdata:.5f}<br>rel freq: %{z:.3f}',
                   visible=False))
    
    # Per-cell text annotations; white text on dark (above-midpoint) cells.
    # Iterating the transpose pairs i with k_kernel and j with k_hidden.
    acc_annotations = []
    mid = (np.amin(avg_val_score)+np.amax(avg_val_score))/2
    for i, row in enumerate(avg_val_score.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            acc_annotations.append({
                'x': k_kernel[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,5)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    acc_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    freq_annotations = []
    mid = (np.amin(avg_freq)+np.amax(avg_freq))/2
    for i, row in enumerate(avg_freq.T):
        for j, value in enumerate(row):
            if (value > mid):
                color = 'white'
            else:
                color = 'black'
            freq_annotations.append({
                'x': k_kernel[i],
                'y': k_hidden[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    freq_annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.14,
        'showarrow':False,
        'text':opt_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':100},
        xaxis={'title': 'k_kernel', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'k_hidden', 'fixedrange':True, 'type':'category'},
        title={'text':opt_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=acc_annotations,
        updatemenus=[{'type':'buttons',
                      'direction':'left',
                      'pad':{'l':0, 'r':0, 't':0, 'b':0},
                      'xanchor':'left',
                      'yanchor':'top',
                      'x':0,
                      'y':1.055,
                      'buttons':[
                          {'label':'Val. Acc.',
                           'method': 'update',
                           'args':[{'visible': [True,False]},
                                   {'annotations': acc_annotations}]},
                          {'label':'Rel. Freq.',
                           'method': 'update',
                           'args':[{'visible': [False,True]},
                                   {'annotations': freq_annotations}]}
                      ]}])

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
    
    
    
    # PLOT LOSS / ACCURACY
    # Per-epoch mean/std across outer folds, broadcast to every row via transform.
    loss_acc_df['mean_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('mean')
    loss_acc_df['std_train_loss'] = loss_acc_df.groupby('epoch')['train_loss'].transform('std')
    loss_acc_df['mean_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('mean')
    loss_acc_df['std_train_acc'] = loss_acc_df.groupby('epoch')['train_acc'].transform('std')
    loss_acc_df['mean_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('mean')
    loss_acc_df['std_test_loss'] = loss_acc_df.groupby('epoch')['test_loss'].transform('std')
    loss_acc_df['mean_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('mean')
    loss_acc_df['std_test_acc'] = loss_acc_df.groupby('epoch')['test_acc'].transform('std')
    # Keep one row per epoch (fold 1); the mean_/std_ columns already aggregate all folds.
    loss_acc_df = loss_acc_df[loss_acc_df.fold==1]
    
    fig = make_subplots(rows=2, cols=1, 
                        shared_xaxes=True,
                        vertical_spacing=0.05,
                        subplot_titles=('Loss','Accuracy'), 
                        specs=[[{'type':'scatter'}], [{'type':'scatter'}]])
    
    for dataset in ['train','test']:
        if (dataset=='train'):
            color = 'mediumblue'
        elif (dataset=='test'):
            color = 'crimson'
    
        # loss (no std)
        loss = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_loss'],
            customdata=loss_acc_df['std_'+dataset+'_loss'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=True,
            visible=True,
            hovertemplate='epoch: %{x}<br>loss: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(loss, row=1, col=1)

        # loss (std)
        # Filled band: x runs forward then backward; the index offset maps the
        # fold-1 row positions back to 1-based epoch numbers — TODO confirm
        # this matches the 'epoch' column used by the mean trace above.
        upper = loss_acc_df['mean_'+dataset+'_loss'] + loss_acc_df['std_'+dataset+'_loss']
        lower = loss_acc_df['mean_'+dataset+'_loss'] - loss_acc_df['std_'+dataset+'_loss']
        loss = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(loss, row=1, col=1)

        # acc (no std)
        acc = go.Scatter(
            x=loss_acc_df['epoch'],
            y=loss_acc_df['mean_'+dataset+'_acc'],
            customdata=loss_acc_df['std_'+dataset+'_acc'],
            mode='markers+lines',
            line={'width':2, 'color':color},
            marker={'size':4, 'color':color},
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hovertemplate='epoch: %{x}<br>acc: %{y:.5f}<br>sd: %{customdata:.5f}'
        )
        fig.add_trace(acc, row=2, col=1)

        # acc (std)
        upper = loss_acc_df['mean_'+dataset+'_acc'] + loss_acc_df['std_'+dataset+'_acc']
        lower = loss_acc_df['mean_'+dataset+'_acc'] - loss_acc_df['std_'+dataset+'_acc']
        acc = go.Scatter(
            x=np.concatenate([loss_acc_df.index, loss_acc_df.index[::-1]])-loss_acc_df.index[0]+1,
            y=pd.concat([upper, lower[::-1]]),
            fill='toself',
            mode='lines',
            line={'width':0, 'color':color},
            opacity=0.7,
            name=dataset,
            legendgroup=dataset,
            showlegend=False,
            visible=True,
            hoverinfo='skip'
        )
        fig.add_trace(acc, row=2, col=1)

    # formatting
    fig.update_layout(
        autosize=False,
        width=800, 
        height=800, 
        margin={'l':0, 'r':0, 't':70, 'b':100},
        legend={'orientation':'h',
                'itemsizing':'constant',
                'xanchor':'center',
                'yanchor':'bottom',
                'y':-0.07,
                'x':0.5},
        title={'text':lossacc_title,
                'xanchor':'center',
                'yanchor':'top',
                'x':0.5,
                'y':0.98},
        hovermode='x')
    fig['layout']['annotations'] += (
        {'xref':'paper',
         'yref':'paper',
         'xanchor':'center',
         'yanchor':'bottom',
         'x':0.5,
         'y':-0.14,
         'showarrow':False,
         'text':lossacc_caption
        },
    )

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d']}

    fig.show(config=plotly_config)
    
    
    
    
    
    # PLOT CONFUSION MATRIX
    cm = cm.astype('float32')
    # NOTE(review): divides by the row sum — this is 0/0 (NaN) if a class
    # never appears as a true label in any outer test split; verify.
    for i in range(len(cm)):
        cm[i] = cm[i]/np.sum(cm[i]) # rows/columns total to 1
    
    # Class labels come from the notebook-global clipdata_df.
    labels=clipdata_df['clip_name'].to_numpy()
    plotly_data = go.Heatmap(z=np.rot90(cm.T), y=np.flip(labels), x=labels, colorscale="blues",
                             zmin=0, zmax=1,
                             hovertemplate='True: %{y}<br>Predicted: %{x}<br>p: %{z:.5f}')
    
    annotations = []
    for i, row in enumerate(cm.T):
        for j, value in enumerate(row):
            if (value > 0.5):
                color = 'white'
            else:
                color = 'black'
            annotations.append({
                'x': labels[i],
                'y': labels[j],
                'font': {'color': color},
                'text': str(round(value,3)),
                'xref': 'x1',
                'yref': 'y1',
                'showarrow': False
            })
    annotations.append({
        'xref':'paper',
        'yref':'paper',
        'xanchor':'center',
        'yanchor':'bottom',
        'x':0.5,
        'y':-0.18,
        'showarrow':False,
        'text':cm_caption
    })
    
    plotly_layout = go.Layout(
        autosize=False,
        width=800, 
        height=800,
        margin={'l':0, 'r':0, 't':40, 'b':120},
        xaxis={'title': 'Predicted', 'fixedrange':True, 'type':'category'},
        yaxis={'title': 'True', 'fixedrange':True, 'type':'category'},
        title={'text':cm_title,
               'xanchor':'center',
               'yanchor':'top',
               'x':0.5,
               'y':0.98},
        annotations=annotations)

    plotly_config = {'displaylogo':False,
                     'modeBarButtonsToRemove': ['autoScale2d','toggleSpikelines','hoverClosestCartesian','hoverCompareCartesian','lasso2d','select2d','zoom2d','pan2d','zoomIn2d','zoomOut2d','resetScale2d']}
    
    fig = go.Figure(data=plotly_data, layout=plotly_layout)
    fig.show(config=plotly_config)
# Run the full nested-CV pipeline on the smoothed voxelized trajectories:
# 10x10 folds, accuracy-based model selection over the k_hidden x k_kernel
# grid, fixed seed for reproducibility. Produces the hyperparameter heatmaps,
# loss/accuracy curves, and confusion matrix (Figs. 22-24). Requires a CUDA GPU.
nested_cv_cnn(
    dataset=conv_10_smoothed_dataset,
    # cross-validation structure
    outer_kfold=10,
    inner_kfold=10,
    num_outer_epochs=10,
    num_inner_epochs=10,
    batch_size=16,
    # hyperparameter grid and selection
    k_hidden=[1,3,5,10],
    k_kernel=[3,5,10],
    scoring='acc',
    dropout=0.5,
    k_seed=330,
    device=torch.device('cuda:0'),
    # figure titles and captions
    opt_title='CNN Hyperparameter Optimization (Smoothed)',
    opt_caption='<b>Fig. 22.</b> CNN hyperparameter optimization using 10-fold nested cross validation. (A) Mean validation accuracy.<br>(B) Relative frequency of being chosen as the best model from the inner CV.',
    lossacc_title='CNN Loss and Accuracy (Smoothed)',
    lossacc_caption='<b>Fig. 23.</b> CNN mean loss and accuracy for train and test sets across outer folds of 10-fold nested cross validation.<br>Error bars show the standard deviation of the mean loss and accuracy at each epoch.',
    cm_title='CNN Confusion Matrix (Smoothed)',
    cm_caption='<b>Fig. 24.</b> Mean confusion matrix for trained CNN model across outer folds of 10-fold nested cross validation.'
)

It appears that the CNN learns very quickly (in 10 epochs), but also quickly reaches a maximum classification accuracy. Without temporal information, it has a lower classification accuracy than all of the other models.

Using cubic voxels (3D pixels) with side lengths smaller than 1 was considered for classification, since more detail should lead to a more accurate representation of the trajectory. However, reducing the side length to 0.5 yields 8 times more voxels in total, greatly increasing training time to the point where it is no longer a realistic model to use. Therefore, side length 1 was the minimum length that yielded high classification accuracy while maintaining a fairly low training time.