Window - 2020 January 10

[1]:
import datetime

import numpy as np
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
import seaborn as sns

from tst import Transformer
from tst.loss import OZELoss

from src.dataset import OzeDataset
from src.utils import visual_sample, compute_loss
[2]:
# Training parameters
DATASET_PATH = 'datasets/dataset_CAPTrocadero_v5.npz'
BATCH_SIZE = 4
NUM_WORKERS = 4
LR = 2e-4
EPOCHS = 30

# Model parameters
d_model = 48  # Latent dimension
q = 8  # Query size
v = 8  # Value size
h = 4  # Number of heads
N = 4  # Number of encoder and decoder layers to stack
attention_size = 24  # Attention window size
dropout = 0.2  # Dropout rate
pe = None  # Positional encoding
chunk_mode = "window"  # Chunked attention mode: windowed (local) attention

d_input = 39 # From dataset
d_output = 8 # From dataset

# Config
sns.set()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device {device}")
Using device cuda:0
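For intuition on the "window" chunk mode: each time step attends only to positions within attention_size of itself, so the attention matrix is restricted to a band around the diagonal. A minimal sketch of such a local mask follows; it illustrates the idea only, not the tst library's internal implementation.

import torch

def local_attention_mask(seq_len: int, attention_size: int) -> torch.Tensor:
    # True marks pairs of positions farther apart than attention_size (masked out)
    idx = torch.arange(seq_len)
    distance = (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()
    return distance > attention_size

# Example with the values used here: 672-step sequences, window of 24
mask = local_attention_mask(672, 24)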

Training

Load dataset

[3]:
ozeDataset = OzeDataset(DATASET_PATH)

dataset_train, dataset_val, dataset_test = random_split(ozeDataset, (12000, 500, 500))

dataloader_train = DataLoader(dataset_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=NUM_WORKERS,
                              pin_memory=False
                             )

dataloader_val = DataLoader(dataset_val,
                            batch_size=BATCH_SIZE,
                            shuffle=True,
                            num_workers=NUM_WORKERS
                           )

dataloader_test = DataLoader(dataset_test,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=NUM_WORKERS
                            )
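As a quick sanity check, one can peek at a single batch; a sketch, with expected shapes following from BATCH_SIZE, the 672-step sequences, and d_input/d_output above:

x, y = next(iter(dataloader_train))
print(x.shape)  # expected: (4, 672, 39) = (BATCH_SIZE, time steps, d_input)
print(y.shape)  # expected: (4, 672, 8) = (BATCH_SIZE, time steps, d_output)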

Load network

[4]:
# Load transformer with Adam optimizer and MSE loss function
net = Transformer(d_input, d_model, d_output, q, v, h, N, attention_size=attention_size, dropout=dropout, chunk_mode=chunk_mode, pe=pe).to(device)
optimizer = optim.Adam(net.parameters(), lr=LR)
loss_function = OZELoss(alpha=0.3)
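To gauge model size before training, the trainable parameters can be counted with standard PyTorch calls (a generic sketch, independent of the tst internals):

n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f"{n_params:,} trainable parameters")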

Train

[5]:
model_save_path = f'models/model_{datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S")}.pth'
val_loss_best = np.inf

# Prepare loss history
hist_loss = np.zeros(EPOCHS)
hist_loss_val = np.zeros(EPOCHS)
for idx_epoch in range(EPOCHS):
    running_loss = 0
    with tqdm(total=len(dataloader_train.dataset), desc=f"[Epoch {idx_epoch+1:3d}/{EPOCHS}]") as pbar:
        for idx_batch, (x, y) in enumerate(dataloader_train):
            optimizer.zero_grad()

            # Propagate input
            netout = net(x.to(device))

            # Compute loss
            loss = loss_function(y.to(device), netout)

            # Backpropagate loss
            loss.backward()

            # Update weights
            optimizer.step()

            running_loss += loss.item()
            pbar.set_postfix({'loss': running_loss/(idx_batch+1)})
            pbar.update(x.shape[0])

        train_loss = running_loss/len(dataloader_train)
        val_loss = compute_loss(net, dataloader_val, loss_function, device).item()
        pbar.set_postfix({'loss': train_loss, 'val_loss': val_loss})

        hist_loss[idx_epoch] = train_loss
        hist_loss_val[idx_epoch] = val_loss

        if val_loss < val_loss_best:
            val_loss_best = val_loss
            torch.save(net.state_dict(), model_save_path)

plt.plot(hist_loss, 'o-', label='train')
plt.plot(hist_loss_val, 'o-', label='val')
plt.legend()
print(f"model exported to {model_save_path} with loss {val_loss_best:5f}")
[Epoch   1/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.48it/s, loss=0.00826, val_loss=0.00478]
[Epoch   2/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.00403, val_loss=0.0032]
[Epoch   3/30]: 100%|██████████| 12000/12000 [06:48<00:00, 29.36it/s, loss=0.00273, val_loss=0.00225]
[Epoch   4/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.00217, val_loss=0.00182]
[Epoch   5/30]: 100%|██████████| 12000/12000 [06:49<00:00, 29.30it/s, loss=0.0018, val_loss=0.00155]
[Epoch   6/30]: 100%|██████████| 12000/12000 [06:47<00:00, 29.44it/s, loss=0.00152, val_loss=0.00134]
[Epoch   7/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.00132, val_loss=0.00114]
[Epoch   8/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.00118, val_loss=0.00106]
[Epoch   9/30]: 100%|██████████| 12000/12000 [06:48<00:00, 29.40it/s, loss=0.00103, val_loss=0.000951]
[Epoch  10/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.57it/s, loss=0.000919, val_loss=0.00132]
[Epoch  11/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.000829, val_loss=0.000809]
[Epoch  12/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.50it/s, loss=0.000756, val_loss=0.000734]
[Epoch  13/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.57it/s, loss=0.000701, val_loss=0.000649]
[Epoch  14/30]: 100%|██████████| 12000/12000 [06:48<00:00, 29.40it/s, loss=0.000651, val_loss=0.000719]
[Epoch  15/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.52it/s, loss=0.000608, val_loss=0.000567]
[Epoch  16/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.53it/s, loss=0.000569, val_loss=0.000607]
[Epoch  17/30]: 100%|██████████| 12000/12000 [06:47<00:00, 29.48it/s, loss=0.000538, val_loss=0.000533]
[Epoch  18/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.55it/s, loss=0.000519, val_loss=0.000519]
[Epoch  19/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.59it/s, loss=0.000497, val_loss=0.000472]
[Epoch  20/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.55it/s, loss=0.000468, val_loss=0.000667]
[Epoch  21/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.56it/s, loss=0.000458, val_loss=0.000544]
[Epoch  22/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.58it/s, loss=0.000427, val_loss=0.00039]
[Epoch  23/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.56it/s, loss=0.00042, val_loss=0.000406]
[Epoch  24/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.48it/s, loss=0.000401, val_loss=0.000395]
[Epoch  25/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.54it/s, loss=0.000392, val_loss=0.000384]
[Epoch  26/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.61it/s, loss=0.000377, val_loss=0.000438]
[Epoch  27/30]: 100%|██████████| 12000/12000 [06:44<00:00, 29.64it/s, loss=0.00036, val_loss=0.000381]
[Epoch  28/30]: 100%|██████████| 12000/12000 [06:45<00:00, 29.62it/s, loss=0.000358, val_loss=0.000331]
[Epoch  29/30]: 100%|██████████| 12000/12000 [06:46<00:00, 29.55it/s, loss=0.000352, val_loss=0.000318]
[Epoch  30/30]: 100%|██████████| 12000/12000 [06:47<00:00, 29.45it/s, loss=0.000335, val_loss=0.000324]
model exported to models/model_2020_01_10__082029.pth with loss 0.000318
[Figure: training and validation loss per epoch]

Validation

[6]:
_ = net.eval()
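Note that the training loop left the last epoch's weights in net, while the best checkpoint by validation loss was written to model_save_path. To validate the best checkpoint instead, it can be reloaded first; a sketch using standard PyTorch calls:

net.load_state_dict(torch.load(model_save_path, map_location=device))
_ = net.eval()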

Plot results on a sample

[7]:
visual_sample(dataloader_test, net, device)
plt.savefig("fig")
[Figure: predictions against ground truth on a test sample]

Plot encoding attention map

[8]:
# Select first encoding layer
encoder = net.layers_encoding[0]

# Get the first attention map
attn_map = encoder.attention_map[0].cpu()

# Plot
plt.figure(figsize=(20, 20))
sns.heatmap(attn_map)
plt.savefig("attention_map")
[Figure: attention map of the first encoder layer]
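The same heatmap can be drawn for every one of the N stacked encoder layers, assuming each layer in net.layers_encoding exposes the attention_map attribute used above; a sketch:

fig, axes = plt.subplots(1, N, figsize=(5 * N, 5))
for idx_layer, (layer, ax) in enumerate(zip(net.layers_encoding, axes)):
    # Plot the first attention map of each stacked encoder
    sns.heatmap(layer.attention_map[0].cpu(), ax=ax, cbar=False)
    ax.set_title(f"Encoder layer {idx_layer}")
plt.savefig("attention_maps_all_layers")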

Evaluate on the test dataset

[9]:
# Pre-allocate predictions: (number of test samples, 672 time steps, d_output)
predictions = np.empty(shape=(len(dataloader_test.dataset), 672, 8))

idx_prediction = 0
with torch.no_grad():
    for x, y in tqdm(dataloader_test, total=len(dataloader_test)):
        netout = net(x.to(device)).cpu().numpy()
        predictions[idx_prediction:idx_prediction+x.shape[0]] = netout
        idx_prediction += x.shape[0]
100%|██████████| 125/125 [00:06<00:00, 19.13it/s]
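For a single aggregate score on the test set, the compute_loss helper already used during training can be reused; a sketch, with the signature taken from its use in the training loop above:

test_loss = compute_loss(net, dataloader_test, loss_function, device).item()
print(f"Test loss: {test_loss:.6f}")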
[10]:
fig, axes = plt.subplots(8, 1)
fig.set_figwidth(20)
fig.set_figheight(40)
plt.subplots_adjust(bottom=0.05)

# Alias the underlying dataset (dataloader_test.dataset is a random_split Subset)
dataset = dataloader_test.dataset.dataset
occupancy_idx = dataset.labels["Z"].index("occupancy")
occupancy = (dataset._x.numpy()[..., occupancy_idx].mean(axis=0) > 0.5).astype(float)
y_true_full = dataset._y[dataloader_test.dataset.indices].numpy()

for idx_label, (label, ax) in enumerate(zip(dataset.labels['X'], axes)):
    # Select output to plot
    y_true = y_true_full[..., idx_label]
    y_pred = predictions[..., idx_label]

    # Rescale
    y_true = dataset.rescale(y_true, idx_label)
    y_pred = dataset.rescale(y_pred, idx_label)

    # Compute delta, mean and std
    delta = np.abs(y_true - y_pred)

    mean = delta.mean(axis=0)
    std = delta.std(axis=0)

    # Plot
    # Labels for consumption and temperature
    if label.startswith('Q_'):
        y_label_unit = 'kW'
    else:
        y_label_unit = '°C'

    # Occupancy
    occupancy_idxes = np.where(np.diff(occupancy) != 0)[0]
    # Step over (start, end) pairs; the -1 guards against an unpaired final transition
    for idx in range(0, len(occupancy_idxes) - 1, 2):
        ax.axvspan(occupancy_idxes[idx], occupancy_idxes[idx+1], facecolor='green', alpha=.15)

    # Std
    ax.fill_between(np.arange(mean.shape[0]), (mean - std), (mean + std), alpha=.4, label='std')

    # Mean
    ax.plot(mean, label='mean')

    # Title and labels
    ax.set_title(label)
    ax.set_xlabel('time', fontsize=16)
    ax.set_ylabel(y_label_unit, fontsize=16)
    ax.legend()

plt.savefig('error_mean_std')
[Figure: per-output mean and standard deviation of absolute error over time, occupancy periods shaded]
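A compact numeric summary can accompany the plots; a sketch that averages the rescaled absolute error of each output over all samples and time steps:

for idx_label, label in enumerate(dataset.labels['X']):
    delta = np.abs(
        dataset.rescale(y_true_full[..., idx_label], idx_label)
        - dataset.rescale(predictions[..., idx_label], idx_label)
    )
    print(f"{label}: mean absolute error = {delta.mean():.3f}")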