
Commit

Clean up scripts
clberube committed Aug 5, 2022
1 parent 3990b47 commit 29faf79
Showing 13 changed files with 19 additions and 41 deletions.
2 changes: 1 addition & 1 deletion 0-create_data_LHS.py
@@ -4,7 +4,7 @@
# @Author: charles
# @Date: 2021-01-24 20:01:11
# @Last modified by: charles
# @Last modified time: 2021-01-24 20:01:57
# @Last modified time: 2022-08-05 15:08:83


import torch
2 changes: 1 addition & 1 deletion 1-plot_fits_learning_curves.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:27


import os
2 changes: 1 addition & 1 deletion 2-dimension_reduction.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:49


import os
2 changes: 1 addition & 1 deletion 3-sensitivity_analysis.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:85


import numpy as np
2 changes: 1 addition & 1 deletion 4a-parameter_estimation.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:85


import os
2 changes: 1 addition & 1 deletion 4b-cond_parameter_estimation.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:68


import string
2 changes: 1 addition & 1 deletion 5a-generative_para.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:30


import os
2 changes: 1 addition & 1 deletion 5b-cond_generative_para.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:74
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:99
# @Last modified time: 2022-08-05 15:08:85


import numpy as np
2 changes: 1 addition & 1 deletion autoencoders/__init__.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-09 15:09:95
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-14 13:09:49
# @Last modified time: 2022-08-05 15:08:12


from .lstm_ae import LSTM_AE
11 changes: 5 additions & 6 deletions autoencoders/reg_VAE.py
@@ -1,15 +1,14 @@
# @Author: charles
# @Date: 2021-05-15 15:05:15
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-05-15 15:05:38
# @Last modified time: 2022-08-05 15:08:10


import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.prune as prune

from utilities import softclip

@@ -39,11 +38,11 @@ def __init__(self, input_dim, num_hidden=3, hidden_dim=128, latent_dim=2,
self.log_sigma = torch.zeros([])

def reconstruction_loss(self, x_hat, x):
""" Computes the NLL of the data given the latent variable.
""" Computes the NLL of x given z.
"""
self.log_sigma = ((x - x_hat) ** 2).mean([0, 1], keepdim=True).sqrt().log()
# Learning the variance can become unstable in some cases. Softly limiting log_sigma to a minimum of -6
# ensures stable training.
# Softly limiting log_sigma to a minimum of -6
# ensures stable training. (not actually necessary here)
log_sigma = softclip(self.log_sigma, -6)
rec = self.gaussian_nll(x_hat, log_sigma, x).sum()
return rec
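For context, the reconstruction loss touched in this hunk follows the sigma-VAE pattern: a shared log_sigma is estimated from the batch residuals, soft-clipped from below, and used in a Gaussian negative log-likelihood. Below is a minimal standalone sketch of that pattern; the gaussian_nll body is not shown in this diff and is written here as the standard Gaussian NLL, so treat it as an assumption rather than the repository's exact code.

import math

import torch
import torch.nn.functional as F


def softclip(x, min_val):
    # Soft lower bound: the output approaches min_val smoothly instead of
    # being hard-clamped, which keeps gradients well-behaved.
    return min_val + F.softplus(x - min_val)


def gaussian_nll(x_hat, log_sigma, x):
    # Standard element-wise Gaussian negative log-likelihood (assumed form).
    return (0.5 * ((x - x_hat) / log_sigma.exp()) ** 2
            + log_sigma + 0.5 * math.log(2 * math.pi))


def reconstruction_loss(x_hat, x):
    # Estimate a shared log-sigma from the batch residuals (sigma-VAE style),
    # then softly limit it to -6 so learning the variance stays stable.
    log_sigma = ((x - x_hat) ** 2).mean([0, 1], keepdim=True).sqrt().log()
    log_sigma = softclip(log_sigma, -6)
    return gaussian_nll(x_hat, log_sigma, x).sum()
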
2 changes: 1 addition & 1 deletion plotlib.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-24 09:09:18
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-24 09:09:02
# @Last modified time: 2022-08-05 15:08:87


import numpy as np
2 changes: 1 addition & 1 deletion ppip_model.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 13:09:44
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 13:09:13
# @Last modified time: 2022-08-05 15:08:16


import numpy as np
27 changes: 3 additions & 24 deletions utilities.py
@@ -2,7 +2,7 @@
# @Date: 2021-09-08 14:09:79
# @Email: charles.berube@polymtl.ca
# @Last modified by: charles
# @Last modified time: 2021-09-08 14:09:28
# @Last modified time: 2022-08-05 15:08:61


import os
@@ -21,11 +21,8 @@
"ignore", message="Initializing zero-element tensors is a no-op")


def softclip(tensor, min):
""" Clips the tensor values at the minimum value min in a softway.
Taken from Handful of Trials """
result_tensor = min + F.softplus(tensor - min)
return result_tensor
def softclip(x, min):
return min + F.softplus(x - min)


def min_max_scale(x):
@@ -169,9 +166,6 @@ def train(model, train_loader, verbose, lr, n_epoch, device=None, beta=None,
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# if beta is None:
# beta = torch.ones(n_epoch)

train_losses = ['log_sigma', 'NLL', 'KLD', 'AUX', 'train']
valid_losses = ['valid']
grads = ['input_grad_total']
@@ -182,10 +176,6 @@

optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# reg_loss = torch.nn.MSELoss(reduction='sum')
# reg_loss = torch.nn.L1Loss(reduction='mean')
# mlp_loss = torch.nn.BCEWithLogitsLoss()

start_time = timer()
model.to(device)
for e in range(n_epoch):
@@ -205,10 +195,7 @@
Xp, mu, logvar, p = model(X, c)
if y.shape[-1] == 0:
AUX = torch.tensor(0)
# elif y.shape[-1] == 1:
# AUX = mlp_loss(p, y)
elif y.shape[-1] > 1:
# AUX = reg_loss(p, y)
AUX = model.reconstruction_loss(p, y)

NLL, KLD = model.vae_loss(Xp, X, mu, logvar)
@@ -221,8 +208,6 @@
running_loss['AUX'] += AUX.item()*X.size(0)
running_loss['train'] += total_loss.item()

# if (e + 1 == n_epoch) and (c.shape[-1] > 0):
# if (e == 0) and (c.shape[-1] > 0):
if c.shape[-1] > 0:
y_grad_total = torch.autograd.grad(
NLL, c, retain_graph=True)[0]
@@ -256,16 +241,10 @@
Xp, mu, logvar, p = model(X, c)
if y.shape[-1] == 0:
AUX = torch.tensor(0)
# elif y.shape[-1] == 1:
# AUX = mlp_loss(p, y)
elif y.shape[-1] > 1:
# AUX = reg_loss(p, y)
AUX = model.reconstruction_loss(p, y)

NLL, KLD = model.vae_loss(Xp, X, mu, logvar)
# total_loss = NLL + beta[e]*KLD + AUX
# NLL_scaled = X.shape[-1]*NLL/(2*logvar.exp())
# total_loss = X.shape[-1]*sigma.log() + NLL_scaled + KLD + AUX
total_loss = NLL + KLD + AUX
running_loss['valid'] += total_loss.item()

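For reference, the training step that these utilities.py hunks trim combines three terms (reconstruction NLL, KL divergence, and an auxiliary loss on the target variables) and, when conditional inputs are present, probes the gradient of the NLL with respect to them. Below is a minimal sketch of one such step; model.vae_loss and model.reconstruction_loss are taken from the diff, while the function name, argument shapes, and the rest are illustrative assumptions.

import torch


def train_step(model, optimizer, X, y, c):
    # Forward pass: reconstruction, latent statistics, auxiliary prediction.
    Xp, mu, logvar, p = model(X, c)

    # Auxiliary loss only when target variables are provided.
    if y.shape[-1] > 1:
        AUX = model.reconstruction_loss(p, y)
    else:
        AUX = torch.tensor(0.0)

    # Reconstruction NLL and KL divergence from the VAE loss.
    NLL, KLD = model.vae_loss(Xp, X, mu, logvar)
    total_loss = NLL + KLD + AUX

    # With conditional inputs (c must require gradients), record how strongly
    # the reconstruction NLL depends on them.
    input_grad = None
    if c.shape[-1] > 0:
        input_grad = torch.autograd.grad(NLL, c, retain_graph=True)[0]

    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    return total_loss.item(), input_grad
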
