This repository was archived by the owner on Aug 28, 2025. It is now read-only.
Merged
4 changes: 3 additions & 1 deletion .actions/helpers.py
@@ -148,6 +148,7 @@ def _meta_file(folder: str) -> str:
     @staticmethod
     def augment_script(fpath: str):
         """Add template header and footer to the python base script.
+
         Args:
             fpath: path to python script
         """
@@ -313,6 +314,7 @@ def parse_requirements(dir_path: str):
     @staticmethod
     def copy_notebooks(path_root: str, path_docs_ipynb: str = "docs/source/notebooks"):
         """Copy all notebooks from a folder to doc folder.
+
         Args:
             path_root: source path to the project root in this tutorials
             path_docs_ipynb: destination path to the notebooks location
@@ -362,7 +364,7 @@ def update_env_details(dir_path: str):
         req = [r.strip() for r in req]
 
         def _parse(pkg: str, keys: str = " <=>") -> str:
-            """Parsing just the package name"""
+            """Parsing just the package name."""
             if any(c in pkg for c in keys):
                 ix = min(pkg.index(c) for c in keys if c in pkg)
                 pkg = pkg[:ix]
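For context on the hunk above: `_parse` truncates a requirement string at the first space or version-specifier character, keeping just the package name. A standalone sketch of that behavior (the trailing `return pkg` is implied by the surrounding code but not shown in this hunk):

```python
def _parse(pkg: str, keys: str = " <=>") -> str:
    """Strip any version specifier, keeping just the package name."""
    if any(c in pkg for c in keys):
        ix = min(pkg.index(c) for c in keys if c in pkg)
        pkg = pkg[:ix]
    return pkg

assert _parse("torch>=1.8") == "torch"
assert _parse("numpy == 1.21") == "numpy"
assert _parse("matplotlib") == "matplotlib"
```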
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
@@ -29,6 +29,12 @@ repos:
         args: [--py36-plus]
         name: Upgrade code
 
+  - repo: https://github.com/myint/docformatter
+    rev: v1.4
+    hooks:
+      - id: docformatter
+        args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]
+
   - repo: https://github.com/PyCQA/isort
     rev: 5.9.2
     hooks:
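To illustrate the new hook, here is roughly what docformatter enforces, shown on a docstring from the diffs below. docformatter pulls the summary onto the opening quotes and inserts a blank line after it; the `Inputs:` to `Args:` renaming in this PR is a manual change on top of that.

```python
# Before: summary on its own line, dash-style parameter list.
def forward(self, x):
    """
    Forward image through model and return logits for each pixel.
    Inputs:
        x - Image tensor with integer values between 0 and 255.
    """

# After docformatter plus the manual Inputs -> Args conversion:
def forward(self, x):
    """Forward image through model and return logits for each pixel.

    Args:
        x: Image tensor with integer values between 0 and 255.
    """
```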
@@ -184,14 +184,14 @@ def show_imgs(imgs):
 class MaskedConvolution(nn.Module):
 
     def __init__(self, c_in, c_out, mask, **kwargs):
-        """
-        Implements a convolution with mask applied on its weights.
-        Inputs:
-            c_in - Number of input channels
-            c_out - Number of output channels
-            mask - Tensor of shape [kernel_size_H, kernel_size_W] with 0s where
+        """Implements a convolution with mask applied on its weights.
+
+        Args:
+            c_in: Number of input channels
+            c_out: Number of output channels
+            mask: Tensor of shape [kernel_size_H, kernel_size_W] with 0s where
                 the convolution should be masked, and 1s otherwise.
-            kwargs - Additional arguments for the convolution
+            kwargs: Additional arguments for the convolution
         """
         super().__init__()
         # For simplicity: calculate padding automatically
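To make the `mask` argument concrete: a small sketch of a 3x3 autoregressive mask (0 = masked, 1 = kept, per the docstring) that blocks the center pixel's right neighbors and every row below it; the instantiation at the end is hypothetical.

```python
import torch

kernel_size = 3
mask = torch.ones(kernel_size, kernel_size)         # start with nothing masked
mask[kernel_size // 2, kernel_size // 2 + 1:] = 0   # block pixels right of the center
mask[kernel_size // 2 + 1:, :] = 0                  # block all rows below the center
print(mask)
# tensor([[1., 1., 1.],
#         [1., 1., 0.],
#         [0., 0., 0.]])
# conv = MaskedConvolution(c_in=1, c_out=8, mask=mask)  # hypothetical usage
```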
@@ -290,12 +290,12 @@ def __init__(self, c_in, c_out, kernel_size=3, mask_center=False, **kwargs):


 def show_center_recep_field(img, out):
-    """
-    Calculates the gradients of the input with respect to the output center pixel,
-    and visualizes the overall receptive field.
-    Inputs:
-        img - Input image for which we want to calculate the receptive field on.
-        out - Output features/loss which is used for backpropagation, and should be
+    """Calculates the gradients of the input with respect to the output center pixel, and visualizes the overall
+    receptive field.
+
+    Args:
+        img: Input image for which we want to calculate the receptive field on.
+        out: Output features/loss which is used for backpropagation, and should be
             the output of the network/computation graph.
     """
     # Determine gradients
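A minimal sketch of the gradient trick this docstring describes, assuming `img` was created with `requires_grad=True` and `out` has shape [1, C, H, W]:

```python
import torch

def center_receptive_field(img, out):
    # Backpropagate only from the center output pixel, summed over channels.
    img.grad = None
    h, w = out.shape[2], out.shape[3]
    out[0, :, h // 2, w // 2].sum().backward(retain_graph=True)
    # Every input position with a non-zero gradient lies in the receptive field.
    return img.grad[0].abs().sum(dim=0) > 0
```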
@@ -476,9 +476,7 @@ def show_center_recep_field(img, out):
 class GatedMaskedConv(nn.Module):
 
     def __init__(self, c_in, **kwargs):
-        """
-        Gated Convolution block implemented the computation graph shown above.
-        """
+        """Gated Convolution block implemented the computation graph shown above."""
         super().__init__()
         self.conv_vert = VerticalStackConvolution(c_in, c_out=2 * c_in, **kwargs)
         self.conv_horiz = HorizontalStackConvolution(c_in, c_out=2 * c_in, **kwargs)
@@ -558,10 +556,10 @@ def __init__(self, c_in, c_hidden):
         self.example_input_array = train_set[0][0][None]
 
     def forward(self, x):
-        """
-        Forward image through model and return logits for each pixel.
-        Inputs:
-            x - Image tensor with integer values between 0 and 255.
+        """Forward image through model and return logits for each pixel.
+
+        Args:
+            x: Image tensor with integer values between 0 and 255.
         """
         # Scale input from 0 to 255 back to -1 to 1
         x = (x.float() / 255.0) * 2 - 1
@@ -589,11 +587,11 @@ def calc_likelihood(self, x):

     @torch.no_grad()
     def sample(self, img_shape, img=None):
-        """
-        Sampling function for the autoregressive model.
-        Inputs:
-            img_shape - Shape of the image to generate (B,C,H,W)
-            img (optional) - If given, this tensor will be used as
+        """Sampling function for the autoregressive model.
+
+        Args:
+            img_shape: Shape of the image to generate (B,C,H,W)
+            img (optional): If given, this tensor will be used as
                 a starting image. The pixels to fill
                 should be -1 in the input tensor.
         """
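The `sample` docstring above describes pixel-by-pixel generation. A simplified sketch of such a loop, under the assumptions that the model returns logits of shape [B, 256, C, H, W] (matching `forward` above) and that -1 marks pixels still to fill:

```python
import torch

@torch.no_grad()
def sample_sketch(model, img_shape, img=None):
    B, C, H, W = img_shape
    if img is None:
        img = torch.full(img_shape, -1, dtype=torch.long)
    for h in range(H):
        for w in range(W):
            for c in range(C):
                if (img[:, c, h, w] != -1).all():
                    continue  # keep pixels provided in the starting image
                logits = model(img.clamp(min=0))[:, :, c, h, w]  # [B, 256]
                probs = torch.softmax(logits.float(), dim=-1)
                img[:, c, h, w] = torch.multinomial(probs, num_samples=1).squeeze(-1)
    return img
```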
28 changes: 12 additions & 16 deletions course_UvA-DL/deep-autoencoders/Deep_Autoencoders.py
@@ -133,11 +133,11 @@ def __init__(
         self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU
     ):
         """
-        Inputs:
-            - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
-            - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
-            - latent_dim : Dimensionality of latent representation z
-            - act_fn : Activation function used throughout the encoder network
+        Args:
+            num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
+            base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
+            latent_dim : Dimensionality of latent representation z
+            act_fn : Activation function used throughout the encoder network
         """
         super().__init__()
         c_hid = base_channel_size
@@ -195,11 +195,11 @@ def __init__(
         self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU
     ):
         """
-        Inputs:
-            - num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
-            - base_channel_size : Number of channels we use in the last convolutional layers. Early layers might use a duplicate of it.
-            - latent_dim : Dimensionality of latent representation z
-            - act_fn : Activation function used throughout the decoder network
+        Args:
+            num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
+            base_channel_size : Number of channels we use in the last convolutional layers. Early layers might use a duplicate of it.
+            latent_dim : Dimensionality of latent representation z
+            act_fn : Activation function used throughout the decoder network
         """
         super().__init__()
         c_hid = base_channel_size
@@ -263,17 +263,13 @@ def __init__(
         self.example_input_array = torch.zeros(2, num_input_channels, width, height)
 
     def forward(self, x):
-        """
-        The forward function takes in an image and returns the reconstructed image
-        """
+        """The forward function takes in an image and returns the reconstructed image."""
         z = self.encoder(x)
         x_hat = self.decoder(z)
         return x_hat
 
     def _get_reconstruction_loss(self, batch):
-        """
-        Given a batch of images, this function returns the reconstruction loss (MSE in our case)
-        """
+        """Given a batch of images, this function returns the reconstruction loss (MSE in our case)"""
         x, _ = batch  # We do not need the labels
         x_hat = self.forward(x)
         loss = F.mse_loss(x, x_hat, reduction="none")
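The hunk is cut off after the per-element loss; a plausible continuation (an assumption, not shown in this diff) sums the error over the pixel dimensions and averages over the batch:

```python
import torch
import torch.nn.functional as F

x = torch.rand(8, 3, 32, 32)      # hypothetical batch of CIFAR-sized images
x_hat = torch.rand(8, 3, 32, 32)  # hypothetical reconstructions
loss = F.mse_loss(x, x_hat, reduction="none")  # per-element errors, as above
loss = loss.sum(dim=[1, 2, 3]).mean(dim=[0])   # per-image sum, then batch mean
```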
@@ -333,11 +333,11 @@ class Sampler:

     def __init__(self, model, img_shape, sample_size, max_len=8192):
         """
-        Inputs:
-            model - Neural network to use for modeling E_theta
-            img_shape - Shape of the images to model
-            sample_size - Batch size of the samples
-            max_len - Maximum number of data points to keep in the buffer
+        Args:
+            model: Neural network to use for modeling E_theta
+            img_shape: Shape of the images to model
+            sample_size: Batch size of the samples
+            max_len: Maximum number of data points to keep in the buffer
         """
         super().__init__()
         self.model = model
@@ -347,11 +347,11 @@ def __init__(self, model, img_shape, sample_size, max_len=8192):
         self.examples = [(torch.rand((1, ) + img_shape) * 2 - 1) for _ in range(self.sample_size)]
 
     def sample_new_exmps(self, steps=60, step_size=10):
-        """
-        Function for getting a new batch of "fake" images.
-        Inputs:
-            steps - Number of iterations in the MCMC algorithm
-            step_size - Learning rate nu in the algorithm above
+        """Function for getting a new batch of "fake" images.
+
+        Args:
+            steps: Number of iterations in the MCMC algorithm
+            step_size: Learning rate nu in the algorithm above
         """
         # Choose 95% of the batch from the buffer, 5% generate from scratch
         n_new = np.random.binomial(self.sample_size, 0.05)
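The comment and the `np.random.binomial` call sketch the buffer strategy; a self-contained illustration of that 95%/5% split, with hypothetical sizes and the `examples` list initialized as in `__init__` above:

```python
import numpy as np
import torch

sample_size, img_shape = 64, (1, 28, 28)
examples = [torch.rand((1,) + img_shape) * 2 - 1 for _ in range(256)]  # replay buffer

n_new = np.random.binomial(sample_size, 0.05)          # on average 5% fresh noise
rand_imgs = torch.rand((n_new,) + img_shape) * 2 - 1   # new images in [-1, 1]
old_idx = np.random.choice(len(examples), sample_size - n_new)
old_imgs = torch.cat([examples[i] for i in old_idx], dim=0)
inp_imgs = torch.cat([rand_imgs, old_imgs], dim=0)     # batch handed to the MCMC
```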
@@ -369,14 +369,14 @@ def __init__(self, model, img_shape, sample_size, max_len=8192):

     @staticmethod
     def generate_samples(model, inp_imgs, steps=60, step_size=10, return_img_per_step=False):
-        """
-        Function for sampling images for a given model.
-        Inputs:
-            model - Neural network to use for modeling E_theta
-            inp_imgs - Images to start from for sampling. If you want to generate new images, enter noise between -1 and 1.
-            steps - Number of iterations in the MCMC algorithm.
-            step_size - Learning rate nu in the algorithm above
-            return_img_per_step - If True, we return the sample at every iteration of the MCMC
+        """Function for sampling images for a given model.
+
+        Args:
+            model: Neural network to use for modeling E_theta
+            inp_imgs: Images to start from for sampling. If you want to generate new images, enter noise between -1 and 1.
+            steps: Number of iterations in the MCMC algorithm.
+            step_size: Learning rate nu in the algorithm above
+            return_img_per_step: If True, we return the sample at every iteration of the MCMC
         """
         # Before MCMC: set model parameters to "required_grad=False"
         # because we are only interested in the gradients of the input.
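For the MCMC loop the `generate_samples` docstring refers to, a minimal Langevin-dynamics sketch; the noise scale, gradient clamping, and energy sign convention are assumptions, not taken from this diff:

```python
import torch

def langevin_sketch(model, inp_imgs, steps=60, step_size=10, noise_std=0.005):
    # The surrounding code also freezes model parameters first (see comment above);
    # this sketch only shows the update on the input images.
    imgs = inp_imgs.clone().requires_grad_(True)
    for _ in range(steps):
        # Add a small amount of noise, keeping images in the valid range.
        imgs.data.add_(torch.randn_like(imgs) * noise_std).clamp_(min=-1.0, max=1.0)
        energy = -model(imgs).sum()          # E_theta; sign convention assumed
        energy.backward()
        imgs.grad.data.clamp_(-0.03, 0.03)   # stabilize the update (assumed)
        imgs.data.add_(-step_size * imgs.grad.data)  # gradient step on the input
        imgs.grad.detach_().zero_()
        imgs.data.clamp_(min=-1.0, max=1.0)
    return imgs.detach()
```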
84 changes: 42 additions & 42 deletions course_UvA-DL/graph-neural-networks/GNN_overview.py
@@ -172,9 +172,9 @@ def __init__(self, c_in, c_out):

     def forward(self, node_feats, adj_matrix):
         """
-        Inputs:
-            node_feats - Tensor with node features of shape [batch_size, num_nodes, c_in]
-            adj_matrix - Batch of adjacency matrices of the graph. If there is an edge from i to j,
+        Args:
+            node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
+            adj_matrix: Batch of adjacency matrices of the graph. If there is an edge from i to j,
                 adj_matrix[b,i,j]=1 else 0. Supports directed edges by non-symmetric matrices.
                 Assumes to already have added the identity connections.
                 Shape: [batch_size, num_nodes, num_nodes]
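Given the documented shapes, the layer plausibly performs dense message passing like the following sketch (the mean aggregation and the name `projection` are assumptions based on standard GCN practice):

```python
import torch
import torch.nn as nn

batch_size, num_nodes, c_in, c_out = 1, 4, 2, 2
node_feats = torch.arange(8, dtype=torch.float32).view(batch_size, num_nodes, c_in)
adj_matrix = torch.eye(num_nodes).unsqueeze(0)  # identity connections only, for brevity

projection = nn.Linear(c_in, c_out)
num_neighbours = adj_matrix.sum(dim=-1, keepdim=True)             # degree per node
out = torch.bmm(adj_matrix, projection(node_feats)) / num_neighbours
print(out.shape)  # torch.Size([1, 4, 2])
```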
@@ -302,13 +302,13 @@ class GATLayer(nn.Module):

     def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):
         """
-        Inputs:
-            c_in - Dimensionality of input features
-            c_out - Dimensionality of output features
-            num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The
+        Args:
+            c_in: Dimensionality of input features
+            c_out: Dimensionality of output features
+            num_heads: Number of heads, i.e. attention mechanisms to apply in parallel. The
                 output features are equally split up over the heads if concat_heads=True.
-            concat_heads - If True, the output of the different heads is concatenated instead of averaged.
-            alpha - Negative slope of the LeakyReLU activation.
+            concat_heads: If True, the output of the different heads is concatenated instead of averaged.
+            alpha: Negative slope of the LeakyReLU activation.
         """
         super().__init__()
         self.num_heads = num_heads
@@ -328,10 +328,10 @@ def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):

     def forward(self, node_feats, adj_matrix, print_attn_probs=False):
         """
-        Inputs:
-            node_feats - Input features of the node. Shape: [batch_size, c_in]
-            adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes]
-            print_attn_probs - If True, the attention weights are printed during the forward pass
+        Args:
+            node_feats: Input features of the node. Shape: [batch_size, c_in]
+            adj_matrix: Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes]
+            print_attn_probs: If True, the attention weights are printed during the forward pass
                 (for debugging purposes)
         """
         batch_size, num_nodes = node_feats.size(0), node_feats.size(1)
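A hypothetical shape check for this layer. Note the forward body reads `node_feats.size(1)` as `num_nodes`, so node features are actually batched as [batch_size, num_nodes, c_in]:

```python
import torch

layer = GATLayer(c_in=2, c_out=2, num_heads=2, concat_heads=False)
node_feats = torch.arange(8, dtype=torch.float32).view(1, 4, 2)
adj_matrix = torch.Tensor([[[1, 1, 0, 0],
                            [1, 1, 1, 1],
                            [0, 1, 1, 1],
                            [0, 1, 1, 1]]])
out = layer(node_feats, adj_matrix, print_attn_probs=True)
print(out.shape)  # torch.Size([1, 4, 2]) with concat_heads=False
```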
@@ -507,14 +507,14 @@ def __init__(
         **kwargs,
     ):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of the output features. Usually number of classes in classification
-            num_layers - Number of "hidden" graph layers
-            layer_name - String of the graph layer to use
-            dp_rate - Dropout rate to apply throughout the network
-            kwargs - Additional arguments for the graph layer (e.g. number of heads for GAT)
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of the output features. Usually number of classes in classification
+            num_layers: Number of "hidden" graph layers
+            layer_name: String of the graph layer to use
+            dp_rate: Dropout rate to apply throughout the network
+            kwargs: Additional arguments for the graph layer (e.g. number of heads for GAT)
         """
         super().__init__()
         gnn_layer = gnn_layer_by_name[layer_name]
@@ -533,9 +533,9 @@ def __init__(

     def forward(self, x, edge_index):
         """
-        Inputs:
-            x - Input features per node
-            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
+        Args:
+            x: Input features per node
+            edge_index: List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
         """
         for layer in self.layers:
             # For graph layers, we need to add the "edge_index" tensor as additional input
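For readers unfamiliar with the PyTorch Geometric notation mentioned here: `edge_index` is a [2, num_edges] tensor of (source, target) index pairs, e.g.:

```python
import torch

edge_index = torch.tensor(
    [[0, 1, 1, 2],   # source nodes
     [1, 0, 2, 1]],  # target nodes
    dtype=torch.long,
)
x = torch.randn(3, 16)  # 3 nodes with 16 features each (hypothetical sizes)
# out = model(x, edge_index)  # forward pass as defined above
```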
@@ -560,12 +560,12 @@ class MLPModel(nn.Module):

     def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of the output features. Usually number of classes in classification
-            num_layers - Number of hidden layers
-            dp_rate - Dropout rate to apply throughout the network
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of the output features. Usually number of classes in classification
+            num_layers: Number of hidden layers
+            dp_rate: Dropout rate to apply throughout the network
         """
         super().__init__()
         layers = []
@@ -578,8 +578,8 @@ def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):

     def forward(self, x, *args, **kwargs):
         """
-        Inputs:
-            x - Input features per node
+        Args:
+            x: Input features per node
         """
         return self.layers(x)

@@ -858,12 +858,12 @@ class GraphGNNModel(nn.Module):

     def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of output features (usually number of classes)
-            dp_rate_linear - Dropout rate before the linear layer (usually much higher than inside the GNN)
-            kwargs - Additional arguments for the GNNModel object
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of output features (usually number of classes)
+            dp_rate_linear: Dropout rate before the linear layer (usually much higher than inside the GNN)
+            kwargs: Additional arguments for the GNNModel object
         """
         super().__init__()
         self.GNN = GNNModel(
@@ -876,10 +876,10 @@ def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):

     def forward(self, x, edge_index, batch_idx):
         """
-        Inputs:
-            x - Input features per node
-            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
-            batch_idx - Index of batch element for each node
+        Args:
+            x: Input features per node
+            edge_index: List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
+            batch_idx: Index of batch element for each node
         """
         x = self.GNN(x, edge_index)
         x = geom_nn.global_mean_pool(x, batch_idx)  # Average pooling
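To illustrate the pooling call at the end of this forward: `batch_idx` assigns each node to its graph, so nodes from different graphs in one batch are averaged separately.

```python
import torch
from torch_geometric.nn import global_mean_pool

x = torch.tensor([[1.0], [3.0], [10.0]])  # 3 nodes, 1 feature each
batch_idx = torch.tensor([0, 0, 1])       # nodes 0-1 in graph 0, node 2 in graph 1
print(global_mean_pool(x, batch_idx))     # tensor([[ 2.], [10.]])
```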