This repository was archived by the owner on Aug 28, 2025. It is now read-only.
Merged
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -23,7 +23,7 @@ repos:
- id: detect-private-key

- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
rev: v2.4.1
hooks:
- id: codespell
additional_dependencies: [tomli]
@@ -45,7 +45,7 @@ repos:
args: ["--print-width=120"]

- repo: https://github.com/executablebooks/mdformat
rev: 0.7.21
rev: 0.7.22
hooks:
- id: mdformat
additional_dependencies:
@@ -55,7 +55,7 @@
args: ["--number"]

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.6
rev: v0.11.4
hooks:
# try to fix what is possible
- id: ruff
2 changes: 1 addition & 1 deletion _docs/source/conf.py
@@ -201,7 +201,7 @@
project + " Documentation",
author,
project,
"" "Miscellaneous", # about.__docs__,
"Miscellaneous", # about.__docs__,
),
]

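A side note, not part of the PR: the conf.py change above only drops a redundant empty string literal. Python concatenates adjacent string literals at parse time, so the resulting value is unchanged. A quick illustration:

# Adjacent string literals are merged by the parser, so the leading "" contributes nothing.
assert "" "Miscellaneous" == "Miscellaneous"
category = "" "Miscellaneous"  # identical to category = "Miscellaneous"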
4 changes: 2 additions & 2 deletions course_UvA-DL/01-introduction-to-pytorch/notebook.py
@@ -883,7 +883,7 @@ def train_model(model, optimizer, data_loader, loss_module, num_epochs=100):
#
# $$acc = \frac{\#\text{correct predictions}}{\#\text{all predictions}} = \frac{TP+TN}{TP+TN+FP+FN}$$
#
# where TP are the true positives, TN true negatives, FP false positives, and FN the fale negatives.
# where TP are the true positives, TN true negatives, FP false positives, and FN the false negatives.
#
# When evaluating the model, we don't need to keep track of the computation graph as we don't intend to calculate the gradients.
# This reduces the required memory and speed up the model.
@@ -910,7 +910,7 @@ def eval_model(model, data_loader):
num_preds += data_labels.shape[0]

acc = true_preds / num_preds
print(f"Accuracy of the model: {100.0*acc:4.2f}%")
print(f"Accuracy of the model: {100.0 * acc:4.2f}%")


# %%
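Not part of the PR: the eval_model hunk above belongs to the notebook's evaluation code, which computes accuracy without building a computation graph. A minimal sketch of that pattern, assuming a multi-class classifier and a standard PyTorch DataLoader (the notebook's own eval_model may differ in how it turns outputs into predictions):

import torch

@torch.no_grad()  # no computation graph is recorded: lower memory use, faster evaluation
def accuracy(model, data_loader, device="cpu"):
    """Fraction of correct predictions, i.e. (TP + TN) / (TP + TN + FP + FN) in the binary case."""
    model.eval()
    true_preds, num_preds = 0, 0
    for data_inputs, data_labels in data_loader:
        data_inputs, data_labels = data_inputs.to(device), data_labels.to(device)
        preds = model(data_inputs).argmax(dim=-1)  # pick the class with the highest score
        true_preds += (preds == data_labels).sum().item()
        num_preds += data_labels.shape[0]
    return true_preds / num_preds

# Usage: print(f"Accuracy of the model: {100.0 * accuracy(model, test_loader):4.2f}%")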
18 changes: 9 additions & 9 deletions course_UvA-DL/02-activation-functions/notebook.py
@@ -343,12 +343,12 @@ def load_model(model_path, model_name, net=None):

"""
config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)
assert os.path.isfile(
config_file
), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
assert os.path.isfile(
model_file
), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
assert os.path.isfile(config_file), (
f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
)
assert os.path.isfile(model_file), (
f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
)
with open(config_file) as f:
config_dict = json.load(f)
if net is None:
Expand Down Expand Up @@ -548,7 +548,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
############
net.train()
true_preds, count = 0.0, 0
for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch+1}", leave=False):
for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch + 1}", leave=False):
imgs, labels = imgs.to(device), labels.to(device) # To GPU
optimizer.zero_grad() # Zero-grad can be placed anywhere before "loss.backward()"
preds = net(imgs)
@@ -566,7 +566,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
val_acc = test_model(net, val_loader)
val_scores.append(val_acc)
print(
f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
)

if len(val_scores) == 1 or val_acc > val_scores[best_val_epoch]:
@@ -587,7 +587,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over

load_model(CHECKPOINT_PATH, model_name, net=net)
test_acc = test_model(net, test_loader)
print((f" Test accuracy: {test_acc*100.0:4.2f}% ").center(50, "=") + "\n")
print((f" Test accuracy: {test_acc * 100.0:4.2f}% ").center(50, "=") + "\n")
return test_acc


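Not part of the PR: the train_model hunks above come from a loop that tracks validation accuracy, keeps the best epoch, and takes a patience argument for early stopping. A compact, self-contained sketch of that pattern, with illustrative names rather than the notebook's exact code:

def train_with_patience(train_one_epoch, validate, max_epochs=50, patience=7):
    """Stop once validation accuracy has not improved for `patience` consecutive epochs."""
    val_scores, best_epoch = [], 0
    for epoch in range(max_epochs):
        train_one_epoch(epoch)
        val_scores.append(validate())
        if val_scores[-1] > val_scores[best_epoch]:
            best_epoch = epoch  # new best model; the real loop saves a checkpoint here
        elif epoch - best_epoch >= patience:
            print(f"Early stopping at epoch {epoch + 1}")
            break
    return best_epoch, val_scores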
24 changes: 12 additions & 12 deletions course_UvA-DL/03-initialization-and-optimization/notebook.py
@@ -580,19 +580,19 @@ def _get_result_file(model_path, model_name):
def load_model(model_path, model_name, net=None):
config_file = _get_config_file(model_path, model_name)
model_file = _get_model_file(model_path, model_name)
assert os.path.isfile(
config_file
), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
assert os.path.isfile(
model_file
), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
assert os.path.isfile(config_file), (
f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
)
assert os.path.isfile(model_file), (
f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
)
with open(config_file) as f:
config_dict = json.load(f)
if net is None:
act_fn_name = config_dict["act_fn"].pop("name").lower()
assert (
act_fn_name in act_fn_by_name
), f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
assert act_fn_name in act_fn_by_name, (
f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
)
act_fn = act_fn_by_name[act_fn_name]()
net = BaseNetwork(act_fn=act_fn, **config_dict)
net.load_state_dict(torch.load(model_file))
@@ -678,7 +678,7 @@ def train_model(net, model_name, optim_func, max_epochs=50, batch_size=256, over
plt.show()
plt.close()

print((f" Test accuracy: {results['test_acc']*100.0:4.2f}% ").center(50, "=") + "\n")
print((f" Test accuracy: {results['test_acc'] * 100.0:4.2f}% ").center(50, "=") + "\n")
return results


@@ -700,7 +700,7 @@ def epoch_iteration(net, loss_module, optimizer, train_loader_local, val_loader,
# Record statistics during training
true_preds += (preds.argmax(dim=-1) == labels).sum().item()
count += labels.shape[0]
t.set_description(f"Epoch {epoch+1}: loss={loss.item():4.2f}")
t.set_description(f"Epoch {epoch + 1}: loss={loss.item():4.2f}")
epoch_losses.append(loss.item())
train_acc = true_preds / count

@@ -709,7 +709,7 @@ def epoch_iteration(net, loss_module, optimizer, train_loader_local, val_loader,
##############
val_acc = test_model(net, val_loader)
print(
f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
)
return train_acc, val_acc, epoch_losses

4 changes: 2 additions & 2 deletions course_UvA-DL/04-inception-resnet-densenet/notebook.py
@@ -1183,8 +1183,8 @@ def forward(self, x):
table = [
[
model_name,
f"{100.0*model_results['val']:4.2f}%",
f"{100.0*model_results['test']:4.2f}%",
f"{100.0 * model_results['val']:4.2f}%",
f"{100.0 * model_results['test']:4.2f}%",
f"{sum(np.prod(p.shape) for p in model.parameters()):,}",
]
for model_name, model_results, model in all_models
6 changes: 3 additions & 3 deletions course_UvA-DL/13-contrastive-learning/SimCLR.py
@@ -642,7 +642,7 @@ def get_smaller_dataset(original_dataset, num_imgs_per_label):
plt.show()

for k, score in zip(dataset_sizes, test_scores):
print(f"Test accuracy for {k:3d} images per label: {100*score:4.2f}%")
print(f"Test accuracy for {k:3d} images per label: {100 * score:4.2f}%")

# %% [markdown]
# As one would expect, the classification performance improves the more data we have.
@@ -781,8 +781,8 @@ def train_resnet(batch_size, max_epochs=100, **kwargs):

# %%
resnet_model, resnet_result = train_resnet(batch_size=64, num_classes=10, lr=1e-3, weight_decay=2e-4, max_epochs=100)
print(f"Accuracy on training set: {100*resnet_result['train']:4.2f}%")
print(f"Accuracy on test set: {100*resnet_result['test']:4.2f}%")
print(f"Accuracy on training set: {100 * resnet_result['train']:4.2f}%")
print(f"Accuracy on test set: {100 * resnet_result['test']:4.2f}%")

# %% [markdown]
# The ResNet trained from scratch achieves 73.31% on the test set.