diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 29901406b..cd4ef0c6f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -23,7 +23,7 @@ repos:
       - id: detect-private-key
 
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.3.0
+    rev: v2.4.1
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
@@ -45,7 +45,7 @@ repos:
         args: ["--print-width=120"]
 
   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.21
+    rev: 0.7.22
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -55,7 +55,7 @@ repos:
         args: ["--number"]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.6
+    rev: v0.11.4
    hooks:
       # try to fix what is possible
       - id: ruff
diff --git a/_docs/source/conf.py b/_docs/source/conf.py
index f37d8ecb2..c23630734 100644
--- a/_docs/source/conf.py
+++ b/_docs/source/conf.py
@@ -201,7 +201,7 @@
         project + " Documentation",
         author,
         project,
-        "" "Miscellaneous",  # about.__docs__,
+        "Miscellaneous",  # about.__docs__,
     ),
 ]
 
diff --git a/course_UvA-DL/01-introduction-to-pytorch/notebook.py b/course_UvA-DL/01-introduction-to-pytorch/notebook.py
index 0aef3eb94..807e16a33 100644
--- a/course_UvA-DL/01-introduction-to-pytorch/notebook.py
+++ b/course_UvA-DL/01-introduction-to-pytorch/notebook.py
@@ -883,7 +883,7 @@ def train_model(model, optimizer, data_loader, loss_module, num_epochs=100):
 #
 # $$acc = \frac{\#\text{correct predictions}}{\#\text{all predictions}} = \frac{TP+TN}{TP+TN+FP+FN}$$
 #
-# where TP are the true positives, TN true negatives, FP false positives, and FN the fale negatives.
+# where TP are the true positives, TN true negatives, FP false positives, and FN the false negatives.
 #
 # When evaluating the model, we don't need to keep track of the computation graph as we don't intend to calculate the gradients.
 # This reduces the required memory and speed up the model.
@@ -910,7 +910,7 @@ def eval_model(model, data_loader):
             num_preds += data_labels.shape[0]
 
     acc = true_preds / num_preds
-    print(f"Accuracy of the model: {100.0*acc:4.2f}%")
+    print(f"Accuracy of the model: {100.0 * acc:4.2f}%")
 
 
 # %%
diff --git a/course_UvA-DL/02-activation-functions/notebook.py b/course_UvA-DL/02-activation-functions/notebook.py
index 2591de973..7775687e3 100644
--- a/course_UvA-DL/02-activation-functions/notebook.py
+++ b/course_UvA-DL/02-activation-functions/notebook.py
@@ -343,12 +343,12 @@ def load_model(model_path, model_name, net=None):
 
     """
     config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)
-    assert os.path.isfile(
-        config_file
-    ), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
-    assert os.path.isfile(
-        model_file
-    ), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    assert os.path.isfile(config_file), (
+        f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
+    )
+    assert os.path.isfile(model_file), (
+        f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    )
     with open(config_file) as f:
         config_dict = json.load(f)
     if net is None:
@@ -548,7 +548,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
         ############
         net.train()
         true_preds, count = 0.0, 0
-        for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch+1}", leave=False):
+        for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch + 1}", leave=False):
             imgs, labels = imgs.to(device), labels.to(device)  # To GPU
             optimizer.zero_grad()  # Zero-grad can be placed anywhere before "loss.backward()"
             preds = net(imgs)
@@ -566,7 +566,7 @@
         val_acc = test_model(net, val_loader)
         val_scores.append(val_acc)
         print(
-            f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
+            f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
         )
 
         if len(val_scores) == 1 or val_acc > val_scores[best_val_epoch]:
@@ -587,7 +587,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
 
     load_model(CHECKPOINT_PATH, model_name, net=net)
     test_acc = test_model(net, test_loader)
-    print((f" Test accuracy: {test_acc*100.0:4.2f}% ").center(50, "=") + "\n")
+    print((f" Test accuracy: {test_acc * 100.0:4.2f}% ").center(50, "=") + "\n")
     return test_acc
 
 
diff --git a/course_UvA-DL/03-initialization-and-optimization/notebook.py b/course_UvA-DL/03-initialization-and-optimization/notebook.py
index 9a1f878ea..6373c7239 100644
--- a/course_UvA-DL/03-initialization-and-optimization/notebook.py
+++ b/course_UvA-DL/03-initialization-and-optimization/notebook.py
@@ -580,19 +580,19 @@ def _get_result_file(model_path, model_name):
 def load_model(model_path, model_name, net=None):
     config_file = _get_config_file(model_path, model_name)
     model_file = _get_model_file(model_path, model_name)
-    assert os.path.isfile(
-        config_file
-    ), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
-    assert os.path.isfile(
-        model_file
-    ), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    assert os.path.isfile(config_file), (
+        f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
+    )
+    assert os.path.isfile(model_file), (
+        f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    )
     with open(config_file) as f:
         config_dict = json.load(f)
     if net is None:
         act_fn_name = config_dict["act_fn"].pop("name").lower()
-        assert (
-            act_fn_name in act_fn_by_name
-        ), f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
+        assert act_fn_name in act_fn_by_name, (
+            f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
+        )
         act_fn = act_fn_by_name[act_fn_name]()
         net = BaseNetwork(act_fn=act_fn, **config_dict)
     net.load_state_dict(torch.load(model_file))
@@ -678,7 +678,7 @@ def train_model(net, model_name, optim_func, max_epochs=50, batch_size=256, over
         plt.show()
         plt.close()
 
-    print((f" Test accuracy: {results['test_acc']*100.0:4.2f}% ").center(50, "=") + "\n")
+    print((f" Test accuracy: {results['test_acc'] * 100.0:4.2f}% ").center(50, "=") + "\n")
     return results
 
 
@@ -700,7 +700,7 @@ def epoch_iteration(net, loss_module, optimizer, train_loader_local, val_loader,
         # Record statistics during training
         true_preds += (preds.argmax(dim=-1) == labels).sum().item()
         count += labels.shape[0]
-        t.set_description(f"Epoch {epoch+1}: loss={loss.item():4.2f}")
+        t.set_description(f"Epoch {epoch + 1}: loss={loss.item():4.2f}")
         epoch_losses.append(loss.item())
 
     train_acc = true_preds / count
@@ -709,7 +709,7 @@
     ##############
     val_acc = test_model(net, val_loader)
     print(
-        f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
+        f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
     )
 
     return train_acc, val_acc, epoch_losses
diff --git a/course_UvA-DL/04-inception-resnet-densenet/notebook.py b/course_UvA-DL/04-inception-resnet-densenet/notebook.py
index c33e5bae5..5bc6249f4 100644
--- a/course_UvA-DL/04-inception-resnet-densenet/notebook.py
+++ b/course_UvA-DL/04-inception-resnet-densenet/notebook.py
@@ -1183,8 +1183,8 @@ def forward(self, x):
 table = [
     [
         model_name,
-        f"{100.0*model_results['val']:4.2f}%",
-        f"{100.0*model_results['test']:4.2f}%",
+        f"{100.0 * model_results['val']:4.2f}%",
+        f"{100.0 * model_results['test']:4.2f}%",
         f"{sum(np.prod(p.shape) for p in model.parameters()):,}",
     ]
     for model_name, model_results, model in all_models
diff --git a/course_UvA-DL/13-contrastive-learning/SimCLR.py b/course_UvA-DL/13-contrastive-learning/SimCLR.py
index 67e67a120..8e499d359 100644
--- a/course_UvA-DL/13-contrastive-learning/SimCLR.py
+++ b/course_UvA-DL/13-contrastive-learning/SimCLR.py
@@ -642,7 +642,7 @@ def get_smaller_dataset(original_dataset, num_imgs_per_label):
 plt.show()
 
 for k, score in zip(dataset_sizes, test_scores):
-    print(f"Test accuracy for {k:3d} images per label: {100*score:4.2f}%")
+    print(f"Test accuracy for {k:3d} images per label: {100 * score:4.2f}%")
 
 # %% [markdown]
 # As one would expect, the classification performance improves the more data we have.
@@ -781,8 +781,8 @@ def train_resnet(batch_size, max_epochs=100, **kwargs):
 
 # %%
 resnet_model, resnet_result = train_resnet(batch_size=64, num_classes=10, lr=1e-3, weight_decay=2e-4, max_epochs=100)
-print(f"Accuracy on training set: {100*resnet_result['train']:4.2f}%")
-print(f"Accuracy on test set: {100*resnet_result['test']:4.2f}%")
+print(f"Accuracy on training set: {100 * resnet_result['train']:4.2f}%")
+print(f"Accuracy on test set: {100 * resnet_result['test']:4.2f}%")
 
 # %% [markdown]
 # The ResNet trained from scratch achieves 73.31% on the test set.