-
Notifications
You must be signed in to change notification settings - Fork 149
/
5.3-part2-lightning-torchmetrics.py
91 lines (70 loc) · 2.84 KB
/
5.3-part2-lightning-torchmetrics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# Unit 5.3. Computing Metrics Efficiently with TorchMetrics
# Part 2. A Lightning Module with TorchMetrics
import lightning as L
import torch
import torch.nn.functional as F
import torchmetrics
from shared_utilities import PyTorchMLP, get_dataset_loaders
from watermark import watermark
class LightningModel(L.LightningModule):
    """Lightning wrapper around a plain PyTorch classifier.

    Adds TorchMetrics-based accuracy tracking on top of the usual
    cross-entropy training/validation loop.
    """

    def __init__(self, model, learning_rate):
        super().__init__()
        self.learning_rate = learning_rate
        self.model = model
        # NEW !!!
        # Stateful metric objects: each accumulates per-batch statistics
        # and reduces them to an epoch-level accuracy when logged.
        self.train_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)
        self.val_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)

    def forward(self, x):
        return self.model(x)

    def _classify_batch(self, batch):
        # Shared forward pass for train/val: returns
        # (cross-entropy loss, predicted class ids, target labels).
        features, true_labels = batch
        logits = self(features)
        loss = F.cross_entropy(logits, true_labels)
        return loss, logits.argmax(dim=1), true_labels

    def training_step(self, batch, batch_idx):
        loss, preds, targets = self._classify_batch(batch)
        self.log("train_loss", loss)
        # NEW !!!
        # Accuracy is accumulated batch by batch and logged once per epoch.
        # Not exactly the same as a dedicated post-epoch pass (the weights
        # move between batches), but much faster: no extra data iteration.
        self.train_acc(preds, targets)
        self.log(
            "train_acc", self.train_acc, prog_bar=True, on_epoch=True, on_step=False
        )
        return loss

    def validation_step(self, batch, batch_idx):
        loss, preds, targets = self._classify_batch(batch)
        self.log("val_loss", loss, prog_bar=True)
        # NEW !!!
        # Accumulates validation accuracy over the whole validation set.
        self.val_acc(preds, targets)
        self.log("val_acc", self.val_acc, prog_bar=True)

    def configure_optimizers(self):
        # Plain SGD over every parameter of the wrapped model.
        return torch.optim.SGD(self.parameters(), lr=self.learning_rate)
def main():
    """Train the MLP on MNIST-style data and save the raw PyTorch weights."""
    # Environment report: package versions and CUDA availability.
    print(watermark(packages="torch,lightning", python=True))
    print("Torch CUDA available?", torch.cuda.is_available())

    train_loader, val_loader, test_loader = get_dataset_loaders()

    pytorch_model = PyTorchMLP(num_features=784, num_classes=10)
    lightning_model = LightningModel(model=pytorch_model, learning_rate=0.05)

    trainer = L.Trainer(
        max_epochs=10,
        accelerator="cpu",  # "auto",
        devices=1,  # "auto",
    )
    trainer.fit(
        model=lightning_model,
        train_dataloaders=train_loader,
        val_dataloaders=val_loader,
    )

    # Persist only the underlying PyTorch weights, not the Lightning wrapper.
    PATH = "lightning.pt"
    torch.save(pytorch_model.state_dict(), PATH)
    # To load model:
    # model = PyTorchMLP(num_features=784, num_classes=10)
    # model.load_state_dict(torch.load(PATH))
    # model.eval()


if __name__ == "__main__":
    main()