#!/usr/bin/env python
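"""Trains an MNIST classifier and logs metrics to Weights & Biases.

Example invocation (illustrative values; see parse_args for all flags and
their defaults):

    python train.py --batch_size 64 --learning_rate 1e-3 --epochs 10
"""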
import argparse
import json
import os

import torch
import torch.nn as nn
import torch.optim as optim
import wandb

from dataset import mnist
from model import MNISTModel
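# Local modules (not shown here): `mnist` is assumed to return a
# torch.utils.data.DataLoader yielding (image, label) batches, and
# `MNISTModel` an nn.Module mapping image batches to class logits.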

# NOTE: Reproducibility. Deterministic algorithms on CUDA may additionally
# require setting CUBLAS_WORKSPACE_CONFIG=":4096:8" in the environment.
torch.manual_seed(0)
torch.use_deterministic_algorithms(True)


def parse_args() -> argparse.Namespace:
    """Parses the command line arguments."""
    parser = argparse.ArgumentParser("MNIST model training")
    parser.add_argument("--batch_size", type=int, default=32, help="batch size")
    # Shuffling is on by default; pass --no_shuffle to disable it.
    parser.add_argument("--no_shuffle", dest="shuffle", action="store_false", help="disable dataset shuffling")
    parser.add_argument("--num_workers", type=int, default=8, help="worker processes for data loading")
    parser.add_argument("--learning_rate", type=float, default=2e-3, help="learning rate")
    parser.add_argument("--weight_decay", type=float, default=1e-5, help="weight decay")
    parser.add_argument("--epochs", type=int, default=4, help="number of epochs")
    parser.add_argument("--model_dir", type=str, default="model", help="directory to store models")
    parser.add_argument("--model_name", type=str, default="model.pt", help="model file name")
    return parser.parse_args()


def main() -> None:
    """Runs the training loop."""
    args = parse_args()
    wandb.init(project="mnist", config=vars(args))
    dataloader = mnist(
        batch_size=args.batch_size,
        shuffle=args.shuffle,
        num_workers=args.num_workers,
        train=True,
    )
    # DataParallel expects the wrapped module's parameters on the primary GPU,
    # so move the model onto CUDA before wrapping it.
    model = nn.DataParallel(MNISTModel().cuda())  # type: ignore
    model.train()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
    )
    for epoch in range(args.epochs):
        running_loss = 0.0
        for idx, (features, labels) in enumerate(dataloader):
            # Move features and labels onto the device.
            features = features.cuda()
            labels = labels.cuda()
            # Zero the parameter gradients.
            optimizer.zero_grad()
            # Forward + backward + optimize.
            outputs = model(features)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Accumulate the loss and log its running average every 375 steps.
            running_loss += loss.item()
            if idx % 375 == 374:
                print(f"Epoch {epoch} | Steps: {idx + 1:<4} | Loss: {running_loss / 375:.3f}")
                wandb.log({"epoch": epoch, "steps": idx + 1, "loss": round(running_loss / 375, 3)})
                running_loss = 0.0
    # Create the directory for storing models if it does not already exist.
    os.makedirs(args.model_dir, exist_ok=True)
    # Save the model weights (unwrapping the DataParallel container via .module).
    torch.save(model.module.state_dict(), os.path.join(args.model_dir, args.model_name))
    # Save the hyperparameters alongside the weights.
    with open(os.path.join(args.model_dir, "params.json"), "w") as file:
        json.dump(vars(args), file, indent=4)


if __name__ == "__main__":
    main()
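

# To reload the saved weights elsewhere (illustrative snippet; the path
# follows the default --model_dir and --model_name values above):
#
#     model = MNISTModel()
#     model.load_state_dict(torch.load("model/model.pt"))
#     model.eval()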