train.py
import time

import numpy as np
import torch
import matplotlib.pyplot as plt
from torchvision.utils import make_grid

from loss import dice_coeff


def train_and_test(model, dataloaders, optimizer, criterion, num_epochs=3, show_images=False):
    since = time.time()
    best_dice = 0.0

    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    fieldnames = ['epoch', 'training_loss', 'test_loss', 'training_dice_coeff', 'test_dice_coeff']
    train_epoch_losses = []
    test_epoch_losses = []

    for epoch in range(1, num_epochs + 1):
        print(f'Epoch {epoch}/{num_epochs}')
        print('-' * 10)

        # Per-epoch summary; lists start empty so the epoch means are not
        # biased by a placeholder value
        batchsummary = {a: [] for a in fieldnames}
        batch_train_loss = 0.0
        batch_test_loss = 0.0

        # Each epoch has a training and a test phase
        for phase in ['training', 'test']:
            if phase == 'training':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluation mode

            # Iterate over data
            for sample in dataloaders[phase]:
                if show_images:
                    grid_img = make_grid(sample['image'])
                    grid_img = grid_img.permute(1, 2, 0)
                    plt.imshow(grid_img)
                    plt.show()

                inputs = sample['image'].to(device)
                masks = sample['mask'].to(device)

                # Zero the parameter gradients
                optimizer.zero_grad()

                # Track gradient history only in the training phase
                with torch.set_grad_enabled(phase == 'training'):
                    outputs = model(inputs)
                    loss = criterion(outputs, masks)

                    y_pred = outputs.detach().cpu().numpy().ravel()
                    y_true = masks.cpu().numpy().ravel()
                    batchsummary[f'{phase}_dice_coeff'].append(dice_coeff(y_pred, y_true))

                    # Back-propagate only during training
                    if phase == 'training':
                        loss.backward()
                        optimizer.step()
                        # Accumulate loss weighted by batch size
                        batch_train_loss += loss.item() * sample['image'].size(0)
                    else:
                        batch_test_loss += loss.item() * sample['image'].size(0)

            # Average the accumulated loss over the number of samples,
            # not the number of batches, and record it
            if phase == 'training':
                epoch_train_loss = batch_train_loss / len(dataloaders['training'].dataset)
                train_epoch_losses.append(epoch_train_loss)
                batchsummary['training_loss'] = epoch_train_loss
                print('{} Loss: {:.4f}'.format(phase, epoch_train_loss))
            else:
                epoch_test_loss = batch_test_loss / len(dataloaders['test'].dataset)
                test_epoch_losses.append(epoch_test_loss)
                batchsummary['test_loss'] = epoch_test_loss
                print('{} Loss: {:.4f}'.format(phase, epoch_test_loss))

        batchsummary['epoch'] = epoch

        # Track the best test dice coefficient seen over all epochs
        best_dice = max(best_dice, float(np.max(batchsummary['test_dice_coeff'])))
        for field in fieldnames[3:]:
            batchsummary[field] = np.mean(batchsummary[field])
        print(f'\t\t\t train_dice_coeff: {batchsummary["training_dice_coeff"]}, '
              f'test_dice_coeff: {batchsummary["test_dice_coeff"]}')

    # Summary
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best dice coefficient: {:.4f}'.format(best_dice))
    return model, train_epoch_losses, test_epoch_losses
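

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It assumes a
# binary-segmentation setup: dataloaders is a dict with 'training' and 'test'
# keys, and every batch is a dict with 'image' and 'mask' tensors, which is
# what the loop above expects. RandomSegDataset and the toy model below are
# hypothetical stand-ins for the repo's real dataset and network.
if __name__ == '__main__':
    import torch.nn as nn
    from torch.utils.data import DataLoader, Dataset

    class RandomSegDataset(Dataset):
        """Synthetic stand-in dataset: random images with binary masks."""
        def __len__(self):
            return 16

        def __getitem__(self, idx):
            return {'image': torch.rand(3, 64, 64),
                    'mask': (torch.rand(1, 64, 64) > 0.5).float()}

    dataloaders = {
        'training': DataLoader(RandomSegDataset(), batch_size=4, shuffle=True),
        'test': DataLoader(RandomSegDataset(), batch_size=4),
    }

    # A toy model that returns a plain tensor, as the loop requires; the
    # repo's real segmentation model (and its FocalLoss) plug in the same way.
    model = nn.Sequential(
        nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
        nn.Conv2d(16, 1, 1), nn.Sigmoid(),
    )
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    criterion = nn.BCELoss()

    model, train_losses, test_losses = train_and_test(
        model, dataloaders, optimizer, criterion, num_epochs=2)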