# model.py
import lightning as L
import segmentation_models_pytorch as smp
import torch
import torch.optim as optim
import torchmetrics as tm
from lightning.pytorch.utilities import grad_norm
from torch import nn
from transformers import get_cosine_schedule_with_warmup

from ema import EMAOptimizer  # local EMA weight-averaging wrapper
def loss_warp(output, mask):
    """Weighted sum of BCE and Dice loss, both computed on raw logits."""
    dice_r = 0.75  # weight of the Dice term
    loss_dice = smp.losses.DiceLoss(smp.losses.BINARY_MODE, from_logits=True)
    loss_bce = nn.BCEWithLogitsLoss()
    return (1 - dice_r) * loss_bce(output, mask) + dice_r * loss_dice(output, mask)
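# A minimal sketch of how loss_warp is consumed (shapes follow the training
# loop below; the tensors here are illustrative only):
#
#   logits = torch.randn(2, 1, 64, 64)                  # raw model output
#   mask = torch.randint(0, 2, (2, 1, 64, 64)).float()  # binary target
#   loss = loss_warp(logits, mask)                      # scalar tensor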
class SARModel(L.LightningModule):
    def __init__(self, arch, encoder_name, in_channels, encoder_weights=None):
        super().__init__()
        self.model = smp.create_model(
            arch,
            encoder_name=encoder_name,
            encoder_weights=encoder_weights,
            in_channels=in_channels,
            classes=1,
        )
        # Alternatives tried during experimentation:
        # self.model = timm.create_model('convnextv2_huge.fcmae_ft_in22k_in1k_512', pretrained=True)
        # self.loss_fn = smp.losses.DiceLoss(smp.losses.BINARY_MODE, from_logits=True)
        # self.loss_fn = nn.BCEWithLogitsLoss()
        self.loss_fn = loss_warp
        # Registered as a submodule so Lightning moves it to the training device.
        self.f1_func = tm.F1Score(task="binary")
        # Per-step buffers for the epoch-level F1 computed in the epoch-end hooks.
        self.validation_step_outputs_mask = []
        self.validation_step_outputs_pre = []
        self.train_step_outputs_mask = []
        self.train_step_outputs_pre = []
    def forward(self, batch):
        output = self.model(batch[0])
        return output
    def on_before_optimizer_step(self, optimizer):
        # Track the total gradient L2 norm over the whole model each step.
        norm_type = 2
        norms_dict = grad_norm(self.model, norm_type=norm_type)
        # grad_norm formats the norm type as a float in its keys, e.g. "grad_2.0_norm_total".
        grad = norms_dict[f"grad_{float(norm_type)}_norm_total"]
        self.log(f"grad_norm_{norm_type}", grad)
    def training_step(self, batch, batch_idx):
        # batch is (sar, mask, idx); image: (bs, 6, h, w), output and mask: (bs, 1, h, w)
        image = batch[0]
        mask = batch[1]
        output = self.model(image)
        train_loss = self.loss_fn(output, mask)
        # Binarize at probability 0.5 and stash flattened CPU copies for epoch-end F1.
        pred_mask = (output.sigmoid() > 0.5).float()
        self.train_step_outputs_mask.append(mask.flatten().to("cpu"))
        self.train_step_outputs_pre.append(pred_mask.flatten().to("cpu"))
        self.log(
            "train_loss",
            train_loss,
            on_step=False,
            on_epoch=True,
            logger=True,
            prog_bar=True,
        )
        return train_loss
    def validation_step(self, batch, batch_idx):
        image = batch[0]
        mask = batch[1]
        output = self.model(image)
        val_loss = self.loss_fn(output, mask)
        pred_mask = (output.sigmoid() > 0.5).float()
        self.validation_step_outputs_mask.append(mask.flatten().to("cpu"))
        self.validation_step_outputs_pre.append(pred_mask.flatten().to("cpu"))
        self.log(
            "val_loss",
            val_loss,
            on_step=False,
            on_epoch=True,
            logger=True,
            prog_bar=True,
        )
        return val_loss
    def on_train_epoch_end(self):
        # Compute one F1 score over the whole epoch; the metric lives on the
        # module's device, so move the CPU-accumulated tensors onto it first.
        all_mask = torch.cat(self.train_step_outputs_mask).to(self.device)
        all_pre = torch.cat(self.train_step_outputs_pre).to(self.device)
        f1_fin_train = self.f1_func(all_pre, all_mask)
        self.train_step_outputs_mask.clear()
        self.train_step_outputs_pre.clear()
        self.log(
            "score_f1_train",
            f1_fin_train,
            on_epoch=True,
            logger=True,
            prog_bar=True,
        )
    def on_validation_epoch_end(self):
        all_mask = torch.cat(self.validation_step_outputs_mask).to(self.device)
        all_pre = torch.cat(self.validation_step_outputs_pre).to(self.device)
        f1_fin_val = self.f1_func(all_pre, all_mask)
        self.validation_step_outputs_mask.clear()
        self.validation_step_outputs_pre.clear()
        self.log(
            "score_f1_val",
            f1_fin_val,
            on_epoch=True,
            logger=True,
            prog_bar=True,
        )
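    # Note on the two hooks above: F1 is not decomposable, so the mean of
    # per-batch F1 scores differs from the epoch-level F1 computed on the
    # concatenated predictions. A toy illustration with made-up values:
    #
    #   preds = torch.tensor([1, 0, 1, 1])
    #   target = torch.tensor([1, 1, 0, 1])
    #   tm.functional.f1_score(preds, target, task="binary")  # tensor(0.6667)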
    def configure_optimizers(self):
        optimizer = optim.AdamW(self.parameters(), lr=4e-5, weight_decay=4e-3)
        # Wrap with EMA weight averaging (local ema.py helper); device is hard-coded to CUDA.
        optimizer = EMAOptimizer(optimizer=optimizer, device=torch.device("cuda"))
        # Cosine decay with 20% warmup; requires the Trainer to be run with
        # max_steps set, otherwise self.trainer.max_steps defaults to -1.
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=int(self.trainer.max_steps * 0.2),
            num_training_steps=self.trainer.max_steps,
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
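# A minimal smoke-test sketch, not part of the training pipeline. "Unet" and
# "resnet34" are illustrative smp choices, and the 6-channel input mirrors the
# (bs, 6, h, w) shape noted in training_step; adjust both for the real data.
if __name__ == "__main__":
    model = SARModel(arch="Unet", encoder_name="resnet34", in_channels=6)
    model.eval()
    dummy_batch = (torch.randn(2, 6, 64, 64),)
    with torch.no_grad():
        out = model(dummy_batch)
    print(out.shape)  # expected: torch.Size([2, 1, 64, 64])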