train.py
#!/usr/bin/env python3
import copy
from dataclasses import asdict

import numpy as np
import torch
import torchvision
import torchvision.utils as vutils
import wandb
from accelerate import Accelerator
from diffusers import AutoencoderKL
from PIL.Image import Image
from torch import Tensor, nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm

from tld.configs import ModelConfig
from tld.denoiser import Denoiser
from tld.diffusion import DiffusionGenerator


def eval_gen(diffuser: DiffusionGenerator, labels: Tensor, img_size: int) -> Image:
    """Generate a fixed grid of validation images with the EMA model."""
    class_guidance = 4.5
    seed = 10
    # 16 images total: each validation embedding is repeated twice
    out, _ = diffuser.generate(
        labels=torch.repeat_interleave(labels, 2, dim=0),
        num_imgs=16,
        class_guidance=class_guidance,
        seed=seed,
        n_iter=40,
        exponent=1,
        sharp_f=0.1,
        img_size=img_size,
    )

    # map from [-1, 1] to [0, 1], tile into an 8-wide grid, and convert to PIL
    out = to_pil((vutils.make_grid((out + 1) / 2, nrow=8, padding=4)).float().clip(0, 1))
    out.save(f"emb_val_cfg:{class_guidance}_seed:{seed}.png")
    return out


def count_parameters(model: nn.Module):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def count_parameters_per_layer(model: nn.Module):
    for name, param in model.named_parameters():
        print(f"{name}: {param.numel()} parameters")


to_pil = torchvision.transforms.ToPILImage()


def update_ema(ema_model: nn.Module, model: nn.Module, alpha: float = 0.999):
    # exponential moving average of weights: ema <- alpha * ema + (1 - alpha) * model
    with torch.no_grad():
        for ema_param, model_param in zip(ema_model.parameters(), model.parameters()):
            ema_param.data.mul_(alpha).add_(model_param.data, alpha=1 - alpha)


def main(config: ModelConfig) -> None:
    """Main train loop, to be used with accelerate."""
    denoiser_config = config.denoiser_config
    train_config = config.train_config
    dataconfig = config.data_config

    log_with = "wandb" if train_config.use_wandb else None
    accelerator = Accelerator(mixed_precision="fp16", log_with=log_with)

    accelerator.print("Loading data:")
    # precomputed VAE latents and matching text embeddings
    latent_train_data = torch.tensor(np.load(dataconfig.latent_path), dtype=torch.float32)
    train_label_embeddings = torch.tensor(np.load(dataconfig.text_emb_path), dtype=torch.float32)
    emb_val = torch.tensor(np.load(dataconfig.val_path), dtype=torch.float32)
    dataset = TensorDataset(latent_train_data, train_label_embeddings)
    train_loader = DataLoader(dataset, batch_size=train_config.batch_size, shuffle=True)
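
    # Optional sanity check — a minimal sketch, assuming latents are stored as
    # (N, C, H, W) and text embeddings as (N, D); not part of the original script:
    # assert latent_train_data.ndim == 4 and train_label_embeddings.ndim == 2
    # assert len(latent_train_data) == len(train_label_embeddings)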

    # the VAE is only needed to decode latents during eval, so it lives on the main process
    vae = AutoencoderKL.from_pretrained(config.vae_cfg.vae_name, torch_dtype=config.vae_cfg.vae_dtype)
    if accelerator.is_main_process:
        vae = vae.to(accelerator.device)

    model = Denoiser(**asdict(denoiser_config))

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=train_config.lr)

    if train_config.compile:
        accelerator.print("Compiling model:")
        model = torch.compile(model)

    if not train_config.from_scratch:
        # resume from a checkpoint stored in the W&B run
        accelerator.print("Loading model:")
        wandb.restore(
            train_config.model_name, run_path=f"apapiu/cifar_diffusion/runs/{train_config.run_id}", replace=True
        )
        full_state_dict = torch.load(train_config.model_name)
        model.load_state_dict(full_state_dict["model_ema"])
        optimizer.load_state_dict(full_state_dict["opt_state"])
        global_step = full_state_dict["global_step"]
    else:
        global_step = 0

    if accelerator.is_local_main_process:
        # an EMA copy of the denoiser drives eval-time generation
        ema_model = copy.deepcopy(model).to(accelerator.device)
        diffuser = DiffusionGenerator(ema_model, vae, accelerator.device, torch.float32)

    accelerator.print("model prep")
    model, train_loader, optimizer = accelerator.prepare(model, train_loader, optimizer)

    if train_config.use_wandb:
        accelerator.init_trackers(project_name="cifar_diffusion", config=asdict(config))

    accelerator.print(f"trainable parameters: {count_parameters(model)}")
    if accelerator.is_main_process:
        count_parameters_per_layer(model)  # prints per-layer counts itself; returns None

    ### Train:
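    # Training objective implemented below: draw a per-example noise level from
    # Beta(beta_a, beta_b), form x_noisy as a linear mix of Gaussian noise and
    # the clean latents at that level, and train the denoiser to reconstruct
    # the clean latents with an MSE loss (x-prediction).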
    for i in range(1, train_config.n_epoch + 1):
        accelerator.print(f"epoch: {i}")

        for x, y in tqdm(train_loader):
            x = x / config.vae_cfg.vae_scale_factor

            noise_level = torch.tensor(
                np.random.beta(train_config.beta_a, train_config.beta_b, len(x)), device=accelerator.device
            )
            signal_level = 1 - noise_level
            noise = torch.randn_like(x)

            x_noisy = noise_level.view(-1, 1, 1, 1) * noise + signal_level.view(-1, 1, 1, 1) * x

            x_noisy = x_noisy.float()
            noise_level = noise_level.float()
            label = y

            # classifier-free guidance: zero out the conditioning ~15% of the time
            prob = 0.15
            mask = torch.rand(y.size(0), device=accelerator.device) < prob
            label[mask] = 0  # OR replacement_vector

            if global_step % train_config.save_and_eval_every_iters == 0:
                accelerator.wait_for_everyone()
                if accelerator.is_main_process:
                    ## eval and saving:
                    out = eval_gen(diffuser=diffuser, labels=emb_val, img_size=denoiser_config.image_size)
                    out.save("img.jpg")
                    if train_config.use_wandb:
                        accelerator.log({f"step: {global_step}": wandb.Image("img.jpg")})

                    opt_unwrapped = accelerator.unwrap_model(optimizer)
                    full_state_dict = {
                        "model_ema": ema_model.state_dict(),
                        "opt_state": opt_unwrapped.state_dict(),
                        "global_step": global_step,
                    }
                    if train_config.save_model:
                        accelerator.save(full_state_dict, train_config.model_name)
                        if train_config.use_wandb:
                            wandb.save(train_config.model_name)

            model.train()

            with accelerator.accumulate(model):
                ### train step:
                optimizer.zero_grad()

                # the denoiser predicts the clean latents directly
                pred = model(x_noisy, noise_level.view(-1, 1), label)
                loss = loss_fn(pred, x)
                accelerator.log({"train_loss": loss.item()}, step=global_step)
                accelerator.backward(loss)
                optimizer.step()

                if accelerator.is_main_process:
                    update_ema(ema_model, model, alpha=train_config.alpha)

            global_step += 1

    accelerator.end_training()

# To launch from a notebook (the entry point is `main`, not `training_loop`):
# from accelerate import notebook_launcher
# notebook_launcher(main, args=(config,), num_processes=...)
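

# A minimal launch sketch, assuming tld.configs also exposes a DataConfig
# dataclass whose path fields match their use in main() above; the .npy
# filenames are hypothetical placeholders, not files shipped with the repo:
if __name__ == "__main__":
    from tld.configs import DataConfig  # assumed to exist alongside ModelConfig

    data_config = DataConfig(
        latent_path="latents.npy",     # precomputed VAE latents (N, C, H, W)
        text_emb_path="text_emb.npy",  # text embeddings aligned with the latents
        val_path="val_emb.npy",        # embeddings used for the eval image grid
    )
    main(ModelConfig(data_config=data_config))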