-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
92 lines (74 loc) · 2.75 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import os
import argparse
import torch
from logger import utils
from data_cnpop import get_data_loaders
from solver import train
from ddsp.vocoder import SawSub, Sins, Full
from ddsp.loss import MSSLoss
def parse_args(args=None, namespace=None):
    """Parse command-line arguments for the training entry point.

    Args:
        args: Optional list of argument strings; defaults to ``sys.argv[1:]``.
        namespace: Optional existing namespace to populate.

    Returns:
        argparse.Namespace with a single ``config`` attribute (path to the
        YAML/TOML config file; required).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config",
        type=str,
        required=True,
        help="path to the config file",
    )
    return parser.parse_args(args=args, namespace=namespace)
if __name__ == '__main__':
    # parse commands
    cmd = parse_args()

    # load config
    args = utils.load_config(cmd.config)
    print(' > config:', cmd.config)
    print(' > exp:', args.env.expdir)

    # device — resolved BEFORE utils.load_model below, which receives
    # args.device. (Bug fix: the original assigned args.device only after
    # load_model had already consumed it, so the checkpoint loader saw a
    # stale or unset device value.)
    args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if args.device == 'cuda':
        torch.cuda.set_device(args.env.gpu_id)

    # build the synthesizer model selected by the config
    if args.model.type == 'Sins':
        model = Sins(
            sampling_rate=args.data.sampling_rate,
            block_size=args.data.block_size,
            n_harmonics=args.model.n_harmonics,
            n_mag_noise=args.model.n_mag_noise,
            n_mels=args.data.n_mels)
    elif args.model.type == 'SawSub':
        model = SawSub(
            sampling_rate=args.data.sampling_rate,
            block_size=args.data.block_size,
            n_mag_harmonic=args.model.n_mag_harmonic,
            n_mag_noise=args.model.n_mag_noise,
            n_mels=args.data.n_mels)
    elif args.model.type == 'Full':
        model = Full(
            sampling_rate=args.data.sampling_rate,
            block_size=args.data.block_size,
            n_mag_harmonic=args.model.n_mag_harmonic,
            n_mag_noise=args.model.n_mag_noise,
            n_harmonics=args.model.n_harmonics,
            n_sub_harmonics=args.model.n_sub_harmonics,
            n_mels=args.data.n_mels)
    else:
        raise ValueError(f" [x] Unknown Model: {args.model.type}")

    # load parameters: load_model restores model/optimizer state from the
    # experiment dir (if a checkpoint exists) and returns the step to resume
    # from, so no separate initial_global_step = 0 default is needed.
    optimizer = torch.optim.Adam(model.parameters())
    initial_global_step, model, optimizer = utils.load_model(args.env.expdir, model, optimizer, device=args.device)
    # always apply the configured learning rate, overriding whatever was
    # stored in the checkpointed optimizer state
    for param_group in optimizer.param_groups:
        param_group['lr'] = args.train.lr

    # loss
    loss_func = MSSLoss(args.loss.n_ffts)

    # move model, optimizer state tensors (checkpoints may have been loaded
    # onto a different device), and loss to the target device
    model.to(args.device)
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.to(args.device)
    loss_func.to(args.device)

    # datas
    loader_train, loader_valid = get_data_loaders(args, whole_audio=False)

    # run
    train(args, initial_global_step, model, optimizer, loss_func, loader_train, loader_valid)