# preprocess.py
import os
import shutil
import argparse
import torch
from modules.common import load_config
from modules.dataset.prepare import make_metadata
from modules.dataset.preprocess import PreprocessorParameters
from modules.dataset.preprocess import preprocess_spkinfo, preprocess_main
from modules.dataset.loader import get_datasets
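

# Rough overview of what this script does (see the code below):
#   1. make_metadata: prepare the dataset metadata (train.csv and, optionally, test.csv)
#   2. PreprocessorParameters: gather feature-extraction settings from the config
#   3. preprocess_spkinfo / preprocess_main: run feature extraction on the
#      train split (and the test split, if test.csv exists)
#   4. copy the pretrained weights into the experiment directory (args.env.expdir)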
def parse_args(args=None, namespace=None):
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        required=True,
        help="path to the config file")
    parser.add_argument(
        "-d",
        "--device",
        type=str,
        default=None,
        required=False,
        help="cpu or cuda, auto if not set")
    return parser.parse_args(args=args, namespace=namespace)
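

# Example invocation (the config path here is only illustrative):
#   python preprocess.py -c configs/your_config.yaml -d cuda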
if __name__ == '__main__':
    # parse commands
    cmd = parse_args()
    device = cmd.device
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # load config
    args = load_config(cmd.config)

    # make metadata
    make_metadata(args.data.dataset_path, args.data.extensions)

    mel_vocoder_args = {}
    if "Diffusion" in args.model.type:
        mel_vocoder_args.update({
            'mel_vocoder_type': args.model.vocoder.type,
            'mel_vocoder_ckpt': args.model.vocoder.ckpt,
        })
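
    # NOTE: a mel vocoder (type + checkpoint) is only passed for Diffusion-type
    # models; use_mel below is toggled by the same condition.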
    # preprocessor parameters
    params = PreprocessorParameters(
        args.data.dataset_path,
        sample_rate=args.data.sampling_rate,
        block_size=args.data.block_size,
        use_f0=True,
        f0_extractor=args.data.f0_extractor,
        f0_min=args.data.f0_min,
        f0_max=args.data.f0_max,
        use_speaker_embed=args.model.use_speaker_embed,
        speaker_embed_encoder=args.data.spk_embed_encoder,
        speaker_embed_encoder_path=args.data.spk_embed_encoder_ckpt,
        speaker_embed_encoder_sample_rate=args.data.spk_embed_encoder_sample_rate,
        per_file_speaker_embed=args.data.per_file_speaker_embed,
        units_encoder=args.data.encoder,
        units_encoder_path=args.data.encoder_ckpt,
        units_encoder_sample_rate=args.data.encoder_sample_rate,
        units_encoder_hop_size=args.data.encoder_hop_size,
        units_encoder_extract_layers=args.model.units_layers,
        volume_extractor_window_size=args.data.volume_window_size,
        use_mel="Diffusion" in args.model.type,
        **mel_vocoder_args,
        device=device)

    # get datasets
    ds_train = get_datasets(os.path.join(args.data.dataset_path, 'train.csv'))
    test_csv = os.path.join(args.data.dataset_path, 'test.csv')
    if os.path.isfile(test_csv):
        ds_test = get_datasets(test_csv)
    else:
        ds_test = None
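
    # test.csv is optional; when it is missing, only the training split is preprocessed.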

    # process speaker embed
    if args.model.use_speaker_embed:
        preprocess_spkinfo(args.data.dataset_path, ds_train, params=params)

    # process units, f0 and volume
    preprocess_main(args.data.dataset_path, ds_train, params=params)
    if ds_test is not None:
        preprocess_main(args.data.dataset_path, ds_test, params=params)
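
    # Finally, set up the experiment directory; if states/cp0 already exists, the
    # pretrained weights are assumed to be in place and copying is skipped.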
    os.makedirs(args.env.expdir, exist_ok=True)
    if os.path.isdir(os.path.join(args.env.expdir, "states", "cp0")):
        # skip copying if the pretrained weights already exist
        print("Skipping copy of pretrained weights, already exists")
    else:
        print("Copying pretrained weights...")
        shutil.copytree("./models/pretrained/mnp-svc/states", os.path.join(args.env.expdir, "states"), dirs_exist_ok=True)

    print("Done!")