-
Notifications
You must be signed in to change notification settings - Fork 2.1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add MixIT support. It is unsupervised only. A semi-supervised config is not available for now.
- Loading branch information
1 parent
6d52365
commit b70d77b
Showing
8 changed files
with
310 additions
and
95 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
74 changes: 74 additions & 0 deletions
74
egs2/wsj0_2mix/enh1/conf/tuning/train_enh_mixit_conv_tasnet.yaml
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,74 @@ | ||
# Training configuration for MixIT (unsupervised) with a Conv-TasNet model.
optim: adam
init: xavier_uniform
max_epoch: 100
batch_type: folded
# When dynamic mixing is enabled, the actual batch_size will
# be (batch_size / num_spk)
batch_size: 16  # batch_size 16 can be trained on 4 RTX 2080ti
iterator_type: chunk
chunk_length: 32000
num_workers: 4
optim_conf:
    lr: 1.0e-03
    eps: 1.0e-08
    weight_decay: 0
patience: 4
val_scheduler_criterion:
- valid
- loss
best_model_criterion:
-   - valid
    - si_snr
    - max
-   - valid
    - loss
    - min
keep_nbest_models: 1

scheduler: reducelronplateau
scheduler_conf:
    mode: min
    factor: 0.5
    patience: 1

# encoder/decoder/separator: Conv-TasNet components
encoder: conv
encoder_conf:
    channel: 256
    kernel_size: 20
    stride: 10
decoder: conv
decoder_conf:
    channel: 256
    kernel_size: 20
    stride: 10
separator: tcn
separator_conf:
    # num_spk is the number of estimated sources; for MixIT with 2 input
    # mixtures this is larger than the per-mixture speaker count.
    num_spk: 4
    layer: 8
    stack: 4
    bottleneck_dim: 256
    hidden_dim: 512
    kernel: 3
    causal: False
    norm_type: "gLN"
    nonlinear: relu

# dynamic_mixing related
# dynamic_mixing_gain_db:
#   The maximum random gain (in dB) for each source before the mixing.
#   The gain (in dB) of each source is uniformly sampled in
#   [-dynamic_mixing_gain_db, dynamic_mixing_gain_db]
preprocessor: dynamic_mixing
preprocessor_conf:
    num_utts: 2
    dynamic_mixing_gain_db: 0.0
    source_scp_name: "wav.scp"
    mixture_source_name: "speech_mix"

criterions:
    # The first criterion
    - name: snr
      conf:
          eps: 1.0e-7
      # MixIT wrapper: searches over mixing matrices instead of permutations
      wrapper: mixit
      wrapper_conf:
          weight: 1.0
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
import itertools | ||
|
||
import torch | ||
|
||
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss | ||
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper | ||
|
||
|
||
class MixITSolver(AbsLossWrapper):
    """Mixture Invariant Training (MixIT) loss wrapper.

    Scores every possible assignment of the estimated sources to the input
    mixtures and keeps the assignment with the minimum loss, enabling
    unsupervised training on mixtures of mixtures.
    """

    def __init__(
        self,
        criterion: AbsEnhLoss,
        weight=1.0,
    ):
        """Mixture Invariant Training Solver.

        Args:
            criterion (AbsEnhLoss): an instance of AbsEnhLoss
            weight (float): weight (between 0 and 1) of current loss
                for multi-task learning.
        """
        super().__init__()
        self.criterion = criterion
        self.weight = weight

    @property
    def type(self):
        return "mixit"

    def forward(self, ref, inf, others=None):
        """MixIT solver.

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            inf (List[torch.Tensor]): [(batch, ...), ...] x n_est
            others (dict, optional): unused here; kept for wrapper
                interface compatibility.
        Returns:
            loss: (torch.Tensor): minimum loss with the best mixing matrix
            stats: dict, for collecting training status
            others: dict, in this MixIT solver, the best mixing matrix
                ("perm") will be returned
        """
        num_inf = len(inf)
        # Each input mixture is assumed to be a mix of 2 source mixtures,
        # so the number of reference mixtures is half the estimates.
        num_ref = num_inf // 2
        device = ref[0].device

        # Only the first num_ref references are the (observed) mixtures.
        ref_tensor = torch.stack(ref[:num_ref], dim=1)  # (batch, num_ref, ...)
        inf_tensor = torch.stack(inf, dim=1)  # (batch, num_inf, ...)

        # all assignments of each estimated source to one reference mixture:
        # [(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 0), ..., (1, 1, 1, 1)]
        all_assignments = list(itertools.product(range(num_ref), repeat=num_inf))
        all_mixture_matrix = torch.stack(
            [
                torch.nn.functional.one_hot(
                    torch.tensor(asm, dtype=torch.int64, device=device),
                    num_classes=num_ref,
                ).transpose(1, 0)
                for asm in all_assignments
            ],
            dim=0,
        ).float()  # (num_ref ^ num_inf, num_ref, num_inf)

        def pair_loss(matrix):
            # Remix the estimates with this 0/1 mixing matrix and score
            # each remixed signal against its reference mixture.
            mix_estimated = torch.matmul(matrix[None], inf_tensor)
            return (
                sum(
                    self.criterion(ref_tensor[:, i], mix_estimated[:, i])
                    for i in range(num_ref)
                )
                / num_ref
            )

        losses = torch.stack(
            [pair_loss(matrix) for matrix in all_mixture_matrix],
            dim=1,
        )  # (batch, num_ref ^ num_inf)
        loss, perm = torch.min(losses, dim=1)
        # Replace the flat indices with the actual best mixing matrices.
        perm = torch.index_select(all_mixture_matrix, 0, perm)

        # Average over the batch once (the original called .mean() twice).
        loss = loss.mean()

        stats = {f"{self.criterion.name}_{self.type}": loss.detach()}

        return loss, stats, {"perm": perm}
Oops, something went wrong.