diff --git a/PyTorch/SpeechSynthesis/FastPitch/fastpitch/attention.py b/PyTorch/SpeechSynthesis/FastPitch/fastpitch/attention.py
index 59a7397d6..21bdbc3c9 100644
--- a/PyTorch/SpeechSynthesis/FastPitch/fastpitch/attention.py
+++ b/PyTorch/SpeechSynthesis/FastPitch/fastpitch/attention.py
@@ -90,7 +90,6 @@ def __init__(self, n_mel_channels=80, n_speaker_dim=128,
         self.softmax = torch.nn.Softmax(dim=3)
         self.log_softmax = torch.nn.LogSoftmax(dim=3)
         self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
-        self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
         self.align_query_enc_type = align_query_enc_type
         self.use_query_proj = bool(use_query_proj)
 
diff --git a/PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attention.py b/PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attention.py
index 59a7397d6..21bdbc3c9 100644
--- a/PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attention.py
+++ b/PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attention.py
@@ -90,7 +90,6 @@ def __init__(self, n_mel_channels=80, n_speaker_dim=128,
         self.softmax = torch.nn.Softmax(dim=3)
         self.log_softmax = torch.nn.LogSoftmax(dim=3)
         self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
-        self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
         self.align_query_enc_type = align_query_enc_type
         self.use_query_proj = bool(use_query_proj)
 