From bbb9b277c0967adbaa5e6c0a4870dbeec5d32d3d Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Wed, 3 Nov 2021 19:29:16 +0100
Subject: [PATCH] Rename `mask` to `non_padded_lengths`.

---
 src/transformers/models/unispeech/modeling_unispeech.py   | 4 ++--
 .../models/unispeech_sat/modeling_unispeech_sat.py        | 4 ++--
 src/transformers/models/wav2vec2/modeling_wav2vec2.py     | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py
index 3592e9036d56..a8a89c302b75 100755
--- a/src/transformers/models/unispeech/modeling_unispeech.py
+++ b/src/transformers/models/unispeech/modeling_unispeech.py
@@ -949,8 +949,8 @@ def _conv_out_length(input_length, kernel_size, stride):
     def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
         # Effectively attention_mask.sum(-1), but not inplace to be able to run
         # on inference mode.
-        mask = attention_mask.cumsum(dim=-1)[:, -1]
-        output_lengths = self._get_feat_extract_output_lengths(mask).to(torch.long)
+        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
+        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
 
         batch_size = attention_mask.shape[0]
         attention_mask = torch.zeros(
diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
index 2361c248c6c7..c5f8243bf115 100755
--- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
+++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
@@ -950,8 +950,8 @@ def _conv_out_length(input_length, kernel_size, stride):
     def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
         # Effectively attention_mask.sum(-1), but not inplace to be able to run
         # on inference mode.
-        mask = attention_mask.cumsum(dim=-1)[:, -1]
-        output_lengths = self._get_feat_extract_output_lengths(mask).to(torch.long)
+        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
+        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
 
         batch_size = attention_mask.shape[0]
         attention_mask = torch.zeros(
diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
index 3b35991a0a3e..6548f245f0e8 100755
--- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
@@ -991,8 +991,8 @@ def _conv_out_length(input_length, kernel_size, stride):
     def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
         # Effectively attention_mask.sum(-1), but not inplace to be able to run
         # on inference mode.
-        mask = attention_mask.cumsum(dim=-1)[:, -1]
-        output_lengths = self._get_feat_extract_output_lengths(mask).to(torch.long)
+        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
+        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
 
         batch_size = attention_mask.shape[0]
         attention_mask = torch.zeros(
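
For reference, a minimal standalone sketch (not part of the patch) of what the
renamed variable holds. The source comment notes the expression is effectively
attention_mask.sum(-1), computed via cumsum so no in-place reduction is needed
in inference mode; the example mask below is hypothetical, of the 0/1 kind the
Wav2Vec2 feature extractor produces.

    import torch

    # Hypothetical right-padded attention mask for two sequences with
    # 4 and 6 non-padded frames respectively.
    attention_mask = torch.tensor(
        [[1, 1, 1, 1, 0, 0],
         [1, 1, 1, 1, 1, 1]],
        dtype=torch.long,
    )

    # The last element of the cumulative sum along the time axis equals the
    # total number of non-padded frames per example, i.e. attention_mask.sum(-1).
    non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
    assert torch.equal(non_padded_lengths, attention_mask.sum(dim=-1))
    print(non_padded_lengths)  # tensor([4, 6])

The rename makes this intent explicit: the old name `mask` suggested a boolean
mask, while the tensor actually holds per-example lengths that are then fed to
_get_feat_extract_output_lengths.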