# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
from argparse import Namespace
from distutils.util import strtobool
import logging
import math
import torch
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_st import Reporter
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import LabelSmoothingLoss
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.mask import target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.st_interface import STInterface
class E2E(STInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("transformer model setting")
group.add_argument("--transformer-init", type=str, default="pytorch",
choices=["pytorch", "xavier_uniform", "xavier_normal",
"kaiming_uniform", "kaiming_normal"],
help='how to initialize transformer parameters')
group.add_argument("--transformer-input-layer", type=str, default="conv2d",
choices=["conv2d", "linear", "embed"],
help='transformer input layer type')
group.add_argument('--transformer-attn-dropout-rate', default=None, type=float,
help='dropout in transformer attention. use --dropout-rate if None is set')
group.add_argument('--transformer-lr', default=10.0, type=float,
help='Initial value of learning rate')
group.add_argument('--transformer-warmup-steps', default=25000, type=int,
help='optimizer warmup steps')
group.add_argument('--transformer-length-normalized-loss', default=True, type=strtobool,
help='normalize loss by length')
group.add_argument('--dropout-rate', default=0.0, type=float,
help='Dropout rate for the encoder')
# Encoder
group.add_argument('--elayers', default=4, type=int,
help='Number of encoder layers')
group.add_argument('--eunits', '-u', default=300, type=int,
help='Number of encoder hidden units')
# Attention
group.add_argument('--adim', default=320, type=int,
help='Number of attention transformation dimensions')
group.add_argument('--aheads', default=4, type=int,
help='Number of heads for multi head attention')
# Decoder
group.add_argument('--dlayers', default=1, type=int,
help='Number of decoder layers')
group.add_argument('--dunits', default=320, type=int,
help='Number of decoder hidden units')
return parser
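# Usage sketch (illustrative; in practice these options come from the ESPnet
# training scripts):
#   parser = argparse.ArgumentParser()
#   E2E.add_arguments(parser)
#   args = parser.parse_args(["--elayers", "12", "--adim", "256"])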
@property
def attention_plot_class(self):
"""Return PlotAttentionReport."""
return PlotAttentionReport
def __init__(self, idim, odim, args, ignore_id=-1):
"""Construct an E2E object.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
torch.nn.Module.__init__(self)
if args.transformer_attn_dropout_rate is None:
args.transformer_attn_dropout_rate = args.dropout_rate
self.encoder = Encoder(
idim=idim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=args.transformer_input_layer,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate
)
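# NOTE: with input_layer="conv2d", the encoder front-end uses ESPnet's
# Conv2dSubsampling, which reduces the time resolution by a factor of 4
# before the self-attention blocks.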
self.decoder = Decoder(
odim=odim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate
)
self.pad = 0
self.sos = odim - 1
self.eos = odim - 1
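# NOTE: index 0 is reserved for padding and the last index (odim - 1) is
# shared by <sos> and <eos>, following the ESPnet convention.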
self.odim = odim
self.ignore_id = ignore_id
self.subsample = [1]
self.reporter = Reporter()
# self.lsm_weight = a
self.criterion = LabelSmoothingLoss(self.odim, self.ignore_id, args.lsm_weight,
args.transformer_length_normalized_loss)
# self.verbose = args.verbose
self.adim = args.adim
# submodule for ASR task
self.mtlalpha = args.mtlalpha
self.asr_weight = getattr(args, "asr_weight", 0.0)
if self.asr_weight > 0 and args.mtlalpha < 1:
self.decoder_asr = Decoder(
odim=odim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
)
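# NOTE: the attention-based ASR decoder is built only when it is actually
# used, i.e. asr_weight > 0 and mtlalpha < 1 (with mtlalpha == 1 the ASR
# branch is pure CTC).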
# submodule for MT task
self.mt_weight = getattr(args, "mt_weight", 0.0)
if self.mt_weight > 0:
self.encoder_mt = Encoder(
idim=odim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
input_layer='embed',
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
padding_idx=0
)
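# NOTE: the auxiliary MT task re-encodes the source-language transcription
# with an embedding input layer (padding_idx=0); forward() therefore re-pads
# ys_pad_src with zeros before calling this encoder.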
self.reset_parameters(args) # place after the submodule initialization
if args.mtlalpha > 0.0:
self.ctc = CTC(odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True)
else:
self.ctc = None
if self.asr_weight > 0 and (args.report_cer or args.report_wer):
from espnet.nets.e2e_asr_common import ErrorCalculator
self.error_calculator = ErrorCalculator(args.char_list,
args.sym_space, args.sym_blank,
args.report_cer, args.report_wer)
else:
self.error_calculator = None
self.rnnlm = None
# multilingual E2E-ST related
self.multilingual = getattr(args, "multilingual", False)
self.replace_sos = getattr(args, "replace_sos", False)
if self.multilingual:
assert self.replace_sos
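# NOTE: in the multilingual setting the first target token is a language ID
# that replaces <sos>, so replace_sos must be enabled.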
def reset_parameters(self, args):
"""Initialize parameters."""
# initialize parameters
initialize(self, args.transformer_init)
if self.mt_weight > 0:
torch.nn.init.normal_(self.encoder_mt.embed[0].weight, mean=0, std=args.adim ** -0.5)
torch.nn.init.constant_(self.encoder_mt.embed[0].weight[self.pad], 0)
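# NOTE: the MT source embedding is re-initialized with std = adim ** -0.5 and
# its padding row is zeroed so that <pad> contributes nothing.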
def forward(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E forward.
:param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of source sequences (B)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
:param torch.Tensor ys_pad_src: batch of padded source-language token id sequences (B, Lmax)
:return: ctc loss value
:rtype: torch.Tensor
:return: attention loss value
:rtype: torch.Tensor
:return: accuracy in attention decoder
:rtype: float
"""
# 0. Extract target language ID
# src_lang_ids = None
tgt_lang_ids = None
if self.multilingual:
tgt_lang_ids = ys_pad[:, 0:1]
ys_pad = ys_pad[:, 1:]  # remove the target language ID at the beginning
# 1. forward encoder
xs_pad = xs_pad[:, :max(ilens)] # for data parallel
src_mask = (~make_pad_mask(ilens.tolist())).to(xs_pad.device).unsqueeze(-2)
hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
self.hs_pad = hs_pad
# 2. forward decoder
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
# replace <sos> with target language ID
if self.replace_sos:
ys_in_pad = torch.cat([tgt_lang_ids, ys_in_pad[:, 1:]], dim=1)
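# e.g. with replace_sos, the decoder input becomes [<tgt_lang>, y_1, ..., y_N]
# while the training target remains [y_1, ..., y_N, <eos>]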
ys_mask = target_mask(ys_in_pad, self.ignore_id)
pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
self.pred_pad = pred_pad
pred_pad_asr, pred_pad_mt = None, None
# 3. compute attention loss
loss_asr, loss_mt = 0.0, 0.0
loss_att = self.criterion(pred_pad, ys_out_pad)
# Multi-task w/ ASR
if self.asr_weight > 0 and self.mtlalpha < 1.0:
# forward ASR decoder
ys_in_pad_asr, ys_out_pad_asr = add_sos_eos(ys_pad_src, self.sos, self.eos, self.ignore_id)
ys_mask_asr = target_mask(ys_in_pad_asr, self.ignore_id)
pred_pad_asr, _ = self.decoder_asr(ys_in_pad_asr, ys_mask_asr, hs_pad, hs_mask)
# compute loss
loss_asr = self.criterion(pred_pad_asr, ys_out_pad_asr)
# Multi-task w/ MT
if self.mt_weight > 0:
# forward MT encoder
ilens_mt = torch.sum(ys_pad_src != self.ignore_id, dim=1).cpu().numpy()
# NOTE: ys_pad_src is padded with -1
ys_src = [y[y != self.ignore_id] for y in ys_pad_src] # parse padded ys_src
ys_zero_pad_src = pad_list(ys_src, self.pad) # re-pad with zero
ys_zero_pad_src = ys_zero_pad_src[:, :max(ilens_mt)] # for data parallel
src_mask_mt = (~make_pad_mask(ilens_mt.tolist())).to(ys_zero_pad_src.device).unsqueeze(-2)
# ys_zero_pad_src, ys_pad = self.target_forcing(ys_zero_pad_src, ys_pad)
hs_pad_mt, hs_mask_mt = self.encoder_mt(ys_zero_pad_src, src_mask_mt)
# forward MT decoder
pred_pad_mt, _ = self.decoder(ys_in_pad, ys_mask, hs_pad_mt, hs_mask_mt)
# compute loss
loss_mt = self.criterion(pred_pad_mt, ys_out_pad)
self.acc = th_accuracy(pred_pad.view(-1, self.odim), ys_out_pad,
ignore_label=self.ignore_id)
if pred_pad_asr is not None:
self.acc_asr = th_accuracy(pred_pad_asr.view(-1, self.odim), ys_out_pad_asr,
ignore_label=self.ignore_id)
else:
self.acc_asr = 0.0
if pred_pad_mt is not None:
self.acc_mt = th_accuracy(pred_pad_mt.view(-1, self.odim), ys_out_pad,
ignore_label=self.ignore_id)
else:
self.acc_mt = 0.0
# TODO(karita) show predicted text
# TODO(karita) calculate these stats
cer_ctc = None
if self.mtlalpha == 0.0 or self.asr_weight == 0:
loss_ctc = 0.0
else:
batch_size = xs_pad.size(0)
hs_len = hs_mask.view(batch_size, -1).sum(1)
loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad_src)
if self.error_calculator is not None:
ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad_src.cpu(), is_ctc=True)
# 5. compute cer/wer
cer, wer = None, None # TODO(hirofumi0810): fix later
# if self.training or (self.asr_weight == 0 or self.mtlalpha == 1 or not (self.report_cer or self.report_wer)):
# cer, wer = None, None
# else:
# ys_hat = pred_pad.argmax(dim=-1)
# cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
# copied from e2e_asr
alpha = self.mtlalpha
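# total objective: a weighted sum of the ST, ASR (CTC + attention), and MT
# branches:
#   loss = (1 - asr_weight - mt_weight) * loss_att
#          + asr_weight * (alpha * loss_ctc + (1 - alpha) * loss_asr)
#          + mt_weight * loss_mt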
self.loss = (1 - self.asr_weight - self.mt_weight) * loss_att + self.asr_weight * \
(alpha * loss_ctc + (1 - alpha) * loss_asr) + self.mt_weight * loss_mt
loss_asr_data = float(alpha * loss_ctc + (1 - alpha) * loss_asr)
loss_mt_data = None if self.mt_weight == 0 else float(loss_mt)
loss_st_data = float(loss_att)
loss_data = float(self.loss)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
self.reporter.report(loss_asr_data, loss_mt_data, loss_st_data,
self.acc_asr, self.acc_mt, self.acc,
cer_ctc, cer, wer, 0.0, # TODO(hirofumi0810): bleu
loss_data)
else:
logging.warning('loss (=%f) is not correct', loss_data)
return self.loss
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder)
def encode(self, x):
"""Encode source acoustic features.
:param ndarray x: source acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, _ = self.encoder(x, None)
return enc_output.squeeze(0)
def translate(self, x, trans_args, char_list=None, rnnlm=None, use_jit=False):
"""Translate input speech.
:param ndarray x: input acoustic feature (B, T, D) or (T, D)
:param Namespace trans_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
# prepare sos
if getattr(trans_args, "tgt_lang", False):
if self.replace_sos:
y = char_list.index(trans_args.tgt_lang)
else:
y = self.sos
logging.info('<sos> index: ' + str(y))
logging.info('<sos> mark: ' + char_list[y])
enc_output = self.encode(x).unsqueeze(0)
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
# search parameters
beam = trans_args.beam_size
penalty = trans_args.penalty
vy = h.new_zeros(1).long()
if trans_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(trans_args.maxlenratio * h.size(0)))
minlen = int(trans_args.minlenratio * h.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y]}
hyps = [hyp]
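# Each hypothesis is a dict with its cumulative log-probability ('score'),
# the decoded token ids ('yseq'), and optionally the RNNLM state
# ('rnnlm_prev'); every step expands each hypothesis with the top-beam tokens
# and prunes the set back to the beam size.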
ended_hyps = []
import six
traced_decoder = None
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
vy.unsqueeze(1)
vy[0] = hyp['yseq'][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
traced_decoder = torch.jit.trace(self.decoder.forward_one_step,
(ys, ys_mask, enc_output))
local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
else:
local_att_scores = self.decoder.forward_one_step(ys, ys_mask, enc_output)[0]
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp['rnnlm_prev'], vy)
local_scores = local_att_scores + trans_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
if char_list is not None:
logging.debug(
'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
logging.info('adding <eos> in the last position of the loop')
for hyp in hyps:
hyp['yseq'].append(self.eos)
# add ended hypotheses to a final list, and remove them from the current hypotheses
# (this will be a problem if the number of hyps < beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp['score'] += trans_args.lm_weight * rnnlm.final(
hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
from espnet.nets.e2e_asr_common import end_detect
if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
if len(hyps) > 0:
logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
if char_list is not None:
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))
logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), trans_args.nbest)]
# check the number of hypotheses
if len(nbest_hyps) == 0:
logging.warning('there are no N-best results; performing translation again with a smaller minlenratio.')
# should copy because the Namespace will be overwritten globally
trans_args = Namespace(**vars(trans_args))
trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
return self.translate(x, trans_args, char_list, rnnlm)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
return nbest_hyps
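# Usage sketch (hypothetical `feats`, `trans_args`, and `char_list` from a
# trained ESPnet-ST setup):
#   nbest = model.translate(feats, trans_args, char_list=char_list)
#   best = ''.join(char_list[int(idx)] for idx in nbest[0]['yseq'][1:])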
def calculate_all_attentions(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:param torch.Tensor ys_pad_src: batch of padded token id sequence tensor (B, Lmax)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray
"""
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, ys_pad_src)
ret = dict()
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention) and m.attn is not None: # skip MHA for submodules
ret[name] = m.attn.cpu().numpy()
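# keys are module paths from named_modules(), e.g.
# 'encoder.encoders.0.self_attn' or 'decoder.decoders.0.src_attn'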
return ret