decoder_layer.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from abc import ABC, abstractmethod
from typing import Optional, Tuple, cast, final

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import Dropout, Module
from torch.nn.parameter import Parameter

from fairseq2.nn.incremental_state import IncrementalStateBag
from fairseq2.nn.normalization import LayerNorm
from fairseq2.nn.padding import PaddingMask
from fairseq2.nn.transformer.attention_mask import AttentionMask
from fairseq2.nn.transformer.ffn import FeedForwardNetwork
from fairseq2.nn.transformer.layer_norm import (
    LayerNormFactory,
    create_standard_layer_norm,
)
from fairseq2.nn.transformer.multihead_attention import MultiheadAttention
from fairseq2.nn.transformer.norm_order import TransformerNormOrder
from fairseq2.typing import DataType, Device, finaloverride

class TransformerDecoderLayer(Module, ABC):
    """Represents a Transformer decoder layer."""

    model_dim: int

    def __init__(self, model_dim: int) -> None:
        """
        :param model_dim:
            The dimensionality of the model.
        """
        super().__init__()

        self.model_dim = model_dim

    @abstractmethod
    def forward(
        self,
        seqs: Tensor,
        padding_mask: Optional[PaddingMask],
        self_attn_mask: Optional[AttentionMask] = None,
        encoder_output: Optional[Tensor] = None,
        encoder_padding_mask: Optional[PaddingMask] = None,
        *,
        state_bag: Optional[IncrementalStateBag] = None,
    ) -> Tuple[Tensor, Optional[PaddingMask]]:
        """
        :param seqs:
            The sequences to process. *Shape:* :math:`(N,S,M)`, where :math:`N`
            is the batch size, :math:`S` is the sequence length, and :math:`M`
            is the dimensionality of the model.
        :param padding_mask:
            The padding mask of ``seqs``. *Shape:* :math:`(N,S)`, where :math:`N`
            is the batch size and :math:`S` is the sequence length.
        :param self_attn_mask:
            The mask that will be added to attention weights before computing
            the self attention. *Shape:* :math:`([H],S,S)`, where :math:`H` is
            the number of attention heads and :math:`S` is the sequence length.
        :param encoder_output:
            The encoder output to use in encoder-decoder attention. *Shape:*
            :math:`(N,S_{enc},M_{enc})`, where :math:`N` is the batch size,
            :math:`S_{enc}` is the encoder output sequence length, and
            :math:`M_{enc}` is the dimensionality of the encoder.
        :param encoder_padding_mask:
            The padding mask of ``encoder_output``. *Shape:* :math:`(N,S_{enc})`,
            where :math:`N` is the batch size and :math:`S_{enc}` is the encoder
            output sequence length.
        :param state_bag:
            The state bag to use for incremental decoding.

        :returns:
            - The decoder layer output. *Shape:* Same as ``seqs``.
            - The padding mask of the decoder layer output. *Shape:* Same as
              ``padding_mask``.
        """

    def extra_repr(self) -> str:
        """:meta private:"""
        return f"model_dim={self.model_dim}"

@final
class StandardTransformerDecoderLayer(TransformerDecoderLayer):
    """Represents a Transformer decoder layer as described in
    :cite:t:`https://doi.org/10.48550/arxiv.1706.03762`."""

    self_attn: MultiheadAttention
    self_attn_norm: Optional[LayerNorm]
    self_attn_dropout: Optional[Dropout]
    self_attn_layer_norm: LayerNorm
    encoder_decoder_attn: Optional[MultiheadAttention]
    encoder_decoder_attn_dropout: Optional[Dropout]
    encoder_decoder_attn_layer_norm: Optional[LayerNorm]
    ffn: FeedForwardNetwork
    ffn_dropout: Optional[Dropout]
    residual_scale: Optional[Parameter]
    ffn_layer_norm: LayerNorm
    norm_order: TransformerNormOrder
    def __init__(
        self,
        self_attn: MultiheadAttention,
        encoder_decoder_attn: Optional[MultiheadAttention],
        ffn: FeedForwardNetwork,
        *,
        scale_residual: bool = False,
        dropout_p: float = 0.1,
        norm_order: TransformerNormOrder = TransformerNormOrder.POST,
        layer_norm_factory: Optional[LayerNormFactory] = None,
        device: Optional[Device] = None,
        dtype: Optional[DataType] = None,
    ) -> None:
        """
        :param self_attn:
            The self attention layer.
        :param encoder_decoder_attn:
            The encoder-decoder attention layer.
        :param ffn:
            The feed-forward network.
        :param scale_residual:
            If ``True``, scales residuals before adding them to the output of
            the feed-forward network as described in
            :cite:t:`https://doi.org/10.48550/arxiv.2110.09456`.
        :param dropout_p:
            The dropout probability on outputs of the attention layers and the
            feed-forward network.
        :param norm_order:
            The Layer Normalization order.
        :param layer_norm_factory:
            The factory to construct the Layer Normalization modules.
        """
        model_dim = self_attn.model_dim

        super().__init__(model_dim)

        if layer_norm_factory is None:
            layer_norm_factory = create_standard_layer_norm

        self_attn_layer_norm = layer_norm_factory(model_dim, device=device, dtype=dtype)
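        # The layer norm is registered before the attention module for pre-norm
        # orders and after it for post-norm, so that the registration order of
        # the sub-modules mirrors the order in which they are applied.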
        if norm_order != TransformerNormOrder.POST:
            self.self_attn_layer_norm = self_attn_layer_norm

        self.self_attn = self_attn
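        # Optional sub-modules are registered as `None` so that the attributes
        # are always defined, regardless of the chosen configuration.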
        if norm_order == TransformerNormOrder.PRE_WITH_NORMFORMER:
            self.self_attn_norm = layer_norm_factory(
                model_dim, device=device, dtype=dtype
            )
        else:
            self.register_module("self_attn_norm", None)

        if dropout_p > 0.0:
            self.self_attn_dropout = Dropout(dropout_p)
        else:
            self.register_module("self_attn_dropout", None)

        if norm_order == TransformerNormOrder.POST:
            self.self_attn_layer_norm = self_attn_layer_norm

        if encoder_decoder_attn is None:
            self.register_module("encoder_decoder_attn", None)
            self.register_module("encoder_decoder_attn_layer_norm", None)
        else:
            encoder_decoder_attn_layer_norm = layer_norm_factory(
                model_dim, device=device, dtype=dtype
            )

            if norm_order != TransformerNormOrder.POST:
                self.encoder_decoder_attn_layer_norm = encoder_decoder_attn_layer_norm

            self.encoder_decoder_attn = encoder_decoder_attn

            if dropout_p > 0.0:
                self.encoder_decoder_attn_dropout = Dropout(dropout_p)
            else:
                self.register_module("encoder_decoder_attn_dropout", None)

            if norm_order == TransformerNormOrder.POST:
                self.encoder_decoder_attn_layer_norm = encoder_decoder_attn_layer_norm

        ffn_layer_norm = layer_norm_factory(model_dim, device=device, dtype=dtype)

        if norm_order != TransformerNormOrder.POST:
            self.ffn_layer_norm = ffn_layer_norm

        self.ffn = ffn

        if dropout_p > 0.0:
            self.ffn_dropout = Dropout(dropout_p)
        else:
            self.register_module("ffn_dropout", None)

        if scale_residual:
            self.residual_scale = Parameter(
                torch.empty((model_dim,), device=device, dtype=dtype)
            )
        else:
            self.register_parameter("residual_scale", None)

        if norm_order == TransformerNormOrder.POST:
            self.ffn_layer_norm = ffn_layer_norm

        self.norm_order = norm_order

        self.reset_parameters()
    def reset_parameters(self) -> None:
        """Reset the parameters and buffers of the module."""
        if self.residual_scale is not None:
            nn.init.ones_(self.residual_scale)
    @finaloverride
    def forward(
        self,
        seqs: Tensor,
        padding_mask: Optional[PaddingMask],
        self_attn_mask: Optional[AttentionMask] = None,
        encoder_output: Optional[Tensor] = None,
        encoder_padding_mask: Optional[PaddingMask] = None,
        *,
        state_bag: Optional[IncrementalStateBag] = None,
    ) -> Tuple[Tensor, Optional[PaddingMask]]:
        seqs = self._forward_self_attn(seqs, padding_mask, self_attn_mask, state_bag)

        seqs = self._forward_encoder_decoder_attn(
            seqs, padding_mask, encoder_output, encoder_padding_mask, state_bag
        )

        seqs = self._forward_ffn(seqs)

        return seqs, padding_mask

    def _forward_self_attn(
        self,
        seqs: Tensor,
        padding_mask: Optional[PaddingMask],
        self_attn_mask: Optional[AttentionMask],
        state_bag: Optional[IncrementalStateBag],
    ) -> Tensor:
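        # Pre-norm: normalize the input, apply self attention, then add the
        # residual. Post-norm: add the residual first, then normalize the sum.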
        residual = seqs

        if self.norm_order != TransformerNormOrder.POST:
            seqs = self.self_attn_layer_norm(seqs)

        seqs = self.self_attn(
            seqs,
            padding_mask,
            keys=seqs,
            key_padding_mask=padding_mask,
            values=seqs,
            attn_mask=self_attn_mask,
            state_bag=state_bag,
        )

        if self.self_attn_norm is not None:
            seqs = self.self_attn_norm(seqs)

        if self.self_attn_dropout is not None:
            seqs = self.self_attn_dropout(seqs)

        seqs = seqs + residual

        if self.norm_order == TransformerNormOrder.POST:
            seqs = self.self_attn_layer_norm(seqs)

        return seqs
    def _forward_encoder_decoder_attn(
        self,
        seqs: Tensor,
        padding_mask: Optional[PaddingMask],
        encoder_output: Optional[Tensor],
        encoder_padding_mask: Optional[PaddingMask],
        state_bag: Optional[IncrementalStateBag],
    ) -> Tensor:
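        # A layer constructed without an encoder-decoder attention module (i.e.
        # a decoder-only layer) skips cross-attention entirely; passing an
        # encoder output to such a layer is treated as an error.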
        if self.encoder_decoder_attn is None:
            if encoder_output is not None:
                raise ValueError(
                    "`encoder_output` must be `None` for decoder-only attention."
                )

            return seqs

        if encoder_output is None:
            raise ValueError(
                "`encoder_output` must not be `None` for encoder-decoder attention."
            )

        residual = seqs

        if self.norm_order != TransformerNormOrder.POST:
            seqs = cast(LayerNorm, self.encoder_decoder_attn_layer_norm)(seqs)

        seqs = self.encoder_decoder_attn(
            seqs,
            padding_mask,
            keys=encoder_output,
            key_padding_mask=encoder_padding_mask,
            values=encoder_output,
            state_bag=state_bag,
        )

        if self.encoder_decoder_attn_dropout is not None:
            seqs = self.encoder_decoder_attn_dropout(seqs)

        seqs = seqs + residual

        if self.norm_order == TransformerNormOrder.POST:
            seqs = cast(LayerNorm, self.encoder_decoder_attn_layer_norm)(seqs)

        return seqs
    def _forward_ffn(self, seqs: Tensor) -> Tensor:
        residual = seqs

        if self.norm_order != TransformerNormOrder.POST:
            seqs = self.ffn_layer_norm(seqs)

        seqs = self.ffn(seqs)

        if self.ffn_dropout is not None:
            seqs = self.ffn_dropout(seqs)
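        # With `scale_residual=True`, the residual branch is multiplied by a
        # learned per-dimension scale before being added to the feed-forward
        # output (the scaled residual described in the constructor docstring).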
        if self.residual_scale is not None:
            residual = self.residual_scale * residual

        seqs = seqs + residual

        if self.norm_order == TransformerNormOrder.POST:
            seqs = self.ffn_layer_norm(seqs)

        return seqs
    def extra_repr(self) -> str:
        """:meta private:"""
        s = super().extra_repr()

        return f"{s}, norm_order={self.norm_order}"