# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import collections
import six
from functools import partial, reduce
import warnings
import paddle
from paddle.utils import deprecated
from . import nn
from . import tensor
from . import control_flow
from . import utils
from . import sequence_lod
from .utils import *
from .. import core
from ..framework import default_main_program, Variable
from ..data_feeder import convert_dtype
from ..layer_helper import LayerHelper
from ..framework import in_dygraph_mode
from ..param_attr import ParamAttr
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
__all__ = [
'RNNCell',
'GRUCell',
'LSTMCell',
'Decoder',
'BeamSearchDecoder',
'rnn',
'birnn',
'dynamic_decode',
'DecodeHelper',
'TrainingHelper',
'GreedyEmbeddingHelper',
'SampleEmbeddingHelper',
'BasicDecoder',
'dynamic_lstm',
'dynamic_lstmp',
'dynamic_gru',
'gru_unit',
'lstm_unit',
'lstm',
'beam_search',
'beam_search_decode',
]
class RNNCell(object):
"""
:api_attr: Static Graph
    RNNCell is the base class that abstracts the calculation mapping inputs
    and states to outputs and new states. It is mostly used in RNNs.
"""
def call(self, inputs, states, **kwargs):
r"""
Every cell must implement this method to do the calculations mapping the
inputs and states to the output and new states.
To be more flexible, both inputs and states can be a tensor variable or
a nested structure (list|tuple|namedtuple|dict) of tensor variable, that
is, a (possibly nested structure of) tensor variable[s].
Parameters:
inputs: A (possibly nested structure of) tensor variable[s].
states: A (possibly nested structure of) tensor variable[s].
**kwargs: Additional keyword arguments, provided by the caller.
Returns:
tuple: outputs and new_states pair. outputs and new_states both \
can be nested structure of tensor variables. new_states must \
have the same structure with states.
"""
raise NotImplementedError("RNNCell must implent the call function.")
def __call__(self, inputs, states, **kwargs):
return self.call(inputs, states, **kwargs)
def get_initial_states(self,
batch_ref,
shape=None,
dtype='float32',
init_value=0,
batch_dim_idx=0):
r"""
Generate initialized states according to provided shape, data type and
value.
Parameters:
batch_ref: A (possibly nested structure of) tensor variable[s].
The first dimension of the tensor will be used as batch size to
initialize states.
            shape: A (possibly nested structure of) shape[s], where a shape is
                represented as a list/tuple of integers. -1 (for batch size) will
                be automatically inserted if the shape does not start with it. If
                None, property `state_shape` will be used. The default value is None.
            dtype: A (possibly nested structure of) data type[s]. The structure
                must be the same as that of `shape`, except that when all tensors
                in states have the same data type, a single data type can be used.
                If property `cell.state_dtype` is not available, float32 will be
                used as the data type. The default value is float32.
init_value: A float value used to initialize states.
batch_dim_idx: An integer indicating which dimension of the tensor in
inputs represents batch size. The default value is 0.
Returns:
Variable: tensor variable[s] packed in the same structure provided \
by shape, representing the initialized states.
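        Examples:
            A minimal sketch in static-graph mode; the placeholder name `x`
            and the sizes are illustrative assumptions only:
            .. code-block:: python
                import paddle.fluid as fluid
                import paddle.fluid.layers as layers
                # Batch reference whose first dimension supplies the batch size.
                x = fluid.data(name="x", shape=[None, 16], dtype="float32")
                cell = layers.GRUCell(hidden_size=32)
                # Zero-initialized state shaped [batch_size, 32].
                init_h = cell.get_initial_states(batch_ref=x, shape=[32])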
"""
if sys.version_info < (3, ):
integer_types = (
int,
long, )
else:
integer_types = (int, )
check_variable_and_dtype(batch_ref, 'batch_ref',
['float32', 'float64', 'int32', 'int64'],
'RNNCell')
check_type(shape, 'shape', (list, tuple, type(None), integer_types),
'RNNCell')
if isinstance(shape, (list, tuple)):
shapes = map_structure(lambda x: x, shape)
if isinstance(shape, list):
for i, _shape in enumerate(shapes):
check_type(_shape, 'shapes[' + str(i) + ']', integer_types,
'RNNCell')
else:
check_type(shapes, 'shapes', integer_types, 'RNNCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'RNNCell')
# TODO: use inputs and batch_size
batch_ref = flatten(batch_ref)[0]
def _is_shape_sequence(seq):
if sys.version_info < (3, ):
integer_types = (
int,
long, )
else:
integer_types = (int, )
"""For shape, list/tuple of integer is the finest-grained objection"""
if (isinstance(seq, list) or isinstance(seq, tuple)):
if reduce(lambda flag, x: isinstance(x, integer_types) and flag,
seq, True):
return False
# TODO: Add check for the illegal
if isinstance(seq, dict):
return True
return (isinstance(seq, collections.Sequence) and
not isinstance(seq, six.string_types))
class Shape(object):
def __init__(self, shape):
self.shape = shape if shape[0] == -1 else ([-1] + list(shape))
# nested structure of shapes
states_shapes = self.state_shape if shape is None else shape
is_sequence_ori = utils.is_sequence
utils.is_sequence = _is_shape_sequence
states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
utils.is_sequence = is_sequence_ori
# nested structure of dtypes
try:
states_dtypes = self.state_dtype if dtype is None else dtype
except NotImplementedError: # use fp32 as default
states_dtypes = "float32"
if len(flatten(states_dtypes)) == 1:
dtype = flatten(states_dtypes)[0]
states_dtypes = map_structure(lambda shape: dtype, states_shapes)
init_states = map_structure(
lambda shape, dtype: tensor.fill_constant_batch_size_like(
input=batch_ref,
shape=shape.shape,
dtype=dtype,
value=init_value,
input_dim_idx=batch_dim_idx), states_shapes, states_dtypes)
return init_states
@property
def state_shape(self):
"""
Abstract method (property).
Used to initialize states.
        A (possibly nested structure of) shape[s], where a shape is represented
        as a list/tuple of integers (-1 for batch size would be automatically
        inserted into a shape if the shape does not start with it).
        This property does not need to be implemented if states are not
        initialized by `get_initial_states`, or if the `shape` argument is
        provided when calling `get_initial_states`.
"""
        raise NotImplementedError(
            "Please add implementation for `state_shape` in the used cell.")
@property
def state_dtype(self):
"""
Abstract method (property).
Used to initialize states.
        A (possibly nested structure of) data type[s]. The structure must be the
        same as that of `shape`, except that when all tensors in states have the
        same data type, a single data type can be used.
        This property does not need to be implemented if states are not
        initialized by `get_initial_states`, or if the `dtype` argument is
        provided when calling `get_initial_states`.
"""
        raise NotImplementedError(
            "Please add implementation for `state_dtype` in the used cell.")
class GRUCell(RNNCell):
r"""
:api_attr: Static Graph
    Gated Recurrent Unit cell. It is a wrapper for
    `fluid.contrib.layers.rnn_impl.BasicGRUUnit` that adapts it to RNNCell.
    The formula used is as follows:
    .. math::
        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)
        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)
        \tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)
        h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \tilde{h_t}
For more details, please refer to `Learning Phrase Representations using
RNN Encoder Decoder for Statistical Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
cell = layers.GRUCell(hidden_size=256)
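            import paddle.fluid as fluid
            # Illustrative one-step call (placeholder names and shapes are
            # assumptions); both returned values are the new hidden state h_t.
            step_input = fluid.data(name="x_t", shape=[None, 128], dtype="float32")
            pre_hidden = fluid.data(name="h_prev", shape=[None, 256], dtype="float32")
            new_hidden, new_states = cell(step_input, pre_hidden)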
"""
def __init__(self,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype="float32",
name="GRUCell"):
"""
Constructor of GRUCell.
Parameters:
hidden_size (int): The hidden size in the GRU cell.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight matrix. Default: None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of GRU. Default: None.
gate_activation (function, optional): The activation function for :math:`act_g`.
Default: `fluid.layers.sigmoid`.
activation (function, optional): The activation function for :math:`act_c`.
Default: `fluid.layers.tanh`.
dtype(string, optional): The data type used in this cell. Default float32.
name(string, optional) : The name scope used to identify parameters and biases.
"""
check_type(hidden_size, 'hidden_size', (int), 'GRUCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'GRUCell')
self.hidden_size = hidden_size
from .. import contrib # TODO: resolve recurrent import
self.gru_unit = contrib.layers.rnn_impl.BasicGRUUnit(
name, hidden_size, param_attr, bias_attr, gate_activation,
activation, dtype)
def call(self, inputs, states):
r"""
Perform calculations of GRU.
Parameters:
inputs(Variable): A tensor with shape `[batch_size, input_size]`,
corresponding to :math:`x_t` in the formula. The data type
should be float32 or float64.
            states(Variable): A tensor with shape `[batch_size, hidden_size]`,
corresponding to :math:`h_{t-1}` in the formula. The data type
should be float32 or float64.
Returns:
            tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` and \
                `new_states` are the same tensor shaped `[batch_size, hidden_size]`, \
                corresponding to :math:`h_t` in the formula. The data type of the \
                tensor is the same as that of `states`.
"""
check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'],
'GRUCell')
check_variable_and_dtype(states, 'states', ['float32', 'float64'],
'GRUCell')
new_hidden = self.gru_unit(inputs, states)
return new_hidden, new_hidden
@property
def state_shape(self):
"""
The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch
size would be automatically inserted into shape). The shape corresponds
to :math:`h_{t-1}`.
"""
return [self.hidden_size]
class LSTMCell(RNNCell):
r"""
:api_attr: Static Graph
    Long-Short Term Memory cell. It is a wrapper for
    `fluid.contrib.layers.rnn_impl.BasicLSTMUnit` that adapts it to RNNCell.
    The formula used is as follows:
    .. math::
        i_{t} & = act_g(W_{x_{i}}x_{t} + W_{h_{i}}h_{t-1} + b_{i})
        f_{t} & = act_g(W_{x_{f}}x_{t} + W_{h_{f}}h_{t-1} + b_{f} + forget\_bias)
        c_{t} & = f_{t}c_{t-1} + i_{t} act_c (W_{x_{c}}x_{t} + W_{h_{c}}h_{t-1} + b_{c})
        o_{t} & = act_g(W_{x_{o}}x_{t} + W_{h_{o}}h_{t-1} + b_{o})
        h_{t} & = o_{t} act_c (c_{t})
For more details, please refer to `RECURRENT NEURAL NETWORK REGULARIZATION <http://arxiv.org/abs/1409.2329>`_
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
cell = layers.LSTMCell(hidden_size=256)
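            import paddle.fluid as fluid
            # Illustrative one-step call (placeholder names and shapes are
            # assumptions); LSTM states are a [hidden, cell] pair.
            step_input = fluid.data(name="x_t", shape=[None, 128], dtype="float32")
            pre_hidden = fluid.data(name="h_prev", shape=[None, 256], dtype="float32")
            pre_cell = fluid.data(name="c_prev", shape=[None, 256], dtype="float32")
            new_hidden, new_states = cell(step_input, [pre_hidden, pre_cell])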
"""
def __init__(self,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype="float32",
name="LSTMCell"):
"""
Constructor of LSTMCell.
Parameters:
hidden_size (int): The hidden size in the LSTM cell.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight matrix. Default: None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of LSTM. Default: None.
gate_activation (function, optional): The activation function for :math:`act_g`.
Default: 'fluid.layers.sigmoid'.
            activation (function, optional): The activation function for :math:`act_c`.
                Default: 'fluid.layers.tanh'.
forget_bias(float, optional): forget bias used when computing forget gate.
Default 1.0
dtype(string, optional): The data type used in this cell. Default float32.
name(string, optional) : The name scope used to identify parameters and biases.
"""
check_type(hidden_size, 'hidden_size', (int), 'LSTMCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'LSTMCell')
self.hidden_size = hidden_size
from .. import contrib # TODO: resolve recurrent import
self.lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
name, hidden_size, param_attr, bias_attr, gate_activation,
activation, forget_bias, dtype)
def call(self, inputs, states):
r"""
Perform calculations of LSTM.
Parameters:
inputs(Variable): A tensor with shape `[batch_size, input_size]`,
corresponding to :math:`x_t` in the formula. The data type
should be float32 or float64.
            states(Variable): A list containing two tensors, each shaped
`[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}, c_{t-1}`
in the formula. The data type should be float32 or float64.
Returns:
tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` is \
a tensor with shape `[batch_size, hidden_size]`, corresponding \
to :math:`h_{t}` in the formula; `new_states` is a list containing \
                two tensor variables shaped `[batch_size, hidden_size]`, corresponding \
                to :math:`h_{t}, c_{t}` in the formula. The data type of these \
                tensors is the same as that of `states`.
"""
check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'],
'LSTMCell')
check_type(states, 'states', list, 'LSTMCell')
if isinstance(states, list):
for i, state in enumerate(states):
check_variable_and_dtype(state, 'state[' + str(i) + ']',
['float32', 'float64'], 'LSTMCell')
pre_hidden, pre_cell = states
new_hidden, new_cell = self.lstm_unit(inputs, pre_hidden, pre_cell)
return new_hidden, [new_hidden, new_cell]
@property
def state_shape(self):
"""
The `state_shape` of LSTMCell is a list with two shapes: `[[hidden_size], [hidden_size]]`
(-1 for batch size would be automatically inserted into shape). These two
shapes correspond to :math:`h_{t-1}` and :math:`c_{t-1}` separately.
"""
return [[self.hidden_size], [self.hidden_size]]
def rnn(cell,
inputs,
initial_states=None,
sequence_length=None,
time_major=False,
is_reverse=False,
**kwargs):
"""
    rnn creates a recurrent neural network specified by RNNCell `cell`,
    which performs :code:`cell.call()` (for dygraph mode :code:`cell.forward`)
    repeatedly until it reaches the maximum length of `inputs`.
Arguments:
cell(RNNCellBase): An instance of `RNNCellBase`.
inputs(Tensor): the input sequences.
If time_major is True, the shape is
`[time_steps, batch_size, input_size]`
else the shape is `[batch_size, time_steps, input_size]`.
initial_states(Tensor|tuple|list, optional): the initial state of the
rnn cell. Tensor or a possibly nested structure of tensors. If not
provided, `cell.get_initial_states` would be called to produce
the initial state. Defaults to None.
sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
or int32. The valid lengths of input sequences. Defaults to None.
If `sequence_length` is not None, the inputs are treated as
padded sequences. In each input sequence, elements whose time step
index are not less than the valid length are treated as paddings.
        time_major (bool): Whether the first dimension of the inputs is the
            time steps. Defaults to False.
is_reverse (bool, optional): Indicate whether to calculate in the reverse
order of input sequences. Defaults to False.
**kwargs: Additional keyword arguments to pass to `forward` of the cell.
Returns:
(outputs, final_states)
outputs (Tensor|list|tuple): the output sequence. Tensor or nested
structure of Tensors.
            If `time_major` is True, the shape of each tensor in outputs is
`[time_steps, batch_size, hidden_size]`, else
`[batch_size, time_steps, hidden_size]`.
final_states (Tensor|list|tuple): final states. A (possibly nested structure of)
tensor[s], representing the final state for RNN. It has the same
            structure as the initial states. Each tensor in final states has the same
shape and dtype as the corresponding tensor in initial states.
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
cell = paddle.nn.SimpleRNNCell(16, 32)
inputs = paddle.rand((4, 23, 16))
prev_h = paddle.randn((4, 32))
outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h)
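            # A hedged variant for padded batches (the lengths here are
            # illustrative): steps at or beyond each sequence's valid length
            # keep the last valid state instead of updating it.
            sequence_length = paddle.to_tensor([23, 20, 18, 23], dtype="int64")
            outputs, final_states = paddle.fluid.layers.rnn(
                cell, inputs, prev_h, sequence_length=sequence_length)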
"""
if in_dygraph_mode():
return _rnn_dynamic_graph(cell, inputs, initial_states, sequence_length,
time_major, is_reverse, **kwargs)
else:
return _rnn_static_graph(cell, inputs, initial_states, sequence_length,
time_major, is_reverse, **kwargs)
class ArrayWrapper(object):
def __init__(self, x):
self.array = [x]
def append(self, x):
self.array.append(x)
return self
def __getitem__(self, item):
return self.array.__getitem__(item)
def _maybe_copy(state, new_state, step_mask):
"""update rnn state or just pass the old state through"""
new_state = nn.elementwise_mul(new_state, step_mask, axis=0) \
+ nn.elementwise_mul(state, (1 - step_mask), axis=0)
return new_state
def _transpose_batch_time(x):
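    # Swap the first two (batch and time) axes, leaving any trailing feature
    # axes in place.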
perm = [1, 0] + list(range(2, len(x.shape)))
return nn.transpose(x, perm)
def _rnn_dynamic_graph(cell,
inputs,
initial_states=None,
sequence_length=None,
time_major=False,
is_reverse=False,
**kwargs):
time_step_index = 0 if time_major else 1
flat_inputs = flatten(inputs)
time_steps = flat_inputs[0].shape[time_step_index]
if initial_states is None:
initial_states = cell.get_initial_states(
batch_ref=inputs, batch_dim_idx=1 if time_major else 0)
if not time_major:
inputs = map_structure(_transpose_batch_time, inputs)
if sequence_length is not None:
        mask = sequence_lod.sequence_mask(
            sequence_length, maxlen=time_steps, dtype=flatten(inputs)[0].dtype)
mask = nn.transpose(mask, [1, 0])
if is_reverse:
inputs = map_structure(lambda x: tensor.reverse(x, axis=[0]), inputs)
mask = tensor.reverse(mask, axis=[0]) \
if sequence_length is not None else None
states = initial_states
outputs = []
for i in range(time_steps):
step_inputs = map_structure(lambda x: x[i], inputs)
step_outputs, new_states = cell(step_inputs, states, **kwargs)
if sequence_length is not None:
new_states = map_structure(
partial(
_maybe_copy, step_mask=mask[i]), states, new_states)
states = new_states
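        # Accumulate per-step outputs: wrap them on the first step, then
        # append on later steps so they can be stacked along the time axis.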
outputs = map_structure(lambda x: ArrayWrapper(x),
step_outputs) if i == 0 else map_structure(
lambda x, x_array: x_array.append(x),
step_outputs, outputs)
final_outputs = map_structure(
lambda x: nn.stack(x.array, axis=time_step_index),
outputs)
if is_reverse:
final_outputs = map_structure(
lambda x: tensor.reverse(x, axis=time_step_index),
final_outputs)
final_states = new_states
return final_outputs, final_states
def _rnn_static_graph(cell,
inputs,
initial_states=None,
sequence_length=None,
time_major=False,
is_reverse=False,
**kwargs):
check_type(inputs, 'inputs', (Variable, list, tuple), 'rnn')
if isinstance(inputs, (list, tuple)):
for i, input_x in enumerate(inputs):
check_variable_and_dtype(input_x, 'inputs[' + str(i) + ']',
['float32', 'float64'], 'rnn')
check_type(initial_states, 'initial_states',
(Variable, list, tuple, type(None)), 'rnn')
check_type(sequence_length, 'sequence_length', (Variable, type(None)),
'rnn')
def _switch_grad(x, stop=False):
x.stop_gradient = stop
return x
if initial_states is None:
initial_states = cell.get_initial_states(
batch_ref=inputs, batch_dim_idx=1 if time_major else 0)
initial_states = map_structure(_switch_grad, initial_states)
if not time_major:
inputs = map_structure(_transpose_batch_time, inputs)
if sequence_length:
max_seq_len = nn.shape(flatten(inputs)[0])[0]
mask = sequence_lod.sequence_mask(
sequence_length,
maxlen=max_seq_len,
dtype=flatten(initial_states)[0].dtype)
mask = nn.transpose(mask, [1, 0])
if is_reverse:
inputs = map_structure(lambda x: tensor.reverse(x, axis=[0]), inputs)
mask = tensor.reverse(mask, axis=[0]) if sequence_length else None
# StaticRNN
rnn = control_flow.StaticRNN()
with rnn.step():
inputs = map_structure(rnn.step_input, inputs)
states = map_structure(rnn.memory, initial_states)
copy_states = map_structure(lambda x: x, states)
outputs, new_states = cell(inputs, copy_states, **kwargs)
assert_same_structure(states, new_states)
if sequence_length:
step_mask = rnn.step_input(mask)
new_states = map_structure(
partial(
_maybe_copy, step_mask=step_mask), states, new_states)
map_structure(rnn.update_memory, states, new_states)
flat_outputs = flatten(outputs)
map_structure(rnn.step_output, outputs)
map_structure(rnn.step_output, new_states)
rnn_out = rnn()
final_outputs = rnn_out[:len(flat_outputs)]
final_outputs = pack_sequence_as(outputs, final_outputs)
final_states = map_structure(lambda x: x[-1], rnn_out[len(flat_outputs):])
final_states = pack_sequence_as(new_states, final_states)
if is_reverse:
final_outputs = map_structure(lambda x: tensor.reverse(x, axis=[0]),
final_outputs)
if not time_major:
final_outputs = map_structure(_transpose_batch_time, final_outputs)
return (final_outputs, final_states)
def birnn(cell_fw,
cell_bw,
inputs,
initial_states=None,
sequence_length=None,
time_major=False,
**kwargs):
"""
    birnn creates a bidirectional recurrent neural network specified by
    RNNCell `cell_fw` and `cell_bw`, which performs :code:`cell.call()`
    (for dygraph mode :code:`cell.forward`) repeatedly until it reaches the
    maximum length of `inputs`, and then concatenates the outputs of both
    RNNs along the last axis.
Arguments:
cell_fw(RNNCellBase): An instance of `RNNCellBase`.
cell_bw(RNNCellBase): An instance of `RNNCellBase`.
inputs(Tensor): the input sequences.
If time_major is True, the shape is
`[time_steps, batch_size, input_size]`
else the shape is `[batch_size, time_steps, input_size]`.
initial_states(tuple, optional): A tuple of initial states of
`cell_fw` and `cell_bw`.
If not provided, `cell.get_initial_states` would be called to
produce initial state for each cell. Defaults to None.
sequence_length (Tensor, optional): shape `[batch_size]`, dtype: int64
or int32. The valid lengths of input sequences. Defaults to None.
If `sequence_length` is not None, the inputs are treated as
padded sequences. In each input sequence, elements whose time step
index are not less than the valid length are treated as paddings.
        time_major (bool): Whether the first dimension of the inputs is the
            time steps. Defaults to False.
**kwargs: Additional keyword arguments to pass to `forward` of each cell.
Returns:
(outputs, final_states)
outputs (Tensor): the outputs of the bidirectional RNN. It is the
concatenation of the outputs from the forward RNN and backward
RNN along the last axis.
            If time_major is True, the shape is `[time_steps, batch_size, size]`,
else the shape is `[batch_size, time_steps, size]`, where size is
`cell_fw.hidden_size + cell_bw.hidden_size`.
final_states (tuple): A tuple of the final states of the forward
cell and backward cell.
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
cell_fw = paddle.nn.LSTMCell(16, 32)
cell_bw = paddle.nn.LSTMCell(16, 32)
inputs = paddle.rand((4, 23, 16))
hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
initial_states = ((hf, cf), (hb, cb))
outputs, final_states = paddle.fluid.layers.birnn(
cell_fw, cell_bw, inputs, initial_states)
"""
if initial_states is None:
states_fw = cell_fw.get_initial_states(
batch_ref=inputs, batch_dim_idx=1 if time_major else 0)
        states_bw = cell_bw.get_initial_states(
batch_ref=inputs, batch_dim_idx=1 if time_major else 0)
else:
states_fw, states_bw = initial_states
outputs_fw, states_fw = rnn(cell_fw,
inputs,
states_fw,
sequence_length,
time_major=time_major,
**kwargs)
outputs_bw, states_bw = rnn(cell_bw,
inputs,
states_bw,
sequence_length,
time_major=time_major,
is_reverse=True,
**kwargs)
outputs = map_structure(lambda x, y: tensor.concat([x, y], -1), outputs_fw,
outputs_bw)
final_states = (states_fw, states_bw)
return outputs, final_states
class Decoder(object):
"""
:api_attr: Static Graph
Decoder is the base class for any decoder instance used in `dynamic_decode`.
    It provides an interface for one-step output generation, which can be
used to generate sequences.
The key abstraction provided by Decoder is:
1. :code:`(initial_input, initial_state, finished) = initialize(inits)` ,
which generates the input and state for the first decoding step, and gives the
initial status telling whether each sequence in the batch is finished.
It would be called once before the decoding iterations.
2. :code:`(output, next_state, next_input, finished) = step(time, input, state)` ,
which transforms the input and state to the output and new state, generates
input for the next decoding step, and emits the flag indicating finished status.
It is the main part for each decoding iteration.
3. :code:`(final_outputs, final_state) = finalize(outputs, final_state, sequence_lengths)` ,
    which revises the outputs (the stacked output of all time steps) and the
    final state (the state from the last decoding step) to produce revised
    counterparts for special usage.
    It does not need to be implemented if there is no need to revise the
    stacked outputs and the state from the last decoding step. If implemented,
    it would be called after the decoding iterations.
    Decoder is more general than RNNCell, since the returned `next_input`
    and `finished` allow it to determine its own input and when to finish
    during dynamic decoding. A Decoder usually wraps an RNNCell instance,
    though this is not required.
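    Examples:
        .. code-block:: python
            # An illustrative pseudocode sketch of how dynamic_decode drives
            # the Decoder protocol (not runnable as-is; see BeamSearchDecoder
            # for a complete implementation):
            #
            #   (inputs, states, finished) = decoder.initialize(inits)
            #   step = 0
            #   while not all(finished):
            #       (outputs, next_states, next_inputs, finished) = decoder.step(
            #           step, inputs, states)
            #       inputs, states = next_inputs, next_states
            #       step += 1
            #   final = decoder.finalize(stacked_outputs, states, lengths)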
"""
def initialize(self, inits):
r"""
Called once before the decoding iterations.
Parameters:
inits: Argument provided by the caller.
Returns:
tuple: A tuple( :code:`(initial_inputs, initial_states, finished)` ). \
`initial_inputs` and `initial_states` both are a (possibly nested \
structure of) tensor variable[s], and `finished` is a tensor with \
bool data type.
"""
raise NotImplementedError
def step(self, time, inputs, states, **kwargs):
r"""
Called per step of decoding.
Parameters:
time(Variable): A Tensor with shape :math:`[1]` provided by the caller.
The data type is int64.
inputs(Variable): A (possibly nested structure of) tensor variable[s].
states(Variable): A (possibly nested structure of) tensor variable[s].
**kwargs: Additional keyword arguments, provided by the caller.
Returns:
            tuple: A tuple( :code:`(outputs, next_states, next_inputs, finished)` ). \
`next_inputs` and `next_states` both are a (possibly nested \
structure of) tensor variable[s], and the structure, shape and \
data type must be same as the counterpart from input arguments. \
`outputs` is a (possibly nested structure of) tensor variable[s]. \
`finished` is a Tensor with bool data type.
"""
raise NotImplementedError
def finalize(self, outputs, final_states, sequence_lengths):
r"""
Called once after the decoding iterations if implemented.
Parameters:
            outputs(Variable): A (possibly nested structure of) tensor variable[s].
                The structure and data type are the same as `output_dtype`.
                The tensor stacks all time steps' output, thus it has shape
                :math:`[time\_step, batch\_size, ...]` , which is done by the caller.
            final_states(Variable): A (possibly nested structure of) tensor variable[s].
                It is the `next_states` returned by `decoder.step` at the last
                decoding step, thus it has the same structure, shape and data type
                as states at any time step.
            sequence_lengths(Variable): An `int64` tensor shaped `[batch_size]`,
                holding the number of valid decoding steps for each sequence in
                the batch. It is provided by the caller.
Returns:
tuple: A tuple( :code:`(final_outputs, final_states)` ). \
`final_outputs` and `final_states` both are a (possibly nested \
structure of) tensor variable[s].
"""
raise NotImplementedError
@property
def tracks_own_finished(self):
"""
        Describes whether the Decoder keeps track of finished states by itself.
        `decoder.step()` would emit a bool `finished` value at each decoding
        step. The emitted `finished` can be used directly to determine whether
        every batch entry is finished, or it can be combined with the finished
        tracker kept in `dynamic_decode` by performing a logical OR to take
        already finished entries into account.
        If `False`, the latter would be used when performing `dynamic_decode`,
        which is the default. Otherwise, the former would be used, taking the
        finished value emitted by the decoder directly as the finished status
        of all batch entries; this is the case when batch entries might be
        reordered, such as beams in BeamSearchDecoder.
Returns:
bool: A python bool `False`.
"""
return False
class BeamSearchDecoder(Decoder):
"""
Decoder with beam search decoding strategy. It wraps a cell to get probabilities,
and follows a beam search step to calculate scores and select candidate
token ids for each decoding step.
Please refer to `Beam search <https://en.wikipedia.org/wiki/Beam_search>`_
for more details.
**NOTE** When decoding with beam search, the `inputs` and `states` of cell
    would be tiled to `beam_size` (unsqueeze and tile), resulting in shapes like
`[batch_size * beam_size, ...]` , which is built into `BeamSearchDecoder` and
done automatically. Thus any other tensor with shape `[batch_size, ...]` used
in `cell.call` needs to be tiled manually first, which can be completed by using
:code:`BeamSearchDecoder.tile_beam_merge_with_batch` . The most common case
for this is the encoder output in attention mechanism.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.nn import BeamSearchDecoder, dynamic_decode
from paddle.nn import GRUCell, Linear, Embedding
trg_embeder = Embedding(100, 32)
output_layer = Linear(32, 32)
decoder_cell = GRUCell(input_size=32, hidden_size=32)
decoder = BeamSearchDecoder(decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=trg_embeder,
output_fn=output_layer)
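            # A hedged sketch of running the decoder above (the encoder output
            # shape and max_step_num are illustrative assumptions):
            encoder_output = paddle.rand((4, 60, 32))
            outputs = dynamic_decode(
                decoder=decoder,
                inits=decoder_cell.get_initial_states(encoder_output),
                max_step_num=10)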
"""
def __init__(self,
cell,
start_token,
end_token,
beam_size,
embedding_fn=None,
output_fn=None):
"""
Constructor of BeamSearchDecoder.
Parameters:
cell(RNNCellBase): An instance of `RNNCellBase` or object with the same interface.
start_token(int): The start token id.
end_token(int): The end token id.
beam_size(int): The beam width used in beam search.
embedding_fn(optional): A callable to apply to selected candidate ids.
Mostly it is an embedding layer to transform ids to embeddings,
and the returned value acts as the `input` argument for `cell.call`.
If not provided, the id to embedding transformation must be built into
`cell.call`. Default None.
            output_fn(optional): A callable to apply to the cell's output before
                calculating scores and selecting candidate token ids. Default None.
"""
self.cell = cell
self.embedding_fn = embedding_fn
self.output_fn = output_fn
self.start_token = start_token
self.end_token = end_token
self.beam_size = beam_size
@staticmethod
def tile_beam_merge_with_batch(x, beam_size):
r"""
Tile the batch dimension of a tensor. Specifically, this function takes
a tensor t shaped `[batch_size, s0, s1, ...]` composed of minibatch
entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * beam_size, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`beam_size` times.
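        For example, with `batch_size=2` and `beam_size=3`, the minibatch rows
        become `t[0], t[0], t[0], t[1], t[1], t[1]`.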
Parameters:
x(Variable): A tensor with shape `[batch_size, ...]`. The data type
should be float32, float64, int32, int64 or bool.
beam_size(int): The beam width used in beam search.
Returns:
Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable),
'BeamSearchDecoder.tile_beam_merge_with_batch')
x = nn.unsqueeze(x, [1]) # [batch_size, 1, ...]
expand_times = [1] * len(x.shape)
expand_times[1] = beam_size
x = nn.expand(x, expand_times) # [batch_size, beam_size, ...]
x = nn.transpose(x, list(range(2, len(x.shape))) +
[0, 1]) # [..., batch_size, beam_size]
# use 0 to copy to avoid wrong shape
x = nn.reshape(
x, shape=[0] *
(len(x.shape) - 2) + [-1]) # [..., batch_size * beam_size]
x = nn.transpose(
x, [len(x.shape) - 1] +
list(range(0, len(x.shape) - 1))) # [batch_size * beam_size, ...]
return x
def _split_batch_beams(self, x):
r"""
Reshape a tensor with shape `[batch_size * beam_size, ...]` to a new
tensor with shape `[batch_size, beam_size, ...]`.
Parameters:
x(Variable): A tensor with shape `[batch_size * beam_size, ...]`. The
data type should be float32, float64, int32, int64 or bool.
Returns:
Variable: A tensor with shape `[batch_size, beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._split_batch_beams')
# TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
return nn.reshape(x, shape=[-1, self.beam_size] + list(x.shape[1:]))
def _merge_batch_beams(self, x):
r"""
Reshape a tensor with shape `[batch_size, beam_size, ...]` to a new
tensor with shape `[batch_size * beam_size, ...]`.
Parameters:
x(Variable): A tensor with shape `[batch_size, beam_size, ...]`. The
data type should be float32, float64, int32, int64 or bool.
Returns:
Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._merge_batch_beams')