From 373ab6af46a582c7afbad6742a67b9dc4fea3df9 Mon Sep 17 00:00:00 2001
From: Yong Tang
Date: Mon, 29 Apr 2019 18:26:33 +0000
Subject: [PATCH] Fix misalignment of documentation in BahdanauAttention

This fix addresses the misalignment of documentation in BahdanauAttention,
as specified in 28054.

The issue seems to be that ` (optional)` should be placed after the `:`
so that memory_sequence_length can be identified as an arg.

This fixes 28054.

Signed-off-by: Yong Tang
---
 tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index 35da873b2ac885..7aa207be72ef78 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -937,7 +937,7 @@ def __init__(self,
       num_units: The depth of the query mechanism.
       memory: The memory to query; usually the output of an RNN encoder. This
         tensor should be shaped `[batch_size, max_time, ...]`.
-      memory_sequence_length (optional): Sequence lengths for the batch entries
+      memory_sequence_length: (optional) Sequence lengths for the batch entries
         in memory. If provided, the memory tensor rows are masked with zeros
         for values past the respective sequence lengths.
       normalize: Python boolean. Whether to normalize the energy term.
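
For context (not part of the patch), a minimal sketch of how the documented
`memory_sequence_length` argument is typically passed, assuming the TF 1.x
`tf.contrib.seq2seq` API; shapes and values are illustrative:

    import tensorflow as tf

    batch_size, max_time, depth = 4, 10, 32

    # Encoder outputs serve as the attention memory: [batch_size, max_time, ...].
    encoder_outputs = tf.random_normal([batch_size, max_time, depth])

    # Valid (unpadded) length of each batch entry in the memory.
    source_lengths = tf.constant([10, 7, 5, 9])

    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=depth,
        memory=encoder_outputs,
        # (optional) masks memory positions past each sequence length
        memory_sequence_length=source_lengths,
        normalize=False)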