From 89fa65c0b2550833611f719d3ca351e8ca739350 Mon Sep 17 00:00:00 2001
From: Guillaume Klein
Date: Fri, 20 Dec 2019 14:03:10 +0100
Subject: [PATCH] Remove experimental_run_tf_function in AttentionWrapper test

---
 tensorflow_addons/seq2seq/attention_wrapper_test.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tensorflow_addons/seq2seq/attention_wrapper_test.py b/tensorflow_addons/seq2seq/attention_wrapper_test.py
index c0100db329..942f0c8a51 100644
--- a/tensorflow_addons/seq2seq/attention_wrapper_test.py
+++ b/tensorflow_addons/seq2seq/attention_wrapper_test.py
@@ -125,7 +125,6 @@ def test_passing_memory_from_call(self, attention_cls):
       ("bahdanau_monotonic", wrapper.BahdanauMonotonicAttention),
   )
   def test_save_load_layer(self, attention_cls):
-    self.skipTest("Attention not working with single code path.")
     vocab = 20
     embedding_dim = 6
     inputs = tf.keras.Input(shape=[self.timestep])
@@ -146,7 +145,7 @@ def test_save_load_layer(self, attention_cls):
     model = tf.keras.Model([inputs, query, state], score)
     # Fall back to v1 style Keras training loop until issue with
     # using outputs of a layer in another layer's constructor.
-    model.compile("rmsprop", "mse", experimental_run_tf_function=False)
+    model.compile("rmsprop", "mse")
     model.fit([x, self.query, self.state], (y, y))
     y_ref = model.predict_on_batch([x_test, self.query, self.state])
 
@@ -158,8 +157,7 @@ def test_save_load_layer(self, attention_cls):
 
     # Fall back to v1 style Keras training loop until issue with
    # using outputs of a layer in another layer's constructor.
-    loaded_model.compile(
-        "rmsprop", "mse", experimental_run_tf_function=False)
+    loaded_model.compile("rmsprop", "mse")
 
     y = loaded_model.predict_on_batch([x_test, self.query, self.state])
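
Note (illustration, not part of the patch): the change above un-skips test_save_load_layer and drops the experimental_run_tf_function=False argument from Model.compile, an argument that newer TensorFlow 2.x releases deprecate and eventually remove. A minimal sketch of the resulting compile/fit pattern, using a hypothetical toy model (the model, data shapes, and layer choices below are illustrative and not taken from the test file):

    import numpy as np
    import tensorflow as tf

    # Hypothetical toy model, only to show the compile call without the flag.
    inputs = tf.keras.Input(shape=(4,))
    outputs = tf.keras.layers.Dense(1)(inputs)
    model = tf.keras.Model(inputs, outputs)

    # Same pattern as the patched test: rely on the default Keras training
    # loop instead of passing experimental_run_tf_function=False.
    model.compile("rmsprop", "mse")
    model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)
    preds = model.predict_on_batch(np.random.rand(2, 4))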