From b5a794c2763270db86644c2988a08f91259aeb46 Mon Sep 17 00:00:00 2001
From: Tzu-Wei Sung
Date: Thu, 15 Aug 2019 17:14:51 +0800
Subject: [PATCH] fix float64 tests

---
 .../seq2seq/attention_wrapper.py      |  6 ++++--
 .../seq2seq/attention_wrapper_test.py | 20 +++++++++----------
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/tensorflow_addons/seq2seq/attention_wrapper.py b/tensorflow_addons/seq2seq/attention_wrapper.py
index aa8b38a6b6..275245fded 100644
--- a/tensorflow_addons/seq2seq/attention_wrapper.py
+++ b/tensorflow_addons/seq2seq/attention_wrapper.py
@@ -1538,7 +1538,8 @@ def __init__(self,
                  initial_cell_state=None,
                  name=None,
                  attention_layer=None,
-                 attention_fn=None):
+                 attention_fn=None,
+                 **kwargs):
         """Construct the `AttentionWrapper`.
 
         **NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped
@@ -1619,6 +1620,7 @@ def __init__(self,
             attention_layer) and outputs (attention, alignments,
             next_attention_state). If provided, the attention_layer_size should
             be the size of the outputs of attention_fn.
+          **kwargs: Other keyword arguments for layer creation.
 
         Raises:
           TypeError: `attention_layer_size` is not None and
@@ -1629,7 +1631,7 @@ def __init__(self,
            of `attention_layer_size`; if `attention_layer_size` and
            `attention_layer` are set simultaneously.
         """
-        super(AttentionWrapper, self).__init__(name=name)
+        super(AttentionWrapper, self).__init__(name=name, **kwargs)
         rnn_cell_impl.assert_like_rnncell("cell", cell)
         if isinstance(attention_mechanism, (list, tuple)):
             self._is_multi = True
diff --git a/tensorflow_addons/seq2seq/attention_wrapper_test.py b/tensorflow_addons/seq2seq/attention_wrapper_test.py
index 0c39c26b4e..422e56c9d7 100644
--- a/tensorflow_addons/seq2seq/attention_wrapper_test.py
+++ b/tensorflow_addons/seq2seq/attention_wrapper_test.py
@@ -466,8 +466,7 @@ def _testWithMaybeMultiAttention(self,
             expected_final_alignment_history,
             final_alignment_history_info)
 
-    # TODO: #407 Float64 test is failing
-    @parameterized.parameters([np.float32])
+    @parameterized.parameters([np.float32, np.float64])
     def testBahdanauNormalizedDType(self, dtype):
         encoder_outputs = self.encoder_outputs.astype(dtype)
         decoder_inputs = self.decoder_inputs.astype(dtype)
@@ -478,11 +477,12 @@ def testBahdanauNormalizedDType(self, dtype):
             normalize=True,
             dtype=dtype)
         cell = keras.layers.LSTMCell(
-            self.units, recurrent_activation="sigmoid")
-        cell = wrapper.AttentionWrapper(cell, attention_mechanism)
+            self.units, recurrent_activation="sigmoid", dtype=dtype)
+        cell = wrapper.AttentionWrapper(cell, attention_mechanism, dtype=dtype)
 
         sampler = sampler_py.TrainingSampler()
-        my_decoder = basic_decoder.BasicDecoder(cell=cell, sampler=sampler)
+        my_decoder = basic_decoder.BasicDecoder(
+            cell=cell, sampler=sampler, dtype=dtype)
 
         final_outputs, final_state, _ = my_decoder(
             decoder_inputs,
@@ -493,8 +493,7 @@ def testBahdanauNormalizedDType(self, dtype):
         self.assertEqual(final_outputs.rnn_output.dtype, dtype)
         self.assertIsInstance(final_state, wrapper.AttentionWrapperState)
 
-    # TODO: #407 Float64 test is failing
-    @parameterized.parameters([np.float32])
+    @parameterized.parameters([np.float32, np.float64])
     def testLuongScaledDType(self, dtype):
         # Test case for GitHub issue 18099
         encoder_outputs = self.encoder_outputs.astype(dtype)
@@ -507,11 +506,12 @@ def testLuongScaledDType(self, dtype):
             dtype=dtype,
         )
         cell = keras.layers.LSTMCell(
-            self.units, recurrent_activation="sigmoid")
-        cell = wrapper.AttentionWrapper(cell, attention_mechanism)
+            self.units, recurrent_activation="sigmoid", dtype=dtype)
+        cell = wrapper.AttentionWrapper(cell, attention_mechanism, dtype=dtype)
 
         sampler = sampler_py.TrainingSampler()
-        my_decoder = basic_decoder.BasicDecoder(cell=cell, sampler=sampler)
+        my_decoder = basic_decoder.BasicDecoder(
+            cell=cell, sampler=sampler, dtype=dtype)
 
         final_outputs, final_state, _ = my_decoder(
             decoder_inputs,