diff --git a/tensorflow_addons/layers/__init__.py b/tensorflow_addons/layers/__init__.py
index d527e16362..e6c9d0055c 100644
--- a/tensorflow_addons/layers/__init__.py
+++ b/tensorflow_addons/layers/__init__.py
@@ -18,11 +18,11 @@
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow_addons.layers.gelu import GeLU
+from tensorflow_addons.layers.gelu import GELU
 from tensorflow_addons.layers.maxout import Maxout
 from tensorflow_addons.layers.normalizations import GroupNormalization
 from tensorflow_addons.layers.normalizations import InstanceNormalization
 from tensorflow_addons.layers.optical_flow import CorrelationCost
 from tensorflow_addons.layers.poincare import PoincareNormalize
 from tensorflow_addons.layers.sparsemax import Sparsemax
-from tensorflow_addons.layers.wrappers import WeightNormalization
\ No newline at end of file
+from tensorflow_addons.layers.wrappers import WeightNormalization
diff --git a/tensorflow_addons/layers/gelu.py b/tensorflow_addons/layers/gelu.py
index 31420f76de..d2dcfa87e5 100644
--- a/tensorflow_addons/layers/gelu.py
+++ b/tensorflow_addons/layers/gelu.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Implements GeLU activation."""
+"""Implements GELU activation."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -23,7 +23,7 @@
 
 
 @tf.keras.utils.register_keras_serializable(package='Addons')
-class GeLU(tf.keras.layers.Layer):
+class GELU(tf.keras.layers.Layer):
     """Gaussian Error Linear Unit.
 
     A smoother version of ReLU generally used
@@ -40,7 +40,7 @@ class GeLU(tf.keras.layers.Layer):
     """
 
     def __init__(self, approximate=True, **kwargs):
-        super(GeLU, self).__init__(**kwargs)
+        super(GELU, self).__init__(**kwargs)
         self.approximate = approximate
         self.supports_masking = True
 
@@ -49,7 +49,7 @@ def call(self, inputs):
 
     def get_config(self):
         config = {'approximate': self.approximate}
-        base_config = super(GeLU, self).get_config()
+        base_config = super(GELU, self).get_config()
         return dict(list(base_config.items()) + list(config.items()))
 
     def compute_output_shape(self, input_shape):
diff --git a/tensorflow_addons/layers/gelu_test.py b/tensorflow_addons/layers/gelu_test.py
index 99331fb44e..d31badcab1 100644
--- a/tensorflow_addons/layers/gelu_test.py
+++ b/tensorflow_addons/layers/gelu_test.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests for GeLU activation."""
+"""Tests for GELU activation."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -21,18 +21,18 @@
 import numpy as np
 import tensorflow as tf
 from absl.testing import parameterized
-from tensorflow_addons.layers.gelu import GeLU
+from tensorflow_addons.layers.gelu import GELU
 from tensorflow_addons.utils import test_utils
 
 
 @parameterized.parameters([np.float16, np.float32, np.float64])
 @test_utils.run_all_in_graph_and_eager_modes
-class TestGeLU(tf.test.TestCase):
+class TestGELU(tf.test.TestCase):
     def test_random(self, dtype):
         x = np.array([[0.5, 1.2, -0.3]]).astype(dtype)
         val = np.array([[0.345714, 1.0617027, -0.11462909]]).astype(dtype)
         test_utils.layer_test(
-            GeLU, kwargs={'dtype': dtype}, input_data=x, expected_output=val)
+            GELU, kwargs={'dtype': dtype}, input_data=x, expected_output=val)
 
 
 if __name__ == '__main__':
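After this rename the layer is exposed as GELU rather than GeLU. Below is a minimal usage sketch, not part of the diff, assuming TensorFlow 2.x and a tensorflow-addons build that already includes this change, imported here as tfa:

import tensorflow as tf
import tensorflow_addons as tfa

# Apply the renamed layer directly to a tensor; approximate=True selects the
# tanh-based approximation, which is the layer's default in this diff.
gelu = tfa.layers.GELU(approximate=True)
x = tf.constant([[0.5, 1.2, -0.3]], dtype=tf.float32)
print(gelu(x).numpy())  # roughly [[0.345714, 1.0617027, -0.11462909]], the values asserted in gelu_test.py

# The layer is registered as Keras-serializable (package='Addons'), so a
# get_config / from_config round trip should reproduce an equivalent layer.
config = gelu.get_config()
restored = tfa.layers.GELU.from_config(config)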