diff --git a/build_deps/toolchains/gpu/find_cuda_config.py b/build_deps/toolchains/gpu/find_cuda_config.py index 868f7bddae..dd814c309d 100644 --- a/build_deps/toolchains/gpu/find_cuda_config.py +++ b/build_deps/toolchains/gpu/find_cuda_config.py @@ -277,12 +277,16 @@ def get_nvcc_version(path): nvcc_name = "nvcc.exe" if _is_windows() else "nvcc" nvcc_path, nvcc_version = _find_versioned_file( - base_paths, ["", "bin",], nvcc_name, cuda_version, get_nvcc_version + base_paths, + ["", "bin"], + nvcc_name, + cuda_version, + get_nvcc_version, ) nvvm_path = _find_file( base_paths, - ["nvvm/libdevice", "share/cuda", "lib/nvidia-cuda-toolkit/libdevice",], + ["nvvm/libdevice", "share/cuda", "lib/nvidia-cuda-toolkit/libdevice"], "libdevice*.10.bc", ) diff --git a/tensorflow_addons/activations/tests/sparsemax_test.py b/tensorflow_addons/activations/tests/sparsemax_test.py index a027b13453..0dabd13e07 100644 --- a/tensorflow_addons/activations/tests/sparsemax_test.py +++ b/tensorflow_addons/activations/tests/sparsemax_test.py @@ -112,7 +112,7 @@ def test_sparsemax_against_numpy_high_rank(dtype): def test_sparsemax_of_nan(dtype): """check sparsemax transfers nan.""" z_nan = np.asarray( - [[0, np.nan, 0], [0, np.nan, np.nan], [np.nan, np.nan, np.nan],] + [[0, np.nan, 0], [0, np.nan, np.nan], [np.nan, np.nan, np.nan]] ).astype(dtype) tf_sparsemax_nan = sparsemax(z_nan) @@ -132,7 +132,7 @@ def test_sparsemax_of_nan(dtype): def test_sparsemax_of_inf(dtype): """check sparsemax is infinity safe.""" z_neg = np.asarray( - [[0, -np.inf, 0], [0, -np.inf, -np.inf], [-np.inf, -np.inf, -np.inf],] + [[0, -np.inf, 0], [0, -np.inf, -np.inf], [-np.inf, -np.inf, -np.inf]] ).astype(dtype) z_pos = np.asarray( [[0, np.inf, 0], [0, np.inf, np.inf], [np.inf, np.inf, np.inf]] diff --git a/tensorflow_addons/image/compose_ops.py b/tensorflow_addons/image/compose_ops.py index 9d3aacac97..263fe5cfa5 100644 --- a/tensorflow_addons/image/compose_ops.py +++ b/tensorflow_addons/image/compose_ops.py @@ -43,7 +43,7 @@ def blend(image1: TensorLike, image2: TensorLike, factor: Number) -> tf.Tensor: Returns: A blended image Tensor of `tf.float32`. 
- """ + """ with tf.name_scope("blend"): if factor == 0.0: diff --git a/tensorflow_addons/image/cutout_ops.py b/tensorflow_addons/image/cutout_ops.py index 6cf52b0980..6bdbbaf03c 100644 --- a/tensorflow_addons/image/cutout_ops.py +++ b/tensorflow_addons/image/cutout_ops.py @@ -100,7 +100,7 @@ def random_cutout( ) offset = tf.transpose([cutout_center_height, cutout_center_width], [1, 0]) - return cutout(images, mask_size, offset, constant_values, data_format,) + return cutout(images, mask_size, offset, constant_values, data_format) def cutout( diff --git a/tensorflow_addons/image/filters.py b/tensorflow_addons/image/filters.py index cc4fc11cad..6e3da3e1ba 100644 --- a/tensorflow_addons/image/filters.py +++ b/tensorflow_addons/image/filters.py @@ -304,9 +304,7 @@ def gaussian_filter2d( gaussian_kernel_2d, [filter_shape[0], filter_shape[1], channels, 1] ) - image = _pad( - image, filter_shape, mode=padding, constant_values=constant_values, - ) + image = _pad(image, filter_shape, mode=padding, constant_values=constant_values) output = tf.nn.depthwise_conv2d( input=image, diff --git a/tensorflow_addons/image/interpolate_spline.py b/tensorflow_addons/image/interpolate_spline.py index 6d27599245..3af817bdf2 100644 --- a/tensorflow_addons/image/interpolate_spline.py +++ b/tensorflow_addons/image/interpolate_spline.py @@ -296,7 +296,7 @@ def interpolate_spline( train_values to perform polyharmonic interpolation. The query values are the values of the interpolant evaluated at the locations specified in query_points. - """ + """ with tf.name_scope(name or "interpolate_spline"): train_points = tf.convert_to_tensor(train_points) train_values = tf.convert_to_tensor(train_values) diff --git a/tensorflow_addons/image/sparse_image_warp.py b/tensorflow_addons/image/sparse_image_warp.py index 9b50a4da08..0470788897 100644 --- a/tensorflow_addons/image/sparse_image_warp.py +++ b/tensorflow_addons/image/sparse_image_warp.py @@ -39,7 +39,7 @@ def _expand_to_minibatch(np_array: TensorLike, batch_size: TensorLike) -> Tensor def _get_boundary_locations( - image_height: TensorLike, image_width: TensorLike, num_points_per_edge: TensorLike, + image_height: TensorLike, image_width: TensorLike, num_points_per_edge: TensorLike ) -> TensorLike: """Compute evenly-spaced indices along edge of image.""" y_range = np.linspace(0, image_height - 1, num_points_per_edge + 2) diff --git a/tensorflow_addons/image/tests/connected_components_test.py b/tensorflow_addons/image/tests/connected_components_test.py index ec354932ad..e1c3d10bd1 100644 --- a/tensorflow_addons/image/tests/connected_components_test.py +++ b/tensorflow_addons/image/tests/connected_components_test.py @@ -118,7 +118,7 @@ def test_multiple_images(): @pytest.mark.usefixtures("maybe_run_functions_eagerly") def test_zeros(): np.testing.assert_equal( - connected_components(tf.zeros((100, 20, 50), tf.bool)), np.zeros((100, 20, 50)), + connected_components(tf.zeros((100, 20, 50), tf.bool)), np.zeros((100, 20, 50)) ) @@ -134,7 +134,7 @@ def test_ones(): def test_ones_small(): np.testing.assert_equal( - connected_components(tf.ones((3, 5), tf.bool)).numpy(), np.ones((3, 5)), + connected_components(tf.ones((3, 5), tf.bool)).numpy(), np.ones((3, 5)) ) diff --git a/tensorflow_addons/image/tests/dense_image_warp_test.py b/tensorflow_addons/image/tests/dense_image_warp_test.py index 24397ec7be..e883fbe9c3 100644 --- a/tensorflow_addons/image/tests/dense_image_warp_test.py +++ b/tensorflow_addons/image/tests/dense_image_warp_test.py @@ -30,7 +30,7 @@ def 
test_interpolate_small_grid_ij(): shape=[1, 4, 3, 1], ) query_points = tf.constant( - [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [1.5, 1.5], [3.0, 2.0]], shape=[1, 5, 2], + [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [1.5, 1.5], [3.0, 2.0]], shape=[1, 5, 2] ) expected_results = np.reshape(np.array([0.0, 3.0, 6.5, 6.0, 11.0]), [1, 5, 1]) @@ -45,7 +45,7 @@ def test_interpolate_small_grid_xy(): shape=[1, 4, 3, 1], ) query_points = tf.constant( - [[0.0, 0.0], [0.0, 1.0], [0.5, 2.0], [1.5, 1.5], [2.0, 3.0]], shape=[1, 5, 2], + [[0.0, 0.0], [0.0, 1.0], [0.5, 2.0], [1.5, 1.5], [2.0, 3.0]], shape=[1, 5, 2] ) expected_results = np.reshape(np.array([0.0, 3.0, 6.5, 6.0, 11.0]), [1, 5, 1]) @@ -91,7 +91,8 @@ def _check_zero_flow_correctness(shape, image_type, flow_type): rand_flows *= 0 interp = dense_image_warp( - image=tf.convert_to_tensor(rand_image), flow=tf.convert_to_tensor(rand_flows), + image=tf.convert_to_tensor(rand_image), + flow=tf.convert_to_tensor(rand_flows), ) np.testing.assert_allclose(rand_image, interp, rtol=1e-6, atol=1e-6) diff --git a/tensorflow_addons/image/tests/distort_image_ops_test.py b/tensorflow_addons/image/tests/distort_image_ops_test.py index af37c75d77..2409023302 100644 --- a/tensorflow_addons/image/tests/distort_image_ops_test.py +++ b/tensorflow_addons/image/tests/distort_image_ops_test.py @@ -125,7 +125,7 @@ def test_adjust_hsv_in_yiq_unknown_shape(): image_np = np.random.rand(*shape) * 255.0 image_tf = tf.constant(image_np) np.testing.assert_allclose( - _adjust_hue_in_yiq_np(image_np, 0), fn(image_tf), rtol=2e-4, atol=1e-4, + _adjust_hue_in_yiq_np(image_np, 0), fn(image_tf), rtol=2e-4, atol=1e-4 ) diff --git a/tensorflow_addons/image/tests/filters_test.py b/tensorflow_addons/image/tests/filters_test.py index 1376a4c039..1c87c1111c 100644 --- a/tensorflow_addons/image/tests/filters_test.py +++ b/tensorflow_addons/image/tests/filters_test.py @@ -62,7 +62,7 @@ def setup_values( assert 3 <= len(image_shape) <= 4 height, width = image_shape[-3], image_shape[-2] plane = tf.constant( - [x for x in range(1, height * width + 1)], shape=(height, width), dtype=dtype, + [x for x in range(1, height * width + 1)], shape=(height, width), dtype=dtype ) image = tile_image(plane, image_shape=image_shape) @@ -172,7 +172,13 @@ def test_reflect_padding_with_3x3_filter_mean(image_shape): @pytest.mark.parametrize("image_shape", _image_shapes_to_test) def test_reflect_padding_with_4x4_filter_mean(image_shape): - expected_plane = tf.constant([[5.0, 5.0, 5.0], [5.0, 5.0, 5.0], [5.0, 5.0, 5.0],]) + expected_plane = tf.constant( + [ + [5.0, 5.0, 5.0], + [5.0, 5.0, 5.0], + [5.0, 5.0, 5.0], + ] + ) verify_values( mean_filter2d, diff --git a/tensorflow_addons/image/tests/sparse_image_warp_test.py b/tensorflow_addons/image/tests/sparse_image_warp_test.py index ce22ff8126..27e09dd1ef 100644 --- a/tensorflow_addons/image/tests/sparse_image_warp_test.py +++ b/tensorflow_addons/image/tests/sparse_image_warp_test.py @@ -121,7 +121,7 @@ def assert_move_single_pixel(order, num_boundary_points, type_to_use): # Shift it one pixel to the right. 
control_point_displacements = [[0.0, 1.0]] control_point_displacements = tf.constant( - np.float32(np.expand_dims(control_point_displacements, 0)), dtype=type_to_use, + np.float32(np.expand_dims(control_point_displacements, 0)), dtype=type_to_use ) (warped_image, flow) = sparse_image_warp( diff --git a/tensorflow_addons/image/tests/transform_ops_test.py b/tensorflow_addons/image/tests/transform_ops_test.py index d634455548..1d626f83e8 100644 --- a/tensorflow_addons/image/tests/transform_ops_test.py +++ b/tensorflow_addons/image/tests/transform_ops_test.py @@ -36,7 +36,7 @@ @pytest.mark.parametrize("dtype", _DTYPES) def test_compose(dtype): image = tf.constant( - [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype, + [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype ) # Rotate counter-clockwise by pi / 2. rotation = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4) @@ -56,7 +56,7 @@ def test_compose(dtype): @pytest.mark.parametrize("dtype", _DTYPES) def test_extreme_projective_transform(dtype): image = tf.constant( - [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype, + [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype ) transformation = tf.constant([1, 0, 0, 0, 1, 0, -1, 0], tf.dtypes.float32) image_transformed = transform_ops.transform(image, transformation) diff --git a/tensorflow_addons/image/utils.py b/tensorflow_addons/image/utils.py index e4bf497ebc..9fb688736a 100644 --- a/tensorflow_addons/image/utils.py +++ b/tensorflow_addons/image/utils.py @@ -126,7 +126,7 @@ def unwrap(image, replace): Returns: image: A 3D image `Tensor` with 3 channels. - """ + """ image_shape = tf.shape(image) # Flatten the spatial dimensions. flattened_image = tf.reshape(image, [-1, image_shape[2]]) diff --git a/tensorflow_addons/layers/crf.py b/tensorflow_addons/layers/crf.py index 7449ce7dcc..22b3f9a2f4 100644 --- a/tensorflow_addons/layers/crf.py +++ b/tensorflow_addons/layers/crf.py @@ -77,7 +77,7 @@ def __init__( if self.use_kernel: self._dense_layer = tf.keras.layers.Dense( - units=self.units, dtype=self.dtype, + units=self.units, dtype=self.dtype ) else: self._dense_layer = lambda x: tf.cast(x, dtype=self.dtype) diff --git a/tensorflow_addons/layers/esn.py b/tensorflow_addons/layers/esn.py index ceaf68741b..1abe43333c 100644 --- a/tensorflow_addons/layers/esn.py +++ b/tensorflow_addons/layers/esn.py @@ -90,7 +90,7 @@ class ESN(tf.keras.layers.RNN): `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell. 
- """ + """ @typechecked def __init__( diff --git a/tensorflow_addons/layers/spatial_pyramid_pooling.py b/tensorflow_addons/layers/spatial_pyramid_pooling.py index f96c0067eb..1514e34bd6 100644 --- a/tensorflow_addons/layers/spatial_pyramid_pooling.py +++ b/tensorflow_addons/layers/spatial_pyramid_pooling.py @@ -86,9 +86,7 @@ def call(self, inputs, **kwargs): new_input_height = dynamic_input_shape[1] - height_overflow new_input_width = dynamic_input_shape[2] - width_overflow - new_inp = inputs[ - :, :new_input_height, :new_input_width, :, - ] + new_inp = inputs[:, :new_input_height, :new_input_width, :] output = self.pool_layers[index](new_inp) output = tf.reshape( output, [dynamic_input_shape[0], bin[0] * bin[1], inputs.shape[-1]] @@ -103,9 +101,7 @@ def call(self, inputs, **kwargs): new_input_height = dynamic_input_shape[2] - height_overflow new_input_width = dynamic_input_shape[3] - width_overflow - new_inp = inputs[ - :, :, :new_input_height, :new_input_width, - ] + new_inp = inputs[:, :, :new_input_height, :new_input_width] output = self.pool_layers[index](new_inp) output = tf.reshape( output, [dynamic_input_shape[0], inputs.shape[1], bin[0] * bin[1]] diff --git a/tensorflow_addons/layers/tests/maxout_test.py b/tensorflow_addons/layers/tests/maxout_test.py index 6fcd88fe55..90a612ed57 100644 --- a/tensorflow_addons/layers/tests/maxout_test.py +++ b/tensorflow_addons/layers/tests/maxout_test.py @@ -42,7 +42,7 @@ def test_nchw(): def test_unknown(): inputs = np.random.random((5, 4, 2, 18)).astype("float32") test_utils.layer_test( - Maxout, kwargs={"num_units": 3}, input_shape=(5, 4, 2, None), input_data=inputs, + Maxout, kwargs={"num_units": 3}, input_shape=(5, 4, 2, None), input_data=inputs ) test_utils.layer_test( diff --git a/tensorflow_addons/layers/tests/optical_flow_test.py b/tensorflow_addons/layers/tests/optical_flow_test.py index 894f141485..f01c88cdb8 100644 --- a/tensorflow_addons/layers/tests/optical_flow_test.py +++ b/tensorflow_addons/layers/tests/optical_flow_test.py @@ -105,11 +105,16 @@ def test_forward_simple(data_format): actual = tf.transpose(actual, [0, 3, 1, 2]) # We can test fixed ids, as output is independent from data_format - expected_ids = np.concatenate([np.zeros(464,), np.ones(464,)]) + expected_ids = np.concatenate( + [ + np.zeros(464), + np.ones(464), + ] + ) np.testing.assert_allclose(tf.where(actual == 0)[:, 0].numpy(), expected_ids) counts = [54, 52, 54, 50, 44, 50, 54, 52, 54] - expected_ids = np.concatenate([k * np.ones(v,) for k, v in enumerate(counts)]) + expected_ids = np.concatenate([k * np.ones(v) for k, v in enumerate(counts)]) expected_ids = np.concatenate([expected_ids, expected_ids]) np.testing.assert_allclose(tf.where(actual == 0)[:, 1], expected_ids) assert actual.shape == (2, 9, 7, 8) diff --git a/tensorflow_addons/layers/tests/spectral_normalization_test.py b/tensorflow_addons/layers/tests/spectral_normalization_test.py index 48b96aa5e3..a8aebab814 100644 --- a/tensorflow_addons/layers/tests/spectral_normalization_test.py +++ b/tensorflow_addons/layers/tests/spectral_normalization_test.py @@ -95,7 +95,7 @@ def test_model_fit(base_layer_fn, input_shape, output_shape): "base_layer_fn, input_shape", [ (lambda: tf.keras.layers.Dense(2), [3, 2]), - (lambda: tf.keras.layers.Conv2D(3, (2, 2), padding="same"), [4, 4, 3],), + (lambda: tf.keras.layers.Conv2D(3, (2, 2), padding="same"), [4, 4, 3]), (lambda: tf.keras.layers.Embedding(2, 10), [2]), ], ) @@ -113,7 +113,7 @@ def test_normalization(): inputs = tf.keras.layers.Input(shape=[2, 2, 1]) 
base_layer = tf.keras.layers.Conv2D( - 1, (2, 2), kernel_initializer=tf.constant_initializer(value=2), + 1, (2, 2), kernel_initializer=tf.constant_initializer(value=2) ) sn_layer = spectral_normalization.SpectralNormalization(base_layer) model = tf.keras.models.Sequential(layers=[inputs, sn_layer]) @@ -125,7 +125,7 @@ def test_normalization(): for training in [False, True]: _ = model( - tf.constant(np.ones((1, 2, 2, 1), dtype=np.float32)), training=training, + tf.constant(np.ones((1, 2, 2, 1), dtype=np.float32)), training=training ) if training: w = weights_normalized diff --git a/tensorflow_addons/layers/tests/wrappers_test.py b/tensorflow_addons/layers/tests/wrappers_test.py index 372e467663..448a02aaa0 100644 --- a/tensorflow_addons/layers/tests/wrappers_test.py +++ b/tensorflow_addons/layers/tests/wrappers_test.py @@ -27,7 +27,7 @@ def test_basic(): test_utils.layer_test( wrappers.WeightNormalization, - kwargs={"layer": tf.keras.layers.Conv2D(5, (2, 2)),}, + kwargs={"layer": tf.keras.layers.Conv2D(5, (2, 2))}, input_shape=(2, 4, 4, 3), ) @@ -35,7 +35,7 @@ def test_basic(): def test_no_bias(): test_utils.layer_test( wrappers.WeightNormalization, - kwargs={"layer": tf.keras.layers.Dense(5, use_bias=False),}, + kwargs={"layer": tf.keras.layers.Dense(5, use_bias=False)}, input_shape=(2, 4), ) @@ -49,7 +49,7 @@ def _check_data_init(data_init, input_data, expected_output): ) test_utils.layer_test( wrappers.WeightNormalization, - kwargs={"layer": layer, "data_init": data_init,}, + kwargs={"layer": layer, "data_init": data_init}, input_data=input_data, expected_output=expected_output, ) diff --git a/tensorflow_addons/losses/metric_learning.py b/tensorflow_addons/losses/metric_learning.py index c9e8889247..2cbf43affa 100644 --- a/tensorflow_addons/losses/metric_learning.py +++ b/tensorflow_addons/losses/metric_learning.py @@ -31,12 +31,15 @@ def pairwise_distance(feature: TensorLike, squared: bool = False): Returns: pairwise_distances: 2-D Tensor of size `[number of data, number of data]`. """ - pairwise_distances_squared = tf.math.add( - tf.math.reduce_sum(tf.math.square(feature), axis=[1], keepdims=True), - tf.math.reduce_sum( - tf.math.square(tf.transpose(feature)), axis=[0], keepdims=True - ), - ) - 2.0 * tf.matmul(feature, tf.transpose(feature)) + pairwise_distances_squared = ( + tf.math.add( + tf.math.reduce_sum(tf.math.square(feature), axis=[1], keepdims=True), + tf.math.reduce_sum( + tf.math.square(tf.transpose(feature)), axis=[0], keepdims=True + ), + ) + - 2.0 * tf.matmul(feature, tf.transpose(feature)) + ) # Deal with numerical inaccuracies. Set small negatives to zero. 
pairwise_distances_squared = tf.math.maximum(pairwise_distances_squared, 0.0) diff --git a/tensorflow_addons/losses/tests/kappa_loss_test.py b/tensorflow_addons/losses/tests/kappa_loss_test.py index 9b647b9ded..706ebdf3f3 100644 --- a/tensorflow_addons/losses/tests/kappa_loss_test.py +++ b/tensorflow_addons/losses/tests/kappa_loss_test.py @@ -79,7 +79,7 @@ def test_quadratic_weighted_kappa_loss(np_seed): def test_config(): kappa_loss = WeightedKappaLoss( - num_classes=4, weightage="linear", name="kappa_loss", epsilon=0.001, + num_classes=4, weightage="linear", name="kappa_loss", epsilon=0.001 ) assert kappa_loss.num_classes == 4 assert kappa_loss.weightage == "linear" diff --git a/tensorflow_addons/losses/tests/lifted_test.py b/tensorflow_addons/losses/tests/lifted_test.py index 5223dd3871..a6b986cd0f 100644 --- a/tensorflow_addons/losses/tests/lifted_test.py +++ b/tensorflow_addons/losses/tests/lifted_test.py @@ -112,7 +112,7 @@ def test_lifted_struct(dtype): def test_keras_model_compile(): model = tf.keras.models.Sequential( - [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10),] + [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10)] ) model.compile(loss="Addons>lifted_struct_loss", optimizer="adam") diff --git a/tensorflow_addons/losses/tests/triplet_test.py b/tensorflow_addons/losses/tests/triplet_test.py index eda7dc0b50..abb0527100 100644 --- a/tensorflow_addons/losses/tests/triplet_test.py +++ b/tensorflow_addons/losses/tests/triplet_test.py @@ -182,7 +182,7 @@ def test_semihard_tripled_loss_angular(dtype, dist_func, dist_metric): def test_keras_model_compile_semihard(): model = tf.keras.models.Sequential( - [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10),] + [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10)] ) model.compile(loss="Addons>triplet_semihard_loss", optimizer="adam") @@ -225,7 +225,7 @@ def test_hard_tripled_loss_angular(dtype, soft, dist_func, dist_metric): def test_keras_model_compile_hard(): model = tf.keras.models.Sequential( - [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10),] + [tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10)] ) model.compile(loss="Addons>triplet_hard_loss", optimizer="adam") diff --git a/tensorflow_addons/metrics/r_square.py b/tensorflow_addons/metrics/r_square.py index 8ae78fc543..04c3866dbc 100644 --- a/tensorflow_addons/metrics/r_square.py +++ b/tensorflow_addons/metrics/r_square.py @@ -30,8 +30,7 @@ def _reduce_average( input_tensor: tf.Tensor, axis=None, keepdims=False, weights=None ) -> tf.Tensor: - """Computes the (weighted) mean of elements across dimensions of a tensor. 
- """ + """Computes the (weighted) mean of elements across dimensions of a tensor.""" if weights is None: return tf.reduce_mean(input_tensor, axis=axis, keepdims=keepdims) @@ -116,7 +115,7 @@ def update_state(self, y_true, y_pred, sample_weight=None) -> None: self.sum.assign_add(tf.reduce_sum(weighted_y_true, axis=0)) self.squared_sum.assign_add(tf.reduce_sum(y_true * weighted_y_true, axis=0)) self.res.assign_add( - tf.reduce_sum((y_true - y_pred) ** 2 * sample_weight, axis=0,) + tf.reduce_sum((y_true - y_pred) ** 2 * sample_weight, axis=0) ) self.count.assign_add(tf.reduce_sum(sample_weight, axis=0)) diff --git a/tensorflow_addons/metrics/tests/hamming_test.py b/tensorflow_addons/metrics/tests/hamming_test.py index 4ed1b4dc8e..96be48675e 100644 --- a/tensorflow_addons/metrics/tests/hamming_test.py +++ b/tensorflow_addons/metrics/tests/hamming_test.py @@ -101,7 +101,7 @@ def test_mc_5_classes(): def test_ml_4_classes(): actuals = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 1]], dtype=tf.float32) predictions = tf.constant( - [[0.97, 0.56, 0.83, 0.77], [0.34, 0.95, 0.7, 0.89], [0.95, 0.45, 0.23, 0.56],], + [[0.97, 0.56, 0.83, 0.77], [0.34, 0.95, 0.7, 0.89], [0.95, 0.45, 0.23, 0.56]], dtype=tf.float32, ) # Initialize diff --git a/tensorflow_addons/metrics/tests/multilabel_confusion_matrix_test.py b/tensorflow_addons/metrics/tests/multilabel_confusion_matrix_test.py index 36f586a032..0edf76888a 100644 --- a/tensorflow_addons/metrics/tests/multilabel_confusion_matrix_test.py +++ b/tensorflow_addons/metrics/tests/multilabel_confusion_matrix_test.py @@ -84,7 +84,7 @@ def test_mcm_4_classes(dtype): # Check results check_results( mcm_obj, - [[[4, 1], [1, 4]], [[6, 0], [2, 2]], [[6, 1], [1, 2]], [[2, 0], [2, 6]],], + [[[4, 1], [1, 4]], [[6, 0], [2, 2]], [[6, 1], [1, 2]], [[2, 0], [2, 6]]], ) @@ -127,5 +127,5 @@ def test_multiclass(dtype): # Check results check_results( mcm_obj, - [[[5, 2], [0, 3]], [[7, 1], [2, 0]], [[7, 0], [1, 2]], [[8, 0], [0, 2]],], + [[[5, 2], [0, 3]], [[7, 1], [2, 0]], [[7, 0], [1, 2]], [[8, 0], [0, 2]]], ) diff --git a/tensorflow_addons/optimizers/average_wrapper.py b/tensorflow_addons/optimizers/average_wrapper.py index 86f815bb4d..444daf3dd6 100644 --- a/tensorflow_addons/optimizers/average_wrapper.py +++ b/tensorflow_addons/optimizers/average_wrapper.py @@ -137,7 +137,7 @@ def get_config(self): @classmethod def from_config(cls, config, custom_objects=None): optimizer = tf.keras.optimizers.deserialize( - config.pop("optimizer"), custom_objects=custom_objects, + config.pop("optimizer"), custom_objects=custom_objects ) return cls(optimizer, **config) diff --git a/tensorflow_addons/optimizers/lookahead.py b/tensorflow_addons/optimizers/lookahead.py index 8f96dc9c62..31fe13b043 100644 --- a/tensorflow_addons/optimizers/lookahead.py +++ b/tensorflow_addons/optimizers/lookahead.py @@ -125,10 +125,10 @@ def _look_ahead_op(self, var): ) with tf.control_dependencies([step_back]): slow_update = slow_var.assign( - tf.where(sync_cond, step_back, slow_var,), use_locking=self._use_locking + tf.where(sync_cond, step_back, slow_var), use_locking=self._use_locking ) var_update = var.assign( - tf.where(sync_cond, step_back, var,), use_locking=self._use_locking + tf.where(sync_cond, step_back, var), use_locking=self._use_locking ) return tf.group(slow_update, var_update) @@ -184,6 +184,6 @@ def lr(self, lr): @classmethod def from_config(cls, config, custom_objects=None): optimizer = tf.keras.optimizers.deserialize( - config.pop("optimizer"), custom_objects=custom_objects, + 
config.pop("optimizer"), custom_objects=custom_objects ) return cls(optimizer, **config) diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py index 7e92186ad9..db14979c12 100644 --- a/tensorflow_addons/optimizers/moving_average.py +++ b/tensorflow_addons/optimizers/moving_average.py @@ -140,11 +140,11 @@ def has_shadow_copy(self): def swap_weights(self): """Swap the average and moving weights. - This is a convenience method to allow one to evaluate the averaged weights - at test time. Loads the weights stored in `self._average_weights` into the model, - keeping a copy of the original model weights. Swapping twice will return - the original weights. - """ + This is a convenience method to allow one to evaluate the averaged weights + at test time. Loads the weights stored in `self._average_weights` into the model, + keeping a copy of the original model weights. Swapping twice will return + the original weights. + """ if tf.distribute.in_cross_replica_context(): strategy = tf.distribute.get_strategy() return strategy.run(self._swap_weights, args=()) @@ -178,4 +178,4 @@ def swap(strategy, a, b): ) # a = a - b ctx = tf.distribute.get_replica_context() - return ctx.merge_call(swap, args=(self._average_weights, self._model_weights,)) + return ctx.merge_call(swap, args=(self._average_weights, self._model_weights)) diff --git a/tensorflow_addons/optimizers/tests/conditional_gradient_test.py b/tensorflow_addons/optimizers/tests/conditional_gradient_test.py index b3f7ac37be..069a394041 100644 --- a/tensorflow_addons/optimizers/tests/conditional_gradient_test.py +++ b/tensorflow_addons/optimizers/tests/conditional_gradient_test.py @@ -1355,7 +1355,7 @@ def test_sparse_nuclear(): tf.constant([4, 2]), ) top_singular_vector0 = tf.constant( - [[0.0, 0.0], [0.7071067, 0.7071067], [0.0, 0.0], [0.0, 0.0]], dtype=dtype, + [[0.0, 0.0], [0.7071067, 0.7071067], [0.0, 0.0], [0.0, 0.0]], dtype=dtype ) top_singular_vector1 = tf.constant( [ diff --git a/tensorflow_addons/optimizers/tests/lookahead_test.py b/tensorflow_addons/optimizers/tests/lookahead_test.py index 4802bf0fd5..b315288094 100644 --- a/tensorflow_addons/optimizers/tests/lookahead_test.py +++ b/tensorflow_addons/optimizers/tests/lookahead_test.py @@ -55,10 +55,10 @@ def run_sparse_sample(iterations, optimizer, seed=0x2019): var_1 = tf.Variable(val_1, dtype=tf.dtypes.float32) grad_0 = tf.IndexedSlices( - tf.constant([np.random.standard_normal()]), tf.constant([0]), tf.constant([2]), + tf.constant([np.random.standard_normal()]), tf.constant([0]), tf.constant([2]) ) grad_1 = tf.IndexedSlices( - tf.constant([np.random.standard_normal()]), tf.constant([1]), tf.constant([2]), + tf.constant([np.random.standard_normal()]), tf.constant([1]), tf.constant([2]) ) grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1])) diff --git a/tensorflow_addons/optimizers/tests/moving_average_test.py b/tensorflow_addons/optimizers/tests/moving_average_test.py index 3e1d77e1e5..4e4b90983f 100644 --- a/tensorflow_addons/optimizers/tests/moving_average_test.py +++ b/tensorflow_addons/optimizers/tests/moving_average_test.py @@ -31,7 +31,7 @@ def test_run(): grads_and_vars = list(zip([grads0, grads1], [var0, var1])) - opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5,) + opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5) opt.apply_gradients(grads_and_vars) opt.apply_gradients(grads_and_vars) @@ -118,7 +118,7 @@ def test_optimizer_string(): def test_config(): sgd_opt = 
tf.keras.optimizers.SGD(lr=2.0, nesterov=True, momentum=0.3, decay=0.1) opt = MovingAverage( - sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True, + sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True ) config = opt.get_config() @@ -166,7 +166,7 @@ def test_fit_simple_linear_model(): def test_serialization(): sgd_opt = tf.keras.optimizers.SGD(lr=2.0, nesterov=True, momentum=0.3, decay=0.1) optimizer = MovingAverage( - sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True, + sgd_opt, average_decay=0.5, num_updates=None, start_step=5, dynamic_decay=True ) config = tf.keras.optimizers.serialize(optimizer) new_optimizer = tf.keras.optimizers.deserialize(config) @@ -180,7 +180,7 @@ def test_start_step(): grads_and_vars = [(grads0, var0)] opt = MovingAverage( - tf.keras.optimizers.SGD(lr=1.0), average_decay=0.5, start_step=1, + tf.keras.optimizers.SGD(lr=1.0), average_decay=0.5, start_step=1 ) opt.apply_gradients(grads_and_vars) @@ -203,7 +203,7 @@ def test_dynamic_decay(): grads_and_vars = [(grads0, var0)] opt = MovingAverage( - tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5, dynamic_decay=True, + tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5, dynamic_decay=True ) opt.apply_gradients(grads_and_vars) @@ -222,7 +222,7 @@ def test_swap_weights(device): var = tf.Variable([1.0, 2.0]) grads = tf.constant([0.1, 0.1]) - opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5,) + opt = MovingAverage(tf.keras.optimizers.SGD(lr=2.0), average_decay=0.5) @tf.function def apply_gradients(): diff --git a/tensorflow_addons/optimizers/tests/rectified_adam_test.py b/tensorflow_addons/optimizers/tests/rectified_adam_test.py index 3fe0043bcb..040e5e638e 100644 --- a/tensorflow_addons/optimizers/tests/rectified_adam_test.py +++ b/tensorflow_addons/optimizers/tests/rectified_adam_test.py @@ -121,7 +121,7 @@ def test_dense_sample_with_warmup(): iterations=100, expected=[[0.994062, 1.993912], [2.994167, 3.994152]], optimizer=RectifiedAdam( - lr=1e-3, total_steps=100, warmup_proportion=0.1, min_lr=1e-5, + lr=1e-3, total_steps=100, warmup_proportion=0.1, min_lr=1e-5 ), ) @@ -132,7 +132,7 @@ def test_sparse_sample_with_warmup(): iterations=200, expected=[[0.982629, 2.0], [3.0, 3.982674]], optimizer=RectifiedAdam( - lr=1e-3, total_steps=200, warmup_proportion=0.1, min_lr=1e-5, + lr=1e-3, total_steps=200, warmup_proportion=0.1, min_lr=1e-5 ), ) @@ -145,7 +145,7 @@ def test_dense_sample_with_lookahead(): iterations=100, expected=[[0.993126, 1.992901], [2.993283, 3.993261]], optimizer=Lookahead( - RectifiedAdam(lr=1e-3, beta_1=0.95,), sync_period=6, slow_step_size=0.45, + RectifiedAdam(lr=1e-3, beta_1=0.95), sync_period=6, slow_step_size=0.45 ), ) @@ -158,7 +158,7 @@ def test_sparse_sample_with_lookahead(): iterations=150, expected=[[0.988156, 2.0], [3.0, 3.988291]], optimizer=Lookahead( - RectifiedAdam(lr=1e-3, beta_1=0.95,), sync_period=6, slow_step_size=0.45, + RectifiedAdam(lr=1e-3, beta_1=0.95), sync_period=6, slow_step_size=0.45 ), ) @@ -172,7 +172,7 @@ def test_get_config(): def test_serialization(): optimizer = RectifiedAdam( - lr=1e-3, total_steps=10000, warmup_proportion=0.1, min_lr=1e-5, + lr=1e-3, total_steps=10000, warmup_proportion=0.1, min_lr=1e-5 ) config = tf.keras.optimizers.serialize(optimizer) new_optimizer = tf.keras.optimizers.deserialize(config) diff --git a/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py b/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py index 
6dd3d0106a..25f81f7350 100644 --- a/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py +++ b/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py @@ -68,11 +68,11 @@ def do_test( if do_sparse: grads0_np_indices = np.array([0, 1], dtype=np.int32) grads0 = tf.IndexedSlices( - tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([2]), + tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([2]) ) grads1_np_indices = np.array([0, 1], dtype=np.int32) grads1 = tf.IndexedSlices( - tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([2]), + tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([2]) ) else: grads0 = tf.constant(grads0_np) @@ -83,7 +83,7 @@ def do_test( for _ in range(3): if do_decay_var_list: opt.apply_gradients( - zip([grads0, grads1], [var0, var1]), decay_var_list=[var0, var1], + zip([grads0, grads1], [var0, var1]), decay_var_list=[var0, var1] ) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) @@ -374,7 +374,7 @@ def test_optimizer_basic(dtype, optimizer): @pytest.mark.parametrize("dtype", [tf.half, tf.float32, tf.float64]) def test_optimizer_sparse(dtype, optimizer): do_test_sparse_repeated_indices( - dtype, optimizer, learning_rate=0.001, momentum=0.9, weight_decay=WEIGHT_DECAY, + dtype, optimizer, learning_rate=0.001, momentum=0.9, weight_decay=WEIGHT_DECAY ) diff --git a/tensorflow_addons/optimizers/tests/yogi_test.py b/tensorflow_addons/optimizers/tests/yogi_test.py index fcd98d49bb..90edf085cb 100644 --- a/tensorflow_addons/optimizers/tests/yogi_test.py +++ b/tensorflow_addons/optimizers/tests/yogi_test.py @@ -140,12 +140,8 @@ def do_test_sparse(beta1=0.0, l1reg=0.0, l2reg=0.0): ) # Validate updated params. - test_utils.assert_allclose_according_to_type( - var0_np, var0.numpy(), - ) - test_utils.assert_allclose_according_to_type( - var1_np, var1.numpy(), - ) + test_utils.assert_allclose_according_to_type(var0_np, var0.numpy()) + test_utils.assert_allclose_according_to_type(var1_np, var1.numpy()) @pytest.mark.usefixtures("maybe_run_functions_eagerly") @@ -186,7 +182,8 @@ def test_sparse_repeated_indices(): opt2 = yogi.Yogi() np.testing.assert_allclose( - aggregated_update_var.numpy(), repeated_index_update_var.numpy(), + aggregated_update_var.numpy(), + repeated_index_update_var.numpy(), ) for _ in range(3): @@ -194,7 +191,8 @@ def test_sparse_repeated_indices(): opt2.apply_gradients([(grad_aggregated, aggregated_update_var)]) np.testing.assert_allclose( - aggregated_update_var.numpy(), repeated_index_update_var.numpy(), + aggregated_update_var.numpy(), + repeated_index_update_var.numpy(), ) diff --git a/tensorflow_addons/optimizers/weight_decay_optimizers.py b/tensorflow_addons/optimizers/weight_decay_optimizers.py index 1f798b2474..5b70dec5be 100644 --- a/tensorflow_addons/optimizers/weight_decay_optimizers.py +++ b/tensorflow_addons/optimizers/weight_decay_optimizers.py @@ -88,9 +88,7 @@ def __init__(self, weight_decay: Union[FloatTensorLike, Callable], **kwargs): def get_config(self): config = super().get_config() - config.update( - {"weight_decay": self._serialize_hyperparameter("weight_decay"),} - ) + config.update({"weight_decay": self._serialize_hyperparameter("weight_decay")}) return config @classmethod diff --git a/tensorflow_addons/rnn/esn_cell.py b/tensorflow_addons/rnn/esn_cell.py index 640c8c88fd..d36be9f369 100644 --- a/tensorflow_addons/rnn/esn_cell.py +++ b/tensorflow_addons/rnn/esn_cell.py @@ -118,7 +118,8 @@ def _esn_recurrent_initializer(shape, dtype, 
partition_info=None): ) connectivity_mask = tf.cast( - tf.math.less_equal(tf.random.uniform(shape), self.connectivity,), dtype + tf.math.less_equal(tf.random.uniform(shape), self.connectivity), + dtype, ) recurrent_weights = tf.math.multiply(recurrent_weights, connectivity_mask) diff --git a/tensorflow_addons/rnn/tests/esn_cell_test.py b/tensorflow_addons/rnn/tests/esn_cell_test.py index d88d3d372b..ae695fd658 100644 --- a/tensorflow_addons/rnn/tests/esn_cell_test.py +++ b/tensorflow_addons/rnn/tests/esn_cell_test.py @@ -24,7 +24,7 @@ def test_base_esn(): units = 3 expected_output = np.array( - [[2.77, 2.77, 2.77], [4.77, 4.77, 4.77], [6.77, 6.77, 6.77],], dtype=np.float32, + [[2.77, 2.77, 2.77], [4.77, 4.77, 4.77], [6.77, 6.77, 6.77]], dtype=np.float32 ) const_initializer = tf.constant_initializer(0.5) diff --git a/tensorflow_addons/seq2seq/beam_search_decoder.py b/tensorflow_addons/seq2seq/beam_search_decoder.py index 9d5d3c30dc..73897b6607 100644 --- a/tensorflow_addons/seq2seq/beam_search_decoder.py +++ b/tensorflow_addons/seq2seq/beam_search_decoder.py @@ -63,7 +63,7 @@ class BeamSearchDecoderState( class BeamSearchDecoderOutput( collections.namedtuple( - "BeamSearchDecoderOutput", ("scores", "predicted_ids", "parent_ids"), + "BeamSearchDecoderOutput", ("scores", "predicted_ids", "parent_ids") ) ): """Outputs of a `BeamSearchDecoder` step. diff --git a/tensorflow_addons/seq2seq/decoder.py b/tensorflow_addons/seq2seq/decoder.py index 6c3c24c734..c098a80feb 100644 --- a/tensorflow_addons/seq2seq/decoder.py +++ b/tensorflow_addons/seq2seq/decoder.py @@ -308,8 +308,11 @@ def dynamic_decode( ValueError: if `maximum_iterations` is provided but is not a scalar. """ with tf.name_scope(scope or "decoder"): - is_xla = not tf.executing_eagerly() and control_flow_util.GraphOrParentsInXlaContext( - tf.compat.v1.get_default_graph() + is_xla = ( + not tf.executing_eagerly() + and control_flow_util.GraphOrParentsInXlaContext( + tf.compat.v1.get_default_graph() + ) ) if maximum_iterations is not None: diff --git a/tensorflow_addons/seq2seq/tests/basic_decoder_test.py b/tensorflow_addons/seq2seq/tests/basic_decoder_test.py index 24c6f2aeda..e2606027a8 100644 --- a/tensorflow_addons/seq2seq/tests/basic_decoder_test.py +++ b/tensorflow_addons/seq2seq/tests/basic_decoder_test.py @@ -63,7 +63,7 @@ def test_step_with_training_helper_output_layer(cell_class, use_output_layer): assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -88,14 +88,14 @@ def test_step_with_training_helper_output_layer(cell_class, use_output_layer): assert len(output_layer.variables) == 1 np.testing.assert_equal( - np.asanyarray([False, False, False, False, True]), first_finished, + np.asanyarray([False, False, False, False, True]), first_finished ) np.testing.assert_equal( - np.asanyarray([False, False, False, True, True]), step_finished, + np.asanyarray([False, False, False, True, True]), step_finished ) assert output_dtype.sample_id == step_outputs.sample_id.dtype np.testing.assert_equal( - np.argmax(step_outputs.rnn_output, -1), step_outputs.sample_id, + np.argmax(step_outputs.rnn_output, -1), step_outputs.sample_id ) @@ -131,7 +131,7 @@ def test_step_with_training_helper_masked_input(use_mask): ) else: (first_finished, first_inputs, first_state) = my_decoder.initialize( - input_t, 
initial_state=initial_state, sequence_length=sequence_length, + input_t, initial_state=initial_state, sequence_length=sequence_length ) output_size = my_decoder.output_size @@ -143,7 +143,7 @@ def test_step_with_training_helper_masked_input(use_mask): assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -163,7 +163,7 @@ def test_step_with_training_helper_masked_input(use_mask): np.testing.assert_equal((np.maximum(sequence_length - 1, 0) == 0), step_finished) assert output_dtype.sample_id == step_outputs.sample_id.dtype np.testing.assert_equal( - np.argmax(step_outputs.rnn_output, -1), step_outputs.sample_id, + np.argmax(step_outputs.rnn_output, -1), step_outputs.sample_id ) @@ -194,7 +194,7 @@ def test_step_with_greedy_embedding_helper(): ) assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -212,7 +212,7 @@ def test_step_with_greedy_embedding_helper(): expected_step_finished = expected_sample_ids == end_token expected_step_next_inputs = embeddings[expected_sample_ids] np.testing.assert_equal( - np.asanyarray([False, False, False, False, False]), first_finished, + np.asanyarray([False, False, False, False, False]), first_finished ) np.testing.assert_equal(expected_step_finished, step_finished) assert output_dtype.sample_id == step_outputs.sample_id.dtype @@ -248,7 +248,7 @@ def test_step_with_sample_embedding_helper(): ) assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -302,7 +302,7 @@ def test_step_with_scheduled_embedding_training_helper(): assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -318,10 +318,10 @@ def test_step_with_scheduled_embedding_training_helper(): assert (batch_size, input_depth) == step_next_inputs.shape np.testing.assert_equal( - np.asanyarray([False, False, False, False, True]), first_finished, + np.asanyarray([False, False, False, False, True]), first_finished ) np.testing.assert_equal( - np.asanyarray([False, False, False, True, True]), step_finished, + np.asanyarray([False, False, False, True, True]), step_finished ) sample_ids = step_outputs.sample_id.numpy() assert output_dtype.sample_id == sample_ids.dtype @@ -394,7 +394,7 @@ def next_inputs_fn(outputs): ) assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -412,10 +412,10 @@ def next_inputs_fn(outputs): assert (batch_size, cell_depth) == step_state[1].shape np.testing.assert_equal( - np.asanyarray([False, False, False, False, True]), first_finished, 
+ np.asanyarray([False, False, False, False, True]), first_finished ) np.testing.assert_equal( - np.asanyarray([False, False, False, True, True]), step_finished, + np.asanyarray([False, False, False, True, True]), step_finished ) sample_ids = step_outputs.sample_id @@ -440,7 +440,7 @@ def next_inputs_fn(outputs): ) np.testing.assert_equal( - step_next_inputs.numpy()[batch_where_sampling], expected_next_sampling_inputs, + step_next_inputs.numpy()[batch_where_sampling], expected_next_sampling_inputs ) np.testing.assert_equal( @@ -498,7 +498,7 @@ def end_fn(sample_ids): ) assert basic_decoder.BasicDecoderOutput(tf.float32, tf.int32) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) @@ -561,7 +561,7 @@ def end_fn(sample_ids): assert basic_decoder.BasicDecoderOutput(cell_depth, cell_depth) == output_size assert basic_decoder.BasicDecoderOutput(tf.float32, tf.bool) == output_dtype - (step_outputs, step_state, step_next_inputs, step_finished,) = my_decoder.step( + (step_outputs, step_state, step_next_inputs, step_finished) = my_decoder.step( tf.constant(0), first_inputs, first_state ) diff --git a/tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py b/tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py index 962c225e3c..80ec3a501b 100644 --- a/tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py +++ b/tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py @@ -259,7 +259,7 @@ def test_eos_masking(): [-0.3, -0.3, -0.3, 3, 0], [5, 6, 0, 0, 0], ], - [[-0.2, -0.2, -0.2, -0.2, 0], [-0.3, -0.3, -0.1, 3, 0], [5, 6, 3, 0, 0],], + [[-0.2, -0.2, -0.2, -0.2, 0], [-0.3, -0.3, -0.1, 3, 0], [5, 6, 3, 0, 0]], ] ) diff --git a/tensorflow_addons/seq2seq/tests/beam_search_ops_test.py b/tensorflow_addons/seq2seq/tests/beam_search_ops_test.py index f2fca4d64c..ddd2eaad0a 100644 --- a/tensorflow_addons/seq2seq/tests/beam_search_ops_test.py +++ b/tensorflow_addons/seq2seq/tests/beam_search_ops_test.py @@ -141,7 +141,7 @@ def test_gather_tree_batch(): # valid id and everything after it should be end_token. 
if found > 0: np.testing.assert_equal( - v[: found - 1] >= 0, np.ones_like(v[: found - 1], dtype=bool), + v[: found - 1] >= 0, np.ones_like(v[: found - 1], dtype=bool) ) np.testing.assert_allclose( v[found + 1 :], end_token * np.ones_like(v[found + 1 :]) diff --git a/tensorflow_addons/seq2seq/tests/decoder_test.py b/tensorflow_addons/seq2seq/tests/decoder_test.py index ef869aa8bc..e30f338ee7 100644 --- a/tensorflow_addons/seq2seq/tests/decoder_test.py +++ b/tensorflow_addons/seq2seq/tests/decoder_test.py @@ -92,7 +92,7 @@ def test_dynamic_decode_tflite_conversion(): cell = tf.keras.layers.LSTMCell(units) sampler = sampler_py.GreedyEmbeddingSampler() embeddings = tf.random.uniform([vocab_size, units]) - my_decoder = basic_decoder.BasicDecoder(cell=cell, sampler=sampler,) + my_decoder = basic_decoder.BasicDecoder(cell=cell, sampler=sampler) @tf.function def _decode(start_tokens, end_token): @@ -148,7 +148,7 @@ def test_dynamic_decode_rnn_with_training_helper_matches_dynamic_rnn( cell=cell, sampler=sampler, impute_finished=use_sequence_length ) - (final_decoder_outputs, final_decoder_state, _,) = my_decoder( + (final_decoder_outputs, final_decoder_state, _) = my_decoder( inputs, initial_state=zero_state, sequence_length=sequence_length ) @@ -168,7 +168,7 @@ def test_dynamic_decode_rnn_with_training_helper_matches_dynamic_rnn( # to dynamic_rnn, which also zeros out outputs and passes along # state. np.testing.assert_allclose( - final_decoder_outputs.rnn_output, final_rnn_outputs[:, 0:max_out, :], + final_decoder_outputs.rnn_output, final_rnn_outputs[:, 0:max_out, :] ) if use_sequence_length: np.testing.assert_allclose(final_decoder_state, final_rnn_state) diff --git a/tensorflow_addons/seq2seq/tests/loss_test.py b/tensorflow_addons/seq2seq/tests/loss_test.py index 7da06b7f4b..3188b7263d 100644 --- a/tensorflow_addons/seq2seq/tests/loss_test.py +++ b/tensorflow_addons/seq2seq/tests/loss_test.py @@ -296,7 +296,7 @@ def return_logits(x): inp = tf.keras.layers.Input(shape=(sequence_length,)) out = tf.keras.layers.Lambda( - return_logits, output_shape=(sequence_length, number_of_classes), + return_logits, output_shape=(sequence_length, number_of_classes) )(inp) model = tf.keras.models.Model(inp, out) @@ -307,7 +307,7 @@ def return_logits(x): x = tf.ones(shape=(batch_size, sequence_length)) h = model.fit( - x, targets, sample_weight=weights, batch_size=batch_size, steps_per_epoch=1, + x, targets, sample_weight=weights, batch_size=batch_size, steps_per_epoch=1 ) calculated_loss = h.history["loss"][0] diff --git a/tensorflow_addons/text/tests/parse_time_op_test.py b/tensorflow_addons/text/tests/parse_time_op_test.py index 0a576a1f22..f976800e7b 100644 --- a/tensorflow_addons/text/tests/parse_time_op_test.py +++ b/tensorflow_addons/text/tests/parse_time_op_test.py @@ -49,7 +49,7 @@ def test_parse_time(): ] for time_string, time_format, output_unit, expected in items: result = text.parse_time( - time_string=time_string, time_format=time_format, output_unit=output_unit, + time_string=time_string, time_format=time_format, output_unit=output_unit ) np.testing.assert_equal(expected, result.numpy()) diff --git a/tensorflow_addons/text/tests/skip_gram_ops_test.py b/tensorflow_addons/text/tests/skip_gram_ops_test.py index ee60b49091..e60656c780 100644 --- a/tensorflow_addons/text/tests/skip_gram_ops_test.py +++ b/tensorflow_addons/text/tests/skip_gram_ops_test.py @@ -108,7 +108,7 @@ def test_skip_gram_sample_skips_0(): input_tensor, min_skips=0, max_skips=0, emit_self_as_target=True ) expected_tokens, 
expected_labels = _split_tokens_labels( - [(b"the", b"the"), (b"quick", b"quick"), (b"brown", b"brown"),] + [(b"the", b"the"), (b"quick", b"quick"), (b"brown", b"brown")] ) np.testing.assert_equal(np.asanyarray(expected_tokens), tokens.numpy()) np.testing.assert_equal(np.asanyarray(expected_labels), labels.numpy()) @@ -234,7 +234,7 @@ def test_skip_gram_sample_non_string_input(): input_tensor = tf.constant([1, 2, 3], dtype=tf.dtypes.int16) tokens, labels = text.skip_gram_sample(input_tensor, min_skips=1, max_skips=1) expected_tokens, expected_labels = _split_tokens_labels( - [(1, 2), (2, 1), (2, 3), (3, 2),] + [(1, 2), (2, 1), (2, 3), (3, 2)] ) np.testing.assert_equal(np.asanyarray(expected_tokens), tokens.numpy()) np.testing.assert_equal(np.asanyarray(expected_labels), labels.numpy()) @@ -283,7 +283,7 @@ def test_skip_gram_sample_errors(): text.skip_gram_sample(dummy_input, vocab_freq_table=None, corpus_size=100) with pytest.raises(ValueError): text.skip_gram_sample( - dummy_input, vocab_freq_table=None, vocab_subsampling=1e-5, corpus_size=100, + dummy_input, vocab_freq_table=None, vocab_subsampling=1e-5, corpus_size=100 ) # vocab_subsampling and corpus_size must both be present or absent. @@ -477,7 +477,7 @@ def _text_vocab_subsample_vocab_helper( ) expected_tokens, expected_labels = _split_tokens_labels( - [(b"the", b"to"), (b"to", b"the"), (b"to", b"life"), (b"life", b"to"),] + [(b"the", b"to"), (b"to", b"the"), (b"to", b"life"), (b"life", b"to")] ) np.testing.assert_equal(np.asanyarray(expected_tokens), tokens.numpy()) np.testing.assert_equal(np.asanyarray(expected_labels), labels.numpy()) @@ -601,7 +601,7 @@ def _make_text_vocab_freq_file(tmp_dir): with open(filepath, "w") as f: writer = csv.writer(f) writer.writerows( - [["and", 40], ["life", 8], ["the", 30], ["to", 20], ["universe", 2],] + [["and", 40], ["life", 8], ["the", 30], ["to", 20], ["universe", 2]] ) return filepath diff --git a/tensorflow_addons/utils/keras_utils.py b/tensorflow_addons/utils/keras_utils.py index 8865e0623d..e480527c21 100644 --- a/tensorflow_addons/utils/keras_utils.py +++ b/tensorflow_addons/utils/keras_utils.py @@ -29,21 +29,21 @@ def __init__( ): """Initializes `LossFunctionWrapper` class. - Args: - fn: The loss function to wrap, with signature `fn(y_true, y_pred, - **kwargs)`. - reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to - loss. Default value is `AUTO`. `AUTO` indicates that the reduction - option will be determined by the usage context. For almost all cases - this defaults to `SUM_OVER_BATCH_SIZE`. When used with - `tf.distribute.Strategy`, outside of built-in training loops such as - `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` - will raise an error. Please see this custom training [tutorial]( - https://www.tensorflow.org/tutorials/distribute/custom_training) - for more details. - name: (Optional) name for the loss. - **kwargs: The keyword arguments that are passed on to `fn`. - """ + Args: + fn: The loss function to wrap, with signature `fn(y_true, y_pred, + **kwargs)`. + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to + loss. Default value is `AUTO`. `AUTO` indicates that the reduction + option will be determined by the usage context. For almost all cases + this defaults to `SUM_OVER_BATCH_SIZE`. When used with + `tf.distribute.Strategy`, outside of built-in training loops such as + `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` + will raise an error. 
Please see this custom training [tutorial]( + https://www.tensorflow.org/tutorials/distribute/custom_training) + for more details. + name: (Optional) name for the loss. + **kwargs: The keyword arguments that are passed on to `fn`. + """ super().__init__(reduction=reduction, name=name) self.fn = fn self._fn_kwargs = kwargs @@ -51,13 +51,13 @@ def __init__( def call(self, y_true, y_pred): """Invokes the `LossFunctionWrapper` instance. - Args: - y_true: Ground truth values. - y_pred: The predicted values. + Args: + y_true: Ground truth values. + y_pred: The predicted values. - Returns: - Loss values per sample. - """ + Returns: + Loss values per sample. + """ return self.fn(y_true, y_pred, **self._fn_kwargs) def get_config(self): diff --git a/tensorflow_addons/version.py b/tensorflow_addons/version.py index 37e9362ffe..b85169a59f 100644 --- a/tensorflow_addons/version.py +++ b/tensorflow_addons/version.py @@ -27,6 +27,6 @@ _VERSION_SUFFIX = "dev" # Example, '0.1.0-dev' -__version__ = ".".join([_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION,]) +__version__ = ".".join([_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION]) if _VERSION_SUFFIX: __version__ = "{}-{}".format(__version__, _VERSION_SUFFIX) diff --git a/tools/install_deps/black.txt b/tools/install_deps/black.txt index 7c9d150acb..52ee1d9aa1 100644 --- a/tools/install_deps/black.txt +++ b/tools/install_deps/black.txt @@ -1 +1 @@ -black==19.10b0 +black==20.8b1 diff --git a/tools/testing/source_code_test.py b/tools/testing/source_code_test.py index cc1778b73b..280ca61b01 100644 --- a/tools/testing/source_code_test.py +++ b/tools/testing/source_code_test.py @@ -45,7 +45,7 @@ def test_api_typed(): "https://github.com/tensorflow/addons/blob/master/CONTRIBUTING.md#about-type-hints" ) ensure_api_is_typed( - modules_list, exception_list, init_only=True, additional_message=help_message, + modules_list, exception_list, init_only=True, additional_message=help_message )
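
Note on the pattern behind these hunks: the driving change is the black bump from 19.10b0 to 20.8b1 in tools/install_deps/black.txt. black 20.8b1 introduced the "magic trailing comma": a trailing comma inside a call, list, or other bracketed construct tells black to keep that construct exploded, one element per line, while removing the comma lets black collapse it onto a single line when it fits. 19.10b0 generally left single-line trailing commas in place, which is presumably why they accumulated here. A minimal, self-contained sketch of the behavior follows; the function and values are placeholders echoing compose_ops.py, not code from this patch:

# Placeholder function, only to make the snippet runnable.
def blend(image1, image2, factor):
    return image1 * (1.0 - factor) + image2 * factor

# No trailing comma: black 20.8b1 collapses the call onto one line,
# which is why this patch deletes commas like the one after
# `data_format,` in cutout_ops.py.
out_collapsed = blend(0.0, 1.0, 0.5)

# Magic trailing comma: black 20.8b1 keeps the call exploded, one
# argument per line, and will not re-join it. This is why hunks such
# as the filters_test expected_plane are expanded with a comma kept
# after every element.
out_exploded = blend(
    0.0,
    1.0,
    0.5,
)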