From e2c1378ce0fe16ef7bf5aa9e20f07ca6d25458bb Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 28 Jan 2020 21:06:07 -0500 Subject: [PATCH 001/106] initial setup. need to build tests --- .../discriminative_layer_training.py | 202 ++++++++++++++++++ .../discriminative_layer_training_test.py | 24 +++ 2 files changed, 226 insertions(+) create mode 100644 tensorflow_addons/optimizers/discriminative_layer_training.py create mode 100644 tensorflow_addons/optimizers/discriminative_layer_training_test.py diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py new file mode 100644 index 0000000000..828080a675 --- /dev/null +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -0,0 +1,202 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Discriminative Layer Training Manager for TensorFlow.""" + +# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py +# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py +# python -m flake8 + +import tensorflow as tf +import numpy as np + + +def apply_gradients(self, grads_and_vars, *args, **kwargs): + """New apply gradients function. 
+ This intercepts the grads and vars, scales them, then passes them to the old apply gradients function + + :TODO finish docstring + + """ + + if self.testing_flag: + print("Training with layerwise learning rates") + self.testing_flag = False + + grads = [] + var_list = [] + + # scale each grad based on var's lr_mult + for grad, var in grads_and_vars: + grad = tf.math.scalar_mul(var.lr_mult, grad) + grads.append(grad) + var_list.append(var) + + grads_and_vars = list(zip(grads, var_list)) + + return self._apply_gradients(grads_and_vars, *args, **kwargs) + + +# @tf.keras.utils.register_keras_serializable(package="Addons") :TODO figure out why other classes have this wrapper +class DiscriminativeLearning(object): + def __init__(self, model, verbose=True): + """Apply logic for discriminative learning to a compiled model + :TODO finish docstring + """ + + self._prepare_model(model=model, verbose=verbose) + + def _get_layers(self, layer): + """Helper method to access a layer's sublayers as a list or return an empty list + :TODO finish docstring + + """ + + try: + return layer.layers + except AttributeError: + return [] + + def _get_lr_mult(self, layer): + + """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set + :TODO finish docstring + """ + + try: + return layer.lr_mult + except AttributeError: + return 1.0 + + def _assign_lr_mult(self, layer, lr_mult, override=False): + + """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set + :TODO finish docstring + """ + + try: + if layer.lr_mult and override: + layer.lr_mult = lr_mult # check if layer has lr mult and if override, then assign the new lr mult + except AttributeError: + layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult + + def _get_lowest_layers(self, layer, propagate_lr_mult_to_sub_layers=True): + + """Helper method iterate through all nested layers of an object that behaves like a layer or model + By default, we want to propagate the lr mult to the lower layers. + tbh I can't properly explain how this works so see this post + + https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python + :TODO finish docstring + + """ + + mult = self._get_lr_mult(layer) + layers = self._get_layers(layer) + + if len(layers) > 0: + for sublayer in layers: + + # we generally want to propagate the lr mult to the lower layers + if propagate_lr_mult_to_sub_layers: + self._assign_lr_mult(sublayer, mult) + + # recursively iterate through the nested layers + for nested_sublayer in self._get_lowest_layers(sublayer): + yield nested_sublayer + + else: + yield layer + + def _apply_lr_mult_to_var(self, layer): + """Helper method to apply the lr mult to the trainable variables of a layer + :TODO finish docstring + """ + + lr_mult = self._get_lr_mult(layer) + + for var in layer.trainable_variables: + var.lr_mult = tf.convert_to_tensor(lr_mult, tf.float32) # 0D tensor + var.lr_mult_value = ( + lr_mult # easier to check vars lr mult in graph and eager + ) + + # :TODO float16 testing? 
not sure what would happen atm + + def _check_for_lr_mult(self, layer, verbose=True, propagate=True): + """Identify which layers have an LR mult not equal to 1 + :TODO finish docstring + """ + + layers_with_lr_mult = [] + + for sub_layer in self._get_lowest_layers( + layer, propagate_lr_mult_to_sub_layers=propagate + ): + lr_mult = self._get_lr_mult(sub_layer) + if lr_mult != 1.0: + layers_with_lr_mult.append(sub_layer) + if verbose: + # :TODO this should be info + print("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) + + return layers_with_lr_mult + + def _compute_params(self, var_list): + """helps compute params to provide a summary that aligns with model.summary() + :TODO finish docstring + """ + return np.sum([np.prod(list(var.shape)) for var in var_list]) + + def _prepare_model(self, model, verbose=True): + """Prepares a compiled model for discriminative layer training + + Model must be compiled first + :TODO finish docstring + """ + + layers_with_lr_mult = self._check_for_lr_mult(model, verbose=verbose) + if len(layers_with_lr_mult) == 0: + # :TODO this should be a warning + + print( + "Discriminative Layer Training requires an lr_mult attribute on at least one layer" + ) + + print( + "The assigned lr_mult must not be equal to 1. eg: model.layers[0].lr_mult = 0.01" + ) + + for layer in self._get_lowest_layers(model): + self._apply_lr_mult_to_var(layer) + + # get opt, move the original apply fn to a safe place, assign new apply fn + + opt = model.optimizer + opt._apply_gradients = opt.apply_gradients + opt.apply_gradients = apply_gradients.__get__(opt) + opt.testing_flag = True + vars_with_lr_mult = [ + var for var in model.trainable_variables if var.lr_mult_value != 1.0 + ] + + # :TODO this should be info + if verbose: + print( + "%i params of %i will learn at a different rate" + % ( + self._compute_params(vars_with_lr_mult), + self._compute_params(model.trainable_variables), + ) + ) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py new file mode 100644 index 0000000000..4be8d9204a --- /dev/null +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -0,0 +1,24 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Discriminative Layer Training Manager for TensorFlow.""" + +# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py + + +import tensorflow as tf +from tensorflow_addons.utils import test_utils +import numpy as np + +#:TODO create tests From ef4b235f7def382be797f53243c37d245dda831b Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 12:27:51 -0500 Subject: [PATCH 002/106] build some tests. 
need to test them --- .../discriminative_layer_training_test.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 4be8d9204a..0eaefd34cf 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -20,5 +20,109 @@ import tensorflow as tf from tensorflow_addons.utils import test_utils import numpy as np +from tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning #:TODO create tests +def toy_cnn(): + '''Consistently create model with same random weights + skip head activation to allow both bce with logits and cce with logits + intended to work with + x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) + y = np.zeros(shape = (None, 5), dtype = np.float32) + y[:, 0] = 1. + ''' + + tf.random.set_seed(1) + + bignet = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False + , weights=None + , input_shape=(32, 32, 3) + , pooling='avg') + + net = tf.keras.models.Model(inputs=bignet.input, outputs=bignet.get_layer('block_2_add').output) + + model = tf.keras.Sequential([net + , tf.keras.layers.GlobalAveragePooling2D() + , tf.keras.layers.Dropout(0.5) + , tf.keras.layers.Dense(5, name='head')]) + + return model + +def toy_rnn(): + '''Consistently create model with same random weights + skip head activation to allow both bce with logits and cce with logits + intended to work with + + x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) + y = np.zeros(shape = (None, 5), dtype = np.float32) + y[:, 0] = 1. + ''' + + tf.random.set_seed(1) + + model = tf.keras.Sequential() + model.add(tf.keras.layers.Input(shape=(32, 32, 3))) + model.add(tf.keras.layers.Reshape(target_shape=(32, 96))) + model.add(tf.keras.layers.Cropping1D(cropping=(0, 24))) + model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) + model.add(tf.keras.layers.Dropout(0.5)) + model.add(tf.keras.layers.Dense(5)) + + return model + +def get_train_results(model, loss, opt): + '''Run a traininng loop and return the results for analysis + Accepts loss classes and optimizer classes as defined in tf.keras.losses and tf.keras.optimizers + ''' + tf.random.set_seed(1) + + model.compile(loss=loss(), optimizer=opt) + + x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(32, 5), dtype=np.float32) + y[:, 0] = 1. 
+ + return model.fit(x, y, epochs=10, batch_size=16, verbose=0) + + +def opt_list(): + return [tf.keras.optimizers.Adam, tf.keras.optimizers.SGD] + +def loss_list(): + return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, tf.keras.losses.MSE] + +@test_utils.run_all_in_graph_and_eager_modes +class DiscriminativeLearningTest(tf.test.TestCase): + + def test_same_results_when_no_lr_mult_specified(self): + + model_fns = [toy_cnn, toy_rnn] + + for model_fn in model_fns: + for loss in loss_list(): + for opt in opt_list(): + model = model_fn() + hist = get_train_results(model, loss, opt) + + model_lr = model_fn() + DiscriminativeLearning(model_lr) + hist_lr = get_train_results(model_lr, loss, opt) + + print(hist) + print(hist_lr) + break + break + break + + return + + +if __name__ == '__main__': + d = DiscriminativeLearningTest() + d.test_same_results_when_no_lr_mult_specified() + + pass + + + + From 34e4e165150b42449372f9e0978d0c8a2370b73a Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 12:49:23 -0500 Subject: [PATCH 003/106] fixed typo --- .../optimizers/discriminative_layer_training_test.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 0eaefd34cf..dd54e31480 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -21,6 +21,7 @@ from tensorflow_addons.utils import test_utils import numpy as np from tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning +import itertools #:TODO create tests def toy_cnn(): @@ -76,7 +77,7 @@ def get_train_results(model, loss, opt): ''' tf.random.set_seed(1) - model.compile(loss=loss(), optimizer=opt) + model.compile(loss=loss(), optimizer=opt()) x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) y = np.zeros(shape=(32, 5), dtype=np.float32) @@ -91,6 +92,13 @@ def opt_list(): def loss_list(): return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, tf.keras.losses.MSE] +def zipped_permutes(): + return itertools.product([[toy_cnn, toy_rnn] + , opt_list() + , loss_list()]) + + + @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): From c9e8b99f0f5f4ac6edde52a10b04136e57d2bdf7 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 13:05:34 -0500 Subject: [PATCH 004/106] created first test --- .../optimizers/rectified_adam_test.py | 288 ++++++++---------- 1 file changed, 132 insertions(+), 156 deletions(-) diff --git a/tensorflow_addons/optimizers/rectified_adam_test.py b/tensorflow_addons/optimizers/rectified_adam_test.py index f646e798d8..a3f5fd9772 100644 --- a/tensorflow_addons/optimizers/rectified_adam_test.py +++ b/tensorflow_addons/optimizers/rectified_adam_test.py @@ -12,165 +12,141 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Tests for Rectified Adam optimizer.""" +"""Tests for Discriminative Layer Training Manager for TensorFlow.""" + +# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py -import tensorflow as tf +import tensorflow as tf from tensorflow_addons.utils import test_utils -from tensorflow_addons.optimizers import RectifiedAdam, Lookahead +import numpy as np +from tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning +import itertools + + +def toy_cnn(): + '''Consistently create model with same random weights + skip head activation to allow both bce with logits and cce with logits + intended to work with + x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) + y = np.zeros(shape = (None, 5), dtype = np.float32) + y[:, 0] = 1. + ''' + + tf.random.set_seed(1) + + bignet = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False + , weights=None + , input_shape=(32, 32, 3) + , pooling='avg') + + net = tf.keras.models.Model(inputs=bignet.input, outputs=bignet.get_layer('block_2_add').output) + + model = tf.keras.Sequential([net + , tf.keras.layers.GlobalAveragePooling2D() + , tf.keras.layers.Dropout(0.5) + , tf.keras.layers.Dense(5, name='head')]) + + return model + + +def toy_rnn(): + '''Consistently create model with same random weights + skip head activation to allow both bce with logits and cce with logits + intended to work with + + x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) + y = np.zeros(shape = (None, 5), dtype = np.float32) + y[:, 0] = 1. + ''' + + tf.random.set_seed(1) + + model = tf.keras.Sequential() + model.add(tf.keras.layers.Input(shape=(32, 32, 3))) + model.add(tf.keras.layers.Reshape(target_shape=(32, 96))) + model.add(tf.keras.layers.Cropping1D(cropping=(0, 24))) + model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) + model.add(tf.keras.layers.Dropout(0.5)) + model.add(tf.keras.layers.Dense(5)) + + return model + + +def get_train_results(model): + '''Run a traininng loop and return the results for analysis + model must be compiled first + ''' + tf.random.set_seed(1) + x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(32, 5), dtype=np.float32) + y[:, 0] = 1. 
+ + return model.fit(x, y, epochs=10, batch_size=16, verbose=0) + + +def opt_list(): + return [tf.keras.optimizers.Adam, tf.keras.optimizers.SGD] + + +def loss_list(): + return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, + tf.keras.losses.MeanSquaredError] + + +def zipped_permutes(): + return list(itertools.product([toy_cnn, toy_rnn] + , loss_list() + , opt_list() + )) + + +def get_losses(hist): + return np.array(hist.__dict__['history']['loss']) @test_utils.run_all_in_graph_and_eager_modes -class RectifiedAdamTest(tf.test.TestCase): - def run_dense_sample(self, iterations, expected, optimizer): - var_0 = tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32) - var_1 = tf.Variable([3.0, 4.0], dtype=tf.dtypes.float32) - - grad_0 = tf.constant([0.1, 0.2], dtype=tf.dtypes.float32) - grad_1 = tf.constant([0.03, 0.04], dtype=tf.dtypes.float32) - - grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1])) - - if tf.executing_eagerly(): - for _ in range(iterations): - optimizer.apply_gradients(grads_and_vars) - else: - update = optimizer.apply_gradients(grads_and_vars) - self.evaluate(tf.compat.v1.global_variables_initializer()) - for _ in range(iterations): - self.evaluate(update) - - self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4) - self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4) - - def run_sparse_sample(self, iterations, expected, optimizer): - var_0 = tf.Variable([1.0, 2.0]) - var_1 = tf.Variable([3.0, 4.0]) - - grad_0 = tf.IndexedSlices( - tf.constant([0.1]), tf.constant([0]), tf.constant([2]) - ) - grad_1 = tf.IndexedSlices( - tf.constant([0.04]), tf.constant([1]), tf.constant([2]) - ) - - grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1])) - - if tf.executing_eagerly(): - for _ in range(iterations): - optimizer.apply_gradients(grads_and_vars) - else: - update = optimizer.apply_gradients(grads_and_vars) - self.evaluate(tf.compat.v1.global_variables_initializer()) - for _ in range(iterations): - self.evaluate(update) - - self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4) - self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4) - - def test_dense_sample(self): - # Expected values are obtained from the official implementation - self.run_dense_sample( - iterations=1000, - expected=[[0.5554, 1.5549], [2.5557, 3.5557]], - optimizer=RectifiedAdam(lr=1e-3), - ) - - def test_sparse_sample(self): - # Expected values are obtained from the official implementation - # Dense results should be: [-0.1929, 0.8066], [1.8075, 2.8074] - self.run_sparse_sample( - iterations=2000, - expected=[[-0.1929, 2.0], [3.0, 2.8074]], - optimizer=RectifiedAdam(lr=1e-3), - ) - - def test_dense_sample_with_amsgrad(self): - # Expected values are obtained from the official implementation - # `amsgrad` has no effect because the gradient is fixed - self.run_dense_sample( - iterations=1000, - expected=[[0.5554, 1.5549], [2.5557, 3.5557]], - optimizer=RectifiedAdam(lr=1e-3, amsgrad=True), - ) - - def test_sparse_sample_with_amsgrad(self): - # Expected values are obtained from the official implementation - # `amsgrad` has no effect because the gradient is fixed - self.run_sparse_sample( - iterations=2000, - expected=[[-0.1929, 2.0], [3.0, 2.8074]], - optimizer=RectifiedAdam(lr=1e-3, amsgrad=True), - ) - - def test_dense_sample_with_weight_decay(self): - # Expected values are obtained from the official implementation - self.run_dense_sample( - iterations=1000, - expected=[[0.5472, 1.5368], [2.5276, 3.5176]], - 
optimizer=RectifiedAdam(lr=1e-3, weight_decay=0.01), - ) - - def test_sparse_sample_with_weight_decay(self): - # Expected values are obtained from the official implementation - # Dense results should be: [-0.2029, 0.7768], [1.7578, 2.7380] - self.run_sparse_sample( - iterations=2000, - expected=[[-0.2029, 2.0], [3.0, 2.7380]], - optimizer=RectifiedAdam(lr=1e-3, weight_decay=0.01), - ) - - def test_dense_sample_with_warmup(self): - self.run_dense_sample( - iterations=1000, - expected=[[0.8041, 1.8041], [2.8041, 3.8041]], - optimizer=RectifiedAdam( - lr=1e-3, total_steps=1000, warmup_proportion=0.1, min_lr=1e-5, - ), - ) - - def test_sparse_sample_with_warmup(self): - self.run_sparse_sample( - iterations=2000, - expected=[[0.4653, 2.0], [3.0, 3.4653]], - optimizer=RectifiedAdam( - lr=1e-3, total_steps=2000, warmup_proportion=0.1, min_lr=1e-5, - ), - ) - - def test_dense_sample_with_lookahead(self): - # Expected values are obtained from the original implementation - # of Ranger - self.run_dense_sample( - iterations=1000, - expected=[[0.7985, 1.7983], [2.7987, 3.7986]], - optimizer=Lookahead( - RectifiedAdam(lr=1e-3, beta_1=0.95,), - sync_period=6, - slow_step_size=0.45, - ), - ) - - def test_sparse_sample_with_lookahead(self): - # Expected values are obtained from the original implementation - # of Ranger. - # Dense results should be: [0.6417, 1.6415], [2.6419, 3.6418] - self.run_sparse_sample( - iterations=1500, - expected=[[0.6417, 2.0], [3.0, 3.6418]], - optimizer=Lookahead( - RectifiedAdam(lr=1e-3, beta_1=0.95,), - sync_period=6, - slow_step_size=0.45, - ), - ) - - def test_get_config(self): - opt = RectifiedAdam(lr=1e-4) - config = opt.get_config() - self.assertEqual(config["learning_rate"], 1e-4) - self.assertEqual(config["total_steps"], 0) - - -if __name__ == "__main__": - tf.test.main() +class DiscriminativeLearningTest(tf.test.TestCase): + + def test_same_results_when_no_lr_mult_specified(self): + + model_fns = [toy_cnn, toy_rnn] + + for model_fn, loss, opt in zipped_permutes(): + model = model_fn() + model.compile(loss=loss(), optimizer=opt()) + hist = get_train_results(model) + + model_lr = model_fn() + model_lr.compile(loss=loss(), optimizer=opt()) + DiscriminativeLearning(model_lr) + hist_lr = get_train_results(model_lr) + + self.assertAllClose(get_losses(hist), get_losses(hist_lr)) + + def test_same_results_when_lr_mult_is_1(self): + + model_fns = [toy_cnn, toy_rnn] + + for model_fn, loss, opt in zipped_permutes(): + model = model_fn() + model.compile(loss=loss(), optimizer=opt()) + hist = get_train_results(model) + + model_lr = model_fn() + model_lr.compile(loss=loss(), optimizer=opt()) + DiscriminativeLearning(model_lr) + hist_lr = get_train_results(model_lr) + + self.assertAllClose(get_losses(hist), get_losses(hist_lr)) + + +if __name__ == '__main__': + d = DiscriminativeLearningTest() + d.test_same_results_when_no_lr_mult_specified() + + + + + + From 3b545a4c49ccf1a01f6a9d1d177a97849c4c1360 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 13:06:33 -0500 Subject: [PATCH 005/106] created first test --- .../discriminative_layer_training_test.py | 66 ++++++++++++------- 1 file changed, 41 insertions(+), 25 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index dd54e31480..a3f5fd9772 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -23,7 +23,7 @@ from 
tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning import itertools -#:TODO create tests + def toy_cnn(): '''Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -49,6 +49,7 @@ def toy_cnn(): return model + def toy_rnn(): '''Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -71,14 +72,12 @@ def toy_rnn(): return model -def get_train_results(model, loss, opt): + +def get_train_results(model): '''Run a traininng loop and return the results for analysis - Accepts loss classes and optimizer classes as defined in tf.keras.losses and tf.keras.optimizers + model must be compiled first ''' tf.random.set_seed(1) - - model.compile(loss=loss(), optimizer=opt()) - x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) y = np.zeros(shape=(32, 5), dtype=np.float32) y[:, 0] = 1. @@ -89,15 +88,22 @@ def get_train_results(model, loss, opt): def opt_list(): return [tf.keras.optimizers.Adam, tf.keras.optimizers.SGD] + def loss_list(): - return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, tf.keras.losses.MSE] + return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, + tf.keras.losses.MeanSquaredError] + def zipped_permutes(): - return itertools.product([[toy_cnn, toy_rnn] - , opt_list() - , loss_list()]) + return list(itertools.product([toy_cnn, toy_rnn] + , loss_list() + , opt_list() + )) +def get_losses(hist): + return np.array(hist.__dict__['history']['loss']) + @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): @@ -106,30 +112,40 @@ def test_same_results_when_no_lr_mult_specified(self): model_fns = [toy_cnn, toy_rnn] - for model_fn in model_fns: - for loss in loss_list(): - for opt in opt_list(): - model = model_fn() - hist = get_train_results(model, loss, opt) + for model_fn, loss, opt in zipped_permutes(): + model = model_fn() + model.compile(loss=loss(), optimizer=opt()) + hist = get_train_results(model) + + model_lr = model_fn() + model_lr.compile(loss=loss(), optimizer=opt()) + DiscriminativeLearning(model_lr) + hist_lr = get_train_results(model_lr) - model_lr = model_fn() - DiscriminativeLearning(model_lr) - hist_lr = get_train_results(model_lr, loss, opt) + self.assertAllClose(get_losses(hist), get_losses(hist_lr)) - print(hist) - print(hist_lr) - break - break - break + def test_same_results_when_lr_mult_is_1(self): + + model_fns = [toy_cnn, toy_rnn] - return + for model_fn, loss, opt in zipped_permutes(): + model = model_fn() + model.compile(loss=loss(), optimizer=opt()) + hist = get_train_results(model) + + model_lr = model_fn() + model_lr.compile(loss=loss(), optimizer=opt()) + DiscriminativeLearning(model_lr) + hist_lr = get_train_results(model_lr) + + self.assertAllClose(get_losses(hist), get_losses(hist_lr)) if __name__ == '__main__': d = DiscriminativeLearningTest() d.test_same_results_when_no_lr_mult_specified() - pass + From fab5871e5bb19fcfc703dd72e74692a6dc739e47 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 13:07:35 -0500 Subject: [PATCH 006/106] accidentally messed up another file --- .../optimizers/rectified_adam_test.py | 288 ++++++++++-------- 1 file changed, 156 insertions(+), 132 deletions(-) diff --git a/tensorflow_addons/optimizers/rectified_adam_test.py b/tensorflow_addons/optimizers/rectified_adam_test.py index a3f5fd9772..f646e798d8 100644 --- 
a/tensorflow_addons/optimizers/rectified_adam_test.py +++ b/tensorflow_addons/optimizers/rectified_adam_test.py @@ -12,141 +12,165 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for Discriminative Layer Training Manager for TensorFlow.""" - -# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py - +"""Tests for Rectified Adam optimizer.""" import tensorflow as tf -from tensorflow_addons.utils import test_utils -import numpy as np -from tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning -import itertools - - -def toy_cnn(): - '''Consistently create model with same random weights - skip head activation to allow both bce with logits and cce with logits - intended to work with - x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) - y = np.zeros(shape = (None, 5), dtype = np.float32) - y[:, 0] = 1. - ''' - - tf.random.set_seed(1) - - bignet = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False - , weights=None - , input_shape=(32, 32, 3) - , pooling='avg') - - net = tf.keras.models.Model(inputs=bignet.input, outputs=bignet.get_layer('block_2_add').output) - - model = tf.keras.Sequential([net - , tf.keras.layers.GlobalAveragePooling2D() - , tf.keras.layers.Dropout(0.5) - , tf.keras.layers.Dense(5, name='head')]) - - return model - - -def toy_rnn(): - '''Consistently create model with same random weights - skip head activation to allow both bce with logits and cce with logits - intended to work with - - x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) - y = np.zeros(shape = (None, 5), dtype = np.float32) - y[:, 0] = 1. - ''' - - tf.random.set_seed(1) - - model = tf.keras.Sequential() - model.add(tf.keras.layers.Input(shape=(32, 32, 3))) - model.add(tf.keras.layers.Reshape(target_shape=(32, 96))) - model.add(tf.keras.layers.Cropping1D(cropping=(0, 24))) - model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) - model.add(tf.keras.layers.Dropout(0.5)) - model.add(tf.keras.layers.Dense(5)) - - return model - - -def get_train_results(model): - '''Run a traininng loop and return the results for analysis - model must be compiled first - ''' - tf.random.set_seed(1) - x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(32, 5), dtype=np.float32) - y[:, 0] = 1. 
- - return model.fit(x, y, epochs=10, batch_size=16, verbose=0) - - -def opt_list(): - return [tf.keras.optimizers.Adam, tf.keras.optimizers.SGD] - - -def loss_list(): - return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, - tf.keras.losses.MeanSquaredError] - -def zipped_permutes(): - return list(itertools.product([toy_cnn, toy_rnn] - , loss_list() - , opt_list() - )) - - -def get_losses(hist): - return np.array(hist.__dict__['history']['loss']) +from tensorflow_addons.utils import test_utils +from tensorflow_addons.optimizers import RectifiedAdam, Lookahead @test_utils.run_all_in_graph_and_eager_modes -class DiscriminativeLearningTest(tf.test.TestCase): - - def test_same_results_when_no_lr_mult_specified(self): - - model_fns = [toy_cnn, toy_rnn] - - for model_fn, loss, opt in zipped_permutes(): - model = model_fn() - model.compile(loss=loss(), optimizer=opt()) - hist = get_train_results(model) - - model_lr = model_fn() - model_lr.compile(loss=loss(), optimizer=opt()) - DiscriminativeLearning(model_lr) - hist_lr = get_train_results(model_lr) - - self.assertAllClose(get_losses(hist), get_losses(hist_lr)) - - def test_same_results_when_lr_mult_is_1(self): - - model_fns = [toy_cnn, toy_rnn] - - for model_fn, loss, opt in zipped_permutes(): - model = model_fn() - model.compile(loss=loss(), optimizer=opt()) - hist = get_train_results(model) - - model_lr = model_fn() - model_lr.compile(loss=loss(), optimizer=opt()) - DiscriminativeLearning(model_lr) - hist_lr = get_train_results(model_lr) - - self.assertAllClose(get_losses(hist), get_losses(hist_lr)) - - -if __name__ == '__main__': - d = DiscriminativeLearningTest() - d.test_same_results_when_no_lr_mult_specified() - - - - - - +class RectifiedAdamTest(tf.test.TestCase): + def run_dense_sample(self, iterations, expected, optimizer): + var_0 = tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32) + var_1 = tf.Variable([3.0, 4.0], dtype=tf.dtypes.float32) + + grad_0 = tf.constant([0.1, 0.2], dtype=tf.dtypes.float32) + grad_1 = tf.constant([0.03, 0.04], dtype=tf.dtypes.float32) + + grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1])) + + if tf.executing_eagerly(): + for _ in range(iterations): + optimizer.apply_gradients(grads_and_vars) + else: + update = optimizer.apply_gradients(grads_and_vars) + self.evaluate(tf.compat.v1.global_variables_initializer()) + for _ in range(iterations): + self.evaluate(update) + + self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4) + self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4) + + def run_sparse_sample(self, iterations, expected, optimizer): + var_0 = tf.Variable([1.0, 2.0]) + var_1 = tf.Variable([3.0, 4.0]) + + grad_0 = tf.IndexedSlices( + tf.constant([0.1]), tf.constant([0]), tf.constant([2]) + ) + grad_1 = tf.IndexedSlices( + tf.constant([0.04]), tf.constant([1]), tf.constant([2]) + ) + + grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1])) + + if tf.executing_eagerly(): + for _ in range(iterations): + optimizer.apply_gradients(grads_and_vars) + else: + update = optimizer.apply_gradients(grads_and_vars) + self.evaluate(tf.compat.v1.global_variables_initializer()) + for _ in range(iterations): + self.evaluate(update) + + self.assertAllClose(var_0.read_value(), expected[0], atol=2e-4) + self.assertAllClose(var_1.read_value(), expected[1], atol=2e-4) + + def test_dense_sample(self): + # Expected values are obtained from the official implementation + self.run_dense_sample( + iterations=1000, + expected=[[0.5554, 1.5549], [2.5557, 3.5557]], + 
optimizer=RectifiedAdam(lr=1e-3), + ) + + def test_sparse_sample(self): + # Expected values are obtained from the official implementation + # Dense results should be: [-0.1929, 0.8066], [1.8075, 2.8074] + self.run_sparse_sample( + iterations=2000, + expected=[[-0.1929, 2.0], [3.0, 2.8074]], + optimizer=RectifiedAdam(lr=1e-3), + ) + + def test_dense_sample_with_amsgrad(self): + # Expected values are obtained from the official implementation + # `amsgrad` has no effect because the gradient is fixed + self.run_dense_sample( + iterations=1000, + expected=[[0.5554, 1.5549], [2.5557, 3.5557]], + optimizer=RectifiedAdam(lr=1e-3, amsgrad=True), + ) + + def test_sparse_sample_with_amsgrad(self): + # Expected values are obtained from the official implementation + # `amsgrad` has no effect because the gradient is fixed + self.run_sparse_sample( + iterations=2000, + expected=[[-0.1929, 2.0], [3.0, 2.8074]], + optimizer=RectifiedAdam(lr=1e-3, amsgrad=True), + ) + + def test_dense_sample_with_weight_decay(self): + # Expected values are obtained from the official implementation + self.run_dense_sample( + iterations=1000, + expected=[[0.5472, 1.5368], [2.5276, 3.5176]], + optimizer=RectifiedAdam(lr=1e-3, weight_decay=0.01), + ) + + def test_sparse_sample_with_weight_decay(self): + # Expected values are obtained from the official implementation + # Dense results should be: [-0.2029, 0.7768], [1.7578, 2.7380] + self.run_sparse_sample( + iterations=2000, + expected=[[-0.2029, 2.0], [3.0, 2.7380]], + optimizer=RectifiedAdam(lr=1e-3, weight_decay=0.01), + ) + + def test_dense_sample_with_warmup(self): + self.run_dense_sample( + iterations=1000, + expected=[[0.8041, 1.8041], [2.8041, 3.8041]], + optimizer=RectifiedAdam( + lr=1e-3, total_steps=1000, warmup_proportion=0.1, min_lr=1e-5, + ), + ) + + def test_sparse_sample_with_warmup(self): + self.run_sparse_sample( + iterations=2000, + expected=[[0.4653, 2.0], [3.0, 3.4653]], + optimizer=RectifiedAdam( + lr=1e-3, total_steps=2000, warmup_proportion=0.1, min_lr=1e-5, + ), + ) + + def test_dense_sample_with_lookahead(self): + # Expected values are obtained from the original implementation + # of Ranger + self.run_dense_sample( + iterations=1000, + expected=[[0.7985, 1.7983], [2.7987, 3.7986]], + optimizer=Lookahead( + RectifiedAdam(lr=1e-3, beta_1=0.95,), + sync_period=6, + slow_step_size=0.45, + ), + ) + + def test_sparse_sample_with_lookahead(self): + # Expected values are obtained from the original implementation + # of Ranger. 
+ # Dense results should be: [0.6417, 1.6415], [2.6419, 3.6418] + self.run_sparse_sample( + iterations=1500, + expected=[[0.6417, 2.0], [3.0, 3.6418]], + optimizer=Lookahead( + RectifiedAdam(lr=1e-3, beta_1=0.95,), + sync_period=6, + slow_step_size=0.45, + ), + ) + + def test_get_config(self): + opt = RectifiedAdam(lr=1e-4) + config = opt.get_config() + self.assertEqual(config["learning_rate"], 1e-4) + self.assertEqual(config["total_steps"], 0) + + +if __name__ == "__main__": + tf.test.main() From 333e90745ea1e8cc403bbc8f004dcc49641cb38b Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:02:00 -0500 Subject: [PATCH 007/106] accidentally messed up another file --- .../discriminative_layer_training.py | 6 +- .../discriminative_layer_training_test.py | 71 +++++++++---------- 2 files changed, 34 insertions(+), 43 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 828080a675..590f59c5fc 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -14,10 +14,6 @@ # ============================================================================== """Discriminative Layer Training Manager for TensorFlow.""" -# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py -# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py -# python -m flake8 - import tensorflow as tf import numpy as np @@ -142,7 +138,7 @@ def _check_for_lr_mult(self, layer, verbose=True, propagate=True): layers_with_lr_mult = [] for sub_layer in self._get_lowest_layers( - layer, propagate_lr_mult_to_sub_layers=propagate + layer, propagate_lr_mult_to_sub_layers=propagate ): lr_mult = self._get_lr_mult(sub_layer) if lr_mult != 1.0: diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index a3f5fd9772..acdf260d42 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -14,51 +14,56 @@ # ============================================================================== """Tests for Discriminative Layer Training Manager for TensorFlow.""" -# python -m black tensorflow_addons/optimizers/discriminative_layer_training_test.py - import tensorflow as tf from tensorflow_addons.utils import test_utils import numpy as np -from tensorflow_addons.optimizers.discriminative_layer_training import DiscriminativeLearning +from tensorflow_addons.optimizers.discriminative_layer_training import ( + DiscriminativeLearning, +) import itertools def toy_cnn(): - '''Consistently create model with same random weights + """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. 
- ''' + """ tf.random.set_seed(1) - bignet = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False - , weights=None - , input_shape=(32, 32, 3) - , pooling='avg') + bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( + include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" + ) - net = tf.keras.models.Model(inputs=bignet.input, outputs=bignet.get_layer('block_2_add').output) + net = tf.keras.models.Model( + inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output + ) - model = tf.keras.Sequential([net - , tf.keras.layers.GlobalAveragePooling2D() - , tf.keras.layers.Dropout(0.5) - , tf.keras.layers.Dense(5, name='head')]) + model = tf.keras.Sequential( + [ + net, + tf.keras.layers.GlobalAveragePooling2D(), + tf.keras.layers.Dropout(0.5), + tf.keras.layers.Dense(5, name="head"), + ] + ) return model def toy_rnn(): - '''Consistently create model with same random weights + """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. - ''' + """ tf.random.set_seed(1) @@ -74,13 +79,13 @@ def toy_rnn(): def get_train_results(model): - '''Run a traininng loop and return the results for analysis + """Run a traininng loop and return the results for analysis model must be compiled first - ''' + """ tf.random.set_seed(1) x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) y = np.zeros(shape=(32, 5), dtype=np.float32) - y[:, 0] = 1. + y[:, 0] = 1.0 return model.fit(x, y, epochs=10, batch_size=16, verbose=0) @@ -90,28 +95,25 @@ def opt_list(): def loss_list(): - return [tf.keras.losses.BinaryCrossentropy, tf.keras.losses.CategoricalCrossentropy, - tf.keras.losses.MeanSquaredError] + return [ + tf.keras.losses.BinaryCrossentropy, + tf.keras.losses.CategoricalCrossentropy, + tf.keras.losses.MeanSquaredError, + ] def zipped_permutes(): - return list(itertools.product([toy_cnn, toy_rnn] - , loss_list() - , opt_list() - )) + return list(itertools.product([toy_cnn, toy_rnn], loss_list(), opt_list())) def get_losses(hist): - return np.array(hist.__dict__['history']['loss']) + return np.array(hist.__dict__["history"]["loss"]) @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): - def test_same_results_when_no_lr_mult_specified(self): - model_fns = [toy_cnn, toy_rnn] - for model_fn, loss, opt in zipped_permutes(): model = model_fn() model.compile(loss=loss(), optimizer=opt()) @@ -126,14 +128,13 @@ def test_same_results_when_no_lr_mult_specified(self): def test_same_results_when_lr_mult_is_1(self): - model_fns = [toy_cnn, toy_rnn] - for model_fn, loss, opt in zipped_permutes(): model = model_fn() model.compile(loss=loss(), optimizer=opt()) hist = get_train_results(model) model_lr = model_fn() + model_lr.lr_mult = 1.0 model_lr.compile(loss=loss(), optimizer=opt()) DiscriminativeLearning(model_lr) hist_lr = get_train_results(model_lr) @@ -141,12 +142,6 @@ def test_same_results_when_lr_mult_is_1(self): self.assertAllClose(get_losses(hist), get_losses(hist_lr)) -if __name__ == '__main__': +if __name__ == "__main__": d = DiscriminativeLearningTest() d.test_same_results_when_no_lr_mult_specified() - - - - - - From b79135d25b2184719e55e7d72e38aef0c9db3816 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:04:17 -0500 Subject: [PATCH 008/106] accidentally messed up another file --- 
.../optimizers/discriminative_layer_training_test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index acdf260d42..698f76607a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -113,7 +113,8 @@ def get_losses(hist): @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): def test_same_results_when_no_lr_mult_specified(self): - + """Results for training with no lr mult specified + should be the same as training without the discriminative learning""" for model_fn, loss, opt in zipped_permutes(): model = model_fn() model.compile(loss=loss(), optimizer=opt()) @@ -127,6 +128,8 @@ def test_same_results_when_no_lr_mult_specified(self): self.assertAllClose(get_losses(hist), get_losses(hist_lr)) def test_same_results_when_lr_mult_is_1(self): + """Results for training with no lr mult specified + should be the same as training with the discriminative learning and all layers with lr_mult set to 1""" for model_fn, loss, opt in zipped_permutes(): model = model_fn() From c4d2588853311f9356951d105c6a1950826a7db9 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:06:10 -0500 Subject: [PATCH 009/106] added run all distributed --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 698f76607a..7e2bbfe57a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -109,7 +109,7 @@ def zipped_permutes(): def get_losses(hist): return np.array(hist.__dict__["history"]["loss"]) - +@test_utils.run_all_distributed @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): def test_same_results_when_no_lr_mult_specified(self): From c47c34a4e4306927bbd3de72cbcb1d23a9fe6f8d Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:08:46 -0500 Subject: [PATCH 010/106] fixed formatting --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 7e2bbfe57a..917e353205 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -14,7 +14,6 @@ # ============================================================================== """Tests for Discriminative Layer Training Manager for TensorFlow.""" - import tensorflow as tf from tensorflow_addons.utils import test_utils import numpy as np @@ -109,6 +108,7 @@ def zipped_permutes(): def get_losses(hist): return np.array(hist.__dict__["history"]["loss"]) + @test_utils.run_all_distributed @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): From a510f7082127565dc7dbe4f81b8dabe848501d64 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:27:21 -0500 Subject: [PATCH 011/106] trying to fix tests not running on github CI. 
--- .../discriminative_layer_training_test.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 917e353205..55e015cace 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -112,6 +112,25 @@ def get_losses(hist): @test_utils.run_all_distributed @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): + + # TODO: create test generator + # def __init__(self, *args, **kwargs): + # super(DiscriminativeLearningTest, self).__init__(*args, **kwargs) + # + # for model_fn, loss, opt in zipped_permutes(): + + # def _test_same_results_when_no_lr_mult_specified(self, model_fn, loss, opt): + # model = model_fn() + # model.compile(loss=loss(), optimizer=opt()) + # hist = get_train_results(model) + # + # model_lr = model_fn() + # model_lr.compile(loss=loss(), optimizer=opt()) + # DiscriminativeLearning(model_lr) + # hist_lr = get_train_results(model_lr) + # + # self.assertAllClose(get_losses(hist), get_losses(hist_lr)) + def test_same_results_when_no_lr_mult_specified(self): """Results for training with no lr mult specified should be the same as training without the discriminative learning""" @@ -146,5 +165,4 @@ def test_same_results_when_lr_mult_is_1(self): if __name__ == "__main__": - d = DiscriminativeLearningTest() - d.test_same_results_when_no_lr_mult_specified() + tf.test.main() From 69404330ec8014a1d1e13f801eb0c2b09ba01793 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 14:38:15 -0500 Subject: [PATCH 012/106] realized that I should probably add the new optimizer files to the build and init --- tensorflow_addons/optimizers/BUILD | 15 +++++++++++++++ tensorflow_addons/optimizers/__init__.py | 4 ++++ 2 files changed, 19 insertions(+) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index 9e896e0a60..a2850e68ce 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -19,6 +19,7 @@ py_library( "utils.py", "weight_decay_optimizers.py", "yogi.py", + "discriminative_layer_training.py" ], deps = [ "//tensorflow_addons/utils", @@ -156,3 +157,17 @@ py_test( ":optimizers", ], ) + + + +py_test( + name = "discriminative_layer_training_test", + size = "small", + srcs = [ + "discriminative_layer_training_test.py", + ], + main = "discriminative_layer_training_test.py", + deps = [ + ":optimizers", + ], +) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index 7fbe3df8f6..6244f7c97b 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -36,3 +36,7 @@ from tensorflow_addons.optimizers.weight_decay_optimizers import ( extend_with_decoupled_weight_decay) from tensorflow_addons.optimizers.yogi import Yogi + +from tensorflow_addons.optimizers.discriminative_layer_training import ( + DiscriminativeLearning, +) From af932d8d3874c2b0d2e34c1e624323bab522da90 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 15:06:51 -0500 Subject: [PATCH 013/106] added typeguard and docstring --- .../discriminative_layer_training.py | 34 ++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py 
b/tensorflow_addons/optimizers/discriminative_layer_training.py index 590f59c5fc..b3e00e6fbb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -16,6 +16,7 @@ import tensorflow as tf import numpy as np +from typeguard import typechecked def apply_gradients(self, grads_and_vars, *args, **kwargs): @@ -46,7 +47,38 @@ def apply_gradients(self, grads_and_vars, *args, **kwargs): # @tf.keras.utils.register_keras_serializable(package="Addons") :TODO figure out why other classes have this wrapper class DiscriminativeLearning(object): - def __init__(self, model, verbose=True): + """Discriminative Learning Model Modifier. + + Discriminative Learning is a technique that applies different learning rates to + different layers in a model. Generally, a lower learning rate is applied to the + layers closest to the input and a higher learning rate is applied to layers closer + to the output. This method helps in transfer learning by quickly calibrating the head + of a model while preserving the useful weights in the main part of the model. + + Example usage + model = tf.keras.Sequential() + model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) + model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) + DiscriminativeLearning(model) + model.fit(x, y) + + Arguments + model: tf.keras.Model, The model to be used for discriminative learning. + It should have at least 1 layer with the attribute lr_mult. The lr_mult should + be set to a value not equal to 1. Otherwise, you will have the exact same + result as not using discriminative learning. + + verbose: Bool, to generate a report on how many parameters are affected + + Returns + None - The model object passed to in the arguments will be modified + + References + - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) + """ + + @typechecked + def __init__(self, model: tf.keras.Model, verbose: bool = True): """Apply logic for discriminative learning to a compiled model :TODO finish docstring """ From ea1f62ee963cf2a9e92a31125c950fe68a6c0bad Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 29 Jan 2020 15:51:27 -0500 Subject: [PATCH 014/106] removed run_all_distributed --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 55e015cace..d3d84ac953 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -109,7 +109,7 @@ def get_losses(hist): return np.array(hist.__dict__["history"]["loss"]) -@test_utils.run_all_distributed +# @test_utils.run_all_distributed @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): From 80e2768415240bf451ab01c77676d7f7bb26eede Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 13:07:50 -0500 Subject: [PATCH 015/106] graph and eager testing for SGD --- .../discriminative_layer_training.py | 22 +- .../discriminative_layer_training_test.py | 226 +++++++++++------- 2 files changed, 158 insertions(+), 90 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index b3e00e6fbb..c9356efd45 100644 --- 
a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -19,6 +19,23 @@ from typeguard import typechecked +def _compute_gradients(self, loss, var_list, grad_loss=None): + grads_and_vars = self.__compute_gradients(loss, var_list, grad_loss) + + grads = [] + var_list = [] + + # scale each grad based on var's lr_mult + for grad, var in grads_and_vars: + grad = tf.math.scalar_mul(var.lr_mult, grad) + grads.append(grad) + var_list.append(var) + + grads_and_vars = list(zip(grads, var_list)) + + return grads_and_vars + + def apply_gradients(self, grads_and_vars, *args, **kwargs): """New apply gradients function. This intercepts the grads and vars, scales them, then passes them to the old apply gradients function @@ -212,8 +229,9 @@ def _prepare_model(self, model, verbose=True): # get opt, move the original apply fn to a safe place, assign new apply fn opt = model.optimizer - opt._apply_gradients = opt.apply_gradients - opt.apply_gradients = apply_gradients.__get__(opt) + opt.__compute_gradients = opt._compute_gradients + opt._compute_gradients = _compute_gradients.__get__(opt) + opt.testing_flag = True vars_with_lr_mult = [ var for var in model.trainable_variables if var.lr_mult_value != 1.0 diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index d3d84ac953..1d7b4fe3de 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -21,41 +21,60 @@ DiscriminativeLearning, ) import itertools +import os +from tensorflow.python.eager import context def toy_cnn(): """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits - intended to work with + + The model returned by this function should have identical weights to all + other models returned by this function, for the duration of that + continuous integration run + + model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. 
""" - tf.random.set_seed(1) - - bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( - include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" - ) - - net = tf.keras.models.Model( - inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output - ) - - model = tf.keras.Sequential( - [ - net, - tf.keras.layers.GlobalAveragePooling2D(), - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(5, name="head"), - ] - ) - - return model - - + cnn_model_path = "cnn.h5" + + if not os.path.exists(cnn_model_path): + # force eager mode for simple initialization of vars + with context.eager_mode(): + tf.random.set_seed(1) + bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( + include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" + ) + + # take the first few layers so we cover BN, Conv, Pooling ops for testing + net = tf.keras.models.Model( + inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output + ) + model = tf.keras.Sequential( + [ + net, + tf.keras.layers.GlobalAveragePooling2D(), + tf.keras.layers.Dropout(0.5), + tf.keras.layers.Dense(5, name="head"), + ] + ) + # always save and never return initialized model from memory + # it seems you cannot pass variables from a nested eager context to its parent graph context + + model.save(cnn_model_path) + + # load the initialized model from the disk + return tf.keras.models.load_model(cnn_model_path) + + +# TODO: get toy_run to work def toy_rnn(): - """Consistently create model with same random weights + """ + + Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits intended to work with @@ -86,23 +105,21 @@ def get_train_results(model): y = np.zeros(shape=(32, 5), dtype=np.float32) y[:, 0] = 1.0 - return model.fit(x, y, epochs=10, batch_size=16, verbose=0) - - -def opt_list(): - return [tf.keras.optimizers.Adam, tf.keras.optimizers.SGD] - - -def loss_list(): - return [ - tf.keras.losses.BinaryCrossentropy, - tf.keras.losses.CategoricalCrossentropy, - tf.keras.losses.MeanSquaredError, - ] + return model.fit(x, y, epochs=10, batch_size=16, verbose=0, shuffle=False) def zipped_permutes(): - return list(itertools.product([toy_cnn, toy_rnn], loss_list(), opt_list())) + model_fns = [toy_cnn] + losses = [ + tf.keras.losses.BinaryCrossentropy(from_logits=True), + tf.keras.losses.CategoricalCrossentropy(from_logits=True), + tf.keras.losses.MeanSquaredError(), + ] + optimzers = [ + tf.keras.optimizers.SGD + # , tf.keras.optimizers.Adam + ] + return list(itertools.product(model_fns, losses, optimzers)) def get_losses(hist): @@ -112,57 +129,90 @@ def get_losses(hist): # @test_utils.run_all_distributed @test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): - - # TODO: create test generator - # def __init__(self, *args, **kwargs): - # super(DiscriminativeLearningTest, self).__init__(*args, **kwargs) - # - # for model_fn, loss, opt in zipped_permutes(): - - # def _test_same_results_when_no_lr_mult_specified(self, model_fn, loss, opt): - # model = model_fn() - # model.compile(loss=loss(), optimizer=opt()) - # hist = get_train_results(model) - # - # model_lr = model_fn() - # model_lr.compile(loss=loss(), optimizer=opt()) - # DiscriminativeLearning(model_lr) - # hist_lr = get_train_results(model_lr) - # - # self.assertAllClose(get_losses(hist), get_losses(hist_lr)) - - def test_same_results_when_no_lr_mult_specified(self): - """Results for training with no lr mult specified - should be the same as 
training without the discriminative learning""" - for model_fn, loss, opt in zipped_permutes(): - model = model_fn() - model.compile(loss=loss(), optimizer=opt()) - hist = get_train_results(model) - - model_lr = model_fn() - model_lr.compile(loss=loss(), optimizer=opt()) - DiscriminativeLearning(model_lr) - hist_lr = get_train_results(model_lr) - - self.assertAllClose(get_losses(hist), get_losses(hist_lr)) - - def test_same_results_when_lr_mult_is_1(self): - """Results for training with no lr mult specified - should be the same as training with the discriminative learning and all layers with lr_mult set to 1""" - - for model_fn, loss, opt in zipped_permutes(): - model = model_fn() - model.compile(loss=loss(), optimizer=opt()) - hist = get_train_results(model) - - model_lr = model_fn() - model_lr.lr_mult = 1.0 - model_lr.compile(loss=loss(), optimizer=opt()) - DiscriminativeLearning(model_lr) - hist_lr = get_train_results(model_lr) - - self.assertAllClose(get_losses(hist), get_losses(hist_lr)) + def _assert_losses_are_close(self, hist, hist_lr): + """higher tolerance for graph due to non determinism""" + if tf.executing_eagerly(): + rtol, atol = 1e-6, 1e-6 + else: + # atol isn't important. + rtol, atol = 0.05, 1.00 + rtol, atol = 0.01, 0.01 + return self.assertAllClose( + get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol + ) + + def _assert_training_losses_are_close(self, model, model_lr): + hist = get_train_results(model) + hist_lr = get_train_results(model_lr) + self._assert_losses_are_close(hist, hist_lr) + + def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): + model = model_fn() + model.compile(loss=loss, optimizer=opt()) + + model_lr = model_fn() + model_lr.compile(loss=loss, optimizer=opt()) + DiscriminativeLearning(model_lr) + + self._assert_training_losses_are_close(model, model_lr) + + def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): + model = model_fn() + model.trainable = False + model.compile(loss=loss, optimizer=opt()) + + model_lr = model_fn() + model_lr.lr_mult = 0. 
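        # the comparison here assumes DiscriminativeLearning propagates an lr_mult set
        # on the root model down to every sublayer and its trainable variables, so a
        # multiplier of 0 zeroes every scaled gradient and should train the same way
        # as setting model.trainable = False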
+ model_lr.compile(loss=loss, optimizer=opt()) + DiscriminativeLearning(model_lr) + + self._assert_training_losses_are_close(model, model_lr) + + def _test_equal_layer_lr_to_opt_lr(self, model_fn, loss, opt): + lr = 0.001 + model = model_fn() + model.compile(loss=loss, optimizer=opt(learning_rate=lr * 0.5)) + + model_lr = model_fn() + model_lr.lr_mult = 0.5 + model_lr.compile(loss=loss, optimizer=opt(learning_rate=lr)) + DiscriminativeLearning(model_lr) + + self._assert_training_losses_are_close(model, model_lr) + + def _run_tests_in_notebook(self): + for name, method in DiscriminativeLearningTest.__dict__.items(): + if callable(method) and name[:4] == "test": + print("running test %s" % name) + method(self) + + +def test_wrap(method, **kwargs): + # @test_utils.run_in_graph_and_eager_modes + def test(self): + return method(self, **kwargs) + + return test + + +def generate_tests(): + for name, method in DiscriminativeLearningTest.__dict__.copy().items(): + if callable(method) and name[:5] == "_test": + for model_fn, loss, opt in zipped_permutes()[:2]: + testmethodname = name[1:] + "_%s_%s_%s" % ( + model_fn.__name__, + loss.name, + opt.__name__, + ) + testmethod = test_wrap( + method=method, model_fn=model_fn, loss=loss, opt=opt + ) + setattr(DiscriminativeLearningTest, testmethodname, testmethod) if __name__ == "__main__": + generate_tests() + + # DiscriminativeLearningTest()._run_tests_in_notebook() + # print("done") tf.test.main() From f89ac90b9f6f2dd8c7c1eb1cfbfcbbafc1e37b07 Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 13:13:29 -0500 Subject: [PATCH 016/106] reformatted --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 1d7b4fe3de..f0f8e7ff09 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -162,7 +162,7 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): model.compile(loss=loss, optimizer=opt()) model_lr = model_fn() - model_lr.lr_mult = 0. + model_lr.lr_mult = 0.0 model_lr.compile(loss=loss, optimizer=opt()) DiscriminativeLearning(model_lr) From 1137bc3edcc8a59e1022b8bcd3452ae4099ffdd6 Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 14:45:21 -0500 Subject: [PATCH 017/106] added distributed tests --- .../discriminative_layer_training_test.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index f0f8e7ff09..29b13641dc 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -134,9 +134,8 @@ def _assert_losses_are_close(self, hist, hist_lr): if tf.executing_eagerly(): rtol, atol = 1e-6, 1e-6 else: - # atol isn't important. 
rtol, atol = 0.05, 1.00 - rtol, atol = 0.01, 0.01 + return self.assertAllClose( get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol ) @@ -188,7 +187,7 @@ def _run_tests_in_notebook(self): def test_wrap(method, **kwargs): - # @test_utils.run_in_graph_and_eager_modes + @test_utils.run_in_graph_and_eager_modes def test(self): return method(self, **kwargs) @@ -196,6 +195,8 @@ def test(self): def generate_tests(): + distributed_dec = test_utils.run_distributed(2) + for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": for model_fn, loss, opt in zipped_permutes()[:2]: @@ -209,6 +210,12 @@ def generate_tests(): ) setattr(DiscriminativeLearningTest, testmethodname, testmethod) + setattr( + DiscriminativeLearningTest, + testmethodname + "_distributed", + distributed_dec(testmethod), + ) + if __name__ == "__main__": generate_tests() From 11497195bae1746adcd015ab413fb504fea6e7cc Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 16:09:47 -0500 Subject: [PATCH 018/106] removed distributed tests --- .../optimizers/discriminative_layer_training_test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 29b13641dc..459bacbcd0 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -210,11 +210,11 @@ def generate_tests(): ) setattr(DiscriminativeLearningTest, testmethodname, testmethod) - setattr( - DiscriminativeLearningTest, - testmethodname + "_distributed", - distributed_dec(testmethod), - ) + # setattr( + # DiscriminativeLearningTest, + # testmethodname + "_distributed", + # distributed_dec(testmethod), + # ) if __name__ == "__main__": From 87b396c8d519979e613ac3280c6816e1af10e42f Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 16:32:42 -0500 Subject: [PATCH 019/106] reverted discriminative layer grad adjust back to apply gradients --- .../discriminative_layer_training.py | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index c9356efd45..b3e00e6fbb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -19,23 +19,6 @@ from typeguard import typechecked -def _compute_gradients(self, loss, var_list, grad_loss=None): - grads_and_vars = self.__compute_gradients(loss, var_list, grad_loss) - - grads = [] - var_list = [] - - # scale each grad based on var's lr_mult - for grad, var in grads_and_vars: - grad = tf.math.scalar_mul(var.lr_mult, grad) - grads.append(grad) - var_list.append(var) - - grads_and_vars = list(zip(grads, var_list)) - - return grads_and_vars - - def apply_gradients(self, grads_and_vars, *args, **kwargs): """New apply gradients function. 
This intercepts the grads and vars, scales them, then passes them to the old apply gradients function @@ -229,9 +212,8 @@ def _prepare_model(self, model, verbose=True): # get opt, move the original apply fn to a safe place, assign new apply fn opt = model.optimizer - opt.__compute_gradients = opt._compute_gradients - opt._compute_gradients = _compute_gradients.__get__(opt) - + opt._apply_gradients = opt.apply_gradients + opt.apply_gradients = apply_gradients.__get__(opt) opt.testing_flag = True vars_with_lr_mult = [ var for var in model.trainable_variables if var.lr_mult_value != 1.0 From a5a8a6f0eb1ebfb898039289562a92593df88734 Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 17:37:47 -0500 Subject: [PATCH 020/106] added distributed tests with one time virtual device init --- .../discriminative_layer_training_test.py | 78 +++++++++++-------- 1 file changed, 47 insertions(+), 31 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 459bacbcd0..15ca82c6fb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -126,13 +126,11 @@ def get_losses(hist): return np.array(hist.__dict__["history"]["loss"]) -# @test_utils.run_all_distributed -@test_utils.run_all_in_graph_and_eager_modes class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): """higher tolerance for graph due to non determinism""" if tf.executing_eagerly(): - rtol, atol = 1e-6, 1e-6 + rtol, atol = 0.01, 0.01 else: rtol, atol = 0.05, 1.00 @@ -151,7 +149,7 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): model_lr = model_fn() model_lr.compile(loss=loss, optimizer=opt()) - DiscriminativeLearning(model_lr) + DiscriminativeLearning(model_lr, verbose=False) self._assert_training_losses_are_close(model, model_lr) @@ -163,21 +161,19 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): model_lr = model_fn() model_lr.lr_mult = 0.0 model_lr.compile(loss=loss, optimizer=opt()) - DiscriminativeLearning(model_lr) + DiscriminativeLearning(model_lr, verbose=False) self._assert_training_losses_are_close(model, model_lr) - def _test_equal_layer_lr_to_opt_lr(self, model_fn, loss, opt): - lr = 0.001 - model = model_fn() - model.compile(loss=loss, optimizer=opt(learning_rate=lr * 0.5)) + def _test_loss_changes_over_time(self, model_fn, loss, opt): model_lr = model_fn() - model_lr.lr_mult = 0.5 - model_lr.compile(loss=loss, optimizer=opt(learning_rate=lr)) - DiscriminativeLearning(model_lr) + model_lr.layers[0].lr_mult = 0.01 + model_lr.compile(loss=loss, optimizer=opt()) + DiscriminativeLearning(model_lr, verbose=False) - self._assert_training_losses_are_close(model, model_lr) + loss_values = get_losses(get_train_results(model_lr)) + self.assertLess(loss_values[-1], loss_values[0]) def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): @@ -186,40 +182,60 @@ def _run_tests_in_notebook(self): method(self) -def test_wrap(method, **kwargs): +def run_distributed(devices): + def decorator(f): + def decorated(self, *args, **kwargs): + logical_devices = devices + strategy = tf.distribute.MirroredStrategy(logical_devices) + with strategy.scope(): + f(self, *args, **kwargs) + + return decorated + + return decorator + + +def test_wrap(method, devices, **kwargs): @test_utils.run_in_graph_and_eager_modes - def 
test(self): + def single(self): return method(self, **kwargs) - return test + @test_utils.run_in_graph_and_eager_modes + @run_distributed(devices) + def distributed(self): + return method(self, **kwargs) + return single, distributed -def generate_tests(): - distributed_dec = test_utils.run_distributed(2) +def generate_tests(devices): for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": - for model_fn, loss, opt in zipped_permutes()[:2]: + for model_fn, loss, opt in zipped_permutes(): testmethodname = name[1:] + "_%s_%s_%s" % ( model_fn.__name__, loss.name, opt.__name__, ) - testmethod = test_wrap( - method=method, model_fn=model_fn, loss=loss, opt=opt + testmethod, testmethod_dist = test_wrap( + method=method, + devices=devices, + model_fn=model_fn, + loss=loss, + opt=opt, ) - setattr(DiscriminativeLearningTest, testmethodname, testmethod) - # setattr( - # DiscriminativeLearningTest, - # testmethodname + "_distributed", - # distributed_dec(testmethod), - # ) + setattr(DiscriminativeLearningTest, testmethodname, testmethod) + setattr( + DiscriminativeLearningTest, + testmethodname + "_distributed", + testmethod_dist, + ) if __name__ == "__main__": - generate_tests() - - # DiscriminativeLearningTest()._run_tests_in_notebook() - # print("done") + devices = test_utils.create_virtual_devices(2) + generate_tests(devices) + # DiscriminativeLearningTest()._run_tests_in_notebook() + # print("done") tf.test.main() From e76835fbe2351b79a03e1169133107a02b0ff55b Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 18:03:17 -0500 Subject: [PATCH 021/106] increased tolerance for distributed added comments explaining tests --- .../optimizers/discriminative_layer_training_test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 15ca82c6fb..0d97d74626 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -128,8 +128,8 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): - """higher tolerance for graph due to non determinism""" - if tf.executing_eagerly(): + """higher tolerance for graph and distributed bc unable to run deterministically""" + if tf.executing_eagerly() and not tf.distribute.has_strategy(): rtol, atol = 0.01, 0.01 else: rtol, atol = 0.05, 1.00 @@ -144,6 +144,7 @@ def _assert_training_losses_are_close(self, model, model_lr): self._assert_losses_are_close(hist, hist_lr) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): + """confirm that discriminative learning is almost the same as regular learning""" model = model_fn() model.compile(loss=loss, optimizer=opt()) @@ -154,6 +155,8 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): + """confirm 0 lr for the model is the same as model not trainable""" + model = model_fn() model.trainable = False model.compile(loss=loss, optimizer=opt()) @@ -166,6 +169,7 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) def _test_loss_changes_over_time(self, model_fn, loss, opt): + """confirm that model trains with lower lr on 
specific layer""" model_lr = model_fn() model_lr.layers[0].lr_mult = 0.01 From 504d4bb200f1924a6df7f420490c41795cf5ebf8 Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 30 Jan 2020 18:18:47 -0500 Subject: [PATCH 022/106] changed how distributed is recognized for increasing tolerance --- .../optimizers/discriminative_layer_training_test.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 0d97d74626..5e7b432e9e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -129,10 +129,11 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): """higher tolerance for graph and distributed bc unable to run deterministically""" - if tf.executing_eagerly() and not tf.distribute.has_strategy(): - rtol, atol = 0.01, 0.01 - else: + if not tf.executing_eagerly() or tf.distribute.has_strategy(): rtol, atol = 0.05, 1.00 + # print('graph or dist') + else: + rtol, atol = 0.01, 0.01 return self.assertAllClose( get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol @@ -240,6 +241,6 @@ def generate_tests(devices): if __name__ == "__main__": devices = test_utils.create_virtual_devices(2) generate_tests(devices) - # DiscriminativeLearningTest()._run_tests_in_notebook() + # DiscriminativeLearningTest()._run_tests_in_notebook() # print("done") tf.test.main() From 9eb2d3c04cb789b51788979e8f59d3a46841386d Mon Sep 17 00:00:00 2001 From: hongy <34040987+hyang0129@users.noreply.github.com> Date: Fri, 31 Jan 2020 18:18:30 -0500 Subject: [PATCH 023/106] Redesigned Logic into Optimizer Wrapper (#1) * redesigned methodology to use multiple optimizers (one per unique LR) and pass grads to these multiple optimizers. Should allow for complex optimizers to behave properly * adjusted behavior of resource apply to only return the op if the lr_mult matches the lr_mult of the optimizer should only return 1 op for each var. 
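  As a rough illustration of that redesign (a minimal sketch, with made-up helper names
  rather than the actual wrapper API, assuming each trainable variable has already been
  tagged with an lr_mult_value attribute during model preparation):

    import tensorflow as tf

    def build_optimizer_group(base_optimizer_class, learning_rate, lr_mult_values):
        # one copy of the base optimizer per unique learning rate multiplier
        return {
            mult: base_optimizer_class(learning_rate=learning_rate * mult)
            for mult in set(lr_mult_values)
        }

    def apply_gradients_by_multiplier(optimizer_group, grads_and_vars):
        # bucket each (grad, var) pair by its variable's multiplier, then let the
        # matching optimizer copy apply that bucket; one update op per copy
        buckets = {mult: [] for mult in optimizer_group}
        for grad, var in grads_and_vars:
            buckets[var.lr_mult_value].append((grad, var))
        return [
            opt.apply_gradients(buckets[mult]) for mult, opt in optimizer_group.items()
        ]

    # illustrative usage with hand-tagged variables
    v1, v2 = tf.Variable(1.0), tf.Variable(1.0)
    v1.lr_mult_value, v2.lr_mult_value = 1.0, 0.1
    group = build_optimizer_group(tf.keras.optimizers.SGD, 0.01, [1.0, 0.1])
    apply_gradients_by_multiplier(group, [(tf.constant(0.5), v1), (tf.constant(0.5), v2)])

  Keeping a separate optimizer copy per multiplier is what lets momentum-based optimizers
  keep their own slot state for each group, while the gradients themselves are still
  computed only once and simply routed to the matching copy.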
* updated init file changed training config * removed variable position and added some more comments * removed grouped variables as unnecessary --- tensorflow_addons/optimizers/BUILD | 2 +- tensorflow_addons/optimizers/__init__.py | 2 +- .../discriminative_layer_training.py | 180 +++++++++++------- .../discriminative_layer_training_test.py | 67 +++++-- 4 files changed, 157 insertions(+), 94 deletions(-) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index a2850e68ce..1de588e3a2 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -162,7 +162,7 @@ py_test( py_test( name = "discriminative_layer_training_test", - size = "small", + size = "medium", srcs = [ "discriminative_layer_training_test.py", ], diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index 6244f7c97b..02b55324b9 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -38,5 +38,5 @@ from tensorflow_addons.optimizers.yogi import Yogi from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeLearning, + DiscriminativeWrapper, ) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index b3e00e6fbb..072e246fb9 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -19,71 +19,11 @@ from typeguard import typechecked -def apply_gradients(self, grads_and_vars, *args, **kwargs): - """New apply gradients function. - This intercepts the grads and vars, scales them, then passes them to the old apply gradients function - - :TODO finish docstring - - """ - - if self.testing_flag: - print("Training with layerwise learning rates") - self.testing_flag = False - - grads = [] - var_list = [] - - # scale each grad based on var's lr_mult - for grad, var in grads_and_vars: - grad = tf.math.scalar_mul(var.lr_mult, grad) - grads.append(grad) - var_list.append(var) - - grads_and_vars = list(zip(grads, var_list)) - - return self._apply_gradients(grads_and_vars, *args, **kwargs) - - # @tf.keras.utils.register_keras_serializable(package="Addons") :TODO figure out why other classes have this wrapper -class DiscriminativeLearning(object): - """Discriminative Learning Model Modifier. - Discriminative Learning is a technique that applies different learning rates to - different layers in a model. Generally, a lower learning rate is applied to the - layers closest to the input and a higher learning rate is applied to layers closer - to the output. This method helps in transfer learning by quickly calibrating the head - of a model while preserving the useful weights in the main part of the model. - Example usage - model = tf.keras.Sequential() - model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) - model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) - DiscriminativeLearning(model) - model.fit(x, y) - - Arguments - model: tf.keras.Model, The model to be used for discriminative learning. - It should have at least 1 layer with the attribute lr_mult. The lr_mult should - be set to a value not equal to 1. Otherwise, you will have the exact same - result as not using discriminative learning. 
- - verbose: Bool, to generate a report on how many parameters are affected - - Returns - None - The model object passed to in the arguments will be modified - - References - - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) - """ - - @typechecked - def __init__(self, model: tf.keras.Model, verbose: bool = True): - """Apply logic for discriminative learning to a compiled model - :TODO finish docstring - """ - - self._prepare_model(model=model, verbose=verbose) +class ModelManager: + """Class for grouping functions related to model lr_mult management""" def _get_layers(self, layer): """Helper method to access a layer's sublayers as a list or return an empty list @@ -188,11 +128,10 @@ def _compute_params(self, var_list): return np.sum([np.prod(list(var.shape)) for var in var_list]) def _prepare_model(self, model, verbose=True): - """Prepares a compiled model for discriminative layer training - - Model must be compiled first + """Prepares a built model for disc training :TODO finish docstring """ + # :TODO add checks to ensure model is built layers_with_lr_mult = self._check_for_lr_mult(model, verbose=verbose) if len(layers_with_lr_mult) == 0: @@ -209,12 +148,6 @@ def _prepare_model(self, model, verbose=True): for layer in self._get_lowest_layers(model): self._apply_lr_mult_to_var(layer) - # get opt, move the original apply fn to a safe place, assign new apply fn - - opt = model.optimizer - opt._apply_gradients = opt.apply_gradients - opt.apply_gradients = apply_gradients.__get__(opt) - opt.testing_flag = True vars_with_lr_mult = [ var for var in model.trainable_variables if var.lr_mult_value != 1.0 ] @@ -228,3 +161,108 @@ def _prepare_model(self, model, verbose=True): self._compute_params(model.trainable_variables), ) ) + + +# :TODO disable all other methods bc this is wrapper +# notimplementedreason = '''Optimizer Wrappers only implement minimize, _compute_gradients, apply_gradients, and get_config''' + + +class DiscriminativeWrapper(tf.keras.optimizers.Optimizer): + """Discriminative Layer Training Wrapper + + Discriminative layer training is a technique that applies different learning rates to + different layers in a model. Generally, a lower learning rate is applied to the + layers closest to the input and a higher learning rate is applied to layers closer + to the output. This method helps in transfer learning by quickly calibrating the head + of a model while preserving the useful weights in the main part of the model. + + You should assign the lr_mult attribute to a layer. This will multiply the learning rate + used by the base optimizer for that layer. + + This method creates a copy of the base optimizer for each unique learning rate multipler. + + Performance is similar to using a single copy of the base optimizer as gradients are computed + only once and then passed on. + + Example usage + model = tf.keras.Sequential() + model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) + model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) + model.layers[0].lr_mult = 0.01 + opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) + model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) + model.fit(x, y) + + Arguments + base_optimizer: a class that inherits from tf.keras.optimizers.Optimizer. Do not + pass an instance of the class. + + model: tf.keras.Model, The model to be used for discriminative learning. 
+ It should have at least 1 layer with the attribute lr_mult. The lr_mult should + be set to a value not equal to 1. Otherwise, you will have the exact same + result as not using discriminative learning. + + learning_rate: float, the learning rate for the model + + verbose: Bool, to generate a report on how many parameters are affected + + *args: Args to pass to the base optimizer + + **kwargs: Kwargs to pass to the base optimizer + + Returns + Optimizer - A keras optimizer + + References + - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) + """ + + @typechecked + def __init__( + self, + base_optimizer, + model: tf.keras.Model, + learning_rate: float, + verbose: bool = True, + name="discrim_opt", + *args, + **kwargs + ): + + super().__init__(lr=learning_rate, name=name, *args, **kwargs) + + ModelManager()._prepare_model(model, verbose=verbose) + + self.opt_class = base_optimizer + + # find unique lr_mult + variable_groups = {var.lr_mult_value: None for var in model.trainable_variables} + + self.optimizer_group = [] + + for lr_mult_value in variable_groups.keys(): + opt = self.opt_class(learning_rate=learning_rate * lr_mult_value, **kwargs) + opt.lr_mult_value = lr_mult_value + self.optimizer_group.append(opt) + + def apply_gradients(self, grads_and_vars, name=None): + # :TODO docstring + + # create gradvar buckets for each opt + gvdict = {} + for opt in self.optimizer_group: + gvdict[opt.lr_mult_value] = [] + + # load the gradvars into the appropriate bucket + for grad, var in tuple(grads_and_vars): + gvdict[var.lr_mult_value].append((grad, var)) + + # return results from each opt + return [ + opt.apply_gradients(tuple(gvdict[opt.lr_mult_value])) + for opt in self.optimizer_group + ] + + def get_config(self): + # :TODO determine appropriate config + pass diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 5e7b432e9e..358e244ea8 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -18,7 +18,7 @@ from tensorflow_addons.utils import test_utils import numpy as np from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeLearning, + DiscriminativeWrapper, ) import itertools import os @@ -96,7 +96,7 @@ def toy_rnn(): return model -def get_train_results(model): +def get_train_results(model, verbose=False): """Run a traininng loop and return the results for analysis model must be compiled first """ @@ -105,19 +105,19 @@ def get_train_results(model): y = np.zeros(shape=(32, 5), dtype=np.float32) y[:, 0] = 1.0 - return model.fit(x, y, epochs=10, batch_size=16, verbose=0, shuffle=False) + return model.fit(x, y, epochs=10, batch_size=16, verbose=verbose, shuffle=False) def zipped_permutes(): model_fns = [toy_cnn] losses = [ - tf.keras.losses.BinaryCrossentropy(from_logits=True), + # tf.keras.losses.BinaryCrossentropy(from_logits=True), tf.keras.losses.CategoricalCrossentropy(from_logits=True), - tf.keras.losses.MeanSquaredError(), + # tf.keras.losses.MeanSquaredError(), ] optimzers = [ - tf.keras.optimizers.SGD - # , tf.keras.optimizers.Adam + # tf.keras.optimizers.SGD, + tf.keras.optimizers.Adam, ] return list(itertools.product(model_fns, losses, optimzers)) @@ -140,42 +140,67 @@ def _assert_losses_are_close(self, hist, hist_lr): ) def _assert_training_losses_are_close(self, model, model_lr): - hist = 
get_train_results(model) - hist_lr = get_train_results(model_lr) + hist = get_train_results(model, verbose=False) + hist_lr = get_train_results(model_lr, verbose=False) self._assert_losses_are_close(hist, hist_lr) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """confirm that discriminative learning is almost the same as regular learning""" + learning_rate = 0.01 model = model_fn() - model.compile(loss=loss, optimizer=opt()) + model.compile(loss=loss, optimizer=opt(learning_rate)) model_lr = model_fn() - model_lr.compile(loss=loss, optimizer=opt()) - DiscriminativeLearning(model_lr, verbose=False) + d_opt = DiscriminativeWrapper( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) self._assert_training_losses_are_close(model, model_lr) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): - """confirm 0 lr for the model is the same as model not trainable""" - + """confirm 0 lr_mult for the model is the same as model not trainable""" + learning_rate = 0.01 model = model_fn() model.trainable = False - model.compile(loss=loss, optimizer=opt()) + model.compile(loss=loss, optimizer=opt(learning_rate)) model_lr = model_fn() model_lr.lr_mult = 0.0 - model_lr.compile(loss=loss, optimizer=opt()) - DiscriminativeLearning(model_lr, verbose=False) + d_opt = DiscriminativeWrapper( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + self._assert_training_losses_are_close(model, model_lr) + + def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): + """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr""" + + mult = 0.5 + learning_rate = 0.01 + model = model_fn() + model.compile(loss=loss, optimizer=opt(learning_rate * mult)) + + model_lr = model_fn() + model_lr.lr_mult = mult + d_opt = DiscriminativeWrapper( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) self._assert_training_losses_are_close(model, model_lr) def _test_loss_changes_over_time(self, model_fn, loss, opt): """confirm that model trains with lower lr on specific layer""" + learning_rate = 0.01 model_lr = model_fn() model_lr.layers[0].lr_mult = 0.01 - model_lr.compile(loss=loss, optimizer=opt()) - DiscriminativeLearning(model_lr, verbose=False) + d_opt = DiscriminativeWrapper( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) loss_values = get_losses(get_train_results(model_lr)) self.assertLess(loss_values[-1], loss_values[0]) @@ -230,7 +255,7 @@ def generate_tests(devices): opt=opt, ) - setattr(DiscriminativeLearningTest, testmethodname, testmethod) + # setattr(DiscriminativeLearningTest, testmethodname, testmethod) setattr( DiscriminativeLearningTest, testmethodname + "_distributed", @@ -241,6 +266,6 @@ def generate_tests(devices): if __name__ == "__main__": devices = test_utils.create_virtual_devices(2) generate_tests(devices) - # DiscriminativeLearningTest()._run_tests_in_notebook() + # DiscriminativeLearningTest()._run_tests_in_notebook() # print("done") tf.test.main() From 7e6579fb7fafcf1c4b8fb4fba5c36fee83dfa8bc Mon Sep 17 00:00:00 2001 From: Hongya Date: Fri, 31 Jan 2020 18:24:37 -0500 Subject: [PATCH 024/106] reformatted --- .../optimizers/discriminative_layer_training.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py 
b/tensorflow_addons/optimizers/discriminative_layer_training.py index 072e246fb9..a94ed0a1e0 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -219,14 +219,14 @@ class DiscriminativeWrapper(tf.keras.optimizers.Optimizer): @typechecked def __init__( - self, - base_optimizer, - model: tf.keras.Model, - learning_rate: float, - verbose: bool = True, - name="discrim_opt", - *args, - **kwargs + self, + base_optimizer, + model: tf.keras.Model, + learning_rate: float, + verbose: bool = True, + name="discrim_opt", + *args, + **kwargs ): super().__init__(lr=learning_rate, name=name, *args, **kwargs) From 0fefcdf325e96681c92730d081335b4f1d04ceb3 Mon Sep 17 00:00:00 2001 From: Hongya Date: Fri, 31 Jan 2020 18:59:11 -0500 Subject: [PATCH 025/106] updated documentation explicitly defined serialization as not supported --- .../discriminative_layer_training.py | 138 ++++++++---------- 1 file changed, 62 insertions(+), 76 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index a94ed0a1e0..9db82d2775 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -19,16 +19,11 @@ from typeguard import typechecked -# @tf.keras.utils.register_keras_serializable(package="Addons") :TODO figure out why other classes have this wrapper - - class ModelManager: """Class for grouping functions related to model lr_mult management""" def _get_layers(self, layer): """Helper method to access a layer's sublayers as a list or return an empty list - :TODO finish docstring - """ try: @@ -39,7 +34,6 @@ def _get_layers(self, layer): def _get_lr_mult(self, layer): """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set - :TODO finish docstring """ try: @@ -50,7 +44,6 @@ def _get_lr_mult(self, layer): def _assign_lr_mult(self, layer, lr_mult, override=False): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set - :TODO finish docstring """ try: @@ -66,8 +59,6 @@ def _get_lowest_layers(self, layer, propagate_lr_mult_to_sub_layers=True): tbh I can't properly explain how this works so see this post https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python - :TODO finish docstring - """ mult = self._get_lr_mult(layer) @@ -89,7 +80,6 @@ def _get_lowest_layers(self, layer, propagate_lr_mult_to_sub_layers=True): def _apply_lr_mult_to_var(self, layer): """Helper method to apply the lr mult to the trainable variables of a layer - :TODO finish docstring """ lr_mult = self._get_lr_mult(layer) @@ -100,11 +90,8 @@ def _apply_lr_mult_to_var(self, layer): lr_mult # easier to check vars lr mult in graph and eager ) - # :TODO float16 testing? 
not sure what would happen atm - def _check_for_lr_mult(self, layer, verbose=True, propagate=True): """Identify which layers have an LR mult not equal to 1 - :TODO finish docstring """ layers_with_lr_mult = [] @@ -116,32 +103,27 @@ def _check_for_lr_mult(self, layer, verbose=True, propagate=True): if lr_mult != 1.0: layers_with_lr_mult.append(sub_layer) if verbose: - # :TODO this should be info - print("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) + tf.print("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) return layers_with_lr_mult def _compute_params(self, var_list): """helps compute params to provide a summary that aligns with model.summary() - :TODO finish docstring """ return np.sum([np.prod(list(var.shape)) for var in var_list]) def _prepare_model(self, model, verbose=True): """Prepares a built model for disc training - :TODO finish docstring """ - # :TODO add checks to ensure model is built layers_with_lr_mult = self._check_for_lr_mult(model, verbose=verbose) if len(layers_with_lr_mult) == 0: - # :TODO this should be a warning - print( + tf.print( "Discriminative Layer Training requires an lr_mult attribute on at least one layer" ) - print( + tf.print( "The assigned lr_mult must not be equal to 1. eg: model.layers[0].lr_mult = 0.01" ) @@ -152,9 +134,8 @@ def _prepare_model(self, model, verbose=True): var for var in model.trainable_variables if var.lr_mult_value != 1.0 ] - # :TODO this should be info if verbose: - print( + tf.print( "%i params of %i will learn at a different rate" % ( self._compute_params(vars_with_lr_mult), @@ -163,71 +144,72 @@ def _prepare_model(self, model, verbose=True): ) -# :TODO disable all other methods bc this is wrapper -# notimplementedreason = '''Optimizer Wrappers only implement minimize, _compute_gradients, apply_gradients, and get_config''' - - class DiscriminativeWrapper(tf.keras.optimizers.Optimizer): - """Discriminative Layer Training Wrapper + @typechecked + def __init__( + self, + base_optimizer: object, + model: tf.keras.Model, + learning_rate: float, + verbose: bool = True, + name="discrim_opt", + *args, + **kwargs + ): + """Discriminative Layer Training Wrapper - Discriminative layer training is a technique that applies different learning rates to - different layers in a model. Generally, a lower learning rate is applied to the - layers closest to the input and a higher learning rate is applied to layers closer - to the output. This method helps in transfer learning by quickly calibrating the head - of a model while preserving the useful weights in the main part of the model. + Discriminative layer training is a technique that applies different learning rates to + different layers in a model. Generally, a lower learning rate is applied to the + layers closest to the input and a higher learning rate is applied to layers closer + to the output. This method helps in transfer learning by quickly calibrating the head + of a model while preserving the useful weights in the main part of the model. - You should assign the lr_mult attribute to a layer. This will multiply the learning rate - used by the base optimizer for that layer. + You should assign the lr_mult attribute to a layer. This will multiply the learning rate + used by the base optimizer for that layer. - This method creates a copy of the base optimizer for each unique learning rate multipler. + This method creates a copy of the base optimizer for each unique learning rate multipler. 
- Performance is similar to using a single copy of the base optimizer as gradients are computed - only once and then passed on. + Performance is similar to using a single copy of the base optimizer as gradients are computed + only once and then passed on. - Example usage - model = tf.keras.Sequential() - model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) - model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) - model.layers[0].lr_mult = 0.01 - opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) - model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) - model.fit(x, y) + This optimizer does not support from_config or get_config. To try to preserve the state, you may + serialize the optimizers in the optimizer_group attribute in an instance of this class. - Arguments - base_optimizer: a class that inherits from tf.keras.optimizers.Optimizer. Do not - pass an instance of the class. + Example usage + model = tf.keras.Sequential() + model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) + model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) + model.layers[0].lr_mult = 0.01 + opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) + model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) + model.fit(x, y) - model: tf.keras.Model, The model to be used for discriminative learning. - It should have at least 1 layer with the attribute lr_mult. The lr_mult should - be set to a value not equal to 1. Otherwise, you will have the exact same - result as not using discriminative learning. + Arguments + base_optimizer: a class that inherits from tf.keras.optimizers.Optimizer. Do not + pass an instance of the class. - learning_rate: float, the learning rate for the model + model: tf.keras.Model, The model to be used for discriminative learning. + It should have at least 1 layer with the attribute lr_mult. The lr_mult should + be set to a value not equal to 1. Otherwise, you will have the exact same + result as not using discriminative learning. - verbose: Bool, to generate a report on how many parameters are affected + learning_rate: float, the learning rate for the model - *args: Args to pass to the base optimizer + verbose: Bool, to generate a report on how many parameters are affected - **kwargs: Kwargs to pass to the base optimizer + *args: Args to pass to the base optimizer - Returns - Optimizer - A keras optimizer + **kwargs: Kwargs to pass to the base optimizer - References - - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) - """ + Returns + Optimizer - A keras optimizer to use with model.compile - @typechecked - def __init__( - self, - base_optimizer, - model: tf.keras.Model, - learning_rate: float, - verbose: bool = True, - name="discrim_opt", - *args, - **kwargs - ): + References + - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) + """ + assert issubclass( + base_optimizer, tf.keras.optimizers.Optimizer + ), "Base optimizer must be a class that inherits from tf.keras.optimizers.Optimizer" super().__init__(lr=learning_rate, name=name, *args, **kwargs) @@ -246,7 +228,9 @@ def __init__( self.optimizer_group.append(opt) def apply_gradients(self, grads_and_vars, name=None): - # :TODO docstring + """allocates gradients to each optimizer based on the variable's learning rate multiplier + then applies the gradients. 
In graph mode, it returns 1 operation per optimizer + """ # create gradvar buckets for each opt gvdict = {} @@ -264,5 +248,7 @@ def apply_gradients(self, grads_and_vars, name=None): ] def get_config(self): - # :TODO determine appropriate config - pass + raise NotImplementedError("Optimizer wrapper does not support get config") + + def from_config(self): + raise NotImplementedError("Optimizer wrapper does not support from config") From 595ec9ac5af3536318711dcd99071228a419c46d Mon Sep 17 00:00:00 2001 From: Hongya Date: Fri, 31 Jan 2020 19:05:07 -0500 Subject: [PATCH 026/106] added typecheck for name --- tensorflow_addons/optimizers/discriminative_layer_training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 9db82d2775..6273a197b0 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -152,7 +152,7 @@ def __init__( model: tf.keras.Model, learning_rate: float, verbose: bool = True, - name="discrim_opt", + name: str = "discrim_opt", *args, **kwargs ): From 8fd351d907973d586a4e1e93aa4d81b74ce89993 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 12:24:33 -0500 Subject: [PATCH 027/106] added typecheck for name --- tensorflow_addons/optimizers/__init__.py | 6 +++--- .../optimizers/discriminative_layer_training.py | 9 +++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index 02b55324b9..e82a8ed1d6 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -24,6 +24,8 @@ Triangular2CyclicalLearningRate) from tensorflow_addons.optimizers.cyclical_learning_rate import ( ExponentialCyclicalLearningRate) +from tensorflow_addons.optimizers.discriminative_layer_training import ( + DiscriminativeWrapper) from tensorflow_addons.optimizers.lamb import LAMB from tensorflow_addons.optimizers.lazy_adam import LazyAdam from tensorflow_addons.optimizers.lookahead import Lookahead @@ -37,6 +39,4 @@ extend_with_decoupled_weight_decay) from tensorflow_addons.optimizers.yogi import Yogi -from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeWrapper, -) + diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 6273a197b0..a2f8c0358e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -1,4 +1,4 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,6 +17,7 @@ import tensorflow as tf import numpy as np from typeguard import typechecked +import logging class ModelManager: @@ -103,7 +104,7 @@ def _check_for_lr_mult(self, layer, verbose=True, propagate=True): if lr_mult != 1.0: layers_with_lr_mult.append(sub_layer) if verbose: - tf.print("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) + logging.info("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) return layers_with_lr_mult @@ -119,11 +120,11 @@ def _prepare_model(self, model, verbose=True): layers_with_lr_mult = self._check_for_lr_mult(model, verbose=verbose) if len(layers_with_lr_mult) == 0: - tf.print( + logging.warning( "Discriminative Layer Training requires an lr_mult attribute on at least one layer" ) - tf.print( + logging.warning( "The assigned lr_mult must not be equal to 1. eg: model.layers[0].lr_mult = 0.01" ) From 9d43e442d85e3a0041bb2fc0eab7258a44831e3c Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 12:26:02 -0500 Subject: [PATCH 028/106] fixed blank line at end of init file --- tensorflow_addons/optimizers/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index e82a8ed1d6..c65f6afce4 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -37,6 +37,4 @@ from tensorflow_addons.optimizers.weight_decay_optimizers import SGDW from tensorflow_addons.optimizers.weight_decay_optimizers import ( extend_with_decoupled_weight_decay) -from tensorflow_addons.optimizers.yogi import Yogi - - +from tensorflow_addons.optimizers.yogi import Yogi \ No newline at end of file From 3d581df2de453683af1fe013c5043783dc26753d Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 12:48:04 -0500 Subject: [PATCH 029/106] realized no new line meant to add new line guessing that build file needs to be in alpha order? 
--- tensorflow_addons/optimizers/BUILD | 5 +++-- tensorflow_addons/optimizers/__init__.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index 1de588e3a2..1b35309526 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -9,6 +9,7 @@ py_library( "average_wrapper.py", "conditional_gradient.py", "cyclical_learning_rate.py", + "discriminative_layer_training.py", "lamb.py", "lazy_adam.py", "lookahead.py", @@ -18,8 +19,8 @@ py_library( "stochastic_weight_averaging.py", "utils.py", "weight_decay_optimizers.py", - "yogi.py", - "discriminative_layer_training.py" + "yogi.py" + ], deps = [ "//tensorflow_addons/utils", diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index c65f6afce4..dc2e874f91 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -37,4 +37,4 @@ from tensorflow_addons.optimizers.weight_decay_optimizers import SGDW from tensorflow_addons.optimizers.weight_decay_optimizers import ( extend_with_decoupled_weight_decay) -from tensorflow_addons.optimizers.yogi import Yogi \ No newline at end of file +from tensorflow_addons.optimizers.yogi import Yogi From 742117b5ac70f9a5b80e123f9b2c29715b669da0 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 15:53:46 -0500 Subject: [PATCH 030/106] ran buildifier --- tensorflow_addons/optimizers/BUILD | 5 +---- tensorflow_addons/optimizers/moving_average.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index 1b35309526..ea1c237694 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -19,8 +19,7 @@ py_library( "stochastic_weight_averaging.py", "utils.py", "weight_decay_optimizers.py", - "yogi.py" - + "yogi.py", ], deps = [ "//tensorflow_addons/utils", @@ -159,8 +158,6 @@ py_test( ], ) - - py_test( name = "discriminative_layer_training_test", size = "medium", diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py index 93d5a3a837..b5f2b5cb43 100644 --- a/tensorflow_addons/optimizers/moving_average.py +++ b/tensorflow_addons/optimizers/moving_average.py @@ -45,7 +45,7 @@ def __init__(self, num_updates=None, name="MovingAverage", **kwargs): - r"""Construct a new MovingAverage optimizer. + """Construct a new MovingAverage optimizer. Args: optimizer: str or `tf.keras.optimizers.Optimizer` that will be From 95112384be2e8feb20423e32e048c1382c8d2f24 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 15:55:26 -0500 Subject: [PATCH 031/106] fixed accidentally affecting moving average --- tensorflow_addons/optimizers/moving_average.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py index b5f2b5cb43..93d5a3a837 100644 --- a/tensorflow_addons/optimizers/moving_average.py +++ b/tensorflow_addons/optimizers/moving_average.py @@ -45,7 +45,7 @@ def __init__(self, num_updates=None, name="MovingAverage", **kwargs): - """Construct a new MovingAverage optimizer. + r"""Construct a new MovingAverage optimizer. 
Args: optimizer: str or `tf.keras.optimizers.Optimizer` that will be From 55399f816b997341757eb1442f80c5c38220bdfa Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 4 Feb 2020 15:56:21 -0500 Subject: [PATCH 032/106] changed print to logging.info --- tensorflow_addons/optimizers/discriminative_layer_training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index a2f8c0358e..9f4343f909 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -136,7 +136,7 @@ def _prepare_model(self, model, verbose=True): ] if verbose: - tf.print( + logging.info( "%i params of %i will learn at a different rate" % ( self._compute_params(vars_with_lr_mult), From 3fa5e190eae0511ba118b0f5c379a6b617656941 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 10 Feb 2020 12:57:42 -0500 Subject: [PATCH 033/106] changed print to logging.info --- .../Discriminative_Layer_Training.ipynb | 458 ++++++++++++++++++ 1 file changed, 458 insertions(+) create mode 100644 docs/tutorials/Discriminative_Layer_Training.ipynb diff --git a/docs/tutorials/Discriminative_Layer_Training.ipynb b/docs/tutorials/Discriminative_Layer_Training.ipynb new file mode 100644 index 0000000000..2262888c32 --- /dev/null +++ b/docs/tutorials/Discriminative_Layer_Training.ipynb @@ -0,0 +1,458 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Discriminative Layer Training.ipynb", + "provenance": [], + "private_outputs": true, + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Tce3stUlHN0L" + }, + "source": [ + "##### Copyright 2019 The TensorFlow Authors." + ] + }, + { + "cell_type": "code", + "metadata": { + "cellView": "form", + "colab_type": "code", + "id": "tuOe1ymfHZPu", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "qFdPvlXBOdUN" + }, + "source": [ + "# Title" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MfBg1C5NB3X0" + }, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " View on TensorFlow.org\n", + " \n", + " Run in Google Colab\n", + " \n", + " View source on GitHub\n", + " \n", + " Download notebook\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r6P32iYYV27b" + }, + "source": [ + "[Update button links]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xHxb-dlhMIzW" + }, + "source": [ + "## Overview\n", + "\n", + "This tutorial will demonstrate how to implement discriminative layer training and how it can help in transfer learning. \n", + "\n", + "In this example, we will fine tune a pretrained imagenet resnet50 to classify a subset of the cifar 100 dataset, a tanks vs trains dataset. \n", + "\n", + "This tutorial will demonstrate that discriminative layer training helps improves training speed. The intuition is that lower layers are more generalizable and should be preserved, while higher layers are task specific. Setting a lower learning rate for the lower layers helps preserve general features for use by the high layers and prevent over fitting. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MUXex9ctTuDB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "IqR2PQG4ZaZ0", + "colab": {} + }, + "source": [ + "try:\n", + " %tensorflow_version 2.x\n", + "except:\n", + " pass\n", + "\n", + "import tensorflow as tf\n", + "\n", + "#it will be much faster on gpu, but you can still run this on cpu \n", + "tf.config.list_physical_devices('GPU')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "9ALfZ9Q37Ugn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --no-deps tensorflow-addons~=0.7\n", + "!pip install typeguard\n", + "\n", + "#discriminative wrapper not available in current tfa\n", + "!git clone https://github.com/hyang0129/addons" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "XX8Tf1Xo9VRQ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#duct taping to get the imports \n", + "#will be changed or removed once we can import the wrapper from the main tfa modules\n", + "\n", + "import shutil \n", + "\n", + "shutil.copy(\"addons/tensorflow_addons/optimizers/discriminative_layer_training.py\", \"discriminative_layer_training.py\")\n", + "\n", + "from discriminative_layer_training import DiscriminativeWrapper" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5iwuzaUFIWuT", + "colab_type": "text" + }, + "source": [ + "## Prepare Data" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qs_KxybYIkwJ", + "colab_type": "text" + }, + "source": [ + "First, we want to prepare our dataset. 
We will download cifar 100 and only keep data in label 85 and 90 (tanks and trains) " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eKn-6_Hs90i-", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from skimage import io \n", + "import numpy as np \n", + "\n", + "\n", + "train, test = tf.keras.datasets.cifar100.load_data()\n", + "\n", + "#find the tanks and trains and filter down the dataset\n", + "train_tanksandtrains = np.isin(train[1], [85, 90]).flatten()\n", + "\n", + "train_x = train[0][train_tanksandtrains ]\n", + "train_y = train[1][train_tanksandtrains ]\n", + "#if is tank then 1 else 0 \n", + "train_y = (train_y == 85) * 1\n", + "\n", + "# do the same for test dataset\n", + "test_tanksandtrains = np.isin(test[1], [85, 90]).flatten()\n", + "\n", + "test_x = test[0][test_tanksandtrains] \n", + "test_y = test[1][test_tanksandtrains] \n", + "test_y = (test_y == 85) * 1\n", + "\n", + "\n", + "# show a train \n", + "print(train_y[0])\n", + "io.imshow(train_x[0])\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "47fAulgMLi1R", + "colab_type": "text" + }, + "source": [ + "We will also use some data augmentation because our training set is very small (1k images)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nUqGtS-jLhR_", + "colab_type": "code", + "colab": {} + }, + "source": [ + "\n", + "#create a data generator for augmentation \n", + "datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n", + " featurewise_center=True,\n", + " featurewise_std_normalization=True,\n", + " rotation_range=20,\n", + " width_shift_range=0.2,\n", + " height_shift_range=0.2,\n", + " horizontal_flip=True)\n", + "\n", + "#we only have 1000 training images, so we limit the steps to ensure the generator doesn't run out \n", + "epochs = 10 \n", + "steps = 1000//64\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SofAMdvCLgtP", + "colab_type": "text" + }, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zvJ5X6YVLbtu", + "colab_type": "text" + }, + "source": [ + "##Define Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L6Lk1WYzI9kb", + "colab_type": "text" + }, + "source": [ + "This is our model function. It is a simple resnet50 with a pooling layer as the output. This gets fed to our classifer head. We will initialize this for regular training than reinitialize for discriminative layer training. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZpHgkPVBAjRi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#build a simple pretrained resnet with a custom head \n", + "def get_model(): \n", + " model = tf.keras.Sequential() \n", + " model.add(tf.keras.applications.resnet50.ResNet50(weights = 'imagenet', \n", + " input_shape = (32,32,3),\n", + " include_top = False, \n", + " pooling = 'avg'))\n", + " model.add(tf.keras.layers.Dense(1))\n", + " model.add(tf.keras.layers.Activation('sigmoid'))\n", + " return model \n", + "\n", + "example_model = get_model()\n", + "example_model.summary()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OLjgxrtULp22", + "colab_type": "text" + }, + "source": [ + "##Training Comparison" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uPklCqBYJfrp", + "colab_type": "text" + }, + "source": [ + "This is regular training. 
We assign a learning rate for the whole model then train for 10 epochs. However, because Adam is a momentum based optimizer, it has a tendency to pick up on irrelevant low level features and overfit the data. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jXVl1R-e9gw_", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#get a copy of the model before any training\n", + "model = get_model() \n", + "\n", + "#define optimizer and compile \n", + "opt = tf.keras.optimizers.Adam(learning_rate = 0.001)\n", + "model.compile(loss = 'binary_crossentropy',\n", + " optimizer = opt)\n", + "\n", + "#fit for 10 epochs\n", + "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", + " steps_per_epoch = steps, \n", + " epochs = epochs, \n", + " validation_data= (test_x, test_y))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TeiIv-bMJ0vc", + "colab_type": "text" + }, + "source": [ + "Now we will attempt to correct that behaviour. We know that the lower level features don't need to be changed greatly, so we assign a lower learning rate multiplier of 0.1. If the overall learning rate is 0.001, then the resnet lower layers will learn at 0.1 * 0.001 = 0.0001, which is slower than the head. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GQs0j6eVB2mU", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#get a copy of the model before any training\n", + "model = get_model() \n", + "\n", + "\"\"\"\n", + "intuitively, the lower layers contain general features like shapes, etc \n", + "these features shouldn't need to change drastically for this new task \n", + "\"\"\"\n", + "\n", + "#assign layer 0, which is the resnet50 model an lr_mult of 0.1 to reduce lr \n", + "model.layers[0].lr_mult = 0.1\n", + "\n", + "'''\n", + "use the wrapper around an Adam class (do not pass an instance)\n", + "you can pass other kwargs to the wrapper, they will go straight to the \n", + "base_optimizer. This is because the wrapper creates a copy of the base_optimizer\n", + "for each unique learning rate multiplier \n", + "'''\n", + "opt = DiscriminativeWrapper(base_optimizer = tf.keras.optimizers.Adam, \n", + " model = model, \n", + " learning_rate = 0.001, )\n", + "\n", + "#compile in the same way as a regular model \n", + "model.compile(loss = 'binary_crossentropy',\n", + " optimizer = opt)\n", + "\n", + "#fit in the same way as a regular model\n", + "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", + " steps_per_epoch = steps, \n", + " epochs = epochs, \n", + " validation_data= (test_x, test_y))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J_ruHnAsKKwq", + "colab_type": "text" + }, + "source": [ + "Based on the results, you can see that slowing down the lower layers can help in transfer learning. This method requires more hyper parameter tuning, but can save you a lot of time for transfer learning tasks. By lowering the learning rate for lower layers, you can preserve the more generalizable features and allow your model to generalize better. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "caAItxLmMttp", + "colab_type": "text" + }, + "source": [ + "I hope you find this tutorial helpful and find awesome ways to apply transfer learning and discriminative layer learning. 
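For readers who want to see the mechanism rather than just use it, here is a minimal sketch of what the wrapper does behind the scenes. It is illustrative only: the helper name build_optimizer_groups is made up for this sketch, and the real DiscriminativeWrapper additionally propagates multipliers through nested sublayers before grouping. The idea is simply to bucket trainable variables by their lr_mult and create one base optimizer per bucket with a scaled learning rate.

import tensorflow as tf

def build_optimizer_groups(model, base_lr=0.001):
    # group trainable variables by the lr_mult of the layer that owns them
    groups = {}
    for layer in model.layers:
        mult = getattr(layer, "lr_mult", 1.0)
        groups.setdefault(mult, []).extend(layer.trainable_variables)
    # one optimizer per unique multiplier, each with a scaled learning rate
    optimizers = {
        mult: tf.keras.optimizers.Adam(learning_rate=base_lr * mult)
        for mult in groups
    }
    return optimizers, groups

During a training step each optimizer then applies only the gradients belonging to the variables in its own bucket, which is why every group effectively trains at base_lr * lr_mult.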
" + ] + } + ] +} \ No newline at end of file From f3d402c3105f9718f0829273428217f56f46c0f2 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 10 Feb 2020 13:02:27 -0500 Subject: [PATCH 034/106] Revert "changed print to logging.info" This reverts commit 3fa5e190 --- .../Discriminative_Layer_Training.ipynb | 458 ------------------ 1 file changed, 458 deletions(-) delete mode 100644 docs/tutorials/Discriminative_Layer_Training.ipynb diff --git a/docs/tutorials/Discriminative_Layer_Training.ipynb b/docs/tutorials/Discriminative_Layer_Training.ipynb deleted file mode 100644 index 2262888c32..0000000000 --- a/docs/tutorials/Discriminative_Layer_Training.ipynb +++ /dev/null @@ -1,458 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "Discriminative Layer Training.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.3" - }, - "accelerator": "GPU" - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Tce3stUlHN0L" - }, - "source": [ - "##### Copyright 2019 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "tuOe1ymfHZPu", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "qFdPvlXBOdUN" - }, - "source": [ - "# Title" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MfBg1C5NB3X0" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "r6P32iYYV27b" - }, - "source": [ - "[Update button links]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xHxb-dlhMIzW" - }, - "source": [ - "## Overview\n", - "\n", - "This tutorial will demonstrate how to implement discriminative layer training and how it can help in transfer learning. \n", - "\n", - "In this example, we will fine tune a pretrained imagenet resnet50 to classify a subset of the cifar 100 dataset, a tanks vs trains dataset. \n", - "\n", - "This tutorial will demonstrate that discriminative layer training helps improves training speed. The intuition is that lower layers are more generalizable and should be preserved, while higher layers are task specific. Setting a lower learning rate for the lower layers helps preserve general features for use by the high layers and prevent over fitting. \n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MUXex9ctTuDB" - }, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "IqR2PQG4ZaZ0", - "colab": {} - }, - "source": [ - "try:\n", - " %tensorflow_version 2.x\n", - "except:\n", - " pass\n", - "\n", - "import tensorflow as tf\n", - "\n", - "#it will be much faster on gpu, but you can still run this on cpu \n", - "tf.config.list_physical_devices('GPU')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "9ALfZ9Q37Ugn", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!pip install --no-deps tensorflow-addons~=0.7\n", - "!pip install typeguard\n", - "\n", - "#discriminative wrapper not available in current tfa\n", - "!git clone https://github.com/hyang0129/addons" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "XX8Tf1Xo9VRQ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#duct taping to get the imports \n", - "#will be changed or removed once we can import the wrapper from the main tfa modules\n", - "\n", - "import shutil \n", - "\n", - "shutil.copy(\"addons/tensorflow_addons/optimizers/discriminative_layer_training.py\", \"discriminative_layer_training.py\")\n", - "\n", - "from discriminative_layer_training import DiscriminativeWrapper" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5iwuzaUFIWuT", - "colab_type": "text" - }, - "source": [ - "## Prepare Data" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qs_KxybYIkwJ", - "colab_type": "text" - }, - "source": [ - "First, we want to prepare our dataset. 
We will download cifar 100 and only keep data in label 85 and 90 (tanks and trains) " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "eKn-6_Hs90i-", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from skimage import io \n", - "import numpy as np \n", - "\n", - "\n", - "train, test = tf.keras.datasets.cifar100.load_data()\n", - "\n", - "#find the tanks and trains and filter down the dataset\n", - "train_tanksandtrains = np.isin(train[1], [85, 90]).flatten()\n", - "\n", - "train_x = train[0][train_tanksandtrains ]\n", - "train_y = train[1][train_tanksandtrains ]\n", - "#if is tank then 1 else 0 \n", - "train_y = (train_y == 85) * 1\n", - "\n", - "# do the same for test dataset\n", - "test_tanksandtrains = np.isin(test[1], [85, 90]).flatten()\n", - "\n", - "test_x = test[0][test_tanksandtrains] \n", - "test_y = test[1][test_tanksandtrains] \n", - "test_y = (test_y == 85) * 1\n", - "\n", - "\n", - "# show a train \n", - "print(train_y[0])\n", - "io.imshow(train_x[0])\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "47fAulgMLi1R", - "colab_type": "text" - }, - "source": [ - "We will also use some data augmentation because our training set is very small (1k images)" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "nUqGtS-jLhR_", - "colab_type": "code", - "colab": {} - }, - "source": [ - "\n", - "#create a data generator for augmentation \n", - "datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n", - " featurewise_center=True,\n", - " featurewise_std_normalization=True,\n", - " rotation_range=20,\n", - " width_shift_range=0.2,\n", - " height_shift_range=0.2,\n", - " horizontal_flip=True)\n", - "\n", - "#we only have 1000 training images, so we limit the steps to ensure the generator doesn't run out \n", - "epochs = 10 \n", - "steps = 1000//64\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SofAMdvCLgtP", - "colab_type": "text" - }, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zvJ5X6YVLbtu", - "colab_type": "text" - }, - "source": [ - "##Define Model" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "L6Lk1WYzI9kb", - "colab_type": "text" - }, - "source": [ - "This is our model function. It is a simple resnet50 with a pooling layer as the output. This gets fed to our classifer head. We will initialize this for regular training than reinitialize for discriminative layer training. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ZpHgkPVBAjRi", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#build a simple pretrained resnet with a custom head \n", - "def get_model(): \n", - " model = tf.keras.Sequential() \n", - " model.add(tf.keras.applications.resnet50.ResNet50(weights = 'imagenet', \n", - " input_shape = (32,32,3),\n", - " include_top = False, \n", - " pooling = 'avg'))\n", - " model.add(tf.keras.layers.Dense(1))\n", - " model.add(tf.keras.layers.Activation('sigmoid'))\n", - " return model \n", - "\n", - "example_model = get_model()\n", - "example_model.summary()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OLjgxrtULp22", - "colab_type": "text" - }, - "source": [ - "##Training Comparison" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uPklCqBYJfrp", - "colab_type": "text" - }, - "source": [ - "This is regular training. 
We assign a learning rate for the whole model then train for 10 epochs. However, because Adam is a momentum based optimizer, it has a tendency to pick up on irrelevant low level features and overfit the data. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jXVl1R-e9gw_", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#get a copy of the model before any training\n", - "model = get_model() \n", - "\n", - "#define optimizer and compile \n", - "opt = tf.keras.optimizers.Adam(learning_rate = 0.001)\n", - "model.compile(loss = 'binary_crossentropy',\n", - " optimizer = opt)\n", - "\n", - "#fit for 10 epochs\n", - "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", - " steps_per_epoch = steps, \n", - " epochs = epochs, \n", - " validation_data= (test_x, test_y))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TeiIv-bMJ0vc", - "colab_type": "text" - }, - "source": [ - "Now we will attempt to correct that behaviour. We know that the lower level features don't need to be changed greatly, so we assign a lower learning rate multiplier of 0.1. If the overall learning rate is 0.001, then the resnet lower layers will learn at 0.1 * 0.001 = 0.0001, which is slower than the head. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GQs0j6eVB2mU", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#get a copy of the model before any training\n", - "model = get_model() \n", - "\n", - "\"\"\"\n", - "intuitively, the lower layers contain general features like shapes, etc \n", - "these features shouldn't need to change drastically for this new task \n", - "\"\"\"\n", - "\n", - "#assign layer 0, which is the resnet50 model an lr_mult of 0.1 to reduce lr \n", - "model.layers[0].lr_mult = 0.1\n", - "\n", - "'''\n", - "use the wrapper around an Adam class (do not pass an instance)\n", - "you can pass other kwargs to the wrapper, they will go straight to the \n", - "base_optimizer. This is because the wrapper creates a copy of the base_optimizer\n", - "for each unique learning rate multiplier \n", - "'''\n", - "opt = DiscriminativeWrapper(base_optimizer = tf.keras.optimizers.Adam, \n", - " model = model, \n", - " learning_rate = 0.001, )\n", - "\n", - "#compile in the same way as a regular model \n", - "model.compile(loss = 'binary_crossentropy',\n", - " optimizer = opt)\n", - "\n", - "#fit in the same way as a regular model\n", - "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", - " steps_per_epoch = steps, \n", - " epochs = epochs, \n", - " validation_data= (test_x, test_y))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "J_ruHnAsKKwq", - "colab_type": "text" - }, - "source": [ - "Based on the results, you can see that slowing down the lower layers can help in transfer learning. This method requires more hyper parameter tuning, but can save you a lot of time for transfer learning tasks. By lowering the learning rate for lower layers, you can preserve the more generalizable features and allow your model to generalize better. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "caAItxLmMttp", - "colab_type": "text" - }, - "source": [ - "I hope you find this tutorial helpful and find awesome ways to apply transfer learning and discriminative layer learning. 
" - ] - } - ] -} \ No newline at end of file From 3457b3ba13bb7490f6d26f4099f70ed541ae2989 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 10 Feb 2020 13:03:05 -0500 Subject: [PATCH 035/106] added tutorial. tutorial doesn't import from tfa. May need to remove from PR. Please let me know --- .../Discriminative_Layer_Training.ipynb | 458 ++++++++++++++++++ 1 file changed, 458 insertions(+) create mode 100644 docs/tutorials/Discriminative_Layer_Training.ipynb diff --git a/docs/tutorials/Discriminative_Layer_Training.ipynb b/docs/tutorials/Discriminative_Layer_Training.ipynb new file mode 100644 index 0000000000..2262888c32 --- /dev/null +++ b/docs/tutorials/Discriminative_Layer_Training.ipynb @@ -0,0 +1,458 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Discriminative Layer Training.ipynb", + "provenance": [], + "private_outputs": true, + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Tce3stUlHN0L" + }, + "source": [ + "##### Copyright 2019 The TensorFlow Authors." + ] + }, + { + "cell_type": "code", + "metadata": { + "cellView": "form", + "colab_type": "code", + "id": "tuOe1ymfHZPu", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "qFdPvlXBOdUN" + }, + "source": [ + "# Title" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MfBg1C5NB3X0" + }, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " View on TensorFlow.org\n", + " \n", + " Run in Google Colab\n", + " \n", + " View source on GitHub\n", + " \n", + " Download notebook\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r6P32iYYV27b" + }, + "source": [ + "[Update button links]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xHxb-dlhMIzW" + }, + "source": [ + "## Overview\n", + "\n", + "This tutorial will demonstrate how to implement discriminative layer training and how it can help in transfer learning. \n", + "\n", + "In this example, we will fine tune a pretrained imagenet resnet50 to classify a subset of the cifar 100 dataset, a tanks vs trains dataset. \n", + "\n", + "This tutorial will demonstrate that discriminative layer training helps improves training speed. The intuition is that lower layers are more generalizable and should be preserved, while higher layers are task specific. Setting a lower learning rate for the lower layers helps preserve general features for use by the high layers and prevent over fitting. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MUXex9ctTuDB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "IqR2PQG4ZaZ0", + "colab": {} + }, + "source": [ + "try:\n", + " %tensorflow_version 2.x\n", + "except:\n", + " pass\n", + "\n", + "import tensorflow as tf\n", + "\n", + "#it will be much faster on gpu, but you can still run this on cpu \n", + "tf.config.list_physical_devices('GPU')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "9ALfZ9Q37Ugn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --no-deps tensorflow-addons~=0.7\n", + "!pip install typeguard\n", + "\n", + "#discriminative wrapper not available in current tfa\n", + "!git clone https://github.com/hyang0129/addons" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "XX8Tf1Xo9VRQ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#duct taping to get the imports \n", + "#will be changed or removed once we can import the wrapper from the main tfa modules\n", + "\n", + "import shutil \n", + "\n", + "shutil.copy(\"addons/tensorflow_addons/optimizers/discriminative_layer_training.py\", \"discriminative_layer_training.py\")\n", + "\n", + "from discriminative_layer_training import DiscriminativeWrapper" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5iwuzaUFIWuT", + "colab_type": "text" + }, + "source": [ + "## Prepare Data" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qs_KxybYIkwJ", + "colab_type": "text" + }, + "source": [ + "First, we want to prepare our dataset. 
We will download cifar 100 and only keep data in label 85 and 90 (tanks and trains) " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eKn-6_Hs90i-", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from skimage import io \n", + "import numpy as np \n", + "\n", + "\n", + "train, test = tf.keras.datasets.cifar100.load_data()\n", + "\n", + "#find the tanks and trains and filter down the dataset\n", + "train_tanksandtrains = np.isin(train[1], [85, 90]).flatten()\n", + "\n", + "train_x = train[0][train_tanksandtrains ]\n", + "train_y = train[1][train_tanksandtrains ]\n", + "#if is tank then 1 else 0 \n", + "train_y = (train_y == 85) * 1\n", + "\n", + "# do the same for test dataset\n", + "test_tanksandtrains = np.isin(test[1], [85, 90]).flatten()\n", + "\n", + "test_x = test[0][test_tanksandtrains] \n", + "test_y = test[1][test_tanksandtrains] \n", + "test_y = (test_y == 85) * 1\n", + "\n", + "\n", + "# show a train \n", + "print(train_y[0])\n", + "io.imshow(train_x[0])\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "47fAulgMLi1R", + "colab_type": "text" + }, + "source": [ + "We will also use some data augmentation because our training set is very small (1k images)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nUqGtS-jLhR_", + "colab_type": "code", + "colab": {} + }, + "source": [ + "\n", + "#create a data generator for augmentation \n", + "datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n", + " featurewise_center=True,\n", + " featurewise_std_normalization=True,\n", + " rotation_range=20,\n", + " width_shift_range=0.2,\n", + " height_shift_range=0.2,\n", + " horizontal_flip=True)\n", + "\n", + "#we only have 1000 training images, so we limit the steps to ensure the generator doesn't run out \n", + "epochs = 10 \n", + "steps = 1000//64\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SofAMdvCLgtP", + "colab_type": "text" + }, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zvJ5X6YVLbtu", + "colab_type": "text" + }, + "source": [ + "##Define Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L6Lk1WYzI9kb", + "colab_type": "text" + }, + "source": [ + "This is our model function. It is a simple resnet50 with a pooling layer as the output. This gets fed to our classifer head. We will initialize this for regular training than reinitialize for discriminative layer training. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZpHgkPVBAjRi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#build a simple pretrained resnet with a custom head \n", + "def get_model(): \n", + " model = tf.keras.Sequential() \n", + " model.add(tf.keras.applications.resnet50.ResNet50(weights = 'imagenet', \n", + " input_shape = (32,32,3),\n", + " include_top = False, \n", + " pooling = 'avg'))\n", + " model.add(tf.keras.layers.Dense(1))\n", + " model.add(tf.keras.layers.Activation('sigmoid'))\n", + " return model \n", + "\n", + "example_model = get_model()\n", + "example_model.summary()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OLjgxrtULp22", + "colab_type": "text" + }, + "source": [ + "##Training Comparison" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uPklCqBYJfrp", + "colab_type": "text" + }, + "source": [ + "This is regular training. 
We assign a learning rate for the whole model then train for 10 epochs. However, because Adam is a momentum based optimizer, it has a tendency to pick up on irrelevant low level features and overfit the data. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jXVl1R-e9gw_", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#get a copy of the model before any training\n", + "model = get_model() \n", + "\n", + "#define optimizer and compile \n", + "opt = tf.keras.optimizers.Adam(learning_rate = 0.001)\n", + "model.compile(loss = 'binary_crossentropy',\n", + " optimizer = opt)\n", + "\n", + "#fit for 10 epochs\n", + "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", + " steps_per_epoch = steps, \n", + " epochs = epochs, \n", + " validation_data= (test_x, test_y))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TeiIv-bMJ0vc", + "colab_type": "text" + }, + "source": [ + "Now we will attempt to correct that behaviour. We know that the lower level features don't need to be changed greatly, so we assign a lower learning rate multiplier of 0.1. If the overall learning rate is 0.001, then the resnet lower layers will learn at 0.1 * 0.001 = 0.0001, which is slower than the head. " + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GQs0j6eVB2mU", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#get a copy of the model before any training\n", + "model = get_model() \n", + "\n", + "\"\"\"\n", + "intuitively, the lower layers contain general features like shapes, etc \n", + "these features shouldn't need to change drastically for this new task \n", + "\"\"\"\n", + "\n", + "#assign layer 0, which is the resnet50 model an lr_mult of 0.1 to reduce lr \n", + "model.layers[0].lr_mult = 0.1\n", + "\n", + "'''\n", + "use the wrapper around an Adam class (do not pass an instance)\n", + "you can pass other kwargs to the wrapper, they will go straight to the \n", + "base_optimizer. This is because the wrapper creates a copy of the base_optimizer\n", + "for each unique learning rate multiplier \n", + "'''\n", + "opt = DiscriminativeWrapper(base_optimizer = tf.keras.optimizers.Adam, \n", + " model = model, \n", + " learning_rate = 0.001, )\n", + "\n", + "#compile in the same way as a regular model \n", + "model.compile(loss = 'binary_crossentropy',\n", + " optimizer = opt)\n", + "\n", + "#fit in the same way as a regular model\n", + "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", + " steps_per_epoch = steps, \n", + " epochs = epochs, \n", + " validation_data= (test_x, test_y))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J_ruHnAsKKwq", + "colab_type": "text" + }, + "source": [ + "Based on the results, you can see that slowing down the lower layers can help in transfer learning. This method requires more hyper parameter tuning, but can save you a lot of time for transfer learning tasks. By lowering the learning rate for lower layers, you can preserve the more generalizable features and allow your model to generalize better. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "caAItxLmMttp", + "colab_type": "text" + }, + "source": [ + "I hope you find this tutorial helpful and find awesome ways to apply transfer learning and discriminative layer learning. 
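If you need more control than a single multiplier on the backbone, you can assign different values at several depths before building the wrapper; multipliers propagate down to nested sublayers, a sublayer that sets its own value keeps it, and any layer left untouched defaults to 1.0. The sketch below is a hypothetical split: the 0.3 and 0.05 values and the half-way cut are arbitrary choices for illustration, not a recommendation.

model = get_model()

backbone = model.layers[0]                      # the ResNet50 sub-model
backbone.lr_mult = 0.3                          # propagated to every sublayer...
for sub in backbone.layers[: len(backbone.layers) // 2]:
    sub.lr_mult = 0.05                          # ...except those that set their own value

opt = DiscriminativeWrapper(base_optimizer=tf.keras.optimizers.Adam,
                            model=model,
                            learning_rate=0.001)
model.compile(loss='binary_crossentropy', optimizer=opt)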
" + ] + } + ] +} \ No newline at end of file From 29d440a821aa4bab9fc2a41d2152bb3f89386d59 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 11:38:21 -0500 Subject: [PATCH 036/106] refactored to use static method refactored to use getattr updated warning on not using lr_mult expanded on some docstrings --- .../discriminative_layer_training.py | 94 +++++++++---------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 9f4343f909..6038273ad3 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -23,27 +23,20 @@ class ModelManager: """Class for grouping functions related to model lr_mult management""" - def _get_layers(self, layer): + @staticmethod + def _get_layers(layer): """Helper method to access a layer's sublayers as a list or return an empty list """ + return getattr(layer, 'layers', []) - try: - return layer.layers - except AttributeError: - return [] - - def _get_lr_mult(self, layer): - + @staticmethod + def _get_lr_mult(layer): """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set """ + return getattr(layer, 'lr_mult', 1.0) - try: - return layer.lr_mult - except AttributeError: - return 1.0 - - def _assign_lr_mult(self, layer, lr_mult, override=False): - + @staticmethod + def _assign_lr_mult(layer, lr_mult, override=False): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set """ @@ -53,54 +46,52 @@ def _assign_lr_mult(self, layer, lr_mult, override=False): except AttributeError: layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult - def _get_lowest_layers(self, layer, propagate_lr_mult_to_sub_layers=True): + @staticmethod + def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): """Helper method iterate through all nested layers of an object that behaves like a layer or model By default, we want to propagate the lr mult to the lower layers. - tbh I can't properly explain how this works so see this post + https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python """ - mult = self._get_lr_mult(layer) - layers = self._get_layers(layer) + mult = ModelManager._get_lr_mult(layer) + layers = ModelManager._get_layers(layer) if len(layers) > 0: for sublayer in layers: # we generally want to propagate the lr mult to the lower layers if propagate_lr_mult_to_sub_layers: - self._assign_lr_mult(sublayer, mult) + ModelManager._assign_lr_mult(sublayer, mult) # recursively iterate through the nested layers - for nested_sublayer in self._get_lowest_layers(sublayer): + for nested_sublayer in ModelManager._get_lowest_layers(sublayer): yield nested_sublayer else: yield layer - def _apply_lr_mult_to_var(self, layer): + @staticmethod + def _apply_lr_mult_to_var(layer): """Helper method to apply the lr mult to the trainable variables of a layer """ - - lr_mult = self._get_lr_mult(layer) - + lr_mult = ModelManager._get_lr_mult(layer) for var in layer.trainable_variables: - var.lr_mult = tf.convert_to_tensor(lr_mult, tf.float32) # 0D tensor - var.lr_mult_value = ( - lr_mult # easier to check vars lr mult in graph and eager - ) + var.lr_mult = lr_mult #the lr_mult behaves as a hyper parameter and not a variable. 
it will not be a tensor - def _check_for_lr_mult(self, layer, verbose=True, propagate=True): + @staticmethod + def _check_for_lr_mult(layer, verbose=True, propagate=True): """Identify which layers have an LR mult not equal to 1 """ layers_with_lr_mult = [] - for sub_layer in self._get_lowest_layers( + for sub_layer in ModelManager._get_lowest_layers( layer, propagate_lr_mult_to_sub_layers=propagate ): - lr_mult = self._get_lr_mult(sub_layer) + lr_mult = ModelManager._get_lr_mult(sub_layer) if lr_mult != 1.0: layers_with_lr_mult.append(sub_layer) if verbose: @@ -108,39 +99,42 @@ def _check_for_lr_mult(self, layer, verbose=True, propagate=True): return layers_with_lr_mult - def _compute_params(self, var_list): + @staticmethod + def _compute_params(var_list): """helps compute params to provide a summary that aligns with model.summary() """ return np.sum([np.prod(list(var.shape)) for var in var_list]) - def _prepare_model(self, model, verbose=True): - """Prepares a built model for disc training + @staticmethod + def _prepare_model(model, verbose=True): + """Prepares a model for disc training """ - layers_with_lr_mult = self._check_for_lr_mult(model, verbose=verbose) + layers_with_lr_mult = ModelManager._check_for_lr_mult(model, verbose=verbose) if len(layers_with_lr_mult) == 0: logging.warning( - "Discriminative Layer Training requires an lr_mult attribute on at least one layer" + """ + No Layer has been assigned an lr_mult attribute != 1.0 + Discriminative Layer Training will apply the same learning rate to all layers + It will perform as if you did not use Discriminative Layer Training + """ ) - logging.warning( - "The assigned lr_mult must not be equal to 1. eg: model.layers[0].lr_mult = 0.01" - ) - for layer in self._get_lowest_layers(model): - self._apply_lr_mult_to_var(layer) + for layer in ModelManager._get_lowest_layers(model): + ModelManager._apply_lr_mult_to_var(layer) vars_with_lr_mult = [ - var for var in model.trainable_variables if var.lr_mult_value != 1.0 + var for var in model.trainable_variables if var.lr_mult != 1.0 ] if verbose: logging.info( "%i params of %i will learn at a different rate" % ( - self._compute_params(vars_with_lr_mult), - self._compute_params(model.trainable_variables), + ModelManager._compute_params(vars_with_lr_mult), + ModelManager._compute_params(model.trainable_variables), ) ) @@ -214,7 +208,7 @@ def __init__( super().__init__(lr=learning_rate, name=name, *args, **kwargs) - ModelManager()._prepare_model(model, verbose=verbose) + ModelManager._prepare_model(model, verbose=verbose) self.opt_class = base_optimizer @@ -224,7 +218,7 @@ def __init__( self.optimizer_group = [] for lr_mult_value in variable_groups.keys(): - opt = self.opt_class(learning_rate=learning_rate * lr_mult_value, **kwargs) + opt = self.opt_class(learning_rate=learning_rate * lr_mult, **kwargs) opt.lr_mult_value = lr_mult_value self.optimizer_group.append(opt) @@ -236,13 +230,17 @@ def apply_gradients(self, grads_and_vars, name=None): # create gradvar buckets for each opt gvdict = {} for opt in self.optimizer_group: - gvdict[opt.lr_mult_value] = [] + gvdict[opt.lr_mult] = [] # load the gradvars into the appropriate bucket for grad, var in tuple(grads_and_vars): gvdict[var.lr_mult_value].append((grad, var)) # return results from each opt + # in eager mode, this will return a list of irrelevant results for each optimizer + # in eager mode, the function apply_gradients actually applies gradients to the model + # in graph mode, this will return a list of tensor ops for each opt + # in graph 
mode, apply_gradients creates the tensor ops for applying gradients on the graph return [ opt.apply_gradients(tuple(gvdict[opt.lr_mult_value])) for opt in self.optimizer_group From 5b0531c25118b54a41c81ac0aba75631d9c4c240 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 11:39:46 -0500 Subject: [PATCH 037/106] updated the usage of lr_mult in variables --- .../discriminative_layer_training.py | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 6038273ad3..fcaa4a921c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -20,7 +20,7 @@ import logging -class ModelManager: +class DiscriminativeModelManager: """Class for grouping functions related to model lr_mult management""" @staticmethod @@ -56,18 +56,18 @@ def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python """ - mult = ModelManager._get_lr_mult(layer) - layers = ModelManager._get_layers(layer) + mult = DiscriminativeModelManager._get_lr_mult(layer) + layers = DiscriminativeModelManager._get_layers(layer) if len(layers) > 0: for sublayer in layers: # we generally want to propagate the lr mult to the lower layers if propagate_lr_mult_to_sub_layers: - ModelManager._assign_lr_mult(sublayer, mult) + DiscriminativeModelManager._assign_lr_mult(sublayer, mult) # recursively iterate through the nested layers - for nested_sublayer in ModelManager._get_lowest_layers(sublayer): + for nested_sublayer in DiscriminativeModelManager._get_lowest_layers(sublayer): yield nested_sublayer else: @@ -77,9 +77,10 @@ def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): def _apply_lr_mult_to_var(layer): """Helper method to apply the lr mult to the trainable variables of a layer """ - lr_mult = ModelManager._get_lr_mult(layer) + lr_mult = DiscriminativeModelManager._get_lr_mult(layer) for var in layer.trainable_variables: var.lr_mult = lr_mult #the lr_mult behaves as a hyper parameter and not a variable. 
it will not be a tensor + #there's not benefit in setting the lr_mult as a variable because it does not interact with tensors @staticmethod def _check_for_lr_mult(layer, verbose=True, propagate=True): @@ -88,10 +89,10 @@ def _check_for_lr_mult(layer, verbose=True, propagate=True): layers_with_lr_mult = [] - for sub_layer in ModelManager._get_lowest_layers( + for sub_layer in DiscriminativeModelManager._get_lowest_layers( layer, propagate_lr_mult_to_sub_layers=propagate ): - lr_mult = ModelManager._get_lr_mult(sub_layer) + lr_mult = DiscriminativeModelManager._get_lr_mult(sub_layer) if lr_mult != 1.0: layers_with_lr_mult.append(sub_layer) if verbose: @@ -110,7 +111,7 @@ def _prepare_model(model, verbose=True): """Prepares a model for disc training """ - layers_with_lr_mult = ModelManager._check_for_lr_mult(model, verbose=verbose) + layers_with_lr_mult = DiscriminativeModelManager._check_for_lr_mult(model, verbose=verbose) if len(layers_with_lr_mult) == 0: logging.warning( @@ -122,8 +123,8 @@ def _prepare_model(model, verbose=True): ) - for layer in ModelManager._get_lowest_layers(model): - ModelManager._apply_lr_mult_to_var(layer) + for layer in DiscriminativeModelManager._get_lowest_layers(model): + DiscriminativeModelManager._apply_lr_mult_to_var(layer) vars_with_lr_mult = [ var for var in model.trainable_variables if var.lr_mult != 1.0 @@ -133,8 +134,8 @@ def _prepare_model(model, verbose=True): logging.info( "%i params of %i will learn at a different rate" % ( - ModelManager._compute_params(vars_with_lr_mult), - ModelManager._compute_params(model.trainable_variables), + DiscriminativeModelManager._compute_params(vars_with_lr_mult), + DiscriminativeModelManager._compute_params(model.trainable_variables), ) ) @@ -208,7 +209,7 @@ def __init__( super().__init__(lr=learning_rate, name=name, *args, **kwargs) - ModelManager._prepare_model(model, verbose=verbose) + DiscriminativeModelManager._prepare_model(model, verbose=verbose) self.opt_class = base_optimizer From 40e8bba12a7420945b174f1937b887bf36d6add8 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 11:41:25 -0500 Subject: [PATCH 038/106] renamed discriminative wrapper to disclayeropt --- tensorflow_addons/optimizers/__init__.py | 2 +- .../optimizers/discriminative_layer_training.py | 2 +- .../optimizers/discriminative_layer_training_test.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index dc2e874f91..4faa39e68c 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -25,7 +25,7 @@ from tensorflow_addons.optimizers.cyclical_learning_rate import ( ExponentialCyclicalLearningRate) from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeWrapper) + DiscriminativeLayerOptimizer) from tensorflow_addons.optimizers.lamb import LAMB from tensorflow_addons.optimizers.lazy_adam import LazyAdam from tensorflow_addons.optimizers.lookahead import Lookahead diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index fcaa4a921c..3671ab2fa2 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -140,7 +140,7 @@ def _prepare_model(model, verbose=True): ) -class DiscriminativeWrapper(tf.keras.optimizers.Optimizer): +class 
DiscriminativeLayerOptimizer(tf.keras.optimizers.Optimizer): @typechecked def __init__( self, diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 358e244ea8..a5382c01c2 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -18,7 +18,7 @@ from tensorflow_addons.utils import test_utils import numpy as np from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeWrapper, + DiscriminativeLayerOptimizer, ) import itertools import os @@ -151,7 +151,7 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): model.compile(loss=loss, optimizer=opt(learning_rate)) model_lr = model_fn() - d_opt = DiscriminativeWrapper( + d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) @@ -167,7 +167,7 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): model_lr = model_fn() model_lr.lr_mult = 0.0 - d_opt = DiscriminativeWrapper( + d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) @@ -184,7 +184,7 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): model_lr = model_fn() model_lr.lr_mult = mult - d_opt = DiscriminativeWrapper( + d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) @@ -197,7 +197,7 @@ def _test_loss_changes_over_time(self, model_fn, loss, opt): learning_rate = 0.01 model_lr = model_fn() model_lr.layers[0].lr_mult = 0.01 - d_opt = DiscriminativeWrapper( + d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) From e3781e09ba289e98d34f1f410dea243efea6e600 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 11:43:05 -0500 Subject: [PATCH 039/106] added note to disuade directly calling apply_gradients --- tensorflow_addons/optimizers/discriminative_layer_training.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 3671ab2fa2..4156833a09 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -226,6 +226,7 @@ def __init__( def apply_gradients(self, grads_and_vars, name=None): """allocates gradients to each optimizer based on the variable's learning rate multiplier then applies the gradients. 
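A minimal end-to-end sketch of the intended workflow (illustrative only; the tiny model and dummy data below are made up for this example, and in normal use the optimizer is driven through model.fit rather than by calling apply_gradients directly):

    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
        tf.keras.layers.Dense(1),
    ])
    model.layers[0].lr_mult = 0.1  # first layer learns 10x slower than the head
    opt = DiscriminativeLayerOptimizer(
        tf.keras.optimizers.Adam, model, learning_rate=0.01, verbose=False
    )
    model.compile(loss="mse", optimizer=opt)
    model.fit(np.ones((8, 4)), np.zeros((8, 1)), epochs=1, verbose=0)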
In graph mode, it returns 1 operation per optimizer + Please use the model.fit method instead of accessing this directly """ # create gradvar buckets for each opt From 9c62b010e3b7b84372848348398cbabc57ebb601 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 11:58:14 -0500 Subject: [PATCH 040/106] updated toy_cnn to use tempdir and no longer call context.eager implemented toy_rnn function with same flow as toycnn --- .../discriminative_layer_training_test.py | 114 +++++++++++------- 1 file changed, 70 insertions(+), 44 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index a5382c01c2..3b1f924178 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -23,9 +23,10 @@ import itertools import os from tensorflow.python.eager import context +import tempfile -def toy_cnn(): +def toy_cnn(first_run = False): """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -33,67 +34,92 @@ def toy_cnn(): other models returned by this function, for the duration of that continuous integration run + Run this function before running the tests and set first run to true + model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. """ - cnn_model_path = "cnn.h5" - - if not os.path.exists(cnn_model_path): - # force eager mode for simple initialization of vars - with context.eager_mode(): - tf.random.set_seed(1) - bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( - include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" - ) - - # take the first few layers so we cover BN, Conv, Pooling ops for testing - net = tf.keras.models.Model( - inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output - ) - model = tf.keras.Sequential( - [ - net, - tf.keras.layers.GlobalAveragePooling2D(), - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(5, name="head"), - ] - ) - # always save and never return initialized model from memory - # it seems you cannot pass variables from a nested eager context to its parent graph context - - model.save(cnn_model_path) - - # load the initialized model from the disk - return tf.keras.models.load_model(cnn_model_path) - - -# TODO: get toy_run to work -def toy_rnn(): - """ + cnn_model_path = os.path.join(tempfile.gettempdir() , "cnn.h5") + + if first_run: + bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( + include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" + ) + + # take the first few layers so we cover BN, Conv, Pooling ops for testing + net = tf.keras.models.Model( + inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output + ) + model = tf.keras.Sequential( + [ + net, + tf.keras.layers.GlobalAveragePooling2D(), + tf.keras.layers.Dropout(0.5), + tf.keras.layers.Dense(5, name="head"), + ] + ) + + model.save(cnn_model_path) + # this creates a model with set weights for testing purposes + # most tests will assert equivalency between a model with discriminative training and a model without + return None + else: + assert os.path.exists((cnn_model_path)), 'Could not find h5 file at path %s ' % cnn_model_path + # load the variable initialized model from the disk + return tf.keras.models.load_model(cnn_model_path) + +def toy_rnn(first_run = 
False): + """ Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits intended to work with + The model returned by this function should have identical weights to all + other models returned by this function, for the duration of that + continuous integration run + + Run this function before running the tests and set first run to true + x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. """ + rnn_model_path = os.path.join(tempfile.gettempdir() , "rnn.h5") + + if first_run: + + #pretend that net is a pretrained lstm of some sort + net = tf.keras.Sequential(name='pretrained lstm') + + net.add(tf.keras.layers.Input(shape=(32, 32, 3))) + net.add(tf.keras.layers.Reshape(target_shape=(32, 96))) + #reduce the length of the time series + net.add(tf.keras.layers.Cropping1D(cropping=(0, 16))) + #we are primarily interested in the bidir lstm layer and its behavior + net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) + + model = tf.keras.Sequential( + [ + net, + tf.keras.layers.Dropout(0.5), + tf.keras.layers.Dense(5, name="head"), + ] + ) - tf.random.set_seed(1) + model.save(rnn_model_path) + # this creates a model with set weights for testing purposes + # most tests will assert equivalency between a model with discriminative training and a model without + return None - model = tf.keras.Sequential() - model.add(tf.keras.layers.Input(shape=(32, 32, 3))) - model.add(tf.keras.layers.Reshape(target_shape=(32, 96))) - model.add(tf.keras.layers.Cropping1D(cropping=(0, 24))) - model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) - model.add(tf.keras.layers.Dropout(0.5)) - model.add(tf.keras.layers.Dense(5)) + else: + assert os.path.exists((rnn_model_path)), 'Could not find h5 file at path %s ' % rnn_model_path + # load the variable initialized model from the disk + return tf.keras.models.load_model(rnn_model_path) - return model def get_train_results(model, verbose=False): From c0ad05ab4866f09a4733d49c8331e1280c75bd49 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 12:03:30 -0500 Subject: [PATCH 041/106] added toy_rnn and sgd to the test permutations --- .../discriminative_layer_training_test.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 3b1f924178..867b950389 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -135,14 +135,24 @@ def get_train_results(model, verbose=False): def zipped_permutes(): - model_fns = [toy_cnn] + model_fns = [ + # generally, we want to test that common layers function correctly with discriminative layer training + # dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types + # we also assume that if it works for conv2d, it should work for conv3d by extension + # apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? + toy_cnn, + toy_rnn, + ] losses = [ - # tf.keras.losses.BinaryCrossentropy(from_logits=True), + # additional loss types do not need to be tested + # this is because losses affect the gradient tape, which is computed before + # the apply_gradients step. 
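# Standalone illustration of that point (assumed names, not used by the tests):
# the tape produces the gradients before any optimizer is involved, so the
# wrapped optimizer receives the same (grad, var) pairs regardless of which
# loss produced them.
def _one_train_step(model, opt, loss_fn, x, y):
    with tf.GradientTape() as tape:
        loss_value = loss_fn(y, model(x, training=True))
    grads = tape.gradient(loss_value, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return loss_value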
This means that the some gradient value is passed on to each opt + # and the gradient calculation is unaffected by which optimizer you are using tf.keras.losses.CategoricalCrossentropy(from_logits=True), - # tf.keras.losses.MeanSquaredError(), ] optimzers = [ - # tf.keras.optimizers.SGD, + #additional optimizers can be added for testing + tf.keras.optimizers.SGD, tf.keras.optimizers.Adam, ] return list(itertools.product(model_fns, losses, optimzers)) From 9a69ae81bf38b68afcb90dd7e57209cb7caab835 Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 11 Feb 2020 12:04:47 -0500 Subject: [PATCH 042/106] refactored permutes and train results into private fns --- .../discriminative_layer_training_test.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 867b950389..6a4809e4bb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -122,8 +122,8 @@ def toy_rnn(first_run = False): -def get_train_results(model, verbose=False): - """Run a traininng loop and return the results for analysis +def _get_train_results(model, verbose=False): + """Run a training loop and return the results for analysis model must be compiled first """ tf.random.set_seed(1) @@ -134,7 +134,7 @@ def get_train_results(model, verbose=False): return model.fit(x, y, epochs=10, batch_size=16, verbose=verbose, shuffle=False) -def zipped_permutes(): +def _zipped_permutes(): model_fns = [ # generally, we want to test that common layers function correctly with discriminative layer training # dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types @@ -176,8 +176,8 @@ def _assert_losses_are_close(self, hist, hist_lr): ) def _assert_training_losses_are_close(self, model, model_lr): - hist = get_train_results(model, verbose=False) - hist_lr = get_train_results(model_lr, verbose=False) + hist = _get_train_results(model, verbose=False) + hist_lr = _get_train_results(model_lr, verbose=False) self._assert_losses_are_close(hist, hist_lr) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): @@ -238,7 +238,7 @@ def _test_loss_changes_over_time(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - loss_values = get_losses(get_train_results(model_lr)) + loss_values = get_losses(_get_train_results(model_lr)) self.assertLess(loss_values[-1], loss_values[0]) def _run_tests_in_notebook(self): @@ -277,7 +277,7 @@ def distributed(self): def generate_tests(devices): for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": - for model_fn, loss, opt in zipped_permutes(): + for model_fn, loss, opt in _zipped_permutes(): testmethodname = name[1:] + "_%s_%s_%s" % ( model_fn.__name__, loss.name, From abbb9614318798f87facf61c2bd33ed3e070a006 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 12:43:14 -0500 Subject: [PATCH 043/106] reformatted files and fixed flake 8 issues fixed bad references when lr_mult was changed --- .../discriminative_layer_training.py | 36 ++++++++++--------- .../discriminative_layer_training_test.py | 34 +++++++++--------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 4156833a09..cf870af7cf 100644 --- 
a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -27,13 +27,13 @@ class DiscriminativeModelManager: def _get_layers(layer): """Helper method to access a layer's sublayers as a list or return an empty list """ - return getattr(layer, 'layers', []) + return getattr(layer, "layers", []) @staticmethod def _get_lr_mult(layer): """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set """ - return getattr(layer, 'lr_mult', 1.0) + return getattr(layer, "lr_mult", 1.0) @staticmethod def _assign_lr_mult(layer, lr_mult, override=False): @@ -67,9 +67,10 @@ def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): DiscriminativeModelManager._assign_lr_mult(sublayer, mult) # recursively iterate through the nested layers - for nested_sublayer in DiscriminativeModelManager._get_lowest_layers(sublayer): + for nested_sublayer in DiscriminativeModelManager._get_lowest_layers( + sublayer + ): yield nested_sublayer - else: yield layer @@ -79,8 +80,9 @@ def _apply_lr_mult_to_var(layer): """ lr_mult = DiscriminativeModelManager._get_lr_mult(layer) for var in layer.trainable_variables: - var.lr_mult = lr_mult #the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor - #there's not benefit in setting the lr_mult as a variable because it does not interact with tensors + var.lr_mult = lr_mult + # the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor + # there's not benefit in setting the lr_mult as a variable because it does not interact with tensors @staticmethod def _check_for_lr_mult(layer, verbose=True, propagate=True): @@ -111,18 +113,18 @@ def _prepare_model(model, verbose=True): """Prepares a model for disc training """ - layers_with_lr_mult = DiscriminativeModelManager._check_for_lr_mult(model, verbose=verbose) + layers_with_lr_mult = DiscriminativeModelManager._check_for_lr_mult( + model, verbose=verbose + ) if len(layers_with_lr_mult) == 0: logging.warning( - """ - No Layer has been assigned an lr_mult attribute != 1.0 + """No Layer has been assigned an lr_mult attribute != 1.0 Discriminative Layer Training will apply the same learning rate to all layers - It will perform as if you did not use Discriminative Layer Training + It will perform as if you did not use Discriminative Layer Training """ ) - for layer in DiscriminativeModelManager._get_lowest_layers(model): DiscriminativeModelManager._apply_lr_mult_to_var(layer) @@ -135,7 +137,9 @@ def _prepare_model(model, verbose=True): "%i params of %i will learn at a different rate" % ( DiscriminativeModelManager._compute_params(vars_with_lr_mult), - DiscriminativeModelManager._compute_params(model.trainable_variables), + DiscriminativeModelManager._compute_params( + model.trainable_variables + ), ) ) @@ -218,9 +222,9 @@ def __init__( self.optimizer_group = [] - for lr_mult_value in variable_groups.keys(): + for lr_mult in variable_groups.keys(): opt = self.opt_class(learning_rate=learning_rate * lr_mult, **kwargs) - opt.lr_mult_value = lr_mult_value + opt.lr_mult = lr_mult self.optimizer_group.append(opt) def apply_gradients(self, grads_and_vars, name=None): @@ -236,7 +240,7 @@ def apply_gradients(self, grads_and_vars, name=None): # load the gradvars into the appropriate bucket for grad, var in tuple(grads_and_vars): - gvdict[var.lr_mult_value].append((grad, var)) + gvdict[var.lr_mult].append((grad, var)) # return results from each opt # in eager mode, this 
will return a list of irrelevant results for each optimizer @@ -244,7 +248,7 @@ def apply_gradients(self, grads_and_vars, name=None): # in graph mode, this will return a list of tensor ops for each opt # in graph mode, apply_gradients creates the tensor ops for applying gradients on the graph return [ - opt.apply_gradients(tuple(gvdict[opt.lr_mult_value])) + opt.apply_gradients(tuple(gvdict[opt.lr_mult])) for opt in self.optimizer_group ] diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 6a4809e4bb..eb6475d9a2 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -22,11 +22,10 @@ ) import itertools import os -from tensorflow.python.eager import context import tempfile -def toy_cnn(first_run = False): +def toy_cnn(first_run=False): """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -42,7 +41,7 @@ def toy_cnn(first_run = False): y[:, 0] = 1. """ - cnn_model_path = os.path.join(tempfile.gettempdir() , "cnn.h5") + cnn_model_path = os.path.join(tempfile.gettempdir(), "cnn.h5") if first_run: bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( @@ -67,12 +66,14 @@ def toy_cnn(first_run = False): # most tests will assert equivalency between a model with discriminative training and a model without return None else: - assert os.path.exists((cnn_model_path)), 'Could not find h5 file at path %s ' % cnn_model_path + assert os.path.exists((cnn_model_path)), ( + "Could not find h5 file at path %s " % cnn_model_path + ) # load the variable initialized model from the disk return tf.keras.models.load_model(cnn_model_path) -def toy_rnn(first_run = False): +def toy_rnn(first_run=False): """ Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -88,26 +89,22 @@ def toy_rnn(first_run = False): y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. 
""" - rnn_model_path = os.path.join(tempfile.gettempdir() , "rnn.h5") + rnn_model_path = os.path.join(tempfile.gettempdir(), "rnn.h5") if first_run: - #pretend that net is a pretrained lstm of some sort - net = tf.keras.Sequential(name='pretrained lstm') + # pretend that net is a pretrained lstm of some sort + net = tf.keras.Sequential(name="pretrained lstm") net.add(tf.keras.layers.Input(shape=(32, 32, 3))) net.add(tf.keras.layers.Reshape(target_shape=(32, 96))) - #reduce the length of the time series + # reduce the length of the time series net.add(tf.keras.layers.Cropping1D(cropping=(0, 16))) - #we are primarily interested in the bidir lstm layer and its behavior + # we are primarily interested in the bidir lstm layer and its behavior net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) model = tf.keras.Sequential( - [ - net, - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(5, name="head"), - ] + [net, tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(5, name="head"),] ) model.save(rnn_model_path) @@ -116,12 +113,13 @@ def toy_rnn(first_run = False): return None else: - assert os.path.exists((rnn_model_path)), 'Could not find h5 file at path %s ' % rnn_model_path + assert os.path.exists((rnn_model_path)), ( + "Could not find h5 file at path %s " % rnn_model_path + ) # load the variable initialized model from the disk return tf.keras.models.load_model(rnn_model_path) - def _get_train_results(model, verbose=False): """Run a training loop and return the results for analysis model must be compiled first @@ -151,7 +149,7 @@ def _zipped_permutes(): tf.keras.losses.CategoricalCrossentropy(from_logits=True), ] optimzers = [ - #additional optimizers can be added for testing + # additional optimizers can be added for testing tf.keras.optimizers.SGD, tf.keras.optimizers.Adam, ] From 9f19a6319450465a7874836a7d1f9c88a92b86db Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 12:50:12 -0500 Subject: [PATCH 044/106] added missing functions in prep for tests --- .../optimizers/discriminative_layer_training_test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index eb6475d9a2..833a6dbf69 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -298,8 +298,10 @@ def generate_tests(devices): if __name__ == "__main__": + # save models so weights are always the same + toy_cnn(first_run=True) + toy_rnn(first_run=True) + devices = test_utils.create_virtual_devices(2) generate_tests(devices) - # DiscriminativeLearningTest()._run_tests_in_notebook() - # print("done") tf.test.main() From cd1f6132bce90e37cd144dbccccb7d493a5c877c Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 13:01:20 -0500 Subject: [PATCH 045/106] updated assign lr mult and explained further why refactored get lowest layers to assign sublayers explained recursively assign sublayers better --- .../discriminative_layer_training.py | 37 ++++++++++++------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index cf870af7cf..1786cb97aa 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -36,18 +36,23 @@ def 
_get_lr_mult(layer): return getattr(layer, "lr_mult", 1.0) @staticmethod - def _assign_lr_mult(layer, lr_mult, override=False): + def _assign_lr_mult(layer, lr_mult): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set """ - - try: - if layer.lr_mult and override: - layer.lr_mult = lr_mult # check if layer has lr mult and if override, then assign the new lr mult - except AttributeError: + if not hasattr(layer, 'lr_mult'): layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult + # this method should be called after the user has already assigned some lr mults + # to some layers. We just don't want to override any lr mults they assigned + else: + # we pass here because of propagation to nested layers + # users should be able to speficy model.layers[0].layers[0].lr_mult = 0.01 + # and model.layers[0].lr_mult = 0.1, such that the model.layers[0].layers[0] + # keeps its assigned lr mult of 0.01 + pass + @staticmethod - def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): + def _recursively_assign_sublayer_lr_mult(layer): """Helper method iterate through all nested layers of an object that behaves like a layer or model By default, we want to propagate the lr mult to the lower layers. @@ -62,12 +67,16 @@ def _get_lowest_layers(layer, propagate_lr_mult_to_sub_layers=True): if len(layers) > 0: for sublayer in layers: - # we generally want to propagate the lr mult to the lower layers - if propagate_lr_mult_to_sub_layers: - DiscriminativeModelManager._assign_lr_mult(sublayer, mult) + # we always assign the lr mult to the sublayers of the current layer + # the assign method will avoid overwritting lr mults + # so if you have a resnet and you specifically assign the first resnet layer + # to have lr_mult of 0.01 and the resnet model to have lr_mult of 0.1, all + # resnet layers except the first should get lr_mult of 0.1 and the first + # keeps its lr_mult of 0.01 + DiscriminativeModelManager._assign_lr_mult(sublayer, mult) # recursively iterate through the nested layers - for nested_sublayer in DiscriminativeModelManager._get_lowest_layers( + for nested_sublayer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( sublayer ): yield nested_sublayer @@ -91,8 +100,8 @@ def _check_for_lr_mult(layer, verbose=True, propagate=True): layers_with_lr_mult = [] - for sub_layer in DiscriminativeModelManager._get_lowest_layers( - layer, propagate_lr_mult_to_sub_layers=propagate + for sub_layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( + layer ): lr_mult = DiscriminativeModelManager._get_lr_mult(sub_layer) if lr_mult != 1.0: @@ -125,7 +134,7 @@ def _prepare_model(model, verbose=True): """ ) - for layer in DiscriminativeModelManager._get_lowest_layers(model): + for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult(model): DiscriminativeModelManager._apply_lr_mult_to_var(layer) vars_with_lr_mult = [ From 5f67423408167bbead0e2ea2e0d93cd3a3b79da4 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 13:03:11 -0500 Subject: [PATCH 046/106] forgot to run black so ran it to reformat --- .../optimizers/discriminative_layer_training.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 1786cb97aa..5b55810a5f 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ 
b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -39,7 +39,7 @@ def _get_lr_mult(layer): def _assign_lr_mult(layer, lr_mult): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set """ - if not hasattr(layer, 'lr_mult'): + if not hasattr(layer, "lr_mult"): layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult # this method should be called after the user has already assigned some lr mults # to some layers. We just don't want to override any lr mults they assigned @@ -50,7 +50,6 @@ def _assign_lr_mult(layer, lr_mult): # keeps its assigned lr mult of 0.01 pass - @staticmethod def _recursively_assign_sublayer_lr_mult(layer): @@ -76,7 +75,9 @@ def _recursively_assign_sublayer_lr_mult(layer): DiscriminativeModelManager._assign_lr_mult(sublayer, mult) # recursively iterate through the nested layers - for nested_sublayer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( + for ( + nested_sublayer + ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( sublayer ): yield nested_sublayer @@ -100,9 +101,9 @@ def _check_for_lr_mult(layer, verbose=True, propagate=True): layers_with_lr_mult = [] - for sub_layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( - layer - ): + for ( + sub_layer + ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult(layer): lr_mult = DiscriminativeModelManager._get_lr_mult(sub_layer) if lr_mult != 1.0: layers_with_lr_mult.append(sub_layer) @@ -134,7 +135,9 @@ def _prepare_model(model, verbose=True): """ ) - for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult(model): + for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( + model + ): DiscriminativeModelManager._apply_lr_mult_to_var(layer) vars_with_lr_mult = [ From bbc0f6c5e49cb5deb11d61c5d762dc84e13daf30 Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 13:38:24 -0500 Subject: [PATCH 047/106] specified inputshape for rnn --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 833a6dbf69..0a699db3e6 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -96,7 +96,7 @@ def toy_rnn(first_run=False): # pretend that net is a pretrained lstm of some sort net = tf.keras.Sequential(name="pretrained lstm") - net.add(tf.keras.layers.Input(shape=(32, 32, 3))) + net.add(tf.keras.layers.Input(input_shape=(32, 32, 3))) net.add(tf.keras.layers.Reshape(target_shape=(32, 96))) # reduce the length of the time series net.add(tf.keras.layers.Cropping1D(cropping=(0, 16))) From b77bbfcbb24483cc1c8a22702d1994b38c40fd3f Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 13:57:43 -0500 Subject: [PATCH 048/106] increased size of test temporarily removed SGD opt. Double opts doubles the number of tests to run so just need to see how long this one takes. 
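Note: the no-override propagation reworked in the patches above (see PATCH 045/046) can be summarized with a small illustrative sketch. This is not part of the patch itself; it assumes DiscriminativeModelManager is importable from tensorflow_addons.optimizers.discriminative_layer_training on this branch, and the toy two-level model is hypothetical.

    # illustrative sketch of lr_mult propagation, assuming this branch is installed
    import tensorflow as tf
    from tensorflow_addons.optimizers.discriminative_layer_training import (
        DiscriminativeModelManager,
    )

    # a nested "pretrained" sublayer inside a larger model
    backbone = tf.keras.Sequential(
        [tf.keras.layers.Dense(4, input_shape=(8,), name="pretrained")]
    )
    model = tf.keras.Sequential([backbone, tf.keras.layers.Dense(2, name="head")])

    model.lr_mult = 0.1      # coarse multiplier assigned to the whole model
    backbone.lr_mult = 0.01  # finer multiplier on a nested sublayer; must not be overridden

    # propagate lr_mult down to the lowest layers and copy it onto each trainable variable
    DiscriminativeModelManager._prepare_model(model, verbose=False)

    # every variable carries the nearest explicitly assigned multiplier:
    # the nested sublayer keeps 0.01, everything else inherits the model-level 0.1
    assert all(v.lr_mult == 0.01 for v in backbone.trainable_variables)
    assert all(v.lr_mult == 0.1 for v in model.get_layer("head").trainable_variables)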
--- tensorflow_addons/optimizers/BUILD | 2 +- .../optimizers/discriminative_layer_training_test.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index ea1c237694..7ef2c862d9 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -160,7 +160,7 @@ py_test( py_test( name = "discriminative_layer_training_test", - size = "medium", + size = "large", srcs = [ "discriminative_layer_training_test.py", ], diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 0a699db3e6..b073955859 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -150,7 +150,8 @@ def _zipped_permutes(): ] optimzers = [ # additional optimizers can be added for testing - tf.keras.optimizers.SGD, + # seems to be timing out. will add SGD back later + # tf.keras.optimizers.SGD, tf.keras.optimizers.Adam, ] return list(itertools.product(model_fns, losses, optimzers)) From 2d5fe1ae297e76fd86cc56ac7379e904c6eacbdb Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 14:10:13 -0500 Subject: [PATCH 049/106] remove toy rnn for now --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index b073955859..42f1ccebb7 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -139,7 +139,7 @@ def _zipped_permutes(): # we also assume that if it works for conv2d, it should work for conv3d by extension # apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? toy_cnn, - toy_rnn, + # toy_rnn, ] losses = [ # additional loss types do not need to be tested From 503a9e5390e1e2c5114f30762718d6ac6dfd720a Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 14:53:06 -0500 Subject: [PATCH 050/106] changed back to medium. 
maybe large was not actually increasing runtime --- tensorflow_addons/optimizers/BUILD | 2 +- .../discriminative_layer_training_test.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/BUILD b/tensorflow_addons/optimizers/BUILD index 7ef2c862d9..ea1c237694 100644 --- a/tensorflow_addons/optimizers/BUILD +++ b/tensorflow_addons/optimizers/BUILD @@ -160,7 +160,7 @@ py_test( py_test( name = "discriminative_layer_training_test", - size = "large", + size = "medium", srcs = [ "discriminative_layer_training_test.py", ], diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 42f1ccebb7..cb2d9de49a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -97,11 +97,19 @@ def toy_rnn(first_run=False): net = tf.keras.Sequential(name="pretrained lstm") net.add(tf.keras.layers.Input(input_shape=(32, 32, 3))) - net.add(tf.keras.layers.Reshape(target_shape=(32, 96))) + + #crop the input shape so the lstm runs faster + net.add(tf.keras.layers.Cropping2D(cropping= ((8, 8), (12, 12) ) )) + + #reshape into a timeseries + net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) + # reduce the length of the time series - net.add(tf.keras.layers.Cropping1D(cropping=(0, 16))) + net.add(tf.keras.layers.Cropping1D(cropping=(0, 5))) + # reduce dimensions + # we are primarily interested in the bidir lstm layer and its behavior - net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))) + net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(4))) model = tf.keras.Sequential( [net, tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(5, name="head"),] From ff697cbd4b1c38223679405dd427aa804960d27c Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 15:05:08 -0500 Subject: [PATCH 051/106] fixed input layer --- .../optimizers/discriminative_layer_training_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index cb2d9de49a..38e9c02dce 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -96,12 +96,12 @@ def toy_rnn(first_run=False): # pretend that net is a pretrained lstm of some sort net = tf.keras.Sequential(name="pretrained lstm") - net.add(tf.keras.layers.Input(input_shape=(32, 32, 3))) + net.add(tf.keras.layers.InputLayer(input_shape=(32, 32, 3))) - #crop the input shape so the lstm runs faster - net.add(tf.keras.layers.Cropping2D(cropping= ((8, 8), (12, 12) ) )) + # crop the input shape so the lstm runs faster + net.add(tf.keras.layers.Cropping2D(cropping=((8, 8), (12, 12)))) - #reshape into a timeseries + # reshape into a timeseries net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) # reduce the length of the time series From 740127c82c5ae532c6364ea5c8a0acc858b49a6a Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 11 Feb 2020 16:16:46 -0500 Subject: [PATCH 052/106] fixed input layer being in wrong place --- .../optimizers/discriminative_layer_training_test.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py 
b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 38e9c02dce..54c55293c2 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -94,9 +94,7 @@ def toy_rnn(first_run=False): if first_run: # pretend that net is a pretrained lstm of some sort - net = tf.keras.Sequential(name="pretrained lstm") - - net.add(tf.keras.layers.InputLayer(input_shape=(32, 32, 3))) + net = tf.keras.Sequential() # crop the input shape so the lstm runs faster net.add(tf.keras.layers.Cropping2D(cropping=((8, 8), (12, 12)))) @@ -112,7 +110,12 @@ def toy_rnn(first_run=False): net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(4))) model = tf.keras.Sequential( - [net, tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(5, name="head"),] + [ + tf.keras.layers.InputLayer(input_shape=(32, 32, 3)), + net, + tf.keras.layers.Dropout(0.5), + tf.keras.layers.Dense(5, name="head"), + ] ) model.save(rnn_model_path) From 10b7417e04e47c0ac1d63b01a2b111d9fd93ff69 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 08:30:42 -0500 Subject: [PATCH 053/106] virtual device modification issue --- .../optimizers/discriminative_layer_training_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 54c55293c2..ffc94b711a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -311,9 +311,10 @@ def generate_tests(devices): if __name__ == "__main__": # save models so weights are always the same + devices = test_utils.create_virtual_devices(2) + toy_cnn(first_run=True) toy_rnn(first_run=True) - devices = test_utils.create_virtual_devices(2) generate_tests(devices) tf.test.main() From a2831d967302db71cc2cd9ebdf2b54b5f0feb690 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 09:02:42 -0500 Subject: [PATCH 054/106] fixed incorrect usage of lr_mult --- tensorflow_addons/optimizers/discriminative_layer_training.py | 2 +- .../optimizers/discriminative_layer_training_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 5b55810a5f..1999c84deb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -230,7 +230,7 @@ def __init__( self.opt_class = base_optimizer # find unique lr_mult - variable_groups = {var.lr_mult_value: None for var in model.trainable_variables} + variable_groups = {var.lr_mult: None for var in model.trainable_variables} self.optimizer_group = [] diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index ffc94b711a..31ac893a70 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -310,9 +310,9 @@ def generate_tests(devices): if __name__ == "__main__": - # save models so weights are always the same devices = test_utils.create_virtual_devices(2) + # save models so weights are always the same toy_cnn(first_run=True) toy_rnn(first_run=True) From 6baa02401dd4913094dbf2083e05b2d6f186e1d4 Mon Sep 17 
00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 10:45:19 -0500 Subject: [PATCH 055/106] added comments for tests explaining them better added toy rnn for testing --- .../optimizers/discriminative_layer_training_test.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 31ac893a70..c38dacefd3 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -150,7 +150,7 @@ def _zipped_permutes(): # we also assume that if it works for conv2d, it should work for conv3d by extension # apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? toy_cnn, - # toy_rnn, + toy_rnn, ] losses = [ # additional loss types do not need to be tested @@ -186,6 +186,7 @@ def _assert_losses_are_close(self, hist, hist_lr): ) def _assert_training_losses_are_close(self, model, model_lr): + """easy way to check if two models train in almost the same way""" hist = _get_train_results(model, verbose=False) hist_lr = _get_train_results(model_lr, verbose=False) self._assert_losses_are_close(hist, hist_lr) @@ -205,7 +206,9 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): - """confirm 0 lr_mult for the model is the same as model not trainable""" + """confirm 0 lr_mult for the model is the same as model not trainable + this also confirms that lr_mult on the model is propagated to all sublayers and their variables + """ learning_rate = 0.01 model = model_fn() model.trainable = False @@ -221,7 +224,9 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): - """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr""" + """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr + this also confirms that lr_mult on the model is propagated to all sublayers and their variables + """ mult = 0.5 learning_rate = 0.01 From ff8600008cab4b88578f1184c547b6537d43f8a8 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 11:07:02 -0500 Subject: [PATCH 056/106] added new test fix toy rnn initialization --- .../discriminative_layer_training_test.py | 75 +++++++++++++++---- 1 file changed, 62 insertions(+), 13 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index c38dacefd3..3bb125194e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -54,6 +54,7 @@ def toy_cnn(first_run=False): ) model = tf.keras.Sequential( [ + tf.keras.layers.InputLayer(input_shape=(32, 32, 3)), net, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dropout(0.5), @@ -118,6 +119,13 @@ def toy_rnn(first_run=False): ] ) + # seems that weights won't get created unless we run fit once + opt = tf.keras.optimizers.SGD(learning_rate = 0) + model.compile(loss = 'binary_crossentropy', opt = opt) + x = np.ones(shape=(1, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(1, 5), dtype=np.float32) + model.fit(x, y, epochs=1, batch_size=1, 
verbose=False, shuffle=False) + model.save(rnn_model_path) # this creates a model with set weights for testing purposes # most tests will assert equivalency between a model with discriminative training and a model without @@ -131,7 +139,7 @@ def toy_rnn(first_run=False): return tf.keras.models.load_model(rnn_model_path) -def _get_train_results(model, verbose=False): +def _get_train_results(model, verbose=False, epochs = 10): """Run a training loop and return the results for analysis model must be compiled first """ @@ -140,7 +148,7 @@ def _get_train_results(model, verbose=False): y = np.zeros(shape=(32, 5), dtype=np.float32) y[:, 0] = 1.0 - return model.fit(x, y, epochs=10, batch_size=16, verbose=verbose, shuffle=False) + return model.fit(x, y, epochs=epochs, batch_size=16, verbose=verbose, shuffle=False) def _zipped_permutes(): @@ -185,10 +193,13 @@ def _assert_losses_are_close(self, hist, hist_lr): get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol ) - def _assert_training_losses_are_close(self, model, model_lr): - """easy way to check if two models train in almost the same way""" - hist = _get_train_results(model, verbose=False) - hist_lr = _get_train_results(model_lr, verbose=False) + def _assert_training_losses_are_close(self, model, model_lr, epochs = 10): + """easy way to check if two models train in almost the same way + epochs set to 10 by default to allow momentum methods to pick up momentum and diverge + if the disc training is not working + """ + hist = _get_train_results(model, verbose=False, epochs= epochs) + hist_lr = _get_train_results(model_lr, verbose=False, epochs= epochs) self._assert_losses_are_close(hist, hist_lr) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): @@ -205,9 +216,30 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) + def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(self, model_fn, loss, opt): + """confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable + this also confirms that lr_mult propagates into that layer's trainable variables + this also confirms that lr_mult does not propagate to the rest of the layers unintentionally + """ + learning_rate = 0.01 + model = model_fn() + + #we use layer 1 instead of 0 bc layer 0 is just an input layer + model.layers[1].trainable = False + model.compile(loss=loss, optimizer=opt(learning_rate)) + + model_lr = model_fn() + model_lr.layers[1].lr_mult = 0.0 + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + self._assert_training_losses_are_close(model, model_lr) + def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): """confirm 0 lr_mult for the model is the same as model not trainable - this also confirms that lr_mult on the model is propagated to all sublayers and their variables + this also confirms that lr_mult on the model level is propagated to all sublayers and their variables """ learning_rate = 0.01 model = model_fn() @@ -221,11 +253,12 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - self._assert_training_losses_are_close(model, model_lr) + #only two epochs because we expect no training to occur, thus losses shouldn't change anyways + self._assert_training_losses_are_close(model, model_lr, epochs = 2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): 
"""confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr - this also confirms that lr_mult on the model is propagated to all sublayers and their variables + this also confirms that lr_mult on the model level is propagated to all sublayers and their variables """ mult = 0.5 @@ -242,20 +275,36 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) - def _test_loss_changes_over_time(self, model_fn, loss, opt): - """confirm that model trains with lower lr on specific layer""" + def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): + """confirm that model trains with lower lr on specific layer + while a different lr_mult is applied everywhere else + also confirms that sub layers with an lr mult do not get overridden + """ learning_rate = 0.01 model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.01 + + # we set model to lrmult 0 and layer one to lrmult 0.5 + # if layer one is trainable, then the loss should decrease + model_lr.lr_mult = 0.00 + model_lr.layers[1].lr_mult = 0.5 + d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) - loss_values = get_losses(_get_train_results(model_lr)) + loss_values = get_losses(_get_train_results(model_lr, epochs=4)) self.assertLess(loss_values[-1], loss_values[0]) + + def _test_variables_get_assigned(self): + """confirm that variables do get an lr_mult attribute and that they get the correct one + :TODO confirm propagation to nested sublayers, confirm not override of a sublayer's mult + """ + pass + + def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): if callable(method) and name[:4] == "test": From f244bf24a6b4f4bac739077c8651f53335c309fe Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 11:22:00 -0500 Subject: [PATCH 057/106] fixed typo --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 3bb125194e..08ead8d0ec 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -121,7 +121,7 @@ def toy_rnn(first_run=False): # seems that weights won't get created unless we run fit once opt = tf.keras.optimizers.SGD(learning_rate = 0) - model.compile(loss = 'binary_crossentropy', opt = opt) + model.compile(loss = 'binary_crossentropy', optimizer = opt) x = np.ones(shape=(1, 32, 32, 3), dtype=np.float32) y = np.zeros(shape=(1, 5), dtype=np.float32) model.fit(x, y, epochs=1, batch_size=1, verbose=False, shuffle=False) From b9119e8bfac7c7e82270900ff6ccc5646f5b659b Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 11:53:45 -0500 Subject: [PATCH 058/106] added inputshape so that pretrained rnn generates weights --- .../optimizers/discriminative_layer_training_test.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 08ead8d0ec..11e643854e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -98,7 +98,8 @@ def toy_rnn(first_run=False): net = 
tf.keras.Sequential() # crop the input shape so the lstm runs faster - net.add(tf.keras.layers.Cropping2D(cropping=((8, 8), (12, 12)))) + # pretrained need inputshape for weights to be initialized + net.add(tf.keras.layers.Cropping2D(cropping=((8, 8), (12, 12)), input_shape = (32, 32, 3))) # reshape into a timeseries net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) @@ -119,13 +120,6 @@ def toy_rnn(first_run=False): ] ) - # seems that weights won't get created unless we run fit once - opt = tf.keras.optimizers.SGD(learning_rate = 0) - model.compile(loss = 'binary_crossentropy', optimizer = opt) - x = np.ones(shape=(1, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(1, 5), dtype=np.float32) - model.fit(x, y, epochs=1, batch_size=1, verbose=False, shuffle=False) - model.save(rnn_model_path) # this creates a model with set weights for testing purposes # most tests will assert equivalency between a model with discriminative training and a model without From a178620f35fb7c5058c9c54a3d8628400cd4e8a3 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 12:29:37 -0500 Subject: [PATCH 059/106] changed test to allow head to learn. it should move the loss better --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 11e643854e..668a24ebde 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -281,7 +281,7 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): # we set model to lrmult 0 and layer one to lrmult 0.5 # if layer one is trainable, then the loss should decrease model_lr.lr_mult = 0.00 - model_lr.layers[1].lr_mult = 0.5 + model_lr.layers[-1].lr_mult = 0.5 d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate From d9408d0ac6f0f354cceebc7d5b5da8fb2db1d285 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 12:30:03 -0500 Subject: [PATCH 060/106] reformatted --- .../discriminative_layer_training_test.py | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 668a24ebde..eaa7398bfc 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -99,7 +99,11 @@ def toy_rnn(first_run=False): # crop the input shape so the lstm runs faster # pretrained need inputshape for weights to be initialized - net.add(tf.keras.layers.Cropping2D(cropping=((8, 8), (12, 12)), input_shape = (32, 32, 3))) + net.add( + tf.keras.layers.Cropping2D( + cropping=((8, 8), (12, 12)), input_shape=(32, 32, 3) + ) + ) # reshape into a timeseries net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) @@ -133,7 +137,7 @@ def toy_rnn(first_run=False): return tf.keras.models.load_model(rnn_model_path) -def _get_train_results(model, verbose=False, epochs = 10): +def _get_train_results(model, verbose=False, epochs=10): """Run a training loop and return the results for analysis model must be compiled first """ @@ -187,13 +191,13 @@ def _assert_losses_are_close(self, hist, hist_lr): get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol ) - def 
_assert_training_losses_are_close(self, model, model_lr, epochs = 10): + def _assert_training_losses_are_close(self, model, model_lr, epochs=10): """easy way to check if two models train in almost the same way epochs set to 10 by default to allow momentum methods to pick up momentum and diverge if the disc training is not working """ - hist = _get_train_results(model, verbose=False, epochs= epochs) - hist_lr = _get_train_results(model_lr, verbose=False, epochs= epochs) + hist = _get_train_results(model, verbose=False, epochs=epochs) + hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): @@ -210,7 +214,9 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) - def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(self, model_fn, loss, opt): + def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( + self, model_fn, loss, opt + ): """confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable this also confirms that lr_mult propagates into that layer's trainable variables this also confirms that lr_mult does not propagate to the rest of the layers unintentionally @@ -218,7 +224,7 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(self, model_fn, loss learning_rate = 0.01 model = model_fn() - #we use layer 1 instead of 0 bc layer 0 is just an input layer + # we use layer 1 instead of 0 bc layer 0 is just an input layer model.layers[1].trainable = False model.compile(loss=loss, optimizer=opt(learning_rate)) @@ -247,8 +253,8 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - #only two epochs because we expect no training to occur, thus losses shouldn't change anyways - self._assert_training_losses_are_close(model, model_lr, epochs = 2) + # only two epochs because we expect no training to occur, thus losses shouldn't change anyways + self._assert_training_losses_are_close(model, model_lr, epochs=2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr @@ -291,14 +297,12 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): loss_values = get_losses(_get_train_results(model_lr, epochs=4)) self.assertLess(loss_values[-1], loss_values[0]) - def _test_variables_get_assigned(self): """confirm that variables do get an lr_mult attribute and that they get the correct one :TODO confirm propagation to nested sublayers, confirm not override of a sublayer's mult """ pass - def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): if callable(method) and name[:4] == "test": From 353bcc35833c7c3cb993dc6a7d3c511987e52abe Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 12:57:32 -0500 Subject: [PATCH 061/106] fixed test for variable assignment added get config and from config --- .../discriminative_layer_training.py | 32 +++++++++++++++-- .../discriminative_layer_training_test.py | 35 +++++++++++++++++-- 2 files changed, 61 insertions(+), 6 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 1999c84deb..bfb55f14ae 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ 
b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -228,6 +228,8 @@ def __init__( DiscriminativeModelManager._prepare_model(model, verbose=verbose) self.opt_class = base_optimizer + self.learning_rate = learning_rate + self.kwargs = kwargs # find unique lr_mult variable_groups = {var.lr_mult: None for var in model.trainable_variables} @@ -265,7 +267,31 @@ def apply_gradients(self, grads_and_vars, name=None): ] def get_config(self): - raise NotImplementedError("Optimizer wrapper does not support get config") + """This method cannot effectively return the optimizer configuration because + that configuration depends on the model and base optimizer + for now, it returns the config values of itself and base optimizers + """ + + logging.warning("""Discriminative Training Optimzer depends on its attached model + It will behave differently on the same model if the lr mult attributes are not set in the same way + Currently, this method does not support preserving optimizer's state during training + """) + config = super().get_config() + config['base_optimizer'] = self.opt_class + config['learning_rate'] = self.learning_rate + + for key, value in self.kwargs: + config[key] = value + + return config + + @classmethod + def from_config(cls, config, model): + """For this to work, you need to pass the same model to the optimizer""" + + logging.warning("""Discriminative Training Optimzer depends on its attached model + It will behave differently on the same model if the lr mult attributes are not set in the same way + Currently, this method does not support preserving optimizer's state during training + """) - def from_config(self): - raise NotImplementedError("Optimizer wrapper does not support from config") + return cls(**config, model=model) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index eaa7398bfc..c6d9327425 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -297,11 +297,40 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): loss_values = get_losses(_get_train_results(model_lr, epochs=4)) self.assertLess(loss_values[-1], loss_values[0]) - def _test_variables_get_assigned(self): + def _test_variables_get_assigned(self, model_fn, loss, opt): """confirm that variables do get an lr_mult attribute and that they get the correct one - :TODO confirm propagation to nested sublayers, confirm not override of a sublayer's mult """ - pass + learning_rate = 0.01 + model_lr = model_fn() + + #set lr mults + model_lr.layers[1].lr_mult = 0.3 + model_lr.layers[1].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + + # we expect trainable vars at 0.3 to be reduced by the amount at 0.1 + # this tests that the 0.3 lr mult does not override the 0.1 lr mult + self.assertEqual(len(model_lr.layers[1].trainable_variables) - len(model_lr.layers[1].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]) + ) + + # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer + self.assertEqual(len(model_lr.layers[1].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]) + ) + + # same logic as above 
+ self.assertEqual(len(model_lr.layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]) + ) + + def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): From 4c7cd953269adbd0f2b3fd1b03bf007850edf22a Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 12:57:43 -0500 Subject: [PATCH 062/106] reformatted --- .../discriminative_layer_training.py | 16 ++++++----- .../discriminative_layer_training_test.py | 27 ++++++++++--------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index bfb55f14ae..88af44fecb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -272,13 +272,15 @@ def get_config(self): for now, it returns the config values of itself and base optimizers """ - logging.warning("""Discriminative Training Optimzer depends on its attached model + logging.warning( + """Discriminative Training Optimzer depends on its attached model It will behave differently on the same model if the lr mult attributes are not set in the same way Currently, this method does not support preserving optimizer's state during training - """) + """ + ) config = super().get_config() - config['base_optimizer'] = self.opt_class - config['learning_rate'] = self.learning_rate + config["base_optimizer"] = self.opt_class + config["learning_rate"] = self.learning_rate for key, value in self.kwargs: config[key] = value @@ -289,9 +291,11 @@ def get_config(self): def from_config(cls, config, model): """For this to work, you need to pass the same model to the optimizer""" - logging.warning("""Discriminative Training Optimzer depends on its attached model + logging.warning( + """Discriminative Training Optimzer depends on its attached model It will behave differently on the same model if the lr mult attributes are not set in the same way Currently, this method does not support preserving optimizer's state during training - """) + """ + ) return cls(**config, model=model) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index c6d9327425..7b5d1afe75 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -303,7 +303,7 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): learning_rate = 0.01 model_lr = model_fn() - #set lr mults + # set lr mults model_lr.layers[1].lr_mult = 0.3 model_lr.layers[1].layers[-1].lr_mult = 0.1 model_lr.layers[-1].lr_mult = 0.5 @@ -313,24 +313,25 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - # we expect trainable vars at 0.3 to be reduced by the amount at 0.1 # this tests that the 0.3 lr mult does not override the 0.1 lr mult - self.assertEqual(len(model_lr.layers[1].trainable_variables) - len(model_lr.layers[1].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]) - ) + self.assertEqual( + len(model_lr.layers[1].trainable_variables) + - len(model_lr.layers[1].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), + ) # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars 
of that layer - self.assertEqual(len(model_lr.layers[1].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]) - ) + self.assertEqual( + len(model_lr.layers[1].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), + ) # same logic as above - self.assertEqual(len(model_lr.layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]) - ) - - + self.assertEqual( + len(model_lr.layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), + ) def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): From 9ccd67a6d13c21cb3bd6a9ee5047bc1fef8970ce Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 13:54:39 -0500 Subject: [PATCH 063/106] fixed layer references from 1 to 0 because input layer isn't counted as an actual layer in the layer list --- .../discriminative_layer_training_test.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 7b5d1afe75..53056a784f 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -225,11 +225,11 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( model = model_fn() # we use layer 1 instead of 0 bc layer 0 is just an input layer - model.layers[1].trainable = False + model.layers[0].trainable = False model.compile(loss=loss, optimizer=opt(learning_rate)) model_lr = model_fn() - model_lr.layers[1].lr_mult = 0.0 + model_lr.layers[0].lr_mult = 0.0 d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) @@ -304,8 +304,8 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): model_lr = model_fn() # set lr mults - model_lr.layers[1].lr_mult = 0.3 - model_lr.layers[1].layers[-1].lr_mult = 0.1 + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 model_lr.layers[-1].lr_mult = 0.5 d_opt = DiscriminativeLayerOptimizer( @@ -316,14 +316,14 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): # we expect trainable vars at 0.3 to be reduced by the amount at 0.1 # this tests that the 0.3 lr mult does not override the 0.1 lr mult self.assertEqual( - len(model_lr.layers[1].trainable_variables) - - len(model_lr.layers[1].layers[-1].trainable_variables), + len(model_lr.layers[0].trainable_variables) + - len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), ) # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer self.assertEqual( - len(model_lr.layers[1].layers[-1].trainable_variables), + len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), ) From 9437d575f4548de5d162f0d59dfd08c6a63b1f22 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 13:55:45 -0500 Subject: [PATCH 064/106] reformatted --- .../optimizers/discriminative_layer_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 88af44fecb..ebdc5c02f3 100644 --- 
a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -274,8 +274,8 @@ def get_config(self): logging.warning( """Discriminative Training Optimzer depends on its attached model - It will behave differently on the same model if the lr mult attributes are not set in the same way - Currently, this method does not support preserving optimizer's state during training + It will behave differently on the same model if the lr mult attributes are not set in the same way + Currently, this method does not support preserving optimizer's state during training """ ) config = super().get_config() @@ -294,7 +294,7 @@ def from_config(cls, config, model): logging.warning( """Discriminative Training Optimzer depends on its attached model It will behave differently on the same model if the lr mult attributes are not set in the same way - Currently, this method does not support preserving optimizer's state during training + Currently, this method does not support preserving optimizer's state during training """ ) From 0ba9348af6d17953f54324e96c5a95ac956b8028 Mon Sep 17 00:00:00 2001 From: Hongya Date: Wed, 12 Feb 2020 14:20:32 -0500 Subject: [PATCH 065/106] increased lr and epochs because learning was happning, but assertless tolerance too low --- .../optimizers/discriminative_layer_training_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 53056a784f..a08a1d29ab 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -284,17 +284,17 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): learning_rate = 0.01 model_lr = model_fn() - # we set model to lrmult 0 and layer one to lrmult 0.5 + # we set model to lrmult 0 and layer one to lrmult 5 # if layer one is trainable, then the loss should decrease model_lr.lr_mult = 0.00 - model_lr.layers[-1].lr_mult = 0.5 + model_lr.layers[-1].lr_mult = 3 d_opt = DiscriminativeLayerOptimizer( opt, model_lr, verbose=False, learning_rate=learning_rate ) model_lr.compile(loss=loss, optimizer=d_opt) - loss_values = get_losses(_get_train_results(model_lr, epochs=4)) + loss_values = get_losses(_get_train_results(model_lr, epochs=5)) self.assertLess(loss_values[-1], loss_values[0]) def _test_variables_get_assigned(self, model_fn, loss, opt): From 126b5d4078191c01324fc704be4fe6594d5297cf Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 13 Feb 2020 15:39:35 -0500 Subject: [PATCH 066/106] attempting to use run distributed from test utils --- .../optimizers/discriminative_layer_training_test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index a08a1d29ab..bba62837fc 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -358,8 +358,11 @@ def test_wrap(method, devices, **kwargs): def single(self): return method(self, **kwargs) + devices = devices + + @test_utils.run_distributed(2) @test_utils.run_in_graph_and_eager_modes - @run_distributed(devices) + # @run_distributed(devices) def distributed(self): return method(self, **kwargs) @@ -392,11 +395,12 @@ def 
generate_tests(devices): if __name__ == "__main__": - devices = test_utils.create_virtual_devices(2) + # create devices to avoid cannot create devices error + # devices = test_utils.create_virtual_devices(2) # save models so weights are always the same toy_cnn(first_run=True) toy_rnn(first_run=True) - generate_tests(devices) + generate_tests(devices=None) tf.test.main() From 6e560bca685cde95f00671213e2ace076fd05607 Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 13 Feb 2020 16:00:32 -0500 Subject: [PATCH 067/106] removed tutorial --- .../Discriminative_Layer_Training.ipynb | 458 ------------------ 1 file changed, 458 deletions(-) delete mode 100644 docs/tutorials/Discriminative_Layer_Training.ipynb diff --git a/docs/tutorials/Discriminative_Layer_Training.ipynb b/docs/tutorials/Discriminative_Layer_Training.ipynb deleted file mode 100644 index 2262888c32..0000000000 --- a/docs/tutorials/Discriminative_Layer_Training.ipynb +++ /dev/null @@ -1,458 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "Discriminative Layer Training.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.3" - }, - "accelerator": "GPU" - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Tce3stUlHN0L" - }, - "source": [ - "##### Copyright 2019 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "tuOe1ymfHZPu", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "qFdPvlXBOdUN" - }, - "source": [ - "# Title" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MfBg1C5NB3X0" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "r6P32iYYV27b" - }, - "source": [ - "[Update button links]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xHxb-dlhMIzW" - }, - "source": [ - "## Overview\n", - "\n", - "This tutorial will demonstrate how to implement discriminative layer training and how it can help in transfer learning. \n", - "\n", - "In this example, we will fine tune a pretrained imagenet resnet50 to classify a subset of the cifar 100 dataset, a tanks vs trains dataset. \n", - "\n", - "This tutorial will demonstrate that discriminative layer training helps improves training speed. The intuition is that lower layers are more generalizable and should be preserved, while higher layers are task specific. Setting a lower learning rate for the lower layers helps preserve general features for use by the high layers and prevent over fitting. \n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MUXex9ctTuDB" - }, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "IqR2PQG4ZaZ0", - "colab": {} - }, - "source": [ - "try:\n", - " %tensorflow_version 2.x\n", - "except:\n", - " pass\n", - "\n", - "import tensorflow as tf\n", - "\n", - "#it will be much faster on gpu, but you can still run this on cpu \n", - "tf.config.list_physical_devices('GPU')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "9ALfZ9Q37Ugn", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!pip install --no-deps tensorflow-addons~=0.7\n", - "!pip install typeguard\n", - "\n", - "#discriminative wrapper not available in current tfa\n", - "!git clone https://github.com/hyang0129/addons" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "XX8Tf1Xo9VRQ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#duct taping to get the imports \n", - "#will be changed or removed once we can import the wrapper from the main tfa modules\n", - "\n", - "import shutil \n", - "\n", - "shutil.copy(\"addons/tensorflow_addons/optimizers/discriminative_layer_training.py\", \"discriminative_layer_training.py\")\n", - "\n", - "from discriminative_layer_training import DiscriminativeWrapper" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5iwuzaUFIWuT", - "colab_type": "text" - }, - "source": [ - "## Prepare Data" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qs_KxybYIkwJ", - "colab_type": "text" - }, - "source": [ - "First, we want to prepare our dataset. 
We will download cifar 100 and only keep data in label 85 and 90 (tanks and trains) " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "eKn-6_Hs90i-", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from skimage import io \n", - "import numpy as np \n", - "\n", - "\n", - "train, test = tf.keras.datasets.cifar100.load_data()\n", - "\n", - "#find the tanks and trains and filter down the dataset\n", - "train_tanksandtrains = np.isin(train[1], [85, 90]).flatten()\n", - "\n", - "train_x = train[0][train_tanksandtrains ]\n", - "train_y = train[1][train_tanksandtrains ]\n", - "#if is tank then 1 else 0 \n", - "train_y = (train_y == 85) * 1\n", - "\n", - "# do the same for test dataset\n", - "test_tanksandtrains = np.isin(test[1], [85, 90]).flatten()\n", - "\n", - "test_x = test[0][test_tanksandtrains] \n", - "test_y = test[1][test_tanksandtrains] \n", - "test_y = (test_y == 85) * 1\n", - "\n", - "\n", - "# show a train \n", - "print(train_y[0])\n", - "io.imshow(train_x[0])\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "47fAulgMLi1R", - "colab_type": "text" - }, - "source": [ - "We will also use some data augmentation because our training set is very small (1k images)" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "nUqGtS-jLhR_", - "colab_type": "code", - "colab": {} - }, - "source": [ - "\n", - "#create a data generator for augmentation \n", - "datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n", - " featurewise_center=True,\n", - " featurewise_std_normalization=True,\n", - " rotation_range=20,\n", - " width_shift_range=0.2,\n", - " height_shift_range=0.2,\n", - " horizontal_flip=True)\n", - "\n", - "#we only have 1000 training images, so we limit the steps to ensure the generator doesn't run out \n", - "epochs = 10 \n", - "steps = 1000//64\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SofAMdvCLgtP", - "colab_type": "text" - }, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zvJ5X6YVLbtu", - "colab_type": "text" - }, - "source": [ - "##Define Model" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "L6Lk1WYzI9kb", - "colab_type": "text" - }, - "source": [ - "This is our model function. It is a simple resnet50 with a pooling layer as the output. This gets fed to our classifer head. We will initialize this for regular training than reinitialize for discriminative layer training. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ZpHgkPVBAjRi", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#build a simple pretrained resnet with a custom head \n", - "def get_model(): \n", - " model = tf.keras.Sequential() \n", - " model.add(tf.keras.applications.resnet50.ResNet50(weights = 'imagenet', \n", - " input_shape = (32,32,3),\n", - " include_top = False, \n", - " pooling = 'avg'))\n", - " model.add(tf.keras.layers.Dense(1))\n", - " model.add(tf.keras.layers.Activation('sigmoid'))\n", - " return model \n", - "\n", - "example_model = get_model()\n", - "example_model.summary()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OLjgxrtULp22", - "colab_type": "text" - }, - "source": [ - "##Training Comparison" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uPklCqBYJfrp", - "colab_type": "text" - }, - "source": [ - "This is regular training. 
We assign a learning rate for the whole model then train for 10 epochs. However, because Adam is a momentum based optimizer, it has a tendency to pick up on irrelevant low level features and overfit the data. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jXVl1R-e9gw_", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#get a copy of the model before any training\n", - "model = get_model() \n", - "\n", - "#define optimizer and compile \n", - "opt = tf.keras.optimizers.Adam(learning_rate = 0.001)\n", - "model.compile(loss = 'binary_crossentropy',\n", - " optimizer = opt)\n", - "\n", - "#fit for 10 epochs\n", - "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", - " steps_per_epoch = steps, \n", - " epochs = epochs, \n", - " validation_data= (test_x, test_y))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TeiIv-bMJ0vc", - "colab_type": "text" - }, - "source": [ - "Now we will attempt to correct that behaviour. We know that the lower level features don't need to be changed greatly, so we assign a lower learning rate multiplier of 0.1. If the overall learning rate is 0.001, then the resnet lower layers will learn at 0.1 * 0.001 = 0.0001, which is slower than the head. " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GQs0j6eVB2mU", - "colab_type": "code", - "colab": {} - }, - "source": [ - "#get a copy of the model before any training\n", - "model = get_model() \n", - "\n", - "\"\"\"\n", - "intuitively, the lower layers contain general features like shapes, etc \n", - "these features shouldn't need to change drastically for this new task \n", - "\"\"\"\n", - "\n", - "#assign layer 0, which is the resnet50 model an lr_mult of 0.1 to reduce lr \n", - "model.layers[0].lr_mult = 0.1\n", - "\n", - "'''\n", - "use the wrapper around an Adam class (do not pass an instance)\n", - "you can pass other kwargs to the wrapper, they will go straight to the \n", - "base_optimizer. This is because the wrapper creates a copy of the base_optimizer\n", - "for each unique learning rate multiplier \n", - "'''\n", - "opt = DiscriminativeWrapper(base_optimizer = tf.keras.optimizers.Adam, \n", - " model = model, \n", - " learning_rate = 0.001, )\n", - "\n", - "#compile in the same way as a regular model \n", - "model.compile(loss = 'binary_crossentropy',\n", - " optimizer = opt)\n", - "\n", - "#fit in the same way as a regular model\n", - "model.fit(datagen.flow(train_x, train_y, batch_size=64), \n", - " steps_per_epoch = steps, \n", - " epochs = epochs, \n", - " validation_data= (test_x, test_y))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "J_ruHnAsKKwq", - "colab_type": "text" - }, - "source": [ - "Based on the results, you can see that slowing down the lower layers can help in transfer learning. This method requires more hyper parameter tuning, but can save you a lot of time for transfer learning tasks. By lowering the learning rate for lower layers, you can preserve the more generalizable features and allow your model to generalize better. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "caAItxLmMttp", - "colab_type": "text" - }, - "source": [ - "I hope you find this tutorial helpful and find awesome ways to apply transfer learning and discriminative layer learning. 
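As a quick illustrative aside (a minimal sketch; the multipliers and base learning rate are the ones used in the cells above), the effective learning rate of each layer is simply the wrapper's learning rate scaled by that layer's lr_mult:

base_lr = 0.001        # learning_rate passed to the DiscriminativeWrapper
backbone_mult = 0.1    # lr_mult assigned to model.layers[0] (the resnet50)
head_mult = 1.0        # layers without an lr_mult default to 1.0

print("backbone effective lr:", base_lr * backbone_mult)  # roughly 1e-4
print("head effective lr:", base_lr * head_mult)          # 1e-3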
" - ] - } - ] -} \ No newline at end of file From 44b6300d85a05dd5a6ce6a0adfa41a614d208e3a Mon Sep 17 00:00:00 2001 From: Hongya Date: Thu, 13 Feb 2020 16:04:34 -0500 Subject: [PATCH 068/106] switched to alternative distributed training method --- .../optimizers/discriminative_layer_training_test.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index bba62837fc..bb93ea48ef 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -360,9 +360,10 @@ def single(self): devices = devices - @test_utils.run_distributed(2) + # test utils run distributed results in a cannot RuntimeError: Virtual devices cannot be modified after being initialized + # @test_utils.run_distributed(2) @test_utils.run_in_graph_and_eager_modes - # @run_distributed(devices) + @run_distributed(devices) def distributed(self): return method(self, **kwargs) @@ -396,11 +397,11 @@ def generate_tests(devices): if __name__ == "__main__": # create devices to avoid cannot create devices error - # devices = test_utils.create_virtual_devices(2) + devices = test_utils.create_virtual_devices(2) # save models so weights are always the same toy_cnn(first_run=True) toy_rnn(first_run=True) - generate_tests(devices=None) + generate_tests(devices=devices) tf.test.main() From 360f2ce08af99b29fa684a44f9f5f7bbf463a046 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 11:42:10 -0500 Subject: [PATCH 069/106] trying to use run distributed without graph and eager --- .../optimizers/discriminative_layer_training_test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index bb93ea48ef..106502c3df 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -361,9 +361,9 @@ def single(self): devices = devices # test utils run distributed results in a cannot RuntimeError: Virtual devices cannot be modified after being initialized - # @test_utils.run_distributed(2) - @test_utils.run_in_graph_and_eager_modes - @run_distributed(devices) + @test_utils.run_distributed(2) + # @test_utils.run_in_graph_and_eager_modes + # @run_distributed(devices) def distributed(self): return method(self, **kwargs) @@ -397,8 +397,8 @@ def generate_tests(devices): if __name__ == "__main__": # create devices to avoid cannot create devices error - devices = test_utils.create_virtual_devices(2) - + # devices = test_utils.create_virtual_devices(2) + devices = None # save models so weights are always the same toy_cnn(first_run=True) toy_rnn(first_run=True) From e809eb907827947a7cf642aafb3d34c16a47d989 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 12:24:40 -0500 Subject: [PATCH 070/106] trying to use run_distributed --- .../optimizers/discriminative_layer_training_test.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 106502c3df..8c102be4ea 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ 
b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -200,6 +200,7 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) + @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """confirm that discriminative learning is almost the same as regular learning""" learning_rate = 0.01 @@ -214,6 +215,7 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_distributed(2) def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self, model_fn, loss, opt ): @@ -237,6 +239,7 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_distributed(2) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): """confirm 0 lr_mult for the model is the same as model not trainable this also confirms that lr_mult on the model level is propagated to all sublayers and their variables @@ -256,6 +259,7 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): # only two epochs because we expect no training to occur, thus losses shouldn't change anyways self._assert_training_losses_are_close(model, model_lr, epochs=2) + @test_utils.run_distributed(2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr this also confirms that lr_mult on the model level is propagated to all sublayers and their variables @@ -275,6 +279,7 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_distributed(2) def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): """confirm that model trains with lower lr on specific layer while a different lr_mult is applied everywhere else @@ -297,6 +302,7 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): loss_values = get_losses(_get_train_results(model_lr, epochs=5)) self.assertLess(loss_values[-1], loss_values[0]) + @test_utils.run_distributed(2) def _test_variables_get_assigned(self, model_fn, loss, opt): """confirm that variables do get an lr_mult attribute and that they get the correct one """ @@ -333,6 +339,7 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), ) + def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): if callable(method) and name[:4] == "test": @@ -361,8 +368,8 @@ def single(self): devices = devices # test utils run distributed results in a cannot RuntimeError: Virtual devices cannot be modified after being initialized - @test_utils.run_distributed(2) - # @test_utils.run_in_graph_and_eager_modes + # @test_utils.run_distributed(2) + @test_utils.run_in_graph_and_eager_modes # @run_distributed(devices) def distributed(self): return method(self, **kwargs) From cb30bbb848404e8534b97a6bef1aae15064788c1 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 12:48:44 -0500 Subject: [PATCH 071/106] seems that doing any tensorstuff before tf.test.main creates the issue. 
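(A minimal sketch of the device-ordering constraint behind the RuntimeError quoted in the surrounding diffs; it assumes the TF 2.1-era experimental API rather than the test_utils.create_virtual_devices helper, whose internals are not shown here.)

import tensorflow as tf

# logical/virtual devices must be configured before any op, tensor, or model
# construction initializes the physical devices
cpus = tf.config.experimental.list_physical_devices("CPU")
tf.config.experimental.set_virtual_device_configuration(
    cpus[0],
    [
        tf.config.experimental.VirtualDeviceConfiguration(),
        tf.config.experimental.VirtualDeviceConfiguration(),
    ],
)

# doing any tensor work first leaves the devices initialized, and the call above
# then fails with: "Virtual devices cannot be modified after being initialized"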
changed models to auto check if weights exist and create or load --- .../optimizers/discriminative_layer_training_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 8c102be4ea..5cb61d958c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -43,7 +43,7 @@ def toy_cnn(first_run=False): cnn_model_path = os.path.join(tempfile.gettempdir(), "cnn.h5") - if first_run: + if not os.path.exists(cnn_model_path): bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" ) @@ -92,7 +92,7 @@ def toy_rnn(first_run=False): """ rnn_model_path = os.path.join(tempfile.gettempdir(), "rnn.h5") - if first_run: + if not os.path.exists(rnn_model_path): # pretend that net is a pretrained lstm of some sort net = tf.keras.Sequential() @@ -407,8 +407,8 @@ def generate_tests(devices): # devices = test_utils.create_virtual_devices(2) devices = None # save models so weights are always the same - toy_cnn(first_run=True) - toy_rnn(first_run=True) + # toy_cnn(first_run=True) + # toy_rnn(first_run=True) generate_tests(devices=devices) tf.test.main() From d03c5680bd0a23ab21035ab470bdd93727c44d93 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 13:10:06 -0500 Subject: [PATCH 072/106] forgot to return a model on first run of model fn --- .../optimizers/discriminative_layer_training_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 5cb61d958c..7bad5f44c5 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -65,7 +65,7 @@ def toy_cnn(first_run=False): model.save(cnn_model_path) # this creates a model with set weights for testing purposes # most tests will assert equivalency between a model with discriminative training and a model without - return None + return tf.keras.models.load_model(cnn_model_path) else: assert os.path.exists((cnn_model_path)), ( "Could not find h5 file at path %s " % cnn_model_path @@ -127,7 +127,7 @@ def toy_rnn(first_run=False): model.save(rnn_model_path) # this creates a model with set weights for testing purposes # most tests will assert equivalency between a model with discriminative training and a model without - return None + return tf.keras.models.load_model(rnn_model_path) else: assert os.path.exists((rnn_model_path)), ( @@ -339,7 +339,6 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), ) - def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): if callable(method) and name[:4] == "test": From 9f5e7bf2dd0b194e4b4a66585fc23ba18856546f Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 13:34:01 -0500 Subject: [PATCH 073/106] create model weights on init --- .../discriminative_layer_training_test.py | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 
7bad5f44c5..d3a562a2ac 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -25,7 +25,7 @@ import tempfile -def toy_cnn(first_run=False): +def toy_cnn(): """Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -74,7 +74,7 @@ def toy_cnn(first_run=False): return tf.keras.models.load_model(cnn_model_path) -def toy_rnn(first_run=False): +def toy_rnn(): """ Consistently create model with same random weights skip head activation to allow both bce with logits and cce with logits @@ -179,6 +179,16 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): + + def __init__(self, methodName="runTest"): + super().__init__(self, methodName) + + #before running the tests, create model weights for reloading + toy_cnn() + toy_rnn() + #set up again to hopefully prevent the cannot initialize virtual devices error + self.setUp() + def _assert_losses_are_close(self, hist, hist_lr): """higher tolerance for graph and distributed bc unable to run deterministically""" if not tf.executing_eagerly() or tf.distribute.has_strategy(): @@ -200,6 +210,10 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) + + + + @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """confirm that discriminative learning is almost the same as regular learning""" @@ -359,24 +373,20 @@ def decorated(self, *args, **kwargs): return decorator -def test_wrap(method, devices, **kwargs): +def test_wrap(method, **kwargs): @test_utils.run_in_graph_and_eager_modes def single(self): return method(self, **kwargs) - devices = devices - # test utils run distributed results in a cannot RuntimeError: Virtual devices cannot be modified after being initialized - # @test_utils.run_distributed(2) @test_utils.run_in_graph_and_eager_modes - # @run_distributed(devices) def distributed(self): return method(self, **kwargs) return single, distributed -def generate_tests(devices): +def generate_tests(): for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": for model_fn, loss, opt in _zipped_permutes(): @@ -387,7 +397,6 @@ def generate_tests(devices): ) testmethod, testmethod_dist = test_wrap( method=method, - devices=devices, model_fn=model_fn, loss=loss, opt=opt, @@ -402,12 +411,5 @@ def generate_tests(devices): if __name__ == "__main__": - # create devices to avoid cannot create devices error - # devices = test_utils.create_virtual_devices(2) - devices = None - # save models so weights are always the same - # toy_cnn(first_run=True) - # toy_rnn(first_run=True) - - generate_tests(devices=devices) + generate_tests() tf.test.main() From 7bb37deea4b982c64933a5af6b76a4ce55fd662f Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 13:50:43 -0500 Subject: [PATCH 074/106] changed how args are passed for testcase --- .../optimizers/discriminative_layer_training_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index d3a562a2ac..6f9404e45d 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ 
b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -180,8 +180,8 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): - def __init__(self, methodName="runTest"): - super().__init__(self, methodName) + def __init__(self, *args, **kwargs): + super().__init__(self, *args, **kwargs) #before running the tests, create model weights for reloading toy_cnn() From 6e4c78e21dc6845ff3e7ddf9c494fbea5abd8700 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 13:51:44 -0500 Subject: [PATCH 075/106] changed how args are passed for testcase --- .../optimizers/discriminative_layer_training_test.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 6f9404e45d..32e5b955d0 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -183,10 +183,10 @@ class DiscriminativeLearningTest(tf.test.TestCase): def __init__(self, *args, **kwargs): super().__init__(self, *args, **kwargs) - #before running the tests, create model weights for reloading + # before running the tests, create model weights for reloading toy_cnn() toy_rnn() - #set up again to hopefully prevent the cannot initialize virtual devices error + # set up again to hopefully prevent the cannot initialize virtual devices error self.setUp() def _assert_losses_are_close(self, hist, hist_lr): @@ -210,10 +210,6 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) - - - - @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """confirm that discriminative learning is almost the same as regular learning""" @@ -372,20 +368,17 @@ def decorated(self, *args, **kwargs): return decorator - def test_wrap(method, **kwargs): @test_utils.run_in_graph_and_eager_modes def single(self): return method(self, **kwargs) - @test_utils.run_in_graph_and_eager_modes def distributed(self): return method(self, **kwargs) return single, distributed - def generate_tests(): for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": From 99324be60caa0ab9d2b9c36e9d2b5e7148fad404 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 14:28:30 -0500 Subject: [PATCH 076/106] try fix init --- .../optimizers/discriminative_layer_training_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 32e5b955d0..ebcab1f395 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -181,7 +181,7 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): def __init__(self, *args, **kwargs): - super().__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) # before running the tests, create model weights for reloading toy_cnn() @@ -368,6 +368,7 @@ def decorated(self, *args, **kwargs): return decorator + def test_wrap(method, **kwargs): @test_utils.run_in_graph_and_eager_modes def single(self): @@ -379,6 +380,7 @@ def distributed(self): return single, distributed + def 
generate_tests(): for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": From 4e1ca6de5f8e52f361b4437b6387c0a5edce6885 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 15:07:35 -0500 Subject: [PATCH 077/106] trying to init weights on model properly --- .../discriminative_layer_training_test.py | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index ebcab1f395..ee24cd332e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -179,16 +179,6 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # before running the tests, create model weights for reloading - toy_cnn() - toy_rnn() - # set up again to hopefully prevent the cannot initialize virtual devices error - self.setUp() - def _assert_losses_are_close(self, hist, hist_lr): """higher tolerance for graph and distributed bc unable to run deterministically""" if not tf.executing_eagerly() or tf.distribute.has_strategy(): @@ -210,6 +200,14 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) + def test_a_initialize_model_weights(self): + # this test should run first to initialize the model weights + # there seem to be major issues in initializing model weights on the fly when testing + # so we initialize them and save them to an h5 file and reload them each time + # this ensures that when comparing two runs, they start at the same place + toy_cnn() + toy_rnn() + @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """confirm that discriminative learning is almost the same as regular learning""" @@ -369,7 +367,7 @@ def decorated(self, *args, **kwargs): return decorator -def test_wrap(method, **kwargs): +def test_wrap(method, **kwargs): @test_utils.run_in_graph_and_eager_modes def single(self): return method(self, **kwargs) @@ -391,13 +389,11 @@ def generate_tests(): opt.__name__, ) testmethod, testmethod_dist = test_wrap( - method=method, - model_fn=model_fn, - loss=loss, - opt=opt, + method=method, model_fn=model_fn, loss=loss, opt=opt, ) - # setattr(DiscriminativeLearningTest, testmethodname, testmethod) + # setattr(DiscriminativeLearningTest, testmethodname, testmethod) + setattr( DiscriminativeLearningTest, testmethodname + "_distributed", From 4b84779e9c393d39463fd3f5a4eab870a6297900 Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 15:25:03 -0500 Subject: [PATCH 078/106] trying to init weights on model properly --- .../optimizers/discriminative_layer_training_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index ee24cd332e..47a3307b48 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -200,6 +200,7 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) 
self._assert_losses_are_close(hist, hist_lr) + @test_utils.run_in_graph_and_eager_modes def test_a_initialize_model_weights(self): # this test should run first to initialize the model weights # there seem to be major issues in initializing model weights on the fly when testing From 2041767d1c38a71ee6698e2ae962652bf3ee719c Mon Sep 17 00:00:00 2001 From: hyang Date: Fri, 14 Feb 2020 15:54:26 -0500 Subject: [PATCH 079/106] just trying all the possibilities --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 47a3307b48..0c55d97ac6 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -200,7 +200,7 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) - @test_utils.run_in_graph_and_eager_modes + @test_utils.run_distributed(1) def test_a_initialize_model_weights(self): # this test should run first to initialize the model weights # there seem to be major issues in initializing model weights on the fly when testing From 04311210f5a8519cbf9e506fa186e896a9890e13 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 17 Feb 2020 11:39:16 -0500 Subject: [PATCH 080/106] trying to fix weights setup --- .../optimizers/discriminative_layer_training_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 0c55d97ac6..f762d11cd0 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -200,7 +200,7 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) - @test_utils.run_distributed(1) + @test_utils.run_distributed(2) def test_a_initialize_model_weights(self): # this test should run first to initialize the model weights # there seem to be major issues in initializing model weights on the fly when testing From d41495b38e0a8289ef97fec144e53303990f770e Mon Sep 17 00:00:00 2001 From: hyang Date: Tue, 18 Feb 2020 09:42:09 -0500 Subject: [PATCH 081/106] expanded some comments for some tests --- .../discriminative_layer_training_test.py | 42 ++++++++----------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index f762d11cd0..fa7c41a18c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -202,10 +202,12 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): @test_utils.run_distributed(2) def test_a_initialize_model_weights(self): - # this test should run first to initialize the model weights - # there seem to be major issues in initializing model weights on the fly when testing - # so we initialize them and save them to an h5 file and reload them each time - # this ensures that when comparing two runs, they start at the 
same place + """this test should run first to initialize the model weights + there seem to be major issues in initializing model weights on the fly when testing + so we initialize them and save them to an h5 file and reload them each time + this ensures that when comparing two runs, they start at the same place + this is not actually testing anything, so it does not need to run in eager and graph + this needs to run distributed or else it will cause the cannot modify virtual devices error""" toy_cnn() toy_rnn() @@ -355,46 +357,36 @@ def _run_tests_in_notebook(self): method(self) -def run_distributed(devices): - def decorator(f): - def decorated(self, *args, **kwargs): - logical_devices = devices - strategy = tf.distribute.MirroredStrategy(logical_devices) - with strategy.scope(): - f(self, *args, **kwargs) - - return decorated - - return decorator - - def test_wrap(method, **kwargs): - @test_utils.run_in_graph_and_eager_modes - def single(self): - return method(self, **kwargs) + # wrap every test to run in graph and eager @test_utils.run_in_graph_and_eager_modes - def distributed(self): + def test(self): return method(self, **kwargs) - return single, distributed + return test def generate_tests(): + # generate tests for each permutation in the zipped permutes + # this separates tests for each permuatation of model, optimizer, and loss for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": for model_fn, loss, opt in _zipped_permutes(): + + # name the test as test_testname_model_loss_optimizer testmethodname = name[1:] + "_%s_%s_%s" % ( model_fn.__name__, loss.name, opt.__name__, ) - testmethod, testmethod_dist = test_wrap( + + # apply run in egaer and graph modes to each test + testmethod_dist = test_wrap( method=method, model_fn=model_fn, loss=loss, opt=opt, ) - # setattr(DiscriminativeLearningTest, testmethodname, testmethod) - + # we only run distributed tests setattr( DiscriminativeLearningTest, testmethodname + "_distributed", From 5883b75779d6e4dc40a81f44558020ce6b03f027 Mon Sep 17 00:00:00 2001 From: hyang Date: Mon, 24 Feb 2020 15:57:54 -0500 Subject: [PATCH 082/106] fixed some docstrings and expanded on some comments --- .../discriminative_layer_training.py | 60 ++++++++++--------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index ebdc5c02f3..efc03ac42c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -21,23 +21,23 @@ class DiscriminativeModelManager: - """Class for grouping functions related to model lr_mult management""" + """Class for grouping functions related to model lr_mult management.""" @staticmethod def _get_layers(layer): - """Helper method to access a layer's sublayers as a list or return an empty list + """Helper method to access a layer's sublayers as a list or return an empty list. """ - return getattr(layer, "layers", []) + return getattr(layer, "layers", None) @staticmethod def _get_lr_mult(layer): - """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set + """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set. 
""" return getattr(layer, "lr_mult", 1.0) @staticmethod def _assign_lr_mult(layer, lr_mult): - """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set + """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set. """ if not hasattr(layer, "lr_mult"): layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult @@ -52,10 +52,9 @@ def _assign_lr_mult(layer, lr_mult): @staticmethod def _recursively_assign_sublayer_lr_mult(layer): - - """Helper method iterate through all nested layers of an object that behaves like a layer or model + """Helper method iterate through all nested layers of an object that behaves like a layer or model. By default, we want to propagate the lr mult to the lower layers. - + Note that this function always returns a list of the lowest sublayers. https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python """ @@ -63,9 +62,8 @@ def _recursively_assign_sublayer_lr_mult(layer): mult = DiscriminativeModelManager._get_lr_mult(layer) layers = DiscriminativeModelManager._get_layers(layer) - if len(layers) > 0: + if layers is not None: for sublayer in layers: - # we always assign the lr mult to the sublayers of the current layer # the assign method will avoid overwritting lr mults # so if you have a resnet and you specifically assign the first resnet layer @@ -86,17 +84,17 @@ def _recursively_assign_sublayer_lr_mult(layer): @staticmethod def _apply_lr_mult_to_var(layer): - """Helper method to apply the lr mult to the trainable variables of a layer + """Helper method to apply the lr mult to the trainable variables of a layer. """ lr_mult = DiscriminativeModelManager._get_lr_mult(layer) for var in layer.trainable_variables: var.lr_mult = lr_mult - # the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor - # there's not benefit in setting the lr_mult as a variable because it does not interact with tensors + # the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor. + # there's not benefit in setting the lr_mult as a variable because it does not interact with tensors. @staticmethod def _check_for_lr_mult(layer, verbose=True, propagate=True): - """Identify which layers have an LR mult not equal to 1 + """Identify which layers have an LR mult not equal to 1. """ layers_with_lr_mult = [] @@ -114,20 +112,19 @@ def _check_for_lr_mult(layer, verbose=True, propagate=True): @staticmethod def _compute_params(var_list): - """helps compute params to provide a summary that aligns with model.summary() + """helps compute params to provide a summary that aligns with model.summary(). """ return np.sum([np.prod(list(var.shape)) for var in var_list]) @staticmethod def _prepare_model(model, verbose=True): - """Prepares a model for disc training + """Prepares a model for disc training. """ layers_with_lr_mult = DiscriminativeModelManager._check_for_lr_mult( model, verbose=verbose ) if len(layers_with_lr_mult) == 0: - logging.warning( """No Layer has been assigned an lr_mult attribute != 1.0 Discriminative Layer Training will apply the same learning rate to all layers @@ -135,6 +132,14 @@ def _prepare_model(model, verbose=True): """ ) + # lr mult assignment occurs in two steps to ensure propagation occurs correctly. 
+ # In this example, given a model with layers : variables similar to { L1 : V1 , L2 : {L3 : V3, L4 : V4 ,} ,}, + # L2 represents a nested layer (usually a tf.keras.Model) and does not directly own any variables. + # If the user assigns L2 an lr mult x, x is propaged to L3 and L4 and then V3 and V4 is assigned lr mult of x. + # If the user assigned l2 lr mult x and L3 lr mult y, then lr mult x is propaged to L4 + # while L3 keeps its lr mult of y. Finally, the variables are assigned by x to V4 and y to V3. + # If a user doesn't assign an lr mult to L1, then L1 gets lr mult of 1.0 and so does V1. + # This is preferred because I don't want the optimizer to do a hasattr check on the variables. for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( model ): @@ -160,7 +165,7 @@ class DiscriminativeLayerOptimizer(tf.keras.optimizers.Optimizer): @typechecked def __init__( self, - base_optimizer: object, + base_optimizer: tf.keras.optimizers.Optimizer.__class__, model: tf.keras.Model, learning_rate: float, verbose: bool = True, @@ -184,7 +189,7 @@ def __init__( Performance is similar to using a single copy of the base optimizer as gradients are computed only once and then passed on. - This optimizer does not support from_config or get_config. To try to preserve the state, you may + This optimizer does preserve optimizer state. To try to preserve the state, you may serialize the optimizers in the optimizer_group attribute in an instance of this class. Example usage @@ -219,6 +224,7 @@ def __init__( References - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) """ + assert issubclass( base_optimizer, tf.keras.optimizers.Optimizer ), "Base optimizer must be a class that inherits from tf.keras.optimizers.Optimizer" @@ -243,8 +249,8 @@ def __init__( def apply_gradients(self, grads_and_vars, name=None): """allocates gradients to each optimizer based on the variable's learning rate multiplier - then applies the gradients. In graph mode, it returns 1 operation per optimizer - Please use the model.fit method instead of accessing this directly + then applies the gradients. In graph mode, it returns 1 operation per optimizer. + Please use the model.fit method instead of accessing this directly. """ # create gradvar buckets for each opt @@ -273,9 +279,9 @@ def get_config(self): """ logging.warning( - """Discriminative Training Optimzer depends on its attached model - It will behave differently on the same model if the lr mult attributes are not set in the same way - Currently, this method does not support preserving optimizer's state during training + """Discriminative Training Optimzer depends on its attached model. + It will behave differently on the same model if the lr mult attributes are not set in the same way. + Currently, this method does not support preserving optimizer's state during training. """ ) config = super().get_config() @@ -292,9 +298,9 @@ def from_config(cls, config, model): """For this to work, you need to pass the same model to the optimizer""" logging.warning( - """Discriminative Training Optimzer depends on its attached model - It will behave differently on the same model if the lr mult attributes are not set in the same way - Currently, this method does not support preserving optimizer's state during training + """Discriminative Training Optimzer depends on its attached model. + It will behave differently on the same model if the lr mult attributes are not set in the same way. 
+ Currently, this method does not support preserving optimizer's state during training. """ ) From 00cba8ca97ca52f3bf626745ad8a0b647b3590ec Mon Sep 17 00:00:00 2001 From: hyang Date: Mon, 24 Feb 2020 17:08:31 -0500 Subject: [PATCH 083/106] reformatted files expanded on many comments and added full stops fixed get/from_config based on optimzierv2 added model checkpoint test --- .../discriminative_layer_training.py | 77 +++++--- .../discriminative_layer_training_test.py | 165 +++++++++++------- 2 files changed, 156 insertions(+), 86 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index efc03ac42c..562c2cf2e6 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -40,14 +40,14 @@ def _assign_lr_mult(layer, lr_mult): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set. """ if not hasattr(layer, "lr_mult"): - layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult + layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult. # this method should be called after the user has already assigned some lr mults - # to some layers. We just don't want to override any lr mults they assigned + # to some layers. We just don't want to override any lr mults they assigned. else: - # we pass here because of propagation to nested layers + # we pass here because of propagation to nested layers. # users should be able to speficy model.layers[0].layers[0].lr_mult = 0.01 # and model.layers[0].lr_mult = 0.1, such that the model.layers[0].layers[0] - # keeps its assigned lr mult of 0.01 + # keeps its assigned lr mult of 0.01. pass @staticmethod @@ -64,15 +64,15 @@ def _recursively_assign_sublayer_lr_mult(layer): if layers is not None: for sublayer in layers: - # we always assign the lr mult to the sublayers of the current layer - # the assign method will avoid overwritting lr mults - # so if you have a resnet and you specifically assign the first resnet layer + # we always assign the lr mult to the sublayers of the current layer. + # the assign method will avoid overwritting lr mults. + # so, if you have a resnet and you specifically assign the first resnet layer # to have lr_mult of 0.01 and the resnet model to have lr_mult of 0.1, all # resnet layers except the first should get lr_mult of 0.1 and the first - # keeps its lr_mult of 0.01 + # keeps its lr_mult of 0.01. DiscriminativeModelManager._assign_lr_mult(sublayer, mult) - # recursively iterate through the nested layers + # recursively iterate through the nested layers. for ( nested_sublayer ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( @@ -85,12 +85,15 @@ def _recursively_assign_sublayer_lr_mult(layer): @staticmethod def _apply_lr_mult_to_var(layer): """Helper method to apply the lr mult to the trainable variables of a layer. + This is necessary because the optimizer does not receive layers during optimization and only receives + variable objects. The lr mult attribute on the variable allows the disc optimizer to send the variable to + the correct learning rate. """ lr_mult = DiscriminativeModelManager._get_lr_mult(layer) for var in layer.trainable_variables: var.lr_mult = lr_mult # the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor. 
- # there's not benefit in setting the lr_mult as a variable because it does not interact with tensors. + # there's no benefit in setting the lr_mult as a variable because it does not interact with tensors. @staticmethod def _check_for_lr_mult(layer, verbose=True, propagate=True): @@ -138,8 +141,8 @@ def _prepare_model(model, verbose=True): # If the user assigns L2 an lr mult x, x is propaged to L3 and L4 and then V3 and V4 is assigned lr mult of x. # If the user assigned l2 lr mult x and L3 lr mult y, then lr mult x is propaged to L4 # while L3 keeps its lr mult of y. Finally, the variables are assigned by x to V4 and y to V3. - # If a user doesn't assign an lr mult to L1, then L1 gets lr mult of 1.0 and so does V1. - # This is preferred because I don't want the optimizer to do a hasattr check on the variables. + # This two step method ensures that each variable is assigned an lr mult exactly 1 time. + for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( model ): @@ -173,7 +176,7 @@ def __init__( *args, **kwargs ): - """Discriminative Layer Training Wrapper + """Discriminative Layer Training Wrapper. Discriminative layer training is a technique that applies different learning rates to different layers in a model. Generally, a lower learning rate is applied to the @@ -189,8 +192,9 @@ def __init__( Performance is similar to using a single copy of the base optimizer as gradients are computed only once and then passed on. - This optimizer does preserve optimizer state. To try to preserve the state, you may - serialize the optimizers in the optimizer_group attribute in an instance of this class. + Currently, this optimizer does not preserve optimizer state. Its state preservation methods will + differ significantly from a standard optimizer because it is a wrapper for multiple optimizers each with + their own learning rate, hyper parameters, and slots. Example usage model = tf.keras.Sequential() @@ -273,16 +277,23 @@ def apply_gradients(self, grads_and_vars, name=None): ] def get_config(self): - """This method cannot effectively return the optimizer configuration because - that configuration depends on the model and base optimizer - for now, it returns the config values of itself and base optimizers + """Returns the config of the optimizer. + + An optimizer config is a Python dictionary (serializable) + containing the configuration of an optimizer. + The same optimizer can be reinstantiated later + (without any saved state) from this configuration. + + Please note that this optimizer requires a model for instantiation or calling the from_config class method. + + Returns: + Python dictionary. """ logging.warning( - """Discriminative Training Optimzer depends on its attached model. - It will behave differently on the same model if the lr mult attributes are not set in the same way. - Currently, this method does not support preserving optimizer's state during training. - """ + """Discriminative Training Optimzer depends on its attached model. + It will behave differently on the same model if the lr mult attributes are not set in the same way. + """ ) config = super().get_config() config["base_optimizer"] = self.opt_class @@ -295,13 +306,25 @@ def get_config(self): @classmethod def from_config(cls, config, model): - """For this to work, you need to pass the same model to the optimizer""" + """Creates an optimizer from its config. + This method is the reverse of `get_config`, + capable of instantiating the same optimizer from the config + dictionary. 
- logging.warning( - """Discriminative Training Optimzer depends on its attached model. - It will behave differently on the same model if the lr mult attributes are not set in the same way. - Currently, this method does not support preserving optimizer's state during training. + Please note that this optimizer requires a model for instantiation or calling the from_config class method. + + Arguments: + config: A Python dictionary, typically the output of get_config. + model: An instance of tf.keras.Model. + + Returns: + An optimizer instance. """ + + logging.warning( + """Discriminative Training Optimzer depends on its attached model. + It will behave differently on the same model if the lr mult attributes are not set in the same way. + """ ) return cls(**config, model=model) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index fa7c41a18c..9885ece890 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -26,14 +26,14 @@ def toy_cnn(): - """Consistently create model with same random weights - skip head activation to allow both bce with logits and cce with logits + """Consistently create model with same random weights. + skip head activation to allow both bce with logits and cce with logits. The model returned by this function should have identical weights to all other models returned by this function, for the duration of that - continuous integration run + continuous integration run. - Run this function before running the tests and set first run to true + Run this function within a test, but make sure it runs before other tests. model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) @@ -75,17 +75,16 @@ def toy_cnn(): def toy_rnn(): - """ - Consistently create model with same random weights - skip head activation to allow both bce with logits and cce with logits - intended to work with + """Consistently create model with same random weights. + skip head activation to allow both bce with logits and cce with logits. The model returned by this function should have identical weights to all other models returned by this function, for the duration of that - continuous integration run + continuous integration run. - Run this function before running the tests and set first run to true + Run this function within a test, but make sure it runs before other tests. + model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. @@ -94,7 +93,7 @@ def toy_rnn(): if not os.path.exists(rnn_model_path): - # pretend that net is a pretrained lstm of some sort + # pretend this net is a pretrained lstm of some sort net = tf.keras.Sequential() # crop the input shape so the lstm runs faster @@ -138,8 +137,8 @@ def toy_rnn(): def _get_train_results(model, verbose=False, epochs=10): - """Run a training loop and return the results for analysis - model must be compiled first + """Run a training loop and return the results for analysis. + model must be compiled first. 
""" tf.random.set_seed(1) x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) @@ -151,24 +150,23 @@ def _get_train_results(model, verbose=False, epochs=10): def _zipped_permutes(): model_fns = [ - # generally, we want to test that common layers function correctly with discriminative layer training - # dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types - # we also assume that if it works for conv2d, it should work for conv3d by extension + # generally, we want to test that common layers function correctly with discriminative layer training. + # dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types. + # we also assume that if it works for conv2d, it should work for conv3d by extension. # apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? toy_cnn, toy_rnn, ] losses = [ - # additional loss types do not need to be tested + # additional loss types do not need to be tested. # this is because losses affect the gradient tape, which is computed before # the apply_gradients step. This means that the some gradient value is passed on to each opt - # and the gradient calculation is unaffected by which optimizer you are using + # and the gradient calculation is unaffected by which optimizer you are using. tf.keras.losses.CategoricalCrossentropy(from_logits=True), ] optimzers = [ - # additional optimizers can be added for testing - # seems to be timing out. will add SGD back later - # tf.keras.optimizers.SGD, + # additional optimizers can be added for testing. + tf.keras.optimizers.SGD, tf.keras.optimizers.Adam, ] return list(itertools.product(model_fns, losses, optimzers)) @@ -180,7 +178,7 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): - """higher tolerance for graph and distributed bc unable to run deterministically""" + """higher tolerance for graph and distributed bc unable to run deterministically.""" if not tf.executing_eagerly() or tf.distribute.has_strategy(): rtol, atol = 0.05, 1.00 # print('graph or dist') @@ -192,9 +190,9 @@ def _assert_losses_are_close(self, hist, hist_lr): ) def _assert_training_losses_are_close(self, model, model_lr, epochs=10): - """easy way to check if two models train in almost the same way - epochs set to 10 by default to allow momentum methods to pick up momentum and diverge - if the disc training is not working + """easy way to check if two models train in almost the same way. + epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, + if the disc training is not working. """ hist = _get_train_results(model, verbose=False, epochs=epochs) hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) @@ -202,18 +200,19 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): @test_utils.run_distributed(2) def test_a_initialize_model_weights(self): - """this test should run first to initialize the model weights - there seem to be major issues in initializing model weights on the fly when testing - so we initialize them and save them to an h5 file and reload them each time - this ensures that when comparing two runs, they start at the same place - this is not actually testing anything, so it does not need to run in eager and graph - this needs to run distributed or else it will cause the cannot modify virtual devices error""" + """this test should run first to initialize the model weights. 
+ there seem to be major issues in initializing model weights on the fly when testing, + so we initialize them and save them to an h5 file and reload them each time. + this ensures that when comparing two runs, they start at the same place. + this is not actually testing anything, so it does not need to run in eager and graph. + this needs to run distributed or else it will cause the cannot modify virtual devices error.""" toy_cnn() toy_rnn() + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): - """confirm that discriminative learning is almost the same as regular learning""" + """confirm that discriminative learning is almost the same as regular learning.""" learning_rate = 0.01 model = model_fn() model.compile(loss=loss, optimizer=opt(learning_rate)) @@ -226,18 +225,19 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self, model_fn, loss, opt ): - """confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable - this also confirms that lr_mult propagates into that layer's trainable variables - this also confirms that lr_mult does not propagate to the rest of the layers unintentionally + """confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. + this also confirms that lr_mult propagates into that layer's trainable variables. + this also confirms that lr_mult does not propagate to the rest of the layers unintentionally. """ learning_rate = 0.01 model = model_fn() - # we use layer 1 instead of 0 bc layer 0 is just an input layer + # layers 0 represents the pretrained network model.layers[0].trainable = False model.compile(loss=loss, optimizer=opt(learning_rate)) @@ -250,10 +250,11 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): - """confirm 0 lr_mult for the model is the same as model not trainable - this also confirms that lr_mult on the model level is propagated to all sublayers and their variables + """confirm 0 lr_mult for the model is the same as model not trainable. + this also confirms that lr_mult on the model level is propagated to all sublayers and their variables. """ learning_rate = 0.01 model = model_fn() @@ -267,13 +268,14 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - # only two epochs because we expect no training to occur, thus losses shouldn't change anyways + # only two epochs because we expect no training to occur, thus losses shouldn't change anyways. self._assert_training_losses_are_close(model, model_lr, epochs=2) + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): - """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr - this also confirms that lr_mult on the model level is propagated to all sublayers and their variables + """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. + this also confirms that lr_mult on the model level is propagated to all sublayers and their variables. 
""" mult = 0.5 @@ -290,18 +292,19 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): - """confirm that model trains with lower lr on specific layer - while a different lr_mult is applied everywhere else - also confirms that sub layers with an lr mult do not get overridden + """confirm that model trains with lower lr on specific layer, + while a different lr_mult is applied everywhere else. + also confirms that sub layers with an lr mult do not get overridden. """ learning_rate = 0.01 model_lr = model_fn() - # we set model to lrmult 0 and layer one to lrmult 5 - # if layer one is trainable, then the loss should decrease + # we set model to lrmult 0 and layer one to lrmult 5. + # if layer one is trainable, then the loss should decrease. model_lr.lr_mult = 0.00 model_lr.layers[-1].lr_mult = 3 @@ -313,14 +316,15 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): loss_values = get_losses(_get_train_results(model_lr, epochs=5)) self.assertLess(loss_values[-1], loss_values[0]) + @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_variables_get_assigned(self, model_fn, loss, opt): - """confirm that variables do get an lr_mult attribute and that they get the correct one + """confirm that variables do get an lr_mult attribute and that they get the correct one. """ learning_rate = 0.01 model_lr = model_fn() - # set lr mults + # set lr mults. model_lr.layers[0].lr_mult = 0.3 model_lr.layers[0].layers[-1].lr_mult = 0.1 model_lr.layers[-1].lr_mult = 0.5 @@ -330,26 +334,69 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - # we expect trainable vars at 0.3 to be reduced by the amount at 0.1 - # this tests that the 0.3 lr mult does not override the 0.1 lr mult + # we expect trainable vars at 0.3 to be reduced by the amount at 0.1. + # this tests that the 0.3 lr mult does not override the 0.1 lr mult. self.assertEqual( len(model_lr.layers[0].trainable_variables) - len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), ) - # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer + # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. self.assertEqual( len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), ) - # same logic as above + # same logic as above. 
self.assertEqual( len(model_lr.layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), ) + @test_utils.run_in_graph_and_eager_modes + @test_utils.run_distributed(2) + def _test_model_checkpoint(self, model_fn, loss, opt): + """confirm that model does save checkpoints and can load them properly""" + + learning_rate = 0.01 + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(32, 5), dtype=np.float32) + y[:, 0] = 1.0 + + filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_{epoch}") + + callbacks = [ + tf.keras.callbacks.ModelCheckpoint( + filepath=filepath, save_weights_only=True, verbose=1 + ) + ] + + model_lr.fit( + x, + y, + epochs=5, + batch_size=16, + verbose=False, + shuffle=False, + callbacks=callbacks, + ) + + # if this doesn't error out, then loading and checkpointing should be fine + model_lr.load_weights( + filepath=os.path.join(tempfile.gettempdir(), model_fn.__name__ + "4") + ) + def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): if callable(method) and name[:4] == "test": @@ -358,9 +405,8 @@ def _run_tests_in_notebook(self): def test_wrap(method, **kwargs): - # wrap every test to run in graph and eager + """wrap the test method so that it has pre assigned kwargs.""" - @test_utils.run_in_graph_and_eager_modes def test(self): return method(self, **kwargs) @@ -368,25 +414,26 @@ def test(self): def generate_tests(): - # generate tests for each permutation in the zipped permutes - # this separates tests for each permuatation of model, optimizer, and loss + # generate tests for each permutation in the zipped permutes. + # this separates tests for each permuatation of model, optimizer, and loss. for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": for model_fn, loss, opt in _zipped_permutes(): - # name the test as test_testname_model_loss_optimizer + # name the test as test_testname_model_loss_optimizer. testmethodname = name[1:] + "_%s_%s_%s" % ( model_fn.__name__, loss.name, opt.__name__, ) - # apply run in egaer and graph modes to each test + # create test functions that use kwargs mentioned above. testmethod_dist = test_wrap( method=method, model_fn=model_fn, loss=loss, opt=opt, ) - # we only run distributed tests + # set class attributes so we get multiple nicely named tests. + # also all tests are set to run distributed, so append distributed to the end. setattr( DiscriminativeLearningTest, testmethodname + "_distributed", From d14257277b50caf42bd3048fa611d3865b37a8c1 Mon Sep 17 00:00:00 2001 From: hyang Date: Mon, 24 Feb 2020 17:20:33 -0500 Subject: [PATCH 084/106] capitalized comments properly. 
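The hunks below also walk through the core dispatch step: apply_gradients groups every
(gradient, variable) pair by the variable's lr_mult and hands each group to the optimizer
copy built for that multiplier, so each bucket is stepped at its own effective learning
rate. A minimal standalone sketch of that bucketing idea, using illustrative names
(build_optimizer_group, apply_discriminative_gradients) that are not part of this patch:

import tensorflow as tf


def build_optimizer_group(model, base_opt_class, base_lr):
    # One optimizer per distinct lr_mult found on the trainable variables.
    mults = sorted({getattr(v, "lr_mult", 1.0) for v in model.trainable_variables})
    return {m: base_opt_class(learning_rate=base_lr * m) for m in mults}


def apply_discriminative_gradients(group, grads_and_vars):
    # Bucket each (grad, var) pair by the variable's multiplier, then let the
    # matching optimizer apply its own bucket.
    buckets = {m: [] for m in group}
    for grad, var in grads_and_vars:
        buckets[getattr(var, "lr_mult", 1.0)].append((grad, var))
    return [opt.apply_gradients(buckets[m]) for m, opt in group.items() if buckets[m]]

As in the wrapper itself, eager execution applies the updates immediately, while graph
mode simply returns one apply op per optimizer.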
--- .../discriminative_layer_training.py | 46 +++---- .../discriminative_layer_training_test.py | 123 +++++++++--------- 2 files changed, 84 insertions(+), 85 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 562c2cf2e6..71078fbe0b 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -40,12 +40,12 @@ def _assign_lr_mult(layer, lr_mult): """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set. """ if not hasattr(layer, "lr_mult"): - layer.lr_mult = lr_mult # since layer has no lr mult, assign the mult. - # this method should be called after the user has already assigned some lr mults + layer.lr_mult = lr_mult # Since layer has no lr mult, assign the mult. + # This method should be called after the user has already assigned some lr mults # to some layers. We just don't want to override any lr mults they assigned. else: - # we pass here because of propagation to nested layers. - # users should be able to speficy model.layers[0].layers[0].lr_mult = 0.01 + # We pass here because of propagation to nested layers. + # Users should be able to speficy model.layers[0].layers[0].lr_mult = 0.01 # and model.layers[0].lr_mult = 0.1, such that the model.layers[0].layers[0] # keeps its assigned lr mult of 0.01. pass @@ -64,15 +64,15 @@ def _recursively_assign_sublayer_lr_mult(layer): if layers is not None: for sublayer in layers: - # we always assign the lr mult to the sublayers of the current layer. - # the assign method will avoid overwritting lr mults. - # so, if you have a resnet and you specifically assign the first resnet layer + # We always assign the lr mult to the sublayers of the current layer. + # The assign method will avoid overwritting lr mults. + # So, if you have a resnet and you specifically assign the first resnet layer # to have lr_mult of 0.01 and the resnet model to have lr_mult of 0.1, all # resnet layers except the first should get lr_mult of 0.1 and the first # keeps its lr_mult of 0.01. DiscriminativeModelManager._assign_lr_mult(sublayer, mult) - # recursively iterate through the nested layers. + # Recursively iterate through the nested layers. for ( nested_sublayer ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( @@ -92,12 +92,12 @@ def _apply_lr_mult_to_var(layer): lr_mult = DiscriminativeModelManager._get_lr_mult(layer) for var in layer.trainable_variables: var.lr_mult = lr_mult - # the lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor. - # there's no benefit in setting the lr_mult as a variable because it does not interact with tensors. + # The lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor. + # There's no benefit in setting the lr_mult as a variable because it does not interact with tensors. @staticmethod def _check_for_lr_mult(layer, verbose=True, propagate=True): - """Identify which layers have an LR mult not equal to 1. + """Identify which layers have an lr mult not equal to 1. """ layers_with_lr_mult = [] @@ -115,7 +115,7 @@ def _check_for_lr_mult(layer, verbose=True, propagate=True): @staticmethod def _compute_params(var_list): - """helps compute params to provide a summary that aligns with model.summary(). + """Helps compute params to provide a summary that aligns with model.summary(). 
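        The count is the product of each variable's shape dimensions, summed over the group,
        which is why it lines up with the per-layer totals that Keras reports.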
""" return np.sum([np.prod(list(var.shape)) for var in var_list]) @@ -135,7 +135,7 @@ def _prepare_model(model, verbose=True): """ ) - # lr mult assignment occurs in two steps to ensure propagation occurs correctly. + # Lr mult assignment occurs in two steps to ensure propagation occurs correctly. # In this example, given a model with layers : variables similar to { L1 : V1 , L2 : {L3 : V3, L4 : V4 ,} ,}, # L2 represents a nested layer (usually a tf.keras.Model) and does not directly own any variables. # If the user assigns L2 an lr mult x, x is propaged to L3 and L4 and then V3 and V4 is assigned lr mult of x. @@ -206,7 +206,7 @@ def __init__( model.fit(x, y) Arguments - base_optimizer: a class that inherits from tf.keras.optimizers.Optimizer. Do not + base_optimizer: A class that inherits from tf.keras.optimizers.Optimizer. Do not pass an instance of the class. model: tf.keras.Model, The model to be used for discriminative learning. @@ -241,7 +241,7 @@ def __init__( self.learning_rate = learning_rate self.kwargs = kwargs - # find unique lr_mult + # Find unique lr_mult. variable_groups = {var.lr_mult: None for var in model.trainable_variables} self.optimizer_group = [] @@ -252,25 +252,25 @@ def __init__( self.optimizer_group.append(opt) def apply_gradients(self, grads_and_vars, name=None): - """allocates gradients to each optimizer based on the variable's learning rate multiplier + """Allocates gradients to each optimizer based on the variable's learning rate multiplier then applies the gradients. In graph mode, it returns 1 operation per optimizer. Please use the model.fit method instead of accessing this directly. """ - # create gradvar buckets for each opt + # Create gradvar buckets for each opt. gvdict = {} for opt in self.optimizer_group: gvdict[opt.lr_mult] = [] - # load the gradvars into the appropriate bucket + # Load the gradvars into the appropriate bucket. for grad, var in tuple(grads_and_vars): gvdict[var.lr_mult].append((grad, var)) - # return results from each opt - # in eager mode, this will return a list of irrelevant results for each optimizer - # in eager mode, the function apply_gradients actually applies gradients to the model - # in graph mode, this will return a list of tensor ops for each opt - # in graph mode, apply_gradients creates the tensor ops for applying gradients on the graph + # Return results from each opt. + # In eager mode, this will return a list of irrelevant results for each optimizer. + # In eager mode, the function apply_gradients actually applies gradients to the model. + # In graph mode, this will return a list of tensor ops for each opt. + # In graph mode, apply_gradients creates the tensor ops for applying gradients on the graph. return [ opt.apply_gradients(tuple(gvdict[opt.lr_mult])) for opt in self.optimizer_group diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 9885ece890..3705473c25 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -27,7 +27,7 @@ def toy_cnn(): """Consistently create model with same random weights. - skip head activation to allow both bce with logits and cce with logits. + Skip head activation to allow both bce with logits and cce with logits. 
The model returned by this function should have identical weights to all other models returned by this function, for the duration of that @@ -35,7 +35,7 @@ def toy_cnn(): Run this function within a test, but make sure it runs before other tests. - model is intended to work with + Model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. @@ -48,7 +48,7 @@ def toy_cnn(): include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" ) - # take the first few layers so we cover BN, Conv, Pooling ops for testing + # Take the first few layers so we cover BN, Conv, Pooling ops for testing. net = tf.keras.models.Model( inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output ) @@ -63,20 +63,20 @@ def toy_cnn(): ) model.save(cnn_model_path) - # this creates a model with set weights for testing purposes - # most tests will assert equivalency between a model with discriminative training and a model without + # This creates a model with set weights for testing purposes. + # Most tests will assert equivalency between a model with discriminative training and a model without. return tf.keras.models.load_model(cnn_model_path) else: assert os.path.exists((cnn_model_path)), ( "Could not find h5 file at path %s " % cnn_model_path ) - # load the variable initialized model from the disk + # Load the variable initialized model from the disk. return tf.keras.models.load_model(cnn_model_path) def toy_rnn(): """Consistently create model with same random weights. - skip head activation to allow both bce with logits and cce with logits. + Skip head activation to allow both bce with logits and cce with logits. The model returned by this function should have identical weights to all other models returned by this function, for the duration of that @@ -84,7 +84,7 @@ def toy_rnn(): Run this function within a test, but make sure it runs before other tests. - model is intended to work with + Model is intended to work with x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) y = np.zeros(shape = (None, 5), dtype = np.float32) y[:, 0] = 1. @@ -93,25 +93,24 @@ def toy_rnn(): if not os.path.exists(rnn_model_path): - # pretend this net is a pretrained lstm of some sort + # Pretend this net is a pretrained lstm of some sort. net = tf.keras.Sequential() - # crop the input shape so the lstm runs faster - # pretrained need inputshape for weights to be initialized + # Crop the input shape so the lstm runs faster. + # Pretrained need inputshape for weights to be initialized. net.add( tf.keras.layers.Cropping2D( cropping=((8, 8), (12, 12)), input_shape=(32, 32, 3) ) ) - # reshape into a timeseries + # Reshape into a timeseries. net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) - # reduce the length of the time series + # Reduce the length of the time series. net.add(tf.keras.layers.Cropping1D(cropping=(0, 5))) - # reduce dimensions - # we are primarily interested in the bidir lstm layer and its behavior + # We are primarily interested in the bidir lstm layer and its behavior. net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(4))) model = tf.keras.Sequential( @@ -124,21 +123,21 @@ def toy_rnn(): ) model.save(rnn_model_path) - # this creates a model with set weights for testing purposes - # most tests will assert equivalency between a model with discriminative training and a model without + # This creates a model with set weights for testing purposes. 
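        # Reloading from disk, rather than reusing the in-memory object, also guarantees
        # that the two models being compared never share variables.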
+ # Most tests will assert equivalency between a model with discriminative training and a model without. return tf.keras.models.load_model(rnn_model_path) else: assert os.path.exists((rnn_model_path)), ( "Could not find h5 file at path %s " % rnn_model_path ) - # load the variable initialized model from the disk + # Load the variable initialized model from the disk return tf.keras.models.load_model(rnn_model_path) def _get_train_results(model, verbose=False, epochs=10): """Run a training loop and return the results for analysis. - model must be compiled first. + Model must be compiled first. """ tf.random.set_seed(1) x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) @@ -150,22 +149,22 @@ def _get_train_results(model, verbose=False, epochs=10): def _zipped_permutes(): model_fns = [ - # generally, we want to test that common layers function correctly with discriminative layer training. - # dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types. - # we also assume that if it works for conv2d, it should work for conv3d by extension. - # apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? + # Generally, we want to test that common layers function correctly with discriminative layer training. + # Dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types. + # We also assume that if it works for conv2d, it should work for conv3d by extension. + # Apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? toy_cnn, toy_rnn, ] losses = [ - # additional loss types do not need to be tested. - # this is because losses affect the gradient tape, which is computed before + # Additional loss types do not need to be tested. + # This is because losses affect the gradient tape, which is computed before # the apply_gradients step. This means that the some gradient value is passed on to each opt # and the gradient calculation is unaffected by which optimizer you are using. tf.keras.losses.CategoricalCrossentropy(from_logits=True), ] optimzers = [ - # additional optimizers can be added for testing. + # Additional optimizers can be added for testing. tf.keras.optimizers.SGD, tf.keras.optimizers.Adam, ] @@ -178,7 +177,7 @@ def get_losses(hist): class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): - """higher tolerance for graph and distributed bc unable to run deterministically.""" + """Higher tolerance for graph and distributed bc unable to run deterministically.""" if not tf.executing_eagerly() or tf.distribute.has_strategy(): rtol, atol = 0.05, 1.00 # print('graph or dist') @@ -190,8 +189,8 @@ def _assert_losses_are_close(self, hist, hist_lr): ) def _assert_training_losses_are_close(self, model, model_lr, epochs=10): - """easy way to check if two models train in almost the same way. - epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, + """Easy way to check if two models train in almost the same way. + Epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, if the disc training is not working. """ hist = _get_train_results(model, verbose=False, epochs=epochs) @@ -200,19 +199,19 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): @test_utils.run_distributed(2) def test_a_initialize_model_weights(self): - """this test should run first to initialize the model weights. 
- there seem to be major issues in initializing model weights on the fly when testing, + """This test should run first to initialize the model weights. + There seem to be major issues in initializing model weights on the fly when testing, so we initialize them and save them to an h5 file and reload them each time. - this ensures that when comparing two runs, they start at the same place. - this is not actually testing anything, so it does not need to run in eager and graph. - this needs to run distributed or else it will cause the cannot modify virtual devices error.""" + This ensures that when comparing two runs, they start at the same place. + This is not actually testing anything, so it does not need to run in eager and graph. + This needs to run distributed or else it will cause the cannot modify virtual devices error.""" toy_cnn() toy_rnn() @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): - """confirm that discriminative learning is almost the same as regular learning.""" + """Confirm that discriminative learning is almost the same as regular learning.""" learning_rate = 0.01 model = model_fn() model.compile(loss=loss, optimizer=opt(learning_rate)) @@ -230,14 +229,14 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self, model_fn, loss, opt ): - """confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. - this also confirms that lr_mult propagates into that layer's trainable variables. - this also confirms that lr_mult does not propagate to the rest of the layers unintentionally. + """Confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. + This also confirms that lr_mult propagates into that layer's trainable variables. + This also confirms that lr_mult does not propagate to the rest of the layers unintentionally. """ learning_rate = 0.01 model = model_fn() - # layers 0 represents the pretrained network + # Layers 0 represents the pretrained network model.layers[0].trainable = False model.compile(loss=loss, optimizer=opt(learning_rate)) @@ -253,8 +252,8 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): - """confirm 0 lr_mult for the model is the same as model not trainable. - this also confirms that lr_mult on the model level is propagated to all sublayers and their variables. + """Confirm 0 lr_mult for the model is the same as model not trainable. + This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. """ learning_rate = 0.01 model = model_fn() @@ -268,14 +267,14 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - # only two epochs because we expect no training to occur, thus losses shouldn't change anyways. + # Only two epochs because we expect no training to occur, thus losses shouldn't change anyways. self._assert_training_losses_are_close(model, model_lr, epochs=2) @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): - """confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. 
- this also confirms that lr_mult on the model level is propagated to all sublayers and their variables. + """Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. + This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. """ mult = 0.5 @@ -295,16 +294,16 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): - """confirm that model trains with lower lr on specific layer, + """Confirm that model trains with lower lr on specific layer, while a different lr_mult is applied everywhere else. - also confirms that sub layers with an lr mult do not get overridden. + Also confirms that sub layers with an lr mult do not get overridden. """ learning_rate = 0.01 model_lr = model_fn() - # we set model to lrmult 0 and layer one to lrmult 5. - # if layer one is trainable, then the loss should decrease. + # We set model to lrmult 0 and layer one to lrmult 5. + # If layer one is trainable, then the loss should decrease. model_lr.lr_mult = 0.00 model_lr.layers[-1].lr_mult = 3 @@ -319,7 +318,7 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_variables_get_assigned(self, model_fn, loss, opt): - """confirm that variables do get an lr_mult attribute and that they get the correct one. + """Confirm that variables do get an lr_mult attribute and that they get the correct one. """ learning_rate = 0.01 model_lr = model_fn() @@ -334,21 +333,21 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - # we expect trainable vars at 0.3 to be reduced by the amount at 0.1. - # this tests that the 0.3 lr mult does not override the 0.1 lr mult. + # We expect trainable vars at 0.3 to be reduced by the amount at 0.1. + # This tests that the 0.3 lr mult does not override the 0.1 lr mult. self.assertEqual( len(model_lr.layers[0].trainable_variables) - len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), ) - # we expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. + # We expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. self.assertEqual( len(model_lr.layers[0].layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), ) - # same logic as above. + # Same logic as above. self.assertEqual( len(model_lr.layers[-1].trainable_variables), len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), @@ -357,7 +356,7 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_model_checkpoint(self, model_fn, loss, opt): - """confirm that model does save checkpoints and can load them properly""" + """Confirm that model does save checkpoints and can load them properly""" learning_rate = 0.01 model_lr = model_fn() @@ -392,7 +391,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): callbacks=callbacks, ) - # if this doesn't error out, then loading and checkpointing should be fine + # If this doesn't error out, then loading and checkpointing should be fine. 
model_lr.load_weights( filepath=os.path.join(tempfile.gettempdir(), model_fn.__name__ + "4") ) @@ -405,7 +404,7 @@ def _run_tests_in_notebook(self): def test_wrap(method, **kwargs): - """wrap the test method so that it has pre assigned kwargs.""" + """Wrap the test method so that it has pre assigned kwargs.""" def test(self): return method(self, **kwargs) @@ -414,26 +413,26 @@ def test(self): def generate_tests(): - # generate tests for each permutation in the zipped permutes. - # this separates tests for each permuatation of model, optimizer, and loss. + # Generate tests for each permutation in the zipped permutes. + # This separates tests for each permuatation of model, optimizer, and loss. for name, method in DiscriminativeLearningTest.__dict__.copy().items(): if callable(method) and name[:5] == "_test": for model_fn, loss, opt in _zipped_permutes(): - # name the test as test_testname_model_loss_optimizer. + # Name the test as test_testname_model_loss_optimizer. testmethodname = name[1:] + "_%s_%s_%s" % ( model_fn.__name__, loss.name, opt.__name__, ) - # create test functions that use kwargs mentioned above. + # Create test functions that use kwargs mentioned above. testmethod_dist = test_wrap( method=method, model_fn=model_fn, loss=loss, opt=opt, ) - # set class attributes so we get multiple nicely named tests. - # also all tests are set to run distributed, so append distributed to the end. + # Set class attributes so we get multiple nicely named tests. + # Also all tests are set to run distributed, so append distributed to the end. setattr( DiscriminativeLearningTest, testmethodname + "_distributed", From c4fc0e2f905c83a3f83475a66bdb8a718c3da048 Mon Sep 17 00:00:00 2001 From: hyang Date: Mon, 24 Feb 2020 17:57:48 -0500 Subject: [PATCH 085/106] removed sgd, reduced size of training inputs. --- .../discriminative_layer_training_test.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 3705473c25..62bbb32df5 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -138,13 +138,14 @@ def toy_rnn(): def _get_train_results(model, verbose=False, epochs=10): """Run a training loop and return the results for analysis. Model must be compiled first. + Training data sizes reduced. """ tf.random.set_seed(1) - x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(32, 5), dtype=np.float32) + x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(8, 5), dtype=np.float32) y[:, 0] = 1.0 - return model.fit(x, y, epochs=epochs, batch_size=16, verbose=verbose, shuffle=False) + return model.fit(x, y, epochs=epochs, batch_size=4, verbose=verbose, shuffle=False) def _zipped_permutes(): @@ -165,7 +166,7 @@ def _zipped_permutes(): ] optimzers = [ # Additional optimizers can be added for testing. - tf.keras.optimizers.SGD, + # However, testing adam should cover most optimizer behaviours because it uses momentum. 
tf.keras.optimizers.Adam, ] return list(itertools.product(model_fns, losses, optimzers)) @@ -369,8 +370,8 @@ def _test_model_checkpoint(self, model_fn, loss, opt): ) model_lr.compile(loss=loss, optimizer=d_opt) - x = np.ones(shape=(32, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(32, 5), dtype=np.float32) + x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(8, 5), dtype=np.float32) y[:, 0] = 1.0 filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_{epoch}") @@ -384,8 +385,8 @@ def _test_model_checkpoint(self, model_fn, loss, opt): model_lr.fit( x, y, - epochs=5, - batch_size=16, + epochs=2, + batch_size=4, verbose=False, shuffle=False, callbacks=callbacks, @@ -393,7 +394,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): # If this doesn't error out, then loading and checkpointing should be fine. model_lr.load_weights( - filepath=os.path.join(tempfile.gettempdir(), model_fn.__name__ + "4") + filepath=os.path.join(tempfile.gettempdir(), model_fn.__name__ + "1") ) def _run_tests_in_notebook(self): From ef3130be0067651e4214eeac05421b8518a177b5 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 24 Feb 2020 21:50:40 -0500 Subject: [PATCH 086/106] simplified checkpoint name --- .../optimizers/discriminative_layer_training_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 62bbb32df5..4e322d1783 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -374,7 +374,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): y = np.zeros(shape=(8, 5), dtype=np.float32) y[:, 0] = 1.0 - filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_{epoch}") + filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + '_cp.ckpt' ) callbacks = [ tf.keras.callbacks.ModelCheckpoint( @@ -394,7 +394,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): # If this doesn't error out, then loading and checkpointing should be fine. 
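        # With a fixed "_cp.ckpt" prefix every epoch overwrites the same checkpoint files,
        # so the latest weights can be restored from `filepath` directly.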
model_lr.load_weights( - filepath=os.path.join(tempfile.gettempdir(), model_fn.__name__ + "1") + filepath=filepath ) def _run_tests_in_notebook(self): From 30d3d1ded5d08090537ae2f89a4d17d0b9de528a Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 24 Feb 2020 22:18:10 -0500 Subject: [PATCH 087/106] reformatted --- .../optimizers/discriminative_layer_training_test.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 4e322d1783..635b46fac9 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -357,7 +357,7 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): @test_utils.run_in_graph_and_eager_modes @test_utils.run_distributed(2) def _test_model_checkpoint(self, model_fn, loss, opt): - """Confirm that model does save checkpoints and can load them properly""" + """Confirm that model does save checkpoints and can load them properly.""" learning_rate = 0.01 model_lr = model_fn() @@ -374,7 +374,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): y = np.zeros(shape=(8, 5), dtype=np.float32) y[:, 0] = 1.0 - filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + '_cp.ckpt' ) + filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_cp.ckpt") callbacks = [ tf.keras.callbacks.ModelCheckpoint( @@ -393,9 +393,7 @@ def _test_model_checkpoint(self, model_fn, loss, opt): ) # If this doesn't error out, then loading and checkpointing should be fine. - model_lr.load_weights( - filepath=filepath - ) + model_lr.load_weights(filepath=filepath) def _run_tests_in_notebook(self): for name, method in DiscriminativeLearningTest.__dict__.items(): From 7e46a068d0a58fb9f0afca559d49ae510ac10f9d Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 24 Feb 2020 22:23:43 -0500 Subject: [PATCH 088/106] remove run tests in notebook --- .../optimizers/discriminative_layer_training_test.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 635b46fac9..5f989a868c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -395,12 +395,6 @@ def _test_model_checkpoint(self, model_fn, loss, opt): # If this doesn't error out, then loading and checkpointing should be fine. 
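        # save_weights_only=True writes just the model variables, so this round trip
        # exercises the weight values themselves; optimizer slots are not in the checkpoint.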
model_lr.load_weights(filepath=filepath) - def _run_tests_in_notebook(self): - for name, method in DiscriminativeLearningTest.__dict__.items(): - if callable(method) and name[:4] == "test": - print("running test %s" % name) - method(self) - def test_wrap(method, **kwargs): """Wrap the test method so that it has pre assigned kwargs.""" From b18e2310de090d4ab443cb139dd2ce979965f26b Mon Sep 17 00:00:00 2001 From: hyang Date: Thu, 27 Feb 2020 13:45:10 -0500 Subject: [PATCH 089/106] updated README.md fixed indent for __init__ added test for from config and to config --- tensorflow_addons/optimizers/README.md | 2 + .../discriminative_layer_training.py | 72 +++++++++---------- .../discriminative_layer_training_test.py | 36 ++++++++++ 3 files changed, 74 insertions(+), 36 deletions(-) diff --git a/tensorflow_addons/optimizers/README.md b/tensorflow_addons/optimizers/README.md index cb65bfde0d..10ec32354d 100644 --- a/tensorflow_addons/optimizers/README.md +++ b/tensorflow_addons/optimizers/README.md @@ -5,6 +5,7 @@ |:---------- |:------------- |:--------------| | conditional_gradient | Pengyu Kan, Vishnu Lokhande | pkan2@wisc.edu, lokhande@cs.wisc.edu | | cyclical_learning_rate | Raphael Meudec | raphael.meudec@gmail.com | +| discriminative_layer_training | Hong Yang | hooong.yang@gmail.com | | lamb | Jing Li, Junjie Ke | jingli@google.com, junjiek@google.com | | lazy_adam | Saishruthi Swaminathan | saishruthi.tn@gmail.com | | lookahead | Zhao Hanguang | cyberzhg@gmail.com | @@ -22,6 +23,7 @@ |:--------- |:---------- |:---------| | conditional_gradient | ConditionalGradient | https://arxiv.org/pdf/1803.06453.pdf | | cyclical_learning_rate | Cyclical Learning Rate | https://arxiv.org/abs/1506.01186 | +| discriminative_layer_training | Discriminative Layer Optimizer | https://arxiv.org/pdf/1801.06146.pdf | | lamb | LAMB | https://arxiv.org/abs/1904.00962 | | lazy_adam | LazyAdam | https://arxiv.org/abs/1412.6980 | | lookahead | Lookahead | https://arxiv.org/abs/1907.08610v1 | diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 71078fbe0b..a0895c216e 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -178,55 +178,55 @@ def __init__( ): """Discriminative Layer Training Wrapper. - Discriminative layer training is a technique that applies different learning rates to - different layers in a model. Generally, a lower learning rate is applied to the - layers closest to the input and a higher learning rate is applied to layers closer - to the output. This method helps in transfer learning by quickly calibrating the head - of a model while preserving the useful weights in the main part of the model. + Discriminative layer training is a technique that applies different learning rates to + different layers in a model. Generally, a lower learning rate is applied to the + layers closest to the input and a higher learning rate is applied to layers closer + to the output. This method helps in transfer learning by quickly calibrating the head + of a model while preserving the useful weights in the main part of the model. - You should assign the lr_mult attribute to a layer. This will multiply the learning rate - used by the base optimizer for that layer. + You should assign the lr_mult attribute to a layer. This will multiply the learning rate + used by the base optimizer for that layer. 
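            For example, with learning_rate = 0.01 and layer.lr_mult = 0.1, that layer's
            variables train at an effective rate of 0.001 while the rest of the model stays at 0.01.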
- This method creates a copy of the base optimizer for each unique learning rate multipler. + This method creates a copy of the base optimizer for each unique learning rate multipler. - Performance is similar to using a single copy of the base optimizer as gradients are computed - only once and then passed on. + Performance is similar to using a single copy of the base optimizer as gradients are computed + only once and then passed on. - Currently, this optimizer does not preserve optimizer state. Its state preservation methods will - differ significantly from a standard optimizer because it is a wrapper for multiple optimizers each with - their own learning rate, hyper parameters, and slots. + Currently, this optimizer does not preserve optimizer state. Its state preservation methods will + differ significantly from a standard optimizer because it is a wrapper for multiple optimizers each with + their own learning rate, hyper parameters, and slots. - Example usage - model = tf.keras.Sequential() - model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) - model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) - model.layers[0].lr_mult = 0.01 - opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) - model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) - model.fit(x, y) + Example usage + model = tf.keras.Sequential() + model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) + model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) + model.layers[0].lr_mult = 0.01 + opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) + model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) + model.fit(x, y) - Arguments - base_optimizer: A class that inherits from tf.keras.optimizers.Optimizer. Do not - pass an instance of the class. + Arguments + base_optimizer: A class that inherits from tf.keras.optimizers.Optimizer. Do not + pass an instance of the class. - model: tf.keras.Model, The model to be used for discriminative learning. - It should have at least 1 layer with the attribute lr_mult. The lr_mult should - be set to a value not equal to 1. Otherwise, you will have the exact same - result as not using discriminative learning. + model: tf.keras.Model, The model to be used for discriminative learning. + It should have at least 1 layer with the attribute lr_mult. The lr_mult should + be set to a value not equal to 1. Otherwise, you will have the exact same + result as not using discriminative learning. 
- learning_rate: float, the learning rate for the model + learning_rate: float, the learning rate for the model - verbose: Bool, to generate a report on how many parameters are affected + verbose: Bool, to generate a report on how many parameters are affected - *args: Args to pass to the base optimizer + *args: Args to pass to the base optimizer - **kwargs: Kwargs to pass to the base optimizer + **kwargs: Kwargs to pass to the base optimizer - Returns - Optimizer - A keras optimizer to use with model.compile + Returns + Optimizer - A keras optimizer to use with model.compile - References - - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) + References + - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) """ assert issubclass( diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 5f989a868c..809b69fa68 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -395,6 +395,42 @@ def _test_model_checkpoint(self, model_fn, loss, opt): # If this doesn't error out, then loading and checkpointing should be fine. model_lr.load_weights(filepath=filepath) + def _test_config_tofrom(self, model_fn, loss, opt): + """Confirm that optimizer saves config and loads config.""" + + # build model and save the opt to a config as c. + learning_rate = 0.01 + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + c = d_opt.get_config() + + # reconstruct the model and then build the opt from config. + + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt_from_config = DiscriminativeLayerOptimizer.from_config(c, model_lr) + model_lr.compile(loss=loss, optimizer=d_opt_from_config) + + # we expect both optimizers to have the same optimizer group and base optimizer. + self.assertAllEqual(len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group)) + self.assertAllEqual(d_opt.opt_class, d_opt_from_config.opt_class) + + # we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult. 
+ self.assertAllEqual([opt.learning_rate for opt in d_opt.optimizer_group], + [opt.learning_rate for opt in d_opt_from_config.optimizer_group]) + + def test_wrap(method, **kwargs): """Wrap the test method so that it has pre assigned kwargs.""" From 9eb210404a3bb68a3e3c106e05eeea7b70026907 Mon Sep 17 00:00:00 2001 From: hyang Date: Thu, 27 Feb 2020 13:46:42 -0500 Subject: [PATCH 090/106] fixed formatting --- .../optimizers/discriminative_layer_training_test.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 809b69fa68..2a00c3a4a6 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -423,13 +423,16 @@ def _test_config_tofrom(self, model_fn, loss, opt): model_lr.compile(loss=loss, optimizer=d_opt_from_config) # we expect both optimizers to have the same optimizer group and base optimizer. - self.assertAllEqual(len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group)) + self.assertAllEqual( + len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group) + ) self.assertAllEqual(d_opt.opt_class, d_opt_from_config.opt_class) # we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult. - self.assertAllEqual([opt.learning_rate for opt in d_opt.optimizer_group], - [opt.learning_rate for opt in d_opt_from_config.optimizer_group]) - + self.assertAllEqual( + [opt.learning_rate for opt in d_opt.optimizer_group], + [opt.learning_rate for opt in d_opt_from_config.optimizer_group], + ) def test_wrap(method, **kwargs): From b54a538922c0f1b62cf1501064ce3ff55935d624 Mon Sep 17 00:00:00 2001 From: hyang Date: Wed, 11 Mar 2020 15:28:00 -0400 Subject: [PATCH 091/106] removed distributed tests and added a warning if optimizer is initialized within a strategy scope --- .../optimizers/discriminative_layer_training.py | 8 ++++++++ .../optimizers/discriminative_layer_training_test.py | 12 +----------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index a0895c216e..acb572aa1a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -233,6 +233,14 @@ def __init__( base_optimizer, tf.keras.optimizers.Optimizer ), "Base optimizer must be a class that inherits from tf.keras.optimizers.Optimizer" + # assume that users will follow the general guidelines and init thier opts within a dist scope. + if tf.distribute.has_strategy(): + logging.warning( + """The discriminative layer optimizer may not behave as expected + when using a distribution strategy. 
+ """ + ) + super().__init__(lr=learning_rate, name=name, *args, **kwargs) DiscriminativeModelManager._prepare_model(model, verbose=verbose) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 2a00c3a4a6..bf0c156b23 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -198,7 +198,6 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) - @test_utils.run_distributed(2) def test_a_initialize_model_weights(self): """This test should run first to initialize the model weights. There seem to be major issues in initializing model weights on the fly when testing, @@ -210,7 +209,6 @@ def test_a_initialize_model_weights(self): toy_rnn() @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): """Confirm that discriminative learning is almost the same as regular learning.""" learning_rate = 0.01 @@ -226,7 +224,6 @@ def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self, model_fn, loss, opt ): @@ -251,7 +248,6 @@ def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( self._assert_training_losses_are_close(model, model_lr) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): """Confirm 0 lr_mult for the model is the same as model not trainable. This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. @@ -272,7 +268,6 @@ def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr, epochs=2) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): """Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. @@ -293,7 +288,6 @@ def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): self._assert_training_losses_are_close(model, model_lr) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): """Confirm that model trains with lower lr on specific layer, while a different lr_mult is applied everywhere else. @@ -317,7 +311,6 @@ def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): self.assertLess(loss_values[-1], loss_values[0]) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_variables_get_assigned(self, model_fn, loss, opt): """Confirm that variables do get an lr_mult attribute and that they get the correct one. 
""" @@ -355,7 +348,6 @@ def _test_variables_get_assigned(self, model_fn, loss, opt): ) @test_utils.run_in_graph_and_eager_modes - @test_utils.run_distributed(2) def _test_model_checkpoint(self, model_fn, loss, opt): """Confirm that model does save checkpoints and can load them properly.""" @@ -466,9 +458,7 @@ def generate_tests(): # Set class attributes so we get multiple nicely named tests. # Also all tests are set to run distributed, so append distributed to the end. setattr( - DiscriminativeLearningTest, - testmethodname + "_distributed", - testmethod_dist, + DiscriminativeLearningTest, testmethodname, testmethod_dist, ) From 39d28ef9dd24b52ff32e029a0f99d7489e09e589 Mon Sep 17 00:00:00 2001 From: hyang Date: Wed, 11 Mar 2020 15:48:31 -0400 Subject: [PATCH 092/106] renamed test_wrap to wrap_test bc pytest thought it was a test. --- .../optimizers/discriminative_layer_training_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index bf0c156b23..7e9feadb54 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -427,11 +427,11 @@ def _test_config_tofrom(self, model_fn, loss, opt): ) -def test_wrap(method, **kwargs): +def wrap_test(func, **kwargs): """Wrap the test method so that it has pre assigned kwargs.""" def test(self): - return method(self, **kwargs) + return func(self, **kwargs) return test @@ -439,8 +439,8 @@ def test(self): def generate_tests(): # Generate tests for each permutation in the zipped permutes. # This separates tests for each permuatation of model, optimizer, and loss. - for name, method in DiscriminativeLearningTest.__dict__.copy().items(): - if callable(method) and name[:5] == "_test": + for name, func in DiscriminativeLearningTest.__dict__.copy().items(): + if callable(func) and name[:5] == "_test": for model_fn, loss, opt in _zipped_permutes(): # Name the test as test_testname_model_loss_optimizer. @@ -451,8 +451,8 @@ def generate_tests(): ) # Create test functions that use kwargs mentioned above. - testmethod_dist = test_wrap( - method=method, model_fn=model_fn, loss=loss, opt=opt, + testmethod_dist = wrap_test( + func=func, model_fn=model_fn, loss=loss, opt=opt, ) # Set class attributes so we get multiple nicely named tests. From 9d70f8d9d4b2163a21e07bfb7f2ce74bb83b8c0c Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 23 Mar 2020 10:59:36 -0400 Subject: [PATCH 093/106] converting tests into the pytest framework --- .../discriminative_layer_training_test.py | 111 ++++++++++-------- 1 file changed, 65 insertions(+), 46 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 7e9feadb54..3f5c0ba3e9 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -23,7 +23,8 @@ import itertools import os import tempfile - +import pytest +import sys def toy_cnn(): """Consistently create model with same random weights. 
@@ -176,6 +177,56 @@ def get_losses(hist): return np.array(hist.__dict__["history"]["loss"]) +def _assert_losses_are_close(hist, hist_lr): + """Higher tolerance for graph and distributed bc unable to run deterministically.""" + if not tf.executing_eagerly() or tf.distribute.has_strategy(): + rtol, atol = 0.05, 1.00 + # print('graph or dist') + else: + rtol, atol = 0.01, 0.01 + + return np.testing.assert_allclose( + get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol + ) + + +def _assert_training_losses_are_close(model, model_lr, epochs=10): + """Easy way to check if two models train in almost the same way. + Epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, + if the disc training is not working. + """ + hist = _get_train_results(model, verbose=False, epochs=epochs) + hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) + _assert_losses_are_close(hist, hist_lr) + + + +def test_a_initialize_model_weights(): + """This test should run first to initialize the model weights. + There seem to be major issues in initializing model weights on the fly when testing, + so we initialize them and save them to an h5 file and reload them each time. + This ensures that when comparing two runs, they start at the same place. + This is not actually testing anything, so it does not need to run in eager and graph. + This needs to run distributed or else it will cause the cannot modify virtual devices error.""" + toy_cnn() + toy_rnn() + + +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +def test_equal_with_no_layer_lr(model_fn, loss, opt): + """Confirm that discriminative learning is almost the same as regular learning.""" + learning_rate = 0.01 + model = model_fn() + model.compile(loss=loss, optimizer=opt(learning_rate)) + + model_lr = model_fn() + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + _assert_training_losses_are_close(model, model_lr) + class DiscriminativeLearningTest(tf.test.TestCase): def _assert_losses_are_close(self, hist, hist_lr): """Higher tolerance for graph and distributed bc unable to run deterministically.""" @@ -198,15 +249,15 @@ def _assert_training_losses_are_close(self, model, model_lr, epochs=10): hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) self._assert_losses_are_close(hist, hist_lr) - def test_a_initialize_model_weights(self): - """This test should run first to initialize the model weights. - There seem to be major issues in initializing model weights on the fly when testing, - so we initialize them and save them to an h5 file and reload them each time. - This ensures that when comparing two runs, they start at the same place. - This is not actually testing anything, so it does not need to run in eager and graph. - This needs to run distributed or else it will cause the cannot modify virtual devices error.""" - toy_cnn() - toy_rnn() + # def test_a_initialize_model_weights(self): + # """This test should run first to initialize the model weights. + # There seem to be major issues in initializing model weights on the fly when testing, + # so we initialize them and save them to an h5 file and reload them each time. + # This ensures that when comparing two runs, they start at the same place. + # This is not actually testing anything, so it does not need to run in eager and graph. 
+ # This needs to run distributed or else it will cause the cannot modify virtual devices error.""" + # toy_cnn() + # toy_rnn() @test_utils.run_in_graph_and_eager_modes def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): @@ -427,41 +478,9 @@ def _test_config_tofrom(self, model_fn, loss, opt): ) -def wrap_test(func, **kwargs): - """Wrap the test method so that it has pre assigned kwargs.""" - - def test(self): - return func(self, **kwargs) - - return test - - -def generate_tests(): - # Generate tests for each permutation in the zipped permutes. - # This separates tests for each permuatation of model, optimizer, and loss. - for name, func in DiscriminativeLearningTest.__dict__.copy().items(): - if callable(func) and name[:5] == "_test": - for model_fn, loss, opt in _zipped_permutes(): - - # Name the test as test_testname_model_loss_optimizer. - testmethodname = name[1:] + "_%s_%s_%s" % ( - model_fn.__name__, - loss.name, - opt.__name__, - ) - - # Create test functions that use kwargs mentioned above. - testmethod_dist = wrap_test( - func=func, model_fn=model_fn, loss=loss, opt=opt, - ) - - # Set class attributes so we get multiple nicely named tests. - # Also all tests are set to run distributed, so append distributed to the end. - setattr( - DiscriminativeLearningTest, testmethodname, testmethod_dist, - ) - if __name__ == "__main__": - generate_tests() - tf.test.main() + # generate_tests() + # tf.test.main() + + sys.exit(pytest.main([__file__])) \ No newline at end of file From 02e79df74e3f96fa1f0aa4cf8192e9a427d26628 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 23 Mar 2020 11:12:31 -0400 Subject: [PATCH 094/106] converted tests and parameterized --- .../discriminative_layer_training_test.py | 405 ++++++++---------- 1 file changed, 182 insertions(+), 223 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 3f5c0ba3e9..7cc028ce6f 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -15,7 +15,6 @@ """Tests for Discriminative Layer Training Manager for TensorFlow.""" import tensorflow as tf -from tensorflow_addons.utils import test_utils import numpy as np from tensorflow_addons.optimizers.discriminative_layer_training import ( DiscriminativeLayerOptimizer, @@ -26,6 +25,7 @@ import pytest import sys + def toy_cnn(): """Consistently create model with same random weights. Skip head activation to allow both bce with logits and cce with logits. @@ -200,7 +200,6 @@ def _assert_training_losses_are_close(model, model_lr, epochs=10): _assert_losses_are_close(hist, hist_lr) - def test_a_initialize_model_weights(): """This test should run first to initialize the model weights. 
There seem to be major issues in initializing model weights on the fly when testing, @@ -213,6 +212,7 @@ def test_a_initialize_model_weights(): @pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") def test_equal_with_no_layer_lr(model_fn, loss, opt): """Confirm that discriminative learning is almost the same as regular learning.""" learning_rate = 0.01 @@ -227,260 +227,219 @@ def test_equal_with_no_layer_lr(model_fn, loss, opt): _assert_training_losses_are_close(model, model_lr) -class DiscriminativeLearningTest(tf.test.TestCase): - def _assert_losses_are_close(self, hist, hist_lr): - """Higher tolerance for graph and distributed bc unable to run deterministically.""" - if not tf.executing_eagerly() or tf.distribute.has_strategy(): - rtol, atol = 0.05, 1.00 - # print('graph or dist') - else: - rtol, atol = 0.01, 0.01 - - return self.assertAllClose( - get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol - ) - def _assert_training_losses_are_close(self, model, model_lr, epochs=10): - """Easy way to check if two models train in almost the same way. - Epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, - if the disc training is not working. - """ - hist = _get_train_results(model, verbose=False, epochs=epochs) - hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) - self._assert_losses_are_close(hist, hist_lr) - - # def test_a_initialize_model_weights(self): - # """This test should run first to initialize the model weights. - # There seem to be major issues in initializing model weights on the fly when testing, - # so we initialize them and save them to an h5 file and reload them each time. - # This ensures that when comparing two runs, they start at the same place. - # This is not actually testing anything, so it does not need to run in eager and graph. - # This needs to run distributed or else it will cause the cannot modify virtual devices error.""" - # toy_cnn() - # toy_rnn() - - @test_utils.run_in_graph_and_eager_modes - def _test_equal_with_no_layer_lr(self, model_fn, loss, opt): - """Confirm that discriminative learning is almost the same as regular learning.""" - learning_rate = 0.01 - model = model_fn() - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - self._assert_training_losses_are_close(model, model_lr) - - @test_utils.run_in_graph_and_eager_modes - def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false( - self, model_fn, loss, opt - ): - """Confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. - This also confirms that lr_mult propagates into that layer's trainable variables. - This also confirms that lr_mult does not propagate to the rest of the layers unintentionally. 
- """ - learning_rate = 0.01 - model = model_fn() - - # Layers 0 represents the pretrained network - model.layers[0].trainable = False - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.0 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - self._assert_training_losses_are_close(model, model_lr) - - @test_utils.run_in_graph_and_eager_modes - def _test_equal_0_layer_lr_to_trainable_false(self, model_fn, loss, opt): - """Confirm 0 lr_mult for the model is the same as model not trainable. - This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. - """ - learning_rate = 0.01 - model = model_fn() - model.trainable = False - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - model_lr.lr_mult = 0.0 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - # Only two epochs because we expect no training to occur, thus losses shouldn't change anyways. - self._assert_training_losses_are_close(model, model_lr, epochs=2) - - @test_utils.run_in_graph_and_eager_modes - def _test_equal_half_layer_lr_to_half_lr_of_opt(self, model_fn, loss, opt): - """Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. - This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. - """ - - mult = 0.5 - learning_rate = 0.01 - model = model_fn() - model.compile(loss=loss, optimizer=opt(learning_rate * mult)) - - model_lr = model_fn() - model_lr.lr_mult = mult - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(model_fn, loss, opt): + """Confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. + This also confirms that lr_mult propagates into that layer's trainable variables. + This also confirms that lr_mult does not propagate to the rest of the layers unintentionally. + """ + learning_rate = 0.01 + model = model_fn() - self._assert_training_losses_are_close(model, model_lr) + # Layers 0 represents the pretrained network + model.layers[0].trainable = False + model.compile(loss=loss, optimizer=opt(learning_rate)) - @test_utils.run_in_graph_and_eager_modes - def _test_sub_layers_keep_lr_mult(self, model_fn, loss, opt): - """Confirm that model trains with lower lr on specific layer, - while a different lr_mult is applied everywhere else. - Also confirms that sub layers with an lr mult do not get overridden. - """ + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.0 + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) - learning_rate = 0.01 - model_lr = model_fn() + _assert_training_losses_are_close(model, model_lr) - # We set model to lrmult 0 and layer one to lrmult 5. - # If layer one is trainable, then the loss should decrease. 
- model_lr.lr_mult = 0.00 - model_lr.layers[-1].lr_mult = 3 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_equal_0_layer_lr_to_trainable_false(model_fn, loss, opt): + """Confirm 0 lr_mult for the model is the same as model not trainable. + This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. + """ + learning_rate = 0.01 + model = model_fn() + model.trainable = False + model.compile(loss=loss, optimizer=opt(learning_rate)) - loss_values = get_losses(_get_train_results(model_lr, epochs=5)) - self.assertLess(loss_values[-1], loss_values[0]) + model_lr = model_fn() + model_lr.lr_mult = 0.0 + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) - @test_utils.run_in_graph_and_eager_modes - def _test_variables_get_assigned(self, model_fn, loss, opt): - """Confirm that variables do get an lr_mult attribute and that they get the correct one. - """ - learning_rate = 0.01 - model_lr = model_fn() + # Only two epochs because we expect no training to occur, thus losses shouldn't change anyways. + _assert_training_losses_are_close(model, model_lr, epochs=2) - # set lr mults. - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - # We expect trainable vars at 0.3 to be reduced by the amount at 0.1. - # This tests that the 0.3 lr mult does not override the 0.1 lr mult. - self.assertEqual( - len(model_lr.layers[0].trainable_variables) - - len(model_lr.layers[0].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), - ) +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_equal_half_layer_lr_to_half_lr_of_opt(model_fn, loss, opt): + """Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. + This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. + """ - # We expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. - self.assertEqual( - len(model_lr.layers[0].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), - ) + mult = 0.5 + learning_rate = 0.01 + model = model_fn() + model.compile(loss=loss, optimizer=opt(learning_rate * mult)) - # Same logic as above. 
- self.assertEqual( - len(model_lr.layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), - ) + model_lr = model_fn() + model_lr.lr_mult = mult + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) - @test_utils.run_in_graph_and_eager_modes - def _test_model_checkpoint(self, model_fn, loss, opt): - """Confirm that model does save checkpoints and can load them properly.""" + _assert_training_losses_are_close(model, model_lr) - learning_rate = 0.01 - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_sub_layers_keep_lr_mult(model_fn, loss, opt): + """Confirm that model trains with lower lr on specific layer, + while a different lr_mult is applied everywhere else. + Also confirms that sub layers with an lr mult do not get overridden. + """ - x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(8, 5), dtype=np.float32) - y[:, 0] = 1.0 + learning_rate = 0.01 + model_lr = model_fn() - filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_cp.ckpt") + # We set model to lrmult 0 and layer one to lrmult 5. + # If layer one is trainable, then the loss should decrease. + model_lr.lr_mult = 0.00 + model_lr.layers[-1].lr_mult = 3 - callbacks = [ - tf.keras.callbacks.ModelCheckpoint( - filepath=filepath, save_weights_only=True, verbose=1 - ) - ] - - model_lr.fit( - x, - y, - epochs=2, - batch_size=4, - verbose=False, - shuffle=False, - callbacks=callbacks, - ) + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) - # If this doesn't error out, then loading and checkpointing should be fine. - model_lr.load_weights(filepath=filepath) + loss_values = get_losses(_get_train_results(model_lr, epochs=5)) + np.testing.assert_array_less([loss_values[-1]], [loss_values[0]]) - def _test_config_tofrom(self, model_fn, loss, opt): - """Confirm that optimizer saves config and loads config.""" - # build model and save the opt to a config as c. - learning_rate = 0.01 - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_variables_get_assigned(model_fn, loss, opt): + """Confirm that variables do get an lr_mult attribute and that they get the correct one. + """ + learning_rate = 0.01 + model_lr = model_fn() - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) + # set lr mults. + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + # We expect trainable vars at 0.3 to be reduced by the amount at 0.1. 
+ # This tests that the 0.3 lr mult does not override the 0.1 lr mult. + np.testing.assert_equal( + len(model_lr.layers[0].trainable_variables) + - len(model_lr.layers[0].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), + ) - c = d_opt.get_config() + # We expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. + np.testing.assert_equal( + len(model_lr.layers[0].layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), + ) - # reconstruct the model and then build the opt from config. + # Same logic as above. + np.testing.assert_equal( + len(model_lr.layers[-1].trainable_variables), + len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), + ) - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - d_opt_from_config = DiscriminativeLayerOptimizer.from_config(c, model_lr) - model_lr.compile(loss=loss, optimizer=d_opt_from_config) +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_model_checkpoint(model_fn, loss, opt): + """Confirm that model does save checkpoints and can load them properly.""" - # we expect both optimizers to have the same optimizer group and base optimizer. - self.assertAllEqual( - len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group) - ) - self.assertAllEqual(d_opt.opt_class, d_opt_from_config.opt_class) + learning_rate = 0.01 + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) + y = np.zeros(shape=(8, 5), dtype=np.float32) + y[:, 0] = 1.0 - # we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult. - self.assertAllEqual( - [opt.learning_rate for opt in d_opt.optimizer_group], - [opt.learning_rate for opt in d_opt_from_config.optimizer_group], + filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_cp.ckpt") + + callbacks = [ + tf.keras.callbacks.ModelCheckpoint( + filepath=filepath, save_weights_only=True, verbose=1 ) + ] + + model_lr.fit( + x, y, epochs=2, batch_size=4, verbose=False, shuffle=False, callbacks=callbacks, + ) + + # If this doesn't error out, then loading and checkpointing should be fine. + model_lr.load_weights(filepath=filepath) + + +@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) +@pytest.mark.usefixtures("maybe_run_functions_eagerly") +def _test_config_tofrom(model_fn, loss, opt): + """Confirm that optimizer saves config and loads config.""" + + # build model and save the opt to a config as c. + learning_rate = 0.01 + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + d_opt = DiscriminativeLayerOptimizer( + opt, model_lr, verbose=False, learning_rate=learning_rate + ) + model_lr.compile(loss=loss, optimizer=d_opt) + + c = d_opt.get_config() + + # reconstruct the model and then build the opt from config. 
+ + model_lr = model_fn() + model_lr.layers[0].lr_mult = 0.3 + model_lr.layers[0].layers[-1].lr_mult = 0.1 + model_lr.layers[-1].lr_mult = 0.5 + + d_opt_from_config = DiscriminativeLayerOptimizer.from_config(c, model_lr) + model_lr.compile(loss=loss, optimizer=d_opt_from_config) + + # we expect both optimizers to have the same optimizer group and base optimizer. + np.testing.assert_equal( + len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group) + ) + np.testing.assert_equal(d_opt.opt_class, d_opt_from_config.opt_class) + + # we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult. + np.testing.assert_array_equal( + [opt.learning_rate for opt in d_opt.optimizer_group], + [opt.learning_rate for opt in d_opt_from_config.optimizer_group], + ) if __name__ == "__main__": # generate_tests() # tf.test.main() - sys.exit(pytest.main([__file__])) \ No newline at end of file + sys.exit(pytest.main([__file__])) From dea5198ef67e2377892f206d7301a90adba163d5 Mon Sep 17 00:00:00 2001 From: Hongya Date: Mon, 23 Mar 2020 11:12:53 -0400 Subject: [PATCH 095/106] cleaned up code --- .../optimizers/discriminative_layer_training_test.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py index 7cc028ce6f..dc7643de84 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training_test.py @@ -439,7 +439,4 @@ def _test_config_tofrom(model_fn, loss, opt): if __name__ == "__main__": - # generate_tests() - # tf.test.main() - sys.exit(pytest.main([__file__])) From 1f951e5acd5090ccecb571681731f1356ebf818f Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 31 Mar 2020 14:53:40 -0400 Subject: [PATCH 096/106] added additional checks and doc string for changes in lr multiplier during training. --- .../discriminative_layer_training.py | 32 ++++++++++++++++--- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index acb572aa1a..cb2cd557af 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -196,6 +196,9 @@ def __init__( differ significantly from a standard optimizer because it is a wrapper for multiple optimizers each with their own learning rate, hyper parameters, and slots. + This optimizer does not support learning rate schedules or changes to the learning rate multiplier + during the training process. + Example usage model = tf.keras.Sequential() model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) @@ -233,7 +236,7 @@ def __init__( base_optimizer, tf.keras.optimizers.Optimizer ), "Base optimizer must be a class that inherits from tf.keras.optimizers.Optimizer" - # assume that users will follow the general guidelines and init thier opts within a dist scope. + # assume that users will follow the general guidelines and init their opts within a dist scope. if tf.distribute.has_strategy(): logging.warning( """The discriminative layer optimizer may not behave as expected @@ -250,11 +253,16 @@ def __init__( self.kwargs = kwargs # Find unique lr_mult. 
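# Annotation (not part of the original patch) on the block below: one copy of
# the wrapped base optimizer is created per distinct lr_mult value found on the
# model's trainable variables, each with learning rate = learning_rate * lr_mult.
# The variable_groups mapping records which variables carry each multiplier so
# that apply_gradients() can verify on every step that each multiplier still
# maps to the same number of variables, since changing lr_mult during training
# is unsupported (see the docstring addition above).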
- variable_groups = {var.lr_mult: None for var in model.trainable_variables} + unique_lr_mults = set([var.lr_mult for var in model.trainable_variables]) + + # Store variables into their variable groups to doublecheck that lr mults for variables don't change. + self.variable_groups = {lr_mult: [] for lr_mult in unique_lr_mults} + for var in model.trainable_variables: + self.variable_groups[var.lr_mult].append(var) self.optimizer_group = [] - for lr_mult in variable_groups.keys(): + for lr_mult in unique_lr_mults: opt = self.opt_class(learning_rate=learning_rate * lr_mult, **kwargs) opt.lr_mult = lr_mult self.optimizer_group.append(opt) @@ -272,7 +280,23 @@ def apply_gradients(self, grads_and_vars, name=None): # Load the gradvars into the appropriate bucket. for grad, var in tuple(grads_and_vars): - gvdict[var.lr_mult].append((grad, var)) + try: + gvdict[var.lr_mult].append((grad, var)) + except KeyError: + logging.error( + "Variable named %s has lr multiplier %f, which does not exist in the lr multipliers when the optimizer wrapper was initialized." + % (var.name, var.lr_mult) + ) + + # Doublecheck that each variable group has the same number of variables. + # While we could directly check every variable, the documentation states not to change lr mults. + # Checking each variable independently may add too much overhead and each update step. + + for lr_mult in self.variable_groups.keys(): + assert len(self.variable_groups[lr_mult]) == len(gvdict[lr_mult]), ( + "Mismatch in lr multipliers for variables. Expected %i variables for lr multiplier %f, but got %i" + % (len(self.variable_groups[lr_mult]), lr_mult, (gvdict[lr_mult])) + ) # Return results from each opt. # In eager mode, this will return a list of irrelevant results for each optimizer. From c903d275b4b6a73582a4a4299539883497f424da Mon Sep 17 00:00:00 2001 From: Hongya Date: Tue, 21 Apr 2020 15:18:23 -0400 Subject: [PATCH 097/106] changed comment --- tensorflow_addons/optimizers/discriminative_layer_training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index cb2cd557af..31e47e5c92 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Discriminative Layer Training Manager for TensorFlow.""" +"""Discriminative Layer Training Optimizer for TensorFlow.""" import tensorflow as tf import numpy as np From cd01a2b7ec488bd20760e5253f2dbc941cdc3af6 Mon Sep 17 00:00:00 2001 From: HongM Date: Wed, 9 Sep 2020 14:06:09 -0400 Subject: [PATCH 098/106] Simplified discriminative layer training by using a multi optimizer wrapper class. Removed old tests and added new tests conforming to pytest standard. 
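As a rough illustration of what the new wrapper automates (this sketch is not part of the patch; the model, shapes, and optimizer settings are arbitrary), gradients can be computed once, partitioned by layer, and each partition applied by its own optimizer, so that each layer group trains at its own learning rate:

```python
import tensorflow as tf

# Two Dense layers: train the first slowly (e.g. a pretrained body) and the
# second quickly (e.g. a freshly initialized head).
model = tf.keras.Sequential(
    [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)]
)
opt_body = tf.keras.optimizers.Adam(learning_rate=1e-4)
opt_head = tf.keras.optimizers.Adam(learning_rate=1e-2)

x = tf.ones([8, 1])
y = tf.ones([8, 1])

with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.keras.losses.MSE(y, model(x)))

variables = model.trainable_variables
grads = tape.gradient(loss, variables)

# Partition the (grad, var) pairs by layer, then let each optimizer apply only
# its own partition.
body_ids = {id(v) for v in model.layers[0].trainable_variables}
body_pairs = [(g, v) for g, v in zip(grads, variables) if id(v) in body_ids]
head_pairs = [(g, v) for g, v in zip(grads, variables) if id(v) not in body_ids]

opt_body.apply_gradients(body_pairs)
opt_head.apply_gradients(head_pairs)
```

The wrapper introduced below packages this split: (optimizer, layer) pairs are recorded at construction, and apply_gradients routes each (grad, var) pair to its optimizer by matching variable names.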
--- .../discriminative_layer_training.py | 381 ++++----------- .../discriminative_layer_training_test.py | 442 ------------------ .../discriminative_layer_training_test.py | 94 ++++ 3 files changed, 178 insertions(+), 739 deletions(-) delete mode 100644 tensorflow_addons/optimizers/discriminative_layer_training_test.py create mode 100644 tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 31e47e5c92..56317e0b8c 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -15,348 +15,135 @@ """Discriminative Layer Training Optimizer for TensorFlow.""" import tensorflow as tf -import numpy as np from typeguard import typechecked -import logging +from typing import Union +from tensorflow.python.keras.optimizer_v2 import optimizer_v2 +# python -m flake8 tensorflow_addons/optimizers/discriminative_layer_training.py +# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py -class DiscriminativeModelManager: - """Class for grouping functions related to model lr_mult management.""" - @staticmethod - def _get_layers(layer): - """Helper method to access a layer's sublayers as a list or return an empty list. - """ - return getattr(layer, "layers", None) - - @staticmethod - def _get_lr_mult(layer): - """Helper method to access a layer's learning rate multiplier, which defaults to 1 if lr mult is not set. - """ - return getattr(layer, "lr_mult", 1.0) - - @staticmethod - def _assign_lr_mult(layer, lr_mult): - """Helper method to assign a layer's learning rate multiplier, which does nothing if lr mult is already set. - """ - if not hasattr(layer, "lr_mult"): - layer.lr_mult = lr_mult # Since layer has no lr mult, assign the mult. - # This method should be called after the user has already assigned some lr mults - # to some layers. We just don't want to override any lr mults they assigned. - else: - # We pass here because of propagation to nested layers. - # Users should be able to speficy model.layers[0].layers[0].lr_mult = 0.01 - # and model.layers[0].lr_mult = 0.1, such that the model.layers[0].layers[0] - # keeps its assigned lr mult of 0.01. - pass - - @staticmethod - def _recursively_assign_sublayer_lr_mult(layer): - """Helper method iterate through all nested layers of an object that behaves like a layer or model. - By default, we want to propagate the lr mult to the lower layers. - Note that this function always returns a list of the lowest sublayers. - - https://stackoverflow.com/questions/6340351/iterating-through-list-of-list-in-python - """ +class FakeVar: + def __init__(self, name): + # probably can be refactored out + self.name = name - mult = DiscriminativeModelManager._get_lr_mult(layer) - layers = DiscriminativeModelManager._get_layers(layer) - - if layers is not None: - for sublayer in layers: - # We always assign the lr mult to the sublayers of the current layer. - # The assign method will avoid overwritting lr mults. - # So, if you have a resnet and you specifically assign the first resnet layer - # to have lr_mult of 0.01 and the resnet model to have lr_mult of 0.1, all - # resnet layers except the first should get lr_mult of 0.1 and the first - # keeps its lr_mult of 0.01. - DiscriminativeModelManager._assign_lr_mult(sublayer, mult) - - # Recursively iterate through the nested layers. 
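# Side note (not part of the original patch): the FakeVar helper added above is
# a minimal placeholder that records only a variable's name.
# create_optimizer_spec() stores one FakeVar per weight of the paired layer (or
# list of layers), so each optimizer spec carries just the names of the weights
# it is responsible for rather than the tf.Variable objects themselves.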
- for ( - nested_sublayer - ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( - sublayer - ): - yield nested_sublayer - else: - yield layer - - @staticmethod - def _apply_lr_mult_to_var(layer): - """Helper method to apply the lr mult to the trainable variables of a layer. - This is necessary because the optimizer does not receive layers during optimization and only receives - variable objects. The lr mult attribute on the variable allows the disc optimizer to send the variable to - the correct learning rate. - """ - lr_mult = DiscriminativeModelManager._get_lr_mult(layer) - for var in layer.trainable_variables: - var.lr_mult = lr_mult - # The lr_mult behaves as a hyper parameter and not a variable. it will not be a tensor. - # There's no benefit in setting the lr_mult as a variable because it does not interact with tensors. - - @staticmethod - def _check_for_lr_mult(layer, verbose=True, propagate=True): - """Identify which layers have an lr mult not equal to 1. - """ - - layers_with_lr_mult = [] - for ( - sub_layer - ) in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult(layer): - lr_mult = DiscriminativeModelManager._get_lr_mult(sub_layer) - if lr_mult != 1.0: - layers_with_lr_mult.append(sub_layer) - if verbose: - logging.info("layer %s lr_mult : %f" % (sub_layer.name, lr_mult)) - - return layers_with_lr_mult - - @staticmethod - def _compute_params(var_list): - """Helps compute params to provide a summary that aligns with model.summary(). - """ - return np.sum([np.prod(list(var.shape)) for var in var_list]) - - @staticmethod - def _prepare_model(model, verbose=True): - """Prepares a model for disc training. - """ - - layers_with_lr_mult = DiscriminativeModelManager._check_for_lr_mult( - model, verbose=verbose - ) - if len(layers_with_lr_mult) == 0: - logging.warning( - """No Layer has been assigned an lr_mult attribute != 1.0 - Discriminative Layer Training will apply the same learning rate to all layers - It will perform as if you did not use Discriminative Layer Training - """ - ) - - # Lr mult assignment occurs in two steps to ensure propagation occurs correctly. - # In this example, given a model with layers : variables similar to { L1 : V1 , L2 : {L3 : V3, L4 : V4 ,} ,}, - # L2 represents a nested layer (usually a tf.keras.Model) and does not directly own any variables. - # If the user assigns L2 an lr mult x, x is propaged to L3 and L4 and then V3 and V4 is assigned lr mult of x. - # If the user assigned l2 lr mult x and L3 lr mult y, then lr mult x is propaged to L4 - # while L3 keeps its lr mult of y. Finally, the variables are assigned by x to V4 and y to V3. - # This two step method ensures that each variable is assigned an lr mult exactly 1 time. 
- - for layer in DiscriminativeModelManager._recursively_assign_sublayer_lr_mult( - model - ): - DiscriminativeModelManager._apply_lr_mult_to_var(layer) - - vars_with_lr_mult = [ - var for var in model.trainable_variables if var.lr_mult != 1.0 - ] - - if verbose: - logging.info( - "%i params of %i will learn at a different rate" - % ( - DiscriminativeModelManager._compute_params(vars_with_lr_mult), - DiscriminativeModelManager._compute_params( - model.trainable_variables - ), - ) - ) - - -class DiscriminativeLayerOptimizer(tf.keras.optimizers.Optimizer): +@tf.keras.utils.register_keras_serializable(package="Addons") +class MultiOpt(optimizer_v2.OptimizerV2): @typechecked def __init__( self, - base_optimizer: tf.keras.optimizers.Optimizer.__class__, - model: tf.keras.Model, - learning_rate: float, - verbose: bool = True, - name: str = "discrim_opt", - *args, + optimizer_layer_pairs: Union[list, None], + optimizer_specs: Union[list, None], + name: str = "MultiOpt", **kwargs ): - """Discriminative Layer Training Wrapper. - - Discriminative layer training is a technique that applies different learning rates to - different layers in a model. Generally, a lower learning rate is applied to the - layers closest to the input and a higher learning rate is applied to layers closer - to the output. This method helps in transfer learning by quickly calibrating the head - of a model while preserving the useful weights in the main part of the model. - - You should assign the lr_mult attribute to a layer. This will multiply the learning rate - used by the base optimizer for that layer. - This method creates a copy of the base optimizer for each unique learning rate multipler. + """ - Performance is similar to using a single copy of the base optimizer as gradients are computed - only once and then passed on. + Creates a wrapper around a set of instantiated optimizer layer pairs. - Currently, this optimizer does not preserve optimizer state. Its state preservation methods will - differ significantly from a standard optimizer because it is a wrapper for multiple optimizers each with - their own learning rate, hyper parameters, and slots. + Each optimizer will optimize only the weights associated with its paired layer. This can be used + to implement discriminative layer training by assigning different learning rates to each optimizer + layer pair. (Optimizer, list(Layers)) pairs are also supported. - This optimizer does not support learning rate schedules or changes to the learning rate multiplier - during the training process. + Currently, MultiOpt does not support callbacks that modify optimizers. However, you can instantiate + optimizer layer pairs with tf.keras.optimizers.schedules.LearningRateSchedule instead of a static learning + rate. - Example usage - model = tf.keras.Sequential() - model.add(tf.keras.applications.resnet.ResNet50(include_top = False, pooling = 'avg')) - model.add(tf.keras.layers.Dense(1, activation = 'sigmoid')) - model.layers[0].lr_mult = 0.01 - opt = DiscriminativeWrapper(tf.keras.optimizers.Adam, model, learning_rate = 0.01) - model.compile(loss = tf.keras.losses.BinaryCrossentropy, optimizer = opt) - model.fit(x, y) + This code should function on CPU, GPU, and TPU. - Arguments - base_optimizer: A class that inherits from tf.keras.optimizers.Optimizer. Do not - pass an instance of the class. + Example: - model: tf.keras.Model, The model to be used for discriminative learning. - It should have at least 1 layer with the attribute lr_mult. 
The lr_mult should - be set to a value not equal to 1. Otherwise, you will have the exact same - result as not using discriminative learning. + ```python - learning_rate: float, the learning rate for the model + model = get_model() - verbose: Bool, to generate a report on how many parameters are affected + opt1 = tf.keras.optimizers.Adam(learning_rate=1e-4) + opt2 = tf.keras.optimizers.Adam(learning_rate=1e-2) - *args: Args to pass to the base optimizer + opt_layer_pairs = [(opt1, model.layers[0]), + (opt2, model.layers[1:])] - **kwargs: Kwargs to pass to the base optimizer + loss = tf.keras.losses.MSE + optimizer = MultiOpt(opt_layer_pairs) - Returns - Optimizer - A keras optimizer to use with model.compile + model.compile(optimizer=optimizer, loss = loss) - References - - [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/pdf/1801.06146.pdf) - """ + model.fit(x,y) - assert issubclass( - base_optimizer, tf.keras.optimizers.Optimizer - ), "Base optimizer must be a class that inherits from tf.keras.optimizers.Optimizer" + ``` - # assume that users will follow the general guidelines and init their opts within a dist scope. - if tf.distribute.has_strategy(): - logging.warning( - """The discriminative layer optimizer may not behave as expected - when using a distribution strategy. - """ - ) - super().__init__(lr=learning_rate, name=name, *args, **kwargs) + """ - DiscriminativeModelManager._prepare_model(model, verbose=verbose) + super(MultiOpt, self).__init__(name, **kwargs) - self.opt_class = base_optimizer - self.learning_rate = learning_rate - self.kwargs = kwargs + if any(optimizer_layer_pairs) and not any(optimizer_specs): + self.optimizer_specs = [ + self.create_optimizer_spec(opt, layer) + for opt, layer in optimizer_layer_pairs + ] - # Find unique lr_mult. - unique_lr_mults = set([var.lr_mult for var in model.trainable_variables]) + elif any(optimizer_specs): + self.optimizer_specs = optimizer_specs - # Store variables into their variable groups to doublecheck that lr mults for variables don't change. - self.variable_groups = {lr_mult: [] for lr_mult in unique_lr_mults} - for var in model.trainable_variables: - self.variable_groups[var.lr_mult].append(var) + else: + raise RuntimeError( + "You must specify either an list of optimizer_layer_pairs or a list of optimizer_specs" + ) - self.optimizer_group = [] + self.initialized_optimizer_specs = [ + self.initialize_from_optimizer_spec(spec) for spec in self.optimizer_specs + ] - for lr_mult in unique_lr_mults: - opt = self.opt_class(learning_rate=learning_rate * lr_mult, **kwargs) - opt.lr_mult = lr_mult - self.optimizer_group.append(opt) + self.lr = self.initialized_optimizer_specs[0]["optimizer"].lr - def apply_gradients(self, grads_and_vars, name=None): - """Allocates gradients to each optimizer based on the variable's learning rate multiplier - then applies the gradients. In graph mode, it returns 1 operation per optimizer. - Please use the model.fit method instead of accessing this directly. + def apply_gradients( + self, grads_and_vars, name=None, experimental_aggregate_gradients=True + ): + """ + Wrapped Gradient Apply method. Returns a list of tf ops to be executed. """ - # Create gradvar buckets for each opt. - gvdict = {} - for opt in self.optimizer_group: - gvdict[opt.lr_mult] = [] + for spec in self.optimizer_specs: + spec["gv"] = [] - # Load the gradvars into the appropriate bucket. 
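# Annotation (not part of the original patch) on the loop that follows: each
# incoming (grad, var) pair is matched by variable name against the weights
# recorded in every optimizer spec and appended to that spec's "gv" bucket, and
# each spec's optimizer then applies only its own bucket. The variable names
# captured when the specs were created replace the per-variable lr_mult
# attribute as the routing key.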
for grad, var in tuple(grads_and_vars): - try: - gvdict[var.lr_mult].append((grad, var)) - except KeyError: - logging.error( - "Variable named %s has lr multiplier %f, which does not exist in the lr multipliers when the optimizer wrapper was initialized." - % (var.name, var.lr_mult) - ) - - # Doublecheck that each variable group has the same number of variables. - # While we could directly check every variable, the documentation states not to change lr mults. - # Checking each variable independently may add too much overhead and each update step. - - for lr_mult in self.variable_groups.keys(): - assert len(self.variable_groups[lr_mult]) == len(gvdict[lr_mult]), ( - "Mismatch in lr multipliers for variables. Expected %i variables for lr multiplier %f, but got %i" - % (len(self.variable_groups[lr_mult]), lr_mult, (gvdict[lr_mult])) - ) + for spec in self.optimizer_specs: + for weight in spec["weights"]: + if var.name == weight.name: + spec["gv"].append((grad, var)) - # Return results from each opt. - # In eager mode, this will return a list of irrelevant results for each optimizer. - # In eager mode, the function apply_gradients actually applies gradients to the model. - # In graph mode, this will return a list of tensor ops for each opt. - # In graph mode, apply_gradients creates the tensor ops for applying gradients on the graph. return [ - opt.apply_gradients(tuple(gvdict[opt.lr_mult])) - for opt in self.optimizer_group + spec["optimizer"].apply_gradients(spec["gv"]) + for spec in self.optimizer_specs ] def get_config(self): - """Returns the config of the optimizer. - - An optimizer config is a Python dictionary (serializable) - containing the configuration of an optimizer. - The same optimizer can be reinstantiated later - (without any saved state) from this configuration. - - Please note that this optimizer requires a model for instantiation or calling the from_config class method. - - Returns: - Python dictionary. - """ - - logging.warning( - """Discriminative Training Optimzer depends on its attached model. - It will behave differently on the same model if the lr mult attributes are not set in the same way. - """ - ) - config = super().get_config() - config["base_optimizer"] = self.opt_class - config["learning_rate"] = self.learning_rate - - for key, value in self.kwargs: - config[key] = value - + config = super(MultiOpt, self).get_config() + config.update({"optimizer_specs": self.optimizer_specs}) return config @classmethod - def from_config(cls, config, model): - """Creates an optimizer from its config. - This method is the reverse of `get_config`, - capable of instantiating the same optimizer from the config - dictionary. - - Please note that this optimizer requires a model for instantiation or calling the from_config class method. - - Arguments: - config: A Python dictionary, typically the output of get_config. - model: An instance of tf.keras.Model. - - Returns: - An optimizer instance. - """ - - logging.warning( - """Discriminative Training Optimzer depends on its attached model. - It will behave differently on the same model if the lr mult attributes are not set in the same way. 
- """ + def initialize_from_optimizer_spec(cls, optimizer_spec): + optimizer_spec["optimizer"] = optimizer_spec["optimizer_class"].from_config( + optimizer_spec["optimizer_config"] ) + return optimizer_spec + + @classmethod + def create_optimizer_spec(cls, optimizer_instance, layer): + if type(layer) == list: + weights = [ + FakeVar(var.name) for sublayer in layer for var in sublayer.weights + ] + else: + weights = [FakeVar(var.name) for var in layer.weights] - return cls(**config, model=model) + return { + "optimizer_class": optimizer_instance.__class__, + "optimizer_config": optimizer_instance.get_config(), + "weights": weights, + } diff --git a/tensorflow_addons/optimizers/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/discriminative_layer_training_test.py deleted file mode 100644 index dc7643de84..0000000000 --- a/tensorflow_addons/optimizers/discriminative_layer_training_test.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for Discriminative Layer Training Manager for TensorFlow.""" - -import tensorflow as tf -import numpy as np -from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeLayerOptimizer, -) -import itertools -import os -import tempfile -import pytest -import sys - - -def toy_cnn(): - """Consistently create model with same random weights. - Skip head activation to allow both bce with logits and cce with logits. - - The model returned by this function should have identical weights to all - other models returned by this function, for the duration of that - continuous integration run. - - Run this function within a test, but make sure it runs before other tests. - - Model is intended to work with - x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) - y = np.zeros(shape = (None, 5), dtype = np.float32) - y[:, 0] = 1. - """ - - cnn_model_path = os.path.join(tempfile.gettempdir(), "cnn.h5") - - if not os.path.exists(cnn_model_path): - bignet = tf.keras.applications.mobilenet_v2.MobileNetV2( - include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg" - ) - - # Take the first few layers so we cover BN, Conv, Pooling ops for testing. - net = tf.keras.models.Model( - inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output - ) - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(32, 32, 3)), - net, - tf.keras.layers.GlobalAveragePooling2D(), - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(5, name="head"), - ] - ) - - model.save(cnn_model_path) - # This creates a model with set weights for testing purposes. - # Most tests will assert equivalency between a model with discriminative training and a model without. 
- return tf.keras.models.load_model(cnn_model_path) - else: - assert os.path.exists((cnn_model_path)), ( - "Could not find h5 file at path %s " % cnn_model_path - ) - # Load the variable initialized model from the disk. - return tf.keras.models.load_model(cnn_model_path) - - -def toy_rnn(): - """Consistently create model with same random weights. - Skip head activation to allow both bce with logits and cce with logits. - - The model returned by this function should have identical weights to all - other models returned by this function, for the duration of that - continuous integration run. - - Run this function within a test, but make sure it runs before other tests. - - Model is intended to work with - x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32) - y = np.zeros(shape = (None, 5), dtype = np.float32) - y[:, 0] = 1. - """ - rnn_model_path = os.path.join(tempfile.gettempdir(), "rnn.h5") - - if not os.path.exists(rnn_model_path): - - # Pretend this net is a pretrained lstm of some sort. - net = tf.keras.Sequential() - - # Crop the input shape so the lstm runs faster. - # Pretrained need inputshape for weights to be initialized. - net.add( - tf.keras.layers.Cropping2D( - cropping=((8, 8), (12, 12)), input_shape=(32, 32, 3) - ) - ) - - # Reshape into a timeseries. - net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3))) - - # Reduce the length of the time series. - net.add(tf.keras.layers.Cropping1D(cropping=(0, 5))) - - # We are primarily interested in the bidir lstm layer and its behavior. - net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(4))) - - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(32, 32, 3)), - net, - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(5, name="head"), - ] - ) - - model.save(rnn_model_path) - # This creates a model with set weights for testing purposes. - # Most tests will assert equivalency between a model with discriminative training and a model without. - return tf.keras.models.load_model(rnn_model_path) - - else: - assert os.path.exists((rnn_model_path)), ( - "Could not find h5 file at path %s " % rnn_model_path - ) - # Load the variable initialized model from the disk - return tf.keras.models.load_model(rnn_model_path) - - -def _get_train_results(model, verbose=False, epochs=10): - """Run a training loop and return the results for analysis. - Model must be compiled first. - Training data sizes reduced. - """ - tf.random.set_seed(1) - x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(8, 5), dtype=np.float32) - y[:, 0] = 1.0 - - return model.fit(x, y, epochs=epochs, batch_size=4, verbose=verbose, shuffle=False) - - -def _zipped_permutes(): - model_fns = [ - # Generally, we want to test that common layers function correctly with discriminative layer training. - # Dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types. - # We also assume that if it works for conv2d, it should work for conv3d by extension. - # Apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use? - toy_cnn, - toy_rnn, - ] - losses = [ - # Additional loss types do not need to be tested. - # This is because losses affect the gradient tape, which is computed before - # the apply_gradients step. This means that the some gradient value is passed on to each opt - # and the gradient calculation is unaffected by which optimizer you are using. 
- tf.keras.losses.CategoricalCrossentropy(from_logits=True), - ] - optimzers = [ - # Additional optimizers can be added for testing. - # However, testing adam should cover most optimizer behaviours because it uses momentum. - tf.keras.optimizers.Adam, - ] - return list(itertools.product(model_fns, losses, optimzers)) - - -def get_losses(hist): - return np.array(hist.__dict__["history"]["loss"]) - - -def _assert_losses_are_close(hist, hist_lr): - """Higher tolerance for graph and distributed bc unable to run deterministically.""" - if not tf.executing_eagerly() or tf.distribute.has_strategy(): - rtol, atol = 0.05, 1.00 - # print('graph or dist') - else: - rtol, atol = 0.01, 0.01 - - return np.testing.assert_allclose( - get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol - ) - - -def _assert_training_losses_are_close(model, model_lr, epochs=10): - """Easy way to check if two models train in almost the same way. - Epochs set to 10 by default to allow momentum methods to pick up momentum and diverge, - if the disc training is not working. - """ - hist = _get_train_results(model, verbose=False, epochs=epochs) - hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs) - _assert_losses_are_close(hist, hist_lr) - - -def test_a_initialize_model_weights(): - """This test should run first to initialize the model weights. - There seem to be major issues in initializing model weights on the fly when testing, - so we initialize them and save them to an h5 file and reload them each time. - This ensures that when comparing two runs, they start at the same place. - This is not actually testing anything, so it does not need to run in eager and graph. - This needs to run distributed or else it will cause the cannot modify virtual devices error.""" - toy_cnn() - toy_rnn() - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def test_equal_with_no_layer_lr(model_fn, loss, opt): - """Confirm that discriminative learning is almost the same as regular learning.""" - learning_rate = 0.01 - model = model_fn() - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - _assert_training_losses_are_close(model, model_lr) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(model_fn, loss, opt): - """Confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable. - This also confirms that lr_mult propagates into that layer's trainable variables. - This also confirms that lr_mult does not propagate to the rest of the layers unintentionally. 
- """ - learning_rate = 0.01 - model = model_fn() - - # Layers 0 represents the pretrained network - model.layers[0].trainable = False - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.0 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - _assert_training_losses_are_close(model, model_lr) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_equal_0_layer_lr_to_trainable_false(model_fn, loss, opt): - """Confirm 0 lr_mult for the model is the same as model not trainable. - This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. - """ - learning_rate = 0.01 - model = model_fn() - model.trainable = False - model.compile(loss=loss, optimizer=opt(learning_rate)) - - model_lr = model_fn() - model_lr.lr_mult = 0.0 - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - # Only two epochs because we expect no training to occur, thus losses shouldn't change anyways. - _assert_training_losses_are_close(model, model_lr, epochs=2) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_equal_half_layer_lr_to_half_lr_of_opt(model_fn, loss, opt): - """Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr. - This also confirms that lr_mult on the model level is propagated to all sublayers and their variables. - """ - - mult = 0.5 - learning_rate = 0.01 - model = model_fn() - model.compile(loss=loss, optimizer=opt(learning_rate * mult)) - - model_lr = model_fn() - model_lr.lr_mult = mult - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - _assert_training_losses_are_close(model, model_lr) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_sub_layers_keep_lr_mult(model_fn, loss, opt): - """Confirm that model trains with lower lr on specific layer, - while a different lr_mult is applied everywhere else. - Also confirms that sub layers with an lr mult do not get overridden. - """ - - learning_rate = 0.01 - model_lr = model_fn() - - # We set model to lrmult 0 and layer one to lrmult 5. - # If layer one is trainable, then the loss should decrease. - model_lr.lr_mult = 0.00 - model_lr.layers[-1].lr_mult = 3 - - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - loss_values = get_losses(_get_train_results(model_lr, epochs=5)) - np.testing.assert_array_less([loss_values[-1]], [loss_values[0]]) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_variables_get_assigned(model_fn, loss, opt): - """Confirm that variables do get an lr_mult attribute and that they get the correct one. - """ - learning_rate = 0.01 - model_lr = model_fn() - - # set lr mults. 
- model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - # We expect trainable vars at 0.3 to be reduced by the amount at 0.1. - # This tests that the 0.3 lr mult does not override the 0.1 lr mult. - np.testing.assert_equal( - len(model_lr.layers[0].trainable_variables) - - len(model_lr.layers[0].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]), - ) - - # We expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer. - np.testing.assert_equal( - len(model_lr.layers[0].layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]), - ) - - # Same logic as above. - np.testing.assert_equal( - len(model_lr.layers[-1].trainable_variables), - len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]), - ) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_model_checkpoint(model_fn, loss, opt): - """Confirm that model does save checkpoints and can load them properly.""" - - learning_rate = 0.01 - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32) - y = np.zeros(shape=(8, 5), dtype=np.float32) - y[:, 0] = 1.0 - - filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_cp.ckpt") - - callbacks = [ - tf.keras.callbacks.ModelCheckpoint( - filepath=filepath, save_weights_only=True, verbose=1 - ) - ] - - model_lr.fit( - x, y, epochs=2, batch_size=4, verbose=False, shuffle=False, callbacks=callbacks, - ) - - # If this doesn't error out, then loading and checkpointing should be fine. - model_lr.load_weights(filepath=filepath) - - -@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes()) -@pytest.mark.usefixtures("maybe_run_functions_eagerly") -def _test_config_tofrom(model_fn, loss, opt): - """Confirm that optimizer saves config and loads config.""" - - # build model and save the opt to a config as c. - learning_rate = 0.01 - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - - d_opt = DiscriminativeLayerOptimizer( - opt, model_lr, verbose=False, learning_rate=learning_rate - ) - model_lr.compile(loss=loss, optimizer=d_opt) - - c = d_opt.get_config() - - # reconstruct the model and then build the opt from config. - - model_lr = model_fn() - model_lr.layers[0].lr_mult = 0.3 - model_lr.layers[0].layers[-1].lr_mult = 0.1 - model_lr.layers[-1].lr_mult = 0.5 - - d_opt_from_config = DiscriminativeLayerOptimizer.from_config(c, model_lr) - model_lr.compile(loss=loss, optimizer=d_opt_from_config) - - # we expect both optimizers to have the same optimizer group and base optimizer. - np.testing.assert_equal( - len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group) - ) - np.testing.assert_equal(d_opt.opt_class, d_opt_from_config.opt_class) - - # we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult. 
- np.testing.assert_array_equal( - [opt.learning_rate for opt in d_opt.optimizer_group], - [opt.learning_rate for opt in d_opt_from_config.optimizer_group], - ) - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py new file mode 100644 index 0000000000..7de48ef9bd --- /dev/null +++ b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py @@ -0,0 +1,94 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Discriminative Layer Training Optimizer for TensorFlow""" + +import pytest +import numpy as np + +from tensorflow_addons.optimizers.discriminative_layer_training import MultiOpt +import tensorflow as tf +from tensorflow_addons.utils import test_utils + +# python -m flake8 tensorflow_addons/optimizers/discriminative_layer_training.py +# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py + + +def _dtypes_to_test(use_gpu): + # Based on issue #347 in the following link, + # "https://github.com/tensorflow/addons/issues/347" + # tf.half is not registered for 'ResourceScatterUpdate' OpKernel + # for 'GPU' devices. + # So we have to remove tf.half when testing with gpu. + # The function "_DtypesToTest" is from + # "https://github.com/tensorflow/tensorflow/blob/5d4a6cee737a1dc6c20172a1dc1 + # 5df10def2df72/tensorflow/python/kernel_tests/conv_ops_3d_test.py#L53-L62" + if use_gpu: + return [tf.float32, tf.float64] + else: + return [tf.half, tf.float32, tf.float64] + +def _dtypes_with_checking_system(use_gpu, system): + # Based on issue #36764 in the following link, + # "https://github.com/tensorflow/tensorflow/issues/36764" + # tf.half is not registered for tf.linalg.svd function on Windows + # CPU version. + # So we have to remove tf.half when testing with Windows CPU version. 
+ if system == "Windows": + return [tf.float32, tf.float64] + else: + return _dtypes_to_test(use_gpu) + + +@pytest.mark.with_device(["cpu", "gpu"]) +@pytest.mark.parametrize("dtype", [tf.float16, tf.float32, tf.float64]) +def test_fit_layer_optimizer(dtype, device): + # Test ensures that each optimizer is only optimizing its own layer with its learning rate + + if "gpu" in device and dtype == tf.float16: + pytest.xfail("See https://github.com/tensorflow/addons/issues/347") + + model = tf.keras.Sequential([tf.keras.Input(shape = [1]), + tf.keras.layers.Dense(1), + tf.keras.layers.Dense(1) + ]) + + x = np.array(np.ones([100])) + y = np.array(np.ones([100])) + + weights_before_train = (model.layers[0].weights[0].numpy(), + model.layers[1].weights[0].numpy()) + + opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3) + opt2 = tf.keras.optimizers.SGD(learning_rate=0) + + opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1])] + + loss = tf.keras.losses.MSE + optimizer = MultiOpt(opt_layer_pairs) + + model.compile(optimizer=optimizer, + loss = loss) + + model.fit(x, y, batch_size=8, epochs=10) + + weights_after_train = (model.layers[0].weights[0].numpy(), + model.layers[1].weights[0].numpy()) + + with np.testing.assert_raises(AssertionError): + # expect weights to be different for layer 1 + test_utils.assert_allclose_according_to_type(weights_before_train[0], weights_after_train[0]) + + # expect weights to be same for layer 2 + test_utils.assert_allclose_according_to_type(weights_before_train[1], weights_after_train[1]) From 6f82e8ffe805592e72d0fb7961c952a33c5a5374 Mon Sep 17 00:00:00 2001 From: HongM Date: Wed, 9 Sep 2020 14:17:19 -0400 Subject: [PATCH 099/106] Refactored code using black and flake8 --- .../discriminative_layer_training.py | 1 - .../discriminative_layer_training_test.py | 35 +++++++++++-------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 56317e0b8c..ad8d6a1f3a 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -41,7 +41,6 @@ def __init__( ): """ - Creates a wrapper around a set of instantiated optimizer layer pairs. Each optimizer will optimize only the weights associated with its paired layer. 
This can be used diff --git a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py index 7de48ef9bd..fdae52ba1c 100644 --- a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py @@ -21,8 +21,8 @@ import tensorflow as tf from tensorflow_addons.utils import test_utils -# python -m flake8 tensorflow_addons/optimizers/discriminative_layer_training.py -# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py +# python -m flake8 tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py +# python -m black tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py def _dtypes_to_test(use_gpu): @@ -39,6 +39,7 @@ def _dtypes_to_test(use_gpu): else: return [tf.half, tf.float32, tf.float64] + def _dtypes_with_checking_system(use_gpu, system): # Based on issue #36764 in the following link, # "https://github.com/tensorflow/tensorflow/issues/36764" @@ -59,16 +60,17 @@ def test_fit_layer_optimizer(dtype, device): if "gpu" in device and dtype == tf.float16: pytest.xfail("See https://github.com/tensorflow/addons/issues/347") - model = tf.keras.Sequential([tf.keras.Input(shape = [1]), - tf.keras.layers.Dense(1), - tf.keras.layers.Dense(1) - ]) + model = tf.keras.Sequential( + [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)] + ) x = np.array(np.ones([100])) y = np.array(np.ones([100])) - weights_before_train = (model.layers[0].weights[0].numpy(), - model.layers[1].weights[0].numpy()) + weights_before_train = ( + model.layers[0].weights[0].numpy(), + model.layers[1].weights[0].numpy(), + ) opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3) opt2 = tf.keras.optimizers.SGD(learning_rate=0) @@ -78,17 +80,22 @@ def test_fit_layer_optimizer(dtype, device): loss = tf.keras.losses.MSE optimizer = MultiOpt(opt_layer_pairs) - model.compile(optimizer=optimizer, - loss = loss) + model.compile(optimizer=optimizer, loss=loss) model.fit(x, y, batch_size=8, epochs=10) - weights_after_train = (model.layers[0].weights[0].numpy(), - model.layers[1].weights[0].numpy()) + weights_after_train = ( + model.layers[0].weights[0].numpy(), + model.layers[1].weights[0].numpy(), + ) with np.testing.assert_raises(AssertionError): # expect weights to be different for layer 1 - test_utils.assert_allclose_according_to_type(weights_before_train[0], weights_after_train[0]) + test_utils.assert_allclose_according_to_type( + weights_before_train[0], weights_after_train[0] + ) # expect weights to be same for layer 2 - test_utils.assert_allclose_according_to_type(weights_before_train[1], weights_after_train[1]) + test_utils.assert_allclose_according_to_type( + weights_before_train[1], weights_after_train[1] + ) From ef4ede19a41c1d4844a77bc4e763b2bd1a452251 Mon Sep 17 00:00:00 2001 From: HongM Date: Wed, 9 Sep 2020 14:18:34 -0400 Subject: [PATCH 100/106] updated init file --- tensorflow_addons/optimizers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index 6f3ead5c84..e35bc03cf3 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -27,7 +27,7 @@ ExponentialCyclicalLearningRate, ) from tensorflow_addons.optimizers.discriminative_layer_training import ( - DiscriminativeLayerOptimizer, + MultiOpt, ) from 
tensorflow_addons.optimizers.lamb import LAMB from tensorflow_addons.optimizers.lazy_adam import LazyAdam From 87d4c9dffadda880ad94f7baeaa902ccf8bb4b8a Mon Sep 17 00:00:00 2001 From: HongM Date: Wed, 9 Sep 2020 15:17:31 -0400 Subject: [PATCH 101/106] fixed typeguard error and usage of private/experimental api. --- .../optimizers/discriminative_layer_training.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index ad8d6a1f3a..c0df50c4bd 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -17,7 +17,7 @@ import tensorflow as tf from typeguard import typechecked from typing import Union -from tensorflow.python.keras.optimizer_v2 import optimizer_v2 +from tensorflow.keras.optimizers import Optimizer # python -m flake8 tensorflow_addons/optimizers/discriminative_layer_training.py # python -m black tensorflow_addons/optimizers/discriminative_layer_training.py @@ -30,12 +30,12 @@ def __init__(self, name): @tf.keras.utils.register_keras_serializable(package="Addons") -class MultiOpt(optimizer_v2.OptimizerV2): +class MultiOpt(Optimizer): @typechecked def __init__( self, - optimizer_layer_pairs: Union[list, None], - optimizer_specs: Union[list, None], + optimizer_layer_pairs: Union[list, None] = None, + optimizer_specs: Union[list, None] = None, name: str = "MultiOpt", **kwargs ): @@ -79,13 +79,13 @@ def __init__( super(MultiOpt, self).__init__(name, **kwargs) - if any(optimizer_layer_pairs) and not any(optimizer_specs): + if optimizer_specs is None and optimizer_layer_pairs is not None: self.optimizer_specs = [ self.create_optimizer_spec(opt, layer) for opt, layer in optimizer_layer_pairs ] - elif any(optimizer_specs): + elif optimizer_specs is not None and optimizer_layer_pairs is None: self.optimizer_specs = optimizer_specs else: @@ -99,9 +99,7 @@ def __init__( self.lr = self.initialized_optimizer_specs[0]["optimizer"].lr - def apply_gradients( - self, grads_and_vars, name=None, experimental_aggregate_gradients=True - ): + def apply_gradients(self, grads_and_vars, name=None): """ Wrapped Gradient Apply method. Returns a list of tf ops to be executed. """ From 35dca9b4216776b390cd7daf5824e267b029a431 Mon Sep 17 00:00:00 2001 From: HongM Date: Sun, 13 Sep 2020 19:39:02 -0400 Subject: [PATCH 102/106] restructured wrapper serialization and removed unnecessary components. 
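The restructuring in this patch keeps live optimizer instances inside `optimizer_specs` and falls back on the standard Keras optimizer (de)serialization path when the wrapper is rebuilt from a saved config. A minimal standalone sketch of that round trip, in the spirit of the new `maybe_initialize_optimizer_spec` below (the variable names in the spec dict are made up for illustration):

```python
import tensorflow as tf

# Serialize an instantiated optimizer into a plain dict (class name + config).
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
opt_as_dict = tf.keras.optimizers.serialize(opt)

# Rebuild a live optimizer from that dict, which is what the wrapper has to do
# when it is reloaded and its specs arrive as dicts instead of optimizer objects.
restored = tf.keras.optimizers.deserialize(opt_as_dict)
assert isinstance(restored, tf.keras.optimizers.Adam)

# A spec pairs a (possibly serialized) optimizer with the names of the variables
# it should update; names survive serialization, whereas var.ref() would not.
spec = {"optimizer": opt_as_dict, "weights": ["dense/kernel:0", "dense/bias:0"]}
if isinstance(spec["optimizer"], dict):
    spec["optimizer"] = tf.keras.optimizers.deserialize(spec["optimizer"])
```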
--- tensorflow_addons/optimizers/__init__.py | 2 +- .../discriminative_layer_training.py | 75 +++++++++---------- .../discriminative_layer_training_test.py | 31 +++----- 3 files changed, 47 insertions(+), 61 deletions(-) diff --git a/tensorflow_addons/optimizers/__init__.py b/tensorflow_addons/optimizers/__init__.py index e35bc03cf3..5e1d90f2e4 100644 --- a/tensorflow_addons/optimizers/__init__.py +++ b/tensorflow_addons/optimizers/__init__.py @@ -27,7 +27,7 @@ ExponentialCyclicalLearningRate, ) from tensorflow_addons.optimizers.discriminative_layer_training import ( - MultiOpt, + MultiOptimzer, ) from tensorflow_addons.optimizers.lamb import LAMB from tensorflow_addons.optimizers.lazy_adam import LazyAdam diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index c0df50c4bd..77d1d522ec 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -19,24 +19,15 @@ from typing import Union from tensorflow.keras.optimizers import Optimizer -# python -m flake8 tensorflow_addons/optimizers/discriminative_layer_training.py -# python -m black tensorflow_addons/optimizers/discriminative_layer_training.py - - -class FakeVar: - def __init__(self, name): - # probably can be refactored out - self.name = name - @tf.keras.utils.register_keras_serializable(package="Addons") -class MultiOpt(Optimizer): +class MultiOptimzer(Optimizer): @typechecked def __init__( self, optimizer_layer_pairs: Union[list, None] = None, optimizer_specs: Union[list, None] = None, - name: str = "MultiOpt", + name: str = "MultiOptimzer", **kwargs ): @@ -45,13 +36,15 @@ def __init__( Each optimizer will optimize only the weights associated with its paired layer. This can be used to implement discriminative layer training by assigning different learning rates to each optimizer - layer pair. (Optimizer, list(Layers)) pairs are also supported. + layer pair. (Optimizer, list(Layers)) pairs are also supported. Please note that the layers must be + instantiated before instantiating the optimizer. Currently, MultiOpt does not support callbacks that modify optimizers. However, you can instantiate optimizer layer pairs with tf.keras.optimizers.schedules.LearningRateSchedule instead of a static learning rate. - This code should function on CPU, GPU, and TPU. + This code should function on CPU, GPU, and TPU. Apply the with strategy.scope() context as you + would with any other optimizer. Example: @@ -77,7 +70,7 @@ def __init__( """ - super(MultiOpt, self).__init__(name, **kwargs) + super(MultiOptimzer, self).__init__(name, **kwargs) if optimizer_specs is None and optimizer_layer_pairs is not None: self.optimizer_specs = [ @@ -86,22 +79,20 @@ def __init__( ] elif optimizer_specs is not None and optimizer_layer_pairs is None: - self.optimizer_specs = optimizer_specs + self.optimizer_specs = [ + self.maybe_initialize_optimizer_spec(spec) for spec in optimizer_specs + ] else: raise RuntimeError( "You must specify either an list of optimizer_layer_pairs or a list of optimizer_specs" ) - self.initialized_optimizer_specs = [ - self.initialize_from_optimizer_spec(spec) for spec in self.optimizer_specs - ] - - self.lr = self.initialized_optimizer_specs[0]["optimizer"].lr - def apply_gradients(self, grads_and_vars, name=None): """ Wrapped Gradient Apply method. Returns a list of tf ops to be executed. 
+ + Name of variable is used rather than var.ref() to enable serialization and deserialization. """ for spec in self.optimizer_specs: @@ -109,38 +100,40 @@ def apply_gradients(self, grads_and_vars, name=None): for grad, var in tuple(grads_and_vars): for spec in self.optimizer_specs: - for weight in spec["weights"]: - if var.name == weight.name: + for name in spec["weights"]: + if var.name == name: spec["gv"].append((grad, var)) - return [ - spec["optimizer"].apply_gradients(spec["gv"]) - for spec in self.optimizer_specs - ] + return tf.group( + [ + spec["optimizer"].apply_gradients(spec["gv"]) + for spec in self.optimizer_specs + ] + ) def get_config(self): - config = super(MultiOpt, self).get_config() + config = super(MultiOptimzer, self).get_config() config.update({"optimizer_specs": self.optimizer_specs}) return config - @classmethod - def initialize_from_optimizer_spec(cls, optimizer_spec): - optimizer_spec["optimizer"] = optimizer_spec["optimizer_class"].from_config( - optimizer_spec["optimizer_config"] - ) - return optimizer_spec - @classmethod def create_optimizer_spec(cls, optimizer_instance, layer): + if type(layer) == list: - weights = [ - FakeVar(var.name) for sublayer in layer for var in sublayer.weights - ] + weights = [var.name for sublayer in layer for var in sublayer.weights] else: - weights = [FakeVar(var.name) for var in layer.weights] + weights = [var.name for var in layer.weights] return { - "optimizer_class": optimizer_instance.__class__, - "optimizer_config": optimizer_instance.get_config(), + "optimizer": optimizer_instance, "weights": weights, } + + @classmethod + def maybe_initialize_optimizer_spec(cls, optimizer_spec): + if type(optimizer_spec["optimizer"]) == dict: + optimizer_spec["optimizer"] = tf.keras.optimizers.deserialize( + optimizer_spec["optimizer"] + ) + + return optimizer_spec diff --git a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py index fdae52ba1c..c78e8e3cfd 100644 --- a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py @@ -1,4 +1,4 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,13 +17,10 @@ import pytest import numpy as np -from tensorflow_addons.optimizers.discriminative_layer_training import MultiOpt +from tensorflow_addons.optimizers.discriminative_layer_training import MultiOptimzer import tensorflow as tf from tensorflow_addons.utils import test_utils -# python -m flake8 tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py -# python -m black tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py - def _dtypes_to_test(use_gpu): # Based on issue #347 in the following link, @@ -34,27 +31,18 @@ def _dtypes_to_test(use_gpu): # The function "_DtypesToTest" is from # "https://github.com/tensorflow/tensorflow/blob/5d4a6cee737a1dc6c20172a1dc1 # 5df10def2df72/tensorflow/python/kernel_tests/conv_ops_3d_test.py#L53-L62" + # TODO(WindQAQ): xxx + if use_gpu: return [tf.float32, tf.float64] else: return [tf.half, tf.float32, tf.float64] -def _dtypes_with_checking_system(use_gpu, system): - # Based on issue #36764 in the following link, - # "https://github.com/tensorflow/tensorflow/issues/36764" - # tf.half is not registered for tf.linalg.svd function on Windows - # CPU version. - # So we have to remove tf.half when testing with Windows CPU version. - if system == "Windows": - return [tf.float32, tf.float64] - else: - return _dtypes_to_test(use_gpu) - - @pytest.mark.with_device(["cpu", "gpu"]) @pytest.mark.parametrize("dtype", [tf.float16, tf.float32, tf.float64]) -def test_fit_layer_optimizer(dtype, device): +@pytest.mark.parametrize("serialize", [True, False]) +def test_fit_layer_optimizer(dtype, device, serialize): # Test ensures that each optimizer is only optimizing its own layer with its learning rate if "gpu" in device and dtype == tf.float16: @@ -78,10 +66,15 @@ def test_fit_layer_optimizer(dtype, device): opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1])] loss = tf.keras.losses.MSE - optimizer = MultiOpt(opt_layer_pairs) + optimizer = MultiOptimzer(opt_layer_pairs) model.compile(optimizer=optimizer, loss=loss) + if serialize: + model.save("test", save_format="tf") + tf.keras.backend.clear_session() + model = tf.keras.models.load_model("test") + model.fit(x, y, batch_size=8, epochs=10) weights_after_train = ( From f189c39a01726e41b9288993907adc56f032a4af Mon Sep 17 00:00:00 2001 From: HongM Date: Sun, 13 Sep 2020 19:54:02 -0400 Subject: [PATCH 103/106] expanded on docstr and added repr --- .../optimizers/discriminative_layer_training.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 77d1d522ec..130251d745 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -32,7 +32,9 @@ def __init__( ): """ - Creates a wrapper around a set of instantiated optimizer layer pairs. + Creates a wrapper around a set of instantiated optimizer layer pairs. Allows for implementation of + Discriminative Layer Training as per https://arxiv.org/abs/1607.05440. Generally useful for transfer learning + of deep networks. Each optimizer will optimize only the weights associated with its paired layer. 
This can be used to implement discriminative layer training by assigning different learning rates to each optimizer @@ -59,7 +61,7 @@ def __init__( (opt2, model.layers[1:])] loss = tf.keras.losses.MSE - optimizer = MultiOpt(opt_layer_pairs) + optimizer = tfa.optimizers.MultiOpt(opt_layer_pairs) model.compile(optimizer=optimizer, loss = loss) @@ -137,3 +139,8 @@ def maybe_initialize_optimizer_spec(cls, optimizer_spec): ) return optimizer_spec + + def __repr__(self): + return "Multi Optimizer with %i optimizer layer pairs" % len( + self.optimizer_specs + ) From c63c635ac18b5e8bd7afee8b608bb51de5f66b6e Mon Sep 17 00:00:00 2001 From: HongM Date: Sun, 13 Sep 2020 21:26:32 -0400 Subject: [PATCH 104/106] cleaned up docstrings, added assertion tests, and added explicit test for only the serialization --- .../discriminative_layer_training.py | 105 ++++++++++-------- .../discriminative_layer_training_test.py | 26 ++++- 2 files changed, 81 insertions(+), 50 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 130251d745..67501a6bdb 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -14,86 +14,93 @@ # ============================================================================== """Discriminative Layer Training Optimizer for TensorFlow.""" +from typing import Union + import tensorflow as tf from typeguard import typechecked -from typing import Union -from tensorflow.keras.optimizers import Optimizer @tf.keras.utils.register_keras_serializable(package="Addons") -class MultiOptimzer(Optimizer): - @typechecked - def __init__( - self, - optimizer_layer_pairs: Union[list, None] = None, - optimizer_specs: Union[list, None] = None, - name: str = "MultiOptimzer", - **kwargs - ): - - """ - Creates a wrapper around a set of instantiated optimizer layer pairs. Allows for implementation of - Discriminative Layer Training as per https://arxiv.org/abs/1607.05440. Generally useful for transfer learning - of deep networks. +class MultiOptimzer(tf.keras.optimizers.Optimizer): + """Multi Optimizer Wrapper for Discriminative Layer Training. - Each optimizer will optimize only the weights associated with its paired layer. This can be used - to implement discriminative layer training by assigning different learning rates to each optimizer - layer pair. (Optimizer, list(Layers)) pairs are also supported. Please note that the layers must be - instantiated before instantiating the optimizer. + Creates a wrapper around a set of instantiated optimizer layer pairs. Generally useful for transfer learning + of deep networks. - Currently, MultiOpt does not support callbacks that modify optimizers. However, you can instantiate - optimizer layer pairs with tf.keras.optimizers.schedules.LearningRateSchedule instead of a static learning - rate. + Each optimizer will optimize only the weights associated with its paired layer. This can be used + to implement discriminative layer training by assigning different learning rates to each optimizer + layer pair. (Optimizer, list(Layers)) pairs are also supported. Please note that the layers must be + instantiated before instantiating the optimizer. - This code should function on CPU, GPU, and TPU. Apply the with strategy.scope() context as you - would with any other optimizer. + Args: + optimizers_and_layers: a list of tuples of an optimizer and a layer or model. 
Each tuple should contain + exactly 1 instantiated optimizer and 1 object that subclasses tf.keras.Model or tf.keras.Layer. Nested + layers and models will be automatically discovered. Alternatively, in place of a single layer, you can pass + a list of layers. + optimizer_specs: specialized list for serialization. Should be left as None for almost all cases. If you are + loading a serialized version of this optimizer, please use tf.keras.models.load_model after saving a + model compiled with this optimizer. - Example: + Usage: - ```python + >>> model = get_model() + >>> opt1 = tf.keras.optimizers.Adam(learning_rate=1e-4) + >>> opt2 = tf.keras.optimizers.Adam(learning_rate=1e-2) + >>> opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1:])] + >>> loss = tf.keras.losses.MSE + >>> optimizer = tfa.optimizers.MultiOpt(opt_layer_pairs) + >>> model.compile(optimizer=optimizer, loss = loss) + >>> model.fit(x,y) - model = get_model() - opt1 = tf.keras.optimizers.Adam(learning_rate=1e-4) - opt2 = tf.keras.optimizers.Adam(learning_rate=1e-2) + Reference: - opt_layer_pairs = [(opt1, model.layers[0]), - (opt2, model.layers[1:])] + [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/abs/1801.06146) + [Collaborative Layer-wise Discriminative Learning in Deep Neural Networks](https://arxiv.org/abs/1607.05440) - loss = tf.keras.losses.MSE - optimizer = tfa.optimizers.MultiOpt(opt_layer_pairs) + Notes: - model.compile(optimizer=optimizer, loss = loss) + Currently, MultiOpt does not support callbacks that modify optimizers. However, you can instantiate + optimizer layer pairs with tf.keras.optimizers.schedules.LearningRateSchedule instead of a static learning + rate. - model.fit(x,y) + This code should function on CPU, GPU, and TPU. Apply the with strategy.scope() context as you + would with any other optimizer. - ``` + """ + @typechecked + def __init__( + self, + optimizers_and_layers: Union[list, None] = None, + optimizer_specs: Union[list, None] = None, + name: str = "MultiOptimzer", + **kwargs + ): - """ super(MultiOptimzer, self).__init__(name, **kwargs) - if optimizer_specs is None and optimizer_layer_pairs is not None: + if optimizer_specs is None and optimizers_and_layers is not None: self.optimizer_specs = [ self.create_optimizer_spec(opt, layer) - for opt, layer in optimizer_layer_pairs + for opt, layer in optimizers_and_layers ] - elif optimizer_specs is not None and optimizer_layer_pairs is None: + elif optimizer_specs is not None and optimizers_and_layers is None: self.optimizer_specs = [ self.maybe_initialize_optimizer_spec(spec) for spec in optimizer_specs ] else: raise RuntimeError( - "You must specify either an list of optimizer_layer_pairs or a list of optimizer_specs" + "You must specify either an list of optimizers and layers or a list of optimizer_specs" ) - def apply_gradients(self, grads_and_vars, name=None): - """ - Wrapped Gradient Apply method. Returns a list of tf ops to be executed. + def apply_gradients(self, grads_and_vars, name=None, **kwargs): + """Wrapped apply_gradient method. + Returns a list of tf ops to be executed. Name of variable is used rather than var.ref() to enable serialization and deserialization. 
""" @@ -108,7 +115,7 @@ def apply_gradients(self, grads_and_vars, name=None): return tf.group( [ - spec["optimizer"].apply_gradients(spec["gv"]) + spec["optimizer"].apply_gradients(spec["gv"], **kwargs) for spec in self.optimizer_specs ] ) @@ -121,6 +128,12 @@ def get_config(self): @classmethod def create_optimizer_spec(cls, optimizer_instance, layer): + assert isinstance(optimizer_instance, tf.keras.optimizers.Optimizer), \ + "Object passed is not an instance of tf.keras.optimizers.Optimizer" + + assert isinstance(layer, tf.keras.layers.Layer) or isinstance(layer, tf.keras.Model), \ + "Object passed is not an instance of tf.keras.layers.Layer nor tf.keras.Model" + if type(layer) == list: weights = [var.name for sublayer in layer for var in sublayer.weights] else: diff --git a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py index c78e8e3cfd..e44b4181b1 100644 --- a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py @@ -12,16 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for Discriminative Layer Training Optimizer for TensorFlow""" +"""Tests for Discriminative Layer Training Optimizer for TensorFlow.""" import pytest import numpy as np +import tensorflow as tf from tensorflow_addons.optimizers.discriminative_layer_training import MultiOptimzer -import tensorflow as tf from tensorflow_addons.utils import test_utils - def _dtypes_to_test(use_gpu): # Based on issue #347 in the following link, # "https://github.com/tensorflow/addons/issues/347" @@ -31,7 +30,7 @@ def _dtypes_to_test(use_gpu): # The function "_DtypesToTest" is from # "https://github.com/tensorflow/tensorflow/blob/5d4a6cee737a1dc6c20172a1dc1 # 5df10def2df72/tensorflow/python/kernel_tests/conv_ops_3d_test.py#L53-L62" - # TODO(WindQAQ): xxx + # TODO(WindQAQ): Clean up this in TF2.4 if use_gpu: return [tf.float32, tf.float64] @@ -70,6 +69,7 @@ def test_fit_layer_optimizer(dtype, device, serialize): model.compile(optimizer=optimizer, loss=loss) + # serialize whole model including optimizer, clear the session, then reload the whole model. 
if serialize: model.save("test", save_format="tf") tf.keras.backend.clear_session() @@ -92,3 +92,21 @@ def test_fit_layer_optimizer(dtype, device, serialize): test_utils.assert_allclose_according_to_type( weights_before_train[1], weights_after_train[1] ) + + +def test_serialization(): + + model = tf.keras.Sequential( + [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)] + ) + + opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3) + opt2 = tf.keras.optimizers.SGD(learning_rate=0) + + opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1])] + + optimizer = MultiOptimzer(opt_layer_pairs) + config = tf.keras.optimizers.serialize(optimizer) + + new_optimizer = tf.keras.optimizers.deserialize(config) + assert new_optimizer.get_config() == optimizer.get_config() \ No newline at end of file From 2c467521fb43ffabe334917209d57322a057bf81 Mon Sep 17 00:00:00 2001 From: HongM Date: Sun, 13 Sep 2020 21:27:31 -0400 Subject: [PATCH 105/106] ran black and flake8 --- .../optimizers/discriminative_layer_training.py | 11 ++++++----- .../tests/discriminative_layer_training_test.py | 3 ++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 67501a6bdb..4994c07706 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -78,7 +78,6 @@ def __init__( **kwargs ): - super(MultiOptimzer, self).__init__(name, **kwargs) if optimizer_specs is None and optimizers_and_layers is not None: @@ -128,11 +127,13 @@ def get_config(self): @classmethod def create_optimizer_spec(cls, optimizer_instance, layer): - assert isinstance(optimizer_instance, tf.keras.optimizers.Optimizer), \ - "Object passed is not an instance of tf.keras.optimizers.Optimizer" + assert isinstance( + optimizer_instance, tf.keras.optimizers.Optimizer + ), "Object passed is not an instance of tf.keras.optimizers.Optimizer" - assert isinstance(layer, tf.keras.layers.Layer) or isinstance(layer, tf.keras.Model), \ - "Object passed is not an instance of tf.keras.layers.Layer nor tf.keras.Model" + assert isinstance(layer, tf.keras.layers.Layer) or isinstance( + layer, tf.keras.Model + ), "Object passed is not an instance of tf.keras.layers.Layer nor tf.keras.Model" if type(layer) == list: weights = [var.name for sublayer in layer for var in sublayer.weights] diff --git a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py index e44b4181b1..8a93cfb903 100644 --- a/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py +++ b/tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py @@ -21,6 +21,7 @@ from tensorflow_addons.optimizers.discriminative_layer_training import MultiOptimzer from tensorflow_addons.utils import test_utils + def _dtypes_to_test(use_gpu): # Based on issue #347 in the following link, # "https://github.com/tensorflow/addons/issues/347" @@ -109,4 +110,4 @@ def test_serialization(): config = tf.keras.optimizers.serialize(optimizer) new_optimizer = tf.keras.optimizers.deserialize(config) - assert new_optimizer.get_config() == optimizer.get_config() \ No newline at end of file + assert new_optimizer.get_config() == optimizer.get_config() From 4b856f1401613ac3ce7800dca32b93b093854dec Mon Sep 17 00:00:00 2001 From: HongM Date: Sun, 13 Sep 2020 21:41:43 -0400 
Subject: [PATCH 106/106] fixed doc string --- .../discriminative_layer_training.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tensorflow_addons/optimizers/discriminative_layer_training.py b/tensorflow_addons/optimizers/discriminative_layer_training.py index 4994c07706..e56387d682 100644 --- a/tensorflow_addons/optimizers/discriminative_layer_training.py +++ b/tensorflow_addons/optimizers/discriminative_layer_training.py @@ -43,15 +43,21 @@ class MultiOptimzer(tf.keras.optimizers.Optimizer): Usage: - >>> model = get_model() - >>> opt1 = tf.keras.optimizers.Adam(learning_rate=1e-4) - >>> opt2 = tf.keras.optimizers.Adam(learning_rate=1e-2) - >>> opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1:])] - >>> loss = tf.keras.losses.MSE - >>> optimizer = tfa.optimizers.MultiOpt(opt_layer_pairs) - >>> model.compile(optimizer=optimizer, loss = loss) - >>> model.fit(x,y) + ```python + model = get_model() + opt1 = tf.keras.optimizers.Adam(learning_rate=1e-4) + opt2 = tf.keras.optimizers.Adam(learning_rate=1e-2) + + opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1:])] + + loss = tf.keras.losses.MSE + optimizer = tfa.optimizers.MultiOpt(opt_layer_pairs) + + model.compile(optimizer=optimizer, loss = loss) + + model.fit(x,y) + ''' Reference:
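Taken together, the series leaves the wrapper usable roughly as exercised in `test_fit_layer_optimizer` above. A compact end-to-end sketch under that assumption, keeping the class and module names as committed in this series (including the `MultiOptimzer` spelling); the toy data and layer sizes are illustrative only:

```python
import numpy as np
import tensorflow as tf

from tensorflow_addons.optimizers.discriminative_layer_training import MultiOptimzer

# Two dense layers, each paired with its own optimizer: the second layer uses
# SGD with lr=0, so its weights stay frozen while the first layer trains.
model = tf.keras.Sequential(
    [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)]
)

opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3)
opt2 = tf.keras.optimizers.SGD(learning_rate=0)
optimizer = MultiOptimzer([(opt1, model.layers[0]), (opt2, model.layers[1])])

model.compile(optimizer=optimizer, loss=tf.keras.losses.MSE)

x = np.ones([100], dtype=np.float32)
y = np.ones([100], dtype=np.float32)
model.fit(x, y, batch_size=8, epochs=2)
```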