From ea4a9a29b1906441558dc9c6009a7e2a3900cf77 Mon Sep 17 00:00:00 2001
From: David Palzer
Date: Sun, 15 Nov 2020 11:41:30 -0500
Subject: [PATCH 1/4] fixed bug where tuner would not tune lr if also tuning
 batch_size

---
 pytorch_lightning/tuner/lr_finder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pytorch_lightning/tuner/lr_finder.py b/pytorch_lightning/tuner/lr_finder.py
index b6d8c8178093b..ccc9087f695ac 100644
--- a/pytorch_lightning/tuner/lr_finder.py
+++ b/pytorch_lightning/tuner/lr_finder.py
@@ -408,7 +408,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, data
         self.progress_bar.update()

         current_loss = trainer.train_loop.running_loss.last().item()
-        current_step = trainer.global_step + 1  # remove the +1 in 1.0
+        current_step = trainer.global_step

         # Avg loss (loss with momentum) + smoothing
         self.avg_loss = self.beta * self.avg_loss + (1 - self.beta) * current_loss

From e56eff841e3aca9fc3bee4d5ee94367a39243838 Mon Sep 17 00:00:00 2001
From: David Palzer
Date: Sun, 15 Nov 2020 12:07:59 -0500
Subject: [PATCH 2/4] added a '+1' to computing the smoothed loss. This
 maintains the behavior for the smoothed loss as before the bug fix

---
 pytorch_lightning/tuner/lr_finder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pytorch_lightning/tuner/lr_finder.py b/pytorch_lightning/tuner/lr_finder.py
index ccc9087f695ac..940f9a9cf29a1 100644
--- a/pytorch_lightning/tuner/lr_finder.py
+++ b/pytorch_lightning/tuner/lr_finder.py
@@ -412,7 +412,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, data

         # Avg loss (loss with momentum) + smoothing
         self.avg_loss = self.beta * self.avg_loss + (1 - self.beta) * current_loss
-        smoothed_loss = self.avg_loss / (1 - self.beta**current_step)
+        smoothed_loss = self.avg_loss / (1 - self.beta**(current_step+1))

         # Check if we diverging
         if self.early_stop_threshold is not None:

From 452d0b33cde45b32fb1dc33f3d132fbab9f87904 Mon Sep 17 00:00:00 2001
From: David Palzer
Date: Sun, 15 Nov 2020 12:11:05 -0500
Subject: [PATCH 3/4] pep8 fix

---
 pytorch_lightning/tuner/lr_finder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pytorch_lightning/tuner/lr_finder.py b/pytorch_lightning/tuner/lr_finder.py
index 940f9a9cf29a1..298f983ebf4e9 100644
--- a/pytorch_lightning/tuner/lr_finder.py
+++ b/pytorch_lightning/tuner/lr_finder.py
@@ -412,7 +412,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, data

         # Avg loss (loss with momentum) + smoothing
         self.avg_loss = self.beta * self.avg_loss + (1 - self.beta) * current_loss
-        smoothed_loss = self.avg_loss / (1 - self.beta**(current_step+1))
+        smoothed_loss = self.avg_loss / (1 - self.beta**(current_step + 1))

         # Check if we diverging
         if self.early_stop_threshold is not None:

From 76de8cc712b90bf033a97811b165bdf864ef1dfc Mon Sep 17 00:00:00 2001
From: Adrian Wälchli
Date: Tue, 9 Mar 2021 00:42:42 +0100
Subject: [PATCH 4/4] add changelog

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f78569c1b7a0b..384a218c81305 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -131,6 +131,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Fixed `Trainer` not resetting `lightning_optimizers` when calling `Trainer.fit()` multiple times ([#6372](https://github.com/PyTorchLightning/pytorch-lightning/pull/6372))

+- Fixed an issue where the tuner would not tune the learning rate if also tuning the batch size ([#4688](https://github.com/PyTorchLightning/pytorch-lightning/pull/4688))
+
+
 ## [1.2.2] - 2021-03-02

 ### Added
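
A note on the smoothing math the patches above touch: `smoothed_loss` is a
bias-corrected exponential moving average of the raw loss (the same startup
correction Adam applies to its moment estimates). Patch 1 drops the stale
`+ 1` from `current_step`, making it 0-indexed; patch 2 moves that `+ 1` into
the bias-correction exponent, which keeps the smoothed curve identical and
avoids a zero denominator (`1 - beta**0 == 0`) on the first batch. Below is a
minimal standalone sketch of the corrected computation; `beta` and the loss
sequence are illustrative values, not taken from the PR.

# Standalone sketch of the LR finder's bias-corrected loss smoothing.
# `beta` and the loss values are illustrative, not taken from the PR.
beta = 0.98
avg_loss = 0.0
losses = [2.5, 2.3, 2.4, 2.1, 1.9]  # stand-in per-batch training losses

for step, current_loss in enumerate(losses):  # `step` stands in for trainer.global_step
    # Exponential moving average of the raw loss.
    avg_loss = beta * avg_loss + (1 - beta) * current_loss
    # Bias correction: avg_loss starts at 0, so early averages underestimate
    # the true loss. The `+ 1` keeps the exponent positive now that `step` is
    # 0-indexed; beta**0 would make the denominator zero on the first batch.
    smoothed_loss = avg_loss / (1 - beta ** (step + 1))
    print(f"step={step}  raw={current_loss:.3f}  smoothed={smoothed_loss:.3f}")

At step 0 this reduces to `smoothed_loss == current_loss` (the EMA contributes
`(1 - beta) * current_loss` and the denominator is exactly `1 - beta`), so the
divergence check in `on_train_batch_end` sees the same values it did before
the off-by-one fix.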