From a3aad2301737cf31d65a4b4daf9d2e376266086c Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Mon, 20 Sep 2021 20:21:38 +0000 Subject: [PATCH 1/7] deprecate progress_bar_refresh_rate --- .../trainer/connectors/callback_connector.py | 8 ++++++++ pytorch_lightning/trainer/trainer.py | 7 ++++++- tests/deprecated_api/test_remove_1-7.py | 5 +++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/callback_connector.py b/pytorch_lightning/trainer/connectors/callback_connector.py index cafad831cbb30..95e9e930100ba 100644 --- a/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/pytorch_lightning/trainer/connectors/callback_connector.py @@ -74,6 +74,14 @@ def on_trainer_init( " in v1.7. Please pass `pytorch_lightning.callbacks.progress.ProgressBar` with" " `process_position` directly to the Trainer's `callbacks` argument instead." ) + + if progress_bar_refresh_rate is not None: + rank_zero_deprecation( + f"Setting `Trainer(progress_bar_refresh_rate={progress_bar_refresh_rate})` is deprecated in v1.5 and" + " will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.ProgressBar` with" + " `refresh_rate` directly to the Trainer's `callbacks` argument instead." + ) + self.trainer._progress_bar_callback = self.configure_progress_bar(progress_bar_refresh_rate, process_position) # configure the ModelSummary callback diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 7cabcb292622a..b0dbe77a18e0a 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -137,7 +137,7 @@ def __init__( tpu_cores: Optional[Union[List[int], str, int]] = None, ipus: Optional[int] = None, log_gpu_memory: Optional[str] = None, - progress_bar_refresh_rate: Optional[int] = None, + progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7 overfit_batches: Union[int, float] = 0.0, track_grad_norm: Union[int, float, str] = -1, check_val_every_n_epoch: int = 1, @@ -284,6 +284,11 @@ def __init__( Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.). + .. deprecated:: v1.5 + ``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7. + Please pass :class:`~pytorch_lightning.callbacks.progress.ProgressBar` with ``refresh_rate`` + directly to the Trainer's ``callbacks`` argument instead. + profiler: To profile individual steps during training and assist in identifying bottlenecks. overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int). 
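
For reference, the migration that this first patch's deprecation message prescribes looks roughly like the following (a minimal sketch; the refresh rate of 10 is an arbitrary example value):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks.progress import ProgressBar

    # Deprecated in v1.5, to be removed in v1.7: emits the warning added above.
    trainer = Trainer(progress_bar_refresh_rate=10)

    # Replacement: pass the ProgressBar callback with `refresh_rate` directly.
    trainer = Trainer(callbacks=[ProgressBar(refresh_rate=10)])
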
diff --git a/tests/deprecated_api/test_remove_1-7.py b/tests/deprecated_api/test_remove_1-7.py index 5d7d7db761233..c949384a0a8db 100644 --- a/tests/deprecated_api/test_remove_1-7.py +++ b/tests/deprecated_api/test_remove_1-7.py @@ -224,3 +224,8 @@ def test_v1_7_0_deprecate_add_get_queue(tmpdir): with pytest.deprecated_call(match=r"`LightningModule.get_from_queue` method was deprecated in v1.5"): trainer.fit(model) + + +def test_v1_7_0_progress_bar_refresh_rate_trainer_constructor(tmpdir): + with pytest.deprecated_call(match=r"Setting `Trainer\(progress_bar_refresh_rate=1\)` is deprecated in v1.5"): + _ = Trainer(progress_bar_refresh_rate=1) From 746aa74bc1c22b08cb65dffa83ff38304dbf9f3e Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Mon, 20 Sep 2021 20:26:38 +0000 Subject: [PATCH 2/7] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2462ffbdbb773..0bc0442933e37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -249,6 +249,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Deprecated passing `flush_logs_every_n_steps` as a Trainer argument, instead pass it to the logger init if supported ([#9366](https://github.com/PyTorchLightning/pytorch-lightning/pull/9366)) +- Deprecated passing `progress_bar_refresh_rate` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `refresh_rate` directly to the list of callbacks ([#9616] (https://github.com/PyTorchLightning/pytorch-lightning/pull/9616)) ### Removed From 1ef573999dc98a04759e7fec60865b96e34b39f5 Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Wed, 22 Sep 2021 18:20:07 +0000 Subject: [PATCH 3/7] fix changelog --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 793e157764583..58c6d70679fd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -249,10 +249,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Deprecated passing `flush_logs_every_n_steps` as a Trainer argument, instead pass it to the logger init if supported ([#9366](https://github.com/PyTorchLightning/pytorch-lightning/pull/9366)) -- Deprecated passing `progress_bar_refresh_rate` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `refresh_rate` directly to the list of callbacks ([#9616] (https://github.com/PyTorchLightning/pytorch-lightning/pull/9616)) +- Deprecated `LightningLoggerBase.close`, `LoggerCollection.close` in favor of `LightningLoggerBase.finalize`, `LoggerCollection.finalize` ([#9422](https://github.com/PyTorchLightning/pytorch-lightning/pull/9422)) -- Deprecated `LightningLoggerBase.close`, `LoggerCollection.close` in favor of `LightningLoggerBase.finalize`, `LoggerCollection.finalize` ([#9422](https://github.com/PyTorchLightning/pytorch-lightning/pull/9422)) +- Deprecated passing `progress_bar_refresh_rate` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `refresh_rate` directly to the list of callbacks ([#9616](https://github.com/PyTorchLightning/pytorch-lightning/pull/9616)) ### Removed From 62bda1ad6f488c34b0079f6c9b62d08b4dee5dfc Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Thu, 23 Sep 2021 05:44:00 +0000 Subject: [PATCH 4/7] add enable_progress_bar --- CHANGELOG.md | 2 + benchmarks/test_basic_parity.py | 2 +- .../computer_vision_fine_tuning.py | 1 - .../trainer/connectors/callback_connector.py | 10 +++- pytorch_lightning/trainer/trainer.py | 4 ++ tests/accelerators/test_ddp_spawn.py | 4 +- tests/accelerators/test_dp.py | 2 +- tests/callbacks/test_callbacks.py | 6 +-- tests/callbacks/test_early_stopping.py | 6 +-- tests/callbacks/test_finetuning_callback.py | 4 +- tests/callbacks/test_lr_monitor.py | 8 ++-- tests/callbacks/test_progress_bar.py | 14 ++++-- tests/callbacks/test_pruning.py | 8 ++-- tests/callbacks/test_stochastic_weight_avg.py | 2 +- .../test_checkpoint_callback_frequency.py | 4 +- tests/checkpointing/test_model_checkpoint.py | 28 +++++------ .../checkpointing/test_trainer_checkpoint.py | 2 +- tests/deprecated_api/test_remove_1-7.py | 2 +- tests/loggers/test_base.py | 6 +-- .../loops/optimization/test_optimizer_loop.py | 2 +- tests/loops/test_loops.py | 4 +- tests/loops/test_training_loop.py | 2 +- .../models/data/horovod/test_train_script.py | 2 +- tests/models/test_cpu.py | 16 +++---- tests/models/test_gpu.py | 4 +- tests/models/test_hooks.py | 12 ++--- tests/models/test_horovod.py | 20 ++++---- tests/models/test_onnx.py | 2 +- tests/models/test_restore.py | 8 ++-- tests/models/test_tpu.py | 24 +++++----- tests/plugins/test_deepspeed_plugin.py | 4 +- tests/profiler/test_profiler.py | 2 +- .../connectors/test_callback_connector.py | 2 +- .../logging_/test_eval_loop_logging.py | 2 - .../trainer/logging_/test_logger_connector.py | 4 +- tests/trainer/optimization/test_optimizers.py | 4 +- tests/trainer/test_trainer.py | 46 +++++++++++-------- tests/utilities/test_auto_restart.py | 2 +- 38 files changed, 150 insertions(+), 127 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58c6d70679fd5..832f326654cca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -145,6 +145,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 - Added `RichModelSummary` callback ([#9546](https://github.com/PyTorchLightning/pytorch-lightning/pull/9546))
 
+- Added `enable_progress_bar` to Trainer constructor ([]())
+
 ### Changed
 
 - `pytorch_lightning.loggers.neptune.NeptuneLogger` is now consistent with new [neptune-client](https://github.com/neptune-ai/neptune-client) API ([#6867](https://github.com/PyTorchLightning/pytorch-lightning/pull/6867)).
diff --git a/benchmarks/test_basic_parity.py b/benchmarks/test_basic_parity.py
index 9c2c3fb72e80e..6612f76280076 100644
--- a/benchmarks/test_basic_parity.py
+++ b/benchmarks/test_basic_parity.py
@@ -157,7 +157,7 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
     trainer = Trainer(
         # as the first run is skipped, no need to run it long
         max_epochs=num_epochs if idx > 0 else 1,
-        progress_bar_refresh_rate=0,
+        enable_progress_bar=False,
         weights_summary=None,
         gpus=1 if device_type == "cuda" else 0,
         checkpoint_callback=False,
diff --git a/pl_examples/domain_templates/computer_vision_fine_tuning.py b/pl_examples/domain_templates/computer_vision_fine_tuning.py
index 2fba1d8ad1759..2cfeaf655f819 100644
--- a/pl_examples/domain_templates/computer_vision_fine_tuning.py
+++ b/pl_examples/domain_templates/computer_vision_fine_tuning.py
@@ -273,7 +273,6 @@ def add_arguments_to_parser(self, parser):
             {
                 "trainer.max_epochs": 15,
                 "trainer.weights_summary": None,
-                "trainer.progress_bar_refresh_rate": 1,
                 "trainer.num_sanity_val_steps": 0,
             }
         )
diff --git a/pytorch_lightning/trainer/connectors/callback_connector.py b/pytorch_lightning/trainer/connectors/callback_connector.py
index 95e9e930100ba..c5d16885cb134 100644
--- a/pytorch_lightning/trainer/connectors/callback_connector.py
+++ b/pytorch_lightning/trainer/connectors/callback_connector.py
@@ -38,6 +38,7 @@ def on_trainer_init(
         self,
         callbacks: Optional[Union[List[Callback], Callback]],
         checkpoint_callback: bool,
+        enable_progress_bar: bool,
         progress_bar_refresh_rate: Optional[int],
         process_position: int,
         default_root_dir: Optional[str],
@@ -81,8 +82,15 @@ def on_trainer_init(
                 " will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.ProgressBar` with"
                 " `refresh_rate` directly to the Trainer's `callbacks` argument instead."
             )
+        import logging
 
-        self.trainer._progress_bar_callback = self.configure_progress_bar(progress_bar_refresh_rate, process_position)
+        logging.critical(enable_progress_bar)
+        if enable_progress_bar:
+            self.trainer._progress_bar_callback = self.configure_progress_bar(
+                progress_bar_refresh_rate, process_position
+            )
+        else:
+            self.trainer._progress_bar_callback = None
 
         # configure the ModelSummary callback
         self._configure_model_summary_callback(weights_summary)
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index b0dbe77a18e0a..f37253602af88 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -138,6 +138,7 @@ def __init__(
         ipus: Optional[int] = None,
         log_gpu_memory: Optional[str] = None,
         progress_bar_refresh_rate: Optional[int] = None,  # TODO: remove in v1.7
+        enable_progress_bar: bool = True,
         overfit_batches: Union[int, float] = 0.0,
         track_grad_norm: Union[int, float, str] = -1,
         check_val_every_n_epoch: int = 1,
@@ -289,6 +290,8 @@ def __init__(
             Please pass :class:`~pytorch_lightning.callbacks.progress.ProgressBar` with ``refresh_rate``
             directly to the Trainer's ``callbacks`` argument instead.
 
+        enable_progress_bar: Whether to enable the progress bar by default.
+ profiler: To profile individual steps during training and assist in identifying bottlenecks. overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int). @@ -452,6 +455,7 @@ def __init__( self.callback_connector.on_trainer_init( callbacks, checkpoint_callback, + enable_progress_bar, progress_bar_refresh_rate, process_position, default_root_dir, diff --git a/tests/accelerators/test_ddp_spawn.py b/tests/accelerators/test_ddp_spawn.py index a21078cf55542..806c2fd458402 100644 --- a/tests/accelerators/test_ddp_spawn.py +++ b/tests/accelerators/test_ddp_spawn.py @@ -52,7 +52,7 @@ def test_multi_gpu_model_ddp_spawn(tmpdir): limit_val_batches=10, gpus=[0, 1], accelerator="ddp_spawn", - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) model = BoringModel() @@ -73,7 +73,7 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir): trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.2, limit_val_batches=0.2, diff --git a/tests/accelerators/test_dp.py b/tests/accelerators/test_dp.py index 8e09460551dec..dafb763170519 100644 --- a/tests/accelerators/test_dp.py +++ b/tests/accelerators/test_dp.py @@ -88,7 +88,7 @@ def test_multi_gpu_model_dp(tmpdir): limit_val_batches=10, gpus=[0, 1], accelerator="dp", - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) model = BoringModel() diff --git a/tests/callbacks/test_callbacks.py b/tests/callbacks/test_callbacks.py index 5803db051c659..78e21d821b810 100644 --- a/tests/callbacks/test_callbacks.py +++ b/tests/callbacks/test_callbacks.py @@ -35,7 +35,7 @@ def configure_callbacks(self): model = TestModel() trainer_options = dict( - default_root_dir=tmpdir, checkpoint_callback=False, fast_dev_run=True, progress_bar_refresh_rate=0 + default_root_dir=tmpdir, checkpoint_callback=False, fast_dev_run=True, enable_progress_bar=False ) def assert_expected_calls(_trainer, model_callback, trainer_callback): @@ -86,9 +86,7 @@ def configure_callbacks(self): return [model_callback_mock] model = TestModel() - trainer = Trainer( - default_root_dir=tmpdir, fast_dev_run=True, checkpoint_callback=False, progress_bar_refresh_rate=1 - ) + trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, checkpoint_callback=False) callbacks_before_fit = trainer.callbacks.copy() assert callbacks_before_fit diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py index 7cab0d8776056..fe6873c8f43bf 100644 --- a/tests/callbacks/test_early_stopping.py +++ b/tests/callbacks/test_early_stopping.py @@ -141,7 +141,7 @@ def validation_epoch_end(self, outputs): callbacks=[early_stop_callback], num_sanity_val_steps=0, max_epochs=10, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) assert trainer.current_epoch == expected_stop_epoch @@ -177,7 +177,7 @@ def training_epoch_end(self, outputs): callbacks=[early_stop_callback], num_sanity_val_steps=0, max_epochs=10, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) assert trainer.current_epoch == expected_stop_epoch @@ -444,7 +444,7 @@ def validation_step(self, batch, batch_idx): default_root_dir=tmpdir, limit_val_batches=1, callbacks=EarlyStopping(monitor="foo"), - progress_bar_refresh_rate=0, + enable_progress_bar=False, **kwargs, ) diff --git a/tests/callbacks/test_finetuning_callback.py b/tests/callbacks/test_finetuning_callback.py index c014c8e736874..31b8b0e160132 100644 --- 
a/tests/callbacks/test_finetuning_callback.py +++ b/tests/callbacks/test_finetuning_callback.py @@ -425,7 +425,7 @@ def forward(self, x): limit_train_batches=1, limit_val_batches=1, max_epochs=2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, callbacks=[ckpt, BackboneFinetuning(unfreeze_backbone_at_epoch=1)], ) trainer.fit(BackboneBoringModel()) @@ -436,7 +436,7 @@ def forward(self, x): limit_train_batches=1, limit_val_batches=1, max_epochs=3, - progress_bar_refresh_rate=0, + enable_progress_bar=False, callbacks=BackboneFinetuning(unfreeze_backbone_at_epoch=1), resume_from_checkpoint=ckpt.last_model_path, ) diff --git a/tests/callbacks/test_lr_monitor.py b/tests/callbacks/test_lr_monitor.py index 32b2551245885..df84a1b186231 100644 --- a/tests/callbacks/test_lr_monitor.py +++ b/tests/callbacks/test_lr_monitor.py @@ -251,7 +251,7 @@ def configure_optimizers(self): limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor], - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) trainer.fit(TestModel()) @@ -272,7 +272,7 @@ def configure_optimizers(self): limit_val_batches=2, limit_train_batches=2, callbacks=[lr_monitor], - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) trainer.fit(TestModel()) @@ -310,7 +310,7 @@ def configure_optimizers(self): limit_val_batches=2, limit_train_batches=2, callbacks=[lr_monitor], - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) @@ -388,7 +388,7 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int): limit_val_batches=0, limit_train_batches=2, callbacks=[TestFinetuning(), lr_monitor, Check()], - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, checkpoint_callback=False, ) diff --git a/tests/callbacks/test_progress_bar.py b/tests/callbacks/test_progress_bar.py index 8cf7c0c5fd2b7..fe40150f92680 100644 --- a/tests/callbacks/test_progress_bar.py +++ b/tests/callbacks/test_progress_bar.py @@ -59,11 +59,19 @@ def test_progress_bar_on(tmpdir, callbacks: list, refresh_rate: Optional[int]): assert progress_bars[0] is trainer.progress_bar_callback -@pytest.mark.parametrize("callbacks,refresh_rate", [([], 0), ([], False), ([ModelCheckpoint(dirpath="../trainer")], 0)]) -def test_progress_bar_off(tmpdir, callbacks: list, refresh_rate: Union[bool, int]): +@pytest.mark.parametrize( + "callbacks,refresh_rate,enable_progress_bar", + [([], 0, True), ([], False, True), ([ModelCheckpoint(dirpath="../trainer")], 0, True), ([], 1, False)], +) +def test_progress_bar_off(tmpdir, callbacks: list, refresh_rate: Union[bool, int], enable_progress_bar: bool): """Test different ways the progress bar can be turned off.""" - trainer = Trainer(default_root_dir=tmpdir, callbacks=callbacks, progress_bar_refresh_rate=refresh_rate) + trainer = Trainer( + default_root_dir=tmpdir, + callbacks=callbacks, + progress_bar_refresh_rate=refresh_rate, + enable_progress_bar=enable_progress_bar, + ) progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBar)] assert 0 == len(progress_bars) diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py index cf02b0a4bce75..5c2f2d4d7b3de 100644 --- a/tests/callbacks/test_pruning.py +++ b/tests/callbacks/test_pruning.py @@ -107,7 +107,7 @@ def train_with_pruning_callback( trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, checkpoint_callback=False, logger=False, @@ -225,7 +225,7 
@@ def apply_lottery_ticket_hypothesis(self): ) trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, checkpoint_callback=False, logger=False, @@ -252,7 +252,7 @@ def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, checkpoint_callback=False, logger=False, @@ -321,7 +321,7 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): ckpt_callback = ModelCheckpoint( monitor="test", save_top_k=2, save_last=True, save_on_train_epoch_end=save_on_train_epoch_end ) - trainer = Trainer(callbacks=[pruning_callback, ckpt_callback], max_epochs=3, progress_bar_refresh_rate=0) + trainer = Trainer(callbacks=[pruning_callback, ckpt_callback], max_epochs=3, enable_progress_bar=False) with caplog.at_level(INFO): trainer.fit(model) diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py index 6800694eb3fcf..b2693ed5ded48 100644 --- a/tests/callbacks/test_stochastic_weight_avg.py +++ b/tests/callbacks/test_stochastic_weight_avg.py @@ -121,7 +121,7 @@ def train_with_swa( trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=max_epochs, limit_train_batches=5, limit_val_batches=0, diff --git a/tests/checkpointing/test_checkpoint_callback_frequency.py b/tests/checkpointing/test_checkpoint_callback_frequency.py index 12ec14712fc94..cb38ae8f0573f 100644 --- a/tests/checkpointing/test_checkpoint_callback_frequency.py +++ b/tests/checkpointing/test_checkpoint_callback_frequency.py @@ -43,7 +43,7 @@ def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_inter weights_summary=None, val_check_interval=val_check_interval, limit_val_batches=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) @@ -119,7 +119,7 @@ def training_epoch_end(self, outputs) -> None: trainer = Trainer( callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor="my_loss_step", save_top_k=k, mode="max")], default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=epochs, weights_summary=None, val_check_interval=val_check_interval, diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py index 0c1a6fbd51268..8102bbe3d7fb5 100644 --- a/tests/checkpointing/test_model_checkpoint.py +++ b/tests/checkpointing/test_model_checkpoint.py @@ -135,7 +135,7 @@ def on_validation_epoch_end(self): limit_train_batches=limit_train_batches, limit_val_batches=limit_val_batches, max_epochs=max_epochs, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) assert trainer.state.finished, f"Training failed with {trainer.state}" @@ -236,7 +236,7 @@ def configure_optimizers(self): limit_val_batches=limit_val_batches, max_epochs=max_epochs, val_check_interval=val_check_interval, - progress_bar_refresh_rate=0, + enable_progress_bar=False, num_sanity_val_steps=0, ) trainer.fit(model) @@ -639,7 +639,7 @@ def test_ckpt_every_n_train_steps(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, callbacks=[checkpoint_callback], logger=False, ) @@ -666,7 +666,7 @@ def test_model_checkpoint_train_time_interval(mock_datetime, tmpdir) -> None: default_root_dir=tmpdir, min_epochs=num_epochs, max_epochs=num_epochs, - 
progress_bar_refresh_rate=0, + enable_progress_bar=False, callbacks=[ ModelCheckpoint( filename="{epoch}-{step}", @@ -737,7 +737,7 @@ def test_ckpt_metric_names(tmpdir): max_epochs=1, gradient_clip_val=1.0, overfit_batches=0.20, - progress_bar_refresh_rate=0, + enable_progress_bar=False, limit_train_batches=0.01, limit_val_batches=0.01, callbacks=[ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, filename="{val_loss:.2f}")], @@ -758,7 +758,7 @@ def test_default_checkpoint_behavior(tmpdir): model = LogInTwoMethods() trainer = Trainer( - default_root_dir=tmpdir, max_epochs=3, progress_bar_refresh_rate=0, limit_train_batches=5, limit_val_batches=5 + default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5 ) with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock: @@ -889,7 +889,7 @@ def validation_step(self, batch, batch_idx): limit_test_batches=2, callbacks=[checkpoint_callback], weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) assert os.listdir(tmpdir) == ["epoch=00.ckpt"] @@ -905,7 +905,7 @@ def validation_step(self, batch, batch_idx): limit_test_batches=2, resume_from_checkpoint=checkpoint_callback.best_model_path, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) trainer.test(model, verbose=False) @@ -1041,7 +1041,7 @@ def test_val_check_interval_checkpoint_files(tmpdir): callbacks=[model_checkpoint], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) files = {p.basename for p in tmpdir.listdir()} @@ -1065,7 +1065,7 @@ def training_step(self, *args): callbacks=[model_checkpoint], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(TestModel()) assert model_checkpoint.current_score == 0.3 @@ -1098,7 +1098,7 @@ def training_step(self, *args): callbacks=[model_checkpoint], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(TestModel()) expected = float("inf" if mode == "min" else "-inf") @@ -1122,7 +1122,7 @@ def __init__(self, hparams): callbacks=[model_checkpoint], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) hp = {"test_hp_0": 1, "test_hp_1": 2} hp = OmegaConf.create(hp) if hparams_type == Container else Namespace(**hp) @@ -1150,7 +1150,7 @@ def test_ckpt_version_after_rerun_new_trainer(tmpdir): callbacks=[mc], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(BoringModel()) @@ -1176,7 +1176,7 @@ def test_ckpt_version_after_rerun_same_trainer(tmpdir): callbacks=[mc], logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(BoringModel()) trainer.fit_loop.max_epochs = 4 diff --git a/tests/checkpointing/test_trainer_checkpoint.py b/tests/checkpointing/test_trainer_checkpoint.py index 739dc98a22834..a617162a4daed 100644 --- a/tests/checkpointing/test_trainer_checkpoint.py +++ b/tests/checkpointing/test_trainer_checkpoint.py @@ -64,7 +64,7 @@ def validation_step(self, batch, batch_idx): limit_val_batches=12, limit_test_batches=12, resume_from_checkpoint=best_model_paths[-1], - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) trainer.test() diff --git a/tests/deprecated_api/test_remove_1-7.py 
b/tests/deprecated_api/test_remove_1-7.py index 2fa8f96e77148..de4dba76b9c56 100644 --- a/tests/deprecated_api/test_remove_1-7.py +++ b/tests/deprecated_api/test_remove_1-7.py @@ -182,7 +182,7 @@ def on_keyboard_interrupt(self, trainer, pl_module): max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, default_root_dir=tmpdir, ) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 0ada58ae0b74f..878ea1362b9ed 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -319,7 +319,7 @@ class _Test: limit_val_batches=0.1, num_sanity_val_steps=0, checkpoint_callback=False, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) # there should be no exceptions raised for the same key/value pair in the hparams of both @@ -343,7 +343,7 @@ class _Test: limit_val_batches=0.1, num_sanity_val_steps=0, checkpoint_callback=False, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) with pytest.raises(MisconfigurationException, match="Error while merging hparams"): @@ -360,7 +360,7 @@ class _Test: limit_val_batches=0.1, num_sanity_val_steps=0, checkpoint_callback=False, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) with pytest.raises(MisconfigurationException, match="Error while merging hparams"): diff --git a/tests/loops/optimization/test_optimizer_loop.py b/tests/loops/optimization/test_optimizer_loop.py index beaf0d6daaf40..a667072a7cdf2 100644 --- a/tests/loops/optimization/test_optimizer_loop.py +++ b/tests/loops/optimization/test_optimizer_loop.py @@ -120,7 +120,7 @@ def configure_optimizers(self): trainer = Trainer( default_root_dir=tmpdir, fast_dev_run=10, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) diff --git a/tests/loops/test_loops.py b/tests/loops/test_loops.py index 47145a2f8f408..c21351d48f54d 100644 --- a/tests/loops/test_loops.py +++ b/tests/loops/test_loops.py @@ -362,7 +362,7 @@ def configure_optimizers_multiple(self): limit_train_batches=n_batches, limit_val_batches=0, accumulate_grad_batches=accumulate_grad_batches, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, checkpoint_callback=False, ) @@ -557,7 +557,7 @@ def configure_optimizers_multiple(self): limit_train_batches=n_batches, limit_val_batches=0, accumulate_grad_batches=accumulate_grad_batches, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, checkpoint_callback=True, ) diff --git a/tests/loops/test_training_loop.py b/tests/loops/test_training_loop.py index ae36e56495f93..7c0e99f69cabf 100644 --- a/tests/loops/test_training_loop.py +++ b/tests/loops/test_training_loop.py @@ -53,7 +53,7 @@ def training_epoch_end(self, outputs): limit_val_batches=1, limit_train_batches=2, limit_test_batches=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, ) trainer.fit(model) diff --git a/tests/models/data/horovod/test_train_script.py b/tests/models/data/horovod/test_train_script.py index ee77efeeb8675..ea59a82f96a1c 100644 --- a/tests/models/data/horovod/test_train_script.py +++ b/tests/models/data/horovod/test_train_script.py @@ -21,7 +21,7 @@ def test_horovod_model_script(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, diff --git 
a/tests/models/test_cpu.py b/tests/models/test_cpu.py index c9d05ae5f3f42..18663562df626 100644 --- a/tests/models/test_cpu.py +++ b/tests/models/test_cpu.py @@ -108,7 +108,7 @@ def validation_step(self, *args, **kwargs): gradient_clip_val=1.0, overfit_batches=0.20, track_grad_norm=2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, accumulate_grad_batches=2, limit_train_batches=0.1, limit_val_batches=0.1, @@ -129,7 +129,7 @@ def test_multi_cpu_model_ddp(tmpdir): trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -159,7 +159,7 @@ def __init__(self, optimizer_name, learning_rate): trainer_options = dict( default_root_dir=tmpdir, max_epochs=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary="top", limit_train_batches=0.2, limit_val_batches=0.2, @@ -176,7 +176,7 @@ def test_default_logger_callbacks_cpu_model(tmpdir): max_epochs=1, gradient_clip_val=1.0, overfit_batches=0.20, - progress_bar_refresh_rate=0, + enable_progress_bar=False, limit_train_batches=0.01, limit_val_batches=0.01, ) @@ -214,7 +214,7 @@ def test_step(self, *args, **kwargs): # fit model trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=0.4, limit_val_batches=0.2, @@ -258,7 +258,7 @@ def test_step(self, *args, **kwargs): # fit model trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -291,7 +291,7 @@ def test_simple_cpu(tmpdir): def test_cpu_model(tmpdir): """Make sure model trains on CPU.""" trainer_options = dict( - default_root_dir=tmpdir, progress_bar_refresh_rate=0, max_epochs=1, limit_train_batches=4, limit_val_batches=4 + default_root_dir=tmpdir, enable_progress_bar=False, max_epochs=1, limit_train_batches=4, limit_val_batches=4 ) model = BoringModel() @@ -305,7 +305,7 @@ def test_all_features_cpu_model(tmpdir): gradient_clip_val=1.0, overfit_batches=0.20, track_grad_norm=2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, accumulate_grad_batches=2, max_epochs=1, limit_train_batches=0.4, diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py index 1faf4b820eae5..be70dd033b8a9 100644 --- a/tests/models/test_gpu.py +++ b/tests/models/test_gpu.py @@ -43,7 +43,7 @@ def test_multi_gpu_none_backend(tmpdir): tutils.set_random_master_port() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.2, limit_val_batches=0.2, @@ -61,7 +61,7 @@ def test_single_gpu_model(tmpdir, gpus): """Make sure single GPU works (DP mode).""" trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py index 6ea8b76d253fa..470d3eb2b42f5 100644 --- a/tests/models/test_hooks.py +++ b/tests/models/test_hooks.py @@ -454,7 +454,7 @@ def training_step(self, batch, batch_idx): max_epochs=1, limit_train_batches=train_batches, limit_val_batches=val_batches, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, callbacks=[callback], **kwargs, @@ -565,7 +565,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume(tmpdir): default_root_dir=tmpdir, max_steps=1, limit_val_batches=0, - 
progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, callbacks=[HookedCallback([])], ) @@ -582,7 +582,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume(tmpdir): # already performed 1 step, now resuming to do an additional 2 max_steps=(1 + train_batches), limit_val_batches=0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, resume_from_checkpoint=best_model_path, callbacks=[callback], @@ -677,7 +677,7 @@ def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader, max_epochs=1, limit_val_batches=batches, limit_test_batches=batches, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, callbacks=[callback], ) @@ -724,7 +724,7 @@ def test_trainer_model_hook_system_predict(tmpdir): callback = HookedCallback(called) batches = 2 trainer = Trainer( - default_root_dir=tmpdir, limit_predict_batches=batches, progress_bar_refresh_rate=0, callbacks=[callback] + default_root_dir=tmpdir, limit_predict_batches=batches, enable_progress_bar=False, callbacks=[callback] ) assert called == [ dict(name="Callback.on_init_start", args=(trainer,)), @@ -844,7 +844,7 @@ def call(hook, fn, *args, **kwargs): limit_val_batches=batches, limit_test_batches=batches, limit_predict_batches=batches, - progress_bar_refresh_rate=0, + enable_progress_bar=False, weights_summary=None, reload_dataloaders_every_epoch=True, ) diff --git a/tests/models/test_horovod.py b/tests/models/test_horovod.py index c3f3cdcf7ffc2..ac4c0979c1b67 100644 --- a/tests/models/test_horovod.py +++ b/tests/models/test_horovod.py @@ -73,7 +73,7 @@ def test_horovod_cpu(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -91,7 +91,7 @@ def test_horovod_cpu_clip_grad_by_value(tmpdir): weights_save_path=str(tmpdir), gradient_clip_val=1.0, gradient_clip_algorithm="value", - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -108,7 +108,7 @@ def test_horovod_cpu_implicit(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -124,7 +124,7 @@ def test_horovod_multi_gpu(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -143,7 +143,7 @@ def test_horovod_multi_gpu_grad_by_value(tmpdir): weights_save_path=str(tmpdir), gradient_clip_val=1.0, gradient_clip_algorithm="value", - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -165,7 +165,7 @@ def test_horovod_apex(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -185,7 +185,7 @@ def test_horovod_amp(tmpdir): default_root_dir=str(tmpdir), weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -205,7 +205,7 @@ def test_horovod_gather(tmpdir): default_root_dir=str(tmpdir), 
weights_save_path=str(tmpdir), gradient_clip_val=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -231,7 +231,7 @@ def validation_step(self, batch, *args, **kwargs): trainer_options = dict( default_root_dir=str(tmpdir), - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, @@ -249,7 +249,7 @@ def test_horovod_multi_optimizer(tmpdir): # fit model trainer = Trainer( default_root_dir=str(tmpdir), - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=0.4, limit_val_batches=0.2, diff --git a/tests/models/test_onnx.py b/tests/models/test_onnx.py index 59af0ffa831d7..fcce3d4bc86b7 100644 --- a/tests/models/test_onnx.py +++ b/tests/models/test_onnx.py @@ -97,7 +97,7 @@ def test_model_saves_on_multi_gpu(tmpdir): limit_val_batches=10, gpus=[0, 1], accelerator="ddp_spawn", - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) model = BoringModel() diff --git a/tests/models/test_restore.py b/tests/models/test_restore.py index c9a784ed0a0f5..5a68f757e8744 100644 --- a/tests/models/test_restore.py +++ b/tests/models/test_restore.py @@ -220,7 +220,7 @@ def test_running_test_pretrained_model_distrib_dp(tmpdir): checkpoint = tutils.init_checkpoint_callback(logger) trainer_options = dict( - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=5, limit_val_batches=5, @@ -266,7 +266,7 @@ def test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir): checkpoint = tutils.init_checkpoint_callback(logger) trainer_options = dict( - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=2, limit_val_batches=2, @@ -313,7 +313,7 @@ def test_running_test_pretrained_model_cpu(tmpdir): checkpoint = tutils.init_checkpoint_callback(logger) trainer_options = dict( - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=2, limit_val_batches=2, @@ -345,7 +345,7 @@ def test_load_model_from_checkpoint(tmpdir, model_template): model = model_template() trainer_options = dict( - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=2, limit_val_batches=2, diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py index 950c3577b89b9..dd4707fb324d3 100644 --- a/tests/models/test_tpu.py +++ b/tests/models/test_tpu.py @@ -62,7 +62,7 @@ def test_model_tpu_cores_1(tmpdir): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, tpu_cores=1, limit_train_batches=4, @@ -81,7 +81,7 @@ def test_model_tpu_index(tmpdir, tpu_core): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, tpu_cores=[tpu_core], limit_train_batches=4, @@ -100,7 +100,7 @@ def test_model_tpu_cores_8(tmpdir): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, tpu_cores=8, limit_train_batches=4, @@ -120,7 +120,7 @@ def test_model_16bit_tpu_cores_1(tmpdir): trainer_options = dict( default_root_dir=tmpdir, precision=16, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, tpu_cores=1, limit_train_batches=8, @@ -141,7 +141,7 @@ def test_model_16bit_tpu_index(tmpdir, tpu_core): trainer_options = dict( default_root_dir=tmpdir, 
precision=16, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, tpu_cores=[tpu_core], limit_train_batches=4, @@ -162,7 +162,7 @@ def test_model_16bit_tpu_cores_8(tmpdir): trainer_options = dict( default_root_dir=tmpdir, precision=16, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, tpu_cores=8, limit_train_batches=4, @@ -190,7 +190,7 @@ def validation_step(self, *args, **kwargs): trainer = Trainer( callbacks=[EarlyStopping(monitor="val_loss")], default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=2, limit_val_batches=2, @@ -207,7 +207,7 @@ def test_tpu_grad_norm(tmpdir): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=4, tpu_cores=1, limit_train_batches=0.4, @@ -226,7 +226,7 @@ def test_tpu_clip_grad_by_value(tmpdir): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=4, tpu_cores=1, limit_train_batches=10, @@ -379,7 +379,7 @@ def test_tpu_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, tpu_cores=1, precision=16, @@ -437,7 +437,7 @@ def teardown(self, stage): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=4, tpu_cores=8, limit_train_batches=0.4, @@ -464,7 +464,7 @@ def teardown(self, stage): tutils.reset_seed() trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=4, tpu_cores=8, limit_train_batches=0.4, diff --git a/tests/plugins/test_deepspeed_plugin.py b/tests/plugins/test_deepspeed_plugin.py index c7ccaab3e72f4..8354667e9f120 100644 --- a/tests/plugins/test_deepspeed_plugin.py +++ b/tests/plugins/test_deepspeed_plugin.py @@ -371,7 +371,7 @@ def on_before_accelerator_backend_setup(self, trainer, pl_module) -> None: model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)], precision=16, @@ -723,7 +723,7 @@ def on_train_batch_start( verification_callback = VerificationCallback() trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch. # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it. 
# we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py index 8210804a46ddc..25726f64b049a 100644 --- a/tests/profiler/test_profiler.py +++ b/tests/profiler/test_profiler.py @@ -265,7 +265,7 @@ def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler): model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=1, limit_train_batches=5, limit_val_batches=5, diff --git a/tests/trainer/connectors/test_callback_connector.py b/tests/trainer/connectors/test_callback_connector.py index 713344ae2b0e1..1703df4f859e9 100644 --- a/tests/trainer/connectors/test_callback_connector.py +++ b/tests/trainer/connectors/test_callback_connector.py @@ -122,7 +122,7 @@ def assert_composition(trainer_callbacks, model_callbacks, expected): model = LightningModule() model.configure_callbacks = lambda: model_callbacks trainer = Trainer( - checkpoint_callback=False, progress_bar_refresh_rate=0, weights_summary=None, callbacks=trainer_callbacks + checkpoint_callback=False, enable_progress_bar=False, weights_summary=None, callbacks=trainer_callbacks ) trainer.model = model cb_connector = CallbackConnector(trainer) diff --git a/tests/trainer/logging_/test_eval_loop_logging.py b/tests/trainer/logging_/test_eval_loop_logging.py index e8b398bee8872..885027224a0e0 100644 --- a/tests/trainer/logging_/test_eval_loop_logging.py +++ b/tests/trainer/logging_/test_eval_loop_logging.py @@ -527,7 +527,6 @@ def test_step(self, batch, batch_idx): limit_val_batches=2, limit_test_batches=2, max_epochs=2, - progress_bar_refresh_rate=1, ) # Train the model ⚡ @@ -598,7 +597,6 @@ def validation_step(self, batch, batch_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - progress_bar_refresh_rate=1, ) trainer.fit(model) diff --git a/tests/trainer/logging_/test_logger_connector.py b/tests/trainer/logging_/test_logger_connector.py index d26471a715c2b..b20cc4812e0d3 100644 --- a/tests/trainer/logging_/test_logger_connector.py +++ b/tests/trainer/logging_/test_logger_connector.py @@ -361,7 +361,7 @@ def test_epoch_end(self, outputs): assert all(torch.equal(d["test"], torch.tensor([0, 1])) for d in outputs) # check values model = TestModel() - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2, progress_bar_refresh_rate=0) + trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2, enable_progress_bar=False) trainer.fit(model) trainer.validate(model) trainer.test(model) @@ -510,7 +510,7 @@ def _assert_called(model, fn, stage): limit_val_batches=2, limit_test_batches=2, max_epochs=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, num_sanity_val_steps=2, checkpoint_callback=False, ) diff --git a/tests/trainer/optimization/test_optimizers.py b/tests/trainer/optimization/test_optimizers.py index 2cd7e8f706a61..25a9c438319c3 100644 --- a/tests/trainer/optimization/test_optimizers.py +++ b/tests/trainer/optimization/test_optimizers.py @@ -542,7 +542,7 @@ def test_lr_scheduler_state_updated_before_saving(tmpdir, every_n_train_steps, e lr, gamma = 1, 10 trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, max_epochs=max_epochs, limit_train_batches=batches, @@ -578,7 +578,7 @@ def test_plateau_scheduler_lr_step_interval_updated_after_saving(tmpdir, save_on batches = 4 trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + 
enable_progress_bar=False, logger=False, max_epochs=1, limit_train_batches=batches, diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 5f1bdd1f34541..e159e2d899dc6 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -258,7 +258,7 @@ def on_train_batch_end(self, outputs, batch, batch_idx, *_): limit_train_batches=limit_train_batches, limit_val_batches=0, default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) @@ -432,7 +432,7 @@ def on_load_checkpoint(self, _): callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_top_k=-1)], default_root_dir=tmpdir, val_check_interval=1.0, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, weights_summary=None, ) @@ -474,7 +474,7 @@ def test_trainer_max_steps_and_epochs(tmpdir): "max_steps": num_train_samples + 10, "logger": False, "weights_summary": None, - "progress_bar_refresh_rate": 0, + "enable_progress_bar": False, } trainer = Trainer(**trainer_kwargs) trainer.fit(model) @@ -561,7 +561,7 @@ def test_trainer_min_steps_and_epochs(tmpdir): "min_steps": num_train_samples // 2, "logger": False, "weights_summary": None, - "progress_bar_refresh_rate": 0, + "enable_progress_bar": False, } trainer = Trainer(**trainer_kwargs) trainer.fit(model) @@ -599,7 +599,7 @@ def training_step(self, batch, batch_idx): min_epochs = 5 trainer = Trainer( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, min_epochs=min_epochs, limit_val_batches=0, limit_train_batches=2, @@ -627,7 +627,7 @@ def test_trainer_max_steps_accumulate_batches(tmpdir): accumulate_grad_batches=10, logger=False, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) trainer.fit(model) @@ -677,7 +677,7 @@ def predict_step(self, batch, *_): limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, default_root_dir=tmpdir, callbacks=[ModelCheckpoint(monitor="foo", save_top_k=save_top_k)], ) @@ -748,7 +748,7 @@ def training_epoch_end(self, *args, **kwargs): trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=0.0, limit_val_batches=0.2, @@ -811,7 +811,7 @@ def validation_epoch_end(self, *args, **kwargs): trainer_options = dict( default_root_dir=tmpdir, - progress_bar_refresh_rate=0, + enable_progress_bar=False, max_epochs=2, limit_train_batches=0.4, limit_val_batches=0.0, @@ -925,7 +925,7 @@ def on_keyboard_interrupt(self, trainer, pl_module): max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2, - progress_bar_refresh_rate=0, + enable_progress_bar=False, logger=False, default_root_dir=tmpdir, ) @@ -1308,7 +1308,15 @@ def on_predict_epoch_end(self, trainer, pl_module, outputs): def predict( - tmpdir, accelerator, gpus, num_processes, model=None, plugins=None, datamodule=True, pbrr=None, use_callbacks=True + tmpdir, + accelerator, + gpus, + num_processes, + model=None, + plugins=None, + datamodule=True, + enable_progress_bar=True, + use_callbacks=True, ): dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))] @@ -1327,7 +1335,7 @@ def predict( gpus=gpus, num_processes=num_processes, plugins=plugins, - progress_bar_refresh_rate=pbrr, + enable_progress_bar=enable_progress_bar, callbacks=[cb, cb_1] if use_callbacks else [], ) if accelerator == "ddp_spawn": @@ -1379,10 
+1387,10 @@ def predict_step(self, batch, batch_idx, dataloader_idx=None): assert x.expand_as(x).grad_fn is not None -@pytest.mark.parametrize("progress_bar_refresh_rate", [0, 5, None]) +@pytest.mark.parametrize("enable_progress_bar", [False, True]) @pytest.mark.parametrize("datamodule", [False, True]) -def test_trainer_predict_cpu(tmpdir, datamodule, progress_bar_refresh_rate): - predict(tmpdir, None, None, 1, datamodule=datamodule, pbrr=progress_bar_refresh_rate) +def test_trainer_predict_cpu(tmpdir, datamodule, enable_progress_bar): + predict(tmpdir, None, None, 1, datamodule=datamodule, enable_progress_bar=enable_progress_bar) @RunIf(min_gpus=2, special=True) @@ -1592,7 +1600,7 @@ def test_train_loop_system(tmpdir): limit_train_batches=5, limit_val_batches=1, limit_test_batches=1, - progress_bar_refresh_rate=0, + enable_progress_bar=False, ) class TestOptimizer(SGD): @@ -1791,9 +1799,7 @@ def test_on_load_checkpoint_missing_callbacks(tmpdir): trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()]) trainer.fit(model) - trainer = Trainer( - default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path, progress_bar_refresh_rate=1 - ) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path) with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"): trainer.fit(model) @@ -1880,7 +1886,7 @@ def current_memory(): fast_dev_run=True, gpus=1, accelerator="ddp", - progress_bar_refresh_rate=0, + enable_progress_bar=False, callbacks=Check(), ) trainer = Trainer(**trainer_kwargs) diff --git a/tests/utilities/test_auto_restart.py b/tests/utilities/test_auto_restart.py index 5500fe5393f27..e78403fba054a 100644 --- a/tests/utilities/test_auto_restart.py +++ b/tests/utilities/test_auto_restart.py @@ -941,7 +941,7 @@ def test_dataset_rng_states_restart_with_lightning(tmpdir, dataset_classes, mult default_root_dir=tmpdir, max_epochs=3, weights_summary=None, - progress_bar_refresh_rate=0, + enable_progress_bar=False, multiple_trainloader_mode=multiple_trainloader_mode, ) From 73afc01f30050cf2f39ab85f28d64ffb6556334f Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Thu, 23 Sep 2021 06:13:06 +0000 Subject: [PATCH 5/7] changelog --- CHANGELOG.md | 2 +- pytorch_lightning/trainer/connectors/callback_connector.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 832f326654cca..fc0bcee5ac55d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -145,7 +145,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `RichModelSummary` callback ([#9546](https://github.com/PyTorchLightning/pytorch-lightning/pull/9546)) -- Added `enable_progress_bar` to Trainer constructor ([]()) +- Added `enable_progress_bar` to Trainer constructor ([#9664](https://github.com/PyTorchLightning/pytorch-lightning/pull/9664)) ### Changed diff --git a/pytorch_lightning/trainer/connectors/callback_connector.py b/pytorch_lightning/trainer/connectors/callback_connector.py index c5d16885cb134..a4192c58bc77e 100644 --- a/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/pytorch_lightning/trainer/connectors/callback_connector.py @@ -82,9 +82,7 @@ def on_trainer_init( " will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.ProgressBar` with" " `refresh_rate` directly to the Trainer's `callbacks` argument instead." 
             )
-        import logging
 
-        logging.critical(enable_progress_bar)
         if enable_progress_bar:
             self.trainer._progress_bar_callback = self.configure_progress_bar(
                 progress_bar_refresh_rate, process_position

From ba016cfb9e683cdd73db062c8525966bcbb91274 Mon Sep 17 00:00:00 2001
From: Danielle Pintz
Date: Fri, 24 Sep 2021 00:41:08 +0000
Subject: [PATCH 6/7] update docs

---
 docs/source/common/trainer.rst                       | 17 +++++++++++++++++
 .../trainer/connectors/callback_connector.py         |  3 ++-
 pytorch_lightning/trainer/trainer.py                 |  3 ++-
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst
index de37f5839e82b..9e7de8f928730 100644
--- a/docs/source/common/trainer.rst
+++ b/docs/source/common/trainer.rst
@@ -1281,6 +1281,10 @@ See the :doc:`profiler documentation <../advanced/profiler>`. for more details.
 progress_bar_refresh_rate
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
+``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.
+Please pass :class:`~pytorch_lightning.callbacks.progress.ProgressBar` with ``refresh_rate``
+directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,
+pass ``enable_progress_bar = False`` to the Trainer.
 
 .. raw:: html
 
@@ -1305,6 +1309,19 @@ Note:
   Lightning will set it to 20 in these environments if the user does not provide a value.
 - This argument is ignored if a custom callback is passed to :paramref:`~Trainer.callbacks`.
 
+enable_progress_bar
+^^^^^^^^^^^^^^^^^^^
+
+Whether to enable or disable the progress bar. Defaults to True.
+
+.. testcode::
+
+    # default used by the Trainer
+    trainer = Trainer(enable_progress_bar=True)
+
+    # disable progress bar
+    trainer = Trainer(enable_progress_bar=False)
+
 reload_dataloaders_every_n_epochs
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/pytorch_lightning/trainer/connectors/callback_connector.py b/pytorch_lightning/trainer/connectors/callback_connector.py
index a4192c58bc77e..71ea37b2df947 100644
--- a/pytorch_lightning/trainer/connectors/callback_connector.py
+++ b/pytorch_lightning/trainer/connectors/callback_connector.py
@@ -80,7 +80,8 @@ def on_trainer_init(
             rank_zero_deprecation(
                 f"Setting `Trainer(progress_bar_refresh_rate={progress_bar_refresh_rate})` is deprecated in v1.5 and"
                 " will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.ProgressBar` with"
-                " `refresh_rate` directly to the Trainer's `callbacks` argument instead."
+                " `refresh_rate` directly to the Trainer's `callbacks` argument instead. Or, to disable the progress"
+                " bar, pass `enable_progress_bar = False` to the Trainer."
             )
 
         if enable_progress_bar:
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index d47fe8f897aff..521ddbb0a623d 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -286,7 +286,8 @@ def __init__(
         .. deprecated:: v1.5
             ``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.
             Please pass :class:`~pytorch_lightning.callbacks.progress.ProgressBar` with ``refresh_rate``
-            directly to the Trainer's ``callbacks`` argument instead.
+            directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,
+            pass ``enable_progress_bar = False`` to the Trainer.
 
         enable_progress_bar: Whether to enable the progress bar by default.
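
To see the new flag's effect end to end, a quick sketch (assuming the public `progress_bar_callback` property, which mirrors the `_progress_bar_callback` attribute set by the connector above):

    from pytorch_lightning import Trainer

    # Default: the callback connector configures a ProgressBar automatically.
    trainer = Trainer()
    assert trainer.progress_bar_callback is not None

    # Disabled: the connector skips configure_progress_bar and leaves the callback as None.
    trainer = Trainer(enable_progress_bar=False)
    assert trainer.progress_bar_callback is None
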
From 71027b14740465fab7da064b47facc08a12bc578 Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Fri, 24 Sep 2021 04:37:37 +0000 Subject: [PATCH 7/7] update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1090d418e33d5..bf4e121c68a2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -265,7 +265,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Deprecated `LightningLoggerBase.close`, `LoggerCollection.close` in favor of `LightningLoggerBase.finalize`, `LoggerCollection.finalize` ([#9422](https://github.com/PyTorchLightning/pytorch-lightning/pull/9422)) -- Deprecated passing `progress_bar_refresh_rate` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `refresh_rate` directly to the list of callbacks ([#9616](https://github.com/PyTorchLightning/pytorch-lightning/pull/9616)) +- Deprecated passing `progress_bar_refresh_rate` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `refresh_rate` directly to the list of callbacks, or passing `enable_progress_bar=False` to disable the progress bar ([#9616](https://github.com/PyTorchLightning/pytorch-lightning/pull/9616)) ### Removed
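
A closing note on how the two arguments compose after this series: per the connector logic in patch 4, a non-None `progress_bar_refresh_rate` always triggers the deprecation warning, while `enable_progress_bar` alone decides whether a progress bar callback is configured at all. A sketch of that interaction (the refresh rate of 5 is an arbitrary example value):

    import pytest
    from pytorch_lightning import Trainer

    # The deprecation warning fires whenever the legacy argument is set...
    with pytest.deprecated_call(match=r"Setting `Trainer\(progress_bar_refresh_rate=5\)` is deprecated in v1.5"):
        trainer = Trainer(enable_progress_bar=False, progress_bar_refresh_rate=5)

    # ...but with enable_progress_bar=False, no progress bar callback is configured.
    assert trainer.progress_bar_callback is None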