diff --git a/CHANGELOG.md b/CHANGELOG.md
index c803239d5fccb..033aa89625daa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -112,6 +112,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed the deprecated `checkpoint_callback` argument from the `Trainer` constructor ([#13027](https://github.com/PyTorchLightning/pytorch-lightning/pull/13027))
+
+
 - Removed the deprecated `TestTubeLogger` ([#12859](https://github.com/PyTorchLightning/pytorch-lightning/pull/12859))
 
diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst
index e63640e99f8ce..22458e320a825 100644
--- a/docs/source/common/trainer.rst
+++ b/docs/source/common/trainer.rst
@@ -535,12 +535,6 @@ Example::
 
     # run val loop every 10 training epochs
     trainer = Trainer(check_val_every_n_epoch=10)
 
-checkpoint_callback
-^^^^^^^^^^^^^^^^^^^
-
-.. warning:: `checkpoint_callback` has been deprecated in v1.5 and will be removed in v1.7.
-    To disable checkpointing, pass ``enable_checkpointing = False`` to the Trainer instead.
-
 default_root_dir
 ^^^^^^^^^^^^^^^^
diff --git a/legacy/simple_classif_training.py b/legacy/simple_classif_training.py
index 39362e9ef58ce..440af1d92a435 100644
--- a/legacy/simple_classif_training.py
+++ b/legacy/simple_classif_training.py
@@ -156,7 +156,6 @@ def main_train(dir_path, max_epochs: int = 20):
         default_root_dir=dir_path,
         gpus=int(torch.cuda.is_available()),
         precision=(16 if torch.cuda.is_available() else 32),
-        checkpoint_callback=True,
         callbacks=[stopping],
         min_epochs=3,
         max_epochs=max_epochs,
diff --git a/pytorch_lightning/trainer/connectors/callback_connector.py b/pytorch_lightning/trainer/connectors/callback_connector.py
index 7514e5c85eef7..2790d01fe133d 100644
--- a/pytorch_lightning/trainer/connectors/callback_connector.py
+++ b/pytorch_lightning/trainer/connectors/callback_connector.py
@@ -43,7 +43,6 @@ def __init__(self, trainer):
     def on_trainer_init(
         self,
         callbacks: Optional[Union[List[Callback], Callback]],
-        checkpoint_callback: Optional[bool],
         enable_checkpointing: bool,
         enable_progress_bar: bool,
         process_position: int,
@@ -71,7 +70,7 @@ def on_trainer_init(
 
         # configure checkpoint callback
         # pass through the required args to figure out defaults
-        self._configure_checkpoint_callbacks(checkpoint_callback, enable_checkpointing)
+        self._configure_checkpoint_callbacks(enable_checkpointing)
 
         # configure the timer callback.
         # responsible to stop the training when max_time is reached.
@@ -133,15 +132,7 @@ def _configure_accumulated_gradients(
             self.trainer.accumulate_grad_batches = grad_accum_callback.get_accumulate_grad_batches(0)
         self.trainer.accumulation_scheduler = grad_accum_callback
 
-    def _configure_checkpoint_callbacks(self, checkpoint_callback: Optional[bool], enable_checkpointing: bool) -> None:
-        if checkpoint_callback is not None:
-            rank_zero_deprecation(
-                f"Setting `Trainer(checkpoint_callback={checkpoint_callback})` is deprecated in v1.5 and will "
-                f"be removed in v1.7. Please consider using `Trainer(enable_checkpointing={checkpoint_callback})`."
-            )
-            # if both are set then checkpoint only if both are True
-            enable_checkpointing = checkpoint_callback and enable_checkpointing
-
+    def _configure_checkpoint_callbacks(self, enable_checkpointing: bool) -> None:
         if self.trainer.checkpoint_callbacks:
             if not enable_checkpointing:
                 raise MisconfigurationException(
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index f72c2a8d08df2..b073a28590a1e 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -132,7 +132,6 @@ class Trainer(
     def __init__(
         self,
         logger: Union[Logger, Iterable[Logger], bool] = True,
-        checkpoint_callback: Optional[bool] = None,
         enable_checkpointing: bool = True,
         callbacks: Optional[Union[List[Callback], Callback]] = None,
         default_root_dir: Optional[str] = None,
@@ -234,13 +233,6 @@ def __init__(
             callbacks: Add a callback or list of callbacks.
                 Default: ``None``.
 
-            checkpoint_callback: If ``True``, enable checkpointing.
-                Default: ``None``.
-
-                .. deprecated:: v1.5
-                    ``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.
-                    Please consider using ``enable_checkpointing`` instead.
-
             enable_checkpointing: If ``True``, enable checkpointing. It will configure a default ModelCheckpoint
                 callback if there is no user-defined ModelCheckpoint in
                 :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.
@@ -514,7 +506,6 @@ def __init__(
         # Declare attributes to be set in _callback_connector on_trainer_init
         self._callback_connector.on_trainer_init(
             callbacks,
-            checkpoint_callback,
             enable_checkpointing,
             enable_progress_bar,
             process_position,
diff --git a/tests/deprecated_api/test_remove_1-7.py b/tests/deprecated_api/test_remove_1-7.py
index 270cd7ecd9769..0f311bf1a3051 100644
--- a/tests/deprecated_api/test_remove_1-7.py
+++ b/tests/deprecated_api/test_remove_1-7.py
@@ -154,11 +154,6 @@ def test_v1_7_0_deprecate_lightning_distributed(tmpdir):
         _ = LightningDistributed()
 
 
-def test_v1_7_0_checkpoint_callback_trainer_constructor(tmpdir):
-    with pytest.deprecated_call(match=r"Setting `Trainer\(checkpoint_callback=True\)` is deprecated in v1.5"):
-        _ = Trainer(checkpoint_callback=True)
-
-
 def test_v1_7_0_deprecate_on_post_move_to_device(tmpdir):
     class TestModel(BoringModel):
         def on_post_move_to_device(self):
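For downstream code that still passes the removed argument, a minimal migration sketch is shown below. The `Trainer` import and the `enable_checkpointing` parameter come from the diff above; the surrounding script is only illustrative, not part of this change.

```python
from pytorch_lightning import Trainer

# Before this change (now raises TypeError): Trainer(checkpoint_callback=True)
# Checkpointing is controlled solely by `enable_checkpointing`.
trainer = Trainer(enable_checkpointing=True)  # default: a ModelCheckpoint callback is configured

# To disable checkpointing, as the removed docs warning suggested:
trainer_no_ckpt = Trainer(enable_checkpointing=False)
```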