diff --git a/docs/source-pytorch/extensions/callbacks.rst b/docs/source-pytorch/extensions/callbacks.rst index 03bbd70f473d7..401b9606258b6 100644 --- a/docs/source-pytorch/extensions/callbacks.rst +++ b/docs/source-pytorch/extensions/callbacks.rst @@ -154,12 +154,6 @@ state_key Hooks ===== -on_configure_sharded_model -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. automethod:: pytorch_lightning.callbacks.Callback.on_configure_sharded_model - :noindex: - setup ^^^^^ @@ -256,9 +250,6 @@ on_predict_epoch_end .. automethod:: pytorch_lightning.callbacks.Callback.on_predict_epoch_end :noindex: -.. automethod:: pytorch_lightning.callbacks.Callback.on_epoch_end - :noindex: - on_validation_batch_start ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 36a115cc7c2d7..ea8a7aa6f6a2a 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -255,6 +255,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed the deprecated way to set the distributed backend via the environment variable `PL_TORCH_DISTRIBUTED_BACKEND`, in favor of setting the `process_group_backend` in the strategy constructor ([#14693](https://github.com/Lightning-AI/lightning/pull/14693)) +- Removed deprecated callback hooks ([#14834](https://github.com/Lightning-AI/lightning/pull/14834)) + * `Callback.on_configure_sharded_model` in favor of `Callback.setup` + * `Callback.on_before_accelerator_backend_setup` in favor of `Callback.setup` + * `Callback.on_batch_start` in favor of `Callback.on_train_batch_start` + * `Callback.on_batch_end` in favor of `Callback.on_train_batch_end` + * `Callback.on_epoch_start` in favor of `Callback.on_{train,validation,test}_epoch_start` + * `Callback.on_epoch_end` in favor of `Callback.on_{train,validation,test}_epoch_end` + * `Callback.on_pretrain_routine_{start,end}` in favor of `Callback.on_fit_start` + + - Removed the deprecated device attributes `Trainer.{devices,gpus,num_gpus,ipus,tpu_cores}` in favor of the accelerator-agnostic `Trainer.num_devices` ([#14829](https://github.com/Lightning-AI/lightning/pull/14829)) diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py index f627b43cf53c9..2f2ee174144c9 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/pytorch_lightning/callbacks/callback.py @@ -56,22 +56,6 @@ def _generate_state_key(self, **kwargs: Any) -> str: """ return f"{self.__class__.__qualname__}{repr(kwargs)}" - def on_configure_sharded_model(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use `setup()` instead. - - Called before configure sharded model. - """ - - def on_before_accelerator_backend_setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``setup()`` instead. - - Called before accelerator is being setup. 
- """ - def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None: """Called when fit, validate, test, predict, or tune begins.""" @@ -130,42 +114,6 @@ def on_predict_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.Lightning def on_predict_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: List[Any]) -> None: """Called when the predict epoch ends.""" - def on_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on__epoch_start`` instead. - - Called when either of train/val/test epoch begins. - """ - - def on_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on__epoch_end`` instead. - - Called when either of train/val/test epoch ends. - """ - - def on_batch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on_train_batch_start`` instead. - - Called when the training batch begins. - """ - - def on_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on_train_batch_end`` instead. - - Called when the training batch ends. - """ - def on_validation_batch_start( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int, dataloader_idx: int ) -> None: @@ -220,24 +168,6 @@ def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when the train ends.""" - def on_pretrain_routine_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead. - - Called when the pretrain routine begins. - """ - - def on_pretrain_routine_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead. - - Called when the pretrain routine ends. 
- """ - def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when the validation loop begins.""" diff --git a/src/pytorch_lightning/callbacks/lambda_function.py b/src/pytorch_lightning/callbacks/lambda_function.py index 09d802ee8e78c..c56f0dd779f3e 100644 --- a/src/pytorch_lightning/callbacks/lambda_function.py +++ b/src/pytorch_lightning/callbacks/lambda_function.py @@ -40,9 +40,7 @@ class LambdaCallback(Callback): def __init__( self, - on_before_accelerator_backend_setup: Optional[Callable] = None, setup: Optional[Callable] = None, - on_configure_sharded_model: Optional[Callable] = None, teardown: Optional[Callable] = None, on_fit_start: Optional[Callable] = None, on_fit_end: Optional[Callable] = None, @@ -56,18 +54,12 @@ def __init__( on_validation_epoch_end: Optional[Callable] = None, on_test_epoch_start: Optional[Callable] = None, on_test_epoch_end: Optional[Callable] = None, - on_epoch_start: Optional[Callable] = None, - on_epoch_end: Optional[Callable] = None, - on_batch_start: Optional[Callable] = None, on_validation_batch_start: Optional[Callable] = None, on_validation_batch_end: Optional[Callable] = None, on_test_batch_start: Optional[Callable] = None, on_test_batch_end: Optional[Callable] = None, - on_batch_end: Optional[Callable] = None, on_train_start: Optional[Callable] = None, on_train_end: Optional[Callable] = None, - on_pretrain_routine_start: Optional[Callable] = None, - on_pretrain_routine_end: Optional[Callable] = None, on_validation_start: Optional[Callable] = None, on_validation_end: Optional[Callable] = None, on_test_start: Optional[Callable] = None, diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py b/src/pytorch_lightning/callbacks/model_checkpoint.py index 62001d50b1c85..256f913659529 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/pytorch_lightning/callbacks/model_checkpoint.py @@ -39,7 +39,7 @@ from lightning_lite.utilities.types import _PATH from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn from pytorch_lightning.utilities.types import STEP_OUTPUT log = logging.getLogger(__name__) @@ -351,19 +351,12 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.best_model_path = state_dict["best_model_path"] - def save_checkpoint(self, trainer: "pl.Trainer") -> None: # pragma: no-cover - """Performs the main logic around saving a checkpoint. - - This method runs on all ranks. It is the responsibility of `trainer.save_checkpoint` to correctly handle the - behaviour in distributed training, i.e., saving only on rank 0 for data parallel use cases. - """ - rank_zero_deprecation( - f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and will be removed in v1.8." - " Instead, you can use `trainer.save_checkpoint()` to manually save a checkpoint." + def save_checkpoint(self, trainer: "pl.Trainer") -> None: + raise NotImplementedError( + f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and is no longer supported" + f" as of 1.8. Please use `trainer.save_checkpoint()` to manually save a checkpoint. This method will be" + f" removed completely in v2.0." 
         )
-        monitor_candidates = self._monitor_candidates(trainer)
-        self._save_topk_checkpoint(trainer, monitor_candidates)
-        self._save_last_checkpoint(trainer, monitor_candidates)
 
     def _save_topk_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, Tensor]) -> None:
         if self.save_top_k == 0:
diff --git a/src/pytorch_lightning/core/hooks.py b/src/pytorch_lightning/core/hooks.py
index 86b3d3f92e9c8..7831c94e61d90 100644
--- a/src/pytorch_lightning/core/hooks.py
+++ b/src/pytorch_lightning/core/hooks.py
@@ -63,32 +63,6 @@ def on_predict_start(self) -> None:
     def on_predict_end(self) -> None:
         """Called at the end of predicting."""
 
-    def on_pretrain_routine_start(self) -> None:
-        """Called at the beginning of the pretrain routine (between fit and train start).
-
-        - fit
-        - pretrain_routine start
-        - pretrain_routine end
-        - training_start
-
-        .. deprecated:: v1.6
-            :meth:`on_pretrain_routine_start` has been deprecated in v1.6 and will be removed in v1.8.
-            Use ``on_fit_start`` instead.
-        """
-
-    def on_pretrain_routine_end(self) -> None:
-        """Called at the end of the pretrain routine (between fit and train start).
-
-        - fit
-        - pretrain_routine start
-        - pretrain_routine end
-        - training_start
-
-        .. deprecated:: v1.6
-            :meth:`on_pretrain_routine_end` has been deprecated in v1.6 and will be removed in v1.8.
-            Use ``on_fit_start`` instead.
-        """
-
     def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]:
         """Called in the training loop before anything happens for that batch.
 
@@ -189,22 +163,6 @@ def on_predict_model_eval(self) -> None:
         """Sets the model to eval during the predict loop."""
         self.trainer.model.eval()
 
-    def on_epoch_start(self) -> None:
-        """Called when either of train/val/test epoch begins.
-
-        .. deprecated:: v1.6
-            :meth:`on_epoch_start` has been deprecated in v1.6 and will be removed in v1.8.
-            Use ``on_<train/validation/test>_epoch_start`` instead.
-        """
-
-    def on_epoch_end(self) -> None:
-        """Called when either of train/val/test epoch ends.
-
-        .. deprecated:: v1.6
-            :meth:`on_epoch_end` has been deprecated in v1.6 and will be removed in v1.8.
-            Use ``on_<train/validation/test>_epoch_end`` instead.
- """ - def on_train_epoch_start(self) -> None: """Called in the training loop at the very beginning of the epoch.""" diff --git a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py index 60125e1174cd8..29142215b7c1e 100644 --- a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py +++ b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py @@ -267,10 +267,8 @@ def _on_evaluation_end(self, *args: Any, **kwargs: Any) -> None: self.trainer._logger_connector.reset_results() def _on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None: - """Runs ``on_epoch_start`` and ``on_{validation/test}_epoch_start`` hooks.""" + """Runs the ``on_{validation/test}_epoch_start`` hooks.""" self.trainer._logger_connector.on_epoch_start() - self.trainer._call_callback_hooks("on_epoch_start", *args, **kwargs) - self.trainer._call_lightning_module_hook("on_epoch_start", *args, **kwargs) hook_name = "on_test_epoch_start" if self.trainer.testing else "on_validation_epoch_start" self.trainer._call_callback_hooks(hook_name, *args, **kwargs) @@ -295,8 +293,6 @@ def _on_evaluation_epoch_end(self) -> None: self.trainer._call_callback_hooks(hook_name) self.trainer._call_lightning_module_hook(hook_name) - self.trainer._call_callback_hooks("on_epoch_end") - self.trainer._call_lightning_module_hook("on_epoch_end") self.trainer._logger_connector.on_epoch_end() @staticmethod diff --git a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py index 1cf2a5a6dee30..03de14f8f96bf 100644 --- a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py +++ b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py @@ -200,9 +200,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov self._warning_cache.warn("train_dataloader yielded None. 
If this was on purpose, ignore this warning...") batch_output = [] else: - # hook - self.trainer._call_callback_hooks("on_batch_start") - # hook self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx) response = self.trainer._call_lightning_module_hook("on_train_batch_start", batch, batch_idx) @@ -232,7 +229,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx) self.trainer._call_lightning_module_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx) - self.trainer._call_callback_hooks("on_batch_end") self.trainer._logger_connector.on_batch_end() self.batch_progress.increment_completed() diff --git a/src/pytorch_lightning/loops/fit_loop.py b/src/pytorch_lightning/loops/fit_loop.py index de232bdaa9147..251fc4df394f8 100644 --- a/src/pytorch_lightning/loops/fit_loop.py +++ b/src/pytorch_lightning/loops/fit_loop.py @@ -219,8 +219,7 @@ def on_run_start(self) -> None: # type: ignore[override] self.trainer._call_strategy_hook("on_train_start") def on_advance_start(self) -> None: # type: ignore[override] - """Prepares the dataloader for training and calls the hooks ``on_epoch_start`` and - ``on_train_epoch_start``""" + """Prepares the dataloader for training and calls the hook ``on_train_epoch_start``""" model = self.trainer.lightning_module # reset train dataloader @@ -246,9 +245,6 @@ def on_advance_start(self) -> None: # type: ignore[override] self.trainer._logger_connector.on_epoch_start() - self.trainer._call_callback_hooks("on_epoch_start") - self.trainer._call_lightning_module_hook("on_epoch_start") - self.trainer._call_callback_hooks("on_train_epoch_start") self.trainer._call_lightning_module_hook("on_train_epoch_start") @@ -299,9 +295,6 @@ def on_advance_end(self) -> None: self.trainer._call_callback_hooks("on_train_epoch_end") self.trainer._call_lightning_module_hook("on_train_epoch_end") - self.trainer._call_callback_hooks("on_epoch_end") - self.trainer._call_lightning_module_hook("on_epoch_end") - self.trainer._logger_connector.on_epoch_end() if self.epoch_loop._num_ready_batches_reached(): diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index f363abf92fe21..b14f955312cdd 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -20,7 +20,7 @@ from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_warn from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature @@ -48,10 +48,11 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: __verify_eval_loop_configuration(trainer, model, "predict") __verify_batch_transfer_support(trainer) + # TODO: Delete this check in v2.0 _check_deprecated_callback_hooks(trainer) - # TODO: Delete on_epoch_start/on_epoch_end hooks in v1.8 + # TODO: Delete this check in v2.0 _check_on_epoch_start_end(model) - # TODO: Delete on_pretrain_routine_start/end hooks in v1.8 + # TODO: Delete this check in v2.0 _check_on_pretrain_routine(model) @@ -185,7 +186,6 @@ def __check_training_step_requires_dataloader_iter(model: 
"pl.LightningModule") ) -# TODO: Remove on_epoch_start/on_epoch_end hooks in v1.8 def _check_on_epoch_start_end(model: "pl.LightningModule") -> None: hooks = ( ("on_epoch_start", "on__epoch_start"), @@ -193,34 +193,32 @@ def _check_on_epoch_start_end(model: "pl.LightningModule") -> None: ) for hook, alternative_hook in hooks: - if is_overridden(hook, model): - rank_zero_deprecation( - f"The `LightningModule.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `LightningModule.{alternative_hook}` instead." + if callable(getattr(model, hook, None)): + raise RuntimeError( + f"The `LightningModule.{hook}` hook was removed in v1.8. Please use" + f" `LightningModule.{alternative_hook}` instead." ) def _check_on_pretrain_routine(model: "pl.LightningModule") -> None: hooks = (("on_pretrain_routine_start", "on_fit_start"), ("on_pretrain_routine_end", "on_fit_start")) for hook, alternative_hook in hooks: - if is_overridden(hook, model): - rank_zero_deprecation( - f"The `LightningModule.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `LightningModule.{alternative_hook}` instead." + if callable(getattr(model, hook, None)): + raise RuntimeError( + f"The `LightningModule.{hook}` hook was removed in v1.8. Please use" + f" `LightningModule.{alternative_hook}` instead." ) def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: for callback in trainer.callbacks: - if is_overridden(method_name="on_configure_sharded_model", instance=callback): - rank_zero_deprecation( - "The `on_configure_sharded_model` callback hook was deprecated in" - " v1.6 and will be removed in v1.8. Use `setup()` instead." + if callable(getattr(callback, "on_configure_sharded_model", None)): + raise RuntimeError( + "The `on_configure_sharded_model` callback hook was removed in v1.8. Use `setup()` instead." ) - if is_overridden(method_name="on_before_accelerator_backend_setup", instance=callback): - rank_zero_deprecation( - "The `on_before_accelerator_backend_setup` callback hook was deprecated in" - " v1.6 and will be removed in v1.8. Use `setup()` instead." + if callable(getattr(callback, "on_before_accelerator_backend_setup", None)): + raise RuntimeError( + "The `on_before_accelerator_backend_setup` callback hook was removed in v1.8. Use `setup()` instead." ) has_legacy_argument = "callback_state" in inspect.signature(callback.on_load_checkpoint).parameters @@ -238,23 +236,20 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: ["on_batch_start", "on_train_batch_start"], ["on_batch_end", "on_train_batch_end"], ): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `Callback.{alternative_hook}` instead." + if callable(getattr(callback, hook, None)): + raise RuntimeError( + f"The `Callback.{hook}` hook was removed in v1.8. Please use `Callback.{alternative_hook}` instead." ) for hook, alternative_hook in ( ["on_epoch_start", "on__epoch_start"], ["on_epoch_end", "on__epoch_end"], ): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `Callback.{alternative_hook}` instead." + if callable(getattr(callback, hook, None)): + raise RuntimeError( + f"The `Callback.{hook}` hook was removed in v1.8. Please use `Callback.{alternative_hook}` instead." 
) for hook in ("on_pretrain_routine_start", "on_pretrain_routine_end"): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook has been deprecated in v1.6 and" - " will be removed in v1.8. Please use `Callback.on_fit_start` instead." + if callable(getattr(callback, hook, None)): + raise RuntimeError( + f"The `Callback.{hook}` hook was removed in v1.8. Please use `Callback.on_fit_start` instead." ) diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py index 5e64d0705c8f2..f1478ecbf9cbe 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py +++ b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py @@ -26,8 +26,6 @@ class _LogOptions(TypedDict): default_on_epoch: bool functions = { - "on_before_accelerator_backend_setup": None, - "on_configure_sharded_model": None, "on_before_backward": _LogOptions( allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False ), @@ -75,8 +73,6 @@ class _LogOptions(TypedDict): "on_test_end": None, "on_predict_start": None, "on_predict_end": None, - "on_pretrain_routine_start": None, - "on_pretrain_routine_end": None, "on_train_epoch_start": _LogOptions( allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True ), @@ -97,21 +93,9 @@ class _LogOptions(TypedDict): ), "on_predict_epoch_start": None, "on_predict_epoch_end": None, - "on_epoch_start": _LogOptions( - allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True - ), - "on_epoch_end": _LogOptions( - allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True - ), "on_before_batch_transfer": None, "transfer_batch_to_device": None, "on_after_batch_transfer": None, - "on_batch_start": _LogOptions( - allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False - ), - "on_batch_end": _LogOptions( - allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False - ), "on_train_batch_start": _LogOptions( allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False ), diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 130613e6fd791..190a75ec2b286 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -981,7 +981,6 @@ def _run( # ---------------------------- # SET UP TRAINING # ---------------------------- - self._call_callback_hooks("on_before_accelerator_backend_setup") log.detail(f"{self.__class__.__name__}: setting up strategy environment") self.strategy.setup_environment() self.__setup_profiler() @@ -1135,15 +1134,6 @@ def _pre_training_routine(self) -> None: # register signals self._signal_connector.register_signal_handlers() - # -------------------------- - # Pre-train - # -------------------------- - self._call_callback_hooks("on_pretrain_routine_start") - self._call_lightning_module_hook("on_pretrain_routine_start") - - self._call_callback_hooks("on_pretrain_routine_end") - self._call_lightning_module_hook("on_pretrain_routine_end") - def _run_train(self) -> None: self._pre_training_routine() @@ -1255,7 +1245,6 @@ def _call_configure_sharded_model(self) -> None: 
materialize_module(self.lightning_module) self._call_lightning_module_hook("configure_sharded_model") - self._call_callback_hooks("on_configure_sharded_model") def _call_teardown_hook(self) -> None: assert self.state.fn is not None diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index 14a7bc54efcf6..a3091a23bf47c 100644 --- a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -13,8 +13,6 @@ # limitations under the License. from functools import partial -import pytest - from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.callbacks import Callback, LambdaCallback from pytorch_lightning.demos.boring_classes import BoringModel @@ -47,10 +45,7 @@ def call(hook, *_, **__): limit_val_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) + trainer.fit(model) ckpt_path = trainer.checkpoint_callback.best_model_path # raises KeyboardInterrupt and loads from checkpoint @@ -63,17 +58,8 @@ def call(hook, *_, **__): limit_predict_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model, ckpt_path=ckpt_path) - with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.test(model) - with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.predict(model) + trainer.fit(model, ckpt_path=ckpt_path) + trainer.test(model) + trainer.predict(model) assert checker == hooks diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py deleted file mode 100644 index ff4b80f8898bd..0000000000000 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Test deprecated functionality which will be removed in v1.8.0.""" -from unittest import mock - -import pytest - -from pytorch_lightning import Callback, Trainer -from pytorch_lightning.callbacks import ModelCheckpoint -from pytorch_lightning.demos.boring_classes import BoringModel - - -def test_v1_8_0_remove_on_batch_start_end(tmpdir): - class TestCallback(Callback): - def on_batch_start(self, *args, **kwargs): - print("on_batch_start") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class TestCallback(Callback): - def on_batch_end(self, *args, **kwargs): - print("on_batch_end") - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_on_configure_sharded_model(tmpdir): - class TestCallback(Callback): - def on_configure_sharded_model(self, trainer, model): - print("Configuring sharded model") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8." - ): - trainer.fit(model) - - -def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_epoch_start(self, *args, **kwargs): - print("on_epoch_start") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_epoch_end(self, *args, **kwargs): - print("on_epoch_end") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_pretrain_routine_start(self, *args, **kwargs): - print("foo") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_pretrain_routine_end(self, *args, **kwargs): - print("foo") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir): - class TestCallback(Callback): - def on_before_accelerator_backend_setup(self, *args, **kwargs): - print("on_before_accelerator_backend called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - 
max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6" - " and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir): - class TestCallback(Callback): - def on_pretrain_routine_start(self, trainer, pl_module): - print("on_pretrain_routine_start called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class TestCallback(Callback): - def on_pretrain_routine_end(self, trainer, pl_module): - print("on_pretrain_routine_end called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_deprecated_mc_save_checkpoint(): - mc = ModelCheckpoint() - trainer = Trainer() - with mock.patch.object(trainer, "save_checkpoint"), pytest.deprecated_call( - match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6" - ): - mc.save_checkpoint(trainer) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index 22188974c8e0b..9457f264fd5be 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -18,6 +18,7 @@ import pytorch_lightning from pytorch_lightning import Callback, Trainer +from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback from tests_pytorch.helpers.runif import RunIf @@ -136,3 +137,166 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): trainer.callbacks = [TestCallbackSaveHookOverride()] trainer.save_checkpoint(tmpdir + "/pathok.ckpt") + + +def test_v2_0_0_remove_on_batch_start_end(tmpdir): + class TestCallback(Callback): + def on_batch_start(self, *args, **kwargs): + print("on_batch_start") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_batch_end(self, *args, **kwargs): + print("on_batch_end") + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_0_on_configure_sharded_model(tmpdir): + class TestCallback(Callback): + def on_configure_sharded_model(self, trainer, model): + print("Configuring sharded model") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `on_configure_sharded_model` callback 
hook was removed in v1.8."): + trainer.fit(model) + + +def test_v2_0_0_remove_on_epoch_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_epoch_start(self, *args, **kwargs): + print("on_epoch_start") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_epoch_end(self, *args, **kwargs): + print("on_epoch_end") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_pretrain_routine_start(self, *args, **kwargs): + print("foo") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_pretrain_routine_end(self, *args, **kwargs): + print("foo") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_0_on_before_accelerator_backend_setup(tmpdir): + class TestCallback(Callback): + def on_before_accelerator_backend_setup(self, *args, **kwargs): + print("on_before_accelerator_backend called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises( + RuntimeError, match="The `on_before_accelerator_backend_setup` callback hook was removed in v1.8" + ): + trainer.fit(model) + + +def test_v2_0_0_callback_on_pretrain_routine_start_end(tmpdir): + class TestCallback(Callback): + def on_pretrain_routine_start(self, trainer, pl_module): + print("on_pretrain_routine_start called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_pretrain_routine_end(self, trainer, pl_module): + print("on_pretrain_routine_end called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_end` hook was removed in v1.8."): + trainer.fit(model) + + +def test_v2_0_0_deprecated_mc_save_checkpoint(): + mc = ModelCheckpoint() + trainer = Trainer() + with mock.patch.object(trainer, "save_checkpoint"), pytest.raises( + NotImplementedError, + match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6 and is no longer supported as of 1.8.", + ): + mc.save_checkpoint(trainer) diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index a33d2a2a7117c..1eae5d7b64c34 100644 --- a/tests/tests_pytorch/models/test_hooks.py 
+++ b/tests/tests_pytorch/models/test_hooks.py @@ -311,7 +311,6 @@ def _auto_train_batch( dict(name="on_before_batch_transfer", args=(ANY, 0)), dict(name="transfer_batch_to_device", args=(ANY, device, 0)), dict(name="on_after_batch_transfer", args=(ANY, 0)), - dict(name="Callback.on_batch_start", args=(trainer, model)), dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)), dict(name="on_train_batch_start", args=(ANY, i)), dict(name="forward", args=(ANY,)), @@ -354,7 +353,6 @@ def _auto_train_batch( ), dict(name="Callback.on_train_batch_end", args=(trainer, model, dict(loss=ANY), ANY, i)), dict(name="on_train_batch_end", args=(dict(loss=ANY), ANY, i)), - dict(name="Callback.on_batch_end", args=(trainer, model)), ] ) return out @@ -369,7 +367,6 @@ def _manual_train_batch(trainer, model, batches, device=torch.device("cpu"), **k dict(name="on_before_batch_transfer", args=(ANY, 0)), dict(name="transfer_batch_to_device", args=(ANY, device, 0)), dict(name="on_after_batch_transfer", args=(ANY, 0)), - dict(name="Callback.on_batch_start", args=(trainer, model)), dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)), dict(name="on_train_batch_start", args=(ANY, i)), dict(name="forward", args=(ANY,)), @@ -389,7 +386,6 @@ def _manual_train_batch(trainer, model, batches, device=torch.device("cpu"), **k dict(name="training_step_end", args=(dict(loss=ANY),)), dict(name="Callback.on_train_batch_end", args=(trainer, model, dict(loss=ANY), ANY, i)), dict(name="on_train_batch_end", args=(dict(loss=ANY), ANY, i)), - dict(name="Callback.on_batch_end", args=(trainer, model)), ] ) return out @@ -398,16 +394,12 @@ def _manual_train_batch(trainer, model, batches, device=torch.device("cpu"), **k def _eval_epoch(fn, trainer, model, batches, key, device=torch.device("cpu")): outputs = {key: ANY} return [ - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name=f"Callback.on_{fn}_epoch_start", args=(trainer, model)), dict(name=f"on_{fn}_epoch_start"), *HookedModel._eval_batch(fn, trainer, model, batches, key, device=device), dict(name=f"{fn}_epoch_end", args=([outputs] * batches,)), dict(name=f"Callback.on_{fn}_epoch_end", args=(trainer, model)), dict(name=f"on_{fn}_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), ] @staticmethod @@ -521,20 +513,14 @@ def training_step(self, batch, batch_idx): expected = [ dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), # DeepSpeed needs the batch size to figure out throughput logging *([dict(name="train_dataloader")] if kwargs.get("strategy") == "deepspeed" else []), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="Callback.on_sanity_check_start", args=(trainer, model)), dict(name="val_dataloader"), dict(name="train", args=(False,)), @@ -553,8 +539,6 @@ def training_step(self, batch, batch_idx): 
dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, train_batches, device=device, **kwargs), @@ -575,8 +559,6 @@ def training_step(self, batch, batch_idx): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -633,27 +615,19 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): expected = [ dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), dict(name="Callback.on_load_checkpoint", args=(trainer, model, loaded_ckpt)), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="train", args=(True,)), dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, 2, current_epoch=1, current_batch=0), @@ -663,8 +637,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -722,27 +694,19 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): expected = [ dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), dict(name="Callback.on_load_checkpoint", args=(trainer, model, loaded_ckpt)), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), 
dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="train", args=(True,)), dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, steps_after_reload, current_batch=1), @@ -752,8 +716,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -799,11 +761,9 @@ def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader, expected = [ dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage=verb)), dict(name="setup", kwargs=dict(stage=verb)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), *(hooks if batches else []), dict(name="Callback.teardown", args=(trainer, model), kwargs=dict(stage=verb)), dict(name="teardown", kwargs=dict(stage=verb)), @@ -823,11 +783,9 @@ def test_trainer_model_hook_system_predict(tmpdir): expected = [ dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="predict")), dict(name="setup", kwargs=dict(stage="predict")), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="predict_dataloader"), dict(name="train", args=(False,)), dict(name="on_predict_model_eval"), diff --git a/tests/tests_pytorch/models/test_restore.py b/tests/tests_pytorch/models/test_restore.py index 39f623c73736d..ec0d08cb84b19 100644 --- a/tests/tests_pytorch/models/test_restore.py +++ b/tests/tests_pytorch/models/test_restore.py @@ -321,7 +321,7 @@ def test_try_resume_from_non_existing_checkpoint(tmpdir): class CaptureCallbacksBeforeTraining(Callback): callbacks = [] - def on_pretrain_routine_end(self, trainer, pl_module): + def on_fit_start(self, trainer, pl_module): self.callbacks = deepcopy(trainer.callbacks) @@ -347,15 +347,13 @@ def get_trainer_args(): # initial training trainer = Trainer(**get_trainer_args()) - with pytest.deprecated_call(match="`Callback.on_pretrain_routine_end` hook has been deprecated in v1.6"): - trainer.fit(model, datamodule=dm) + trainer.fit(model, datamodule=dm) callbacks_before_resume = deepcopy(trainer.callbacks) # resumed training trainer = Trainer(**get_trainer_args()) - with pytest.deprecated_call(match="`Callback.on_pretrain_routine_end` hook has been deprecated in v1.6"): - trainer.fit(model, datamodule=dm, 
ckpt_path=str(tmpdir / "last.ckpt")) + trainer.fit(model, datamodule=dm, ckpt_path=str(tmpdir / "last.ckpt")) assert len(callbacks_before_resume) == len(callback_capture.callbacks) diff --git a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py index 45f363caba621..20c5323907027 100644 --- a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py @@ -308,12 +308,6 @@ def on_validation_start(self, _, pl_module): pl_module, "on_validation_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices ) - def on_epoch_start(self, trainer, pl_module): - if trainer.validating: - self.make_logging( - pl_module, "on_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices - ) - def on_validation_epoch_start(self, _, pl_module): self.make_logging( pl_module, "on_validation_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -328,10 +322,6 @@ def on_validation_batch_end(self, _, pl_module, *__): prob_bars=self.choices, ) - def on_epoch_end(self, trainer, pl_module): - if trainer.validating: - self.make_logging(pl_module, "on_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - def on_validation_epoch_end(self, _, pl_module): self.make_logging( pl_module, "on_validation_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -353,17 +343,13 @@ def validation_step(self, batch, batch_idx): max_epochs=1, callbacks=[cb], ) - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_start` hook was deprecated in v1.6"): - trainer.fit(model) + trainer.fit(model) assert cb.call_counter == { "on_validation_batch_end": 4, "on_validation_start": 1, - "on_epoch_start": 1, "on_validation_epoch_start": 1, "on_validation_epoch_end": 1, - "on_epoch_end": 1, } def get_expected(on_epoch, values): diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 533bbd4863de1..3c68bb38ccc8e 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -37,20 +37,12 @@ def test_fx_validator(): "on_before_backward", "on_after_backward", "on_before_optimizer_step", - "on_batch_end", - "on_batch_start", - "on_before_accelerator_backend_setup", "on_before_zero_grad", - "on_epoch_end", - "on_epoch_start", "on_fit_end", - "on_configure_sharded_model", "on_fit_start", "on_exception", "on_load_checkpoint", "load_state_dict", - "on_pretrain_routine_end", - "on_pretrain_routine_start", "on_sanity_check_end", "on_sanity_check_start", "state_dict", @@ -84,15 +76,11 @@ def test_fx_validator(): } not_supported = { - "on_before_accelerator_backend_setup", "on_fit_end", "on_fit_start", - "on_configure_sharded_model", "on_exception", "on_load_checkpoint", "load_state_dict", - "on_pretrain_routine_end", - "on_pretrain_routine_start", "on_sanity_check_end", "on_sanity_check_start", "on_predict_batch_end", @@ -199,14 +187,10 @@ def test_fx_validator_integration(tmpdir): """Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors.""" not_supported = { None: "`self.trainer` reference is not registered", - "on_before_accelerator_backend_setup": "You can't", "setup": "You can't", "configure_sharded_model": "You can't", - "on_configure_sharded_model": "You can't", 
"configure_optimizers": "You can't", "on_fit_start": "You can't", - "on_pretrain_routine_start": "You can't", - "on_pretrain_routine_end": "You can't", "train_dataloader": "You can't", "val_dataloader": "You can't", "on_before_batch_transfer": "You can't", @@ -242,21 +226,18 @@ def test_fx_validator_integration(tmpdir): limit_predict_batches=1, callbacks=callback, ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.fit(model) + trainer.fit(model) not_supported.update( { # `lightning_module` ref is now present from the `fit` call - "on_before_accelerator_backend_setup": "You can't", "test_dataloader": "You can't", "on_test_model_eval": "You can't", "on_test_model_train": "You can't", "on_test_end": "You can't", } ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.test(model, verbose=False) + trainer.test(model, verbose=False) not_supported.update({k: "result collection is not registered yet" for k in not_supported}) not_supported.update( @@ -272,8 +253,7 @@ def test_fx_validator_integration(tmpdir): "on_predict_end": "result collection is not registered yet", } ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.predict(model) + trainer.predict(model) @RunIf(min_cuda_gpus=2) diff --git a/tests/tests_pytorch/trainer/logging_/test_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_loop_logging.py index 66c7bdcd25cf3..3251d4d2aa5ef 100644 --- a/tests/tests_pytorch/trainer/logging_/test_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_loop_logging.py @@ -60,8 +60,6 @@ def _make_assertion(model, hooks, result_mock, on_step, on_epoch, extra_kwargs): "optimizer_zero_grad", "training_step", "training_step_end", - "on_batch_start", - "on_batch_end", "on_train_batch_start", "on_train_batch_end", ] @@ -72,8 +70,6 @@ def _make_assertion(model, hooks, result_mock, on_step, on_epoch, extra_kwargs): "on_train_start", "on_train_epoch_start", "on_train_epoch_end", - "on_epoch_start", - "on_epoch_end", "training_epoch_end", ] all_logging_hooks = all_logging_hooks - set(hooks) diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 6a2feae352c3b..8a44b7e131644 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -284,9 +284,6 @@ def make_logging(self, pl_module, func_name, on_steps, on_epochs, prob_bars): def on_train_start(self, _, pl_module): self.make_logging(pl_module, "on_train_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - def on_epoch_start(self, _, pl_module): - self.make_logging(pl_module, "on_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - def on_train_epoch_start(self, _, pl_module): self.make_logging( pl_module, "on_train_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -307,9 +304,6 @@ def on_train_epoch_end(self, _, pl_module): pl_module, "on_train_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices ) - def on_epoch_end(self, _, pl_module): - self.make_logging(pl_module, "on_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - class TestModel(BoringModel): seen_losses = [] @@ -330,9 +324,7 @@ def training_step(self, batch, batch_idx): callbacks=[cb], ) - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_start` hook was deprecated in v1.6"): - trainer.fit(model) + 
trainer.fit(model) # Make sure the func_name output equals the average from all logged values when on_epoch true assert trainer.progress_bar_callback.get_metrics(trainer, model)["train_loss"] == model.seen_losses[-1] @@ -340,12 +332,10 @@ def training_step(self, batch, batch_idx): assert cb.call_counter == { "on_train_start": 1, - "on_epoch_start": 1, "on_train_epoch_start": 1, "on_train_batch_start": 2, "on_train_batch_end": 2, "on_train_epoch_end": 1, - "on_epoch_end": 1, } def get_expected(on_epoch, values): @@ -535,9 +525,6 @@ def on_train_epoch_start(self, trainer, pl_module): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): self.log("on_train_batch_end", 3) - def on_epoch_end(self, trainer, pl_module): - self.log("on_epoch_end", 4) - def on_train_epoch_end(self, trainer, pl_module): self.log("on_train_epoch_end", 5) @@ -550,16 +537,12 @@ def on_train_epoch_end(self, trainer, pl_module): enable_model_summary=False, callbacks=[LoggingCallback()], ) - - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_end` hook was deprecated in v1.6"): - trainer.fit(model) + trainer.fit(model) expected = { "on_train_start": 1, "on_train_epoch_start": 2, "on_train_batch_end": 3, - "on_epoch_end": 4, "on_train_epoch_end": 5, } assert trainer.callback_metrics == expected
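
For downstream users updating code against this change, here is a minimal migration sketch (the `VerboseCallback` class and its print statements are hypothetical, for illustration only). Each removed hook maps onto the replacement named in the CHANGELOG entry above: the setup-time hooks collapse into `setup()`, the generic batch/epoch hooks move to their train/validation/test-specific counterparts, and the pretrain-routine hooks move to `on_fit_start`.

```python
import pytorch_lightning as pl


class VerboseCallback(pl.Callback):
    # Replaces the removed `on_before_accelerator_backend_setup` and
    # `on_configure_sharded_model` hooks.
    def setup(self, trainer, pl_module, stage):
        print(f"setting up for stage={stage}")

    # Replaces the removed `on_batch_start` / `on_batch_end` hooks.
    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
        print(f"train batch {batch_idx} starting")

    # Replaces the train-mode half of the removed `on_epoch_start` /
    # `on_epoch_end` hooks; validation/test logic moves to
    # `on_{validation,test}_epoch_{start,end}`.
    def on_train_epoch_end(self, trainer, pl_module):
        print("train epoch finished")

    # Replaces the removed `on_pretrain_routine_start` / `on_pretrain_routine_end`.
    def on_fit_start(self, trainer, pl_module):
        print("fit is starting")
```

Likewise, code that called `ModelCheckpoint.save_checkpoint(trainer)` directly should now call `trainer.save_checkpoint("path.ckpt")`, which handles the rank-zero behaviour in distributed runs itself.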