diff --git a/CHANGELOG.md b/CHANGELOG.md index d7cd27977ddbf..bb891ef031237 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -133,6 +133,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed deprecated `CheckpointConnector.hpc_load` property in favor of `CheckpointConnector.restore` ([#10525](https://github.com/PyTorchLightning/pytorch-lightning/pull/10525)) +- Removed deprecated `reload_dataloaders_every_epoch` from `Trainer` in favour of `reload_dataloaders_every_n_epochs` ([#10481](https://github.com/PyTorchLightning/pytorch-lightning/pull/10481)) + + ### Fixed diff --git a/pytorch_lightning/trainer/connectors/data_connector.py b/pytorch_lightning/trainer/connectors/data_connector.py index 90c398087578d..de81060ba1f80 100644 --- a/pytorch_lightning/trainer/connectors/data_connector.py +++ b/pytorch_lightning/trainer/connectors/data_connector.py @@ -64,7 +64,6 @@ def on_trainer_init( self, check_val_every_n_epoch: int, reload_dataloaders_every_n_epochs: int, - reload_dataloaders_every_epoch: bool, prepare_data_per_node: Optional[bool] = None, ) -> None: self.trainer.datamodule = None @@ -83,13 +82,6 @@ def on_trainer_init( self.trainer.check_val_every_n_epoch = check_val_every_n_epoch - if reload_dataloaders_every_epoch: - reload_dataloaders_every_n_epochs = int(reload_dataloaders_every_epoch) - rank_zero_deprecation( - "`reload_dataloaders_every_epoch` is deprecated in v1.4 and will be removed in v1.6." - " Please use `reload_dataloaders_every_n_epochs` in Trainer." - ) - if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0): raise MisconfigurationException( f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}." 
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 19efdce8e3549..be9c71e2fe470 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -162,7 +162,6 @@ def __init__( benchmark: bool = False, deterministic: bool = False, reload_dataloaders_every_n_epochs: int = 0, - reload_dataloaders_every_epoch: bool = False, auto_lr_find: Union[bool, str] = False, replace_sampler_ddp: bool = True, detect_anomaly: bool = False, @@ -341,12 +340,6 @@ def __init__( reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs. - reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch. - - .. deprecated:: v1.4 - ``reload_dataloaders_every_epoch`` has been deprecated in v1.4 and will be removed in v1.6. - Please use ``reload_dataloaders_every_n_epochs``. - replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this will toggled automatically when DDP is used. By default it will add ``shuffle=True`` for train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it, @@ -515,7 +508,6 @@ def __init__( self._data_connector.on_trainer_init( check_val_every_n_epoch, reload_dataloaders_every_n_epochs, - reload_dataloaders_every_epoch, prepare_data_per_node, ) diff --git a/tests/deprecated_api/test_remove_1-6.py b/tests/deprecated_api/test_remove_1-6.py deleted file mode 100644 index 1ded07734a7de..0000000000000 --- a/tests/deprecated_api/test_remove_1-6.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Test deprecated functionality which will be removed in v1.6.0.""" -from unittest.mock import call, Mock - -import pytest - -from pytorch_lightning import Trainer -from tests.helpers import BoringModel - - -def test_v1_6_0_reload_dataloaders_every_epoch(tmpdir): - model = BoringModel() - - tracker = Mock() - model.train_dataloader = Mock(wraps=model.train_dataloader) - model.val_dataloader = Mock(wraps=model.val_dataloader) - model.test_dataloader = Mock(wraps=model.test_dataloader) - - tracker.attach_mock(model.train_dataloader, "train_dataloader") - tracker.attach_mock(model.val_dataloader, "val_dataloader") - tracker.attach_mock(model.test_dataloader, "test_dataloader") - - with pytest.deprecated_call(match="`reload_dataloaders_every_epoch` is deprecated in v1.4 and will be removed"): - trainer = Trainer( - default_root_dir=tmpdir, - limit_train_batches=0.3, - limit_val_batches=0.3, - reload_dataloaders_every_epoch=True, - max_epochs=3, - ) - trainer.fit(model) - trainer.test() - - expected_sequence = ( - [call.val_dataloader()] + [call.train_dataloader(), call.val_dataloader()] * 3 + [call.test_dataloader()] - ) - assert tracker.mock_calls == expected_sequence diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py index 6b34553ff313b..b55e8344ef146 100644 --- a/tests/models/test_hooks.py +++ b/tests/models/test_hooks.py @@ -866,7 +866,7 @@ def call(hook, fn, *args, **kwargs): limit_predict_batches=batches, enable_progress_bar=False, enable_model_summary=False, - reload_dataloaders_every_epoch=True, + 
reload_dataloaders_every_n_epochs=1, ) called = [] diff --git a/tests/trainer/test_dataloaders.py b/tests/trainer/test_dataloaders.py index 1ffc957659ef0..272078b1d4206 100644 --- a/tests/trainer/test_dataloaders.py +++ b/tests/trainer/test_dataloaders.py @@ -1276,7 +1276,7 @@ def validation_step(self, batch, batch_idx): # the val dataloader on the first epoch because this only tracks the training epoch # meaning multiple passes through the validation data within a single training epoch # would not have the dataloader reloaded. - # This breaks the assumption behind reload_dataloaders_every_epoch=True + # This breaks the assumption behind reload_dataloaders_every_n_epochs=True call.val_dataloader(), call.train_dataloader(), call.val_dataloader(),