3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -133,6 +133,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed deprecated `CheckpointConnector.hpc_load` property in favor of `CheckpointConnector.restore` ([#10525](https://github.com/PyTorchLightning/pytorch-lightning/pull/10525))


- Removed deprecated `reload_dataloaders_every_epoch` from `Trainer` in favour of `reload_dataloaders_every_n_epochs` ([#10481](https://github.com/PyTorchLightning/pytorch-lightning/pull/10481))



### Fixed

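The changelog entry above is the user-facing change: the boolean flag becomes an integer interval. A minimal before/after sketch of the migration (assuming only the standard `pytorch_lightning.Trainer` import; nothing here is taken from the diff beyond the two argument names):

```python
from pytorch_lightning import Trainer

# Before (deprecated in v1.4, removed by this PR):
# trainer = Trainer(reload_dataloaders_every_epoch=True)

# After: an integer interval. 1 reproduces the old every-epoch behaviour;
# 0 (the default) never reloads the dataloaders.
trainer = Trainer(reload_dataloaders_every_n_epochs=1)
```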
8 changes: 0 additions & 8 deletions pytorch_lightning/trainer/connectors/data_connector.py
@@ -64,7 +64,6 @@ def on_trainer_init(
self,
check_val_every_n_epoch: int,
reload_dataloaders_every_n_epochs: int,
reload_dataloaders_every_epoch: bool,
prepare_data_per_node: Optional[bool] = None,
) -> None:
self.trainer.datamodule = None
@@ -83,13 +82,6 @@ def on_trainer_init(

self.trainer.check_val_every_n_epoch = check_val_every_n_epoch

if reload_dataloaders_every_epoch:
reload_dataloaders_every_n_epochs = int(reload_dataloaders_every_epoch)
rank_zero_deprecation(
"`reload_dataloaders_every_epoch` is deprecated in v1.4 and will be removed in v1.6."
" Please use `reload_dataloaders_every_n_epochs` in Trainer."
)

if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0):
raise MisconfigurationException(
f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}."
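With the deprecation shim deleted, the only gate left in the connector is the `isinstance` check shown above. A standalone sketch of that surviving validation (the exception class here is a local stand-in for `pytorch_lightning.utilities.exceptions.MisconfigurationException`):

```python
class MisconfigurationException(Exception):
    """Local stand-in for Lightning's exception of the same name."""


def validate_reload_interval(reload_dataloaders_every_n_epochs: int) -> None:
    # Mirrors the check retained in data_connector.py: any non-negative int passes.
    if not isinstance(reload_dataloaders_every_n_epochs, int) or reload_dataloaders_every_n_epochs < 0:
        raise MisconfigurationException(
            "`reload_dataloaders_every_n_epochs` should be an int >= 0,"
            f" got {reload_dataloaders_every_n_epochs}."
        )


validate_reload_interval(2)     # ok
validate_reload_interval(True)  # also passes: bool is a subclass of int in Python
```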
8 changes: 0 additions & 8 deletions pytorch_lightning/trainer/trainer.py
@@ -162,7 +162,6 @@ def __init__(
benchmark: bool = False,
deterministic: bool = False,
reload_dataloaders_every_n_epochs: int = 0,
reload_dataloaders_every_epoch: bool = False,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
detect_anomaly: bool = False,
@@ -341,12 +340,6 @@ def __init__(

reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.

reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch.

.. deprecated:: v1.4
``reload_dataloaders_every_epoch`` has been deprecated in v1.4 and will be removed in v1.6.
Please use ``reload_dataloaders_every_n_epochs``.

replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified, this
will be toggled automatically when DDP is used. By default it will add ``shuffle=True`` for
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
@@ -515,7 +508,6 @@ def __init__(
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
reload_dataloaders_every_epoch,
prepare_data_per_node,
)

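The surrounding docstring also covers `replace_sampler_ddp`: when replacement is disabled, you attach your own distributed sampler in the dataloader hook. A minimal sketch, assuming a module with a `train_dataset` attribute (an illustrative name, not from this diff):

```python
from torch.utils.data import DataLoader, DistributedSampler


class MyModule:  # stand-in for a LightningModule; the class name is illustrative
    def train_dataloader(self):
        # With Trainer(replace_sampler_ddp=False), Lightning does not inject a
        # DistributedSampler, so the hook attaches one explicitly.
        sampler = DistributedSampler(self.train_dataset, shuffle=True)
        return DataLoader(self.train_dataset, batch_size=32, sampler=sampler)
```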
49 changes: 0 additions & 49 deletions tests/deprecated_api/test_remove_1-6.py

This file was deleted.

2 changes: 1 addition & 1 deletion tests/models/test_hooks.py
@@ -866,7 +866,7 @@ def call(hook, fn, *args, **kwargs):
limit_predict_batches=batches,
enable_progress_bar=False,
enable_model_summary=False,
reload_dataloaders_every_epoch=True,
reload_dataloaders_every_n_epochs=True,
> **Review comment (Contributor):** doesn't this need to be an int?

)

called = []
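On the review question above: in Python, `bool` is a subclass of `int`, so `True` passes the connector's `isinstance(..., int)` check and behaves as the interval `1`; an explicit integer would still be clearer in the test. For example:

```python
>>> isinstance(True, int)
True
>>> int(True)
1
```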
2 changes: 1 addition & 1 deletion tests/trainer/test_dataloaders.py
@@ -1276,7 +1276,7 @@ def validation_step(self, batch, batch_idx):
# the val dataloader on the first epoch because this only tracks the training epoch
# meaning multiple passes through the validation data within a single training epoch
# would not have the dataloader reloaded.
# This breaks the assumption behind reload_dataloaders_every_epoch=True
# This breaks the assumption behind reload_dataloaders_every_n_epochs=True
call.val_dataloader(),
call.train_dataloader(),
call.val_dataloader(),
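The test comment above captures the semantics being exercised: the reload counter ticks on training epochs, not on individual dataloader requests, so repeated validation passes within one training epoch reuse the same dataloader. A hedged sketch of that bookkeeping (illustrative only, not Lightning's internal implementation):

```python
class DataloaderReloadTracker:
    """Illustrative bookkeeping: reload decisions key off the training epoch,
    so multiple validation passes inside one epoch never trigger a reload."""

    def __init__(self, every_n_epochs: int) -> None:
        self.every_n_epochs = every_n_epochs
        self._last_reload_epoch = -1

    def should_reload(self, current_train_epoch: int) -> bool:
        if self.every_n_epochs < 1:
            return False  # 0 disables periodic reloading
        if current_train_epoch == self._last_reload_epoch:
            return False  # same training epoch: no second reload
        if current_train_epoch % self.every_n_epochs == 0:
            self._last_reload_epoch = current_train_epoch
            return True
        return False
```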