
Commit 8e8b46f

committed
1 parent b49c809 commit 8e8b46f

File tree

2 files changed: +14 −0 lines changed


pytorch_lightning/trainer/training_loop.py

Lines changed: 6 additions & 0 deletions
```diff
@@ -199,6 +199,12 @@ def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx,
         self.trainer.logger_connector.on_train_batch_end()

     def reset_train_val_dataloaders(self, model):
+        """
+        Resets train and val dataloaders if none are attached to the trainer.
+
+        The val dataloader must be initialized before training loop starts, as the training loop
+        inspects the val dataloader to determine whether to run the evaluation loop.
+        """
         if self.trainer.train_dataloader is None:
             self.trainer.reset_train_dataloader(model)
```
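
The hunk is cut off after the train branch, so the val handling that the new docstring refers to is not visible here. Below is a minimal sketch of how the complete method plausibly reads, assuming the val branch mirrors the train branch via `trainer.val_dataloaders` and `trainer.reset_val_dataloader`; those names are assumptions inferred from the docstring, not taken from this diff.

```python
# Sketch only: the val branch is inferred from the docstring above, not from the
# visible hunk; the attribute/method names on the val side are assumptions.
def reset_train_val_dataloaders(self, model):
    """
    Resets train and val dataloaders if none are attached to the trainer.

    The val dataloader must be initialized before training loop starts, as the training loop
    inspects the val dataloader to determine whether to run the evaluation loop.
    """
    if self.trainer.train_dataloader is None:
        self.trainer.reset_train_dataloader(model)

    if self.trainer.val_dataloaders is None:  # assumed attribute holding the attached val loaders
        self.trainer.reset_val_dataloader(model)
```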

tests/trainer/test_dataloaders.py

Lines changed: 8 additions & 0 deletions
```diff
@@ -1112,6 +1112,14 @@ def test_dataloaders_load_only_once_val_interval(tmpdir):
     expected_sequence = [
         'val_dataloader',
         'train_dataloader',
+        # This has subsequent calls to val_dataloader
+        # because the training loop runs the evaluation loop,
+        # which reloads the val dataloader again.
+        # We cannot yet rely on trainer.current_epoch == 0 to skip reloading
+        # the val dataloader on the first epoch, because it only tracks the training epoch,
+        # so multiple passes through the validation data within a single training epoch
+        # would not have the dataloader reloaded.
+        # This breaks the assumption behind reload_dataloaders_every_epoch=True.
         'val_dataloader',
         'val_dataloader',
         'val_dataloader',
```
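
For readers unfamiliar with the test, a call sequence like the one above is typically produced by a model whose dataloader hooks record their own invocations. The following is a minimal, self-contained sketch of that pattern, not the test from this commit; `CountingModel`, `RandomDataset`, and the specific Trainer flags are illustrative choices.

```python
import torch
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning import LightningModule, Trainer


class RandomDataset(Dataset):
    def __len__(self):
        return 64

    def __getitem__(self, idx):
        return torch.randn(32)


class CountingModel(LightningModule):
    """Records every call to its dataloader hooks in `self.calls`."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)
        self.calls = []

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        return self(batch).sum()

    def validation_step(self, batch, batch_idx):
        return self(batch).sum()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def train_dataloader(self):
        self.calls.append('train_dataloader')
        return DataLoader(RandomDataset(), batch_size=8)

    def val_dataloader(self):
        self.calls.append('val_dataloader')
        return DataLoader(RandomDataset(), batch_size=8)


model = CountingModel()
trainer = Trainer(
    default_root_dir='/tmp/example',      # stand-in for the tmpdir fixture
    max_epochs=1,
    reload_dataloaders_every_epoch=True,
    limit_train_batches=4,
    limit_val_batches=2,
    val_check_interval=0.5,               # run validation twice within the training epoch
)
trainer.fit(model)
print(model.calls)  # compare against an expected_sequence like the one in the diff
```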

0 commit comments