
Commit 1be025d

Keep FitLoop.done check
1 parent b2db296 commit 1be025d

1 file changed (+3, -5)

pytorch_lightning/loops/fit_loop.py

Lines changed: 3 additions & 5 deletions
@@ -14,8 +14,7 @@
 import logging
 import os
 from functools import partial
-from typing import Optional
-from typing import Type
+from typing import Optional, Type

 import pytorch_lightning as pl
 from pytorch_lightning.accelerators import GPUAccelerator
@@ -34,8 +33,7 @@
     InterBatchParallelDataFetcher,
 )
 from pytorch_lightning.utilities.model_helpers import is_overridden
-from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation
-from pytorch_lightning.utilities.rank_zero import rank_zero_warn
+from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
 from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature

 log = logging.getLogger(__name__)
@@ -195,7 +193,7 @@ def skip(self) -> bool:
         """Whether we should skip the training and immediately return from the call to :meth:`run`."""
         # since `trainer.num_training_batches` depends on the `train_dataloader` but that won't be called
         # until `on_run_start`, we use `limit_train_batches` instead
-        return self.trainer.limit_train_batches == 0
+        return self.done or self.trainer.limit_train_batches == 0

     def connect(self, epoch_loop: TrainingEpochLoop) -> None:  # type: ignore[override]
         """Connects a training epoch loop to this fit loop."""
