
Commit 7b104ee

Merge branch 'master' into refactor/remove-check-ckpt-callback
2 parents e830627 + 17f2ae5

File tree

CHANGELOG.md
pytorch_lightning/__about__.py
pytorch_lightning/loops/epoch/evaluation_epoch_loop.py

3 files changed: +25 -17 lines changed


CHANGELOG.md

Lines changed: 12 additions & 0 deletions
@@ -166,6 +166,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added support for `accelerator='cpu'|'gpu'|'tpu'|'ipu'|'auto'` ([#7808](https://github.com/PyTorchLightning/pytorch-lightning/pull/7808))


+- Added `tpu_spawn_debug` to plugin registry ([#7933](https://github.com/PyTorchLightning/pytorch-lightning/pull/7933))
+
+
 - Enabled traditional/manual launching of DDP processes through `LOCAL_RANK` and `NODE_RANK` environment variable assignments ([#7480](https://github.com/PyTorchLightning/pytorch-lightning/pull/7480))


@@ -303,6 +306,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - `Trainer(resume_from_checkpoint=...)` now restores the model directly after `LightningModule.setup()`, which is before `LightningModule.configure_sharded_model()` ([#7652](https://github.com/PyTorchLightning/pytorch-lightning/pull/7652))


+- Moved `torch.cuda.set_device()` to enable collective calls earlier in setup ([#8312](https://github.com/PyTorchLightning/pytorch-lightning/pull/8312))
+
+
+- Use XLA utility API to move data to CPU (Single TPU core) ([#8078](https://github.com/PyTorchLightning/pytorch-lightning/pull/8078))
+
+
 ### Deprecated


@@ -460,6 +469,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Fixed missing call to `LightningModule.untoggle_optimizer` in training loop when running gradient accumulation with multiple optimizers ([#8284](https://github.com/PyTorchLightning/pytorch-lightning/pull/8284))


+- Fixed progress bar updates for Pod Training ([#8258](https://github.com/PyTorchLightning/pytorch-lightning/pull/8258))
+
+
 ## [1.3.8] - 2021-07-01

 ### Fixed
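For context on the `accelerator` entry above: a minimal usage sketch, assuming a PyTorch Lightning 1.4-style `Trainer`; the accepted string values are taken directly from the changelog line.

    from pytorch_lightning import Trainer

    # 'auto' lets Lightning pick the available hardware; the other accepted
    # values per the entry above are 'cpu', 'gpu', 'tpu', and 'ipu'.
    trainer = Trainer(accelerator='auto')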

pytorch_lightning/__about__.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 import time

 _this_year = time.strftime("%Y")
-__version__ = '1.4.0dev'
+__version__ = '1.4.0rc0'
 __author__ = 'William Falcon et al.'
 __author_email__ = '[email protected]'
 __license__ = 'Apache-2.0'
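The only change here is the version bump from the development tag to the first 1.4.0 release candidate. A quick way to confirm which build is installed (a minimal sketch; `__version__` is the attribute defined in the file above):

    import pytorch_lightning as pl

    # For a checkout of this commit this should print '1.4.0rc0'.
    print(pl.__version__)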

pytorch_lightning/loops/epoch/evaluation_epoch_loop.py

Lines changed: 12 additions & 16 deletions
@@ -37,9 +37,8 @@ def __init__(self) -> None:
         super().__init__()
         self.predictions: Optional[PredictionCollection] = None
         self.dataloader: Optional[Iterator] = None
-        self.dl_max_batches: Optional[int] = None
-        self.dataloader_idx: Optional[int] = None
-        self.num_dataloaders: Optional[int] = None
+        self._dl_max_batches: Optional[int] = None
+        self._num_dataloaders: Optional[int] = None
         self.outputs: List[STEP_OUTPUT] = []
         self.progress = EpochProgress()

@@ -54,15 +53,14 @@ def connect(
     @property
     def done(self) -> bool:
         """Returns ``True`` if the current iteration count reaches the number of dataloader batches."""
-        return self.iteration_count >= self.dl_max_batches
+        return self.iteration_count >= self._dl_max_batches

     def reset(self) -> None:
         """Resets the loop's internal state."""
         self.iteration_count = 0
         self.predictions = PredictionCollection(self.trainer.global_rank, self.trainer.world_size)
-        self.dl_max_batches = None
-        self.dataloader_idx = None
-        self.num_dataloaders = None
+        self._dl_max_batches = None
+        self._num_dataloaders = None
         self.outputs = []

     def on_run_start(
@@ -80,11 +78,9 @@ def on_run_start(
             dl_max_batches: maximum number of batches the dataloader can produce
             num_dataloaders: the total number of dataloaders
         """
-        void(dataloader_iter)
-
-        self.dl_max_batches = dl_max_batches
-        self.dataloader_idx = dataloader_idx
-        self.num_dataloaders = num_dataloaders
+        void(dataloader_iter, dataloader_idx)
+        self._dl_max_batches = dl_max_batches
+        self._num_dataloaders = num_dataloaders

     def advance(
         self,
@@ -182,8 +178,8 @@ def on_evaluation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx:
         """
         self.trainer.logger_connector.on_batch_start()

-        assert self.num_dataloaders is not None
-        self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self.num_dataloaders)
+        assert self._num_dataloaders is not None
+        self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self._num_dataloaders)

         if self.trainer.testing:
             self.trainer.call_hook("on_test_batch_start", batch, batch_idx, dataloader_idx)
@@ -243,8 +239,8 @@ def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: int) -> Dict
         # make dataloader_idx arg in validation_step optional
         step_kwargs = OrderedDict([("batch", batch), ("batch_idx", batch_idx)])

-        multiple_val_loaders = not self.trainer.testing and self.num_dataloaders > 1
-        multiple_test_loaders = self.trainer.testing and self.num_dataloaders > 1
+        multiple_val_loaders = not self.trainer.testing and self._num_dataloaders > 1
+        multiple_test_loaders = self.trainer.testing and self._num_dataloaders > 1

         if multiple_test_loaders or multiple_val_loaders:
             step_kwargs["dataloader_idx"] = dataloader_idx
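This refactor renames `dl_max_batches` and `num_dataloaders` to the protected `_dl_max_batches` and `_num_dataloaders`, and drops the now-unused `dataloader_idx` attribute by routing the argument into `void()` instead. Judging only from its usage in this diff, `void` appears to be a no-op that swallows arguments a hook signature requires but the body never reads; a minimal sketch under that assumption (not the library's actual definition):

    from typing import Any

    def void(*args: Any, **kwargs: Any) -> None:
        """Accept and discard arguments that a signature requires but the body does not use."""
        # Intentionally empty: calling void(x) only marks x as "used" for linters and readers.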
