
Commit 4fc80df

Merge branch 'master' into bugfix/should_stop
2 parents 747776c + eafec7d

37 files changed (+168 / -66 lines)

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
@@ -170,6 +170,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
+- Sanitize `None` params during pruning ([#6836](https://github.com/PyTorchLightning/pytorch-lightning/pull/6836))
+
+
 - Made the `Plugin.reduce` method more consistent across all Plugins to reflect a mean-reduction by default ([#6011](https://github.com/PyTorchLightning/pytorch-lightning/pull/6011))
 
 
@@ -197,9 +200,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Fixed torch distributed not available in setup hook for DDP ([#6506](https://github.com/PyTorchLightning/pytorch-lightning/pull/6506))
 
 
+- Enforce an epoch scheduler interval when using SWA ([#6588](https://github.com/PyTorchLightning/pytorch-lightning/pull/6588))
+
+
+- Fixed an issue with `IterableDataset` when `__len__` is not defined ([#6828](https://github.com/PyTorchLightning/pytorch-lightning/pull/6828))
+
+
 - Fixed `EarlyStopping` logic when `min_epochs` or `min_steps` requirement is not met ([#6705](https://github.com/PyTorchLightning/pytorch-lightning/pull/6705))
 
 
+
 ## [1.2.6] - 2021-03-30
 
 ### Changed

docs/source/advanced/tpu.rst

Lines changed: 1 addition & 2 deletions
@@ -64,8 +64,7 @@ To get a TPU on colab, follow these steps:
 
 .. code-block::
 
-    !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
-    !python pytorch-xla-env-setup.py --version 1.7 --apt-packages libomp5 libopenblas-dev
+    !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.8-cp37-cp37m-linux_x86_64.whl
 
 5. Once the above is done, install PyTorch Lightning (v 0.7.0+).
 

docs/source/common/lightning_module.rst

Lines changed: 0 additions & 24 deletions
@@ -912,30 +912,6 @@ use_amp
 ~~~~~~~
 True if using Automatic Mixed Precision (AMP)
 
-------------
-
-use_ddp
-~~~~~~~
-True if using ddp
-
-------------
-
-use_ddp2
-~~~~~~~~
-True if using ddp2
-
-------------
-
-use_dp
-~~~~~~
-True if using dp
-
-------------
-
-use_tpu
-~~~~~~~
-True if using TPUs
-
 --------------
 
 automatic_optimization

docs/source/starter/introduction_guide.rst

Lines changed: 1 addition & 3 deletions
@@ -572,9 +572,7 @@ Next, install the required xla library (adds support for PyTorch on TPUs)
 
 .. code-block:: shell
 
-    !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
-
-    !python pytorch-xla-env-setup.py --version nightly --apt-packages libomp5 libopenblas-dev
+    !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.8-cp37-cp37m-linux_x86_64.whl
 
 In distributed training (multiple GPUs and multiple TPU cores) each GPU or TPU core will run a copy
 of this program. This means that without taking any care you will download the dataset N times which

pytorch_lightning/accelerators/accelerator.py

Lines changed: 1 addition & 1 deletion
@@ -480,7 +480,7 @@ def connect_precision_plugin(self, plugin: PrecisionPlugin) -> None:
         )
         self.setup_precision_plugin(plugin)
 
-    def save_checkpoint(self, checkpoint: Dict[str, Any], filepath) -> None:
+    def save_checkpoint(self, checkpoint: Dict[str, Any], filepath: str) -> None:
         """Save model/training states as a checkpoint file through state-dump and file-write.
 
         Args:
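
The only functional change here is the `str` annotation on `filepath`. A minimal, hypothetical sketch (not the Accelerator's actual implementation) of why the annotation helps: static checkers can now flag callers that pass something other than a string path.

    from typing import Any, Dict

    import torch


    def save_checkpoint(checkpoint: Dict[str, Any], filepath: str) -> None:
        # Illustration only: dump the state dict to the given file path.
        torch.save(checkpoint, filepath)


    save_checkpoint({"state_dict": {}}, "example.ckpt")   # fine
    # save_checkpoint({"state_dict": {}}, 42)             # a type checker would now flag this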

pytorch_lightning/callbacks/finetuning.py

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ def finetune_function(self, pl_module, current_epoch, optimizer, optimizer_idx):
                 # When `current_epoch` is 10, feature_extractor will start training.
                 if current_epoch == self._unfreeze_at_epoch:
                     self.unfreeze_and_add_param_group(
-                        module=pl_module.feature_extractor,
+                        modules=pl_module.feature_extractor,
                         optimizer=optimizer,
                         train_bn=True,
                     )
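
The docstring example now uses the keyword `modules=`, matching the actual parameter name of `unfreeze_and_add_param_group`. A short sketch of a callback built around the corrected call; it closely mirrors the example in this file, and the `feature_extractor` attribute on the LightningModule is an assumption of the sketch.

    from pytorch_lightning.callbacks.finetuning import BaseFinetuning


    class FeatureExtractorFreezeUnfreeze(BaseFinetuning):
        def __init__(self, unfreeze_at_epoch: int = 10):
            super().__init__()
            self._unfreeze_at_epoch = unfreeze_at_epoch

        def freeze_before_training(self, pl_module):
            # Assumes the LightningModule exposes a `feature_extractor` submodule.
            self.freeze(pl_module.feature_extractor)

        def finetune_function(self, pl_module, current_epoch, optimizer, optimizer_idx):
            if current_epoch == self._unfreeze_at_epoch:
                self.unfreeze_and_add_param_group(
                    modules=pl_module.feature_extractor,  # keyword is `modules`, not `module`
                    optimizer=optimizer,
                    train_bn=True,
                )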

pytorch_lightning/callbacks/progress.py

Lines changed: 3 additions & 2 deletions
@@ -148,9 +148,10 @@ def total_val_batches(self) -> int:
         validation dataloader is of infinite size.
         """
         total_val_batches = 0
-        if not self.trainer.disable_validation:
-            is_val_epoch = (self.trainer.current_epoch) % self.trainer.check_val_every_n_epoch == 0
+        if self.trainer.enable_validation:
+            is_val_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
             total_val_batches = sum(self.trainer.num_val_batches) if is_val_epoch else 0
+
         return total_val_batches
 
     @property
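
The `+ 1` matters because `current_epoch` is zero-based while `check_val_every_n_epoch` counts completed epochs. A quick standalone check (plain Python, no Trainer involved) of how the two expressions disagree:

    check_val_every_n_epoch = 2

    for current_epoch in range(4):
        old = current_epoch % check_val_every_n_epoch == 0          # True for epochs 0, 2
        new = (current_epoch + 1) % check_val_every_n_epoch == 0    # True for epochs 1, 3
        print(current_epoch, old, new)

    # With the fix, the progress bar counts validation batches on the epochs where
    # validation actually runs (after the 2nd, 4th, ... completed epoch).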

pytorch_lightning/callbacks/pruning.py

Lines changed: 3 additions & 1 deletion
@@ -422,7 +422,9 @@ def sanitize_parameters_to_prune(
         current_modules = [m for m in pl_module.modules() if not isinstance(m, _MODULE_CONTAINERS)]
 
         if parameters_to_prune is None:
-            parameters_to_prune = [(m, p) for p in parameters for m in current_modules if hasattr(m, p)]
+            parameters_to_prune = [
+                (m, p) for p in parameters for m in current_modules if getattr(m, p, None) is not None
+            ]
         elif (
             isinstance(parameters_to_prune, (list, tuple)) and len(parameters_to_prune) > 0
             and all(len(p) == 2 for p in parameters_to_prune)
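
The switch from `hasattr` to `getattr(..., None) is not None` is what sanitizes `None` params: a module such as `nn.Linear(..., bias=False)` still has a `bias` attribute, it is just `None`, so `hasattr` alone would hand a non-existent parameter to the pruning utilities. A small sketch of the difference:

    import torch.nn as nn

    layer = nn.Linear(4, 4, bias=False)

    print(hasattr(layer, "bias"))                      # True  -- the attribute exists but is None
    print(getattr(layer, "bias", None) is not None)    # False -- filtered out by the new check

    parameters = ("weight", "bias")
    parameters_to_prune = [
        (m, p) for p in parameters for m in [layer] if getattr(m, p, None) is not None
    ]
    print(parameters_to_prune)   # only the (layer, 'weight') pair survives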

pytorch_lightning/callbacks/stochastic_weight_avg.py

Lines changed: 4 additions & 3 deletions
@@ -187,14 +187,15 @@ def on_train_epoch_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningMo
                 anneal_strategy=self._annealing_strategy,
                 last_epoch=trainer.max_epochs if self._annealing_strategy == "cos" else -1
             )
+            _scheduler_config = _get_default_scheduler_config()
+            assert _scheduler_config["interval"] == "epoch" and _scheduler_config["frequency"] == 1
+            _scheduler_config["scheduler"] = self._swa_scheduler
 
             if trainer.lr_schedulers:
                 lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
                 rank_zero_warn(f"Swapping lr_scheduler {lr_scheduler} for {self._swa_scheduler}")
-                trainer.lr_schedulers[0]["scheduler"] = self._swa_scheduler
+                trainer.lr_schedulers[0] = _scheduler_config
             else:
-                _scheduler_config = _get_default_scheduler_config()
-                _scheduler_config["scheduler"] = self._swa_scheduler
                 trainer.lr_schedulers.append(_scheduler_config)
 
             self.n_averaged = torch.tensor(0, dtype=torch.long, device=pl_module.device)
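
Building the SWA entry from `_get_default_scheduler_config()` and replacing the whole list element (rather than only the `"scheduler"` key) is what enforces an epoch interval: the old code kept whatever interval the user's original scheduler was configured with, possibly `"step"`. A rough sketch of the idea with hand-written config dicts; the exact keys of Lightning's default scheduler config are an assumption here.

    # Hypothetical illustration, not Lightning's internal code.
    user_entry = {"scheduler": "user_scheduler", "interval": "step", "frequency": 1}
    lr_schedulers = [user_entry]

    swa_entry = {"scheduler": "swa_scheduler", "interval": "epoch", "frequency": 1}
    assert swa_entry["interval"] == "epoch" and swa_entry["frequency"] == 1

    # Swapping the whole entry drops the stale "step" interval that the old code
    # would have inherited from the user's configuration.
    lr_schedulers[0] = swa_entry
    print(lr_schedulers)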

pytorch_lightning/metrics/classification/accuracy.py

Lines changed: 1 addition & 0 deletions
@@ -37,3 +37,4 @@ def __init__
         .. deprecated::
             Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0.
         """
+        _ = threshold, top_k, subset_accuracy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
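
Assigning the constructor arguments to `_` keeps the deprecated signature intact for backward compatibility while telling linters the values are accepted but intentionally unused. The same idiom in isolation (a generic sketch, not the metric's actual implementation):

    from typing import Optional


    def deprecated_api(threshold: float = 0.5, top_k: Optional[int] = None, compute_on_step: bool = True) -> None:
        # Arguments are kept so old call sites keep working, but they no longer do anything.
        _ = threshold, top_k, compute_on_step
        print("use the replacement API instead")


    deprecated_api(threshold=0.7)   # still accepted, silently ignored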
