
Commit 7a9a08c

Drop torch 1.6 testing (#10390)
* Drop torch 1.6 support
* Drop 1.6 support
* Update CHANGELOG
* Fixes
* Split change
* Undo change
* 1.7 -> 1.7.1 pytorch/pytorch#47354
* Force trigger nightly
* Update .github/workflows/events-nightly.yml
  Co-authored-by: Aki Nitta <[email protected]>
* Revert 1.7.1 change - try wildcard
* Update adjust versions and test it
* Undo test changes
* Revert "Undo test changes"
  This reverts commit 3a6acad.
* Update CHANGELOG.md

Co-authored-by: Aki Nitta <[email protected]>
1 parent a8c2725 commit 7a9a08c

File tree

24 files changed · +40 -203 lines changed


pytorch_lightning/callbacks/quantization.py

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 if _TORCH_GREATER_EQUAL_1_8:
     from torch.quantization import FakeQuantizeBase
 else:
-    # For torch 1.6 and 1.7.
+    # For torch 1.7.
     from torch.quantization import FakeQuantize as FakeQuantizeBase

 import pytorch_lightning as pl
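Note: the gate above simply resolves to whichever class the installed torch build provides. A minimal standalone sketch of the same fallback pattern, using packaging.version directly instead of Lightning's _TORCH_GREATER_EQUAL_1_8 flag (the explicit version check here is illustrative, not Lightning's code):

```python
from packaging.version import Version

import torch

if Version(torch.__version__) >= Version("1.8.0"):
    # torch >= 1.8 exposes the abstract base class directly.
    from torch.quantization import FakeQuantizeBase
else:
    # On torch 1.7, fall back to the concrete FakeQuantize class and use it
    # as the base type for isinstance checks.
    from torch.quantization import FakeQuantize as FakeQuantizeBase

print(FakeQuantizeBase.__name__)  # whichever class the installed torch provides
```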

pytorch_lightning/distributed/dist.py

Lines changed: 3 additions & 2 deletions
@@ -13,7 +13,8 @@
 # limitations under the License.
 from typing import Any

-from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
+import torch.distributed
+
 from pytorch_lightning.utilities import rank_zero_deprecation
 from pytorch_lightning.utilities.distributed import group as _group

@@ -40,6 +41,6 @@ def broadcast(self, obj: Any, group=_group.WORLD):
         if self.rank != 0:
             obj = [None] * len(obj)

-        broadcast_object_list(obj, 0, group=group or _group.WORLD)
+        torch.distributed.broadcast_object_list(obj, 0, group=group or _group.WORLD)

         return obj[0]
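With the backport in pytorch_lightning/overrides/torch_distributed.py deleted (next file), the broadcast now calls torch.distributed.broadcast_object_list directly. A minimal sketch of that call pattern outside Lightning; it assumes the script is launched with torchrun and uses the gloo backend purely for illustration:

```python
import torch.distributed as dist


def broadcast_from_rank_zero(obj, group=None):
    """Send a picklable Python object from rank 0 to every rank (illustrative helper)."""
    # broadcast_object_list mutates the list in place, so every rank must pass a
    # list of the same length; non-source ranks provide placeholders.
    buffer = [obj if dist.get_rank() == 0 else None]
    dist.broadcast_object_list(buffer, src=0, group=group)
    return buffer[0]


if __name__ == "__main__":
    # Assumes launch via: torchrun --nproc_per_node=2 this_script.py
    dist.init_process_group(backend="gloo")
    result = broadcast_from_rank_zero({"epoch": 3, "lr": 1e-3})
    print(f"rank {dist.get_rank()} received {result}")
    dist.destroy_process_group()
```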

pytorch_lightning/overrides/torch_distributed.py

Lines changed: 0 additions & 99 deletions
This file was deleted.

pytorch_lightning/plugins/training_type/ddp.py

Lines changed: 6 additions & 10 deletions
@@ -34,7 +34,6 @@
 from pytorch_lightning.core.optimizer import LightningOptimizer
 from pytorch_lightning.overrides import LightningDistributedModule
 from pytorch_lightning.overrides.distributed import prepare_for_backward
-from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
 from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
 from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
 from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin

@@ -43,7 +42,6 @@
     _FAIRSCALE_AVAILABLE,
     _HYDRA_AVAILABLE,
     _IS_WINDOWS,
-    _TORCH_GREATER_EQUAL_1_7,
     _TORCH_GREATER_EQUAL_1_8,
     _TORCH_GREATER_EQUAL_1_9,
     _TORCH_GREATER_EQUAL_1_10,

@@ -255,15 +253,13 @@ def pre_configure_ddp(self):
         # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
         # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
         self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
-        # todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
-        if (
-            _TORCH_GREATER_EQUAL_1_7
-            and not self.lightning_module.automatic_optimization
-            and not self._ddp_kwargs.get("find_unused_parameters", False)
+        if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(
+            "find_unused_parameters", False
         ):
+            # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
             rank_zero_warn(
-                "From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
-                "to properly work with DDP."
+                "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to"
+                " properly work with DDP. Using `find_unused_parameters=True`."
             )
             self._ddp_kwargs["find_unused_parameters"] = True

@@ -371,7 +367,7 @@ def broadcast(self, obj: object, src: int = 0) -> object:
         obj = [obj]
         if self.global_rank != src:
             obj = [None]
-        broadcast_object_list(obj, src, group=_group.WORLD)
+        torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
         return obj[0]

     def pre_backward(self, closure_loss: torch.Tensor) -> None:
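For context on the warning text above, a standalone sketch (outside Lightning) of the DDP flag that pre_configure_ddp forces on; the toy model and the torchrun launch are assumptions for illustration only:

```python
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

# Assumes launch via: torchrun --nproc_per_node=2 this_script.py
dist.init_process_group(backend="gloo")

model = torch.nn.Linear(32, 4)
# This is what the warning amounts to: when optimization is driven manually and
# some parameters may not receive gradients in a given backward pass, DDP needs
# `find_unused_parameters=True` so its reducer does not wait for gradient hooks
# that never fire.
ddp_model = DDP(model, find_unused_parameters=True)

out = ddp_model(torch.randn(8, 32))
out.sum().backward()
dist.destroy_process_group()
```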

pytorch_lightning/plugins/training_type/ddp_spawn.py

Lines changed: 7 additions & 10 deletions
@@ -27,12 +27,11 @@
 import pytorch_lightning as pl
 from pytorch_lightning.overrides import LightningDistributedModule
 from pytorch_lightning.overrides.distributed import prepare_for_backward
-from pytorch_lightning.overrides.torch_distributed import broadcast_object_list
 from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
 from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
 from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
 from pytorch_lightning.trainer.states import TrainerFn
-from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8, rank_zero_warn
+from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, rank_zero_warn
 from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
 from pytorch_lightning.utilities.cloud_io import atomic_save
 from pytorch_lightning.utilities.cloud_io import load as pl_load

@@ -238,15 +237,13 @@ def pre_configure_ddp(self):
         # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
         # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
         self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
-        # todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
-        if (
-            _TORCH_GREATER_EQUAL_1_7
-            and not self.lightning_module.automatic_optimization
-            and not self._ddp_kwargs.get("find_unused_parameters", False)
+        if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(
+            "find_unused_parameters", False
         ):
+            # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
             rank_zero_warn(
-                "From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
-                "to properly work with DDP."
+                "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to"
+                " properly work with DDP. Using `find_unused_parameters=True`."
             )
             self._ddp_kwargs["find_unused_parameters"] = True

@@ -323,7 +320,7 @@ def broadcast(self, obj: object, src: int = 0) -> object:
         obj = [obj]
         if self.global_rank != src:
             obj = [None]
-        broadcast_object_list(obj, src, group=_group.WORLD)
+        torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
         return obj[0]

     def model_to_device(self):

pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 4 deletions
@@ -74,7 +74,6 @@
 from pytorch_lightning.utilities.imports import (
     _HOROVOD_AVAILABLE,
     _IPU_AVAILABLE,
-    _TORCH_GREATER_EQUAL_1_7,
     _TORCH_GREATER_EQUAL_1_8,
     _TPU_AVAILABLE,
 )

@@ -190,10 +189,8 @@ def _init_deterministic(self, deterministic: bool) -> None:
         self.deterministic = deterministic
         if _TORCH_GREATER_EQUAL_1_8:
             torch.use_deterministic_algorithms(deterministic)
-        elif _TORCH_GREATER_EQUAL_1_7:
+        else:
             torch.set_deterministic(deterministic)
-        else:  # the minimum version Lightning supports is PyTorch 1.6
-            torch._set_deterministic(deterministic)
         if deterministic:
             # fixing non-deterministic part of horovod
             # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
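The remaining two-way branch maps onto public torch APIs. A condensed sketch of the same toggle using an explicit version check in place of Lightning's flags (the helper name and version check here are illustrative):

```python
import torch
from packaging.version import Version


def set_deterministic_mode(flag: bool) -> None:
    """Version-gated deterministic toggle mirroring the branch above (illustrative)."""
    if Version(torch.__version__) >= Version("1.8.0"):
        # Public API from torch 1.8 onwards.
        torch.use_deterministic_algorithms(flag)
    else:
        # The torch 1.7 spelling of the same switch.
        torch.set_deterministic(flag)


set_deterministic_mode(True)
```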

pytorch_lightning/utilities/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -44,7 +44,6 @@
     _OMEGACONF_AVAILABLE,
     _POPTORCH_AVAILABLE,
     _RICH_AVAILABLE,
-    _TORCH_GREATER_EQUAL_1_7,
     _TORCH_GREATER_EQUAL_1_8,
     _TORCH_GREATER_EQUAL_1_9,
     _TORCH_GREATER_EQUAL_1_10,

pytorch_lightning/utilities/auto_restart.py

Lines changed: 9 additions & 26 deletions
@@ -305,9 +305,6 @@ def _wrap_generator_samplers(self) -> None:
         # access wrapped dataset attributes
         dataset_dict = self.dataset.__dict__

-        # create a tuple of sampler names
-        samplers_names = tuple(v.__class__.__name__ for k, v in dataset_dict.items() if isinstance(v, Sampler))
-
         # create a dictionary of generator present within the dataset attributes
         dataset_sampler_generators = {k: v for k, v in dataset_dict.items() if isinstance(v, (Generator, Iterator))}

@@ -318,31 +315,17 @@ def _wrap_generator_samplers(self) -> None:
             if isinstance(generator, Sampler):
                 continue

-            # used to handle a weird behaviour from PyTorch 1.6
-            # where the sampler is converted to a list_iterator
-            is_legacy = False
-
-            if isinstance(generator, Generator):
-                # Generator name have the the form `SamplerName.__iter__`
-                generator_name = generator.__qualname__.split(".")[0]
-            else:
-                # assume the retrieved iterator is coming from sampler.
-                is_legacy = True
-
-            # validate the base generator name matches a sampler name.
-            if is_legacy or any(sampler_name == generator_name for sampler_name in samplers_names):
-
-                # wrap the generator into a `FastForwardSampler`
-                sampler = FastForwardSampler(generator, attr_name=generator_attr_name)
+            # wrap the generator into a `FastForwardSampler`
+            sampler = FastForwardSampler(generator, attr_name=generator_attr_name)

-                # if `CaptureIterableDataset` was available, the sampler should reload its own state.
-                if self._state_dict is not None:
-                    sampler.load_state_dict(self._state_dict[generator_attr_name])
-                # store the samplers
-                self.samplers[generator_attr_name] = sampler
+            # if `CaptureIterableDataset` was available, the sampler should reload its own state.
+            if self._state_dict is not None:
+                sampler.load_state_dict(self._state_dict[generator_attr_name])
+            # store the samplers
+            self.samplers[generator_attr_name] = sampler

-                # replace generator with the generator from the `FastForwardSampler`.
-                dataset_dict[generator_attr_name] = iter(sampler)
+            # replace generator with the generator from the `FastForwardSampler`.
+            dataset_dict[generator_attr_name] = iter(sampler)

         self.reset_on_epoch()
pytorch_lightning/utilities/cloud_io.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
import fsspec
2020
import torch
2121
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem
22-
from packaging.version import Version
2322

2423

2524
def load(
@@ -59,12 +58,6 @@ def atomic_save(checkpoint: Dict[str, Any], filepath: Union[str, Path]) -> None:
5958
"""
6059

6160
bytesbuffer = io.BytesIO()
62-
# Can't use the new zipfile serialization for 1.6.0 because there's a bug in
63-
# torch.hub.load_state_dict_from_url() that prevents it from loading the new files.
64-
# More details can be found here: https://github.com/pytorch/pytorch/issues/42239
65-
if Version(torch.__version__).release[:3] == (1, 6, 0):
66-
torch.save(checkpoint, bytesbuffer, _use_new_zipfile_serialization=False)
67-
else:
68-
torch.save(checkpoint, bytesbuffer)
61+
torch.save(checkpoint, bytesbuffer)
6962
with fsspec.open(filepath, "wb") as f:
7063
f.write(bytesbuffer.getvalue())
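After the change, atomic_save serializes unconditionally and streams the buffer through fsspec. A condensed, self-contained sketch of that simplified path (the function name and checkpoint contents here are illustrative):

```python
import io

import fsspec
import torch


def save_checkpoint(checkpoint: dict, filepath: str) -> None:
    """Serialize to an in-memory buffer first, then write through fsspec (illustrative)."""
    buffer = io.BytesIO()
    # The torch 1.6-only `_use_new_zipfile_serialization=False` workaround is gone;
    # the default zipfile serialization is used unconditionally.
    torch.save(checkpoint, buffer)
    with fsspec.open(filepath, "wb") as f:
        f.write(buffer.getvalue())


save_checkpoint({"state_dict": {"weight": torch.zeros(2)}}, "checkpoint.ckpt")
```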

pytorch_lightning/utilities/imports.py

Lines changed: 1 addition & 2 deletions
@@ -70,7 +70,6 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:

 _IS_WINDOWS = platform.system() == "Windows"
 _IS_INTERACTIVE = hasattr(sys, "ps1")  # https://stackoverflow.com/a/64523765
-_TORCH_GREATER_EQUAL_1_7 = _compare_version("torch", operator.ge, "1.7.0")
 _TORCH_GREATER_EQUAL_1_8 = _compare_version("torch", operator.ge, "1.8.0")
 _TORCH_GREATER_EQUAL_1_8_1 = _compare_version("torch", operator.ge, "1.8.1")
 _TORCH_GREATER_EQUAL_1_9 = _compare_version("torch", operator.ge, "1.9.0")

@@ -112,4 +111,4 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:

 # experimental feature within PyTorch Lightning.
 def _fault_tolerant_training() -> bool:
-    return _TORCH_GREATER_EQUAL_1_7 and int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0))
+    return bool(int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0)))
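Since the supported torch floor now satisfies the old _TORCH_GREATER_EQUAL_1_7 guard by construction, the fault-tolerant switch reduces to parsing the environment variable. A tiny sketch of the same check with an illustrative name:

```python
import os


def fault_tolerant_training_enabled() -> bool:
    """Mirror of the simplified check above (illustrative name)."""
    # "0" (or unset) disables the feature; any non-zero integer enables it.
    return bool(int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0)))


os.environ["PL_FAULT_TOLERANT_TRAINING"] = "1"
print(fault_tolerant_training_enabled())  # True
```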

0 commit comments
