diff --git a/CHANGELOG.md b/CHANGELOG.md
index d9cab7a63eca6..1d7573af02513 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -177,7 +177,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 ### Fixed

-- Set better defaults for `rank_zero_only.rank` when training is launched with SLURM and torchelastic ([#6802](https://github.com/PyTorchLightning/pytorch-lightning/pull/6802/))
+- Set better defaults for `rank_zero_only.rank` when training is launched with SLURM and torchelastic:
+    * Support SLURM and torchelastic global rank environment variables ([#5715](https://github.com/PyTorchLightning/pytorch-lightning/pull/5715))
+    * Remove hardcoding of local rank in accelerator connector ([#6878](https://github.com/PyTorchLightning/pytorch-lightning/pull/6878))

 - Sanitize `None` params during pruning ([#6836](https://github.com/PyTorchLightning/pytorch-lightning/pull/6836))

diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index 1e00d33cdf05a..aa52ec1c40d82 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -59,7 +59,6 @@
     device_parser,
     DeviceType,
     DistributedType,
-    rank_zero_only,
 )
 from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -112,12 +111,6 @@ def __init__(
         self._training_type_plugin: Optional[TrainingTypePlugin] = None
         self._cluster_environment: Optional[ClusterEnvironment] = None

-        # init the default rank if exists
-        # we need to call this here or NVIDIA flags and other messaging in init will show on all ranks
-        # this way we only show it on rank 0
-        if "LOCAL_RANK" in os.environ:
-            rank_zero_only.rank = int(os.environ["LOCAL_RANK"])
-
         # for gpus allow int, string and gpu list
         if auto_select_gpus and isinstance(gpus, int):
            self.gpus = pick_multiple_gpus(gpus)
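For context, a minimal sketch of the direction these changes take (per #5715): instead of hardcoding `LOCAL_RANK` in the accelerator connector, the default for `rank_zero_only.rank` is derived from whichever rank environment variable the launcher exports. The helper name `_default_rank` and the exact set/order of environment variables below are assumptions for illustration; they are not shown in this diff.

```python
# Illustrative sketch only: `_default_rank` and the env-var priority are assumptions
# about the #5715 behaviour, not code taken from this diff.
import os

from pytorch_lightning.utilities import rank_zero_only


def _default_rank() -> int:
    # Prefer the global rank exported by torchelastic (RANK) or SLURM (SLURM_PROCID),
    # falling back to LOCAL_RANK, so rank-zero-only messages print once per job
    # rather than once per node.
    for key in ("RANK", "SLURM_PROCID", "LOCAL_RANK"):
        rank = os.environ.get(key)
        if rank is not None:
            return int(rank)
    return 0


# Set the default once, instead of hardcoding LOCAL_RANK in the accelerator connector.
rank_zero_only.rank = _default_rank()
```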