diff --git a/CHANGELOG.md b/CHANGELOG.md
index bba4b8ff892bd..5912aa24d75ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -708,6 +708,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Fixed passing `_ddp_params_and_buffers_to_ignore` ([#11949](https://github.com/PyTorchLightning/pytorch-lightning/pull/11949))
 
+- Prevent modification of `torch.backends.cudnn.benchmark` when `benchmark` is not set on the `Trainer` ([#12020](https://github.com/PyTorchLightning/pytorch-lightning/pull/12020))
+
+
 - Fixed an `AttributeError` when calling `save_hyperparameters` and no parameters need saving ([#11827](https://github.com/PyTorchLightning/pytorch-lightning/pull/11827))
diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst
index 5dbf795ba6b68..21402889153d7 100644
--- a/docs/source/common/trainer.rst
+++ b/docs/source/common/trainer.rst
@@ -416,22 +416,29 @@ benchmark
 |
 
-Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is not set.
-This flag sets the ``torch.backends.cudnn.deterministic`` flag. You can read more about its impact
+The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to. If neither this flag nor
+:paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set, the value for
+``torch.backends.cudnn.benchmark`` set in the current session will be used. If
+:paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set to ``True``, this will default to ``False``.
+You can read more about the interaction of ``torch.backends.cudnn.benchmark`` and ``torch.backends.cudnn.deterministic`` `here `__.
 
-This is likely to increase the speed of your system if your input sizes don't change. However, if they do, then it
-might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new
-input size is encountered. Read more about it `here `__.
+Setting this flag to ``True`` is likely to increase the speed of your system if your input sizes don't
+change. However, if they do, then it might make your system slower. The CUDNN auto-tuner will try to find the best
+algorithm for the hardware when a new input size is encountered. Read more about it
+`here `__.
 
 Example::
 
-    # defaults to True if not deterministic (which is False by default)
-    trainer = Trainer()
+    # default used by the Trainer (uses whatever the current value of torch.backends.cudnn.benchmark is)
+    trainer = Trainer(benchmark=None)
 
     # you can overwrite the value
     trainer = Trainer(benchmark=False)
 
+    # `benchmark` defaults to False when deterministic is True
+    trainer = Trainer(deterministic=True)
+
 deterministic
 ^^^^^^^^^^^^^
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index b31f598d8277f..2974498980b9c 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -101,7 +101,7 @@ def __init__(
         sync_batchnorm: bool = False,
         benchmark: Optional[bool] = None,
         replace_sampler_ddp: bool = True,
-        deterministic: bool = False,
+        deterministic: Optional[bool] = None,
         num_processes: Optional[int] = None,  # deprecated
         tpu_cores: Optional[Union[List[int], int]] = None,  # deprecated
         ipus: Optional[int] = None,  # deprecated
@@ -147,9 +147,13 @@ def __init__(
                 "You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
                " torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.",
            )
-        self.benchmark = not deterministic if benchmark is None else benchmark
+        if deterministic and benchmark is None:
+            # Set benchmark to False to ensure determinism
+            benchmark = False
         # TODO: move to gpu accelerator
-        torch.backends.cudnn.benchmark = self.benchmark
+        if benchmark is not None:
+            torch.backends.cudnn.benchmark = benchmark
+        self.benchmark = torch.backends.cudnn.benchmark
         self.replace_sampler_ddp = replace_sampler_ddp
         self._init_deterministic(deterministic)
@@ -211,12 +215,13 @@ def __init__(
         self._lazy_init_strategy()
 
     def _init_deterministic(self, deterministic: bool) -> None:
-        self.deterministic = deterministic
+        # Default to False if not set
+        self.deterministic = deterministic or False
         if _TORCH_GREATER_EQUAL_1_8:
-            torch.use_deterministic_algorithms(deterministic)
+            torch.use_deterministic_algorithms(self.deterministic)
         else:
-            torch.set_deterministic(deterministic)
-        if deterministic:
+            torch.set_deterministic(self.deterministic)
+        if self.deterministic:
             # fixing non-deterministic part of horovod
             # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
             os.environ["HOROVOD_FUSION_THRESHOLD"] = "0"
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 4c01e58e9e3d5..894efbbd4e3a0 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -174,7 +174,7 @@ def __init__(
         resume_from_checkpoint: Optional[Union[Path, str]] = None,
         profiler: Optional[Union[BaseProfiler, str]] = None,
         benchmark: Optional[bool] = None,
-        deterministic: bool = False,
+        deterministic: Optional[bool] = None,
         reload_dataloaders_every_n_epochs: int = 0,
         auto_lr_find: Union[bool, str] = False,
         replace_sampler_ddp: bool = True,
@@ -230,8 +230,11 @@ def __init__(
             Default: ``False``.
 
         benchmark: Sets ``torch.backends.cudnn.benchmark``.
-            Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`
-            is ``False``. Overwrite to manually set a different value. Default: ``None``.
+            The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to. If not specified, the
+            value set in the current session will be used. However, if
+            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic` is ``True``, ``benchmark`` defaults
+            to ``False`` to ensure determinism. Override to manually set a different value.
+            Default: ``None``.
 
         callbacks: Add a callback or list of callbacks.
             Default: ``None``.
@@ -260,7 +263,8 @@ def __init__(
             Default: ``False``.
 
         deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
-            Default: ``False``.
+            If not set, defaults to ``False``.
+            Default: ``None``.
 
         devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,
             based on the accelerator type.
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 064c02660aaeb..9112bebbf75f6 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -641,12 +641,15 @@ def test_trainer_max_steps_accumulate_batches(tmpdir):
 @pytest.mark.parametrize(
     ["benchmark_", "deterministic", "expected"],
     [
-        (None, False, True),
+        (None, False, None),
         (None, True, False),
+        (None, None, None),
         (True, False, True),
         (True, True, True),
-        (False, True, False),
+        (True, None, True),
         (False, False, False),
+        (False, True, False),
+        (False, None, False),
     ],
 )
 def test_benchmark_option(benchmark_, deterministic, expected):
@@ -659,6 +662,7 @@ def test_benchmark_option(benchmark_, deterministic, expected):
             trainer = Trainer(benchmark=benchmark_, deterministic=deterministic)
     else:
         trainer = Trainer(benchmark=benchmark_, deterministic=deterministic)
+    expected = original_val if expected is None else expected
     assert torch.backends.cudnn.benchmark == expected
     assert trainer._accelerator_connector.benchmark == expected
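
For reference, the resolution logic these changes introduce can be exercised in isolation. The sketch below only assumes `torch`; the helper name `resolve_cudnn_benchmark` is illustrative and not part of the Lightning API, it simply mirrors what the accelerator connector now does with the `benchmark` and `deterministic` arguments (which `Trainer(benchmark=..., deterministic=...)` forwards)::

    import torch

    def resolve_cudnn_benchmark(benchmark=None, deterministic=None):
        # `deterministic=True` with no explicit `benchmark` forces False to preserve determinism
        if deterministic and benchmark is None:
            benchmark = False
        # the global flag is only touched when an explicit value was resolved
        if benchmark is not None:
            torch.backends.cudnn.benchmark = benchmark
        # the connector stores whatever the flag ends up being
        return torch.backends.cudnn.benchmark

    # with the session flag left at PyTorch's default (False):
    assert resolve_cudnn_benchmark() is False                     # flag left untouched
    assert resolve_cudnn_benchmark(benchmark=True) is True        # explicit value wins
    assert resolve_cudnn_benchmark(deterministic=True) is False   # determinism forces False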