Skip to content

Commit 01ebb1f

Browse files
authored
Avoid changing the current cudnn.benchmark value (#13154)
1 parent 3780407 commit 01ebb1f

File tree

5 files changed

+48
-30
lines changed

5 files changed

+48
-30
lines changed

CHANGELOG.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
9797
- Changed `pytorch_lightning.core.lightning` to `pytorch_lightning.core.module` ([#12740](https://github.com/PyTorchLightning/pytorch-lightning/pull/12740))
9898

9999

100-
-
100+
- Keep `torch.backends.cudnn.benchmark=False` by default (unlike in v1.6.{0-4}) after speed and memory problems depending on the data used. Please consider tuning `Trainer(benchmark)` manually. ([#13154](https://github.com/PyTorchLightning/pytorch-lightning/pull/13154))
101+
102+
103+
- Prevent modification of `torch.backends.cudnn.benchmark` when `Trainer(benchmark=...)` is not set ([#13154](https://github.com/PyTorchLightning/pytorch-lightning/pull/13154))
101104

102105
### Deprecated
103106

docs/source/common/trainer.rst

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -437,21 +437,24 @@ benchmark
437437

438438
|
439439
440-
Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is not set.
441-
This flag sets the ``torch.backends.cudnn.benchmark`` flag. You can read more about its impact
440+
The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to. The value for
441+
``torch.backends.cudnn.benchmark`` set in the current session will be used (``False`` if not manually set).
442+
If :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set to ``True``, this will default to ``False``.
443+
You can read more about the interaction of ``torch.backends.cudnn.benchmark`` and ``torch.backends.cudnn.deterministic``
442444
`here <https://pytorch.org/docs/stable/notes/randomness.html#cuda-convolution-benchmarking>`__
443445

444-
This is likely to increase the speed of your system if your input sizes don't change. However, if they do, then it
445-
might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new
446-
input size is encountered. Read more about it `here <https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936>`__.
446+
Setting this flag to ``True`` can increase the speed of your system if your input sizes don't
447+
change. However, if they do, then it might make your system slower. The CUDNN auto-tuner will try to find the best
448+
algorithm for the hardware when a new input size is encountered. This might also increase the memory usage.
449+
Read more about it `here <https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936>`__.
447450

448451
Example::
449452

450-
# defaults to True if not deterministic (which is False by default)
451-
trainer = Trainer()
453+
# Will use whatever the current value of torch.backends.cudnn.benchmark is, normally False
454+
trainer = Trainer(benchmark=None) # default
452455

453456
# you can overwrite the value
454-
trainer = Trainer(benchmark=False)
457+
trainer = Trainer(benchmark=True)
455458

456459
deterministic
457460
^^^^^^^^^^^^^

pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -148,14 +148,19 @@ def __init__(
148148
A. Class > str
149149
B. Strategy > Accelerator/precision/plugins
150150
"""
151-
if benchmark and deterministic:
152-
rank_zero_warn(
153-
"You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
154-
" torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.",
155-
)
156-
self.benchmark = not deterministic if benchmark is None else benchmark
151+
if deterministic:
152+
if benchmark is None:
153+
# Set benchmark to False to ensure determinism
154+
benchmark = False
155+
elif benchmark:
156+
rank_zero_warn(
157+
"You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
158+
" torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.",
159+
)
157160
# TODO: move to gpu accelerator
158-
torch.backends.cudnn.benchmark = self.benchmark
161+
if benchmark is not None:
162+
torch.backends.cudnn.benchmark = benchmark
163+
self.benchmark = torch.backends.cudnn.benchmark
159164
self.replace_sampler_ddp = replace_sampler_ddp
160165
self._init_deterministic(deterministic)
161166

@@ -215,13 +220,13 @@ def __init__(
215220
# 6. Instantiate Strategy - Part 2
216221
self._lazy_init_strategy()
217222

218-
def _init_deterministic(self, deterministic: Union[bool, _LITERAL_WARN]) -> None:
219-
self.deterministic = deterministic
223+
def _init_deterministic(self, deterministic: Optional[Union[bool, _LITERAL_WARN]]) -> None:
224+
self.deterministic = deterministic or False # default to False if not set
220225
if _TORCH_GREATER_EQUAL_1_11 and deterministic == "warn":
221226
torch.use_deterministic_algorithms(True, warn_only=True)
222227
else:
223-
torch.use_deterministic_algorithms(deterministic)
224-
if deterministic:
228+
torch.use_deterministic_algorithms(self.deterministic)
229+
if self.deterministic:
225230
# fixing non-deterministic part of horovod
226231
# https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
227232
os.environ["HOROVOD_FUSION_THRESHOLD"] = "0"

pytorch_lightning/trainer/trainer.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ def __init__(
171171
resume_from_checkpoint: Optional[Union[Path, str]] = None,
172172
profiler: Optional[Union[Profiler, str]] = None,
173173
benchmark: Optional[bool] = None,
174-
deterministic: Union[bool, _LITERAL_WARN] = False,
174+
deterministic: Optional[Union[bool, _LITERAL_WARN]] = None,
175175
reload_dataloaders_every_n_epochs: int = 0,
176176
auto_lr_find: Union[bool, str] = False,
177177
replace_sampler_ddp: bool = True,
@@ -223,9 +223,11 @@ def __init__(
223223
that only one process at a time can access them.
224224
Default: ``False``.
225225
226-
benchmark: Sets ``torch.backends.cudnn.benchmark``.
227-
Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`
228-
is ``False``. Overwrite to manually set a different value. Default: ``None``.
226+
benchmark: The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to.
227+
The value for ``torch.backends.cudnn.benchmark`` set in the current session will be used
228+
(``False`` if not manually set). If :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set
229+
to ``True``, this will default to ``False``. Override to manually set a different value.
230+
Default: ``None``.
229231
230232
callbacks: Add a callback or list of callbacks.
231233
Default: ``None``.
@@ -249,8 +251,8 @@ def __init__(
249251
250252
deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
251253
Set to ``"warn"`` to use deterministic algorithms whenever possible, throwing warnings on operations
252-
that don't support deterministic mode (requires Pytorch 1.11+).
253-
Default: ``False``.
254+
that don't support deterministic mode (requires PyTorch 1.11+). If not set, defaults to ``False``.
255+
Default: ``None``.
254256
255257
devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,
256258
based on the accelerator type.

tests/trainer/test_trainer.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -639,27 +639,32 @@ def test_trainer_max_steps_accumulate_batches(tmpdir):
639639
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
640640

641641

642+
@pytest.mark.parametrize("cudnn_benchmark", (False, True))
642643
@pytest.mark.parametrize(
643644
["benchmark_", "deterministic", "expected"],
644645
[
645-
(None, False, True),
646+
(None, False, None),
646647
(None, True, False),
648+
(None, None, None),
647649
(True, False, True),
648650
(True, True, True),
649-
(False, True, False),
651+
(True, None, True),
650652
(False, False, False),
653+
(False, True, False),
654+
(False, None, False),
651655
],
652656
)
653-
def test_benchmark_option(benchmark_, deterministic, expected):
657+
def test_benchmark_option(cudnn_benchmark, benchmark_, deterministic, expected):
654658
"""Verify benchmark option."""
655-
656659
original_val = torch.backends.cudnn.benchmark
657660

661+
torch.backends.cudnn.benchmark = cudnn_benchmark
658662
if benchmark_ and deterministic:
659663
with pytest.warns(UserWarning, match="You passed `deterministic=True` and `benchmark=True`"):
660664
trainer = Trainer(benchmark=benchmark_, deterministic=deterministic)
661665
else:
662666
trainer = Trainer(benchmark=benchmark_, deterministic=deterministic)
667+
expected = cudnn_benchmark if expected is None else expected
663668
assert torch.backends.cudnn.benchmark == expected
664669
assert trainer._accelerator_connector.benchmark == expected
665670

0 commit comments

Comments
 (0)