From 8bc88a48da078635d8f0942aaed938a86d96ed9d Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Wed, 16 Feb 2022 19:36:45 +0530 Subject: [PATCH 01/16] Thanks to @carmocca :tada: --- docs/source/common/trainer.rst | 8 +++-- .../connectors/accelerator_connector.py | 4 +-- pytorch_lightning/trainer/trainer.py | 5 +-- tests/trainer/test_trainer.py | 31 +++++++++++-------- 4 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 2126472b95169..e12b34b84421f 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -416,7 +416,8 @@ benchmark | -If true enables cudnn.benchmark. +Defaults to ``True`` if :attr:`~pytorch_lightning.trainer.Trainer.deterministic` is not set. + This flag is likely to increase the speed of your system if your input sizes don't change. However, if it does, then it will likely make your system slower. @@ -427,7 +428,10 @@ algorithm for the hardware `[see discussion here] Example:: - # default used by the Trainer + # defaults to True if not deterministic (which is False by default) + trainer = Trainer() + + # you can overwrite the value trainer = Trainer(benchmark=False) deterministic diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py index fd65975618f02..e913c1acfde15 100644 --- a/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -94,7 +94,7 @@ def __init__( gpu_ids, num_nodes, sync_batchnorm, - benchmark, + benchmark: Optional[bool], replace_sampler_ddp, deterministic: bool, precision, @@ -122,7 +122,7 @@ def __init__( self.ipus = ipus self.num_nodes = num_nodes self.sync_batchnorm = sync_batchnorm - self.benchmark = benchmark + self.benchmark = not deterministic if benchmark is None else benchmark self.replace_sampler_ddp = replace_sampler_ddp if not PrecisionType.supported_type(precision): raise MisconfigurationException( diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 4b68adbdf31b7..36bcc30f97456 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -174,7 +174,7 @@ def __init__( num_sanity_val_steps: int = 2, resume_from_checkpoint: Optional[Union[Path, str]] = None, profiler: Optional[Union[BaseProfiler, str]] = None, - benchmark: bool = False, + benchmark: Optional[bool] = None, deterministic: bool = False, reload_dataloaders_every_n_epochs: int = 0, auto_lr_find: Union[bool, str] = False, @@ -225,7 +225,8 @@ def __init__( GPUs are configured to be in "exclusive mode", such that only one process at a time can access them. - benchmark: If true enables cudnn.benchmark. + benchmark: Defaults to ``True`` if torch.backends.cudnn.benchmark is disabled. + Overwrite manually to set a different value. callbacks: Add a callback or list of callbacks. 
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 587ff0b7b9f72..35a980057d737 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -637,23 +637,28 @@ def test_trainer_max_steps_accumulate_batches(tmpdir): assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps" -def test_benchmark_option(tmpdir): +@pytest.mark.parametrize([ + "benchmark", + "deterministic", + "expected" + ], [ + (None, False, True), + (None, True, False), + (True, False, True), + (True, True, True), + (False, True, False), + (False, False, False) + ] +) +def test_benchmark_option(benchmark, deterministic, expected): """Verify benchmark option.""" - model = BoringModel() - - # verify torch.backends.cudnn.benchmark is not turned on - assert not torch.backends.cudnn.benchmark + original_val = torch.backends.cudnn.benchmark - # fit model - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, benchmark=True) - trainer.fit(model) - - # verify training completed - assert trainer.state.finished, f"Training failed with {trainer.state}" + trainer = Trainer(benchmark=benchmark, deterministic=deterministic) + assert torch.backends.cudnn.benchmark == expected - # verify torch.backends.cudnn.benchmark is not turned off - assert torch.backends.cudnn.benchmark + torch.backends.cudnn.benchmark = original_val @pytest.mark.parametrize("ckpt_path", (None, "best", "specific")) From ab4067cf10e7e3c203078b2502248a90c554d327 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 16 Feb 2022 14:12:06 +0000 Subject: [PATCH 02/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- docs/source/common/trainer.rst | 2 +- tests/trainer/test_trainer.py | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index e12b34b84421f..9ed9376ed3c7c 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -431,7 +431,7 @@ Example:: # defaults to True if not deterministic (which is False by default) trainer = Trainer() - # you can overwrite the value + # you can overwrite the value trainer = Trainer(benchmark=False) deterministic diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 35a980057d737..71cfcdd39fbf6 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -637,18 +637,16 @@ def test_trainer_max_steps_accumulate_batches(tmpdir): assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps" -@pytest.mark.parametrize([ - "benchmark", - "deterministic", - "expected" - ], [ +@pytest.mark.parametrize( + ["benchmark", "deterministic", "expected"], + [ (None, False, True), (None, True, False), (True, False, True), (True, True, True), (False, True, False), - (False, False, False) - ] + (False, False, False), + ], ) def test_benchmark_option(benchmark, deterministic, expected): """Verify benchmark option.""" @@ -658,7 +656,7 @@ def test_benchmark_option(benchmark, deterministic, expected): trainer = Trainer(benchmark=benchmark, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected - torch.backends.cudnn.benchmark = original_val + torch.backends.cudnn.benchmark = original_val @pytest.mark.parametrize("ckpt_path", (None, "best", "specific")) From 462649289a079477c51b3726b24884944f2b15a8 Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: 
Wed, 16 Feb 2022 19:43:40 +0530 Subject: [PATCH 03/16] Add entry to the changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2782a4cb1d9f1..0ca17c39104a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Make `benchmark` flag optional, based on deterministic flag. ([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944)) + + - Add new `DETAIL` log level to provide useful logs for improving monitoring and debugging of batch jobs From ed4db5715d4ccc4936cee04d3782f805d8f2110f Mon Sep 17 00:00:00 2001 From: Carlos Mocholi Date: Wed, 16 Feb 2022 16:09:33 +0100 Subject: [PATCH 04/16] Improve docs --- docs/source/common/trainer.rst | 14 ++++++-------- pytorch_lightning/trainer/trainer.py | 5 +++-- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 9ed9376ed3c7c..61d1cf521f6e3 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -416,15 +416,13 @@ benchmark | -Defaults to ``True`` if :attr:`~pytorch_lightning.trainer.Trainer.deterministic` is not set. +Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is not set. +This flag sets the ``torch.backends.cudnn.benchmark`` flag. You can read more about its impact +`here `__ -This flag is likely to increase the speed of your system if your -input sizes don't change. However, if it does, then it will likely -make your system slower. - -The speedup comes from allowing the cudnn auto-tuner to find the best -algorithm for the hardware `[see discussion here] -`_. +This is likely to increase the speed of your system if your input sizes don't change. However, if it does, then it +might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new +input size is encountered, `ref `__. Example:: diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 36bcc30f97456..f2a0db176a7a4 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -225,8 +225,9 @@ def __init__( GPUs are configured to be in "exclusive mode", such that only one process at a time can access them. - benchmark: Defaults to ``True`` if torch.backends.cudnn.benchmark is disabled. - Overwrite manually to set a different value. + benchmark: Sets ``torch.backends.cudnn.benchmark``. + Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is ``False``. + Overwrite to manually set a different value. callbacks: Add a callback or list of callbacks. From 39b771c5b09194679498b95b14ef51ffa7f749e1 Mon Sep 17 00:00:00 2001 From: Carlos Mocholi Date: Wed, 16 Feb 2022 16:12:13 +0100 Subject: [PATCH 05/16] Singular to plural --- docs/source/common/trainer.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 61d1cf521f6e3..6eb77e249c05f 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -420,7 +420,7 @@ Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.determinis This flag sets the ``torch.backends.cudnn.benchmark`` flag. You can read more about its impact `here `__ -This is likely to increase the speed of your system if your input sizes don't change.
However, if it does, then it +This is likely to increase the speed of your system if your input sizes don't change. However, if they do, then it might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new input size is encountered, `ref `__. From 8d3275cf4ab8573db04324dc8093d6c249a2538c Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Thu, 17 Feb 2022 08:55:23 +0530 Subject: [PATCH 06/16] Update tests/trainer/test_trainer.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- tests/trainer/test_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 71cfcdd39fbf6..c5d76ad1dd576 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -653,7 +653,7 @@ def test_benchmark_option(benchmark, deterministic, expected): original_val = torch.backends.cudnn.benchmark - trainer = Trainer(benchmark=benchmark, deterministic=deterministic) + _ = Trainer(benchmark=benchmark, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected torch.backends.cudnn.benchmark = original_val From 0fb524e3686cada67df18dad91380b506dd5b52c Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Thu, 17 Feb 2022 13:38:29 +0530 Subject: [PATCH 07/16] Change entry to 'Changed' --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ca17c39104a8..9e695c0530e2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Make `benchmark` flag optional, based on deterministic flag. ([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944)) - - - Add new `DETAIL` log level to provide useful logs for improving monitoring and debugging of batch jobs @@ -116,6 +113,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Make `benchmark` flag optional, based on deterministic flag. 
([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944)) + + - Implemented a new native and rich format in `_print_results` method of the `EvaluationLoop` ([#11332](https://github.com/PyTorchLightning/pytorch-lightning/pull/11332)) From 58034a5574a33770280775a3622fa3618b64b932 Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Fri, 18 Feb 2022 10:17:34 +0530 Subject: [PATCH 08/16] Minor, default to None --- pytorch_lightning/trainer/connectors/accelerator_connector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py index a51d7f89f94ce..6b3c98545eb50 100644 --- a/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -94,7 +94,7 @@ def __init__( amp_type: str = "native", amp_level: Optional[str] = None, sync_batchnorm: bool = False, - benchmark: Optional[bool], + benchmark: Optional[bool] = None, replace_sampler_ddp: bool = True, deterministic: bool = False, num_processes: Optional[int] = None, # deprecated From d3dcdee07ebec52438900207109766b4e86a41e2 Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Mon, 21 Feb 2022 21:24:04 +0530 Subject: [PATCH 09/16] Update CHANGELOG.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Adrian Wälchli --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c252f78079b32..7267e9b30f096 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,7 +119,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Make `benchmark` flag optional, based on deterministic flag. ([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944)) +- Make `benchmark` flag optional and set its value based on the deterministic flag ([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944)) - Implemented a new native and rich format in `_print_results` method of the `EvaluationLoop` ([#11332](https://github.com/PyTorchLightning/pytorch-lightning/pull/11332)) From 2fe07fa9e2ae610d35d6b31bf17158740c9218f4 Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Wed, 23 Feb 2022 10:13:06 +0530 Subject: [PATCH 10/16] Raise a warning when benchmark and deterministic are True --- .../trainer/connectors/accelerator_connector.py | 7 +++++++ tests/trainer/test_trainer.py | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py index 6b3c98545eb50..846d8ea24cea4 100644 --- a/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -137,6 +137,13 @@ def __init__( B. Strategy > Accelerator/precision/plugins C. TODO When multiple flag set to the same thing """ + # Raise a warning when benchmark and deterministic are True + if benchmark and deterministic: + rank_zero_warn( + "You passed `deterministic=True` and `benchmark=True`. 
Note that PyTorch ignores" " torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.", category=RuntimeWarning, ) self.benchmark = not deterministic if benchmark is None else benchmark # TODO: move to gpu accelerator torch.backends.cudnn.benchmark = self.benchmark diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 2aa0a04ff805e..ed6505c7a6791 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -653,7 +653,11 @@ def test_benchmark_option(benchmark, deterministic, expected): original_val = torch.backends.cudnn.benchmark - _ = Trainer(benchmark=benchmark, deterministic=deterministic) + if benchmark and deterministic: + with pytest.warns(RuntimeWarning, match="You passed `deterministic=True` and `benchmark=True`"): + _ = Trainer(benchmark=benchmark, deterministic=deterministic) + else: + _ = Trainer(benchmark=benchmark, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected torch.backends.cudnn.benchmark = original_val From 7fb9ed686c33899b01b4ce1e61758782f2f619c3 Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Wed, 23 Feb 2022 18:46:58 +0530 Subject: [PATCH 11/16] Apply suggestions from code review Co-authored-by: Rohit Gupta --- docs/source/common/trainer.rst | 2 +- pytorch_lightning/trainer/trainer.py | 2 +- tests/trainer/test_trainer.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index faefae8590704..7cfd02630aecc 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -422,7 +422,7 @@ This flag sets the ``torch.backends.cudnn.benchmark`` flag. You can read more ab This is likely to increase the speed of your system if your input sizes don't change. However, if they do, then it might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new -input size is encountered, `ref `__. +input size is encountered. Read more about it `here `_. Example:: diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index f8e436045d3ee..e7bd2214174fc 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -229,7 +229,7 @@ def __init__( Default: ``False``. benchmark: Sets ``torch.backends.cudnn.benchmark``. - Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is ``False``. + Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic` is ``False``. Overwrite to manually set a different value. Default: ``None``. callbacks: Add a callback or list of callbacks.
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index ae540f11e0593..d47cea14002d0 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -656,10 +656,11 @@ def test_benchmark_option(benchmark, deterministic, expected): if benchmark and deterministic: with pytest.warns(RuntimeWarning, match="You passed `deterministic=True` and `benchmark=True`"): - _ = Trainer(benchmark=benchmark, deterministic=deterministic) + trainer = Trainer(benchmark=benchmark, deterministic=deterministic) else: - _ = Trainer(benchmark=benchmark, deterministic=deterministic) + trainer = Trainer(benchmark=benchmark, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected + assert trainer.benchmark == expected torch.backends.cudnn.benchmark = original_val From c2cbf0f7e75b0800211d610f5f3f6fcbe7ea2e48 Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Wed, 23 Feb 2022 19:01:43 +0530 Subject: [PATCH 12/16] Apply suggestions from code review --- tests/trainer/test_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index d47cea14002d0..3dd71e87758fa 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -660,7 +660,7 @@ def test_benchmark_option(benchmark, deterministic, expected): else: trainer = Trainer(benchmark=benchmark, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected - assert trainer.benchmark == expected + assert trainer._accelerator_connector.benchmark == expected torch.backends.cudnn.benchmark = original_val From f4a96c4f4dd3a4a5b5fdb0eef8cff5fab86446da Mon Sep 17 00:00:00 2001 From: Kushashwa Ravi Shrimali Date: Wed, 23 Feb 2022 19:24:03 +0530 Subject: [PATCH 13/16] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- docs/source/common/trainer.rst | 2 +- pytorch_lightning/trainer/connectors/accelerator_connector.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 7cfd02630aecc..ed5ec193db603 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -422,7 +422,7 @@ This flag sets the ``torch.backends.cudnn.benchmark`` flag. You can read more ab This is likely to increase the speed of your system if your input sizes don't change. However, if they do, then it might make your system slower. The CUDNN auto-tuner will try to find the best algorithm for the hardware when a new -input size is encountered. Read more about it `here `_. +input size is encountered. Read more about it `here `__. Example:: diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py index 592e8d56f7da8..98802e224737c 100644 --- a/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -142,12 +142,10 @@ def __init__( B. Strategy > Accelerator/precision/plugins C. TODO When multiple flag set to the same thing """ - # Raise a warning when benchmark and deterministic are True if benchmark and deterministic: rank_zero_warn( "You passed `deterministic=True` and `benchmark=True`.
Note that PyTorch ignores" " torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.", - category=RuntimeWarning, ) self.benchmark = not deterministic if benchmark is None else benchmark # TODO: move to gpu accelerator From 30739d4c0c49bb1807d910a6e285524988d2111e Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Wed, 23 Feb 2022 20:06:52 +0530 Subject: [PATCH 14/16] Update tests/trainer/test_trainer.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- tests/trainer/test_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 3dd71e87758fa..7c827720d8a97 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -655,7 +655,7 @@ def test_benchmark_option(benchmark, deterministic, expected): original_val = torch.backends.cudnn.benchmark if benchmark and deterministic: - with pytest.warns(RuntimeWarning, match="You passed `deterministic=True` and `benchmark=True`"): + with pytest.warns(UserWarning, match="You passed `deterministic=True` and `benchmark=True`"): trainer = Trainer(benchmark=benchmark, deterministic=deterministic) else: trainer = Trainer(benchmark=benchmark, deterministic=deterministic) From 1d5b9285d98d02bda33ea45ed2db7ab70c0746ae Mon Sep 17 00:00:00 2001 From: rohitgr7 Date: Wed, 23 Feb 2022 20:10:59 +0530 Subject: [PATCH 15/16] pre-commit --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index e7bd2214174fc..73949912cbd7b 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -229,8 +229,8 @@ def __init__( Default: ``False``. benchmark: Sets ``torch.backends.cudnn.benchmark``. - Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic` is ``False``. - Overwrite to manually set a different value. Default: ``None``. + Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic` + is ``False``. Overwrite to manually set a different value. Default: ``None``. callbacks: Add a callback or list of callbacks. Default: ``None``.
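The final patch below renames the parametrized argument from ``benchmark`` to ``benchmark_``. The subject line only says it avoids a conflict with a pytest plugin; a plausible reading is that a plugin-provided fixture named ``benchmark`` (pytest-benchmark ships one) collides with a parametrized argument of the same name. A minimal sketch of that kind of clash and the rename that sidesteps it, with the fixture body as an illustrative stand-in rather than the plugin's real implementation::

    import pytest

    @pytest.fixture
    def benchmark():
        # Stand-in for the fixture a plugin such as pytest-benchmark provides.
        return object()

    # Parametrizing an argument named "benchmark" would override that fixture,
    # which plugins may not expect; the trailing underscore avoids the collision.
    @pytest.mark.parametrize("benchmark_", [None, True, False])
    def test_benchmark_option(benchmark_):
        assert benchmark_ in (None, True, False)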
From 542595230348f6a3c20db87281ea1b1ea92791f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 24 Feb 2022 19:26:44 +0100 Subject: [PATCH 16/16] Avoid conflict with pytest plugin --- tests/trainer/test_trainer.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index c222145a02a82..194ddd458e5ab 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -639,7 +639,7 @@ def test_trainer_max_steps_accumulate_batches(tmpdir): @pytest.mark.parametrize( - ["benchmark", "deterministic", "expected"], + ["benchmark_", "deterministic", "expected"], [ (None, False, True), (None, True, False), (True, False, True), (True, True, True), (False, True, False), (False, False, False), ], ) -def test_benchmark_option(benchmark, deterministic, expected): +def test_benchmark_option(benchmark_, deterministic, expected): """Verify benchmark option.""" original_val = torch.backends.cudnn.benchmark - if benchmark and deterministic: + if benchmark_ and deterministic: with pytest.warns(UserWarning, match="You passed `deterministic=True` and `benchmark=True`"): - trainer = Trainer(benchmark=benchmark, deterministic=deterministic) + trainer = Trainer(benchmark=benchmark_, deterministic=deterministic) else: - trainer = Trainer(benchmark=benchmark, deterministic=deterministic) + trainer = Trainer(benchmark=benchmark_, deterministic=deterministic) assert torch.backends.cudnn.benchmark == expected assert trainer._accelerator_connector.benchmark == expected
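Taken together, the series leaves ``Trainer(benchmark=...)`` resolving as follows: an unset (``None``) value follows ``deterministic``, an explicit value always wins, and passing both ``benchmark=True`` and ``deterministic=True`` emits a warning. A self-contained sketch of that logic, assuming a helper name ``resolve_benchmark`` and using the stdlib ``warnings`` module in place of Lightning's ``rank_zero_warn`` (neither is Lightning's actual API)::

    import warnings
    from typing import Optional

    def resolve_benchmark(benchmark: Optional[bool], deterministic: bool) -> bool:
        # Mirrors the resolution the patches add to AcceleratorConnector; Lightning
        # then assigns the result to torch.backends.cudnn.benchmark.
        if benchmark and deterministic:
            warnings.warn(
                "You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
                " torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True."
            )
        # None means "unset": follow `deterministic`; an explicit bool always wins.
        return not deterministic if benchmark is None else benchmark

    # The six cases covered by test_benchmark_option:
    assert resolve_benchmark(None, False) is True
    assert resolve_benchmark(None, True) is False
    assert resolve_benchmark(True, False) is True
    assert resolve_benchmark(True, True) is True  # also emits the warning above
    assert resolve_benchmark(False, True) is False
    assert resolve_benchmark(False, False) is False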