From 1f4c351aa14073ff6c23671ea3f3628b6f16d6c7 Mon Sep 17 00:00:00 2001
From: Danielle Pintz
Date: Mon, 25 Oct 2021 16:38:56 -0700
Subject: [PATCH 1/2] update accelerator to strategy in tests

---
 .../test_accelerator_connector.py | 36 +++++++++----------
 tests/accelerators/test_tpu.py    |  2 +-
 tests/plugins/test_amp_plugins.py |  2 +-
 tests/trainer/test_trainer.py     | 22 ++++++------
 4 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index 7ad93b167794d..c9bd115334618 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -57,7 +57,7 @@ def test_accelerator_choice_cpu(tmpdir):
 
 @pytest.mark.parametrize(("num_processes", "num_nodes"), ([(1, 1), (1, 2), (2, 1), (2, 2)]))
 def test_accelerator_choice_ddp_cpu(tmpdir, num_processes: int, num_nodes: int):
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=num_processes, num_nodes=num_nodes)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_cpu", num_processes=num_processes, num_nodes=num_nodes)
     assert isinstance(trainer.accelerator, CPUAccelerator)
     no_spawn = num_processes == 1 and num_nodes > 1
     assert isinstance(trainer.training_type_plugin, DDPPlugin if no_spawn else DDPSpawnPlugin)
@@ -68,7 +68,7 @@ def test_accelerator_choice_ddp_cpu(tmpdir, num_processes: int, num_nodes: int):
 @mock.patch("torch.cuda.device_count", return_value=2)
 @mock.patch("torch.cuda.is_available", return_value=True)
 def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=1)
     assert isinstance(trainer.accelerator, GPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPPlugin)
     assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)
@@ -78,7 +78,7 @@ def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
 @mock.patch("torch.cuda.device_count", return_value=2)
 @mock.patch("torch.cuda.is_available", return_value=True)
 def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp_spawn", gpus=1)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", gpus=1)
     assert isinstance(trainer.accelerator, GPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)
     assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)
@@ -109,7 +109,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=2, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -141,7 +141,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp2", gpus=2, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -172,7 +172,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=2, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -203,7 +203,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp2", gpus=2, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -225,7 +225,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_cpu", num_processes=2, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -256,7 +256,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=1, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -285,7 +285,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=1, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_cpu", num_processes=1, callbacks=[CB()])
     with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -315,7 +315,7 @@ def on_fit_start(self, trainer, pl_module):
             raise SystemExit()
 
     model = BoringModel()
-    trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2, callbacks=[CB()])
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_cpu", num_processes=2, callbacks=[CB()])
    with pytest.raises(SystemExit):
         trainer.fit(model)
 
@@ -323,13 +323,13 @@ def on_fit_start(self, trainer, pl_module):
 
 @RunIf(special=True)
 def test_accelerator_choice_ddp_cpu_and_plugin(tmpdir):
-    """Test that accelerator="ddp_cpu" can work together with an instance of DDPPlugin."""
+    """Test that strategy="ddp_cpu" can work together with an instance of DDPPlugin."""
     _test_accelerator_choice_ddp_cpu_and_plugin(tmpdir, ddp_plugin_class=DDPPlugin)
 
 
 @RunIf(special=True)
 def test_accelerator_choice_ddp_cpu_and_plugin_spawn(tmpdir):
-    """Test that accelerator="ddp_cpu" can work together with an instance of DDPPSpawnPlugin."""
+    """Test that strategy="ddp_cpu" can work together with an instance of DDPSpawnPlugin."""
     _test_accelerator_choice_ddp_cpu_and_plugin(tmpdir, ddp_plugin_class=DDPSpawnPlugin)
 
 
@@ -340,7 +340,7 @@ def _test_accelerator_choice_ddp_cpu_and_plugin(tmpdir, ddp_plugin_class):
         default_root_dir=tmpdir,
         plugins=[ddp_plugin_class(find_unused_parameters=True)],
         fast_dev_run=True,
-        accelerator="ddp_cpu",
+        strategy="ddp_cpu",
         num_processes=2,
     )
     assert isinstance(trainer.training_type_plugin, ddp_plugin_class)
@@ -374,7 +374,7 @@ def creates_processes_externally(self) -> bool:
             return True
 
     trainer = Trainer(
-        default_root_dir=tmpdir, plugins=[CustomCluster()], fast_dev_run=True, accelerator="ddp_cpu", num_processes=2
+        default_root_dir=tmpdir, plugins=[CustomCluster()], fast_dev_run=True, strategy="ddp_cpu", num_processes=2
     )
     assert isinstance(trainer.accelerator, CPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPPlugin)
@@ -598,7 +598,7 @@ def test_accelerator_gpu_with_gpus_priority():
 
 def test_validate_accelerator_and_devices():
     with pytest.raises(MisconfigurationException, match="You passed `devices=2` but haven't specified"):
-        Trainer(accelerator="ddp_cpu", devices=2)
+        Trainer(strategy="ddp_cpu", devices=2)
 
 
 def test_set_devices_if_none_cpu():
@@ -630,14 +630,14 @@ def test_unsupported_distrib_types_on_cpu(training_type):
 
 
 def test_accelerator_ddp_for_cpu(tmpdir):
-    trainer = Trainer(accelerator="ddp", num_processes=2)
+    trainer = Trainer(strategy="ddp", num_processes=2)
     assert isinstance(trainer.accelerator, CPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPPlugin)
 
 
 def test_exception_when_strategy_used_with_accelerator():
     with pytest.raises(MisconfigurationException, match="but have also passed"):
-        Trainer(accelerator="ddp", strategy="ddp_spawn")
+        Trainer(strategy="ddp", strategy="ddp_spawn")
 
 
 def test_exception_when_strategy_used_with_plugins():
diff --git a/tests/accelerators/test_tpu.py b/tests/accelerators/test_tpu.py
index 98a96d15db245..b07826afe6e52 100644
--- a/tests/accelerators/test_tpu.py
+++ b/tests/accelerators/test_tpu.py
@@ -224,7 +224,7 @@ def on_train_end(self):
 
 @RunIf(tpu=True)
 def test_ddp_cpu_not_supported_on_tpus():
     with pytest.raises(MisconfigurationException, match="`accelerator='ddp_cpu'` is not supported on TPU machines"):
-        Trainer(accelerator="ddp_cpu")
+        Trainer(strategy="ddp_cpu")
 
 
 @RunIf(tpu=True)
diff --git a/tests/plugins/test_amp_plugins.py b/tests/plugins/test_amp_plugins.py
index ed8c653b3a78f..3a0fdbc3b1991 100644
--- a/tests/plugins/test_amp_plugins.py
+++ b/tests/plugins/test_amp_plugins.py
@@ -212,7 +212,7 @@ def test_precision_selection_raises(monkeypatch):
     with mock.patch("torch.cuda.device_count", return_value=1), pytest.raises(
         MisconfigurationException, match="Sharded plugins are not supported with apex"
     ):
-        Trainer(amp_backend="apex", precision=16, gpus=1, accelerator="ddp_fully_sharded")
+        Trainer(amp_backend="apex", precision=16, gpus=1, strategy="ddp_fully_sharded")
 
     import pytorch_lightning.plugins.precision.apex_amp as apex
 
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index a45bf105722cf..8fe3af79db652 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -1149,23 +1149,23 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
             dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
         ),
         (
-            dict(accelerator="ddp", gpus=None),
+            dict(strategy="ddp", gpus=None),
             dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
         ),
         (
-            dict(accelerator="ddp", num_processes=2, gpus=None),
+            dict(strategy="ddp", num_processes=2, gpus=None),
             dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
         ),
         (
-            dict(accelerator="ddp", num_nodes=2, gpus=None),
+            dict(strategy="ddp", num_nodes=2, gpus=None),
             dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
         ),
         (
-            dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
+            dict(strategy="ddp_cpu", num_processes=2, gpus=None),
             dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
         ),
         (
-            dict(accelerator="ddp2", gpus=None),
+            dict(strategy="ddp2", gpus=None),
             dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
         ),
         (
@@ -1177,15 +1177,15 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
             dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
         ),
         (
-            dict(accelerator="ddp", gpus=1),
+            dict(strategy="ddp", gpus=1),
             dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
         ),
         (
-            dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
dict(strategy="ddp_cpu", num_processes=2, gpus=1), dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2), ), ( - dict(accelerator="ddp2", gpus=1), + dict(strategy="ddp2", gpus=1), dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1), ), ( @@ -1197,15 +1197,15 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches): dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1), ), ( - dict(accelerator="ddp", gpus=2), + dict(strategy="ddp", gpus=2), dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2), ), ( - dict(accelerator="ddp2", gpus=2), + dict(strategy="ddp2", gpus=2), dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1), ), ( - dict(accelerator="ddp2", num_processes=2, gpus=None), + dict(strategy="ddp2", num_processes=2, gpus=None), dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2), ), ( From 7d1369c69b7dc2335144deebea30c91e62626ce9 Mon Sep 17 00:00:00 2001 From: Danielle Pintz Date: Mon, 25 Oct 2021 17:18:38 -0700 Subject: [PATCH 2/2] fix --- tests/accelerators/test_accelerator_connector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py index c9bd115334618..c2b95a7f8f053 100644 --- a/tests/accelerators/test_accelerator_connector.py +++ b/tests/accelerators/test_accelerator_connector.py @@ -637,7 +637,7 @@ def test_accelerator_ddp_for_cpu(tmpdir): def test_exception_when_strategy_used_with_accelerator(): with pytest.raises(MisconfigurationException, match="but have also passed"): - Trainer(strategy="ddp", strategy="ddp_spawn") + Trainer(accelerator="ddp", strategy="ddp_spawn") def test_exception_when_strategy_used_with_plugins():