diff --git a/benchmarks/test_basic_parity.py b/benchmarks/test_basic_parity.py
index 2144be39394cb..7d3ad6af4bac3 100644
--- a/benchmarks/test_basic_parity.py
+++ b/benchmarks/test_basic_parity.py
@@ -160,8 +160,8 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
         max_epochs=num_epochs if idx > 0 else 1,
         enable_progress_bar=False,
         enable_model_summary=False,
+        enable_checkpointing=False,
         gpus=1 if device_type == "cuda" else 0,
-        checkpoint_callback=False,
         logger=False,
         replace_sampler_ddp=False,
     )
diff --git a/pl_examples/basic_examples/mnist_datamodule.py b/pl_examples/basic_examples/mnist_datamodule.py
index 1d2371c702ce0..335a36f0a3380 100644
--- a/pl_examples/basic_examples/mnist_datamodule.py
+++ b/pl_examples/basic_examples/mnist_datamodule.py
@@ -81,7 +81,6 @@ def __init__(
             )
             num_workers = 0

-        self.dims = (1, 28, 28)
         self.data_dir = data_dir
         self.val_split = val_split
         self.num_workers = num_workers
@@ -90,7 +89,6 @@ def __init__(
         self.batch_size = batch_size
         self.dataset_train = ...
         self.dataset_val = ...
-        self.test_transforms = self.default_transforms

     @property
     def num_classes(self):
diff --git a/pytorch_lightning/utilities/enums.py b/pytorch_lightning/utilities/enums.py
index 18b0336b82d5f..cbb4f68bedfac 100644
--- a/pytorch_lightning/utilities/enums.py
+++ b/pytorch_lightning/utilities/enums.py
@@ -103,13 +103,6 @@ def supported_types() -> List[str]:
 class DistributedType(LightningEnum, metaclass=_OnAccessEnumMeta):
     """Define type of training strategy.

-    >>> # you can match the type with string
-    >>> DistributedType.DDP == 'ddp'
-    True
-    >>> # which is case invariant
-    >>> DistributedType.DDP2 in ('ddp2', )
-    True
-
     Deprecated since v1.6.0 and will be removed in v1.8.0. Use `_StrategyType` instead.
diff --git a/setup.cfg b/setup.cfg
index 9d63c0e556341..4494537280e79 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,10 +23,17 @@ python_files = test_*.py
 # doctest_plus = disabled
 addopts =
-    --strict
+    --strict-markers
     --doctest-modules
     --color=yes
     --disable-pytest-warnings
+filterwarnings =
+    # error out on our deprecation warnings - ensures the code and tests are kept up-to-date
+    error::pytorch_lightning.utilities.warnings.LightningDeprecationWarning
+    # warnings from deprecated modules on import
+    # TODO: remove in 1.7
+    ignore::pytorch_lightning.utilities.warnings.LightningDeprecationWarning:pytorch_lightning.core.decorators
+    ignore::pytorch_lightning.utilities.warnings.LightningDeprecationWarning:pytorch_lightning.core.memory
 junit_duration_report = call
diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index d005c48757330..6078a92d24f6d 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -343,7 +343,7 @@ def test_accelerator_choice_ddp_cpu_and_strategy(tmpdir):
     _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPPlugin)


-@RunIf(skip_windows=True)
+@RunIf(skip_windows=True, skip_49370=True)
 def test_accelerator_choice_ddp_cpu_and_strategy_spawn(tmpdir):
     """Test that accelerator="ddp_cpu" can work together with an instance of DDPPSpawnPlugin."""
     _test_accelerator_choice_ddp_cpu_and_strategy(tmpdir, ddp_strategy_class=DDPSpawnPlugin)
diff --git a/tests/accelerators/test_cpu.py b/tests/accelerators/test_cpu.py
index b4d00fef3626b..41e73431495b9 100644
--- a/tests/accelerators/test_cpu.py
+++ b/tests/accelerators/test_cpu.py
@@ -35,7 +35,7 @@ def setup_optimizers_in_pre_dispatch(self) -> bool:
             return delay_dispatch

     model = TestModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=CustomPlugin(device=torch.device("cpu")))
+    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, strategy=CustomPlugin(device=torch.device("cpu")))
     trainer.fit(model)
diff --git a/tests/callbacks/test_gpu_stats_monitor.py b/tests/callbacks/test_gpu_stats_monitor.py
index 5ed3f533b5588..d2e454dc0fc11 100644
--- a/tests/callbacks/test_gpu_stats_monitor.py
+++ b/tests/callbacks/test_gpu_stats_monitor.py
@@ -31,7 +31,8 @@ def test_gpu_stats_monitor(tmpdir):
     """Test GPU stats are logged using a logger."""
     model = BoringModel()
-    gpu_stats = GPUStatsMonitor(intra_step_time=True)
+    with pytest.deprecated_call(match="GPUStatsMonitor` callback was deprecated in v1.5"):
+        gpu_stats = GPUStatsMonitor(intra_step_time=True)
     logger = CSVLogger(tmpdir)
     log_every_n_steps = 2

@@ -65,12 +66,13 @@ def test_gpu_stats_monitor_no_queries(tmpdir):
     """Test GPU logger doesn't fail if no "nvidia-smi" queries are to be performed."""
     model = BoringModel()
-    gpu_stats = GPUStatsMonitor(
-        memory_utilization=False,
-        gpu_utilization=False,
-        intra_step_time=True,
-        inter_step_time=True,
-    )
+    with pytest.deprecated_call(match="GPUStatsMonitor` callback was deprecated in v1.5"):
+        gpu_stats = GPUStatsMonitor(
+            memory_utilization=False,
+            gpu_utilization=False,
+            intra_step_time=True,
+            inter_step_time=True,
+        )
     trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
@@ -93,7 +95,9 @@ def test_gpu_stats_monitor_no_queries(tmpdir):
 @pytest.mark.skipif(torch.cuda.is_available(), reason="test requires CPU machine")
 def test_gpu_stats_monitor_cpu_machine(tmpdir):
     """Test GPUStatsMonitor on CPU machine."""
-    with pytest.raises(MisconfigurationException, match="NVIDIA driver is not installed"):
+    with pytest.raises(MisconfigurationException, match="NVIDIA driver is not installed"), pytest.deprecated_call(
+        match="GPUStatsMonitor` callback was deprecated in v1.5"
+    ):
         GPUStatsMonitor()
@@ -101,7 +105,8 @@ def test_gpu_stats_monitor_cpu_machine(tmpdir):
 def test_gpu_stats_monitor_no_logger(tmpdir):
     """Test GPUStatsMonitor with no logger in Trainer."""
     model = BoringModel()
-    gpu_stats = GPUStatsMonitor()
+    with pytest.deprecated_call(match="GPUStatsMonitor` callback was deprecated in v1.5"):
+        gpu_stats = GPUStatsMonitor()

     trainer = Trainer(default_root_dir=tmpdir, callbacks=[gpu_stats], max_epochs=1, gpus=1, logger=False)
@@ -113,7 +118,8 @@ def test_gpu_stats_monitor_no_gpu_warning(tmpdir):
     """Test GPUStatsMonitor raises a warning when not training on GPU device."""
     model = BoringModel()
-    gpu_stats = GPUStatsMonitor()
+    with pytest.deprecated_call(match="GPUStatsMonitor` callback was deprecated in v1.5"):
+        gpu_stats = GPUStatsMonitor()

     trainer = Trainer(default_root_dir=tmpdir, callbacks=[gpu_stats], max_steps=1, gpus=None)
diff --git a/tests/callbacks/test_lambda_function.py b/tests/callbacks/test_lambda_function.py
index f2fa040b43c78..59bfb20976665 100644
--- a/tests/callbacks/test_lambda_function.py
+++ b/tests/callbacks/test_lambda_function.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 from functools import partial

+import pytest
+
 from pytorch_lightning import seed_everything, Trainer
 from pytorch_lightning.callbacks import Callback, LambdaCallback
 from tests.helpers.boring_model import BoringModel
@@ -46,7 +48,8 @@ def call(hook, *_, **__):
         limit_val_batches=1,
         callbacks=[LambdaCallback(**hooks_args)],
     )
-    trainer.fit(model)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.fit(model)

     ckpt_path = trainer.checkpoint_callback.best_model_path
@@ -60,8 +63,11 @@ def call(hook, *_, **__):
         limit_predict_batches=1,
         callbacks=[LambdaCallback(**hooks_args)],
     )
-    trainer.fit(model, ckpt_path=ckpt_path)
-    trainer.test(model)
-    trainer.predict(model)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.fit(model, ckpt_path=ckpt_path)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.test(model)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.predict(model)

     assert checker == hooks
diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py
index c7186e819ea94..d30edb177ed10 100644
--- a/tests/callbacks/test_stochastic_weight_avg.py
+++ b/tests/callbacks/test_stochastic_weight_avg.py
@@ -171,7 +171,7 @@ def test_swa_callback_scheduler_step(tmpdir, interval: str):
 def test_swa_warns(tmpdir, caplog):
     model = SwaTestModel(interval="step")
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, stochastic_weight_avg=True)
+    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, callbacks=StochasticWeightAveraging())
     with caplog.at_level(level=logging.INFO), pytest.warns(UserWarning, match="SWA is currently only supported"):
         trainer.fit(model)
     assert "Swapping scheduler `StepLR` for `SWALR`" in caplog.text
@@ -199,14 +199,19 @@ def configure_optimizers(self):
             return optimizer

     model = TestModel()
-    trainer = Trainer(
-        default_root_dir=tmpdir,
-        callbacks=StochasticWeightAveraging(swa_lrs=1e-3) if use_callbacks else None,
-        stochastic_weight_avg=stochastic_weight_avg,
-        limit_train_batches=4,
-        limit_val_batches=4,
-        max_epochs=2,
-    )
+    kwargs = {
+        "default_root_dir": tmpdir,
+        "callbacks": StochasticWeightAveraging(swa_lrs=1e-3) if use_callbacks else None,
+        "stochastic_weight_avg": stochastic_weight_avg,
+        "limit_train_batches": 4,
+        "limit_val_batches": 4,
+        "max_epochs": 2,
+    }
+    if stochastic_weight_avg:
+        with pytest.deprecated_call(match=r"stochastic_weight_avg=True\)` is deprecated in v1.5"):
+            trainer = Trainer(**kwargs)
+    else:
+        trainer = Trainer(**kwargs)
     trainer.fit(model)
     if use_callbacks or stochastic_weight_avg:
         assert sum(1 for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)) == 1
diff --git a/tests/core/test_datamodules.py b/tests/core/test_datamodules.py
index 7fe3032058e2d..d35941ac2cb15 100644
--- a/tests/core/test_datamodules.py
+++ b/tests/core/test_datamodules.py
@@ -442,20 +442,24 @@ def test_hyperparameters_saving():
 def test_define_as_dataclass():
+    class BoringDataModule(LightningDataModule):
+        def __init__(self, foo=None):
+            super().__init__()
+
     # makes sure that no functionality is broken and the user can still manually make
     # super().__init__ call with parameters
     # also tests all the dataclass features that can be enabled without breaking anything
     @dataclass(init=True, repr=True, eq=True, order=True, unsafe_hash=True, frozen=False)
-    class BoringDataModule1(LightningDataModule):
+    class BoringDataModule1(BoringDataModule):
         batch_size: int
-        dims: int = 2
+        foo: int = 2

         def __post_init__(self):
-            super().__init__(dims=self.dims)
+            super().__init__(foo=self.foo)

     # asserts for the different dunder methods added by dataclass, when __init__ is implemented, i.e.
     # __repr__, __eq__, __lt__, __le__, etc.
-    assert BoringDataModule1(batch_size=64).dims == 2
+    assert BoringDataModule1(batch_size=64).foo == 2
     assert BoringDataModule1(batch_size=32)
     assert hasattr(BoringDataModule1, "__repr__")
     assert BoringDataModule1(batch_size=32) == BoringDataModule1(batch_size=32)
@@ -477,7 +481,8 @@ def test_inconsistent_prepare_data_per_node(tmpdir):
     with pytest.raises(MisconfigurationException, match="Inconsistent settings found for `prepare_data_per_node`."):
         model = BoringModel()
         dm = BoringDataModule()
-        trainer = Trainer(prepare_data_per_node=False)
+        with pytest.deprecated_call(match="prepare_data_per_node` with the trainer flag is deprecated"):
+            trainer = Trainer(prepare_data_per_node=False)
         trainer.model = model
         trainer.datamodule = dm
         trainer._data_connector.prepare_data()
diff --git a/tests/deprecated_api/test_remove_1-7.py b/tests/deprecated_api/test_remove_1-7.py
index 09a8df66a02cc..12b2816ca09c8 100644
--- a/tests/deprecated_api/test_remove_1-7.py
+++ b/tests/deprecated_api/test_remove_1-7.py
@@ -15,7 +15,6 @@
 from unittest import mock

 import pytest
-import torch

 from pytorch_lightning import Callback, LightningDataModule, Trainer
 from pytorch_lightning.callbacks.gpu_stats_monitor import GPUStatsMonitor
@@ -233,22 +232,16 @@ def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):

 class BoringCallbackDDPSpawnModel(BoringModel):
-    def __init__(self):
-        super().__init__()
+    def add_to_queue(self, queue):
+        ...

-    def add_to_queue(self, queue: torch.multiprocessing.SimpleQueue) -> None:
-        queue.put("test_val")
-        return super().add_to_queue(queue)
+    def get_from_queue(self, queue):
+        ...
-    def get_from_queue(self, queue: torch.multiprocessing.SimpleQueue) -> None:
-        self.test_val = queue.get()
-        return super().get_from_queue(queue)
-

-@RunIf(skip_windows=True, skip_49370=True)
 def test_v1_7_0_deprecate_add_get_queue(tmpdir):
     model = BoringCallbackDDPSpawnModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, num_processes=2, strategy="ddp_spawn")
+    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
     with pytest.deprecated_call(match=r"`LightningModule.add_to_queue` method was deprecated in v1.5"):
         trainer.fit(model)
@@ -410,13 +403,6 @@ def test_v1_7_0_deprecated_max_steps_none(tmpdir):

 def test_v1_7_0_resume_from_checkpoint_trainer_constructor(tmpdir):
-    with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
-        trainer = Trainer(resume_from_checkpoint="a")
-    with pytest.deprecated_call(
-        match=r"trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v1.7."
-    ):
-        _ = trainer.resume_from_checkpoint
-
     # test resume_from_checkpoint still works until v1.7 deprecation
     model = BoringModel()
     callback = OldStatefulCallback(state=111)
@@ -425,14 +411,22 @@ def test_v1_7_0_resume_from_checkpoint_trainer_constructor(tmpdir):
     ckpt_path = trainer.checkpoint_callback.best_model_path

     callback = OldStatefulCallback(state=222)
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback], resume_from_checkpoint=ckpt_path)
+    with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
+        trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback], resume_from_checkpoint=ckpt_path)
+    with pytest.deprecated_call(
+        match=r"trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v1.7."
+    ):
+        _ = trainer.resume_from_checkpoint
     assert trainer.checkpoint_connector.resume_checkpoint_path is None
     assert trainer.checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
     trainer.validate(model=model, ckpt_path=ckpt_path)
     assert callback.state == 222
     assert trainer.checkpoint_connector.resume_checkpoint_path is None
     assert trainer.checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
-    trainer.fit(model)
+    with pytest.deprecated_call(
+        match=r"trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v1.7."
+    ):
+        trainer.fit(model)
     assert callback.state == 111
     assert trainer.checkpoint_connector.resume_checkpoint_path is None
     assert trainer.checkpoint_connector.resume_from_checkpoint_fit_path is None
@@ -445,7 +439,8 @@ def test_v1_7_0_resume_from_checkpoint_trainer_constructor(tmpdir):

     # test fit(ckpt_path=) precedence over Trainer(resume_from_checkpoint=) path
     model = BoringModel()
-    trainer = Trainer(resume_from_checkpoint="trainer_arg_path")
+    with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
+        trainer = Trainer(resume_from_checkpoint="trainer_arg_path")
     with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. Aborting training."):
         trainer.fit(model, ckpt_path="fit_arg_ckpt_path")
diff --git a/tests/loggers/test_all.py b/tests/loggers/test_all.py
index 370b24431b088..803a13cbb11ea 100644
--- a/tests/loggers/test_all.py
+++ b/tests/loggers/test_all.py
@@ -76,7 +76,9 @@ def test_loggers_fit_test_all(tmpdir, monkeypatch):
     with mock.patch("pytorch_lightning.loggers.neptune.neptune", new_callable=create_neptune_mock):
         _test_loggers_fit_test(tmpdir, NeptuneLogger)

-    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
+    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"), pytest.deprecated_call(
+        match="TestTubeLogger is deprecated since v1.5"
+    ):
         _test_loggers_fit_test(tmpdir, TestTubeLogger)

     with mock.patch("pytorch_lightning.loggers.wandb.wandb") as wandb:
@@ -176,7 +178,9 @@ def test_loggers_save_dir_and_weights_save_path_all(tmpdir, monkeypatch):
     ):
         _test_loggers_save_dir_and_weights_save_path(tmpdir, MLFlowLogger)

-    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
+    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"), pytest.deprecated_call(
+        match="TestTubeLogger is deprecated since v1.5"
+    ):
         _test_loggers_save_dir_and_weights_save_path(tmpdir, TestTubeLogger)

     with mock.patch("pytorch_lightning.loggers.wandb.wandb"):
@@ -247,7 +251,11 @@ def test_loggers_pickle_all(tmpdir, monkeypatch, logger_class):
     """
     _patch_comet_atexit(monkeypatch)
     try:
-        _test_loggers_pickle(tmpdir, monkeypatch, logger_class)
+        if logger_class is TestTubeLogger:
+            with pytest.deprecated_call(match="TestTubeLogger is deprecated since v1.5"):
+                _test_loggers_pickle(tmpdir, monkeypatch, logger_class)
+        else:
+            _test_loggers_pickle(tmpdir, monkeypatch, logger_class)
     except (ImportError, ModuleNotFoundError):
         pytest.xfail(f"pickle test requires {logger_class.__class__} dependencies to be installed.")
@@ -327,7 +335,11 @@ def test_logger_created_on_rank_zero_only(tmpdir, monkeypatch, logger_class):
     """Test that loggers get replaced by dummy loggers on global rank > 0."""
     _patch_comet_atexit(monkeypatch)
     try:
-        _test_logger_created_on_rank_zero_only(tmpdir, logger_class)
+        if logger_class is TestTubeLogger:
+            with pytest.deprecated_call(match="TestTubeLogger is deprecated since v1.5"):
+                _test_logger_created_on_rank_zero_only(tmpdir, logger_class)
+        else:
+            _test_logger_created_on_rank_zero_only(tmpdir, logger_class)
     except (ImportError, ModuleNotFoundError):
         pytest.xfail(f"multi-process test requires {logger_class.__class__} dependencies to be installed.")
@@ -385,7 +397,9 @@ def test_logger_with_prefix_all(tmpdir, monkeypatch):
         logger.experiment.add_scalar.assert_called_once_with("tmp-test", 1.0, 0)

     # TestTube
-    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
+    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"), pytest.deprecated_call(
+        match="TestTubeLogger is deprecated since v1.5"
+    ):
         logger = _instantiate_logger(TestTubeLogger, save_dir=tmpdir, prefix=prefix)
         logger.log_metrics({"test": 1.0}, step=0)
         logger.experiment.log.assert_called_once_with({"tmp-test": 1.0}, global_step=0)
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index 3e1ee9a3231d4..9e4b545ecc5bc 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -481,7 +481,8 @@ def training_step(self, batch, batch_idx):
         dict(name="Callback.on_init_start", args=(trainer,)),
         dict(name="Callback.on_init_end", args=(trainer,)),
     ]
-    trainer.fit(model)
+    with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
+        trainer.fit(model)
     saved_ckpt = {
         "callbacks": ANY,
         "epoch": 1,
@@ -583,7 +584,8 @@ def test_trainer_model_hook_system_fit_no_val_and_resume(tmpdir):
         enable_model_summary=False,
         callbacks=[HookedCallback([])],
     )
-    trainer.fit(model)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.fit(model)
     best_model_path = trainer.checkpoint_callback.best_model_path

     # resume from checkpoint with HookedModel
@@ -605,7 +607,8 @@ def test_trainer_model_hook_system_fit_no_val_and_resume(tmpdir):
         dict(name="Callback.on_init_start", args=(trainer,)),
         dict(name="Callback.on_init_end", args=(trainer,)),
     ]
-    trainer.fit(model, ckpt_path=best_model_path)
+    with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
+        trainer.fit(model, ckpt_path=best_model_path)
     saved_ckpt = {
         "callbacks": ANY,
         "epoch": 2,  # TODO: wrong saved epoch
@@ -700,7 +703,8 @@ def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader,
         dict(name="Callback.on_init_end", args=(trainer,)),
     ]
     fn = getattr(trainer, verb)
-    fn(model, verbose=False)
+    with pytest.deprecated_call(match=f"on_{dataloader}_dataloader` is deprecated in v1.5"):
+        fn(model, verbose=False)
     hooks = [
         dict(name="train", args=(False,)),
         dict(name=f"on_{noun}_model_eval"),
@@ -744,7 +748,8 @@ def test_trainer_model_hook_system_predict(tmpdir):
         dict(name="Callback.on_init_start", args=(trainer,)),
         dict(name="Callback.on_init_end", args=(trainer,)),
     ]
-    trainer.predict(model)
+    with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"):
+        trainer.predict(model)
     expected = [
         dict(name="Callback.on_init_start", args=(trainer,)),
         dict(name="Callback.on_init_end", args=(trainer,)),
diff --git a/tests/plugins/test_amp_plugins.py b/tests/plugins/test_amp_plugins.py
index c482e8a83d7b6..8f563f0e410e2 100644
--- a/tests/plugins/test_amp_plugins.py
+++ b/tests/plugins/test_amp_plugins.py
@@ -266,7 +266,7 @@ def test_precision_selection_raises(monkeypatch):
     with mock.patch("torch.cuda.device_count", return_value=1), pytest.raises(
         MisconfigurationException, match="Sharded plugins are not supported with apex"
     ):
-        Trainer(amp_backend="apex", precision=16, gpus=1, accelerator="ddp_fully_sharded")
+        Trainer(amp_backend="apex", precision=16, gpus=1, strategy="ddp_fully_sharded")

     import pytorch_lightning.plugins.precision.apex_amp as apex
diff --git a/tests/plugins/test_ddp_plugin_with_comm_hook.py b/tests/plugins/test_ddp_plugin_with_comm_hook.py
index 6497b39ffa516..efcb089487c5b 100644
--- a/tests/plugins/test_ddp_plugin_with_comm_hook.py
+++ b/tests/plugins/test_ddp_plugin_with_comm_hook.py
@@ -30,7 +30,7 @@ def test_ddp_fp16_compress_comm_hook(tmpdir):
     """Test for DDP FP16 compress hook."""
     model = BoringModel()
-    training_type_plugin = DDPPlugin(ddp_comm_hook=default.fp16_compress_hook, sync_batchnorm=True)
+    training_type_plugin = DDPPlugin(ddp_comm_hook=default.fp16_compress_hook)
     trainer = Trainer(
         max_epochs=1,
         gpus=2,
@@ -53,7 +53,6 @@ def test_ddp_sgd_comm_hook(tmpdir):
     training_type_plugin = DDPPlugin(
         ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
         ddp_comm_hook=powerSGD.powerSGD_hook,
-        sync_batchnorm=True,
     )
     trainer = Trainer(
         max_epochs=1,
@@ -78,7 +77,6 @@ def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
         ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
         ddp_comm_hook=powerSGD.powerSGD_hook,
         ddp_comm_wrapper=default.fp16_compress_wrapper,
-        sync_batchnorm=True,
     )
     trainer = Trainer(
         max_epochs=1,
@@ -99,7 +97,7 @@ def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
 def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
     """Test for DDP Spawn FP16 compress hook."""
     model = BoringModel()
-    training_type_plugin = DDPSpawnPlugin(ddp_comm_hook=default.fp16_compress_hook, sync_batchnorm=True)
+    training_type_plugin = DDPSpawnPlugin(ddp_comm_hook=default.fp16_compress_hook)
     trainer = Trainer(
         max_epochs=1,
         gpus=2,
@@ -125,7 +123,6 @@ def test_ddp_post_local_sgd_comm_hook(tmpdir):
         ),
         ddp_comm_hook=post_localSGD.post_localSGD_hook,
         model_averaging_period=4,
-        sync_batchnorm=True,
     )
     trainer = Trainer(
         fast_dev_run=True,
diff --git a/tests/plugins/test_ddp_spawn_plugin.py b/tests/plugins/test_ddp_spawn_plugin.py
index c5e5f7ccda748..db61711ab5afe 100644
--- a/tests/plugins/test_ddp_spawn_plugin.py
+++ b/tests/plugins/test_ddp_spawn_plugin.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import pytest
 import torch
 from torch.nn.parallel.distributed import DistributedDataParallel

@@ -76,7 +77,8 @@ def test_ddp_spawn_extra_parameters(tmpdir):
     val_name: str = "val_acc"
     model = BoringCallbackDDPSpawnModel(val_name, val)
     dm = BoringDataModule()
-    trainer.fit(model, datamodule=dm)
+    with pytest.deprecated_call(match="add_to_queue` method was deprecated in v1.5"):
+        trainer.fit(model, datamodule=dm)
     assert trainer.callback_metrics[val_name] == torch.tensor(val)
     assert model.test_val == "test_val"

@@ -102,7 +104,8 @@ def test_ddp_spawn_add_get_queue(tmpdir):
     val_name: str = "val_acc"
     model = BoringCallbackDDPSpawnModel(val_name, val)
     dm = BoringDataModule()
-    trainer.fit(model, datamodule=dm)
+    with pytest.deprecated_call(match="add_to_queue` method was deprecated in v1.5"):
+        trainer.fit(model, datamodule=dm)
     assert trainer.callback_metrics[val_name] == torch.tensor(val)
     assert ddp_spawn_plugin.new_test_val == "new_test_val"
diff --git a/tests/plugins/test_deepspeed_plugin.py b/tests/plugins/test_deepspeed_plugin.py
index b35339487dac1..2d39a3de6b5c5 100644
--- a/tests/plugins/test_deepspeed_plugin.py
+++ b/tests/plugins/test_deepspeed_plugin.py
@@ -1011,7 +1011,6 @@ def on_train_batch_start(
         pl_module: LightningModule,
         batch: Any,
         batch_idx: int,
-        dataloader_idx: int,
     ) -> None:
         assert batch.device.index == 1
diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py
index 5b8c3939c7b48..4d18648b6a7f1 100644
--- a/tests/profiler/test_profiler.py
+++ b/tests/profiler/test_profiler.py
@@ -22,6 +22,7 @@
 import torch

 from pytorch_lightning import Callback, Trainer
+from pytorch_lightning.callbacks import StochasticWeightAveraging
 from pytorch_lightning.loggers.base import LoggerCollection
 from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
 from pytorch_lightning.profiler import AdvancedProfiler, PassThroughProfiler, PyTorchProfiler, SimpleProfiler
@@ -288,7 +289,9 @@ def test_pytorch_profiler_describe(pytorch_profiler):
 def test_advanced_profiler_cprofile_deepcopy(tmpdir):
     """Checks for pickle issue reported in #6522."""
     model = BoringModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, profiler="advanced", stochastic_weight_avg=True)
+    trainer = Trainer(
+        default_root_dir=tmpdir, fast_dev_run=True, profiler="advanced", callbacks=StochasticWeightAveraging()
+    )
     trainer.fit(model)
diff --git a/tests/trainer/flags/test_min_max_epochs.py b/tests/trainer/flags/test_min_max_epochs.py
index 4201c89350c21..989dde6e79360 100644
--- a/tests/trainer/flags/test_min_max_epochs.py
+++ b/tests/trainer/flags/test_min_max_epochs.py
@@ -7,13 +7,13 @@
 @pytest.mark.parametrize(
     ["min_epochs", "max_epochs", "min_steps", "max_steps"],
     [
-        (None, 3, None, None),
+        (None, 3, None, -1),
         (None, None, None, 20),
         (None, 3, None, 20),
         (None, None, 10, 20),
-        (1, 3, None, None),
+        (1, 3, None, -1),
         (1, None, None, 20),
-        (None, 3, 10, None),
+        (None, 3, 10, -1),
     ],
 )
 def test_min_max_steps_epochs(tmpdir, min_epochs, max_epochs, min_steps, max_steps):
diff --git a/tests/trainer/logging_/test_logger_connector.py b/tests/trainer/logging_/test_logger_connector.py
index d26245a377897..656c5ce664800 100644
--- a/tests/trainer/logging_/test_logger_connector.py
+++ b/tests/trainer/logging_/test_logger_connector.py
@@ -250,7 +250,8 @@ def test_fx_validator_integration(tmpdir):
         limit_predict_batches=1,
         callbacks=callback,
     )
-    trainer.fit(model)
+    with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
+        trainer.fit(model)

     not_supported.update(
         {
@@ -262,7 +263,8 @@ def test_fx_validator_integration(tmpdir):
             "on_test_end": "You can't",
         }
     )
-    trainer.test(model, verbose=False)
+    with pytest.deprecated_call(match="on_test_dataloader` is deprecated in v1.5"):
+        trainer.test(model, verbose=False)

     not_supported.update({k: "ResultCollection` is not registered yet" for k in not_supported})
     not_supported.update(
@@ -279,7 +281,8 @@ def test_fx_validator_integration(tmpdir):
             "on_predict_end": "ResultCollection` is not registered yet",
         }
     )
-    trainer.predict(model)
+    with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"):
+        trainer.predict(model)


 @RunIf(min_gpus=2)
diff --git a/tests/trainer/test_dataloaders.py b/tests/trainer/test_dataloaders.py
index 272078b1d4206..43797f8b18901 100644
--- a/tests/trainer/test_dataloaders.py
+++ b/tests/trainer/test_dataloaders.py
@@ -1466,7 +1466,7 @@ def predict_dataloader(self):

 def test_request_dataloader(tmpdir):
-    """This test asserts dataloader can be modified and properly set to the trainer."""
+    """This test asserts dataloader can be wrapped."""

     class DataLoaderWrapper:
         def __init__(self, loader):
@@ -1480,46 +1480,35 @@ def __iter__(self):
         def __next__(self):
             return next(self._iter)

-    class DataLoaderFunc:
-        def __init__(self, loader):
-            self.loader = loader
-
-        def __call__(self):
-            return self.loader
-
     class TestModel(BoringModel):
         def __init__(self):
             super().__init__()
-            self.on_train_dataloader_called = False
             self.on_train_batch_start_called = False
-            self.on_val_dataloader_called = False
             self.on_val_batch_start_called = False

-        def on_train_dataloader(self) -> None:
-            loader = self.train_dataloader()
-            self.train_dataloader = DataLoaderFunc(DataLoaderWrapper(loader))
-            self.on_train_dataloader_called = True
+        def train_dataloader(self):
+            loader = super().train_dataloader()
+            return DataLoaderWrapper(loader)

         def on_train_batch_start(self, batch, batch_idx: int) -> None:
             assert isinstance(self.trainer.train_dataloader.loaders, DataLoaderWrapper)
             self.on_train_batch_start_called = True

-        def on_val_dataloader(self) -> None:
-            loader = self.val_dataloader()
-            self.val_dataloader = DataLoaderFunc(DataLoaderWrapper(loader))
-            self.on_val_dataloader_called = True
+        def val_dataloader(self):
+            loader = super().val_dataloader()
+            return DataLoaderWrapper(loader)

         def on_validation_batch_start(self, batch, batch_idx: int, dataloader_idx: int) -> None:
             assert isinstance(self.trainer.val_dataloaders[0], DataLoaderWrapper)
             self.on_val_batch_start_called = True

-    trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=1)
+    trainer = Trainer(
+        default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1
+    )
     model = TestModel()
     trainer.fit(model)
     trainer.test(model)
-    assert model.on_train_dataloader_called
     assert model.on_train_batch_start_called
-    assert model.on_val_dataloader_called
     assert model.on_val_batch_start_called
diff --git a/tests/trainer/test_supporters.py b/tests/trainer/test_supporters.py
index 694d473155439..436a82c877c4f 100644
--- a/tests/trainer/test_supporters.py
+++ b/tests/trainer/test_supporters.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-from collections import Sequence
+from typing import Sequence
 from unittest import mock

 import pytest
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 7e0af7cc0fc7c..2d39d83ec38ab 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -903,8 +903,8 @@ def training_step(self, batch, batch_idx):

     model = CurrentModel()

-    # fit model
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_inf + 1), terminate_on_nan=True)
+    with pytest.deprecated_call(match="terminate_on_nan` was deprecated in v1.5"):
+        trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_inf + 1), terminate_on_nan=True)

     with pytest.raises(ValueError, match=r".*The loss returned in `training_step` is.*"):
         trainer.fit(model)
@@ -916,7 +916,9 @@ def training_step(self, batch, batch_idx):

 def test_invalid_terminate_on_nan(tmpdir):
-    with pytest.raises(TypeError, match="`terminate_on_nan` should be a bool"):
+    with pytest.raises(TypeError, match="`terminate_on_nan` should be a bool"), pytest.deprecated_call(
+        match="terminate_on_nan` was deprecated in v1.5"
+    ):
         Trainer(default_root_dir=tmpdir, terminate_on_nan="False")
@@ -937,7 +939,9 @@ def on_after_backward(self):
             torch.nn.init.constant_(self.layer.bias, math.nan)

     model = CurrentModel()
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_nan + 1), terminate_on_nan=True)
+
+    with pytest.deprecated_call(match="terminate_on_nan` was deprecated in v1.5"):
+        trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_nan + 1), terminate_on_nan=True)

     with pytest.raises(ValueError, match=r".*Detected nan and/or inf values in `layer.bias`.*"):
         trainer.fit(model)
@@ -991,11 +995,14 @@ def on_keyboard_interrupt(self, trainer, pl_module):
     assert not trainer.interrupted
     assert handle_interrupt_callback.exception is None
     assert handle_interrupt_callback.exc_info is None
-    trainer.fit(model)
+    with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"):
+        trainer.fit(model)
     assert trainer.interrupted
     assert isinstance(handle_interrupt_callback.exception, KeyboardInterrupt)
     assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)
-    with pytest.raises(MisconfigurationException):
+    with pytest.raises(MisconfigurationException), pytest.deprecated_call(
+        match="on_keyboard_interrupt` callback hook was deprecated in v1.5"
+    ):
         trainer.test(model)
     assert trainer.interrupted
     assert isinstance(handle_interrupt_callback.exception, MisconfigurationException)
@@ -1218,7 +1225,11 @@ def test_trainer_config(trainer_kwargs, expected, monkeypatch):
     if trainer_kwargs["gpus"] is not None:
         monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
         monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"]) - trainer = Trainer(**trainer_kwargs) + if trainer_kwargs["accelerator"] in (None, "ddp_cpu"): + trainer = Trainer(**trainer_kwargs) + else: + with pytest.deprecated_call(match=r"accelerator='.*'\)` has been deprecated in v1.5"): + trainer = Trainer(**trainer_kwargs) assert len(expected) == 4 for k, v in expected.items(): assert getattr(trainer, k) == v, f"Failed {k}: {v}" @@ -1314,7 +1325,6 @@ def training_step(self, *args, **kwargs): trainer = Trainer( default_root_dir=tmpdir, log_every_n_steps=log_interval, - flush_logs_every_n_steps=log_interval, limit_train_batches=train_batches, limit_val_batches=0, max_steps=max_steps, @@ -1806,7 +1816,7 @@ def on_predict_start(self) -> None: @pytest.mark.parametrize( - "strategy,num_processes", [(None, 1), pytest.param("ddp_spawn", 2, marks=RunIf(skip_windows=True, skip_49370=True))] + "strategy,num_processes", [(None, 1), pytest.param("ddp_spawn", 1, marks=RunIf(skip_windows=True, skip_49370=True))] ) def test_model_in_correct_mode_during_stages(tmpdir, strategy, num_processes): model = TrainerStagesModel() @@ -1982,7 +1992,7 @@ def on_predict_start(self) -> None: "strategy,num_processes", [ (None, 1), - pytest.param("ddp_spawn", 2, marks=RunIf(skip_windows=True)), + pytest.param("ddp_spawn", 1, marks=RunIf(skip_windows=True)), ], ) def test_error_handling_all_stages(tmpdir, strategy, num_processes): diff --git a/tests/utilities/test_cli.py b/tests/utilities/test_cli.py index 7a86150454777..5f824d1beed0b 100644 --- a/tests/utilities/test_cli.py +++ b/tests/utilities/test_cli.py @@ -572,7 +572,7 @@ def add_arguments_to_parser(self, parser): class EarlyExitTestModel(BoringModel): def on_fit_start(self): - raise Exception("Error on fit start") + raise MisconfigurationException("Error on fit start") @pytest.mark.parametrize("logger", (False, True)) @@ -584,8 +584,10 @@ def on_fit_start(self): pytest.param({"tpu_cores": 1}, marks=RunIf(tpu=True)), ), ) -def test_cli_ddp_spawn_save_config_callback(tmpdir, logger, trainer_kwargs): - with mock.patch("sys.argv", ["any.py", "fit"]), pytest.raises(Exception, match=r"Error on fit start"): +def test_cli_distributed_save_config_callback(tmpdir, logger, trainer_kwargs): + with mock.patch("sys.argv", ["any.py", "fit"]), pytest.raises( + MisconfigurationException, match=r"Error on fit start" + ): LightningCLI( EarlyExitTestModel, trainer_defaults={