diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ada11278f63d3..5a1bf25251333 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -49,20 +49,21 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v3.1.0 hooks: - id: pyupgrade args: [--py37-plus] name: Upgrade code - - repo: https://github.com/myint/docformatter - rev: v1.4 + - repo: https://github.com/PyCQA/docformatter + rev: v1.5.0 hooks: - id: docformatter args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120] + verbose: true - repo: https://github.com/asottile/yesqa - rev: v1.3.0 + rev: v1.4.0 hooks: - id: yesqa name: Unused noqa @@ -75,7 +76,7 @@ repos: exclude: docs/source-app - repo: https://github.com/psf/black - rev: 22.6.0 + rev: 22.10.0 hooks: - id: black name: Format code @@ -86,11 +87,11 @@ repos: hooks: - id: blacken-docs args: [--line-length=120] - additional_dependencies: [black==21.12b0] + additional_dependencies: [black==22.10.0] exclude: docs/source-app - repo: https://github.com/executablebooks/mdformat - rev: 0.7.14 + rev: 0.7.16 hooks: - id: mdformat additional_dependencies: @@ -105,7 +106,7 @@ repos: )$ - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 5.0.4 hooks: - id: flake8 name: Check PEP8 diff --git a/docs/source-app/examples/file_server/app.py b/docs/source-app/examples/file_server/app.py index cfc72a70b3f8a..54706dacec5b8 100644 --- a/docs/source-app/examples/file_server/app.py +++ b/docs/source-app/examples/file_server/app.py @@ -90,7 +90,7 @@ def upload_file(self, file): "size": full_size, "drive_path": uploaded_file, } - with open(self.get_filepath(meta_file), "wt") as f: + with open(self.get_filepath(meta_file), "w") as f: json.dump(meta, f) # 5: Put the file to the drive. diff --git a/docs/source-pytorch/debug/debugging_basic.rst b/docs/source-pytorch/debug/debugging_basic.rst index 14d059af1067c..d0eb42e05bc0a 100644 --- a/docs/source-pytorch/debug/debugging_basic.rst +++ b/docs/source-pytorch/debug/debugging_basic.rst @@ -36,7 +36,7 @@ A breakpoint stops your code execution so you can inspect variables, etc... and import pdb pdb.set_trace() - y = x ** 2 + y = x**2 In this example, the code will stop before executing the ``y = x**2`` line. diff --git a/docs/source-pytorch/strategies/hivemind_expert.rst b/docs/source-pytorch/strategies/hivemind_expert.rst index 3fa55afb132fd..580bea4bd4869 100644 --- a/docs/source-pytorch/strategies/hivemind_expert.rst +++ b/docs/source-pytorch/strategies/hivemind_expert.rst @@ -48,7 +48,7 @@ Size Adaptive Compression has been used in a variety of Hivemind applications an # compresses values above threshold with 8bit Quantization, lower with Float16 compression = SizeAdaptiveCompression( - threshold=2 ** 16 + 1, less=Float16Compression(), greater_equal=Uniform8BitQuantization() + threshold=2**16 + 1, less=Float16Compression(), greater_equal=Uniform8BitQuantization() ) trainer = pl.Trainer( strategy=HivemindStrategy( diff --git a/examples/lite/image_classifier_1_pytorch.py b/examples/lite/image_classifier_1_pytorch.py index a7fe9ce51cbb6..f00677837b2ed 100644 --- a/examples/lite/image_classifier_1_pytorch.py +++ b/examples/lite/image_classifier_1_pytorch.py @@ -28,7 +28,6 @@ # Credit to the PyTorch team # Taken from https://github.com/pytorch/examples/blob/master/mnist/main.py and slightly adapted. 
def run(hparams): - torch.manual_seed(hparams.seed) use_cuda = torch.cuda.is_available() diff --git a/src/lightning_app/cli/cmd_init.py b/src/lightning_app/cli/cmd_init.py index e3fbf4f680c3c..d6f70c8f07d00 100644 --- a/src/lightning_app/cli/cmd_init.py +++ b/src/lightning_app/cli/cmd_init.py @@ -9,7 +9,6 @@ def app(app_name: str) -> None: - if app_name is None: app_name = _capture_valid_app_component_name(resource_type="app") diff --git a/src/lightning_app/cli/cmd_install.py b/src/lightning_app/cli/cmd_install.py index db0467212f147..bb553856c54d5 100644 --- a/src/lightning_app/cli/cmd_install.py +++ b/src/lightning_app/cli/cmd_install.py @@ -32,7 +32,6 @@ def gallery_component(name: str, yes_arg: bool, version_arg: str, cwd: str = Non def non_gallery_component(gh_url: str, yes_arg: bool, cwd: str = None) -> None: - # give the user the chance to do a manual install git_url = _show_non_gallery_install_component_prompt(gh_url, yes_arg) @@ -41,7 +40,6 @@ def non_gallery_component(gh_url: str, yes_arg: bool, cwd: str = None) -> None: def gallery_app(name: str, yes_arg: bool, version_arg: str, cwd: str = None, overwrite: bool = False) -> None: - # make sure org/app-name syntax is correct org, app = _validate_name(name, resource_type="app", example="lightning/quick-start") @@ -61,7 +59,6 @@ def gallery_app(name: str, yes_arg: bool, version_arg: str, cwd: str = None, ove def non_gallery_app(gh_url: str, yes_arg: bool, cwd: str = None, overwrite: bool = False) -> None: - # give the user the chance to do a manual install repo_url, folder_name = _show_non_gallery_install_app_prompt(gh_url, yes_arg) diff --git a/src/lightning_app/cli/commands/logs.py b/src/lightning_app/cli/commands/logs.py index fb0746dd50fff..0f90d2b7ad69f 100644 --- a/src/lightning_app/cli/commands/logs.py +++ b/src/lightning_app/cli/commands/logs.py @@ -39,7 +39,6 @@ def logs(app_name: str, components: List[str], follow: bool) -> None: def _show_logs(app_name: str, components: List[str], follow: bool) -> None: - client = LightningClient() project = _get_project(client) diff --git a/src/lightning_app/cli/component-template/tests/test_placeholdername_component.py b/src/lightning_app/cli/component-template/tests/test_placeholdername_component.py index ca8e92d9494b8..e1b30e1c11b6b 100644 --- a/src/lightning_app/cli/component-template/tests/test_placeholdername_component.py +++ b/src/lightning_app/cli/component-template/tests/test_placeholdername_component.py @@ -1,5 +1,4 @@ -r""" -To test a lightning component: +r"""To test a lightning component: 1. Init the component. 2. call .run() diff --git a/src/lightning_app/utilities/imports.py b/src/lightning_app/utilities/imports.py index c44cae515fb00..372de95dcd0b1 100644 --- a/src/lightning_app/utilities/imports.py +++ b/src/lightning_app/utilities/imports.py @@ -20,7 +20,6 @@ def requires(module_paths: Union[str, List]): - if not isinstance(module_paths, list): module_paths = [module_paths] diff --git a/src/lightning_lite/lite.py b/src/lightning_lite/lite.py index 5e41f15121acb..7596fdfffc88f 100644 --- a/src/lightning_lite/lite.py +++ b/src/lightning_lite/lite.py @@ -396,8 +396,7 @@ def barrier(self, name: Optional[str] = None) -> None: def all_gather( self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False ) -> Union[Tensor, Dict, List, Tuple]: - r""" - Gather tensors or collections of tensors from multiple processes. + r"""Gather tensors or collections of tensors from multiple processes. 
Args: data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof. diff --git a/src/lightning_lite/strategies/launchers/base.py b/src/lightning_lite/strategies/launchers/base.py index f2d02973a203e..c63cc025d315f 100644 --- a/src/lightning_lite/strategies/launchers/base.py +++ b/src/lightning_lite/strategies/launchers/base.py @@ -16,8 +16,7 @@ class _Launcher(ABC): - r""" - Abstract base class for all Launchers. + r"""Abstract base class for all Launchers. Launchers are responsible for the creation and instrumentation of new processes so that the :class:`~lightning_lite.strategies.strategy.Strategy` can set up communication between all them. diff --git a/src/lightning_lite/strategies/launchers/subprocess_script.py b/src/lightning_lite/strategies/launchers/subprocess_script.py index 54ed9bf3fe88d..96539f6f40aa5 100644 --- a/src/lightning_lite/strategies/launchers/subprocess_script.py +++ b/src/lightning_lite/strategies/launchers/subprocess_script.py @@ -25,8 +25,7 @@ class _SubprocessScriptLauncher(_Launcher): - r""" - A process laucher that invokes the current script as many times as desired in a single node. + r"""A process laucher that invokes the current script as many times as desired in a single node. This launcher needs to be invoked on each node. In its default behavior, the main process in each node then spawns N-1 child processes via :func:`subprocess.Popen`, diff --git a/src/lightning_lite/strategies/launchers/xla.py b/src/lightning_lite/strategies/launchers/xla.py index bcb770d942791..2fdba252a42be 100644 --- a/src/lightning_lite/strategies/launchers/xla.py +++ b/src/lightning_lite/strategies/launchers/xla.py @@ -26,8 +26,8 @@ class _XLALauncher(_MultiProcessingLauncher): - r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the - end. + r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at + the end. The main process in which this launcher is invoked creates N so-called worker processes (using the `torch_xla` :func:`xmp.spawn`) that run the given function. diff --git a/src/pytorch_lightning/accelerators/accelerator.py b/src/pytorch_lightning/accelerators/accelerator.py index 448f8e87951be..956b6357e6634 100644 --- a/src/pytorch_lightning/accelerators/accelerator.py +++ b/src/pytorch_lightning/accelerators/accelerator.py @@ -29,9 +29,13 @@ class Accelerator(_Accelerator, ABC): """ def setup_environment(self, root_device: torch.device) -> None: - """ + """Create and prepare the device for the current process. + + Note that this is deprecated. + .. deprecated:: v1.8.0 - This hook was deprecated in v1.8.0 and will be removed in v1.10.0. Please use ``setup_device()`` instead. + This hook was deprecated in v1.8.0 and will be removed in v1.10.0. Please use + ``setup_device()`` instead. """ rank_zero_deprecation( "`Accelerator.setup_environment` has been deprecated in deprecated in v1.8.0 and will be removed in" diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py index 400d45d3b6e55..61af8d26ff784 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/pytorch_lightning/callbacks/callback.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -r""" -Base class used to build new callbacks. 
- -""" +r"""Base class used to build new callbacks.""" from typing import Any, Dict, List, Optional, Type @@ -26,8 +23,7 @@ class Callback: - r""" - Abstract base class used to build new callbacks. + r"""Abstract base class used to build new callbacks. Subclass this class and override any of the relevant hooks """ @@ -213,8 +209,7 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: def on_save_checkpoint( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] ) -> None: - r""" - Called when saving a checkpoint to give you a chance to store anything else you might want to save. + r"""Called when saving a checkpoint to give you a chance to store anything else you might want to save. Args: trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. @@ -225,8 +220,7 @@ def on_save_checkpoint( def on_load_checkpoint( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] ) -> None: - r""" - Called when loading a model checkpoint, use to reload state. + r"""Called when loading a model checkpoint, use to reload state. Args: trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. diff --git a/src/pytorch_lightning/callbacks/checkpoint.py b/src/pytorch_lightning/callbacks/checkpoint.py index 405f29876c6fc..94b72f3c790a8 100644 --- a/src/pytorch_lightning/callbacks/checkpoint.py +++ b/src/pytorch_lightning/callbacks/checkpoint.py @@ -2,8 +2,8 @@ class Checkpoint(Callback): - r""" - This is the base class for model checkpointing. Expert users may want to subclass it in case of writing - custom :class:`~pytorch_lightning.callbacksCheckpoint` callback, so that - the trainer recognizes the custom class as a checkpointing callback. + r"""This is the base class for model checkpointing. + + Expert users may want to subclass it in case of writing custom :class:`~pytorch_lightning.callbacksCheckpoint` + callback, so that the trainer recognizes the custom class as a checkpointing callback. """ diff --git a/src/pytorch_lightning/callbacks/device_stats_monitor.py b/src/pytorch_lightning/callbacks/device_stats_monitor.py index 0bc014290f271..dd8c7d2a25239 100644 --- a/src/pytorch_lightning/callbacks/device_stats_monitor.py +++ b/src/pytorch_lightning/callbacks/device_stats_monitor.py @@ -29,9 +29,8 @@ class DeviceStatsMonitor(Callback): - r""" - Automatically monitors and logs device stats during training stage. ``DeviceStatsMonitor`` - is a special callback as it requires a ``logger`` to passed as argument to the ``Trainer``. + r"""Automatically monitors and logs device stats during training stage. ``DeviceStatsMonitor`` is a special + callback as it requires a ``logger`` to passed as argument to the ``Trainer``. Args: cpu_stats: if ``None``, it will log CPU stats only if the accelerator is CPU. diff --git a/src/pytorch_lightning/callbacks/early_stopping.py b/src/pytorch_lightning/callbacks/early_stopping.py index 31e12d508dce2..283051bc04f47 100644 --- a/src/pytorch_lightning/callbacks/early_stopping.py +++ b/src/pytorch_lightning/callbacks/early_stopping.py @@ -36,8 +36,7 @@ class EarlyStopping(Callback): - r""" - Monitor a metric and stop training when it stops improving. + r"""Monitor a metric and stop training when it stops improving. Args: monitor: quantity to be monitored. 
diff --git a/src/pytorch_lightning/callbacks/finetuning.py b/src/pytorch_lightning/callbacks/finetuning.py index 0722f7b8e06d9..91bb266ad0f72 100644 --- a/src/pytorch_lightning/callbacks/finetuning.py +++ b/src/pytorch_lightning/callbacks/finetuning.py @@ -37,8 +37,7 @@ def multiplicative(epoch: int) -> float: class BaseFinetuning(Callback): - r""" - This class implements the base logic for writing your own Finetuning Callback. + r"""This class implements the base logic for writing your own Finetuning Callback. Override ``freeze_before_training`` and ``finetune_function`` methods with your own logic. @@ -338,7 +337,6 @@ class BackboneFinetuning(BaseFinetuning): >>> multiplicative = lambda epoch: 1.5 >>> backbone_finetuning = BackboneFinetuning(200, multiplicative) >>> trainer = Trainer(callbacks=[backbone_finetuning]) - """ def __init__( diff --git a/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py b/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py index 3af7737e0d7f6..2ef435dd9a476 100644 --- a/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py +++ b/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py @@ -28,8 +28,7 @@ class GradientAccumulationScheduler(Callback): - r""" - Change gradient accumulation factor according to scheduling. + r"""Change gradient accumulation factor according to scheduling. Args: scheduling: scheduling in format {epoch: accumulation_factor} diff --git a/src/pytorch_lightning/callbacks/lambda_function.py b/src/pytorch_lightning/callbacks/lambda_function.py index c56f0dd779f3e..faa6d7be3e41e 100644 --- a/src/pytorch_lightning/callbacks/lambda_function.py +++ b/src/pytorch_lightning/callbacks/lambda_function.py @@ -25,8 +25,7 @@ class LambdaCallback(Callback): - r""" - Create a simple callback on the fly using lambda functions. + r"""Create a simple callback on the fly using lambda functions. Args: **kwargs: hooks supported by :class:`~pytorch_lightning.callbacks.callback.Callback` diff --git a/src/pytorch_lightning/callbacks/lr_monitor.py b/src/pytorch_lightning/callbacks/lr_monitor.py index fd300fd076c26..a0ba56cb06702 100644 --- a/src/pytorch_lightning/callbacks/lr_monitor.py +++ b/src/pytorch_lightning/callbacks/lr_monitor.py @@ -33,8 +33,7 @@ class LearningRateMonitor(Callback): - r""" - Automatically monitor and logs learning rate for learning rate schedulers during training. + r"""Automatically monitor and logs learning rate for learning rate schedulers during training. Args: logging_interval: set to ``'epoch'`` or ``'step'`` to log ``lr`` of all optimizers @@ -84,7 +83,6 @@ def configure_optimizer(self): ) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) return [optimizer], [lr_scheduler] - """ def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False) -> None: diff --git a/src/pytorch_lightning/callbacks/model_summary.py b/src/pytorch_lightning/callbacks/model_summary.py index 5b7c1be91e5b7..4f4c56334e509 100644 --- a/src/pytorch_lightning/callbacks/model_summary.py +++ b/src/pytorch_lightning/callbacks/model_summary.py @@ -35,8 +35,7 @@ class ModelSummary(Callback): - r""" - Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule`. + r"""Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule`. Args: max_depth: The maximum depth of layer nesting that the summary will include. 
A value of 0 turns the diff --git a/src/pytorch_lightning/callbacks/progress/base.py b/src/pytorch_lightning/callbacks/progress/base.py index 7dc555ee76532..d658ca121c8b1 100644 --- a/src/pytorch_lightning/callbacks/progress/base.py +++ b/src/pytorch_lightning/callbacks/progress/base.py @@ -20,10 +20,9 @@ class ProgressBarBase(Callback): - r""" - The base class for progress bars in Lightning. It is a :class:`~pytorch_lightning.callbacks.Callback` - that keeps track of the batch progress in the :class:`~pytorch_lightning.trainer.trainer.Trainer`. - You should implement your highly custom progress bars with this as the base class. + r"""The base class for progress bars in Lightning. It is a :class:`~pytorch_lightning.callbacks.Callback` that + keeps track of the batch progress in the :class:`~pytorch_lightning.trainer.trainer.Trainer`. You should + implement your highly custom progress bars with this as the base class. Example:: @@ -44,7 +43,6 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch_idx): bar = LitProgressBar() trainer = Trainer(callbacks=[bar]) - """ def __init__(self) -> None: @@ -225,9 +223,8 @@ def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: s def get_metrics( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule" ) -> Dict[str, Union[int, str, float, Dict[str, float]]]: - r""" - Combines progress bar metrics collected from the trainer with standard metrics from get_standard_metrics. - Implement this to override the items displayed in the progress bar. + r"""Combines progress bar metrics collected from the trainer with standard metrics from + get_standard_metrics. Implement this to override the items displayed in the progress bar. Here is an example of how to override the defaults: @@ -256,9 +253,8 @@ def get_metrics(self, trainer, model): def get_standard_metrics(trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Dict[str, Union[int, str]]: - r""" - Returns several standard metrics displayed in the progress bar, including the average loss value, - split index of BPTT (if used) and the version of the experiment when using a logger. + r"""Returns several standard metrics displayed in the progress bar, including the average loss value, split + index of BPTT (if used) and the version of the experiment when using a logger. .. code-block:: diff --git a/src/pytorch_lightning/callbacks/pruning.py b/src/pytorch_lightning/callbacks/pruning.py index ad5f8776c56b4..f6b22f608f179 100644 --- a/src/pytorch_lightning/callbacks/pruning.py +++ b/src/pytorch_lightning/callbacks/pruning.py @@ -279,8 +279,7 @@ def _copy_param(new: nn.Module, old: nn.Module, name: str) -> None: dst.data = src.data.to(dst.device) def apply_lottery_ticket_hypothesis(self) -> None: - r""" - Lottery ticket hypothesis algorithm (see page 2 of the paper): + r"""Lottery ticket hypothesis algorithm (see page 2 of the paper): 1. Randomly initialize a neural network :math:`f(x; \theta_0)` (where :math:`\theta_0 \sim \mathcal{D}_\theta`). 2. Train the network for :math:`j` iterations, arriving at parameters :math:`\theta_j`. 
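The x**2 and 2**16 + 1 edits in the docs hunks earlier in this diff follow from the blacken-docs dependency bump to black==22.10.0: black 22.x removes the spaces around the power operator when both operands are simple. A short illustration with placeholder values, not code taken from the patch:

    # black >= 22.1 hugs ** when both operands are simple names or literals.
    x = 3.0
    y = x**2                # previously formatted as: y = x ** 2
    threshold = 2**16 + 1   # previously formatted as: threshold = 2 ** 16 + 1
    print(y, threshold)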
diff --git a/src/pytorch_lightning/callbacks/rich_model_summary.py b/src/pytorch_lightning/callbacks/rich_model_summary.py index 7373d91263efd..cdc04aef9a307 100644 --- a/src/pytorch_lightning/callbacks/rich_model_summary.py +++ b/src/pytorch_lightning/callbacks/rich_model_summary.py @@ -23,9 +23,8 @@ class RichModelSummary(ModelSummary): - r""" - Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule` - with `rich text formatting `_. + r"""Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule` with `rich + text formatting `_. Install it with pip: diff --git a/src/pytorch_lightning/callbacks/stochastic_weight_avg.py b/src/pytorch_lightning/callbacks/stochastic_weight_avg.py index ccf5051d5fd39..25a3a065a65cb 100644 --- a/src/pytorch_lightning/callbacks/stochastic_weight_avg.py +++ b/src/pytorch_lightning/callbacks/stochastic_weight_avg.py @@ -44,9 +44,7 @@ def __init__( avg_fn: Optional[_AVG_FN] = None, device: Optional[Union[torch.device, str]] = torch.device("cpu"), ): - r""" - - Implements the Stochastic Weight Averaging (SWA) Callback to average a model. + r"""Implements the Stochastic Weight Averaging (SWA) Callback to average a model. Stochastic Weight Averaging was proposed in ``Averaging Weights Leads to Wider Optima and Better Generalization`` by Pavel Izmailov, Dmitrii @@ -94,7 +92,6 @@ def __init__( device: if provided, the averaged model will be stored on the ``device``. When None is provided, it will infer the `device` from ``pl_module``. (default: ``"cpu"``) - """ err_msg = "swa_epoch_start should be a >0 integer or a float between 0 and 1." diff --git a/src/pytorch_lightning/core/datamodule.py b/src/pytorch_lightning/core/datamodule.py index 894adeb797a9c..8dc5f9a3c2609 100644 --- a/src/pytorch_lightning/core/datamodule.py +++ b/src/pytorch_lightning/core/datamodule.py @@ -126,8 +126,7 @@ def from_datasets( num_workers: int = 0, **datamodule_kwargs: Any, ) -> "LightningDataModule": - r""" - Create an instance from torch.utils.data.Dataset. + r"""Create an instance from torch.utils.data.Dataset. Args: train_dataset: Optional dataset to be used for train_dataloader() @@ -220,9 +219,8 @@ def load_from_checkpoint( hparams_file: Optional[_PATH] = None, **kwargs: Any, ) -> Self: # type: ignore[valid-type] - r""" - Primary way of loading a datamodule from a checkpoint. When Lightning saves a checkpoint - it stores the arguments passed to ``__init__`` in the checkpoint under ``"datamodule_hyper_parameters"``. + r"""Primary way of loading a datamodule from a checkpoint. When Lightning saves a checkpoint it stores the + arguments passed to ``__init__`` in the checkpoint under ``"datamodule_hyper_parameters"``. Any arguments specified through \*\*kwargs will override args stored in ``"datamodule_hyper_parameters"``. @@ -271,7 +269,6 @@ def load_from_checkpoint( batch_size=32, num_workers=10, ) - """ return _load_from_checkpoint( cls, diff --git a/src/pytorch_lightning/core/hooks.py b/src/pytorch_lightning/core/hooks.py index 7831c94e61d90..7bab128ceecee 100644 --- a/src/pytorch_lightning/core/hooks.py +++ b/src/pytorch_lightning/core/hooks.py @@ -443,8 +443,7 @@ def train_dataloader(self): raise MisconfigurationException("`train_dataloader` must be implemented to be used with the Lightning Trainer") def test_dataloader(self) -> EVAL_DATALOADERS: - r""" - Implement one or multiple PyTorch DataLoaders for testing. + r"""Implement one or multiple PyTorch DataLoaders for testing. 
For data processing use the following pattern: @@ -497,8 +496,7 @@ def test_dataloader(self): raise MisconfigurationException("`test_dataloader` must be implemented to be used with the Lightning Trainer") def val_dataloader(self) -> EVAL_DATALOADERS: - r""" - Implement one or multiple PyTorch DataLoaders for validation. + r"""Implement one or multiple PyTorch DataLoaders for validation. The dataloader you return will not be reloaded unless you set :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_n_epochs` to @@ -548,8 +546,7 @@ def val_dataloader(self): raise MisconfigurationException("`val_dataloader` must be implemented to be used with the Lightning Trainer") def predict_dataloader(self) -> EVAL_DATALOADERS: - r""" - Implement one or multiple PyTorch DataLoaders for prediction. + r"""Implement one or multiple PyTorch DataLoaders for prediction. It's recommended that all data downloads and preparation happen in :meth:`prepare_data`. @@ -705,9 +702,8 @@ class CheckpointHooks: """Hooks to be used with Checkpointing.""" def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - r""" - Called by Lightning to restore your model. - If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this. + r"""Called by Lightning to restore your model. If you saved something with :meth:`on_save_checkpoint` this + is your chance to restore this. Args: checkpoint: Loaded checkpoint @@ -724,9 +720,8 @@ def on_load_checkpoint(self, checkpoint): """ def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - r""" - Called by Lightning when saving a checkpoint to give you a chance to store anything - else you might want to save. + r"""Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want + to save. Args: checkpoint: The full checkpoint dictionary before it gets dumped to a file. @@ -742,5 +737,4 @@ def on_save_checkpoint(self, checkpoint): Lightning saves all aspects of training (epoch, global step, etc...) including amp scaling. There is no need for you to store anything about training. - """ diff --git a/src/pytorch_lightning/core/module.py b/src/pytorch_lightning/core/module.py index 0331fd5116e56..026ed9f138cfd 100644 --- a/src/pytorch_lightning/core/module.py +++ b/src/pytorch_lightning/core/module.py @@ -297,8 +297,7 @@ def _apply_batch_transfer_handler( return batch def print(self, *args: Any, **kwargs: Any) -> None: - r""" - Prints only from process 0. Use this in any distributed mode to log only once. + r"""Prints only from process 0. Use this in any distributed mode to log only once. Args: *args: The thing to print. The same as for Python's built-in print function. @@ -308,7 +307,6 @@ def print(self, *args: Any, **kwargs: Any) -> None: def forward(self, x): self.print(x, 'in forward') - """ if self.trainer.is_global_zero: progress_bar = self.trainer.progress_bar_callback @@ -567,10 +565,9 @@ def log_grad_norm(self, grad_norm_dict): def all_gather( self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False ) -> Union[Tensor, Dict, List, Tuple]: - r""" - Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ``all_gather`` operation - accelerator agnostic. ``all_gather`` is a function provided by accelerators to gather a tensor from several - distributed processes. + r"""Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ``all_gather`` + operation accelerator agnostic. 
``all_gather`` is a function provided by accelerators to gather a tensor + from several distributed processes. Args: data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof. @@ -587,8 +584,7 @@ def all_gather( return apply_to_collection(data, Tensor, all_gather, group=group, sync_grads=sync_grads) def forward(self, *args: Any, **kwargs: Any) -> Any: - r""" - Same as :meth:`torch.nn.Module.forward`. + r"""Same as :meth:`torch.nn.Module.forward`. Args: *args: Whatever you decide to pass into the forward method. @@ -600,9 +596,8 @@ def forward(self, *args: Any, **kwargs: Any) -> Any: return super().forward(*args, **kwargs) def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: # type: ignore[return-value] - r""" - Here you compute and return the training loss and some additional metrics for e.g. - the progress bar or logger. + r"""Here you compute and return the training loss and some additional metrics for e.g. the progress bar or + logger. Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): @@ -762,9 +757,8 @@ def training_epoch_end(self, training_step_outputs): """ def validation_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: - r""" - Operates on a single batch of data from the validation set. - In this step you'd might generate examples or calculate anything of interest like accuracy. + r"""Operates on a single batch of data from the validation set. In this step you'd might generate examples + or calculate anything of interest like accuracy. .. code-block:: python @@ -948,10 +942,8 @@ def validation_epoch_end(self, outputs): """ def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: - r""" - Operates on a single batch of data from the test set. - In this step you'd normally generate examples or calculate anything of interest - such as accuracy. + r"""Operates on a single batch of data from the test set. In this step you'd normally generate examples or + calculate anything of interest such as accuracy. .. code-block:: python @@ -1189,9 +1181,8 @@ def configure_callbacks(self): return [] def configure_optimizers(self) -> Any: - r""" - Choose what optimizers and learning-rate schedulers to use in your optimization. - Normally you'd need one. But in the case of GANs or similar you might have multiple. + r"""Choose what optimizers and learning-rate schedulers to use in your optimization. Normally you'd need + one. But in the case of GANs or similar you might have multiple. Return: Any of these 6 options. @@ -1587,9 +1578,8 @@ def optimizer_step( using_native_amp: bool = False, using_lbfgs: bool = False, ) -> None: - r""" - Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls - each optimizer. + r"""Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` + calls each optimizer. By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example once per optimizer. 
This method (and ``zero_grad()``) won't be called during the accumulation phase when @@ -1656,7 +1646,6 @@ def optimizer_step( lr_scale = min(1.0, float(self.trainer.global_step + 1) / 500.0) for pg in optimizer.param_groups: pg["lr"] = lr_scale * self.learning_rate - """ optimizer.step(closure=optimizer_closure) @@ -1684,10 +1673,8 @@ def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): optimizer.zero_grad() def tbptt_split_batch(self, batch: Any, split_size: int) -> List[Any]: - r""" - When using truncated backpropagation through time, each batch must be split along the - time dimension. Lightning handles this by default, but for custom behavior override - this function. + r"""When using truncated backpropagation through time, each batch must be split along the time dimension. + Lightning handles this by default, but for custom behavior override this function. Args: batch: Current batch @@ -1732,7 +1719,7 @@ def tbptt_split_batch(self, batch, split_size): split_x: Union[Tensor, List[Tensor]] if isinstance(x, Tensor): split_x = x[:, t : t + split_size] - elif isinstance(x, collections.Sequence): + elif isinstance(x, collections.abc.Sequence): split_x = [x[batch_idx][t : t + split_size] for batch_idx in range(len(x))] batch_split.append(split_x) @@ -1742,14 +1729,12 @@ def tbptt_split_batch(self, batch, split_size): return splits def freeze(self) -> None: - r""" - Freeze all params for inference. + r"""Freeze all params for inference. Example:: model = MyLightningModule(...) model.freeze() - """ for param in self.parameters(): param.requires_grad = False diff --git a/src/pytorch_lightning/core/saving.py b/src/pytorch_lightning/core/saving.py index 009a3e9664fd5..272a5a12bea2a 100644 --- a/src/pytorch_lightning/core/saving.py +++ b/src/pytorch_lightning/core/saving.py @@ -65,9 +65,8 @@ def load_from_checkpoint( strict: bool = True, **kwargs: Any, ) -> Self: # type: ignore[valid-type] - r""" - Primary way of loading a model from a checkpoint. When Lightning saves a checkpoint - it stores the arguments passed to ``__init__`` in the checkpoint under ``"hyper_parameters"``. + r"""Primary way of loading a model from a checkpoint. When Lightning saves a checkpoint it stores the + arguments passed to ``__init__`` in the checkpoint under ``"hyper_parameters"``. Any arguments specified through \*\*kwargs will override args stored in ``"hyper_parameters"``. diff --git a/src/pytorch_lightning/loggers/comet.py b/src/pytorch_lightning/loggers/comet.py index d0f9add3272b8..2276c356930f3 100644 --- a/src/pytorch_lightning/loggers/comet.py +++ b/src/pytorch_lightning/loggers/comet.py @@ -52,9 +52,9 @@ class CometLogger(Logger): - r""" - Track your parameters, metrics, source code and more using - `Comet `_. + r"""Track your parameters, metrics, source code and more using `Comet. + + `_. Install it with pip: @@ -336,14 +336,13 @@ def reset_experiment(self) -> None: @rank_zero_only def finalize(self, status: str) -> None: - r""" - When calling ``self.experiment.end()``, that experiment won't log any more data to Comet. - That's why, if you need to log any more data, you need to create an ExistingCometExperiment. - For example, to log data when testing your model after training, because when training is - finalized :meth:`CometLogger.finalize` is called. + r"""When calling ``self.experiment.end()``, that experiment won't log any more data to Comet. That's why, if + you need to log any more data, you need to create an ExistingCometExperiment. 
For example, to log data when + testing your model after training, because when training is finalized :meth:`CometLogger.finalize` is + called. - This happens automatically in the :meth:`~CometLogger.experiment` property, when - ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``. + This happens automatically in the :meth:`~CometLogger.experiment` property, when ``self._experiment`` is set to + ``None``, i.e. ``self.reset_experiment()``. """ if self._experiment is None: # When using multiprocessing, finalize() should be a no-op on the main process, as no experiment has been diff --git a/src/pytorch_lightning/loggers/csv_logs.py b/src/pytorch_lightning/loggers/csv_logs.py index 5b2d961bae11f..d11361803a299 100644 --- a/src/pytorch_lightning/loggers/csv_logs.py +++ b/src/pytorch_lightning/loggers/csv_logs.py @@ -35,8 +35,7 @@ class ExperimentWriter: - r""" - Experiment writer for CSVLogger. + r"""Experiment writer for CSVLogger. Currently supports to log hyperparameters and metrics in YAML and CSV format, respectively. @@ -101,8 +100,7 @@ def save(self) -> None: class CSVLogger(Logger): - r""" - Log to local file system in yaml and CSV format. + r"""Log to local file system in yaml and CSV format. Logs are saved to ``os.path.join(save_dir, name, version)``. diff --git a/src/pytorch_lightning/loggers/neptune.py b/src/pytorch_lightning/loggers/neptune.py index 323c6c039c784..c47f1d13b4ae2 100644 --- a/src/pytorch_lightning/loggers/neptune.py +++ b/src/pytorch_lightning/loggers/neptune.py @@ -48,8 +48,7 @@ class NeptuneLogger(Logger): - r""" - Log using `Neptune `_. + r"""Log using `Neptune `_. Install it with pip: @@ -364,8 +363,7 @@ def run(self) -> Run: @rank_zero_only def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: # skipcq: PYL-W0221 - r""" - Log hyper-parameters to the run. + r"""Log hyper-parameters to the run. Hyperparams will be logged under the "/hyperparams" namespace. diff --git a/src/pytorch_lightning/loggers/tensorboard.py b/src/pytorch_lightning/loggers/tensorboard.py index 1c840a3dea7e1..9beb215be75c4 100644 --- a/src/pytorch_lightning/loggers/tensorboard.py +++ b/src/pytorch_lightning/loggers/tensorboard.py @@ -46,8 +46,7 @@ class TensorBoardLogger(Logger): - r""" - Log to local file system in `TensorBoard `_ format. + r"""Log to local file system in `TensorBoard `_ format. Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to ``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes diff --git a/src/pytorch_lightning/loggers/wandb.py b/src/pytorch_lightning/loggers/wandb.py index 19403da0bf114..890e5bbfaf8e1 100644 --- a/src/pytorch_lightning/loggers/wandb.py +++ b/src/pytorch_lightning/loggers/wandb.py @@ -51,8 +51,7 @@ class WandbLogger(Logger): - r""" - Log using `Weights and Biases `_. + r"""Log using `Weights and Biases `_. **Installation and set-up** @@ -285,7 +284,6 @@ def any_lightning_module_function_or_hook(self): If required WandB package is not installed on the device. MisconfigurationException: If both ``log_model`` and ``offline`` is set to ``True``. 
- """ LOGGER_JOIN_CHAR = "-" diff --git a/src/pytorch_lightning/profiler/base.py b/src/pytorch_lightning/profiler/base.py index f2e0ad5276f2e..618280ed485c8 100644 --- a/src/pytorch_lightning/profiler/base.py +++ b/src/pytorch_lightning/profiler/base.py @@ -52,7 +52,10 @@ def teardown(self, **kwargs: Any) -> None: class BaseProfiler(Profiler): - """ + """If you wish to write a custom profiler, you should inherit from this class. + + Note that this is deprecated. + .. deprecated:: v1.6 `BaseProfiler` was deprecated in v1.6 and will be removed in v1.8. Please use `Profiler` instead. diff --git a/src/pytorch_lightning/profiler/profiler.py b/src/pytorch_lightning/profiler/profiler.py index 40d18e79a3284..5b6b23f543dcc 100644 --- a/src/pytorch_lightning/profiler/profiler.py +++ b/src/pytorch_lightning/profiler/profiler.py @@ -16,7 +16,10 @@ class Profiler(NewProfiler): - """ + """If you wish to write a custom profiler, you should inherit from this class. + + Note that this is deprecated. + .. deprecated:: v1.6 `pytorch_lightning.profiler.Profiler` is deprecated in v1.7 and will be removed in v1.9. Use the equivalent `pytorch_lightning.profilers.Profiler` class instead. diff --git a/src/pytorch_lightning/serve/servable_module.py b/src/pytorch_lightning/serve/servable_module.py index ef95187c63245..6146122cfdf04 100644 --- a/src/pytorch_lightning/serve/servable_module.py +++ b/src/pytorch_lightning/serve/servable_module.py @@ -71,8 +71,7 @@ def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callab ... def serve_step(self, *args: torch.Tensor, **kwargs: torch.Tensor) -> Dict[str, torch.Tensor]: - r""" - Returns the predictions of your model as a dictionary. + r"""Returns the predictions of your model as a dictionary. .. code-block:: python diff --git a/src/pytorch_lightning/strategies/deepspeed.py b/src/pytorch_lightning/strategies/deepspeed.py index 465c65bfa7539..81971ed3538b2 100644 --- a/src/pytorch_lightning/strategies/deepspeed.py +++ b/src/pytorch_lightning/strategies/deepspeed.py @@ -68,7 +68,11 @@ def remove_module_hooks(model: torch.nn.Module) -> None: class LightningDeepSpeedModule(_LightningModuleWrapperBase): - """ + """Wraps the user's LightningModule and redirects the forward call to the appropriate method, either + ``training_step``, ``validation_step``, ``test_step``, or ``predict_step`` + + Note that this is deprecated. + .. deprecated:: v1.7.1 ``LightningDeepSpeedModule`` has been deprecated in v1.7.1 and will be removed in v1.9.0. """ diff --git a/src/pytorch_lightning/strategies/fully_sharded_native.py b/src/pytorch_lightning/strategies/fully_sharded_native.py index c628f2a653a79..46f4176fa5873 100644 --- a/src/pytorch_lightning/strategies/fully_sharded_native.py +++ b/src/pytorch_lightning/strategies/fully_sharded_native.py @@ -101,7 +101,6 @@ class DDPFullyShardedNativeStrategy(ParallelStrategy): or BF16 if ``precision=bf16`` unless a config is passed in. This is only available in PyTorch 1.12 and later. \**kwargs: Passed to the FSDP context manager which will configure the FSDP class when wrapping modules. 
- """ strategy_name = "fsdp_native" diff --git a/src/pytorch_lightning/strategies/launchers/subprocess_script.py b/src/pytorch_lightning/strategies/launchers/subprocess_script.py index 1a10c0e7cad72..4efe56acfc3c5 100644 --- a/src/pytorch_lightning/strategies/launchers/subprocess_script.py +++ b/src/pytorch_lightning/strategies/launchers/subprocess_script.py @@ -26,8 +26,7 @@ class _SubprocessScriptLauncher(_Launcher): - r""" - A process laucher that invokes the current script as many times as desired in a single node. + r"""A process laucher that invokes the current script as many times as desired in a single node. This launcher needs to be invoked on each node. In its default behavior, the main process in each node then spawns N-1 child processes via :func:`subprocess.Popen`, diff --git a/src/pytorch_lightning/strategies/launchers/xla.py b/src/pytorch_lightning/strategies/launchers/xla.py index d6e623da58937..4fa95bb5b61cf 100644 --- a/src/pytorch_lightning/strategies/launchers/xla.py +++ b/src/pytorch_lightning/strategies/launchers/xla.py @@ -32,8 +32,8 @@ class _XLALauncher(_MultiProcessingLauncher): - r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the - end. + r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at + the end. The main process in which this launcher is invoked creates N so-called worker processes (using the `torch_xla` :func:`xmp.spawn`) that run the given function. diff --git a/src/pytorch_lightning/trainer/call.py b/src/pytorch_lightning/trainer/call.py index 71175b2921766..f37f6935bac89 100644 --- a/src/pytorch_lightning/trainer/call.py +++ b/src/pytorch_lightning/trainer/call.py @@ -22,9 +22,8 @@ def _call_and_handle_interrupt(trainer: "pl.Trainer", trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any: - r""" - Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict) - as all errors should funnel through them + r"""Error handling, intended to be used only for main trainer function entry points (fit, validate, test, + predict) as all errors should funnel through them. Args: trainer_fn: one of (fit, validate, test, predict) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index f551f3ba5d3b7..bd963d7da9852 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -26,12 +26,10 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: - r""" - Checks that the model is configured correctly before the run is started. + r"""Checks that the model is configured correctly before the run is started. Args: trainer: Lightning Trainer. Its `lightning_module` (the model) to check the configuration. - """ model = trainer.lightning_module diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index f6789314d7f1a..be02a7eb54b28 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -164,8 +164,7 @@ def __init__( multiple_trainloader_mode: str = "max_size_cycle", inference_mode: bool = True, ) -> None: - r""" - Customize every aspect of training via flags. + r"""Customize every aspect of training via flags. 
Args: @@ -554,8 +553,7 @@ def fit( datamodule: Optional[LightningDataModule] = None, ckpt_path: Optional[str] = None, ) -> None: - r""" - Runs the full optimization routine. + r"""Runs the full optimization routine. Args: model: Model to fit. @@ -631,8 +629,7 @@ def validate( verbose: bool = True, datamodule: Optional[LightningDataModule] = None, ) -> _EVALUATE_OUTPUT: - r""" - Perform one evaluation epoch over the validation set. + r"""Perform one evaluation epoch over the validation set. Args: model: The model to validate. @@ -722,9 +719,8 @@ def test( verbose: bool = True, datamodule: Optional[LightningDataModule] = None, ) -> _EVALUATE_OUTPUT: - r""" - Perform one evaluation epoch over the test set. - It's separated from fit to make sure you never run on your test set until you want to. + r"""Perform one evaluation epoch over the test set. It's separated from fit to make sure you never run on + your test set until you want to. Args: model: The model to test. @@ -814,10 +810,8 @@ def predict( return_predictions: Optional[bool] = None, ckpt_path: Optional[str] = None, ) -> Optional[_PREDICT_OUTPUT]: - r""" - Run inference on your data. - This will call the model forward function to compute predictions. Useful to perform distributed - and batched predictions. Logging is disabled in the predict hooks. + r"""Run inference on your data. This will call the model forward function to compute predictions. Useful to + perform distributed and batched predictions. Logging is disabled in the predict hooks. Args: model: The model to predict with. @@ -908,8 +902,7 @@ def tune( lr_find_kwargs: Optional[Dict[str, Any]] = None, method: Literal["fit", "validate", "test", "predict"] = "fit", ) -> _TunerResult: - r""" - Runs routines to tune hyperparameters before training. + r"""Runs routines to tune hyperparameters before training. Args: model: Model to tune. @@ -1883,14 +1876,12 @@ def ckpt_path(self) -> Optional[str]: def save_checkpoint( self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None ) -> None: - r""" - Runs routine to create a checkpoint. + r"""Runs routine to create a checkpoint. Args: filepath: Path where checkpoint is saved. weights_only: If ``True``, will only save the model weights. storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin - """ if self.model is None: raise AttributeError( @@ -2177,9 +2168,8 @@ def _should_terminate_gracefully(self) -> bool: @property def estimated_stepping_batches(self) -> Union[int, float]: - r""" - Estimated stepping batches for the complete training inferred from DataLoaders, gradient - accumulation factor and distributed setup. + r"""Estimated stepping batches for the complete training inferred from DataLoaders, gradient accumulation + factor and distributed setup. 
Examples:: @@ -2189,7 +2179,6 @@ def configure_optimizers(self): optimizer, max_lr=1e-3, total_steps=self.trainer.estimated_stepping_batches ) return [optimizer], [scheduler] - """ accumulation_scheduler = self.accumulation_scheduler diff --git a/src/pytorch_lightning/utilities/argparse.py b/src/pytorch_lightning/utilities/argparse.py index 8b1872ee7b643..09ba86236865f 100644 --- a/src/pytorch_lightning/utilities/argparse.py +++ b/src/pytorch_lightning/utilities/argparse.py @@ -132,7 +132,6 @@ def get_init_arguments_and_types(cls: _ARGPARSE_CLS) -> List[Tuple[str, Tuple, A >>> from pytorch_lightning import Trainer >>> args = get_init_arguments_and_types(Trainer) - """ cls_default_params = inspect.signature(cls).parameters name_type_default = [] diff --git a/tests/tests_app/cli/test_cloud_cli.py b/tests/tests_app/cli/test_cloud_cli.py index 169e82ab1f42f..ccad26025e478 100644 --- a/tests/tests_app/cli/test_cloud_cli.py +++ b/tests/tests_app/cli/test_cloud_cli.py @@ -118,7 +118,6 @@ def lightningapp_v2_service_create_lightningapp_release_instance(self, project_i @mock.patch("lightning_app.runners.runtime_type.CloudRuntime", CloudRuntimePatch) @pytest.mark.parametrize("create_response", [RuntimeErrorResponse(), RuntimeErrorResponse2()]) def test_start_app(create_response, monkeypatch): - monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) monkeypatch.setattr(cloud, "Body8", MagicMock()) monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) @@ -202,7 +201,6 @@ def lightningapp_v2_service_list_lightningapps_v2(self, *args, **kwargs): ], ) def test_start_app_exception(message, monkeypatch, caplog): - monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) monkeypatch.setattr(cloud, "Body8", MagicMock()) monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) diff --git a/tests/tests_app/cli/test_cmd_init.py b/tests/tests_app/cli/test_cmd_init.py index 4ceaecf5e55ec..f145021bad656 100644 --- a/tests/tests_app/cli/test_cmd_init.py +++ b/tests/tests_app/cli/test_cmd_init.py @@ -9,7 +9,6 @@ def test_validate_init_name(): - # test that a good name works (mix chars) value = cmd_init._capture_valid_app_component_name("abc1-cde") assert value == "abc1-cde" diff --git a/tests/tests_app/cli/test_cmd_install.py b/tests/tests_app/cli/test_cmd_install.py index aa0c34ba6ed2d..fe10bb6185497 100644 --- a/tests/tests_app/cli/test_cmd_install.py +++ b/tests/tests_app/cli/test_cmd_install.py @@ -193,7 +193,6 @@ def test_version_arg_component(tmpdir, monkeypatch): @mock.patch("lightning_app.cli.cmd_install.subprocess", mock.MagicMock()) @mock.patch("lightning_app.cli.cmd_install.os.chdir", mock.MagicMock()) def test_version_arg_app(tmpdir): - # Version does not exist app_name = "lightning/invideo" version_arg = "NOT-EXIST" @@ -213,7 +212,6 @@ def test_version_arg_app(tmpdir): @mock.patch("lightning_app.cli.cmd_install.os.chdir", mock.MagicMock()) @mock.patch("lightning_app.cli.cmd_install._show_install_app_prompt") def test_install_resolve_latest_version(mock_show_install_app_prompt, tmpdir): - app_name = "lightning/invideo" runner = CliRunner() with mock.patch("lightning_app.cli.cmd_install.requests.get") as get_api_mock: @@ -242,7 +240,6 @@ def test_install_resolve_latest_version(mock_show_install_app_prompt, tmpdir): def test_proper_url_parsing(): - name = "lightning/invideo" # make sure org/app-name name is correct @@ -269,7 +266,6 @@ def test_proper_url_parsing(): @_RunIf(skip_windows=True) def test_install_app_shows_error(tmpdir): - app_folder_dir = Path(tmpdir / 
"some_random_directory").absolute() app_folder_dir.mkdir() diff --git a/tests/tests_app/components/database/test_client_server.py b/tests/tests_app/components/database/test_client_server.py index 7b193d8f74c20..3303000d7f32a 100644 --- a/tests/tests_app/components/database/test_client_server.py +++ b/tests/tests_app/components/database/test_client_server.py @@ -48,7 +48,6 @@ def run(self, client: DatabaseClient): @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_client_server(): - database_path = Path("database.db").resolve() if database_path.exists(): os.remove(database_path) @@ -122,7 +121,6 @@ def run(self): @pytest.mark.skipif(sys.platform == "win32", reason="currently not supported for windows.") @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_work_database_restart(): - id = str(uuid4()).split("-")[0] class Flow(LightningFlow): @@ -166,7 +164,6 @@ def run(self): @pytest.mark.skipif(sys.platform == "win32", reason="currently not supported for windows.") @pytest.mark.skipif(not _is_sqlmodel_available(), reason="sqlmodel is required for this test.") def test_work_database_periodic_store(): - id = str(uuid4()).split("-")[0] class Flow(LightningFlow): diff --git a/tests/tests_app/components/serve/test_gradio.py b/tests/tests_app/components/serve/test_gradio.py index 0b57656e6aa31..f305f0a323e84 100644 --- a/tests/tests_app/components/serve/test_gradio.py +++ b/tests/tests_app/components/serve/test_gradio.py @@ -6,7 +6,6 @@ @mock.patch.dict(os.environ, {"LIGHTING_TESTING": "1"}) @mock.patch("lightning_app.components.serve.gradio.gradio") def test_serve_gradio(gradio_mock): - from lightning_app.components.serve.gradio import ServeGradio class MyGradioServe(ServeGradio): diff --git a/tests/tests_app/components/serve/test_model_inference_api.py b/tests/tests_app/components/serve/test_model_inference_api.py index 17ed09aa2eea8..a32e9ff1abc84 100644 --- a/tests/tests_app/components/serve/test_model_inference_api.py +++ b/tests/tests_app/components/serve/test_model_inference_api.py @@ -34,7 +34,6 @@ def target_fn(port, workers): @pytest.mark.skipif(not (_is_torch_available() and _is_numpy_available()), reason="Missing torch and numpy") @pytest.mark.parametrize("workers", [0]) def test_model_inference_api(workers): - port = find_free_network_port() process = mp.Process(target=target_fn, args=(port, workers)) process.start() @@ -65,7 +64,6 @@ def predict(self, x): def test_model_inference_api_mock(monkeypatch): - monkeypatch.setattr(serve, "uvicorn", MagicMock()) comp = EmptyServer() comp.run() diff --git a/tests/tests_app/core/test_lightning_app.py b/tests/tests_app/core/test_lightning_app.py index 64bee758af5d5..a3fba03bdea77 100644 --- a/tests/tests_app/core/test_lightning_app.py +++ b/tests/tests_app/core/test_lightning_app.py @@ -428,7 +428,6 @@ def run(self): ], ) def test_lightning_app_aggregation_speed(default_timeout, queue_type_cls: BaseQueue, sleep_time, expect): - """This test validates the `_collect_deltas_from_ui_and_work_queues` can aggregate multiple delta together in a time window.""" @@ -1068,7 +1067,6 @@ def __init__(self, flow): def test_cloud_compute_binding(): - cloud_compute.ENABLE_MULTIPLE_WORKS_IN_NON_DEFAULT_CONTAINER = True assert cloud_compute._CLOUD_COMPUTE_STORE == {} diff --git a/tests/tests_app/core/test_lightning_flow.py b/tests/tests_app/core/test_lightning_flow.py index c0cf780dc5eff..5071c0df27f04 100644 --- a/tests/tests_app/core/test_lightning_flow.py 
+++ b/tests/tests_app/core/test_lightning_flow.py
@@ -556,7 +556,6 @@ def run(self):
 
 @pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime, MultiProcessRuntime])
 def test_lightning_flow_counter(runtime_cls, tmpdir):
-
     app = LightningApp(FlowCounter())
     app.checkpointing = True
     runtime_cls(app, start_server=False).dispatch()
@@ -649,7 +648,6 @@ def run(self):
 
 
 def test_scheduling_api():
-
     app = LightningApp(FlowSchedule())
     MultiProcessRuntime(app, start_server=True).dispatch()
 
@@ -835,7 +833,6 @@ def run(self):
 
 
 def test_lightning_flow_flows_and_works():
-
     flow = FlowCollection()
     app = LightningApp(flow)
 
diff --git a/tests/tests_app/core/test_lightning_work.py b/tests/tests_app/core/test_lightning_work.py
index cb97eabfa237c..ceec85465c479 100644
--- a/tests/tests_app/core/test_lightning_work.py
+++ b/tests/tests_app/core/test_lightning_work.py
@@ -299,7 +299,6 @@ def run(self, *args, **kwargs):
 
 
 def test_work_cloud_build_config_provided():
-
     assert isinstance(LightningWork.cloud_build_config, property)
     assert LightningWork.cloud_build_config.fset is not None
 
@@ -316,7 +315,6 @@ def run(self, *args, **kwargs):
 
 
 def test_work_local_build_config_provided():
-
     assert isinstance(LightningWork.local_build_config, property)
     assert LightningWork.local_build_config.fset is not None
 
diff --git a/tests/tests_app/core/test_queues.py b/tests/tests_app/core/test_queues.py
index 899ad9f606e85..6209b29e602a1 100644
--- a/tests/tests_app/core/test_queues.py
+++ b/tests/tests_app/core/test_queues.py
@@ -107,7 +107,6 @@ def test_redis_queue_read_timeout(redis_mock):
     [(QueuingSystem.SINGLEPROCESS, queue), (QueuingSystem.MULTIPROCESS, multiprocessing)],
 )
 def test_process_queue_read_timeout(queue_type, queue_process_mock, monkeypatch):
-
     queue_mocked = mock.MagicMock()
     monkeypatch.setattr(queue_process_mock, "Queue", queue_mocked)
     my_queue = queue_type.get_readiness_queue()
diff --git a/tests/tests_app/frontend/just_py/test_just_py.py b/tests/tests_app/frontend/just_py/test_just_py.py
index d1a40544bc2b8..59792416ca23e 100644
--- a/tests/tests_app/frontend/just_py/test_just_py.py
+++ b/tests/tests_app/frontend/just_py/test_just_py.py
@@ -15,7 +15,6 @@ def render_fn(get_state: Callable) -> Callable:
 
 
 def test_justpy_frontend(monkeypatch):
-
     justpy = MagicMock()
     popen = MagicMock()
     monkeypatch.setitem(sys.modules, "justpy", justpy)
diff --git a/tests/tests_app/runners/test_cloud.py b/tests/tests_app/runners/test_cloud.py
index 25bc590893280..7d0da064010fe 100644
--- a/tests/tests_app/runners/test_cloud.py
+++ b/tests/tests_app/runners/test_cloud.py
@@ -1186,7 +1186,6 @@ def test_get_project(monkeypatch):
 @mock.patch("lightning_app.core.queues.QueuingSystem", MagicMock())
 @mock.patch("lightning_app.runners.backends.cloud.LightningClient", MagicMock())
 def test_check_uploaded_folder(monkeypatch, tmpdir, caplog):
-
     monkeypatch.setattr(cloud, "logger", logging.getLogger())
 
     app = MagicMock()
diff --git a/tests/tests_app/runners/test_singleprocess.py b/tests/tests_app/runners/test_singleprocess.py
index 3b2ad69185077..1b5328183f0a0 100644
--- a/tests/tests_app/runners/test_singleprocess.py
+++ b/tests/tests_app/runners/test_singleprocess.py
@@ -13,6 +13,5 @@ def on_before_run():
 
 
 def test_single_process_runtime(tmpdir):
-
     app = LightningApp(Flow())
     SingleProcessRuntime(app, start_server=False).dispatch(on_before_run=on_before_run)
diff --git a/tests/tests_app/structures/test_structures.py b/tests/tests_app/structures/test_structures.py
index 7b84e31402f36..d045a68d295b2 100644
--- a/tests/tests_app/structures/test_structures.py
+++ b/tests/tests_app/structures/test_structures.py
@@ -438,7 +438,6 @@ def run(self):
 
 
 def test_dict_with_queues():
-
     app = LightningApp(FlowDict())
     MultiProcessRuntime(app, start_server=False).dispatch()
 
@@ -459,7 +458,6 @@ def run(self):
 
 
 def test_list_with_queues():
-
     app = LightningApp(FlowList())
     MultiProcessRuntime(app, start_server=False).dispatch()
 
diff --git a/tests/tests_app/utilities/test_app_helpers.py b/tests/tests_app/utilities/test_app_helpers.py
index cdf526b74f1c7..72eb321912e0b 100644
--- a/tests/tests_app/utilities/test_app_helpers.py
+++ b/tests/tests_app/utilities/test_app_helpers.py
@@ -39,7 +39,6 @@ def test_is_overridden():
 
 
 def test_simple_app_store():
-
     store = InMemoryStateStore()
     user_id = "1234"
     store.add(user_id)
diff --git a/tests/tests_app/utilities/test_git.py b/tests/tests_app/utilities/test_git.py
index cb2db0a2bfe33..a51825dbe0aa0 100644
--- a/tests/tests_app/utilities/test_git.py
+++ b/tests/tests_app/utilities/test_git.py
@@ -11,7 +11,6 @@
 
 
 def test_execute_git_command():
-
     res = execute_git_command(["pull"])
     assert res
 
diff --git a/tests/tests_app/utilities/test_proxies.py b/tests/tests_app/utilities/test_proxies.py
index 4b8a5f25f71e3..16b7c8d710bf1 100644
--- a/tests/tests_app/utilities/test_proxies.py
+++ b/tests/tests_app/utilities/test_proxies.py
@@ -635,7 +635,6 @@ def run(self):
 
 
 def test_state_observer():
-
     app = LightningApp(FlowState())
     MultiProcessRuntime(app, start_server=False).dispatch()
 
diff --git a/tests/tests_app/utilities/test_state.py b/tests/tests_app/utilities/test_state.py
index 3b9f1b790cfc7..baca6db5071a6 100644
--- a/tests/tests_app/utilities/test_state.py
+++ b/tests/tests_app/utilities/test_state.py
@@ -14,7 +14,6 @@
 
 @mock.patch("lightning_app.utilities.state._configure_session", return_value=requests)
 def test_app_state_not_connected(_):
-
     """Test an error message when a disconnected AppState tries to access attributes."""
     state = AppState(port=8000)
     with pytest.raises(AttributeError, match="Failed to connect and fetch the app state"):
@@ -249,7 +248,6 @@ def json(self):
 
 
 def test_get_send_request(monkeypatch):
-
     app = LightningApp(Flow())
     monkeypatch.setattr(lightning_app.utilities.state, "_configure_session", mock.MagicMock())
 
diff --git a/tests/tests_app_examples/components/python/test_scripts.py b/tests/tests_app_examples/components/python/test_scripts.py
index 53a083cf40ad6..2ff4ad26e6ef1 100644
--- a/tests/tests_app_examples/components/python/test_scripts.py
+++ b/tests/tests_app_examples/components/python/test_scripts.py
@@ -23,7 +23,6 @@ def test_scripts(file):
 @pytest.mark.skip(reason="causing some issues with CI, not sure if the test is actually needed")
 @_RunIf(pl=True)
 def test_components_app_example():
-
     runner = CliRunner()
     result = runner.invoke(
         run_app,
diff --git a/tests/tests_app_examples/layout.py b/tests/tests_app_examples/layout.py
index ce65d3d9a825b..5b167e3b4a109 100644
--- a/tests/tests_app_examples/layout.py
+++ b/tests/tests_app_examples/layout.py
@@ -7,7 +7,6 @@
 
 
 def test_layout_example():
-
     runner = CliRunner()
     result = runner.invoke(
         run_app,
diff --git a/tests/tests_app_examples/pickle_or_not.py b/tests/tests_app_examples/pickle_or_not.py
index d55f39b2db28c..6730e20095353 100644
--- a/tests/tests_app_examples/pickle_or_not.py
+++ b/tests/tests_app_examples/pickle_or_not.py
@@ -7,7 +7,6 @@
 
 
 def test_pickle_or_not_example():
-
     runner = CliRunner()
     result = runner.invoke(
         run_app,
diff --git a/tests/tests_app_examples/test_core_features_app.py b/tests/tests_app_examples/test_core_features_app.py
index 3fd425350ad0a..ba3c63bc09b04 100644
--- a/tests/tests_app_examples/test_core_features_app.py
+++ b/tests/tests_app_examples/test_core_features_app.py
@@ -7,7 +7,6 @@
 
 
 def test_core_features_app_example():
-
     runner = CliRunner()
     result = runner.invoke(
         run_app,
diff --git a/tests/tests_lite/utilities/test_optimizer.py b/tests/tests_lite/utilities/test_optimizer.py
index e14b8bc8cbe29..aa9ae0c965364 100644
--- a/tests/tests_lite/utilities/test_optimizer.py
+++ b/tests/tests_lite/utilities/test_optimizer.py
@@ -24,7 +24,7 @@ def assert_opt_parameters_on_device(opt, device: str):
         # Not sure there are any global tensors in the state dict
         if isinstance(param, torch.Tensor):
             assert param.data.device.type == device
-        elif isinstance(param, collections.Mapping):
+        elif isinstance(param, collections.abc.Mapping):
             for subparam in param.values():
                 if isinstance(subparam, torch.Tensor):
                     assert param.data.device.type == device
diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py
index d6ca2b97f363b..4081d0b950b62 100644
--- a/tests/tests_pytorch/accelerators/test_hpu.py
+++ b/tests/tests_pytorch/accelerators/test_hpu.py
@@ -169,7 +169,6 @@ def on_predict_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, da
 
 @RunIf(hpu=True)
 def test_accelerator_hpu():
-
     trainer = Trainer(accelerator="hpu", devices=1)
     assert isinstance(trainer.accelerator, HPUAccelerator)
     assert trainer.num_devices == 1
@@ -185,7 +184,6 @@ def test_accelerator_hpu():
 
 @RunIf(hpu=True)
 def test_accelerator_hpu_with_single_device():
-
     trainer = Trainer(accelerator="hpu", devices=1)
 
     assert isinstance(trainer.strategy, SingleHPUStrategy)
@@ -194,7 +192,6 @@ def test_accelerator_hpu_with_single_device():
 
 @RunIf(hpu=True)
 def test_accelerator_hpu_with_multiple_devices():
-
     trainer = Trainer(accelerator="hpu", devices=8)
 
     assert isinstance(trainer.strategy, HPUParallelStrategy)
@@ -203,7 +200,6 @@ def test_accelerator_hpu_with_multiple_devices():
 
 @RunIf(hpu=True)
 def test_accelerator_auto_with_devices_hpu():
-
     trainer = Trainer(accelerator="auto", devices=8)
 
     assert isinstance(trainer.strategy, HPUParallelStrategy)
@@ -309,7 +305,6 @@ def training_epoch_end(self, outputs) -> None:
 
 @RunIf(hpu=True)
 def test_hpu_device_stats_monitor(tmpdir):
-
     hpu_stats = HPUAccelerator().get_device_stats("hpu")
     fields = [
         "Limit",
diff --git a/tests/tests_pytorch/accelerators/test_ipu.py b/tests/tests_pytorch/accelerators/test_ipu.py
index 5c0049cdf9085..3c41f1b6fb024 100644
--- a/tests/tests_pytorch/accelerators/test_ipu.py
+++ b/tests/tests_pytorch/accelerators/test_ipu.py
@@ -580,7 +580,6 @@ def test_accelerator_ipu():
 
 @RunIf(ipu=True)
 def test_accelerator_ipu_with_devices():
-
     trainer = Trainer(accelerator="ipu", devices=8)
     assert isinstance(trainer.strategy, IPUStrategy)
     assert isinstance(trainer.accelerator, IPUAccelerator)
diff --git a/tests/tests_pytorch/accelerators/test_tpu.py b/tests/tests_pytorch/accelerators/test_tpu.py
index 85ce3cac3a31c..19b62f2848af5 100644
--- a/tests/tests_pytorch/accelerators/test_tpu.py
+++ b/tests/tests_pytorch/accelerators/test_tpu.py
@@ -216,7 +216,6 @@ def test_strategy_choice_tpu_strategy():
 @RunIf(tpu=True)
 @mock.patch.dict(os.environ, os.environ.copy(), clear=True)
 def test_auto_parameters_tying_tpus(tmpdir):
-
     model = WeightSharingModule()
     shared_params = find_shared_parameters(model)
 
diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py
index 7663a53212427..0e6bd7fa5d87f 100644
--- a/tests/tests_pytorch/callbacks/test_early_stopping.py
+++ b/tests/tests_pytorch/callbacks/test_early_stopping.py
@@ -247,7 +247,6 @@ def validation_epoch_end(self, outputs):
 
 @pytest.mark.parametrize("stop_value", [torch.tensor(np.inf), torch.tensor(np.nan)])
 def test_early_stopping_on_non_finite_monitor(tmpdir, stop_value):
-
     losses = [4, 3, stop_value, 2, 1]
     expected_stop_epoch = 2
 
diff --git a/tests/tests_pytorch/callbacks/test_model_summary.py b/tests/tests_pytorch/callbacks/test_model_summary.py
index af9b839ff8eda..81e2875d09ecb 100644
--- a/tests/tests_pytorch/callbacks/test_model_summary.py
+++ b/tests/tests_pytorch/callbacks/test_model_summary.py
@@ -19,7 +19,6 @@
 
 
 def test_model_summary_callback_present_trainer():
-
     trainer = Trainer()
     assert any(isinstance(cb, ModelSummary) for cb in trainer.callbacks)
 
diff --git a/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py b/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py
index 8d582117591b0..7bfa44cf5eded 100644
--- a/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py
+++ b/tests/tests_pytorch/checkpointing/test_checkpoint_callback_frequency.py
@@ -35,7 +35,6 @@ def test_disabled_checkpointing(tmpdir):
     ["epochs", "val_check_interval", "expected"], [(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 6)]
 )
 def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int):
-
     model = BoringModel()
     trainer = Trainer(
         default_root_dir=tmpdir,
diff --git a/tests/tests_pytorch/core/test_lightning_module.py b/tests/tests_pytorch/core/test_lightning_module.py
index 1c9653faf07b7..9ed9603909287 100644
--- a/tests/tests_pytorch/core/test_lightning_module.py
+++ b/tests/tests_pytorch/core/test_lightning_module.py
@@ -291,7 +291,6 @@ def configure_optimizers(self):
     ],
 )
 def test_device_placement(tmpdir, accelerator, device):
-
     model = BoringModel()
     trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator, devices=1)
     trainer.fit(model)
diff --git a/tests/tests_pytorch/models/test_hparams.py b/tests/tests_pytorch/models/test_hparams.py
index 80ef49e87fcf2..8f62df5cf05b8 100644
--- a/tests/tests_pytorch/models/test_hparams.py
+++ b/tests/tests_pytorch/models/test_hparams.py
@@ -871,7 +871,6 @@ def test_no_datamodule_for_hparams(tmpdir):
 
 
 def test_colliding_hparams(tmpdir):
-
     model = SaveHparamsModel({"data_dir": "abc", "arg2": "abc"})
     data = DataModuleWithHparams({"data_dir": "foo"})
 
diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py
index 1ed1212840234..5fccb532208b0 100644
--- a/tests/tests_pytorch/profilers/test_profiler.py
+++ b/tests/tests_pytorch/profilers/test_profiler.py
@@ -263,7 +263,6 @@ def advanced_profiler(tmpdir):
 @pytest.mark.flaky(reruns=3)
 @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
 def test_advanced_profiler_durations(advanced_profiler, action: str, expected: list):
-
     for duration in expected:
         with advanced_profiler.profile(action):
             time.sleep(duration)
@@ -490,7 +489,6 @@ def test_pytorch_profiler_nested_emit_nvtx():
 
 
 def test_register_record_function(tmpdir):
-
     use_cuda = torch.cuda.is_available()
     pytorch_profiler = PyTorchProfiler(
         export_to_chrome=False,
diff --git a/tests/tests_pytorch/strategies/test_registry.py b/tests/tests_pytorch/strategies/test_registry.py
index 8536e0b8b3438..eac8be6e55662 100644
--- a/tests/tests_pytorch/strategies/test_registry.py
+++ b/tests/tests_pytorch/strategies/test_registry.py
@@ -40,7 +40,6 @@
     ],
 )
 def test_strategy_registry_with_deepspeed_strategies(strategy_name, init_params):
-
     assert strategy_name in StrategyRegistry
     assert StrategyRegistry[strategy_name]["init_params"] == init_params
     assert StrategyRegistry[strategy_name]["strategy"] == DeepSpeedStrategy
@@ -49,7 +48,6 @@ def test_strategy_registry_with_deepspeed_strategies(strategy_name, init_params)
 @RunIf(deepspeed=True)
 @pytest.mark.parametrize("strategy", ["deepspeed", "deepspeed_stage_2_offload", "deepspeed_stage_3"])
 def test_deepspeed_strategy_registry_with_trainer(tmpdir, strategy):
-
     trainer = Trainer(default_root_dir=tmpdir, strategy=strategy, precision=16)
 
     assert isinstance(trainer.strategy, DeepSpeedStrategy)
@@ -68,7 +66,6 @@ def test_tpu_spawn_debug_strategy_registry(xla_available):
 
 
 def test_fsdp_strategy_registry(tmpdir):
-
     strategy = "fsdp"
 
     assert strategy in StrategyRegistry
diff --git a/tests/tests_pytorch/test_cli.py b/tests/tests_pytorch/test_cli.py
index 5e864cea3568d..174d6161ff7e8 100644
--- a/tests/tests_pytorch/test_cli.py
+++ b/tests/tests_pytorch/test_cli.py
@@ -133,7 +133,6 @@ def on_train_start(callback, trainer, _):
 
 
 def test_lightning_cli_args_callbacks(tmpdir):
-
     callbacks = [
         dict(
             class_path="pytorch_lightning.callbacks.LearningRateMonitor",
@@ -203,7 +202,6 @@ def on_fit_start(self):
 
 
 def test_lightning_cli_args(tmpdir):
-
     cli_args = [
         "fit",
         f"--data.data_dir={tmpdir}",
@@ -254,7 +252,6 @@ def test_lightning_env_parse(tmpdir):
 
 
 def test_lightning_cli_save_config_cases(tmpdir):
-
     config_path = tmpdir / "config.yaml"
     cli_args = ["fit", f"--trainer.default_root_dir={tmpdir}", "--trainer.logger=False", "--trainer.fast_dev_run=1"]
 
@@ -330,7 +327,6 @@ def any_model_any_data_cli():
 
 
 def test_lightning_cli_help():
-
     cli_args = ["any.py", "fit", "--help"]
     out = StringIO()
     with mock.patch("sys.argv", cli_args), redirect_stdout(out), pytest.raises(SystemExit):
@@ -875,7 +871,6 @@ def test_lightning_cli_datamodule_short_arguments():
 
 @pytest.mark.parametrize("use_class_path_callbacks", [False, True])
 def test_callbacks_append(use_class_path_callbacks):
-
     """This test validates registries are used when simplified command line are being used."""
     cli_args = [
         "--optimizer",
diff --git a/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py b/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py
index 77a4888351cf2..97a0c16bf4999 100644
--- a/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py
+++ b/tests/tests_pytorch/trainer/connectors/test_accelerator_connector.py
@@ -418,7 +418,6 @@ def test_strategy_choice_gpu_instance(strategy_class):
 @RunIf(min_cuda_gpus=2)
 @pytest.mark.parametrize("strategy_class", [DDPSpawnStrategy, DDPStrategy])
 def test_device_type_when_strategy_instance_gpu_passed(strategy_class):
-
     trainer = Trainer(strategy=strategy_class(), accelerator="gpu", devices=2)
     assert isinstance(trainer.strategy, strategy_class)
     assert isinstance(trainer.accelerator, CUDAAccelerator)
@@ -426,7 +425,6 @@ def test_device_type_when_strategy_instance_gpu_passed(strategy_class):
 
 @pytest.mark.parametrize("precision", [1, 12, "invalid"])
 def test_validate_precision_type(precision):
-
     with pytest.raises(MisconfigurationException, match=f"Precision {repr(precision)} is invalid"):
         Trainer(precision=precision)
 
diff --git a/tests/tests_pytorch/trainer/connectors/test_signal_connector.py b/tests/tests_pytorch/trainer/connectors/test_signal_connector.py
index a35f5f28dcc81..944972edbf647 100644
--- a/tests/tests_pytorch/trainer/connectors/test_signal_connector.py
+++ b/tests/tests_pytorch/trainer/connectors/test_signal_connector.py
@@ -47,7 +47,6 @@ def test_signal_handlers_restored_in_teardown():
 @pytest.mark.parametrize("terminate_gracefully", [False, True])
 @RunIf(skip_windows=True)
 def test_fault_tolerant_sig_handler(register_handler, terminate_gracefully, tmpdir):
-
     if register_handler:
 
         def handler(*_):
diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py
index 3c68bb38ccc8e..e79cec83a012d 100644
--- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py
+++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py
@@ -258,7 +258,6 @@ def test_fx_validator_integration(tmpdir):
 
 @RunIf(min_cuda_gpus=2)
 def test_epoch_results_cache_dp(tmpdir):
-
     root_device = torch.device("cuda", 0)
 
     class TestModel(BoringModel):
diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
index 8a44b7e131644..7582c994c6da5 100644
--- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
+++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
@@ -398,7 +398,6 @@ def validation_step(self, batch, batch_idx):
     ],
 )
 def test_logging_sync_dist_true(tmpdir, devices, accelerator):
-
     """Tests to ensure that the sync_dist flag works (should just return the original value)"""
     fake_result = 1
     model = LoggingSyncDistModel(fake_result)
diff --git a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
index 0fcacf080a4d7..07270b976055a 100644
--- a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
+++ b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
@@ -760,7 +760,6 @@ def on_train_end(self):
 
 
 def train_manual_optimization(tmpdir, strategy, model_cls=TesManualOptimizationDDPModel):
-
     seed_everything(42)
 
     model = model_cls()
diff --git a/tests/tests_pytorch/trainer/test_dataloaders.py b/tests/tests_pytorch/trainer/test_dataloaders.py
index 08e81e5915351..5ad7bd037f13a 100644
--- a/tests/tests_pytorch/trainer/test_dataloaders.py
+++ b/tests/tests_pytorch/trainer/test_dataloaders.py
@@ -1078,7 +1078,6 @@ def validation_epoch_end(self, outputs):
 
 @pytest.mark.parametrize("n", ["test", -1])
 def test_dataloaders_load_every_n_epochs_exception(tmpdir, n):
-
     with pytest.raises(MisconfigurationException, match="should be an int >"):
         Trainer(default_root_dir=tmpdir, reload_dataloaders_every_n_epochs=n)
 
diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py
index aa3af86abfe27..f43e9e7ea5a76 100644
--- a/tests/tests_pytorch/trainer/test_trainer.py
+++ b/tests/tests_pytorch/trainer/test_trainer.py
@@ -1508,7 +1508,6 @@ def test_trainer_predict_ddp_spawn(tmpdir, accelerator):
 
 @pytest.mark.parametrize("dataset_cls", [RandomDataset, RandomIterableDatasetWithLen, RandomIterableDataset])
 def test_index_batch_sampler_wrapper_with_iterable_dataset(dataset_cls, tmpdir):
-
     ds = dataset_cls(32, 8)
     loader = DataLoader(ds)
     is_iterable_dataset = isinstance(ds, IterableDataset)
@@ -1592,7 +1591,6 @@ def configure_optimizers(self):
     ],
 )
 def test_setup_hook_move_to_device_correctly(tmpdir, accelerator):
-
     """Verify that if a user defines a layer in the setup hook function, this is moved to the correct device."""
 
     class TestModel(BoringModel):
@@ -1699,7 +1697,6 @@ def backward(self, *args, **kwargs):
 
 
 def test_check_val_every_n_epoch_exception(tmpdir):
-
     with pytest.raises(MisconfigurationException, match="should be an integer."):
         Trainer(default_root_dir=tmpdir, max_epochs=1, check_val_every_n_epoch=1.2)
 
diff --git a/tests/tests_pytorch/utilities/test_fetching.py b/tests/tests_pytorch/utilities/test_fetching.py
index f1ecabdbdd55a..1e7f664d44e77 100644
--- a/tests/tests_pytorch/utilities/test_fetching.py
+++ b/tests/tests_pytorch/utilities/test_fetching.py
@@ -219,7 +219,6 @@ def test_dataloader(self):
 @pytest.mark.flaky(reruns=3)
 @pytest.mark.parametrize("accelerator", [pytest.param("cuda", marks=RunIf(min_cuda_gpus=1))])
 def test_trainer_num_prefetch_batches(tmpdir, accelerator):
-
     model = RecommenderModel()
 
     class AssertFetcher(Callback):
@@ -464,7 +463,6 @@ def __init__(self) -> None:
 
 
 def test_transfer_hooks_with_unpacking(tmpdir):
-
     """This test asserts the `transfer_batch` hooks are called only once per batch."""
 
     class RandomDictDataset(RandomDataset):
diff --git a/tests/tests_pytorch/utilities/test_parameter_tying.py b/tests/tests_pytorch/utilities/test_parameter_tying.py
index 67a69efa58be9..1159b959186c0 100644
--- a/tests/tests_pytorch/utilities/test_parameter_tying.py
+++ b/tests/tests_pytorch/utilities/test_parameter_tying.py
@@ -39,7 +39,6 @@ def forward(self, x):
     [(BoringModel, []), (ParameterSharingModule, [["layer_1.weight", "layer_3.weight"]])],
 )
 def test_find_shared_parameters(model, expected_shared_params):
-
     assert expected_shared_params == find_shared_parameters(model())