
Commit d13ad1e

docs: 2/3 enable Sphinx nitpicky [pytorch] part 2/n (#18602)
1 parent 18c80d2 commit d13ad1e


14 files changed (+175 −18 lines changed)


Makefile

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ test: clean
 docs: clean
 	pip install -q awscli
 	aws s3 sync --no-sign-request s3://sphinx-packages/ dist/
-	pip install -e . --quiet -r requirements/pytorch/docs.txt -f dist/
+	pip install -e .[all] --quiet -r requirements/pytorch/docs.txt -f dist/
 	cd docs/source-pytorch && $(MAKE) html --jobs $(nproc)

 update:

docs/source-app/code_samples/convert_pl_to_app/train.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 from torchvision import transforms as T
 from torchvision.datasets import MNIST

-import pytorch_lightning as pl
+import lightning.pytorch as pl


 class LitAutoEncoder(pl.LightningModule):
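
Only the import changes here; the module body is untouched. For readers following along, a minimal sketch of how such a module looks under the unified ``lightning.pytorch`` namespace (layer sizes and the loss are illustrative, not necessarily this example's exact code):

import torch
import lightning.pytorch as pl
from torch import nn


class LitAutoEncoder(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # illustrative sizes for flattened 28x28 MNIST images
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3))
        self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 28 * 28))

    def training_step(self, batch, batch_idx):
        x, _ = batch
        x = x.view(x.size(0), -1)
        x_hat = self.decoder(self.encoder(x))
        loss = nn.functional.mse_loss(x_hat, x)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)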

docs/source-pytorch/cli/lightning_cli_advanced.rst

Lines changed: 2 additions & 2 deletions
@@ -196,15 +196,15 @@ configuration files and automatic creation of objects, so you don't need to do i
 To somewhat overcome these limitations, there is a special key ``dict_kwargs`` that can be used
 to provide arguments that will not be validated during parsing, but will be used for class instantiation.

-For example, then using the ``pytorch_lightning.profilers.PyTorchProfiler`` profiler,
+For example, then using the ``lightning.pytorch.profilers.PyTorchProfiler`` profiler,
 the ``profile_memory`` argument has a type that is determined dynamically. As a result, it's not possible
 to know the expected type during parsing. To account for this, your config file should be set up like this:

 .. code:: yaml

     trainer:
       profiler:
-        class_path: pytorch_lightning.profilers.PyTorchProfiler
+        class_path: lightning.pytorch.profilers.PyTorchProfiler
         dict_kwargs:
           profile_memory: true
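
For context, a sketch of how such a config is typically consumed (the file and class names below are illustrative, not part of this diff): a ``LightningCLI`` entry point is pointed at the YAML via ``--config``, and the ``dict_kwargs`` entries are forwarded to the profiler constructor without parser validation.

# main.py (hypothetical entry point)
from lightning.pytorch.cli import LightningCLI

from my_project.model import MyModel  # hypothetical LightningModule


def cli_main():
    # trainer.profiler, including dict_kwargs, comes from the YAML config above
    LightningCLI(MyModel)


if __name__ == "__main__":
    cli_main()
    # e.g. python main.py fit --config config.yaml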

docs/source-pytorch/conf.py

Lines changed: 142 additions & 3 deletions
@@ -94,7 +94,9 @@ def _load_py_module(name: str, location: str) -> ModuleType:
 assist_local.AssistantCLI.pull_docs_files(
     gh_user_repo="Lightning-AI/lightning-Habana",
     target_dir="docs/source-pytorch/integrations/hpu",
-    checkout="tags/1.0.0",
+    # todo: update after release
+    # checkout="tags/1.0.0",
+    checkout="tags/1.1.0.dev",
 )

 if not _FAST_DOCS_DEV:
@@ -323,14 +325,151 @@ def _load_py_module(name: str, location: str) -> ModuleType:
     "PIL": ("https://pillow.readthedocs.io/en/stable/", None),
     "torchmetrics": ("https://torchmetrics.readthedocs.io/en/stable/", None),
     "graphcore": ("https://docs.graphcore.ai/en/latest/", None),
-    "habana": ("https://lightning-ai.github.io/lightning-Habana/", None),
+    "lightning_habana": ("https://lightning-ai.github.io/lightning-Habana/", None),
+    "tensorboardX": ("https://tensorboardx.readthedocs.io/en/stable/", None),
+    # needed for referencing App from lightning scope
+    "lightning.app": ("https://lightning.ai/docs/app/stable/", None),
+    # needed for referencing Fabric from lightning scope
+    "lightning.fabric": ("https://lightning.ai/docs/fabric/stable/", None),
+    # TODO: these are missing objects.inv
+    # "comet_ml": ("https://www.comet.com/docs/v2/", None),
+    # "neptune": ("https://docs.neptune.ai/", None),
+    # "wandb": ("https://docs.wandb.ai//", None),
 }
-nitpicky = False  # TODO: to be continued
+nitpicky = True
+

 nitpick_ignore = [
     ("py:class", "typing.Self"),
     # missing in generated API
     ("py:exc", "MisconfigurationException"),
+    # TODO: generated list of all existing ATM, need to be fixed
+    ("py:class", "AveragedModel"),
+    ("py:class", "CometExperiment"),
+    ("py:meth", "DataModule.__init__"),
+    ("py:class", "HPUAccelerator"),
+    ("py:class", "Tensor"),
+    ("py:class", "_PATH"),
+    ("py:func", "add_argument"),
+    ("py:func", "add_class_arguments"),
+    ("py:meth", "apply_to_collection"),
+    ("py:attr", "best_model_path"),
+    ("py:attr", "best_model_score"),
+    ("py:attr", "checkpoint_path"),
+    ("py:class", "comet_ml.ExistingExperiment"),
+    ("py:class", "comet_ml.Experiment"),
+    ("py:class", "comet_ml.OfflineExperiment"),
+    ("py:meth", "deepspeed.DeepSpeedEngine.backward"),
+    ("py:attr", "example_input_array"),
+    ("py:class", "jsonargparse._core.ArgumentParser"),
+    ("py:class", "jsonargparse._namespace.Namespace"),
+    ("py:class", "jsonargparse.core.ArgumentParser"),
+    ("py:class", "jsonargparse.namespace.Namespace"),
+    ("py:class", "lightning.fabric.accelerators.xla.XLAAccelerator"),
+    ("py:class", "lightning.fabric.loggers.csv_logs._ExperimentWriter"),
+    ("py:class", "lightning.fabric.loggers.logger._DummyExperiment"),
+    ("py:class", "lightning.fabric.plugins.precision.transformer_engine.TransformerEnginePrecision"),
+    ("py:class", "lightning.fabric.utilities.device_dtype_mixin._DeviceDtypeModuleMixin"),
+    ("py:func", "lightning.fabric.utilities.seed.seed_everything"),
+    ("py:class", "lightning.fabric.utilities.types.LRScheduler"),
+    ("py:class", "lightning.fabric.utilities.types.ReduceLROnPlateau"),
+    ("py:class", "lightning.fabric.utilities.types.Steppable"),
+    ("py:class", "lightning.fabric.wrappers._FabricOptimizer"),
+    ("py:meth", "lightning.pytorch.Callback.on_exception"),
+    ("py:class", "lightning.pytorch.LightningModule"),
+    ("py:meth", "lightning.pytorch.LightningModule.on_train_epoch_end"),
+    ("py:meth", "lightning.pytorch.LightningModule.on_validation_epoch_end"),
+    ("py:meth", "lightning.pytorch.LightningModule.save_hyperparameters"),
+    ("py:meth", "lightning.pytorch.LightningModule.test_step"),
+    ("py:meth", "lightning.pytorch.LightningModule.training_step"),
+    ("py:meth", "lightning.pytorch.LightningModule.validation_step"),
+    ("py:obj", "lightning.pytorch.accelerators.MPSAccelerator"),
+    ("py:meth", "lightning.pytorch.accelerators.accelerator.Accelerator.register_accelerators"),
+    ("py:paramref", "lightning.pytorch.callbacks.Checkpoint._sphinx_paramlinks_save_top_k"),
+    ("py:func", "lightning.pytorch.callbacks.RichProgressBar.configure_columns"),
+    ("py:meth", "lightning.pytorch.callbacks.callback.Callback.on_load_checkpoint"),
+    ("py:meth", "lightning.pytorch.callbacks.callback.Callback.on_save_checkpoint"),
+    ("py:class", "lightning.pytorch.callbacks.checkpoint.Checkpoint"),
+    ("py:meth", "lightning.pytorch.callbacks.progress.progress_bar.ProgressBar.get_metrics"),
+    ("py:class", "lightning.pytorch.callbacks.progress.rich_progress.RichProgressBarTheme"),
+    ("py:class", "lightning.pytorch.callbacks.progress.tqdm_progress.Tqdm"),
+    ("py:class", "lightning.pytorch.cli.ReduceLROnPlateau"),
+    ("py:meth", "lightning.pytorch.core.LightningDataModule.setup"),
+    ("py:meth", "lightning.pytorch.core.LightningModule.configure_model"),
+    ("py:meth", "lightning.pytorch.core.LightningModule.save_hyperparameters"),
+    ("py:meth", "lightning.pytorch.core.LightningModule.setup"),
+    ("py:meth", "lightning.pytorch.core.hooks.ModelHooks.on_after_batch_transfer"),
+    ("py:meth", "lightning.pytorch.core.hooks.ModelHooks.setup"),
+    ("py:meth", "lightning.pytorch.core.hooks.ModelHooks.transfer_batch_to_device"),
+    ("py:meth", "lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin.save_hyperparameters"),
+    ("py:class", "lightning.pytorch.loggers.Logger"),
+    ("py:func", "lightning.pytorch.loggers.logger.rank_zero_experiment"),
+    ("py:class", "lightning.pytorch.plugins.environments.cluster_environment.ClusterEnvironment"),
+    ("py:class", "lightning.pytorch.plugins.environments.slurm_environment.SLURMEnvironment"),
+    ("py:class", "lightning.pytorch.plugins.io.wrapper._WrappingCheckpointIO"),
+    ("py:func", "lightning.pytorch.seed_everything"),
+    ("py:class", "lightning.pytorch.serve.servable_module.ServableModule"),
+    ("py:class", "lightning.pytorch.serve.servable_module_validator.ServableModuleValidator"),
+    ("py:mod", "lightning.pytorch.strategies"),
+    ("py:class", "lightning.pytorch.strategies.SingleXLAStrategy"),
+    ("py:meth", "lightning.pytorch.strategies.ddp.DDPStrategy.configure_ddp"),
+    ("py:meth", "lightning.pytorch.strategies.ddp.DDPStrategy.setup_distributed"),
+    ("py:meth", "lightning.pytorch.trainer.trainer.Trainer.lightning_module"),
+    ("py:class", "lightning.pytorch.tuner.lr_finder._LRFinder"),
+    ("py:class", "lightning.pytorch.utilities.CombinedLoader"),
+    ("py:obj", "lightning.pytorch.utilities.deepspeed.ds_checkpoint_dir"),
+    ("py:obj", "lightning.pytorch.utilities.memory.is_cuda_out_of_memory"),
+    ("py:obj", "lightning.pytorch.utilities.memory.is_cudnn_snafu"),
+    ("py:obj", "lightning.pytorch.utilities.memory.is_oom_error"),
+    ("py:obj", "lightning.pytorch.utilities.memory.is_out_of_cpu_memory"),
+    ("py:func", "lightning.pytorch.utilities.rank_zero.rank_zero_only"),
+    ("py:class", "lightning.pytorch.utilities.types.LRSchedulerConfig"),
+    ("py:class", "lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig"),
+    ("py:class", "lightning_habana.pytorch.plugins.precision.HPUPrecisionPlugin"),
+    ("py:class", "lightning_habana.pytorch.strategies.HPUParallelStrategy"),
+    ("py:class", "lightning_habana.pytorch.strategies.SingleHPUStrategy"),
+    ("py:obj", "logger.experiment"),
+    ("py:class", "mlflow.tracking.MlflowClient"),
+    ("py:attr", "model"),
+    ("py:meth", "move_data_to_device"),
+    ("py:class", "neptune.Run"),
+    ("py:class", "neptune.handler.Handler"),
+    ("py:meth", "on_after_batch_transfer"),
+    ("py:meth", "on_before_batch_transfer"),
+    ("py:meth", "on_save_checkpoint"),
+    ("py:meth", "optimizer_step"),
+    ("py:class", "out_dict"),
+    ("py:meth", "prepare_data"),
+    ("py:class", "pytorch_lightning.callbacks.device_stats_monitor.DeviceStatsMonitor"),
+    ("py:meth", "setup"),
+    ("py:meth", "test_step"),
+    ("py:meth", "toggle_optimizer"),
+    ("py:class", "torch.ScriptModule"),
+    ("py:class", "torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload"),
+    ("py:class", "torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision"),
+    ("py:class", "torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy"),
+    ("py:class", "torch.distributed.fsdp.sharded_grad_scaler.ShardedGradScaler"),
+    ("py:class", "torch.distributed.fsdp.wrap.ModuleWrapPolicy"),
+    ("py:func", "torch.inference_mode"),
+    ("py:meth", "torch.mean"),
+    ("py:func", "torch.nn.Module.eval"),
+    ("py:func", "torch.no_grad"),
+    ("py:class", "torch.optim.lr_scheduler.LRScheduler"),
+    ("py:meth", "torch.set_default_tensor_type"),
+    ("py:class", "torch.utils.data.DistributedSampler"),
+    ("py:class", "torch_xla.distributed.parallel_loader.MpDeviceLoader"),
+    ("py:func", "torch_xla.distributed.xla_multiprocessing.spawn"),
+    ("py:mod", "tqdm"),
+    ("py:meth", "training_step"),
+    ("py:meth", "transfer_batch_to_device"),
+    ("py:class", "types.FrameType"),
+    ("py:class", "typing.TypeGuard"),
+    ("py:meth", "untoggle_optimizer"),
+    ("py:meth", "validation_step"),
+    ("py:class", "wandb.Artifact"),
+    ("py:func", "wandb.init"),
+    ("py:class", "wandb.sdk.lib.RunDisabled"),
+    ("py:class", "wandb.wandb_run.Run"),
 ]

 # -- Options for todo extension ----------------------------------------------
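
The substantive change is the last hunk: ``nitpicky = True`` makes Sphinx warn about every cross-reference it cannot resolve, and ``nitpick_ignore`` lists ``(role, target)`` pairs to silence references that are known to be unresolvable. A minimal sketch of how the two settings interact in any Sphinx ``conf.py`` (the entries shown are examples, not this repo's full configuration):

# conf.py -- minimal sketch of nitpicky mode
extensions = ["sphinx.ext.intersphinx"]

# lets references such as :class:`torchmetrics.Metric` resolve against the external inventory
intersphinx_mapping = {
    "torchmetrics": ("https://torchmetrics.readthedocs.io/en/stable/", None),
}

# warn on every cross-reference that cannot be resolved
nitpicky = True

# (role, target) pairs that are expected to stay unresolved
nitpick_ignore = [
    ("py:class", "Tensor"),
    ("py:meth", "torch.mean"),
]

When the build runs with ``sphinx-build -W``, those warnings become errors, which is presumably why the commit seeds ``nitpick_ignore`` with every currently broken reference before flipping the flag.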

docs/source-pytorch/extensions/logging.rst

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ The :meth:`~lightning.pytorch.core.LightningModule.log` method has a few options
 * ``on_epoch``: Automatically accumulates and logs at the end of the epoch.
 * ``prog_bar``: Logs to the progress bar (Default: ``False``).
 * ``logger``: Logs to the logger like ``Tensorboard``, or any other custom logger passed to the :class:`~lightning.pytorch.trainer.trainer.Trainer` (Default: ``True``).
-* ``reduce_fx``: Reduction function over step values for end of epoch. Uses :meth:`torch.mean` by default and is not applied when a :class:`torchmetrics.Metric` is logged.
+* ``reduce_fx``: Reduction function over step values for end of epoch. Uses :func:`torch.mean` by default and is not applied when a :class:`torchmetrics.Metric` is logged.
 * ``enable_graph``: If True, will not auto detach the graph.
 * ``sync_dist``: If True, reduces the metric across devices. Use with care as this may lead to a significant communication overhead.
 * ``sync_dist_group``: The DDP group to sync across.
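
The ``:meth:`` to ``:func:`` switch matters under nitpicky mode because ``torch.mean`` is documented as a function, not a method, in PyTorch's inventory. As a usage sketch of the ``reduce_fx`` option described above (the loss helper is a hypothetical placeholder):

import torch
import lightning.pytorch as pl


class LitModel(pl.LightningModule):
    def training_step(self, batch, batch_idx):
        loss = self._compute_loss(batch)  # hypothetical helper
        # step values are reduced to a single epoch value with torch.mean (the default)
        self.log("train_loss", loss, on_step=True, on_epoch=True, reduce_fx=torch.mean)
        return loss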

docs/source-pytorch/visualize/logging_advanced.rst

Lines changed: 2 additions & 2 deletions
@@ -208,9 +208,9 @@ Whether the value will be logged only on rank 0. This will prevent synchronizati

 reduce_fx
 =========
-**Default:** :meth:`torch.mean`
+**Default:** :func:`torch.mean`

-Reduction function over step values for end of epoch. Uses :meth:`torch.mean` by default and is not applied when a :class:`torchmetrics.Metric` is logged.
+Reduction function over step values for end of epoch. Uses :func:`torch.mean` by default and is not applied when a :class:`torchmetrics.Metric` is logged.

 .. code-block:: python

docs/source-pytorch/visualize/logging_expert.rst

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ To build your own progress bar, subclass :class:`~lightning.pytorch.callbacks.Pr
 *******************************
 Integrate an experiment manager
 *******************************
-To create an integration between a custom logger and Lightning, subclass :class:`~lightning.pytorch.loggers.base.LightningLoggerBase`
+To create an integration between a custom logger and Lightning, subclass :class:`~lightning.pytorch.loggers.Logger`

 .. code-block:: python
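
The old ``lightning.pytorch.loggers.base.LightningLoggerBase`` target does not resolve; ``lightning.pytorch.loggers.Logger`` is the current base class, so the reference now passes nitpicky mode. A hedged sketch of the subclass shape that section goes on to describe (method bodies are placeholders):

from lightning.pytorch.loggers import Logger
from lightning.pytorch.utilities import rank_zero_only


class MyLogger(Logger):
    @property
    def name(self):
        return "my_logger"

    @property
    def version(self):
        return "0.1"

    @rank_zero_only
    def log_hyperparams(self, params):
        # forward hyperparameters to the experiment manager
        pass

    @rank_zero_only
    def log_metrics(self, metrics, step=None):
        # forward a dict of metrics recorded at `step`
        pass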

src/lightning/pytorch/callbacks/checkpoint.py

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 class Checkpoint(Callback):
     r"""This is the base class for model checkpointing.

-    Expert users may want to subclass it in case of writing custom :class:`~lightning.pytorch.callbacksCheckpoint`
+    Expert users may want to subclass it in case of writing custom :class:`~lightning.pytorch.callbacks.Checkpoint`
     callback, so that the trainer recognizes the custom class as a checkpointing callback.

     """

src/lightning/pytorch/callbacks/early_stopping.py

Lines changed: 3 additions & 1 deletion
@@ -11,7 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-r"""Early Stopping ^^^^^^^^^^^^^^
+r"""
+Early Stopping
+^^^^^^^^^^^^^^

 Monitor a metric and stop training when it stops improving.
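
This only restores the docstring's section header onto separate lines (it had been collapsed onto one). As a quick usage reminder for the callback this module defines, a short sketch (the monitored key and patience are illustrative):

from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import EarlyStopping

# stop training once `val_loss` has not improved for 3 consecutive validation runs
early_stop = EarlyStopping(monitor="val_loss", mode="min", patience=3)
trainer = Trainer(callbacks=[early_stop])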

src/lightning/pytorch/callbacks/finetuning.py

Lines changed: 6 additions & 1 deletion
@@ -11,7 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-r"""Finetuning Callback ^^^^^^^^^^^^^^^^^^^^ Freeze and unfreeze models for finetuning purposes."""
+r"""
+Finetuning Callback
+^^^^^^^^^^^^^^^^^^^^
+
+Freeze and unfreeze models for finetuning purposes.
+"""
 import logging
 from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Union
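
Same docstring-header fix as above. The module provides ``BaseFinetuning``; a hedged sketch of a typical subclass (the ``backbone`` attribute and the milestone epoch are illustrative assumptions):

from lightning.pytorch.callbacks import BaseFinetuning


class FreezeThenUnfreezeBackbone(BaseFinetuning):
    """Illustrative finetuning callback: freeze a backbone, unfreeze it later."""

    def __init__(self, unfreeze_at_epoch=10):
        super().__init__()
        self._unfreeze_at_epoch = unfreeze_at_epoch

    def freeze_before_training(self, pl_module):
        # assumes the LightningModule exposes a `backbone` submodule
        self.freeze(pl_module.backbone)

    def finetune_function(self, pl_module, current_epoch, optimizer):
        if current_epoch == self._unfreeze_at_epoch:
            self.unfreeze_and_add_param_group(
                modules=pl_module.backbone,
                optimizer=optimizer,
                train_bn=True,
            )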
