
Commit c4d9fb8

fix make doc and update precision reference
1 parent f3ede47 commit c4d9fb8


3 files changed (+5, -6 lines)


pytorch_lightning/accelerators/accelerator.py

Lines changed: 3 additions & 4 deletions

@@ -280,10 +280,9 @@ def amp_backend(self) -> Optional[LightningEnum]:

     @property
     def precision(self) -> Union[str, int]:
-        """
-        .. deprecated
-            This method is deprecated will be removed soon.
-            Use :`training_type_plugin.precision_plugin.precision` instead.
+        """This method is deprecated and will be removed soon.
+
+        Use `training_type_plugin.precision_plugin.precision` instead.
         """
         rank_zero_deprecation(
             f"`{self.__class__.__name__}.precision` was and will be removed soon"

pytorch_lightning/plugins/training_type/ipu.py

Lines changed: 1 addition & 1 deletion

@@ -118,7 +118,7 @@ def setup(self) -> None:
         self.lightning_module.trainer._update_dataloader = self._convert_to_poptorch_loader

     def pre_dispatch(self) -> None:
-        model = LightningIPUModule(self.lightning_module, self.precision)
+        model = LightningIPUModule(self.lightning_module, self.precision_plugin.precision)
         self.model = model

         # reset the backup

pytorch_lightning/plugins/training_type/sharded.py

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ def _reinit_optimizers_with_oss(self, optimizers: List[Union[Optimizer, Lightnin
                 optim_class = type(optimizer)
                 zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
                 if _FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE:
-                    precision = self._precision or self.precision
+                    precision = self._precision or self.precision_plugin.precision
                     is_fp16 = precision in ("mixed", 16)
                     # For multi-node training, compressing the model shards in fp16 before broadcasting
                     # improves performance. When using PyTorch AMP, it will not degrade
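The context lines above carry the reasoning for the fp16 broadcast: compressing shards only pays off across nodes, and only when training runs in half or mixed precision. A small standalone sketch of that check follows; the helper name `should_broadcast_fp16` is hypothetical, and the `num_nodes > 1` guard is an assumption inferred from the multi-node comment, since the line that consumes `is_fp16` is cut off in this hunk:

    from typing import Union

    def should_broadcast_fp16(precision: Union[str, int], num_nodes: int) -> bool:
        # Half or mixed precision means fp16-compressed broadcasts will not degrade
        # the model, per the comment in the hunk above ...
        is_fp16 = precision in ("mixed", 16)
        # ... and the compression only helps when shards are broadcast between nodes
        # (assumed guard, not shown in the diff).
        return is_fp16 and num_nodes > 1

    print(should_broadcast_fp16(16, num_nodes=2))       # True
    print(should_broadcast_fp16(32, num_nodes=2))       # False
    print(should_broadcast_fp16("mixed", num_nodes=1))  # False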
