Skip to content

Commit 9aba366

Browse files
committed
Remove the precision attribute from LightningModule
changelog fix
1 parent f24349b commit 9aba366

File tree

4 files changed

+5
-10
lines changed

4 files changed

+5
-10
lines changed

src/pytorch_lightning/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -107,6 +107,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
107107

108108
### Removed
109109

110+
- Removed the `LightningModule.precision` attribute ([#16203](https://github.com/Lightning-AI/lightning/pull/16203))
111+
112+
110113
- Removed deprecated `pytorch_lightning.utilities.memory.get_gpu_memory_map` in favor of `pytorch_lightning.accelerators.cuda.get_nvidia_gpu_stats` ([#15617](https://github.com/Lightning-AI/lightning/pull/15617))
111114

112115

src/pytorch_lightning/core/module.py

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -110,9 +110,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
110110
# pointer to the trainer object
111111
self._trainer: Optional["pl.Trainer"] = None
112112

113-
# the precision used
114-
self.precision: Union[int, str] = 32
115-
116113
# optionally can be set by user
117114
self._example_input_array: Optional[Union[Tensor, Tuple, Dict]] = None
118115
self._current_fx_name: Optional[str] = None

src/pytorch_lightning/trainer/connectors/data_connector.py

Lines changed: 1 addition & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -144,13 +144,8 @@ def attach_data(
144144
elif self.trainer.state.fn == TrainerFn.PREDICTING:
145145
_check_dataloader_none(predict_dataloaders, self._predict_dataloader_source, self.trainer.state.fn)
146146

147-
# set local properties on the model
148-
self._copy_trainer_model_properties(model)
149-
150-
def _copy_trainer_model_properties(self, model: "pl.LightningModule") -> None:
147+
# Attach the trainer to the LightningModule
151148
model.trainer = proxy(self.trainer)
152-
# for backward compatibility
153-
model.precision = int(self.trainer.precision) if self.trainer.precision != "bf16" else "bf16"
154149

155150
def attach_dataloaders(
156151
self,

src/pytorch_lightning/utilities/model_summary/model_summary.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -189,7 +189,7 @@ def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:
189189
self._layer_summary = self.summarize()
190190
# 1 byte -> 8 bits
191191
# TODO: how do we compute precision_megabytes in case of mixed precision?
192-
precision = self._model.precision if isinstance(self._model.precision, int) else 32
192+
precision = self._model.trainer.precision if isinstance(self._model.trainer.precision, int) else 32
193193
self._precision_megabytes = (precision / 8.0) * 1e-6
194194

195195
@property

0 commit comments

Comments (0)