
Commit 024cf23
Author: Sean Naren

Remove convert_to_half, suggest using model.half (#7974)

1 parent: f7459f5

2 files changed: +2 -18 lines

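The user-facing change: rather than asking IPUPlugin to convert the model via convert_model_to_half=True, the user now calls model.half() before handing the model to the Trainer. A minimal before/after sketch, reassembled from the diffs below (IPUModel stands in for any LightningModule; ipus=1 assumes IPU hardware is available):

    from pytorch_lightning import Trainer

    # Before this commit: the plugin converted the weights itself.
    trainer = Trainer(ipus=1, precision=16, plugins=IPUPlugin(convert_model_to_half=True))

    # After this commit: convert the weights explicitly, then train as usual.
    model = IPUModel()
    model = model.half()
    trainer = Trainer(ipus=1, precision=16)
    trainer.fit(model)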

pytorch_lightning/plugins/training_type/ipu.py

Lines changed: 0 additions & 9 deletions
@@ -19,7 +19,6 @@
 import torch
 from torch.utils.data import DataLoader
 
-from pytorch_lightning import _logger as log
 from pytorch_lightning.callbacks import GradientAccumulationScheduler
 from pytorch_lightning.core.lightning import LightningModule
 from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
@@ -67,7 +66,6 @@ def __init__(
         device_iterations: int = 1,
         autoreport: bool = True,
         autoreport_dir: Optional[str] = None,
-        convert_model_to_half: bool = False,
         parallel_devices: Optional[List[torch.device]] = None,
         cluster_environment: Optional[ClusterEnvironment] = None,
         training_opts: Optional['poptorch.Options'] = None,
@@ -82,7 +80,6 @@ def __init__(
             autoreport: Enable auto-reporting for IPUs using PopVision
                 https://docs.graphcore.ai/projects/graphcore-popvision-user-guide/en/latest/graph/graph.html
             autoreport_dir: Optional directory to store autoReport output.
-            convert_model_to_half: Converts the model to half precision, which can be used for pure FP16 training.
             training_opts: Optional ``poptorch.Options`` to override the default created options for training.
             inference_opts: Optional ``poptorch.Options`` to override the default
                 created options for validation/testing and predicting.
@@ -94,7 +91,6 @@ def __init__(
                "Learn more or get started with IPUs at https://www.graphcore.ai/getstarted"
             )
 
-        self.convert_model_to_half = convert_model_to_half
         self.device_iterations = device_iterations
         self.autoreport = autoreport
         self.autoreport_dir = autoreport_dir
@@ -113,12 +109,7 @@ def __init__(
 
     def pre_dispatch(self) -> None:
         self._handle_gradient_accumulation_steps()
-        if self.convert_model_to_half:
-            log.info('Using full 16bit precision, converting LightningModule weights to FP16.')
-            self.model = self.model.half()
         precision = self.lightning_module.trainer.precision
-        precision = 16 if self.convert_model_to_half else precision
-
         model = LightningIPUModule(self.lightning_module, precision)
         self.model = model

tests/accelerators/test_ipu.py

Lines changed: 2 additions & 9 deletions
@@ -200,20 +200,13 @@ class TestCallback(Callback):
 
         def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
             assert trainer.accelerator.model.precision == 16
-            assert trainer.accelerator.training_type_plugin.convert_model_to_half
             for param in trainer.accelerator.model.parameters():
                 assert param.dtype == torch.float16
             raise SystemExit
 
     model = IPUModel()
-    trainer = Trainer(
-        default_root_dir=tmpdir,
-        fast_dev_run=True,
-        ipus=1,
-        precision=16,
-        plugins=IPUPlugin(convert_model_to_half=True),
-        callbacks=TestCallback()
-    )
+    model = model.half()
+    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=1, precision=16, callbacks=TestCallback())
 
     assert isinstance(trainer.accelerator.training_type_plugin, IPUPlugin)
     assert isinstance(trainer.accelerator.precision_plugin, IPUPrecisionPlugin)
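
Note that torch.nn.Module.half() casts floating-point parameters and buffers in place and also returns the module, so model.half() and model = model.half() are equivalent here. A quick standalone check of the conversion, mirroring the callback's assertion (IPUModel is the test's toy module, whose parameters are all floating point):

    model = IPUModel()
    model.half()  # in-place cast; also returns the module
    assert all(p.dtype == torch.float16 for p in model.parameters())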
