Commit 1c87f1f

Borda and awaelchli authored
remove legacy plugins (#5950)
* remove legacy plugins
* imports
* formatting
* fix docs references
* fix cluster environment inheritance

Co-authored-by: Adrian Wälchli <[email protected]>
1 parent 4531b1c commit 1c87f1f

File tree

19 files changed (+14, -1458 lines)


.yapfignore

Lines changed: 0 additions & 4 deletions
@@ -1,5 +1 @@
 .git/*
-
-
-# TODO
-pytorch_lightning/plugins/legacy/*

docs/source/advanced/multi_gpu.rst

Lines changed: 5 additions & 5 deletions
@@ -580,9 +580,9 @@ Below are the possible configurations we support.
 
 Implement Your Own Distributed (DDP) training
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If you need your own way to init PyTorch DDP you can override :meth:`pytorch_lightning.plugins.legacy.ddp_plugin.DDPPlugin.init_ddp_connection`.
+If you need your own way to init PyTorch DDP you can override :meth:`pytorch_lightning.plugins.training_type.ddp.DDPPlugin.init_ddp_connection`.
 
-If you also need to use your own DDP implementation, override :meth:`pytorch_lightning.plugins.legacy.ddp_plugin.DDPPlugin.configure_ddp`.
+If you also need to use your own DDP implementation, override :meth:`pytorch_lightning.plugins.training_type.ddp.DDPPlugin.configure_ddp`.
 
 
 ----------
@@ -679,7 +679,7 @@ In addition, we use Gradient Checkpointing to reduce GPU memory requirements fur
 
 Reference: https://arxiv.org/abs/1811.06965
 
-.. note:: DDPSequentialPlugin is currently supported only for Pytorch 1.6.
+.. note:: RPCSequentialPlugin is currently supported only for Pytorch 1.6.
 
 To get started, install FairScale using the command below. We install a specific branch which contains PyTorch related fixes for Sequential Parallelism.
 
@@ -692,7 +692,7 @@ This should be kept within the ``sequential_module`` variable within your ``Ligh
 
 .. code-block:: python
 
-    from pytorch_lightning.plugins.legacy.ddp_sequential_plugin import DDPSequentialPlugin
+    from pytorch_lightning.plugins.training_type.rpc_sequential import RPCSequentialPlugin
     from pytorch_lightning import LightningModule
 
     class MyModel(LightningModule):
@@ -702,7 +702,7 @@ This should be kept within the ``sequential_module`` variable within your ``Ligh
 
     # Split my module across 4 gpus, one layer each
    model = MyModel()
-    plugin = DDPSequentialPlugin(balance=[1, 1, 1, 1])
+    plugin = RPCSequentialPlugin(balance=[1, 1, 1, 1])
     trainer = Trainer(accelerator='ddp', gpus=4, plugins=[plugin])
     trainer.fit(model)
 

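For readers following the new import path, a minimal sketch of such an override after this commit might look like the code below. It assumes the post-refactor DDPPlugin exposes init_ddp_connection(global_rank, world_size) and a no-argument configure_ddp(), and that a bare DDPPlugin subclass can be passed through the Trainer's plugins argument; signatures vary between Lightning versions, so treat this as illustrative only.

    import torch.distributed as dist

    from pytorch_lightning import Trainer
    from pytorch_lightning.plugins.training_type.ddp import DDPPlugin


    class MyDDPPlugin(DDPPlugin):

        def init_ddp_connection(self, global_rank, world_size):
            # assumed signature: set up the process group yourself instead of
            # relying on Lightning's default initialization
            if not dist.is_initialized():
                dist.init_process_group("nccl", rank=global_rank, world_size=world_size)

        def configure_ddp(self):
            # assumed hook: wrap the model with your own DDP implementation here;
            # deferring to the parent keeps the default DistributedDataParallel wrapping
            super().configure_ddp()


    # hypothetical wiring: pass the custom plugin to the Trainer
    trainer = Trainer(accelerator="ddp", gpus=2, plugins=[MyDDPPlugin()])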
pl_examples/basic_examples/conv_sequential_example.py

Lines changed: 6 additions & 6 deletions
@@ -18,7 +18,7 @@
 to balance across your GPUs.
 
 To run:
-python conv_model_sequential_example.py --accelerator ddp --gpus 4 --max_epochs 1 --batch_size 256 --use_ddp_sequential
+python conv_model_sequential_example.py --accelerator ddp --gpus 4 --max_epochs 1 --batch_size 256 --use_rpc_sequential
 """
 import math
 from argparse import ArgumentParser
@@ -32,7 +32,7 @@
 from pl_examples import cli_lightning_logo
 from pytorch_lightning import Trainer
 from pytorch_lightning.metrics.functional import accuracy
-from pytorch_lightning.plugins.legacy.ddp_sequential_plugin import DDPSequentialPlugin
+from pytorch_lightning.plugins import RPCSequentialPlugin
 from pytorch_lightning.utilities import _BOLTS_AVAILABLE, _FAIRSCALE_PIPE_AVAILABLE
 
 if _BOLTS_AVAILABLE:
@@ -201,7 +201,7 @@ def instantiate_datamodule(args):
 if __name__ == "__main__":
     cli_lightning_logo()
     parser = ArgumentParser(description="Pipe Example")
-    parser.add_argument("--use_ddp_sequential", action="store_true")
+    parser.add_argument("--use_rpc_sequential", action="store_true")
     parser = Trainer.add_argparse_args(parser)
     parser = pl_bolts.datamodules.CIFAR10DataModule.add_argparse_args(parser)
     args = parser.parse_args()
@@ -212,8 +212,8 @@ def instantiate_datamodule(args):
     cifar10_dm = instantiate_datamodule(args)
 
     plugins = None
-    if args.use_ddp_sequential:
-        plugins = DDPSequentialPlugin()
+    if args.use_rpc_sequential:
+        plugins = RPCSequentialPlugin()
 
     model = LitResnet(batch_size=args.batch_size, manual_optimization=not args.automatic_optimization)
 
@@ -223,4 +223,4 @@ def instantiate_datamodule(args):
 
     if trainer.accelerator_backend.rpc_enabled:
         # Called at the end of trainer to ensure all processes are killed
-        trainer.accelerator_backend.ddp_plugin.exit_rpc_process()
+        trainer.training_type_plugin.exit_rpc_process()

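Condensed, the renamed flag wires the plugin into the example as sketched below. The Trainer construction shown here and the model/datamodule objects are assumptions standing in for code elsewhere in the example script; only the flag, the plugin, and the shutdown call come from the hunks above.

    plugins = None
    if args.use_rpc_sequential:
        plugins = RPCSequentialPlugin()  # partitions the sequential module across GPUs

    trainer = Trainer.from_argparse_args(args, plugins=plugins)  # assumed construction
    trainer.fit(model, cifar10_dm)

    if trainer.accelerator_backend.rpc_enabled:
        # the RPC worker processes must be shut down explicitly once training ends
        trainer.training_type_plugin.exit_rpc_process()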
pytorch_lightning/plugins/environments/cluster_environment.py

Lines changed: 1 addition & 3 deletions
@@ -12,10 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from pytorch_lightning.plugins.legacy.plugin import LightningPlugin
 
-
-class ClusterEnvironment(LightningPlugin):
+class ClusterEnvironment:
 
     def __init__(self):
         self._world_size = None

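With the legacy base class gone, a custom cluster environment subclasses the plain ClusterEnvironment directly. The sketch below illustrates the pattern; the hook names (master_address, master_port, world_size) and the environment variables are assumptions, so check the class in your Lightning version for the exact interface.

    import os

    from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment


    class MyClusterEnvironment(ClusterEnvironment):

        def master_address(self):
            # hypothetical scheduler-specific variables; adapt to your cluster
            return os.environ.get("MY_MASTER_ADDR", "127.0.0.1")

        def master_port(self):
            return int(os.environ.get("MY_MASTER_PORT", 29500))

        def world_size(self):
            # the base class initializes self._world_size to None in __init__
            return self._world_size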
pytorch_lightning/plugins/legacy/__init__.py

Whitespace-only changes.

pytorch_lightning/plugins/legacy/apex.py

Lines changed: 0 additions & 144 deletions
This file was deleted.
