Commit 9064b83

justusschock and Borda authored
Co-authored-by: Jirka Borovec <[email protected]>
1 parent b3ebc18 commit 9064b83

File tree

5 files changed: +640 −4 lines changed


pytorch_lightning/plugins/legacy/rpc_plugin.py

Lines changed: 4 additions & 4 deletions
@@ -39,7 +39,7 @@ class RPCPlugin(DDPPlugin):
 
     def __init__(self, rpc_timeout_sec: float = DEFAULT_RPC_TIMEOUT_SEC, **kwargs):
         self.rpc_timeout_sec = rpc_timeout_sec
-        self.rpc_initialized = False
+        self._is_rpc_initialized = False
         super().__init__(**kwargs)
 
     def init_rpc_connection(self,
@@ -48,7 +48,7 @@ def init_rpc_connection(self,
         os.environ['MASTER_PORT'] = os.getenv('RPC_MASTER_PORT', '15000')
         rpc.init_rpc(f"worker{global_rank}", rank=global_rank, world_size=world_size)
         rpc._set_rpc_timeout(self.rpc_timeout_sec)
-        self.rpc_initialized = True
+        self._is_rpc_initialized = True
 
     def rpc_save_model(self,
                        save_model_fn,
@@ -86,9 +86,9 @@ def on_accelerator_exit_rpc_process(self, trainer) -> None:
         self.exit_rpc_process()
 
     def exit_rpc_process(self):
-        if self.rpc_initialized:
+        if self._is_rpc_initialized:
             torch.distributed.rpc.shutdown()
-            self.rpc_initialized = False
+            self._is_rpc_initialized = False
 
     @property
     def return_after_exit_rpc_process(self) -> bool:
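
The legacy plugin's change renames the initialization flag from rpc_initialized to the private _is_rpc_initialized, signalling that callers should go through init_rpc_connection() and exit_rpc_process() rather than reading or toggling the flag directly. A minimal caller sketch under that assumption (hypothetical code, not part of this commit):

    # Hypothetical caller sketch: teardown goes through exit_rpc_process(),
    # which consults the now-private flag internally.
    plugin = RPCPlugin(rpc_timeout_sec=60.)
    plugin.init_rpc_connection(global_rank=0, world_size=1)
    # ... dispatch RPC work ...
    plugin.exit_rpc_process()  # calls rpc.shutdown() only if RPC was initialized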
Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+from contextlib import suppress
+from typing import Optional
+
+import torch
+
+from pytorch_lightning.cluster_environments.cluster_environment import ClusterEnvironment
+from pytorch_lightning.core.lightning import LightningModule
+from pytorch_lightning.plugins.training_type.ddp import DDPPlugin
+from pytorch_lightning.utilities import _RPC_AVAILABLE
+
+DEFAULT_RPC_TIMEOUT_SEC = 60.
+if _RPC_AVAILABLE:
+    from torch.distributed import rpc
+    with suppress(ModuleNotFoundError, ImportError):
+        from torch.distributed.rpc.constants import DEFAULT_RPC_TIMEOUT_SEC
+
+
+class RPCPlugin(DDPPlugin):
+    """
+    Backbone for RPC plugins built on top of DDP.
+    RPC introduces different communication behaviour than DDP. Unlike DDP, processes are potentially not
+    required to run the same code as the main process.
+    This leads to edge cases where logic needs to be re-defined. This class contains the special cases
+    that need to be addressed when using RPC communication to build custom RPC plugins.
+    """
+
+    def __init__(
+        self,
+        parallel_devices,
+        num_nodes=1,
+        cluster_environment: ClusterEnvironment = None,
+        sync_batchnorm=False,
+        rpc_timeout_sec: float = DEFAULT_RPC_TIMEOUT_SEC,
+        **kwargs
+    ):
+        self.rpc_timeout_sec = rpc_timeout_sec
+        self._is_rpc_initialized = False
+        super().__init__(
+            parallel_devices=parallel_devices,
+            num_nodes=num_nodes,
+            cluster_environment=cluster_environment,
+            sync_batchnorm=sync_batchnorm,
+            **kwargs
+        )
+
+    def init_rpc_connection(self, global_rank: int, world_size: int) -> None:
+        os.environ['MASTER_PORT'] = os.getenv('RPC_MASTER_PORT', '15000')
+        rpc.init_rpc(f"worker{global_rank}", rank=global_rank, world_size=world_size)
+        rpc._set_rpc_timeout(self.rpc_timeout_sec)
+        self._is_rpc_initialized = True
+
+    def rpc_save_model(self, save_model_fn, last_filepath, trainer, pl_module) -> None:
+        """
+        Override to save the model to disk.
+        This is required, as the main process has to handle aggregating model states from RPC processes.
+
+        Args:
+            save_model_fn: The saving function used to save the final model.
+            last_filepath: The filepath to save the model to.
+            trainer: The trainer object.
+            pl_module: The LightningModule.
+        """
+        raise NotImplementedError
+
+    def on_main_rpc_connection(self, trainer) -> None:
+        """
+        Called when the main RPC connection has been established.
+
+        Args:
+            trainer: The trainer object.
+        """
+        raise NotImplementedError
+
+    def on_accelerator_exit_rpc_process(self) -> None:
+        """
+        Called to exit the RPC process within the accelerator, which is managed by the main process.
+
+        Note:
+            Unlike the legacy hook, this variant no longer receives the trainer.
+        """
+        self.exit_rpc_process()
+
+    def exit_rpc_process(self):
+        if self._is_rpc_initialized:
+            torch.distributed.rpc.shutdown()
+            self._is_rpc_initialized = False
+
+    @property
+    def return_after_exit_rpc_process(self) -> bool:
+        """
+        Override to decide whether to skip the train/test function after shutdown has completed.
+        Usually RPC shutdown is a join/exit function, after which we want to exit the process.
+
+        Returns:
+            Whether to return after RPC exit.
+        """
+        raise NotImplementedError
+
+    def worker_optimizer_step(self, model: LightningModule, opt_idx: int, *args, **kwargs) -> None:
+        """
+        Called when the optimizer step is run on the main process. Used to signal any RPC workers to run the optimizer step.
+
+        Args:
+            model: The LightningModule.
+            opt_idx: The index of the optimizer to carry out the step on.
+        """
+        raise NotImplementedError
+
+    @property
+    def is_main_rpc_process(self) -> bool:
+        """
+        Override to add logic that determines whether the current process is the main RPC process.
+        """
+        raise NotImplementedError
+
+    def barrier(self, name: Optional[str] = None) -> None:
+        """
+        Override to define distributed sync communication. This needs to be handled differently because
+        the RPC connection is managing certain processes at the same time.
+        """
+        raise NotImplementedError
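
Every hook in the new file either raises NotImplementedError or delegates to one that does, so the class is an abstract backbone to be filled in by the concrete RPC plugins this refactor introduces. A minimal sketch of what such a subclass could look like; the class name, the rank-0-is-main convention, the save-function call signature, and the availability of self.global_rank on the DDP base are illustrative assumptions, not part of this commit:

    # Hypothetical subclass sketch, for illustration only.
    # Assumes RPCPlugin (as defined above) and torch are importable.
    from typing import Optional

    import torch


    class MyRPCPlugin(RPCPlugin):

        def on_main_rpc_connection(self, trainer) -> None:
            # Set up whatever state the main process needs once RPC is up.
            pass

        def rpc_save_model(self, save_model_fn, last_filepath, trainer, pl_module) -> None:
            # Only the main process persists the aggregated model state.
            # (Call signature assumed from the hook's documented parameters.)
            if self.is_main_rpc_process:
                save_model_fn(last_filepath, trainer, pl_module)

        @property
        def is_main_rpc_process(self) -> bool:
            # Assumption for this sketch: global rank 0 is the main RPC process.
            return self.global_rank == 0

        @property
        def return_after_exit_rpc_process(self) -> bool:
            # RPC shutdown joins the workers, so skip train/test afterwards.
            return True

        def barrier(self, name: Optional[str] = None) -> None:
            # Workers are driven over RPC by the main process, so only the main
            # process takes part in the collective barrier here.
            if self.is_main_rpc_process:
                torch.distributed.barrier()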
