
Commit 1df12f1

add support for simplefsdp+ep
1 parent: 5b5d468

File tree: 10 files changed, +355 -62 lines


torchtitan/experiments/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -4,4 +4,6 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-_supported_experiments = frozenset(["flux", "llama4", "qwen3", "simple_fsdp", "vlm"])
+_supported_experiments = frozenset(
+    ["flux", "llama4", "qwen3", "simple_fsdp.llama3", "simple_fsdp.deepseek_v3", "vlm"]
+)
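
The registry now lists the SimpleFSDP frontends per model family, so the old flat `simple_fsdp` name no longer resolves. A minimal illustration of the effect (the lookup that consumes `_supported_experiments` lives elsewhere in torchtitan and is not part of this diff):

```python
# Illustrative check only: these are the experiment names that
# --model.name must now match for the SimpleFSDP frontends.
from torchtitan.experiments import _supported_experiments

assert "simple_fsdp.llama3" in _supported_experiments
assert "simple_fsdp.deepseek_v3" in _supported_experiments
assert "simple_fsdp" not in _supported_experiments  # flat name was split per model
```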

torchtitan/experiments/simple_fsdp/README.md

Lines changed: 12 additions & 2 deletions
@@ -12,8 +12,16 @@ This folder includes an experimental frontend implementation for [SimpleFSDP: Si
 
 ### Run SimpleFSDP Training on Llama 3
 
+#### Training Llama3 models
+
+```bash
+CONFIG_FILE="./torchtitan/models/llama3/train_configs/llama3_8b.toml" ./run_train.sh --model.name simple_fsdp.llama3 --compile.enable
+```
+
+#### Training DeepSeek_v3 models
+
 ```bash
-CONFIG_FILE="./torchtitan/models/llama3/train_configs/llama3_8b.toml" ./run_train.sh --model.name simple_fsdp --compile.enable
+CONFIG_FILE="./torchtitan/models/deepseek_v3/train_configs/debug_model.toml" ./run_train.sh --model.name simple_fsdp.deepseek_v3 --compile.enable
 ```
 
 ### Composability Support

@@ -30,7 +38,9 @@ Some of the features require the updates from PyTorch, with which we are working
 |Pipeline Parallelism| ✅ |
 |Distributed Checkpointing| ✅ |
 |Float8 Training| 🚧 |
-
+|Expert Parallelism | ✅ |
+|Expert Parallelism + Activation Checkpointing| 🚧 |
+|Expert Parallelism + DualPipe| 🚧 |
 
 ### Citation
 
torchtitan/experiments/simple_fsdp/deepseek_v3/__init__.py

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+#
+# Copyright (c) Meta Platforms, Inc. All Rights Reserved.
+
+from torchtitan.components.loss import build_cross_entropy_loss
+from torchtitan.components.lr_scheduler import build_lr_schedulers
+from torchtitan.components.optimizer import build_optimizers_with_moe_load_balancing
+from torchtitan.components.tokenizer import build_hf_tokenizer
+from torchtitan.datasets.hf_datasets import build_hf_dataloader
+from torchtitan.models.deepseek_v3 import deepseekv3_configs
+from torchtitan.models.llama3 import pipeline_llama
+from torchtitan.protocols.train_spec import TrainSpec
+
+from .model import SimpleFSDPDeepSeekV3Model
+from .parallelize import parallelize_deepseekv3
+
+
+def get_train_spec() -> TrainSpec:
+    return TrainSpec(
+        name="simple_fsdp.deepseek_v3",
+        model_cls=SimpleFSDPDeepSeekV3Model,
+        model_args=deepseekv3_configs,
+        parallelize_fn=parallelize_deepseekv3,
+        pipelining_fn=pipeline_llama,
+        build_optimizers_fn=build_optimizers_with_moe_load_balancing,
+        build_lr_schedulers_fn=build_lr_schedulers,
+        build_dataloader_fn=build_hf_dataloader,
+        build_tokenizer_fn=build_hf_tokenizer,
+        build_loss_fn=build_cross_entropy_loss,
+    )
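
This `TrainSpec` mirrors the llama3 frontend's spec, swapping in the DeepSeek-V3 model class, configs, the MoE-aware optimizer builder, and the new `parallelize_deepseekv3`. A short usage sketch (hypothetical; it assumes the package path inferred above and that `deepseekv3_configs` is a name-to-config mapping like the other model families):

```python
# Hypothetical inspection of the new train spec; the trainer's own wiring
# is not shown in this commit.
from torchtitan.experiments.simple_fsdp.deepseek_v3 import get_train_spec

spec = get_train_spec()
assert spec.name == "simple_fsdp.deepseek_v3"
print(spec.model_cls.__name__)   # SimpleFSDPDeepSeekV3Model
print(sorted(spec.model_args))   # available DeepSeek-V3 config names (assumed mapping)
# spec.parallelize_fn is parallelize_deepseekv3, which applies TP/EP/SimpleFSDP.
```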
torchtitan/experiments/simple_fsdp/deepseek_v3/model.py

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from torchtitan.models.deepseek_v3 import DeepSeekV3Model, DeepSeekV3ModelArgs
+
+from ..simple_fsdp import disable_active_parametrization
+
+
+class SimpleFSDPDeepSeekV3Model(DeepSeekV3Model):
+    def __init__(self, model_args: DeepSeekV3ModelArgs):
+        super().__init__(model_args)
+        self.init_weights()
+
+    def init_weights(self, *args, **kwargs):
+        with disable_active_parametrization():
+            super().init_weights(*args, **kwargs)
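
As with the llama3 wrapper, `init_weights` runs inside `disable_active_parametrization()`, which by its name suspends SimpleFSDP's parameter parametrization while the weights are written. A toy sketch of that pattern only (not torchtitan's implementation, which lives in `simple_fsdp.py`):

```python
# Toy illustration of a disable-while-initializing context manager.
# Names and mechanism are illustrative, not the library's actual code.
from contextlib import contextmanager

_PARAMETRIZATION_ACTIVE = True  # stand-in for SimpleFSDP's real state

@contextmanager
def disable_active_parametrization_toy():
    global _PARAMETRIZATION_ACTIVE
    saved, _PARAMETRIZATION_ACTIVE = _PARAMETRIZATION_ACTIVE, False
    try:
        yield  # parameter writes here bypass the parametrized view
    finally:
        _PARAMETRIZATION_ACTIVE = saved
```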
torchtitan/experiments/simple_fsdp/deepseek_v3/parallelize.py

Lines changed: 159 additions & 0 deletions
@@ -0,0 +1,159 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+import torch.nn as nn
+from torch.distributed.device_mesh import DeviceMesh
+
+from torchtitan.config import JobConfig, TORCH_DTYPE_MAP
+from torchtitan.distributed import ParallelDims
+from torchtitan.distributed.tensor_parallel import maybe_enable_async_tp
+from torchtitan.experiments.llama4.infra.parallelize import apply_moe_ep_tp
+from torchtitan.models.deepseek_v3.infra.parallelize import apply_non_moe_tp
+from torchtitan.models.llama3.infra.parallelize import apply_ac
+from torchtitan.tools.logging import logger
+
+from ..simple_fsdp import data_parallel, MixedPrecisionPolicy
+
+# Adapted from llama4/infra/parallelize.py
+def parallelize_deepseekv3(
+    model: nn.Module,
+    parallel_dims: ParallelDims,
+    job_config: JobConfig,
+):
+    world_mesh = parallel_dims.world_mesh
+    # TODO: TP currently cannot handle uneven seq_len because we set
+    # `use_local_output=True` to use plain Tensors for legacy reasons.
+    # Need to revisit this.
+    assert (
+        job_config.training.seq_len % parallel_dims.seq_len_divisor == 0
+    ), f"""
+    Sequence length {job_config.training.seq_len} must be divisible by the product of TP degree
+    ({parallel_dims.tp}) and 2 * CP degree ({parallel_dims.cp}), i.e. {parallel_dims.seq_len_divisor}.
+    """
+
+    if (
+        job_config.parallelism.context_parallel_degree > 1
+        and model.model_args.use_flex_attn
+    ):
+        raise NotImplementedError("CP support for FlexAttention is still in progress.")
+
+    if parallel_dims.tp_enabled:
+        enable_float8_linear = "float8" in job_config.model.converters
+        float8_is_rowwise = job_config.quantize.dense.float8.recipe_name in (
+            "rowwise",
+            "rowwise_with_gw_hp",
+        )
+
+        enable_float8_tensorwise_tp = enable_float8_linear and not float8_is_rowwise
+        if enable_float8_tensorwise_tp:
+            # TODO(jianiw): This branch needs to be tested and enabled
+            raise NotImplementedError(
+                "Currently, float8 tensorwise TP is not tested for deepseekv3"
+            )
+
+        apply_non_moe_tp(
+            model,
+            world_mesh["tp"],
+            loss_parallel=not job_config.parallelism.disable_loss_parallel,
+            enable_float8_tensorwise_tp=False,
+        )
+        maybe_enable_async_tp(job_config, world_mesh["tp"])
+
+    if parallel_dims.tp_enabled or parallel_dims.ep_enabled:
+        apply_moe_ep_tp(
+            model,
+            tp_mesh=world_mesh["tp"] if parallel_dims.tp_enabled else None,
+            ep_mesh=world_mesh["ep"] if parallel_dims.ep_enabled else None,
+            ep_tp_mesh=(
+                world_mesh["ep", "tp"]
+                if parallel_dims.tp_enabled
+                and parallel_dims.ep_enabled
+                and parallel_dims.etp_enabled
+                else None
+            ),
+            etp_enabled=parallel_dims.etp_enabled,
+        )
+
+    if job_config.activation_checkpoint.mode != "none":
+        apply_ac(model, job_config.activation_checkpoint)
+
+    mp_policy = MixedPrecisionPolicy(
+        param_dtype=TORCH_DTYPE_MAP[job_config.training.mixed_precision_param],
+        reduce_dtype=TORCH_DTYPE_MAP[job_config.training.mixed_precision_reduce],
+    )
+
+    # apply data parallel
+    dp_mesh: DeviceMesh | None = None
+    if (
+        parallel_dims.fsdp_enabled
+        or parallel_dims.ep_enabled
+        or parallel_dims.dp_replicate_enabled
+    ):
+        if parallel_dims.dp_replicate_enabled:
+            if parallel_dims.dp_shard_enabled or parallel_dims.cp_enabled:
+                dp_mesh_dim_names = ("dp_replicate", "dp_shard_cp")
+                dp_mode = "hybrid_shard"
+            else:
+                dp_mesh_dim_names = ("dp_replicate",)
+                dp_mode = "replicate"
+        else:
+            dp_mesh_dim_names = ("dp_shard_cp",)
+            dp_mode = "fully_shard"
+
+        dp_mesh = world_mesh[tuple(dp_mesh_dim_names)]
+        # the mesh dim names of which the MoE params are sharded on via FSDP/HSDP
+        dp_mod_ep_mesh_dim_names = []
+
+        if parallel_dims.ep_enabled:
+            if parallel_dims.dp_replicate_enabled:
+                dp_mod_ep_mesh_dim_names.append("dp_replicate")
+            dp_mod_ep_mesh_dim_names.append("dp_shard_mod_ep")
+            dp_mod_ep_mesh = world_mesh[tuple(dp_mod_ep_mesh_dim_names)]
+
+        for _, transformer_block in model.layers.items():
+            if transformer_block.moe_enabled and parallel_dims.ep_enabled:
+                experts_shard_dim = 0
+                assert dp_mod_ep_mesh is not None
+                assert hasattr(transformer_block, "moe")
+                if (
+                    dp_mod_ep_mesh.size()
+                    * parallel_dims.ep
+                    > transformer_block.moe.experts.num_experts
+                ):
+                    experts_shard_dim = 1
+
+                transformer_block.moe.experts = data_parallel(
+                    transformer_block.moe.experts,
+                    dp_mod_ep_mesh,
+                    dp_mode,
+                    ac_mode=job_config.activation_checkpoint.mode,
+                    mp_policy=mp_policy,
+                    shard_dim=experts_shard_dim,
+                )
+                # TODO(ruisizhang123): support set_gradient_divide_factor in simplefsdp
+                # transformer_block.moe.experts.set_gradient_divide_factor(
+                #     parallel_dims.fsdp_gradient_divide_factor,
+                # )
+
+        model = data_parallel(
+            model,
+            dp_mesh,
+            dp_mode,
+            ac_mode=job_config.activation_checkpoint.mode,
+            mp_policy=mp_policy,
+        )
+
+        logger.info(
+            "Applied Data Parallel (simple_fsdp) (dp mode=%s) to the model", dp_mode
+        )
+
+    if job_config.compile.enable:
+        torch._inductor.config.reorder_for_peak_memory = False
+        torch._dynamo.config.capture_scalar_outputs = True
+        model = torch.compile(model, fullgraph=True)
+
+    return model
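
One detail worth noting in `parallelize_deepseekv3`: grouped expert weights default to sharding on dim 0 (the expert dimension), but switch to dim 1 when the dp_mod_ep mesh size times the EP degree exceeds the number of experts, since dim 0 cannot be split across that many shards. A standalone restatement of just that decision, for illustration:

```python
# Restates the shard-dim choice from parallelize_deepseekv3 in isolation.
def pick_experts_shard_dim(dp_mod_ep_size: int, ep_degree: int, num_experts: int) -> int:
    # Shard on the expert dim (0) while every shard can still hold at least
    # one expert; otherwise fall back to sharding dim 1.
    return 1 if dp_mod_ep_size * ep_degree > num_experts else 0

# 64 experts over 8 EP ranks x 8 FSDP shards still fits on dim 0 ...
assert pick_experts_shard_dim(dp_mod_ep_size=8, ep_degree=8, num_experts=64) == 0
# ... but with 16 FSDP shards per expert group, dim 1 is used instead.
assert pick_experts_shard_dim(dp_mod_ep_size=16, ep_degree=8, num_experts=64) == 1
```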

torchtitan/experiments/simple_fsdp/__init__.py renamed to torchtitan/experiments/simple_fsdp/llama3/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 
 def get_train_spec() -> TrainSpec:
     return TrainSpec(
-        name="simple_fsdp",
+        name="simple_fsdp.llama3",
         model_cls=SimpleFSDPTransformer,
         model_args=llama3_configs,
         parallelize_fn=parallelize_llama,

torchtitan/experiments/simple_fsdp/model.py renamed to torchtitan/experiments/simple_fsdp/llama3/model.py

Lines changed: 3 additions & 2 deletions
@@ -5,13 +5,14 @@
 # LICENSE file in the root directory of this source tree.
 
 from torchtitan.models.llama3 import Transformer, TransformerModelArgs
-from .simple_fsdp import disable_data_parallel
+
+from ..simple_fsdp import disable_active_parametrization
 
 
 class SimpleFSDPTransformer(Transformer):
     def __init__(self, model_args: TransformerModelArgs):
         super().__init__(model_args)
 
     def init_weights(self, *args, **kwargs):
-        with disable_data_parallel():
+        with disable_active_parametrization():
             super().init_weights(*args, **kwargs)

torchtitan/experiments/simple_fsdp/parallelize.py renamed to torchtitan/experiments/simple_fsdp/llama3/parallelize.py

Lines changed: 4 additions & 2 deletions
@@ -14,7 +14,7 @@
 from torchtitan.models.llama3.infra.parallelize import apply_tp
 from torchtitan.tools.logging import logger
 
-from .simple_fsdp import data_parallel, MixedPrecisionPolicy
+from ..simple_fsdp import data_parallel, MixedPrecisionPolicy
 
 
 # for selective op activation checkpointing

@@ -116,7 +116,9 @@ def parallelize_llama(
             ac_mode=job_config.activation_checkpoint.mode,
             mp_policy=mp_policy,
         )
-        logger.info("Applied Data Parallel (dp mode=%s) to the model", dp_mode)
+        logger.info(
+            "Applied Data Parallel (simple_fsdp) (dp mode=%s) to the model", dp_mode
+        )
 
     if job_config.compile.enable and "model" in job_config.compile.components:
         torch._inductor.config.reorder_for_peak_memory = False