
Commit b1d2def

add support for simplefsdp+ep
1 parent 5ec8765 commit b1d2def

10 files changed: +331 -37 lines

.github/workflows/integration_test_8gpu_simple_fsdp.yaml

Lines changed: 1 addition & 1 deletion

@@ -47,4 +47,4 @@ jobs:
 python -m pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cu126

 mkdir artifacts-to-be-uploaded
-python -m torchtitan.experiments.simple_fsdp.tests.integration_tests artifacts-to-be-uploaded --ngpu 8
+python -m torchtitan.experiments.simple_fsdp.tests.llama3_integration_tests artifacts-to-be-uploaded --ngpu 8

torchtitan/experiments/simple_fsdp/README.md

Lines changed: 9 additions & 0 deletions

@@ -12,10 +12,18 @@ This folder includes an experimental frontend implementation for [SimpleFSDP: Si

 ### Enable SimpleFSDP Training

+#### Training Llama3 models
+
 ```bash
 CONFIG_FILE="./torchtitan/models/llama3/train_configs/llama3_8b.toml" ./run_train.sh --model.name llama3_simple_fsdp --compile.enable
 ```

+#### Training DeepSeek_v3 models
+
+```bash
+CONFIG_FILE="./torchtitan/models/deepseek_v3/train_configs/debug_model.toml" ./run_train.sh --model.name deepseekv3_simple_fsdp --compile.enable
+```
+
 ### Composability Support

 Some of the features require the updates from PyTorch, with which we are working on providing composability support for the following features:
@@ -28,6 +36,7 @@ Some of the features require the updates from PyTorch, with which we are working
 |Tensor Parallelism||
 |Context Parallelism||
 |Pipeline Parallelism||
+|Expert Parallelism||
 |Distributed Checkpointing||
 |Float8 Training| 🚧 |
torchtitan/experiments/simple_fsdp/__init__.py

Lines changed: 25 additions & 3 deletions

@@ -8,14 +8,20 @@

 from torchtitan.components.loss import build_cross_entropy_loss
 from torchtitan.components.lr_scheduler import build_lr_schedulers
-from torchtitan.components.optimizer import build_optimizers
+from torchtitan.components.optimizer import (
+    build_optimizers,
+    build_optimizers_with_moe_load_balancing,
+)
 from torchtitan.components.tokenizer import build_hf_tokenizer
 from torchtitan.datasets.hf_datasets import build_hf_dataloader
+from torchtitan.models.deepseek_v3 import deepseekv3_configs
 from torchtitan.models.llama3 import llama3_configs, pipeline_llama
 from torchtitan.protocols.train_spec import register_train_spec, TrainSpec
+from .deepseek_v3_model import SimpleFSDPDeepSeekV3Model
+from .deepseek_v3_parallelize import parallelize_deepseekv3

-from .model import SimpleFSDPTransformer
-from .parallelize import parallelize_llama
+from .llama3_model import SimpleFSDPTransformer
+from .llama3_parallelize import parallelize_llama

 register_train_spec(
     TrainSpec(
@@ -31,3 +37,19 @@
         build_loss_fn=build_cross_entropy_loss,
     )
 )
+
+
+register_train_spec(
+    TrainSpec(
+        name="deepseekv3_simple_fsdp",
+        model_cls=SimpleFSDPDeepSeekV3Model,
+        model_args=deepseekv3_configs,
+        parallelize_fn=parallelize_deepseekv3,
+        pipelining_fn=pipeline_llama,
+        build_optimizers_fn=build_optimizers_with_moe_load_balancing,
+        build_lr_schedulers_fn=build_lr_schedulers,
+        build_dataloader_fn=build_hf_dataloader,
+        build_tokenizer_fn=build_hf_tokenizer,
+        build_loss_fn=build_cross_entropy_loss,
+    )
+)
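
For orientation (not part of this commit): a registered `TrainSpec` is later looked up by the name passed via `--model.name`. A minimal sketch of that flow, assuming the `get_train_spec` lookup that accompanies `register_train_spec`; `flavor`, `parallel_dims`, and `job_config` stand in for values the trainer already has.

```python
# Sketch only -- how the spec registered above is typically consumed by the trainer.
# `get_train_spec` is assumed to be the lookup counterpart of `register_train_spec`.
from torchtitan.protocols.train_spec import get_train_spec


def build_deepseekv3_simple_fsdp(flavor, parallel_dims, job_config):
    spec = get_train_spec("deepseekv3_simple_fsdp")
    # `flavor` is a placeholder for the model-args key selected by the .toml config.
    model = spec.model_cls(spec.model_args[flavor])
    # SimpleFSDP + EP/TP wrapping, as implemented in deepseek_v3_parallelize.py below.
    return spec.parallelize_fn(model, parallel_dims, job_config)
```
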
torchtitan/experiments/simple_fsdp/deepseek_v3_model.py (new file)

Lines changed: 18 additions & 0 deletions

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from torchtitan.models.deepseek_v3 import DeepSeekV3Model, DeepSeekV3ModelArgs
from .simple_fsdp import disable_data_parallel


class SimpleFSDPDeepSeekV3Model(DeepSeekV3Model):
    def __init__(self, model_args: DeepSeekV3ModelArgs):
        super().__init__(model_args)
        self.init_weights()

    def init_weights(self, *args, **kwargs):
        with disable_data_parallel():
            super().init_weights(*args, **kwargs)
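
The wrapper's only change is to run `init_weights` inside `disable_data_parallel()`, so initialization does not route through SimpleFSDP's data-parallel parametrization. The renamed `llama3_model.py` referenced in `__init__.py` presumably follows the same pattern; a hedged sketch, with `Transformer` and `TransformerModelArgs` assumed to be the llama3 exports:

```python
# Sketch (not in this diff): the presumed llama3 counterpart in llama3_model.py.
from torchtitan.models.llama3 import Transformer, TransformerModelArgs

from .simple_fsdp import disable_data_parallel


class SimpleFSDPTransformer(Transformer):
    def __init__(self, model_args: TransformerModelArgs):
        super().__init__(model_args)
        self.init_weights()

    def init_weights(self, *args, **kwargs):
        # Re-initialize weights without going through the data-parallel wrappers.
        with disable_data_parallel():
            super().init_weights(*args, **kwargs)
```
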
torchtitan/experiments/simple_fsdp/deepseek_v3_parallelize.py (new file)

Lines changed: 156 additions & 0 deletions

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
from torch.distributed.device_mesh import DeviceMesh

from torchtitan.config import JobConfig, TORCH_DTYPE_MAP
from torchtitan.distributed import ParallelDims
from torchtitan.experiments.llama4.infra.parallelize import apply_moe_ep_tp
from torchtitan.models.deepseek_v3.infra.parallelize import apply_non_moe_tp
from torchtitan.models.llama3.infra.parallelize import apply_ac
from torchtitan.tools.logging import logger

from .simple_fsdp import data_parallel, MixedPrecisionPolicy

# Adapted from llama4/infra/parallelize.py
def parallelize_deepseekv3(
    model: nn.Module,
    parallel_dims: ParallelDims,
    job_config: JobConfig,
):
    world_mesh = parallel_dims.world_mesh
    # TODO: TP currently cannot handle uneven seq_len because we set
    # `use_local_output=True` to use plain Tensors for legacy reasons.
    # Need to revisit this.
    assert (
        job_config.training.seq_len % parallel_dims.seq_len_divisor == 0
    ), f"""
    Sequence length {job_config.training.seq_len} must be divisible by the product of TP degree
    ({parallel_dims.tp}) and 2 * CP degree ({parallel_dims.cp}).
    """

    if (
        job_config.parallelism.context_parallel_degree > 1
        and model.model_args.use_flex_attn
    ):
        raise NotImplementedError("CP support for FlexAttention is still in progress.")

    if parallel_dims.tp_enabled:
        if job_config.parallelism.enable_async_tensor_parallel:
            # TODO(jianiw): This branch needs to be tested and enabled
            raise NotImplementedError(
                "Currently, async TP is not tested for deepseekv3. \
                torch.compile is not supported yet, which is required for async TP."
            )

        enable_float8_linear = "float8" in job_config.model.converters
        float8_is_rowwise = job_config.float8.recipe_name in (
            "rowwise",
            "rowwise_with_gw_hp",
        )

        enable_float8_tensorwise_tp = enable_float8_linear and not float8_is_rowwise
        if enable_float8_tensorwise_tp:
            # TODO(jianiw): This branch needs to be tested and enabled
            raise NotImplementedError(
                "Currently, float8 tensorwise TP is not tested for deepseekv3"
            )

        apply_non_moe_tp(
            model,
            world_mesh["tp"],
            loss_parallel=not job_config.parallelism.disable_loss_parallel,
            enable_float8_tensorwise_tp=False,
            enable_async_tp=False,
        )

    if parallel_dims.tp_enabled or parallel_dims.ep_enabled:
        apply_moe_ep_tp(
            model,
            tp_mesh=world_mesh["tp"] if parallel_dims.tp_enabled else None,
            ep_mesh=world_mesh["ep"] if parallel_dims.ep_enabled else None,
            ep_tp_mesh=(
                world_mesh["ep", "tp"]
                if parallel_dims.tp_enabled and parallel_dims.ep_enabled
                else None
            ),
            etp_enabled=parallel_dims.etp_enabled,
        )

    if job_config.activation_checkpoint.mode != "none":
        apply_ac(model, job_config.activation_checkpoint)

    mp_policy = MixedPrecisionPolicy(
        param_dtype=TORCH_DTYPE_MAP[job_config.training.mixed_precision_param],
        reduce_dtype=TORCH_DTYPE_MAP[job_config.training.mixed_precision_reduce],
    )

    # apply data parallel
    dp_mesh: DeviceMesh | None = None
    if (
        parallel_dims.fsdp_enabled
        or parallel_dims.ep_enabled
        or parallel_dims.dp_replicate_enabled
    ):
        if parallel_dims.dp_replicate_enabled:
            if parallel_dims.dp_shard_enabled or parallel_dims.cp_enabled:
                dp_mesh_dim_names = ("dp_replicate", "dp_shard_cp")
                dp_mode = "hybrid_shard"
            else:
                dp_mesh_dim_names = ("dp_replicate",)
                dp_mode = "replicate"
        else:
            dp_mesh_dim_names = ("dp_shard_cp",)
            dp_mode = "fully_shard"

        dp_mesh = world_mesh[tuple(dp_mesh_dim_names)]
        # the mesh dim names of which the MoE params are sharded on via FSDP/HSDP
        dp_mod_ep_mesh_dim_names = []
        ep_modules = []
        ep_shared_experts = []
        if parallel_dims.ep_enabled:
            if parallel_dims.dp_replicate_enabled:
                dp_mod_ep_mesh_dim_names.append("dp_replicate")
            dp_mod_ep_mesh_dim_names.append("dp_shard_mod_ep")
            for _, transformer_block in model.layers.items():
                if transformer_block.moe_enabled:
                    ep_modules.append(transformer_block.moe.experts)
                    ep_shared_experts.append(transformer_block.moe.shared_experts)

        if not parallel_dims.tp_enabled and parallel_dims.ep_enabled:
            tp_ep_mesh = world_mesh["ep"]
        elif parallel_dims.tp_enabled and parallel_dims.ep_enabled:
            tp_ep_mesh = world_mesh["ep", "tp"]
        else:
            tp_ep_mesh = None

        model = data_parallel(
            model,
            dp_mesh,
            dp_mode,
            ac_mode=job_config.activation_checkpoint.mode,
            mp_policy=mp_policy,
            tp_mesh=world_mesh["tp"] if parallel_dims.tp_enabled else None,
            tp_ep_mesh=tp_ep_mesh,
            dp_mod_ep_mesh=world_mesh[tuple(dp_mod_ep_mesh_dim_names)]
            if parallel_dims.ep_enabled
            else None,
            ep_modules=ep_modules,
            ep_shared_experts=ep_shared_experts,
        )
        if parallel_dims.dp_replicate_enabled:
            logger.info("Applied HSDP to the model")
        else:
            logger.info("Applied FSDP to the model")

    if job_config.compile.enable:
        torch._inductor.config.reorder_for_peak_memory = False
        torch._dynamo.config.capture_scalar_outputs = True
        model = torch.compile(model, fullgraph=True)

    return model
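
The data-parallel branch above boils down to a small decision table between `fully_shard`, `hybrid_shard`, and `replicate`. A standalone sketch of just that selection logic (the helper name `choose_dp_mode` is illustrative, not part of the code):

```python
# Distilled from the dp_mode / dp_mesh_dim_names branch in parallelize_deepseekv3.
def choose_dp_mode(dp_replicate_enabled: bool, dp_shard_enabled: bool, cp_enabled: bool):
    """Return (dp_mode, mesh dim names) the way the function above selects them."""
    if dp_replicate_enabled:
        if dp_shard_enabled or cp_enabled:
            return "hybrid_shard", ("dp_replicate", "dp_shard_cp")
        return "replicate", ("dp_replicate",)
    return "fully_shard", ("dp_shard_cp",)


# Example: sharding only (plain FSDP), no replication.
assert choose_dp_mode(False, True, False) == ("fully_shard", ("dp_shard_cp",))
```
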
