
Commit 5f4af36

FSDP2 support (axolotl-ai-cloud#2469)
* fsdp2 support
* use accelerate release 1.6.0
* allow 8bit optims with fsdp2
* liger + torch compile fix
* add fsdp2 e2e tests
* use transformers commit with fsdp2 support
* skip zero3 tests for this PR for now
* fix fsdp2 config for ci
* make sure both flex and flash attn work with fsdp2, skip fix untrained tokens
* okay, actually use fsdp2...
* more fixes to flex for fsdp2
* make sure to patch all the loaded models
* additional validation for fsdp2, bump dep versions
1 parent a8f38c3 commit 5f4af36

File tree

9 files changed, +320 -43 lines changed

requirements.txt

Lines changed: 4 additions & 4 deletions

@@ -12,12 +12,12 @@ liger-kernel==0.5.5
 packaging==23.2
 
 peft==0.15.0
-transformers==4.50.3
+transformers==4.51.0
 tokenizers>=0.21.1
-accelerate==1.5.2
+accelerate==1.6.0
 datasets==3.5.0
-deepspeed==0.15.4
-trl==0.16.0
+deepspeed>=0.15.4
+trl==0.16.1
 
 optimum==1.16.2
 hf_transfer

src/axolotl/integrations/liger/__init__.py

Lines changed: 13 additions & 0 deletions

@@ -27,6 +27,7 @@
 
 from ...utils.distributed import zero_only
 from .args import LigerArgs  # pylint: disable=unused-import. # noqa: F401
+from .utils import patch_with_compile_disable
 
 LOG = logging.getLogger("axolotl.integrations.liger")
 
@@ -40,6 +41,18 @@ def get_input_args(self):
        return "axolotl.integrations.liger.LigerArgs"
 
    def pre_model_load(self, cfg):
+        if cfg.torch_compile:
+            # torch compile will unnecessarily attempt to optimize the triton kernel unless explicitly disabled
+            import liger_kernel.ops.fused_linear_cross_entropy
+
+            patch_with_compile_disable(
+                liger_kernel.ops.fused_linear_cross_entropy,
+                "fused_linear_cross_entropy_forward",
+            )
+            patch_with_compile_disable(
+                liger_kernel.ops.fused_linear_cross_entropy,
+                "fused_linear_cross_entropy_backward",
+            )
        from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
        from liger_kernel.transformers.functional import liger_cross_entropy
        from liger_kernel.transformers.geglu import LigerGEGLUMLP
src/axolotl/integrations/liger/utils.py

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+"""
+utils to patch liger kernel ops to disable torch.compile
+"""
+
+from functools import wraps
+
+import torch
+
+
+def patch_with_compile_disable(module, function_name):
+    """
+    Patch a function in a module by wrapping it with torch.compiler.disable
+
+    Args:
+        module: The module containing the function to patch
+        function_name: The name of the function to patch
+    """
+    original_function = getattr(module, function_name)
+
+    @wraps(original_function)
+    @torch.compiler.disable
+    def wrapped_function(*args, **kwargs):
+        return original_function(*args, **kwargs)
+
+    # Replace the original function with the wrapped one
+    setattr(module, function_name, wrapped_function)
+
+    # Return the original function in case you need to restore it later
+    return original_function
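
As context (not part of the commit), a minimal sketch of how patch_with_compile_disable could be applied to a stand-in module and later undone; the dummy module and function names below are hypothetical:

import math
import types

from axolotl.integrations.liger.utils import patch_with_compile_disable

# throwaway module standing in for liger_kernel.ops.fused_linear_cross_entropy
dummy_ops = types.ModuleType("dummy_ops")
dummy_ops.heavy_kernel = lambda x: math.sqrt(x)

# wrap the target so torch.compile skips tracing it; behavior is unchanged
original = patch_with_compile_disable(dummy_ops, "heavy_kernel")
assert dummy_ops.heavy_kernel(4.0) == 2.0

# the original function is returned, so the patch can be reverted later
dummy_ops.heavy_kernel = original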
src/axolotl/monkeypatch/attention/flex_attn.py

Lines changed: 158 additions & 34 deletions

@@ -1,48 +1,172 @@
 """Flex attention monkey patch"""
 
+import sys
+from typing import Optional, Tuple, Union
+
 import torch
 import transformers
 
 
-def patch_flex():
+def patch_flex_wrapper():
+    # TODO remove this patch when transformers#37285 is merged and in a release
     is_torch_2_6 = torch.__version__.startswith("2.6")
     is_transformers_below_4_51 = transformers.__version__ < "4.51.0"
 
-    if is_torch_2_6 and is_transformers_below_4_51:
-        from torch.nn.attention.flex_attention import flex_attention
+    if not (is_torch_2_6 and is_transformers_below_4_51):
+        return
+
+    from torch.nn.attention.flex_attention import flex_attention
+
+    class WrappedFlexAttention:
+        """
+        We are doing a singleton class so that flex attention is compiled once when it's first called.
+        """
+
+        _instance = None
+        _is_flex_compiled = False
+        _compiled_flex_attention = None
 
-        class WrappedFlexAttention:
+        def __new__(cls, *args, **kwargs):
+            if cls._instance is None:
+                # Create a new instance if one doesn't already exist
+                cls._instance = super().__new__(cls)
+            return cls._instance
+
+        @torch.compiler.disable(recursive=False)
+        def __init__(self):
             """
-            We are doing a singleton class so that flex attention is compiled once when it's first called.
+            Initialize or update the singleton instance.
             """
+            if not self._is_flex_compiled:
+                self._compiled_flex_attention = torch.compile(
+                    flex_attention,
+                    dynamic=False,
+                    mode="max-autotune-no-cudagraphs",
+                    fullgraph=True,
+                )
+                self._is_flex_compiled = True
+
+        def __call__(self):
+            return self._compiled_flex_attention
+
+    transformers.integrations.flex_attention.WrappedFlexAttention = WrappedFlexAttention
+
+
+def patch_flex_make_mask():
+    is_torch_2_6 = torch.__version__.startswith("2.6")
+    is_transformers_eq_4_51 = transformers.__version__ == "4.51.0"
+
+    if not (is_torch_2_6 and is_transformers_eq_4_51):
+        return
+
+    from torch.nn.attention.flex_attention import (
+        BlockMask,
+    )
+    from torch.nn.attention.flex_attention import (
+        create_block_mask as create_block_causal_mask_flex,
+    )
+
+    Offset = Union[torch.Tensor, int]
+
+    def patched_make_flex_block_causal_mask(
+        attention_mask_2d: torch.Tensor,
+        attention_chunk_size: Optional[int] = None,
+        query_length=None,
+        key_length=None,
+        offsets: Optional[Tuple[Offset, Offset]] = None,
+    ) -> "BlockMask":
+        """
+        Create a block causal document mask for a batch of sequences, both packed and unpacked.
+        Create Block causal logic and passing it into :func:`torch.nn.attention.flex_attention.create_block_mask`.
+        The resultant BlockMask is a compressed representation of the full block causal
+        mask. BlockMask is essential for performant computation of flex attention.
+        See: https://pytorch.org/blog/flexattention/
+
+        Args:
+            attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
+            of shape (batch_size, total_seq_len). e.g.
 
-            _instance = None
-            _is_flex_compiled = False
-            _compiled_flex_attention = None
-
-            def __new__(cls, *args, **kwargs):
-                if cls._instance is None:
-                    # Create a new instance if one doesn't already exist
-                    cls._instance = super().__new__(cls)
-                return cls._instance
-
-            @torch.compiler.disable(recursive=False)
-            def __init__(self):
-                """
-                Initialize or update the singleton instance.
-                """
-                if not self._is_flex_compiled:
-                    self._compiled_flex_attention = torch.compile(
-                        flex_attention,
-                        dynamic=False,
-                        mode="max-autotune-no-cudagraphs",
-                        fullgraph=True,
-                    )
-                    self._is_flex_compiled = True
-
-            def __call__(self):
-                return self._compiled_flex_attention
-
-        transformers.integrations.flex_attention.WrappedFlexAttention = (
-            WrappedFlexAttention
+            For unpacked sequence:
+            [[1, 1, 1, 1, 0, 0, 0],
+             [1, 1, 1, 1, 1, 0, 0]]
+
+            For packed sequence:
+            [[1, 1, 1, 2, 2, 2, 0],
+             [1, 1, 2, 2, 2, 3, 3]]
+
+        Returns:
+            BlockMask
+        """
+
+        batch_size, total_seq_len = attention_mask_2d.shape
+        if not key_length:
+            key_length = total_seq_len
+        if not query_length:
+            query_length = total_seq_len
+        attention_mask_2d = torch.nn.functional.pad(
+            attention_mask_2d, value=0, pad=(0, key_length)
+        )
+        device = attention_mask_2d.device
+        document_ids = attention_mask_2d.clone()
+
+        if attention_chunk_size is not None:
+            # we create an arange, then we just // by chunk size to get [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
+            document_ids = (document_ids.fill_(1).cumsum(-1) - 1) // (
+                attention_chunk_size
+            )
+
+        # Instead of passing a tensor mask, flex attention requires a mask_mod function
+        # that determines which elements of QK^T should be included in the attention
+        # computation prior to the softmax. For sample packing, we need both the
+        # logic for both causal mask and document mask. See PyTorch's official
+        # blog post for more details: https://pytorch.org/blog/flexattention/#mask-mods
+        def causal_mask_mod(
+            batch_idx, head_idx, q_idx, kv_idx
+        ):  # pylint: disable=unused-argument
+            """
+            Defines the logic of a block causal mask by combining both a standard causal mask
+            and a block diagonal document mask.
+
+            See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
+            for an illustration.
+            """
+            causal_mask = q_idx >= kv_idx  # not valid when decoding
+            document_mask = (
+                document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
+            )
+            padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
+            final_mask = causal_mask & padding_mask & document_mask
+            return final_mask
+
+        if offsets is not None:
+            q_offset = offsets[0]
+            kv_offset = offsets[1]
+
+            def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
+                offset_q = q_idx + q_offset
+                offset_kv = kv_idx + kv_offset
+                return causal_mask_mod(batch_idx, head_idx, offset_q, offset_kv)
+
+        else:
+            mask_mod = causal_mask_mod
+        return create_block_causal_mask_flex(
+            mask_mod=mask_mod,
+            B=batch_size,
+            H=None,  # attention head
+            Q_LEN=query_length,
+            KV_LEN=key_length,
+            device=device,
+            _compile=True,
         )
+
+    for n in tuple(sys.modules):
+        if ".modeling_" in n and "llama4" not in n:
+            if hasattr(sys.modules[n], "make_flex_block_causal_mask"):
+                print(n)
+                sys.modules[n].make_flex_block_causal_mask = (
+                    patched_make_flex_block_causal_mask
+                )
+
+    transformers.integrations.flex_attention.make_flex_block_causal_mask = (
+        patched_make_flex_block_causal_mask
+    )
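
To make the mask_mod semantics concrete, here is a small illustration (not from the commit) that applies the same causal + document + padding logic densely to a toy packed batch; it only assumes torch and mirrors causal_mask_mod above rather than building a real BlockMask:

import torch

attention_mask_2d = torch.tensor([[1, 1, 2, 2, 0]])  # two packed docs, last position is padding
document_ids = attention_mask_2d.clone()

def causal_mask_mod(batch_idx, q_idx, kv_idx):
    causal = q_idx >= kv_idx  # standard causal constraint
    same_doc = bool(document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx])
    not_pad = bool(attention_mask_2d[batch_idx, q_idx] > 0)
    return causal and same_doc and not_pad

seq_len = attention_mask_2d.shape[1]
dense = [[int(causal_mask_mod(0, q, k)) for k in range(seq_len)] for q in range(seq_len)]
for row in dense:
    print(row)
# [1, 0, 0, 0, 0]
# [1, 1, 0, 0, 0]
# [0, 0, 1, 0, 0]
# [0, 0, 1, 1, 0]
# [0, 0, 0, 0, 0]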

src/axolotl/train.py

Lines changed: 1 addition & 1 deletion

@@ -217,7 +217,7 @@ def save_trained_model(
 
    # Handle FSDP state dict type
    state_dict_type = "FULL_STATE_DICT"
-    if trainer.is_fsdp_enabled:
+    if trainer.is_fsdp_enabled and str(cfg.fsdp_config.fsdp_version) != "2":
        if cfg.fsdp_final_state_dict_type:
            state_dict_type = cfg.fsdp_final_state_dict_type
        trainer.accelerator.state.fsdp_plugin.set_state_dict_type(state_dict_type)

src/axolotl/utils/models.py

Lines changed: 6 additions & 2 deletions

@@ -889,9 +889,13 @@ def set_attention_config(self) -> None:
            self.model_config._attn_implementation = (  # pylint: disable=protected-access
                "flex_attention"
            )
-            from axolotl.monkeypatch.attention.flex_attn import patch_flex
+            from axolotl.monkeypatch.attention.flex_attn import (
+                patch_flex_make_mask,
+                patch_flex_wrapper,
+            )
 
-            patch_flex()
+            patch_flex_wrapper()
+            patch_flex_make_mask()
 
        elif self.cfg.flash_attention:
            if not self.cfg.sample_packing and self.cfg.s2_attention:

src/axolotl/utils/schemas/config.py

Lines changed: 13 additions & 0 deletions

@@ -950,10 +950,23 @@ def check_fsdp_offload_w_8bit_optimizer(cls, data):
            and "8bit" in data.get("optimizer", "")
            and data.get("fsdp_config")
            and data["fsdp_config"].get("fsdp_offload_params")
+            and str(data["fsdp_config"].get("fsdp_version")) != "2"
        ):
            raise ValueError(
                f"FSDP Offload not compatible with {data.get('optimizer')}"
            )
+        if (
+            data.get("fsdp")
+            and "8bit" in data.get("optimizer", "")
+            and data.get("fsdp_config")
+            and str(data["fsdp_config"].get("fsdp_version")) == "2"
+        ):
+            if data.get("optimizer", "") in ["adamw_8bit", "adamw_bnb_8bit"]:
+                # CUDA ops errors with bnb 8bit optimizer + FSDP2
+                raise ValueError(
+                    f"FSDP2 not compatible with {data.get('optimizer')}, use `adamw_torch_8bit` instead"
+                )
+
        return data
 
    @model_validator(mode="before")
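
For illustration only (not part of the commit), a standalone sketch of the FSDP2 branch of the validator above, showing that the bitsandbytes-backed adamw_8bit / adamw_bnb_8bit optimizers are rejected while adamw_torch_8bit passes:

def fsdp2_8bit_optimizer_ok(data: dict) -> bool:
    fsdp2_with_8bit = (
        data.get("fsdp")
        and "8bit" in data.get("optimizer", "")
        and data.get("fsdp_config")
        and str(data["fsdp_config"].get("fsdp_version")) == "2"
    )
    if fsdp2_with_8bit and data.get("optimizer", "") in ["adamw_8bit", "adamw_bnb_8bit"]:
        return False  # bnb 8-bit optimizers hit CUDA ops errors under FSDP2
    return True

assert not fsdp2_8bit_optimizer_ok(
    {"fsdp": True, "fsdp_config": {"fsdp_version": 2}, "optimizer": "adamw_bnb_8bit"}
)
assert fsdp2_8bit_optimizer_ok(
    {"fsdp": True, "fsdp_config": {"fsdp_version": 2}, "optimizer": "adamw_torch_8bit"}
)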

src/axolotl/utils/trainer.py

Lines changed: 6 additions & 0 deletions

@@ -538,6 +538,8 @@ def setup_deepspeed_env(cfg, stage=None):
 
 def setup_fsdp_envs(cfg):
    os.environ["ACCELERATE_USE_FSDP"] = "true"
+    if str(cfg.fsdp_config.fsdp_version) == "2":
+        os.environ["FSDP_VERSION"] = "2"
    if cfg.fsdp_config.fsdp_activation_checkpointing:
        os.environ["FSDP_ACTIVATION_CHECKPOINTING"] = "true"
    if cfg.fsdp_config.fsdp_offload_params:
@@ -556,6 +558,10 @@ def setup_fsdp_envs(cfg):
        os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = (
            cfg.fsdp_config.fsdp_transformer_layer_cls_to_wrap
        )
+    if cfg.fsdp_config.fsdp_reshard_after_forward is not None:
+        os.environ["FSDP_RESHARD_AFTER_FORWARD"] = (
+            "true" if cfg.fsdp_config.fsdp_reshard_after_forward else "false"
+        )
 
 
 def prepare_optim_env(cfg):
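
A quick sketch (not from the commit; it uses a dict-like stand-in for cfg.fsdp_config rather than axolotl's real config object) of the environment variables the new branches above would export for an FSDP2 config:

import os
from types import SimpleNamespace

# hypothetical stand-in for the relevant cfg.fsdp_config fields
fsdp_config = SimpleNamespace(fsdp_version=2, fsdp_reshard_after_forward=True)

os.environ["ACCELERATE_USE_FSDP"] = "true"
if str(fsdp_config.fsdp_version) == "2":
    os.environ["FSDP_VERSION"] = "2"
if fsdp_config.fsdp_reshard_after_forward is not None:
    os.environ["FSDP_RESHARD_AFTER_FORWARD"] = (
        "true" if fsdp_config.fsdp_reshard_after_forward else "false"
    )

print({k: v for k, v in os.environ.items() if k.startswith(("ACCELERATE", "FSDP"))})
# e.g. {'ACCELERATE_USE_FSDP': 'true', 'FSDP_VERSION': '2', 'FSDP_RESHARD_AFTER_FORWARD': 'true'}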
