
Commit 8bbad21

llama4 support (axolotl-ai-cloud#2493)

* llama4 support
* add xet support [skip ci]
* be flexible on transformers version and skip test on version
* don't use deepspeed for the fix_untrained_tokens test
* reordering to trigger torch 2.6.0 tests first
* slightly smaller train set
* use 4.51.0 for now
* remove stray print, add llama4 chat template to schema, bump peft to 0.15.1
* patches to make llama4 performant
* add preliminary fp8 support

1 parent 5f4af36 commit 8bbad21

File tree

17 files changed: +407 -32 lines changed

.github/workflows/multi-gpu-e2e.yml

Lines changed: 5 additions & 5 deletions

@@ -27,21 +27,21 @@ jobs:
       - cuda: 124
         cuda_version: 12.4.1
         python_version: "3.11"
-        pytorch: 2.4.1
-        axolotl_extras: # no vllm support for 2.4.1
+        pytorch: 2.6.0
+        axolotl_extras: vllm
         num_gpus: 2
         nightly_build: "true"
       - cuda: 124
         cuda_version: 12.4.1
         python_version: "3.11"
-        pytorch: 2.5.1
-        axolotl_extras: vllm
+        pytorch: 2.4.1
+        axolotl_extras: # no vllm support for 2.4.1
         num_gpus: 2
         nightly_build: "true"
       - cuda: 124
         cuda_version: 12.4.1
         python_version: "3.11"
-        pytorch: 2.6.0
+        pytorch: 2.5.1
         axolotl_extras: vllm
         num_gpus: 2
         nightly_build: "true"

.github/workflows/tests.yml

Lines changed: 2 additions & 2 deletions

@@ -211,7 +211,7 @@ jobs:
       - cuda: 124
         cuda_version: 12.4.1
         python_version: "3.11"
-        pytorch: 2.5.1
+        pytorch: 2.6.0
         num_gpus: 1
         axolotl_extras: vllm
     steps:
@@ -258,7 +258,7 @@ jobs:
       - cuda: 124
         cuda_version: 12.4.1
         python_version: "3.11"
-        pytorch: 2.6.0
+        pytorch: 2.5.1
         num_gpus: 1
         axolotl_extras: vllm
     steps:

examples/llama4/scout-lora.yaml

Lines changed: 75 additions & 0 deletions (new file)

base_model: meta-llama/Llama-4-Scout-17B-16E
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

strict: false

# torch_compile: true

adapter: lora
lora_r: 32
lora_alpha: 64
lora_target_modules:
  - self_attn.q_proj
  - self_attn.k_proj
  - self_attn.v_proj
  - self_attn.o_proj
lora_modules_to_save:
  - lm_head
  - embed_tokens

chat_template: llama4
datasets:
  - path: mlabonne/FineTome-100k
    type: chat_template
    split: train[:20%]
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value

dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_8bit
lr_scheduler: cosine
learning_rate: 2e-5

bf16: true
tf32: true

# gradient_checkpointing: true
# gradient_checkpointing_kwargs:
#   use_reentrant: false
logging_steps: 1
flash_attention: true

warmup_steps: 100
evals_per_epoch: 2
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
  - auto_wrap
  - full_shard
fsdp_config:
  fsdp_version: 2
  fsdp_offload_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
  fsdp_reshard_after_forward: true
  fsdp_activation_checkpointing: true
special_tokens:
  pad_token: <|finetune_right_pad_id|>
  eos_token: <|eot|>
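A quick way to sanity-check this config before launching it (typically something like `axolotl train examples/llama4/scout-lora.yaml`) is to confirm that the two special_tokens overrides actually resolve in the Llama 4 tokenizer. This is a minimal sketch, not part of the commit, and it assumes access to the gated meta-llama/Llama-4-Scout-17B-16E repo:

# Sketch: verify the pad/eos overrides used by the config above exist in the vocab.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-4-Scout-17B-16E")
for token in ("<|finetune_right_pad_id|>", "<|eot|>"):
    # tokens missing from the vocab come back as the unk id (or None)
    print(token, tok.convert_tokens_to_ids(token))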

requirements.txt

Lines changed: 3 additions & 2 deletions

@@ -6,18 +6,19 @@ triton>=3.0.0
 mamba-ssm==1.2.0.post1
 xformers>=0.0.23.post1
 autoawq==0.2.7.post3
-liger-kernel==0.5.5
+liger-kernel==0.5.6
 # END section

 packaging==23.2

-peft==0.15.0
+peft==0.15.1
 transformers==4.51.0
 tokenizers>=0.21.1
 accelerate==1.6.0
 datasets==3.5.0
 deepspeed>=0.15.4
 trl==0.16.1
+hf_xet==1.0.0

 optimum==1.16.2
 hf_transfer
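The new hf_xet pin corresponds to the "add xet support" item in the commit message: with hf_xet installed, huggingface_hub can fetch Xet-backed repos (such as the Llama 4 checkpoints) through the same download calls as before. A rough sketch, assuming huggingface_hub is installed and the gated repo is accessible; nothing in the calling code has to change:

# Sketch: the download API is unchanged; hf_xet is picked up automatically when present.
from huggingface_hub import snapshot_download

local_dir = snapshot_download("meta-llama/Llama-4-Scout-17B-16E")
print(local_dir)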

src/axolotl/core/trainers/base.py

Lines changed: 13 additions & 0 deletions

@@ -562,6 +562,19 @@ def create_accelerator_and_postprocess(self):

         return res

+    def additional_accelerator_args(
+        self, fp8=None, **kwargs
+    ):  # pylint: disable=unused-argument
+        ret_kwargs = {}
+        if fp8:
+            from accelerate.utils import AORecipeKwargs
+
+            ret_kwargs["mixed_precision"] = "fp8"
+            ret_kwargs["kwargs_handlers"] = [AORecipeKwargs()]
+            os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
+
+        return ret_kwargs
+
     def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
         """
         Log `logs` on the various objects watching training, including stored metrics.
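For context on the new `additional_accelerator_args` hook (the "add preliminary fp8 support" item in the commit message), the kwargs it returns are the same ones you would pass when constructing an `accelerate.Accelerator` by hand. A sketch under the assumption that accelerate 1.6.0 (as pinned above) and torchao are installed; the exact wiring inside the trainer may differ:

# Sketch: what the fp8 branch above amounts to when building the Accelerator directly.
import os

from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs

os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])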

src/axolotl/integrations/liger/__init__.py

Lines changed: 12 additions & 0 deletions

@@ -173,5 +173,17 @@ def _liger_rms_norm_wrapper(dim, **kwargs):
         raise NotImplementedError(
             "Fused linear cross entropy is not yet supported for Gemma3."
         )
+    elif cfg.model_config_type == "llama4":
+        from axolotl.integrations.liger.models.llama4 import (
+            apply_liger_kernel_to_llama4,
+        )
+
+        apply_liger_kernel_to_llama4(
+            cross_entropy=cfg.liger_cross_entropy,
+            fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
+            glu_activation=cfg.liger_glu_activation,
+            rms_norm=cfg.liger_rms_norm,
+            layer_norm=cfg.liger_layer_norm,
+        )
     elif cfg.model_config_type in ["deepseek_v3"]:
         raise ValueError(f"Unsupported model config type: {cfg.model_config_type}")
src/axolotl/integrations/liger/models/llama4.py

Lines changed: 171 additions & 0 deletions (new file)

"""
Liger FLCE for llama4
"""

import sys
from typing import List, Optional, Tuple, Union

import torch
from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
from transformers.modeling_outputs import CausalLMOutputWithPast


def lce_forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[
        Union["Cache", List[torch.FloatTensor]]  # noqa: F821
    ] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    logits_to_keep: Union[int, torch.Tensor] = 0,
    **loss_kwargs,
) -> Union[Tuple, CausalLMOutputWithPast]:
    r"""
    Args:
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        logits_to_keep (`int` or `torch.Tensor`, *optional*):
            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only
            for that token can save memory, which becomes pretty significant for long sequences or large vocabulary
            size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length
            dimension. This is useful when using packed tensor format (single dimension for batch and sequence
            length).

    Returns:
    """

    # pylint: disable=duplicate-code
    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        cache_position=cache_position,
    )

    hidden_states = outputs[0]

    if hasattr(self.config, "pretraining_tp") and self.config.pretraining_tp > 1:
        raise Exception(  # pylint: disable=broad-exception-raised
            "Liger Kernel does not support pretraining_tp!!"
        )

    logits = None
    loss = None
    # if in training mode, don't materialize logits
    if self.training and (labels is not None):
        loss = LigerForCausalLMLoss(
            hidden_states=hidden_states,
            lm_head_weight=self.lm_head.weight,
            labels=labels,
            hidden_size=self.config.hidden_size,
            **loss_kwargs,
        )

    else:  # if in inference mode materialize logits
        slice_indices = (
            slice(-logits_to_keep, None)
            if isinstance(logits_to_keep, int)
            else logits_to_keep
        )
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        if labels is not None:
            loss = self.loss_function(
                logits=logits,
                labels=labels,
                vocab_size=self.config.vocab_size,
                **loss_kwargs,
            )

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output

    return CausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )


def apply_liger_kernel_to_llama4(
    cross_entropy: bool = False,
    fused_linear_cross_entropy: bool = False,
    rms_norm: bool = False,
    glu_activation: bool = False,
    layer_norm: bool = False,
    **kwargs,  # pylint: disable=unused-argument
) -> None:
    """
    Apply Liger kernels to replace the original implementations in HuggingFace Llama 4 models.

    Args:
        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
        fused_linear_cross_entropy (bool):
            Whether to apply Liger's fused linear cross entropy loss. Default is False.
            `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
            If `fused_linear_cross_entropy` is True, the logits will not be materialized, which is more memory
            efficient.
        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is False.
        glu_activation (bool): Whether to apply Liger's SwiGLU MLP. Default is False.
        layer_norm (bool): Whether to apply Liger's LayerNorm. Default is False.
    """

    import transformers.models.llama4.modeling_llama4  # noqa: F401 # pylint: disable=unused-import
    from liger_kernel.transformers.functional import liger_cross_entropy
    from liger_kernel.transformers.layer_norm import LigerLayerNorm
    from liger_kernel.transformers.rms_norm import LigerRMSNorm
    from liger_kernel.transformers.swiglu import LigerSwiGLUMLP

    assert not (
        cross_entropy and fused_linear_cross_entropy
    ), "cross_entropy and fused_linear_cross_entropy cannot both be True."

    modeling_llama4 = sys.modules["transformers.models.llama4.modeling_llama4"]

    if rms_norm:
        modeling_llama4.Llama4TextRMSNorm = LigerRMSNorm
    if glu_activation:
        modeling_llama4.Llama4TextMLP = LigerSwiGLUMLP
    if layer_norm:
        modeling_llama4.nn.LayerNorm = LigerLayerNorm

    if cross_entropy:
        from transformers.loss.loss_utils import nn

        nn.functional.cross_entropy = liger_cross_entropy

    if fused_linear_cross_entropy:
        modeling_llama4.Llama4ForCausalLM.forward = lce_forward
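Outside of axolotl's plugin flow, the patch can also be applied by hand; the key detail is that it has to run before the model is instantiated so the swapped classes and the patched forward are the ones actually used. A minimal sketch, assuming transformers 4.51.0 (as pinned above) and liger-kernel 0.5.6:

# Sketch: apply the Liger patches first, then load the model so they take effect.
from transformers import Llama4ForConditionalGeneration

from axolotl.integrations.liger.models.llama4 import apply_liger_kernel_to_llama4

apply_liger_kernel_to_llama4(
    fused_linear_cross_entropy=True,  # skip materializing logits during training
    rms_norm=True,
    glu_activation=True,
)
model = Llama4ForConditionalGeneration.from_pretrained(
    "meta-llama/Llama-4-Scout-17B-16E", torch_dtype="bfloat16"
)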

src/axolotl/monkeypatch/attention/flex_attn.py

Lines changed: 0 additions & 1 deletion

@@ -162,7 +162,6 @@ def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
     for n in tuple(sys.modules):
         if ".modeling_" in n and "llama4" not in n:
             if hasattr(sys.modules[n], "make_flex_block_causal_mask"):
-                print(n)
                 sys.modules[n].make_flex_block_causal_mask = (
                     patched_make_flex_block_causal_mask
                 )

src/axolotl/monkeypatch/multipack.py

Lines changed: 1 addition & 0 deletions

@@ -13,6 +13,7 @@
 SUPPORTED_MULTIPACK_MODEL_TYPES = [
     "mllama_text_model",
     "llama",
+    "llama4",
     "mistral",
     "mixtral",
     "qwen2",

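A trivial check (a sketch, not part of the commit) that the new entry is visible where axolotl's sample-packing code looks it up:

# Sketch: llama4 should now be listed as multipack-capable.
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES

assert "llama4" in SUPPORTED_MULTIPACK_MODEL_TYPES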