
Commit 7c7edeb

Qwen3 model support (#692)
## Summary

This pull request introduces support for the Qwen3 model within the Liger-Kernel framework.

Fixes: #690

## Testing Done

A simple check:

```
from transformers import Qwen3ForCausalLM
from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3

apply_liger_kernel_to_qwen3()
model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
print(model)
```

gives the following output:

![Screenshot 2025-05-02 at 12 15 14 AM](https://github.com/user-attachments/assets/9d32456b-8f89-4a71-96f1-9f71ae2a2543)

- Hardware Type: <BLANK>
- [x] run `make test` to ensure correctness
- [x] run `make checkstyle` to ensure code style
- [x] run `make test-convergence` to ensure convergence
1 parent 64f5e1d commit 7c7edeb
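Beyond the `print(model)` smoke test in the PR description, one way to verify the patch took effect is to assert on module types. This is a hedged sketch, not part of the PR: the import paths for `LigerRMSNorm` and `LigerSwiGLUMLP` and the `model.model.norm` / `layers[0].mlp` attribute layout are assumptions based on the standard HF Qwen3 structure and the Liger module names used in this diff.

```python
# Hypothetical verification sketch (not part of this PR).
# Assumes the standard HF Qwen3 module layout and Liger module import paths.
from transformers import Qwen3ForCausalLM

from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3
from liger_kernel.transformers.rms_norm import LigerRMSNorm
from liger_kernel.transformers.swiglu import LigerSwiGLUMLP

apply_liger_kernel_to_qwen3()  # patch the classes before the model is built
model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")

# Modules constructed after the patch should be the Liger implementations.
assert isinstance(model.model.norm, LigerRMSNorm)
assert isinstance(model.model.layers[0].mlp, LigerSwiGLUMLP)
```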

10 files changed: +455 −0 lines changed

README.md

Lines changed: 1 addition & 0 deletions

```diff
@@ -269,6 +269,7 @@ loss.backward()
 | Qwen2, Qwen2.5, & QwQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2-VL, & QVQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2.5-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_5_vl` | RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| Qwen3 | `liger_kernel.transformers.apply_liger_kernel_to_qwen3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Granite 3.0 & 3.1 | `liger_kernel.transformers.apply_liger_kernel_to_granite` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss |
 | OLMo2 | `liger_kernel.transformers.apply_liger_kernel_to_olmo2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
```
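The new README row lists the kernels this entry point can swap in. As a hedged illustration, each kernel can be toggled individually; the keyword names and defaults below come straight from the `apply_liger_kernel_to_qwen3` signature added in `monkey_patch.py` further down.

```python
# Selective kernel patching, per the signature added in this PR.
from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3

apply_liger_kernel_to_qwen3(
    rope=True,                        # Liger rotary position embeddings
    rms_norm=True,                    # LigerRMSNorm in place of Qwen3RMSNorm
    swiglu=True,                      # LigerSwiGLUMLP in place of Qwen3MLP
    cross_entropy=False,              # plain Liger cross-entropy; mutually
    fused_linear_cross_entropy=True,  # exclusive with the fused linear+CE kernel
)
```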

src/liger_kernel/transformers/__init__.py

Lines changed: 3 additions & 0 deletions

```diff
@@ -39,6 +39,7 @@
 from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2  # noqa: F401
 from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_5_vl  # noqa: F401
 from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen2_vl  # noqa: F401
+from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_qwen3  # noqa: F401


 # Check if 'transformers' is installed
@@ -93,6 +94,7 @@ def __getattr__(name: str):
         "apply_liger_kernel_to_qwen2",
         "apply_liger_kernel_to_qwen2_5_vl",
         "apply_liger_kernel_to_qwen2_vl",
+        "apply_liger_kernel_to_qwen3",
     }

     if name in monkey_patch_symbols:
@@ -144,5 +146,6 @@ def __getattr__(name: str):
             "apply_liger_kernel_to_qwen2",
             "apply_liger_kernel_to_qwen2_5_vl",
             "apply_liger_kernel_to_qwen2_vl",
+            "apply_liger_kernel_to_qwen3",
         ]
     )
```
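The `__getattr__` hunks above register the new symbol with the package's module-level `__getattr__` hook (PEP 562), so `apply_liger_kernel_to_qwen3` can be resolved as a package attribute. The sketch below shows that general pattern only; the names `_lazy_exports` and `mypkg` are hypothetical and the package's actual resolution logic is not fully shown in this diff.

```python
# mypkg/__init__.py — a minimal PEP 562 lazy-export sketch (illustrative only).
import importlib

_lazy_exports = {
    "apply_liger_kernel_to_qwen3": "mypkg.monkey_patch",  # symbol -> module
}

def __getattr__(name: str):
    # Called only when `name` is not found as a regular module attribute.
    if name in _lazy_exports:
        module = importlib.import_module(_lazy_exports[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```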
src/liger_kernel/transformers/model/qwen3.py (new file; path inferred from the `monkey_patch.py` import below)

Lines changed: 118 additions & 0 deletions

````python
from typing import List
from typing import Optional
from typing import Union

import torch

from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.qwen3.modeling_qwen3 import _CONFIG_FOR_DOC
from transformers.models.qwen3.modeling_qwen3 import QWEN3_INPUTS_DOCSTRING
from transformers.utils import add_start_docstrings_to_model_forward
from transformers.utils import replace_return_docstrings

from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss


@add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def lce_forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    logits_to_keep: Union[int, torch.Tensor] = 0,
    **kwargs,
) -> CausalLMOutputWithPast:
    r"""
    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
        config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
        (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    logits_to_keep (`int` or `torch.Tensor`, *optional*):
        If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
        `input_ids` (special case). Only last token logits are needed for generation, and calculating them only
        for that token can save memory, which becomes pretty significant for long sequences or large vocabulary
        size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length
        dimension. This is useful when using packed tensor format (single dimension for batch and sequence
        length).

    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, Qwen3ForCausalLM

    >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
    >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        cache_position=cache_position,
        **kwargs,
    )

    hidden_states = outputs[0]
    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    kept_hidden_states = hidden_states[:, slice_indices, :]

    shift_labels = kwargs.pop("shift_labels", None)
    logits = None
    loss = None
    # if in training mode, don't materialize logits
    if self.training and (labels is not None or shift_labels is not None):
        loss = LigerForCausalLMLoss(
            hidden_states=kept_hidden_states,
            lm_head_weight=self.lm_head.weight,
            labels=labels,
            shift_labels=shift_labels,
            hidden_size=self.config.hidden_size,
            **kwargs,
        )
    else:  # if in inference mode, materialize logits
        logits = self.lm_head(kept_hidden_states)
        if labels is not None:
            loss = self.loss_function(
                logits=logits,
                labels=labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

    return CausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
````
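To make the `logits_to_keep` slicing above concrete: for an `int` n, `slice(-n, None)` keeps the last n sequence positions, and the `n == 0` case degenerates to `slice(0, None)` (since `-0 == 0`), which keeps everything. A small self-contained sketch of just that logic:

```python
# Demonstrates the slice_indices logic from lce_forward above.
import torch

hidden_states = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)

for logits_to_keep in (0, 1, 3):
    slice_indices = slice(-logits_to_keep, None)
    kept = hidden_states[:, slice_indices, :]
    # logits_to_keep == 0 -> slice(0, None): all 5 positions survive;
    # otherwise only the trailing `logits_to_keep` positions do.
    print(logits_to_keep, kept.shape)
# 0 torch.Size([2, 5, 8])
# 1 torch.Size([2, 1, 8])
# 3 torch.Size([2, 3, 8])
```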

src/liger_kernel/transformers/monkey_patch.py

Lines changed: 55 additions & 0 deletions

```diff
@@ -1048,6 +1048,60 @@ def apply_liger_kernel_to_qwen2(
     print("Applied Liger kernels to Qwen2")


+def apply_liger_kernel_to_qwen3(
+    rope: bool = True,
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = True,
+    rms_norm: bool = True,
+    swiglu: bool = True,
+    model: PreTrainedModel = None,
+) -> None:
+    """
+    Apply Liger kernels to replace original implementation in HuggingFace Qwen3 models.
+    """
+    assert not (cross_entropy and fused_linear_cross_entropy), (
+        "cross_entropy and fused_linear_cross_entropy cannot both be True."
+    )
+
+    from transformers.models.qwen3 import modeling_qwen3
+    from transformers.models.qwen3.modeling_qwen3 import Qwen3Model
+
+    from liger_kernel.transformers.model.qwen3 import lce_forward as qwen3_lce_forward
+
+    if rope:
+        modeling_qwen3.apply_rotary_pos_emb = liger_rotary_pos_emb
+
+    if rms_norm:
+        modeling_qwen3.Qwen3RMSNorm = LigerRMSNorm
+
+    if cross_entropy:
+        from transformers.loss.loss_utils import nn
+
+        nn.functional.cross_entropy = liger_cross_entropy
+
+    if fused_linear_cross_entropy:
+        modeling_qwen3.Qwen3ForCausalLM.forward = qwen3_lce_forward
+
+    if swiglu:
+        modeling_qwen3.Qwen3MLP = LigerSwiGLUMLP
+
+    if model is not None:
+        # The model instance already exists, so we need to additionally patch the
+        # instance variables that reference already-instantiated modules
+
+        # get the base model from the model instance
+        base_model: Qwen3Model = getattr(model, model.base_model_prefix, model)
+
+        if rms_norm:
+            _patch_rms_norm_module(base_model.norm)
+        for decoder_layer in base_model.layers:
+            if swiglu:
+                _patch_swiglu_module(decoder_layer.mlp, LigerSwiGLUMLP)
+            if rms_norm:
+                _patch_rms_norm_module(decoder_layer.input_layernorm)
+                _patch_rms_norm_module(decoder_layer.post_attention_layernorm)
+
+
 def apply_liger_kernel_to_qwen2_vl(
     rope: bool = True,
     cross_entropy: bool = False,
@@ -1400,6 +1454,7 @@ def apply_liger_kernel_to_glm4(
     "mixtral": apply_liger_kernel_to_mixtral,
     "olmo2": apply_liger_kernel_to_olmo2,
     "qwen2": apply_liger_kernel_to_qwen2,
+    "qwen3": apply_liger_kernel_to_qwen3,
    "qwen2_vl": apply_liger_kernel_to_qwen2_vl,
     "qwen2_5_vl": apply_liger_kernel_to_qwen2_5_vl,
     "phi3": apply_liger_kernel_to_phi3,
```
test/convergence/bf16/test_mini_models.py

Lines changed: 56 additions & 0 deletions

```diff
@@ -33,6 +33,7 @@
 from liger_kernel.transformers import apply_liger_kernel_to_qwen2
 from liger_kernel.transformers import apply_liger_kernel_to_qwen2_5_vl
 from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl
+from liger_kernel.transformers import apply_liger_kernel_to_qwen3
 from test.utils import DEFAULT_DATASET_PATH
 from test.utils import MiniModelConfig
 from test.utils import assert_verbose_allclose
@@ -51,6 +52,7 @@
 from test.utils import revert_liger_kernel_to_qwen2
 from test.utils import revert_liger_kernel_to_qwen2_5_vl
 from test.utils import revert_liger_kernel_to_qwen2_vl
+from test.utils import revert_liger_kernel_to_qwen3
 from test.utils import set_seed
 from test.utils import simple_collate_fn
 from test.utils import supports_bfloat16
@@ -82,6 +84,14 @@
 except ImportError:
     QWEN2_5_VL_AVAILABLE = False

+try:
+    from transformers.models.qwen3.configuration_qwen3 import Qwen3Config
+    from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM
+
+    QWEN3_AVAILABLE = True
+except ImportError:
+    QWEN3_AVAILABLE = False
+
 try:
     from transformers.models.granite import GraniteConfig
     from transformers.models.granite import GraniteForCausalLM
@@ -358,6 +368,33 @@
     ),
 }

+if QWEN3_AVAILABLE:
+    MINI_MODEL_SETUPS["mini_qwen3"] = MiniModelConfig(
+        liger_kernel_patch_func=apply_liger_kernel_to_qwen3,
+        liger_kernel_patch_revert_func=revert_liger_kernel_to_qwen3,
+        model_class=Qwen3ForCausalLM,
+        mini_model_config=Qwen3Config(
+            attention_dropout=0.0,
+            bos_token_id=1,
+            eos_token_id=2,
+            hidden_act="silu",
+            hidden_size=896,
+            initializer_range=0.02,
+            intermediate_size=4864,
+            max_position_embeddings=32768,
+            num_attention_heads=8,
+            num_hidden_layers=4,
+            num_key_value_heads=2,
+            rms_norm_eps=1e-6,
+            rope_theta=1000000.0,
+            sliding_window=131072,
+            tie_word_embeddings=True,
+            use_cache=True,
+            vocab_size=32000,
+            attn_implementation="sdpa",
+        ),
+    )
+
 if GEMMA3_AVAILABLE:
     MINI_MODEL_SETUPS["mini_gemma3_text"] = MiniModelConfig(
         liger_kernel_patch_func=apply_liger_kernel_to_gemma3_text,
@@ -851,6 +888,25 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
+        pytest.param(
+            "mini_qwen3",
+            32,
+            1e-4,
+            torch.bfloat16,
+            1e-3,
+            1e-2,
+            1e-1,
+            1e-2,
+            1e-2,
+            1e-2,
+            marks=[
+                pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
+                pytest.mark.skipif(
+                    not QWEN3_AVAILABLE,
+                    reason="Qwen3 not available in this version of transformers",
+                ),
+            ],
+        ),
         pytest.param(
             "mini_qwen2_vl",
             32,
```
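The `mini_qwen3` setup above shrinks Qwen3 to a 4-layer, 896-hidden configuration so the bf16 convergence test runs quickly. As a hedged standalone sketch using only config fields taken from the diff, the mini model can be instantiated directly to sanity-check that the config is valid and to see its size:

```python
# Build the mini Qwen3 config from the test setup above and report its size.
from transformers.models.qwen3.configuration_qwen3 import Qwen3Config
from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM

config = Qwen3Config(
    hidden_size=896,
    intermediate_size=4864,
    num_hidden_layers=4,
    num_attention_heads=8,
    num_key_value_heads=2,
    vocab_size=32000,
    tie_word_embeddings=True,
)
model = Qwen3ForCausalLM(config)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```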
