Commit 93ba573

fix: qwen3_vl attention config (#3216)

1 parent: aa1240a

1 file changed: 5 additions (+), 0 deletions (-)


src/axolotl/monkeypatch/lora_kernels.py

Lines changed: 5 additions & 0 deletions
@@ -134,6 +134,11 @@ def get_attention_cls_from_config(cfg: DictDefault) -> Type[nn.Module]:
 
         return Qwen2Attention
 
+    if model_type == "qwen3_vl":
+        from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLTextAttention
+
+        return Qwen3VLTextAttention
+
     if model_type == "mllama":
         from transformers.models.mllama.modeling_mllama import MllamaTextSelfAttention
 