Skip to content

Commit eefcfee

Browse files
committed
Qwen3 ASR working
1 parent 172865e commit eefcfee

File tree

2 files changed

+12
-0
lines changed

2 files changed

+12
-0
lines changed

convert_hf_to_gguf.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5057,6 +5057,14 @@ def set_vocab(self):
50575057
super().set_vocab()
50585058
# fix chat template, use correct chatml format
50595059
self.gguf_writer.add_chat_template("{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}")
5060+
# correct BOS/EOS tokens
5061+
with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
5062+
tokenizer_config = json.load(f)
5063+
added_tokens = tokenizer_config.get("added_tokens_decoder", {})
5064+
for token_id, data in added_tokens.items():
5065+
if data.get("content") == "<|im_end|>":
5066+
self.gguf_writer.add_bos_token_id(int(token_id))
5067+
self.gguf_writer.add_eos_token_id(int(token_id))
50605068

50615069
def modify_tensors(self, data_torch, name, bid):
50625070
# qwen3-omni

tools/mtmd/mtmd.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -982,6 +982,10 @@ bool mtmd_decode_use_non_causal(mtmd_context * ctx) {
982982
}
983983

984984
bool mtmd_decode_use_mrope(mtmd_context * ctx) {
985+
if (ctx->ctx_v == nullptr && ctx->proj_type_a() == PROJECTOR_TYPE_QWEN3A) {
986+
// qwen3-asr
987+
return true;
988+
}
985989
switch (ctx->proj_type_v()) {
986990
case PROJECTOR_TYPE_QWEN2VL:
987991
case PROJECTOR_TYPE_QWEN25VL:

0 commit comments

Comments (0)