
Commit 9579f20

update for new chat_template (#4672)

* update for new chat_template
* adapt to the old chat_template
* fix

1 parent eaa32c1

File tree

2 files changed: +25 −6 lines changed

paddlex/inference/models/common/tokenizer/tokenizer_utils.py

Lines changed: 21 additions & 5 deletions
@@ -713,11 +713,27 @@ def _apply_chat_template(
             "apply_chat_template do not support applying batch conversations, "
             "so you should apply the conversation one by one."
         )
-        query = self.chat_template.render(
-            messages=conversations,
-            **self.special_tokens_map,
-            add_generation_prompt=add_generation_prompt,
-        )
+        try:
+            query = self.chat_template.render(
+                messages=conversations,
+                **self.special_tokens_map,
+                add_generation_prompt=add_generation_prompt,
+            )
+        except TypeError:
+            for i in range(len(conversations)):
+                content = conversations[i]["content"]
+                if isinstance(content, list):
+                    new_content = ""
+                    for part in content:
+                        if part.get("type") == "text":
+                            new_content = part["text"]
+                            break
+                    conversations[i]["content"] = new_content
+            query = self.chat_template.render(
+                messages=conversations,
+                **self.special_tokens_map,
+                add_generation_prompt=add_generation_prompt,
+            )
         return query
 
     def encode_chat_inputs(
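
The hunk above is the core of the commit: render with the new list-form message content first, and if the loaded chat template is an old-style one that only accepts plain-string content, flatten each message to its first text part and retry. Below is a minimal, self-contained sketch of that fallback, not the PaddleX source: it assumes a Jinja2 template (the usual backing for chat_template.render), and old_template is hypothetical, written to raise TypeError on list-form content the way an old string-concatenating template does.

from jinja2 import Template

# Hypothetical old-style template: concatenates "content" as a string,
# so list-form multimodal content makes render() raise TypeError.
old_template = Template(
    "{% for m in messages %}{{ m['role'] + ': ' + m['content'] }}\n{% endfor %}"
)

def render_with_fallback(template, conversations):
    # Same shape as the patched _apply_chat_template: try the list-form
    # content first; on TypeError, flatten each message to its first
    # {"type": "text"} part (image placeholders are dropped) and retry.
    try:
        return template.render(messages=conversations)
    except TypeError:
        for i in range(len(conversations)):
            content = conversations[i]["content"]
            if isinstance(content, list):
                new_content = ""
                for part in content:
                    if part.get("type") == "text":
                        new_content = part["text"]
                        break
                conversations[i]["content"] = new_content
        return template.render(messages=conversations)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "placeholder"},
            {"type": "text", "text": "Recognize the text in this image."},
        ],
    }
]
print(render_with_fallback(old_template, messages))
# -> user: Recognize the text in this image.

Note that, as in the commit, the fallback mutates the conversation in place and keeps only the first text part; that is tolerable here because the flattened path targets templates that cannot represent image parts anyway.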

paddlex/inference/models/doc_vlm/processors/paddleocr_vl/_paddleocr_vl.py

Lines changed: 4 additions & 1 deletion
@@ -82,7 +82,10 @@ def preprocess(
         messages = [
             {
                 "role": "user",
-                "content": input_dict["query"],
+                "content": [
+                    {"type": "image", "image": "placeholder"},  # placeholder
+                    {"type": "text", "text": input_dict["query"]},
+                ],
             }
         ]
         prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)
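
For contrast, a new-style multimodal chat template can consume this list-form content directly, with no fallback. The sketch below is hypothetical (neither the template nor the <|IMAGE|> token is PaddleOCR-VL's actual chat_template); it only illustrates why the preprocessor now emits an image part plus a text part.

from jinja2 import Template

# Hypothetical new-style template: iterates the content parts and emits
# an image token for image parts, text verbatim for text parts.
new_template = Template(
    "{% for m in messages %}<|{{ m['role'] }}|>"
    "{% for part in m['content'] %}"
    "{% if part['type'] == 'image' %}<|IMAGE|>"
    "{% elif part['type'] == 'text' %}{{ part['text'] }}"
    "{% endif %}"
    "{% endfor %}\n{% endfor %}"
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "placeholder"},  # placeholder, as in the diff
            {"type": "text", "text": "OCR this page."},
        ],
    }
]
print(new_template.render(messages=messages))
# -> <|user|><|IMAGE|>OCR this page.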
