
Commit 70af49b

fix json format issue for "transformerpipeline" (#63)

1 parent: 63778b7

File tree: 2 files changed, +1 −3 lines

llmserve/backend/llm/pipelines/default_pipeline.py (0 additions, 2 deletions)

@@ -57,8 +57,6 @@ def preprocess(self, prompts: List[str], **generate_kwargs):
         try:
             prompt_text_bak = prompt_text
             logger.info(f"call json.loads")
-            # for p in prompt_text:
-            #     logger.info(f"{p}")
             prompt_text = [json.loads(prompt, strict=False) for prompt in prompt_text]
             logger.info(f"call tokenizer.apply_chat_template")
             prompt_text = [self.tokenizer.apply_chat_template(prompt_obj, tokenize=False, add_generation_prompt=True) for prompt_obj in prompt_text]
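For context, a minimal sketch of the flow this hunk touches: each incoming prompt is expected to be a JSON-encoded list of chat messages, which is parsed and then rendered into a single prompt string by the tokenizer's chat template. The model name and message payload below are illustrative assumptions, not taken from the repo.

```python
# Minimal sketch of the preprocess flow in default_pipeline.py.
# Assumptions: the tokenizer name is arbitrary (any model that ships a chat
# template works) and the prompt payload is invented for illustration.
import json

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

# Prompts arrive as JSON strings, each encoding a list of chat messages.
prompt_text = ['[{"role": "user", "content": "What does strict=False do?"}]']

# strict=False lets json.loads accept control characters inside strings.
prompt_objs = [json.loads(prompt, strict=False) for prompt in prompt_text]

# Render each message list into one prompt string via the chat template.
rendered = [
    tokenizer.apply_chat_template(obj, tokenize=False, add_generation_prompt=True)
    for obj in prompt_objs
]
print(rendered[0])
```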

llmserve/backend/llm/pipelines/default_transformers_pipeline.py (1 addition, 1 deletion)

@@ -144,7 +144,7 @@ def preprocess(self, prompts: List[str], **generate_kwargs):
         if isinstance(self.pipeline, transformers.pipelines.text_generation.TextGenerationPipeline):
             try:
                 prompt_text_bak = prompt_text
-                prompt_text = [json.loads(prompt) for prompt in prompt_text]
+                prompt_text = [json.loads(prompt, strict=False) for prompt in prompt_text]
                 prompt_text = [self.tokenizer.apply_chat_template(prompt_obj, tokenize=False, add_generation_prompt=True) for prompt_obj in prompt_text]
             except:
                 logger.info("Seems no chat template from user or the model donot has a 'chat template'")

0 commit comments