@@ -308,23 +308,23 @@ class VLLMSamplingParams(BaseModel):
 
 
 class VLLMChatCompletionAdditionalParams(VLLMSamplingParams):
-    echo: bool = Field(
-        default=False,
+    echo: Optional[bool] = Field(
+        default=None,
         description=(
             "If true, the new message will be prepended with the last message "
             "if they belong to the same role."
         ),
     )
-    add_generation_prompt: bool = Field(
-        default=True,
+    add_generation_prompt: Optional[bool] = Field(
+        default=None,
         description=(
             "If true, the generation prompt will be added to the chat template. "
             "This is a parameter used by chat template in tokenizer config of the "
             "model."
         ),
     )
-    continue_final_message: bool = Field(
-        default=False,
+    continue_final_message: Optional[bool] = Field(
+        default=None,
         description=(
             "If this is set, the chat will be formatted so that the final "
             "message in the chat is open-ended, without any EOS tokens. The "
@@ -333,8 +333,8 @@ class VLLMChatCompletionAdditionalParams(VLLMSamplingParams):
             "Cannot be used at the same time as `add_generation_prompt`."
         ),
     )
-    add_special_tokens: bool = Field(
-        default=False,
+    add_special_tokens: Optional[bool] = Field(
+        default=None,
         description=(
             "If true, special tokens (e.g. BOS) will be added to the prompt "
             "on top of what is added by the chat template. "
@@ -404,8 +404,8 @@ class VLLMChatCompletionAdditionalParams(VLLMSamplingParams):
             "for guided json decoding."
         ),
     )
-    priority: int = Field(
-        default=0,
+    priority: Optional[int] = Field(
+        default=None,
         description=(
             "The priority of the request (lower means earlier handling; "
             "default: 0). Any priority other than 0 will raise an error "
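
For reference, the pattern behind this change: declaring the fields as `Optional[...]` with `default=None` lets the model distinguish "not provided" from an explicitly passed `False`/`0`, so unset fields can be dropped before the request is forwarded and the engine's own defaults apply. A minimal sketch, assuming Pydantic v2 (`model_dump`); the class and field selection below are illustrative, not the actual adapter code:

from typing import Optional

from pydantic import BaseModel, Field


class ChatParamsSketch(BaseModel):
    # Illustrative stand-in for VLLMChatCompletionAdditionalParams:
    # Optional with default=None means "unset", not "False"/"0".
    echo: Optional[bool] = Field(default=None)
    priority: Optional[int] = Field(default=None)


params = ChatParamsSketch(echo=False)
# Only explicitly provided values survive; unset fields are omitted so the
# downstream engine falls back to its own defaults.
print(params.model_dump(exclude_none=True))  # {'echo': False}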