@@ -111,27 +111,23 @@ async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
111111 guided_decoding = None
112112 guided_json = None
113113 # ---- 支持 response_format,但是官方对BPE分词器的支持仍然太差 ----
114- # if response_format is not None:
115- # if response_format["type"] == "json_object":
116- # guided_json_object = True
117- # if response_format["type"] == "json_schema":
118- # json_schema = response_format["json_schema"]
119- # assert json_schema is not None
120- # guided_json = json_schema["schema"]
114+        if response_format is not None:
115+            if response_format["type"] == "json_object":
116+                guided_json_object = True
117+            if response_format["type"] == "json_schema":
118+                json_schema = response_format["json_schema"]
119+                assert json_schema is not None
120+                guided_json = json_schema["schema"]
121121
122- # guided_decoding = GuidedDecodingParams.from_optional(
123- # json=guided_json,
124- # regex=None,
125- # choice=None,
126- # grammar=None,
127- # json_object=guided_json_object,
128- # backend=(
129- # self.engine_args.guided_decoding_backend
130- # if self.engine_args.guided_decoding_backend
131- # else "lm-format-enforcer"
132- # ),
133- # whitespace_pattern=None,
134- # )
122+            guided_decoding = GuidedDecodingParams.from_optional(
123+                json=guided_json,
124+                regex=None,
125+                choice=None,
126+                grammar=None,
127+                json_object=guided_json_object,
128+                backend="lm-format-enforcer",
129+                whitespace_pattern=None,
130+            )
135131 # ---- 支持 response_format,但是官方对BPE分词器的支持仍然太差 ----
136132        sampling = SamplingParams(
137133            top_p=top_p,
0 commit comments