 import asyncio
 import time
 from collections.abc import AsyncGenerator, AsyncIterator
+from contextlib import AsyncExitStack
 from copy import copy
 from http import HTTPStatus
 from typing import Any, Callable, Final, Optional, Union
@@ -226,99 +227,125 @@ async def create_responses(
|

         # Schedule the request and get the result generator.
         generators: list[AsyncGenerator[ConversationContext, None]] = []
-        try:
-            tool_sessions: dict[str, Any] = {}
-            for i, engine_prompt in enumerate(engine_prompts):
-                default_max_tokens = self.max_model_len - len(
-                    engine_prompt["prompt_token_ids"])
-                sampling_params = request.to_sampling_params(
-                    default_max_tokens, self.default_sampling_params)
-
-                trace_headers = (None if raw_request is None else await
-                                 self._get_trace_headers(raw_request.headers))
-
-                context: ConversationContext
-                if self.use_harmony:
-                    if request.stream:
-                        context = StreamingHarmonyContext(
-                            messages, tool_sessions)
-                    else:
-                        context = HarmonyContext(messages, tool_sessions)
+
+        builtin_tool_list: list[str] = []
+        if self.use_harmony and self.tool_server is not None:
+            if self.tool_server.has_tool("browser"):
+                builtin_tool_list.append("browser")
+            if self.tool_server.has_tool("python"):
+                builtin_tool_list.append("python")
+        async with AsyncExitStack() as exit_stack:
+            try:
+                if self.tool_server is not None:
+                    # TODO: initialize tool sessions lazily when the session
+                    # is actually used.
+                    tool_session_ctxs: dict[str, Any] = {
+                        tool_name:
+                        exit_stack.enter_async_context(
+                            self.tool_server.new_session(tool_name))
+                        for tool_name in builtin_tool_list
+                    }
+                    tool_sessions = {}
+                    for tool_name in builtin_tool_list:
+                        tool_sessions[tool_name] = (
+                            await tool_session_ctxs[tool_name])
                 else:
-                    context = SimpleContext()
-                generator = self._generate_with_builtin_tools(
-                    request_id=request.request_id,
-                    request_prompt=request_prompts[i],
-                    engine_prompt=engine_prompt,
-                    sampling_params=sampling_params,
-                    context=context,
-                    lora_request=lora_request,
-                    priority=request.priority,
-                    trace_headers=trace_headers,
+                    assert len(builtin_tool_list) == 0
+                    tool_sessions = {}
+                for i, engine_prompt in enumerate(engine_prompts):
+                    default_max_tokens = self.max_model_len - len(
+                        engine_prompt["prompt_token_ids"])
+                    sampling_params = request.to_sampling_params(
+                        default_max_tokens, self.default_sampling_params)
+
+                    trace_headers = (None if raw_request is None else await
+                                     self._get_trace_headers(
+                                         raw_request.headers))
+
+                    context: ConversationContext
+                    if self.use_harmony:
+                        if request.stream:
+                            context = StreamingHarmonyContext(
+                                messages, tool_sessions)
+                        else:
+                            context = HarmonyContext(messages, tool_sessions)
+                    else:
+                        context = SimpleContext()
+                    generator = self._generate_with_builtin_tools(
+                        request_id=request.request_id,
+                        request_prompt=request_prompts[i],
+                        engine_prompt=engine_prompt,
+                        sampling_params=sampling_params,
+                        context=context,
+                        lora_request=lora_request,
+                        priority=request.priority,
+                        trace_headers=trace_headers,
+                    )
+                    generators.append(generator)
+            except ValueError as e:
+                # TODO: Use a vllm-specific Validation Error
+                return self.create_error_response(str(e))
+
+            assert len(generators) == 1
+            result_generator, = generators
+
+            # Store the input messages.
+            if request.store:
+                self.msg_store[request.request_id] = messages
+
+            if request.background:
+                created_time = int(time.time())
+                response = ResponsesResponse.from_request(
+                    request,
+                    sampling_params,
+                    model_name=model_name,
+                    created_time=created_time,
+                    output=[],
+                    status="queued",
+                    usage=None,
                 )
-                generators.append(generator)
-        except ValueError as e:
-            # TODO: Use a vllm-specific Validation Error
-            return self.create_error_response(str(e))
+                async with self.response_store_lock:
+                    self.response_store[response.id] = response

-        assert len(generators) == 1
-        result_generator, = generators
+                # Run the request in the background.
+                task = asyncio.create_task(
+                    self._run_background_request(
+                        request,
+                        sampling_params,
+                        result_generator,
+                        context,
+                        model_name,
+                        tokenizer,
+                        request_metadata,
+                        created_time,
+                    ),
+                    name=f"create_{response.id}",
+                )

-        # Store the input messages.
-        if request.store:
-            self.msg_store[request.request_id] = messages
+                # For cleanup.
+                response_id = response.id
+                self.background_tasks[response_id] = task
+                task.add_done_callback(
+                    lambda _: self.background_tasks.pop(response_id, None))
+                return response

-        if request.background:
-            created_time = int(time.time())
-            response = ResponsesResponse.from_request(
-                request,
-                sampling_params,
-                model_name=model_name,
-                created_time=created_time,
-                output=[],
-                status="queued",
-                usage=None,
-            )
-            async with self.response_store_lock:
-                self.response_store[response.id] = response
+            if request.stream:
+                raise NotImplementedError(
+                    "Streaming responses are not supported")

-            # Run the request in the background.
-            task = asyncio.create_task(
-                self._run_background_request(
+            try:
+                return await self.responses_full_generator(
                     request,
                     sampling_params,
                     result_generator,
                     context,
                     model_name,
                     tokenizer,
                     request_metadata,
-                    created_time,
-                ),
-                name=f"create_{response.id}",
-            )
-
-            # For cleanup.
-            response_id = response.id
-            self.background_tasks[response_id] = task
-            task.add_done_callback(
-                lambda _: self.background_tasks.pop(response_id, None))
-            return response
-
-        if request.stream:
-            raise NotImplementedError("Streaming responses are not supported")
-
-        try:
-            return await self.responses_full_generator(
-                request,
-                sampling_params,
-                result_generator,
-                context,
-                model_name,
-                tokenizer,
-                request_metadata,
-            )
-        except Exception as e:
-            return self.create_error_response(str(e))
+                )
+            except Exception as e:
+                return self.create_error_response(str(e))
+            return self.create_error_response("Should not reach here")

     async def _make_request(
         self,
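
On the `request.background` branch, the new code keeps a strong reference to the spawned task in `self.background_tasks` and removes it from the dict once the task completes. The sketch below shows that bookkeeping pattern in isolation; `fake_request` is a hypothetical stand-in for `_run_background_request()` and is not part of vLLM:

```python
import asyncio

background_tasks: dict[str, asyncio.Task] = {}


async def fake_request(request_id: str) -> str:
    # Hypothetical stand-in for the real background generation coroutine.
    await asyncio.sleep(0.1)
    return f"done:{request_id}"


def spawn(request_id: str) -> asyncio.Task:
    task = asyncio.create_task(fake_request(request_id),
                               name=f"create_{request_id}")
    # Hold a strong reference so the task cannot be garbage-collected
    # mid-flight, then drop it automatically when the task finishes.
    background_tasks[request_id] = task
    task.add_done_callback(
        lambda _: background_tasks.pop(request_id, None))
    return task


async def main() -> None:
    task = spawn("resp_123")
    print(await task)        # done:resp_123
    await asyncio.sleep(0)   # give the done callback a chance to run
    print(background_tasks)  # {} -- the entry was popped by the callback


asyncio.run(main())
```

Binding the id to a local before building the lambda (as the diff does with `response_id = response.id`) keeps the callback's captured reference stable and avoids keeping the whole `response` object alive just for cleanup.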
|
|