Skip to content
This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit 97950a7

Browse files
committed
Fix typos
1 parent 4e60a83 commit 97950a7

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

torchchat/usages/openai_api.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -334,7 +334,7 @@ def _gen_model_inputs_from_openai_completion_request(
334334

335335
assert prompt is not None, "Text prompt must be specified in the request"
336336

337-
return self._gen_model_inputs(prompt, images, completed_request.max_tokens)
337+
return self._gen_model_input(prompt, images, completion_request.max_tokens)
338338

339339
def chunked_completion(self, completion_request: CompletionRequest):
340340
"""Handle a chat completion request and yield a chunked response.
@@ -365,7 +365,7 @@ def chunked_completion(self, completion_request: CompletionRequest):
365365
device_sync(device=self.builder_args.device)
366366

367367
encoded, batch = self._gen_model_inputs_from_openai_completion_request(
368-
completed_request
368+
completion_request
369369
)
370370

371371
idx = 0

0 commit comments

Comments (0)