Commit 20703d0

[https://nvbugs/5625743][fix] Cherry-pick support for n>1 with pytorch backend for openai completion and add tests.
Signed-off-by: SimengLiu-nv <[email protected]>
1 parent 2ba6ce9 commit 20703d0
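
For context, the `n` option requests several independent completions per prompt, which this commit enables on the PyTorch backend. A minimal non-streaming sketch with the `openai` Python client follows; the base URL, API key, and model name are placeholder assumptions, not taken from the commit:

# Sketch: n>1 completions against an OpenAI-compatible endpoint.
# The server URL, API key, and model name below are hypothetical.
import openai

client = openai.OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")

response = client.completions.create(
    model="my-model",  # placeholder model name
    prompt="Hello, my name is",
    n=3,  # three completions for the single prompt
    max_tokens=5,
    temperature=0.1,
)

# Without streaming, all n choices arrive in one response;
# choice.index (0..n-1) tells the generations apart.
for choice in response.choices:
    print(choice.index, repr(choice.text))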

File tree

1 file changed: +22 -0 lines


tests/unittest/llmapi/apps/_test_openai_completions.py

Lines changed: 22 additions & 0 deletions
@@ -205,6 +205,28 @@ async def test_batch_completions_streaming(async_client: openai.AsyncOpenAI,
     assert texts[0] == texts[1]
 
 
+@pytest.mark.asyncio(loop_scope="module")
+@pytest.mark.parametrize("prompts", [["Hello, my name is", "What is the AI?"]])
+async def test_batch_completions_with_option_n_streaming(
+        async_client: openai.AsyncOpenAI, model_name, prompts):
+    # Test n>1 completions per prompt with streaming.
+    batch = await async_client.completions.create(
+        model=model_name,
+        prompt=prompts,
+        n=3,  # number of completions to generate for each prompt
+        max_tokens=5,
+        temperature=0.1,
+        stream=True,
+    )
+    texts = [""] * 6  # 2 prompts × 3 generations per prompt = 6 choices
+    async for chunk in batch:
+        assert len(chunk.choices) == 1
+        choice = chunk.choices[0]
+        texts[choice.index] += choice.text
+
+    assert "" not in texts  # all generations must be non-empty
+
+
 @pytest.mark.asyncio(loop_scope="module")
 async def test_completion_stream_options(async_client: openai.AsyncOpenAI,
                                          model_name: str):
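
The streamed variant above delivers exactly one choice per chunk (the test asserts this), so the six output strings are reassembled by `choice.index`. To run just the new test from the repository root, something like the following should work, using pytest's `-k` filter to select it by name:

pytest tests/unittest/llmapi/apps/_test_openai_completions.py -k test_batch_completions_with_option_n_streaming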

0 commit comments