diff --git a/src/guidellm/backend/openai.py b/src/guidellm/backend/openai.py
index b5cbc123..8a49d5ef 100644
--- a/src/guidellm/backend/openai.py
+++ b/src/guidellm/backend/openai.py
@@ -106,7 +106,7 @@ async def make_request(
         stream = await self._async_client.chat.completions.create(
             model=self.model,
             messages=[
-                {"role": "system", "content": request.prompt},
+                {"role": "user", "content": request.prompt},
             ],
             stream=True,
             **request_args,
diff --git a/tests/unit/backend/test_openai_backend.py b/tests/unit/backend/test_openai_backend.py
index 396eb4cc..00b74236 100644
--- a/tests/unit/backend/test_openai_backend.py
+++ b/tests/unit/backend/test_openai_backend.py
@@ -171,7 +171,7 @@ async def test_openai_backend_make_request(req, request_args, mock_openai_client
     assert backend._async_client.create_args == ()  # type: ignore
     assert backend._async_client.create_kwargs["model"] == "mock-model"  # type: ignore
     assert backend._async_client.create_kwargs["messages"] == [  # type: ignore
-        {"role": "system", "content": req.prompt}
+        {"role": "user", "content": req.prompt}
     ]
     assert backend._async_client.create_kwargs["stream"]  # type: ignore
     assert backend._async_client.create_kwargs["n"] == 1  # type: ignore