diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 2b0885ab4..facbf8319 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -40,6 +40,9 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
+    seed: int | None = None
+    """The random seed to use when calling the model."""
+
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
     Defaults to True if not provided."""
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index de7b1ae4f..a21d0fec3 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -536,6 +536,7 @@ async def _fetch_response(
             stream=stream,
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
             store=store,
+            seed=self._non_null_or_not_given(model_settings.seed),
             extra_headers=_HEADERS,
         )
 
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 17803fdac..f504c2164 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -246,8 +246,7 @@ async def _fetch_response(
             stream=stream,
             extra_headers=_HEADERS,
             text=response_format,
-            store=self._non_null_or_not_given(model_settings.store),
-        )
+            store=self._non_null_or_not_given(model_settings.store))
 
     def _get_client(self) -> AsyncOpenAI:
         if self._client is None:
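For context, a minimal sketch of how the new setting would be used once merged. This assumes the SDK's public `Agent` / `Runner` / `ModelSettings` entry points (none of which appear in this diff); the agent name and prompt are made up for illustration. As the hunks above show, `seed` is only forwarded on the Chat Completions path (`openai_responses.py` receives a formatting-only change), and `_non_null_or_not_given` follows the same pattern already used for `store`, presumably mapping `None` to `NOT_GIVEN` so the field is omitted from the request entirely.

```python
# Hypothetical usage sketch, not part of this diff. Assumes the SDK's
# public Agent / Runner / ModelSettings API; name and prompt are illustrative.
from agents import Agent, ModelSettings, Runner

agent = Agent(
    name="seeded_assistant",
    instructions="Answer in one short sentence.",
    # seed=None (the default) is dropped from the request via NOT_GIVEN;
    # an int pins the sampling seed on the Chat Completions path.
    model_settings=ModelSettings(seed=42),
)

result = Runner.run_sync(agent, "Name a prime number greater than 100.")
print(result.final_output)
```

OpenAI documents `seed` as best-effort determinism: repeated requests with the same seed and parameters should usually, but not always, return the same output, which is why it stays an optional setting here like the other sampling knobs.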