@@ -120,10 +120,10 @@ class OpenAIModelSettings(ModelSettings, total=False):
     See [OpenAI's safety best practices](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids) for more details.
     """

-    openai_service_tier: Literal['auto', 'default', 'flex']
+    openai_service_tier: Literal['auto', 'default', 'flex', 'priority']
     """The service tier to use for the model request.

-    Currently supported values are `auto`, `default`, and `flex`.
+    Currently supported values are `auto`, `default`, `flex`, and `priority`.
     For more information, see [OpenAI's service tiers documentation](https://platform.openai.com/docs/api-reference/chat/object#chat/object-service_tier).
     """

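For reference, a minimal sketch of how the new value could be supplied from user code, assuming pydantic-ai's `Agent` accepts `model_settings` and that `OpenAIModelSettings` is importable from `pydantic_ai.models.openai` as in the class shown above; the model name is illustrative, not part of this change:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModelSettings

# Hypothetical usage: route requests through the new 'priority' tier.
agent = Agent(
    'openai:gpt-4o',  # illustrative model name
    model_settings=OpenAIModelSettings(openai_service_tier='priority'),
)
```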
@@ -803,6 +803,7 @@ async def _responses_create(
             top_p=sampling_settings.get('top_p', NOT_GIVEN),
             truncation=model_settings.get('openai_truncation', NOT_GIVEN),
             timeout=model_settings.get('timeout', NOT_GIVEN),
+            service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
             reasoning=reasoning,
             user=model_settings.get('openai_user', NOT_GIVEN),
             text=text or NOT_GIVEN,
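With the setting forwarded as above, the request is the equivalent of passing `service_tier` directly to the OpenAI Responses API. A hedged sketch of that direct call follows; the client setup, model name, and input are illustrative assumptions, not part of this diff:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment

async def main() -> None:
    # service_tier accepts 'auto', 'default', 'flex', or 'priority',
    # matching the Literal values on openai_service_tier above.
    response = await client.responses.create(
        model='gpt-4o',         # illustrative model name
        input='Say hello.',     # illustrative input
        service_tier='priority',
    )
    print(response.output_text)

if __name__ == '__main__':
    asyncio.run(main())
```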