@@ -120,10 +120,10 @@ class OpenAIModelSettings(ModelSettings, total=False):
    See [OpenAI's safety best practices](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids) for more details.
    """

-    openai_service_tier: Literal['auto', 'default', 'flex']
+    openai_service_tier: Literal['auto', 'default', 'flex', 'priority']
    """The service tier to use for the model request.

-    Currently supported values are `auto`, `default`, and `flex`.
+    Currently supported values are `auto`, `default`, `flex`, and `priority`.
    For more information, see [OpenAI's service tiers documentation](https://platform.openai.com/docs/api-reference/chat/object#chat/object-service_tier).
    """

@@ -803,6 +803,7 @@ async def _responses_create(
            top_p=sampling_settings.get('top_p', NOT_GIVEN),
            truncation=model_settings.get('openai_truncation', NOT_GIVEN),
            timeout=model_settings.get('timeout', NOT_GIVEN),
+            service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
            reasoning=reasoning,
            user=model_settings.get('openai_user', NOT_GIVEN),
            text=text or NOT_GIVEN,
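For reference, a minimal sketch of how a caller could opt into the new tier once this change lands. The `pydantic_ai.models.openai` import path and the `Agent(..., model_settings=...)` signature are assumptions based on the class shown in the diff, not something the diff itself confirms.

# Sketch only: import path and Agent usage below are assumed, not part of this diff.
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModelSettings

# OpenAIModelSettings is a TypedDict (total=False), so only keys that are set
# are forwarded; openai_service_tier maps to the API's service_tier field.
settings = OpenAIModelSettings(openai_service_tier='priority')
agent = Agent('openai:gpt-4o', model_settings=settings)

# result = agent.run_sync('Hello!')  # request would be sent with service_tier='priority'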