@@ -2082,6 +2082,13 @@ type BetaAssistantNewParams struct {
Metadata param.Field[shared.MetadataParam] `json:"metadata"`
// The name of the assistant. The maximum length is 256 characters.
Name param.Field[string] `json:"name"`
+ // **o1 and o3-mini models only**
+ //
+ // Constrains effort on reasoning for
+ // [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ // supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ // result in faster responses and fewer tokens used on reasoning in a response.
+ ReasoningEffort param.Field[BetaAssistantNewParamsReasoningEffort] `json:"reasoning_effort"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -2107,6 +2114,28 @@ func (r BetaAssistantNewParams) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

+ // **o1 and o3-mini models only**
+ //
+ // Constrains effort on reasoning for
+ // [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ // supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ // result in faster responses and fewer tokens used on reasoning in a response.
+ type BetaAssistantNewParamsReasoningEffort string
+
+ const (
+ BetaAssistantNewParamsReasoningEffortLow BetaAssistantNewParamsReasoningEffort = "low"
+ BetaAssistantNewParamsReasoningEffortMedium BetaAssistantNewParamsReasoningEffort = "medium"
+ BetaAssistantNewParamsReasoningEffortHigh BetaAssistantNewParamsReasoningEffort = "high"
+ )
+
+ func (r BetaAssistantNewParamsReasoningEffort) IsKnown() bool {
+ switch r {
+ case BetaAssistantNewParamsReasoningEffortLow, BetaAssistantNewParamsReasoningEffortMedium, BetaAssistantNewParamsReasoningEffortHigh:
+ return true
+ }
+ return false
+ }
+
// A set of resources that are used by the assistant's tools. The resources are
// specific to the type of tool. For example, the `code_interpreter` tool requires
// a list of file IDs, while the `file_search` tool requires a list of vector store
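For reference, a consumer of this SDK would opt into the new parameter roughly as follows. This is a sketch, assuming the generated openai.F wrapper and the client.Beta.Assistants.New method; the required Model field and real error handling are trimmed for brevity.

package main

import (
	"context"

	"github.com/openai/openai-go"
)

func main() {
	// NewClient reads OPENAI_API_KEY from the environment by default.
	client := openai.NewClient()

	// Ask for low reasoning effort on an assistant backed by a reasoning model.
	// Model and the other required fields are omitted to keep the sketch short.
	assistant, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{
		ReasoningEffort: openai.F(openai.BetaAssistantNewParamsReasoningEffortLow),
	})
	if err != nil {
		panic(err)
	}
	_ = assistant
}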
@@ -2187,9 +2216,16 @@ type BetaAssistantUpdateParams struct {
// see all of your available models, or see our
// [Model overview](https://platform.openai.com/docs/models) for descriptions of
// them.
- Model param.Field[string] `json:"model"`
+ Model param.Field[BetaAssistantUpdateParamsModel] `json:"model"`
// The name of the assistant. The maximum length is 256 characters.
Name param.Field[string] `json:"name"`
+ // **o1 and o3-mini models only**
+ //
+ // Constrains effort on reasoning for
+ // [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ // supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ // result in faster responses and fewer tokens used on reasoning in a response.
+ ReasoningEffort param.Field[BetaAssistantUpdateParamsReasoningEffort] `json:"reasoning_effort"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -2215,6 +2251,74 @@ func (r BetaAssistantUpdateParams) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

+ // ID of the model to use. You can use the
+ // [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ // see all of your available models, or see our
+ // [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ // them.
+ type BetaAssistantUpdateParamsModel string
+
+ const (
+ BetaAssistantUpdateParamsModelO3Mini BetaAssistantUpdateParamsModel = "o3-mini"
+ BetaAssistantUpdateParamsModelO3Mini2025_01_31 BetaAssistantUpdateParamsModel = "o3-mini-2025-01-31"
+ BetaAssistantUpdateParamsModelO1 BetaAssistantUpdateParamsModel = "o1"
+ BetaAssistantUpdateParamsModelO1_2024_12_17 BetaAssistantUpdateParamsModel = "o1-2024-12-17"
+ BetaAssistantUpdateParamsModelGPT4o BetaAssistantUpdateParamsModel = "gpt-4o"
+ BetaAssistantUpdateParamsModelGPT4o2024_11_20 BetaAssistantUpdateParamsModel = "gpt-4o-2024-11-20"
+ BetaAssistantUpdateParamsModelGPT4o2024_08_06 BetaAssistantUpdateParamsModel = "gpt-4o-2024-08-06"
+ BetaAssistantUpdateParamsModelGPT4o2024_05_13 BetaAssistantUpdateParamsModel = "gpt-4o-2024-05-13"
+ BetaAssistantUpdateParamsModelGPT4oMini BetaAssistantUpdateParamsModel = "gpt-4o-mini"
+ BetaAssistantUpdateParamsModelGPT4oMini2024_07_18 BetaAssistantUpdateParamsModel = "gpt-4o-mini-2024-07-18"
+ BetaAssistantUpdateParamsModelGPT4Turbo BetaAssistantUpdateParamsModel = "gpt-4-turbo"
+ BetaAssistantUpdateParamsModelGPT4Turbo2024_04_09 BetaAssistantUpdateParamsModel = "gpt-4-turbo-2024-04-09"
+ BetaAssistantUpdateParamsModelGPT4_0125Preview BetaAssistantUpdateParamsModel = "gpt-4-0125-preview"
+ BetaAssistantUpdateParamsModelGPT4TurboPreview BetaAssistantUpdateParamsModel = "gpt-4-turbo-preview"
+ BetaAssistantUpdateParamsModelGPT4_1106Preview BetaAssistantUpdateParamsModel = "gpt-4-1106-preview"
+ BetaAssistantUpdateParamsModelGPT4VisionPreview BetaAssistantUpdateParamsModel = "gpt-4-vision-preview"
+ BetaAssistantUpdateParamsModelGPT4 BetaAssistantUpdateParamsModel = "gpt-4"
+ BetaAssistantUpdateParamsModelGPT4_0314 BetaAssistantUpdateParamsModel = "gpt-4-0314"
+ BetaAssistantUpdateParamsModelGPT4_0613 BetaAssistantUpdateParamsModel = "gpt-4-0613"
+ BetaAssistantUpdateParamsModelGPT4_32k BetaAssistantUpdateParamsModel = "gpt-4-32k"
+ BetaAssistantUpdateParamsModelGPT4_32k0314 BetaAssistantUpdateParamsModel = "gpt-4-32k-0314"
+ BetaAssistantUpdateParamsModelGPT4_32k0613 BetaAssistantUpdateParamsModel = "gpt-4-32k-0613"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo BetaAssistantUpdateParamsModel = "gpt-3.5-turbo"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo16k BetaAssistantUpdateParamsModel = "gpt-3.5-turbo-16k"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo0613 BetaAssistantUpdateParamsModel = "gpt-3.5-turbo-0613"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo1106 BetaAssistantUpdateParamsModel = "gpt-3.5-turbo-1106"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo0125 BetaAssistantUpdateParamsModel = "gpt-3.5-turbo-0125"
+ BetaAssistantUpdateParamsModelGPT3_5Turbo16k0613 BetaAssistantUpdateParamsModel = "gpt-3.5-turbo-16k-0613"
+ )
+
+ func (r BetaAssistantUpdateParamsModel) IsKnown() bool {
+ switch r {
+ case BetaAssistantUpdateParamsModelO3Mini, BetaAssistantUpdateParamsModelO3Mini2025_01_31, BetaAssistantUpdateParamsModelO1, BetaAssistantUpdateParamsModelO1_2024_12_17, BetaAssistantUpdateParamsModelGPT4o, BetaAssistantUpdateParamsModelGPT4o2024_11_20, BetaAssistantUpdateParamsModelGPT4o2024_08_06, BetaAssistantUpdateParamsModelGPT4o2024_05_13, BetaAssistantUpdateParamsModelGPT4oMini, BetaAssistantUpdateParamsModelGPT4oMini2024_07_18, BetaAssistantUpdateParamsModelGPT4Turbo, BetaAssistantUpdateParamsModelGPT4Turbo2024_04_09, BetaAssistantUpdateParamsModelGPT4_0125Preview, BetaAssistantUpdateParamsModelGPT4TurboPreview, BetaAssistantUpdateParamsModelGPT4_1106Preview, BetaAssistantUpdateParamsModelGPT4VisionPreview, BetaAssistantUpdateParamsModelGPT4, BetaAssistantUpdateParamsModelGPT4_0314, BetaAssistantUpdateParamsModelGPT4_0613, BetaAssistantUpdateParamsModelGPT4_32k, BetaAssistantUpdateParamsModelGPT4_32k0314, BetaAssistantUpdateParamsModelGPT4_32k0613, BetaAssistantUpdateParamsModelGPT3_5Turbo, BetaAssistantUpdateParamsModelGPT3_5Turbo16k, BetaAssistantUpdateParamsModelGPT3_5Turbo0613, BetaAssistantUpdateParamsModelGPT3_5Turbo1106, BetaAssistantUpdateParamsModelGPT3_5Turbo0125, BetaAssistantUpdateParamsModelGPT3_5Turbo16k0613:
+ return true
+ }
+ return false
+ }
+
+ // **o1 and o3-mini models only**
+ //
+ // Constrains effort on reasoning for
+ // [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ // supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ // result in faster responses and fewer tokens used on reasoning in a response.
+ type BetaAssistantUpdateParamsReasoningEffort string
+
+ const (
+ BetaAssistantUpdateParamsReasoningEffortLow BetaAssistantUpdateParamsReasoningEffort = "low"
+ BetaAssistantUpdateParamsReasoningEffortMedium BetaAssistantUpdateParamsReasoningEffort = "medium"
+ BetaAssistantUpdateParamsReasoningEffortHigh BetaAssistantUpdateParamsReasoningEffort = "high"
+ )
+
+ func (r BetaAssistantUpdateParamsReasoningEffort) IsKnown() bool {
+ switch r {
+ case BetaAssistantUpdateParamsReasoningEffortLow, BetaAssistantUpdateParamsReasoningEffortMedium, BetaAssistantUpdateParamsReasoningEffortHigh:
+ return true
+ }
+ return false
+ }
+
// A set of resources that are used by the assistant's tools. The resources are
// specific to the type of tool. For example, the `code_interpreter` tool requires
// a list of file IDs, while the `file_search` tool requires a list of vector store
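Correspondingly, an update call can move an existing assistant onto one of the models above and tune its reasoning effort. A minimal sketch, assuming the client.Beta.Assistants.Update method and the openai.F wrapper; the assistant ID is a placeholder.

package main

import (
	"context"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient()

	// Switch an existing assistant (placeholder ID) to o3-mini and lower its reasoning effort.
	updated, err := client.Beta.Assistants.Update(context.TODO(), "asst_123", openai.BetaAssistantUpdateParams{
		Model:           openai.F(openai.BetaAssistantUpdateParamsModelO3Mini),
		ReasoningEffort: openai.F(openai.BetaAssistantUpdateParamsReasoningEffortLow),
	})
	if err != nil {
		panic(err)
	}
	_ = updated
}

Since both BetaAssistantUpdateParamsModel and BetaAssistantUpdateParamsReasoningEffort are plain string types, values outside the generated constants can still be passed; IsKnown simply reports false for them.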