@@ -136,22 +136,6 @@ type Assistant struct {
136136 // assistant. Tools can be of types `code_interpreter`, `file_search`, or
137137 // `function`.
138138 Tools []AssistantTool `json:"tools,required"`
139- // Specifies the format that the model must output. Compatible with
140- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
141- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
142- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
143- //
144- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
145- // message the model generates is valid JSON.
146- //
147- // **Important:** when using JSON mode, you **must** also instruct the model to
148- // produce JSON yourself via a system or user message. Without this, the model may
149- // generate an unending stream of whitespace until the generation reaches the token
150- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
151- // the message content may be partially cut off if `finish_reason="length"`, which
152- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
153- // max context length.
154- ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,nullable"`
155139 // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
156140 // make the output more random, while lower values like 0.2 will make it more
157141 // focused and deterministic.
@@ -172,21 +156,20 @@ type Assistant struct {
172156
173157// assistantJSON contains the JSON metadata for the struct [Assistant]
174158type assistantJSON struct {
175- ID apijson.Field
176- CreatedAt apijson.Field
177- Description apijson.Field
178- Instructions apijson.Field
179- Metadata apijson.Field
180- Model apijson.Field
181- Name apijson.Field
182- Object apijson.Field
183- Tools apijson.Field
184- ResponseFormat apijson.Field
185- Temperature apijson.Field
186- ToolResources apijson.Field
187- TopP apijson.Field
188- raw string
189- ExtraFields map [string ]apijson.Field
159+ ID apijson.Field
160+ CreatedAt apijson.Field
161+ Description apijson.Field
162+ Instructions apijson.Field
163+ Metadata apijson.Field
164+ Model apijson.Field
165+ Name apijson.Field
166+ Object apijson.Field
167+ Tools apijson.Field
168+ Temperature apijson.Field
169+ ToolResources apijson.Field
170+ TopP apijson.Field
171+ raw string
172+ ExtraFields map [string ]apijson.Field
190173}
191174
192175func (r * Assistant ) UnmarshalJSON (data []byte ) (err error ) {
@@ -1869,8 +1852,8 @@ func (r FileSearchToolType) IsKnown() bool {
18691852// Overrides for the file search tool.
18701853type FileSearchToolFileSearch struct {
18711854 // The maximum number of results the file search tool should output. The default is
1872- // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
1873- // and 50 inclusive.
1855+ // 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
1856+ // 1 and 50 inclusive.
18741857 //
18751858 // Note that the file search tool may output fewer than `max_num_results` results.
18761859 // See the
@@ -1914,8 +1897,8 @@ func (r FileSearchToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {}
19141897// Overrides for the file search tool.
19151898type FileSearchToolFileSearchParam struct {
19161899 // The maximum number of results the file search tool should output. The default is
1917- // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
1918- // and 50 inclusive.
1900+ // 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
1901+ // 1 and 50 inclusive.
19191902 //
19201903 // Note that the file search tool may output fewer than `max_num_results` results.
19211904 // See the
@@ -2001,22 +1984,6 @@ type BetaAssistantNewParams struct {
20011984 Metadata param.Field [interface {}] `json:"metadata"`
20021985 // The name of the assistant. The maximum length is 256 characters.
20031986 Name param.Field [string ] `json:"name"`
2004- // Specifies the format that the model must output. Compatible with
2005- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
2006- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
2007- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
2008- //
2009- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
2010- // message the model generates is valid JSON.
2011- //
2012- // **Important:** when using JSON mode, you **must** also instruct the model to
2013- // produce JSON yourself via a system or user message. Without this, the model may
2014- // generate an unending stream of whitespace until the generation reaches the token
2015- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
2016- // the message content may be partially cut off if `finish_reason="length"`, which
2017- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
2018- // max context length.
2019- ResponseFormat param.Field [AssistantResponseFormatOptionUnionParam ] `json:"response_format"`
20201987 // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
20211988 // make the output more random, while lower values like 0.2 will make it more
20221989 // focused and deterministic.
@@ -2234,22 +2201,6 @@ type BetaAssistantUpdateParams struct {
22342201 Model param.Field [string ] `json:"model"`
22352202 // The name of the assistant. The maximum length is 256 characters.
22362203 Name param.Field [string ] `json:"name"`
2237- // Specifies the format that the model must output. Compatible with
2238- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
2239- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
2240- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
2241- //
2242- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
2243- // message the model generates is valid JSON.
2244- //
2245- // **Important:** when using JSON mode, you **must** also instruct the model to
2246- // produce JSON yourself via a system or user message. Without this, the model may
2247- // generate an unending stream of whitespace until the generation reaches the token
2248- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
2249- // the message content may be partially cut off if `finish_reason="length"`, which
2250- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
2251- // max context length.
2252- ResponseFormat param.Field [AssistantResponseFormatOptionUnionParam ] `json:"response_format"`
22532204 // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
22542205 // make the output more random, while lower values like 0.2 will make it more
22552206 // focused and deterministic.
@@ -2335,7 +2286,7 @@ type BetaAssistantListParams struct {
23352286// `url.Values`.
23362287func (r BetaAssistantListParams ) URLQuery () (v url.Values ) {
23372288 return apiquery .MarshalWithSettings (r , apiquery.QuerySettings {
2338- ArrayFormat : apiquery .ArrayQueryFormatComma ,
2289+ ArrayFormat : apiquery .ArrayQueryFormatBrackets ,
23392290 NestedFormat : apiquery .NestedQueryFormatBrackets ,
23402291 })
23412292}