|
| 1 | +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. |
| 2 | + |
| 3 | +package responses |
| 4 | + |
| 5 | +import ( |
| 6 | + "context" |
| 7 | + "net/http" |
| 8 | + "slices" |
| 9 | + |
| 10 | + "github.com/openai/openai-go/v3/internal/apijson" |
| 11 | + "github.com/openai/openai-go/v3/internal/requestconfig" |
| 12 | + "github.com/openai/openai-go/v3/option" |
| 13 | + "github.com/openai/openai-go/v3/packages/param" |
| 14 | + "github.com/openai/openai-go/v3/packages/respjson" |
| 15 | + "github.com/openai/openai-go/v3/shared" |
| 16 | + "github.com/openai/openai-go/v3/shared/constant" |
| 17 | +) |
| 18 | + |
// InputTokenService contains methods and other services that help with interacting
// with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use the
// [NewInputTokenService] method instead.
type InputTokenService struct {
	// Options are request options applied to every request made through this
	// service, after the parent client's options and before per-call options.
	Options []option.RequestOption
}
| 28 | + |
| 29 | +// NewInputTokenService generates a new service that applies the given options to |
| 30 | +// each request. These options are applied after the parent client's options (if |
| 31 | +// there is one), and before any request-specific options. |
| 32 | +func NewInputTokenService(opts ...option.RequestOption) (r InputTokenService) { |
| 33 | + r = InputTokenService{} |
| 34 | + r.Options = opts |
| 35 | + return |
| 36 | +} |
| 37 | + |
| 38 | +// Get input token counts |
| 39 | +func (r *InputTokenService) Count(ctx context.Context, body InputTokenCountParams, opts ...option.RequestOption) (res *InputTokenCountResponse, err error) { |
| 40 | + opts = slices.Concat(r.Options, opts) |
| 41 | + path := "responses/input_tokens" |
| 42 | + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) |
| 43 | + return |
| 44 | +} |
| 45 | + |
// InputTokenCountResponse is the payload returned by [InputTokenService.Count].
type InputTokenCountResponse struct {
	// InputTokens is the input token count reported by the API.
	InputTokens int64 `json:"input_tokens,required"`
	// Object identifies the payload kind; presumably fixed to the value of
	// constant.ResponseInputTokens — confirm against the constant package.
	Object constant.ResponseInputTokens `json:"object,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	// Populated by apijson during UnmarshalJSON; raw holds the unmodified payload.
	JSON struct {
		InputTokens respjson.Field
		Object      respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string
	} `json:"-"`
}
| 57 | + |
// Returns the unmodified JSON received from the API
func (r InputTokenCountResponse) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes the response via apijson, which also fills in the
// per-field metadata (and raw payload) in r.JSON.
func (r *InputTokenCountResponse) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
| 63 | + |
// InputTokenCountParams is the request body for [InputTokenService.Count].
type InputTokenCountParams struct {
	// A system (or developer) message inserted into the model's context. When used
	// along with `previous_response_id`, the instructions from a previous response
	// will not be carried over to the next response. This makes it simple to swap out
	// system (or developer) messages in new responses.
	Instructions param.Opt[string] `json:"instructions,omitzero"`
	// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
	// wide range of models with different capabilities, performance characteristics,
	// and price points. Refer to the
	// [model guide](https://platform.openai.com/docs/models) to browse and compare
	// available models.
	Model param.Opt[string] `json:"model,omitzero"`
	// Whether to allow the model to run tool calls in parallel.
	ParallelToolCalls param.Opt[bool] `json:"parallel_tool_calls,omitzero"`
	// The unique ID of the previous response to the model. Use this to create
	// multi-turn conversations. Learn more about
	// [conversation state](https://platform.openai.com/docs/guides/conversation-state).
	// Cannot be used in conjunction with `conversation`.
	PreviousResponseID param.Opt[string] `json:"previous_response_id,omitzero"`
	// The conversation that this response belongs to. Items from this conversation are
	// prepended to `input_items` for this response request. Input items and output
	// items from this response are automatically added to this conversation after this
	// response completes.
	Conversation InputTokenCountParamsConversationUnion `json:"conversation,omitzero"`
	// Text, image, or file inputs to the model, used to generate a response
	Input InputTokenCountParamsInputUnion `json:"input,omitzero"`
	// Configuration options for a text response from the model. Can be plain text or
	// structured JSON data. Learn more:
	//
	// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
	// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
	Text InputTokenCountParamsText `json:"text,omitzero"`
	// How the model should select which tool (or tools) to use when generating a
	// response. See the `tools` parameter to see how to specify which tools the model
	// can call.
	ToolChoice InputTokenCountParamsToolChoiceUnion `json:"tool_choice,omitzero"`
	// An array of tools the model may call while generating a response. You can
	// specify which tool to use by setting the `tool_choice` parameter.
	Tools []ToolUnionParam `json:"tools,omitzero"`
	// **gpt-5 and o-series models only** Configuration options for
	// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
	Reasoning shared.ReasoningParam `json:"reasoning,omitzero"`
	// The truncation strategy to use for the model response. - `auto`: If the input to
	// this Response exceeds the model's context window size, the model will truncate
	// the response to fit the context window by dropping items from the beginning of
	// the conversation. - `disabled` (default): If the input size will exceed the
	// context window size for a model, the request will fail with a 400 error.
	//
	// Any of "auto", "disabled".
	Truncation InputTokenCountParamsTruncation `json:"truncation,omitzero"`
	// paramObj is the SDK's embedded param helper — presumably enables
	// extra-field / encoding support shared by all request structs.
	paramObj
}
| 116 | + |
// MarshalJSON encodes the params via param.MarshalObject.
func (r InputTokenCountParams) MarshalJSON() (data []byte, err error) {
	// The shadow alias strips this MarshalJSON method so encoding the fields
	// cannot recurse back into this function.
	type shadow InputTokenCountParams
	return param.MarshalObject(r, (*shadow)(&r))
}

// UnmarshalJSON decodes the params via apijson.
func (r *InputTokenCountParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
| 124 | + |
// InputTokenCountParamsConversationUnion holds either a plain string or a
// [ResponseConversationParam] object for the `conversation` request field.
//
// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type InputTokenCountParamsConversationUnion struct {
	OfString             param.Opt[string]          `json:",omitzero,inline"`
	OfConversationObject *ResponseConversationParam `json:",omitzero,inline"`
	paramUnion
}

// MarshalJSON delegates to param.MarshalUnion to encode the set variant.
func (u InputTokenCountParamsConversationUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfString, u.OfConversationObject)
}

// UnmarshalJSON decodes into the union via apijson.
func (u *InputTokenCountParamsConversationUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

// asAny returns a pointer to the variant that is set, or nil when neither
// variant is present.
func (u *InputTokenCountParamsConversationUnion) asAny() any {
	if !param.IsOmitted(u.OfString) {
		return &u.OfString.Value
	} else if !param.IsOmitted(u.OfConversationObject) {
		return u.OfConversationObject
	}
	return nil
}
| 149 | + |
// InputTokenCountParamsInputUnion holds either a plain text string or an array
// of input items for the `input` request field.
//
// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type InputTokenCountParamsInputUnion struct {
	OfString                 param.Opt[string]             `json:",omitzero,inline"`
	OfResponseInputItemArray []ResponseInputItemUnionParam `json:",omitzero,inline"`
	paramUnion
}

// MarshalJSON delegates to param.MarshalUnion to encode the set variant.
func (u InputTokenCountParamsInputUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfString, u.OfResponseInputItemArray)
}

// UnmarshalJSON decodes into the union via apijson.
func (u *InputTokenCountParamsInputUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

// asAny returns a pointer to the variant that is set, or nil when neither
// variant is present.
func (u *InputTokenCountParamsInputUnion) asAny() any {
	if !param.IsOmitted(u.OfString) {
		return &u.OfString.Value
	} else if !param.IsOmitted(u.OfResponseInputItemArray) {
		return &u.OfResponseInputItemArray
	}
	return nil
}
| 174 | + |
// Configuration options for a text response from the model. Can be plain text or
// structured JSON data. Learn more:
//
// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
type InputTokenCountParamsText struct {
	// Constrains the verbosity of the model's response. Lower values will result in
	// more concise responses, while higher values will result in more verbose
	// responses. Currently supported values are `low`, `medium`, and `high`.
	//
	// Any of "low", "medium", "high".
	// Validated at unmarshal time by the RegisterFieldValidator call in init below.
	Verbosity string `json:"verbosity,omitzero"`
	// An object specifying the format that the model must output.
	//
	// Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
	// ensures the model will match your supplied JSON schema. Learn more in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// The default format is `{ "type": "text" }` with no additional options.
	//
	// **Not recommended for gpt-4o and newer models:**
	//
	// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
	// ensures the message the model generates is valid JSON. Using `json_schema` is
	// preferred for models that support it.
	Format ResponseFormatTextConfigUnionParam `json:"format,omitzero"`
	paramObj
}

// MarshalJSON encodes the text config via param.MarshalObject.
func (r InputTokenCountParamsText) MarshalJSON() (data []byte, err error) {
	// shadow drops this MarshalJSON method to avoid infinite recursion.
	type shadow InputTokenCountParamsText
	return param.MarshalObject(r, (*shadow)(&r))
}

// UnmarshalJSON decodes the text config via apijson.
func (r *InputTokenCountParamsText) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// init registers the allowed enum values for the "verbosity" field so apijson
// can validate them when decoding.
func init() {
	apijson.RegisterFieldValidator[InputTokenCountParamsText](
		"verbosity", "low", "medium", "high",
	)
}
| 217 | + |
// InputTokenCountParamsToolChoiceUnion holds one of the tool-choice variants
// for the `tool_choice` request field: a plain mode value or one of several
// tool-selection objects.
//
// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type InputTokenCountParamsToolChoiceUnion struct {
	// Check if union is this variant with !param.IsOmitted(union.OfToolChoiceMode)
	OfToolChoiceMode param.Opt[ToolChoiceOptions] `json:",omitzero,inline"`
	OfAllowedTools   *ToolChoiceAllowedParam      `json:",omitzero,inline"`
	OfHostedTool     *ToolChoiceTypesParam        `json:",omitzero,inline"`
	OfFunctionTool   *ToolChoiceFunctionParam     `json:",omitzero,inline"`
	OfMcpTool        *ToolChoiceMcpParam          `json:",omitzero,inline"`
	OfCustomTool     *ToolChoiceCustomParam       `json:",omitzero,inline"`
	paramUnion
}

// MarshalJSON delegates to param.MarshalUnion to encode the set variant.
func (u InputTokenCountParamsToolChoiceUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfToolChoiceMode,
		u.OfAllowedTools,
		u.OfHostedTool,
		u.OfFunctionTool,
		u.OfMcpTool,
		u.OfCustomTool)
}

// UnmarshalJSON decodes into the union via apijson.
func (u *InputTokenCountParamsToolChoiceUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

// asAny returns a pointer to the variant that is set, or nil when none is.
func (u *InputTokenCountParamsToolChoiceUnion) asAny() any {
	if !param.IsOmitted(u.OfToolChoiceMode) {
		// NOTE(review): this returns a pointer to the Opt wrapper itself, while
		// the other unions in this file return &Opt.Value for their Opt variants
		// — confirm this asymmetry is intended by the generator.
		return &u.OfToolChoiceMode
	} else if !param.IsOmitted(u.OfAllowedTools) {
		return u.OfAllowedTools
	} else if !param.IsOmitted(u.OfHostedTool) {
		return u.OfHostedTool
	} else if !param.IsOmitted(u.OfFunctionTool) {
		return u.OfFunctionTool
	} else if !param.IsOmitted(u.OfMcpTool) {
		return u.OfMcpTool
	} else if !param.IsOmitted(u.OfCustomTool) {
		return u.OfCustomTool
	}
	return nil
}
| 260 | + |
| 261 | +// Returns a pointer to the underlying variant's property, if present. |
| 262 | +func (u InputTokenCountParamsToolChoiceUnion) GetMode() *string { |
| 263 | + if vt := u.OfAllowedTools; vt != nil { |
| 264 | + return (*string)(&vt.Mode) |
| 265 | + } |
| 266 | + return nil |
| 267 | +} |
| 268 | + |
| 269 | +// Returns a pointer to the underlying variant's property, if present. |
| 270 | +func (u InputTokenCountParamsToolChoiceUnion) GetTools() []map[string]any { |
| 271 | + if vt := u.OfAllowedTools; vt != nil { |
| 272 | + return vt.Tools |
| 273 | + } |
| 274 | + return nil |
| 275 | +} |
| 276 | + |
| 277 | +// Returns a pointer to the underlying variant's property, if present. |
| 278 | +func (u InputTokenCountParamsToolChoiceUnion) GetServerLabel() *string { |
| 279 | + if vt := u.OfMcpTool; vt != nil { |
| 280 | + return &vt.ServerLabel |
| 281 | + } |
| 282 | + return nil |
| 283 | +} |
| 284 | + |
| 285 | +// Returns a pointer to the underlying variant's property, if present. |
| 286 | +func (u InputTokenCountParamsToolChoiceUnion) GetType() *string { |
| 287 | + if vt := u.OfAllowedTools; vt != nil { |
| 288 | + return (*string)(&vt.Type) |
| 289 | + } else if vt := u.OfHostedTool; vt != nil { |
| 290 | + return (*string)(&vt.Type) |
| 291 | + } else if vt := u.OfFunctionTool; vt != nil { |
| 292 | + return (*string)(&vt.Type) |
| 293 | + } else if vt := u.OfMcpTool; vt != nil { |
| 294 | + return (*string)(&vt.Type) |
| 295 | + } else if vt := u.OfCustomTool; vt != nil { |
| 296 | + return (*string)(&vt.Type) |
| 297 | + } |
| 298 | + return nil |
| 299 | +} |
| 300 | + |
| 301 | +// Returns a pointer to the underlying variant's property, if present. |
| 302 | +func (u InputTokenCountParamsToolChoiceUnion) GetName() *string { |
| 303 | + if vt := u.OfFunctionTool; vt != nil { |
| 304 | + return (*string)(&vt.Name) |
| 305 | + } else if vt := u.OfMcpTool; vt != nil && vt.Name.Valid() { |
| 306 | + return &vt.Name.Value |
| 307 | + } else if vt := u.OfCustomTool; vt != nil { |
| 308 | + return (*string)(&vt.Name) |
| 309 | + } |
| 310 | + return nil |
| 311 | +} |
| 312 | + |
// The truncation strategy to use for the model response. - `auto`: If the input to
// this Response exceeds the model's context window size, the model will truncate
// the response to fit the context window by dropping items from the beginning of
// the conversation. - `disabled` (default): If the input size will exceed the
// context window size for a model, the request will fail with a 400 error.
type InputTokenCountParamsTruncation string

const (
	// InputTokenCountParamsTruncationAuto drops items from the beginning of the
	// conversation when the input exceeds the model's context window.
	InputTokenCountParamsTruncationAuto InputTokenCountParamsTruncation = "auto"
	// InputTokenCountParamsTruncationDisabled (the default) makes the request
	// fail with a 400 error when the input would exceed the context window.
	InputTokenCountParamsTruncationDisabled InputTokenCountParamsTruncation = "disabled"
)
0 commit comments