
Commit df38777

chore(api): event shapes more accurate
1 parent 1a191a9 · commit df38777

8 files changed: +187 −144 lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml
-openapi_spec_hash: d8b7d38911fead545adf3e4297956410
-config_hash: b2a4028fdbb27a08de89831ed310e244
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml
+openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0
+config_hash: e822d0c9082c8b312264403949243179

api.md

Lines changed: 0 additions & 2 deletions
@@ -710,8 +710,6 @@ Response Types:
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseOutputTextAnnotationAddedEvent">ResponseOutputTextAnnotationAddedEvent</a>
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponsePrompt">ResponsePrompt</a>
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseQueuedEvent">ResponseQueuedEvent</a>
-- <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseReasoningDeltaEvent">ResponseReasoningDeltaEvent</a>
-- <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseReasoningDoneEvent">ResponseReasoningDoneEvent</a>
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseReasoningItem">ResponseReasoningItem</a>
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseReasoningSummaryDeltaEvent">ResponseReasoningSummaryDeltaEvent</a>
 - <a href="https://pkg.go.dev/github.com/openai/openai-go/responses">responses</a>.<a href="https://pkg.go.dev/github.com/openai/openai-go/responses#ResponseReasoningSummaryDoneEvent">ResponseReasoningSummaryDoneEvent</a>

audiospeech.go

Lines changed: 0 additions & 3 deletions
@@ -98,9 +98,6 @@ const (
 AudioSpeechNewParamsVoiceBallad AudioSpeechNewParamsVoice = "ballad"
 AudioSpeechNewParamsVoiceCoral AudioSpeechNewParamsVoice = "coral"
 AudioSpeechNewParamsVoiceEcho AudioSpeechNewParamsVoice = "echo"
-AudioSpeechNewParamsVoiceFable AudioSpeechNewParamsVoice = "fable"
-AudioSpeechNewParamsVoiceOnyx AudioSpeechNewParamsVoice = "onyx"
-AudioSpeechNewParamsVoiceNova AudioSpeechNewParamsVoice = "nova"
 AudioSpeechNewParamsVoiceSage AudioSpeechNewParamsVoice = "sage"
 AudioSpeechNewParamsVoiceShimmer AudioSpeechNewParamsVoice = "shimmer"
 AudioSpeechNewParamsVoiceVerse AudioSpeechNewParamsVoice = "verse"
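
The fable, onyx, and nova voices are removed from this enum, so callers referencing those constants must switch to one of the remaining voices. A minimal sketch of a speech request using a surviving voice; the client call, param field names, and model constant are assumptions based on the SDK's usual conventions and are not part of this diff:

package main

import (
	"context"
	"io"
	"os"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
	// The speech endpoint streams raw audio bytes back as an HTTP response.
	resp, err := client.Audio.Speech.New(context.Background(), openai.AudioSpeechNewParams{
		Model: openai.SpeechModelTTS1, // model constant assumed for illustration
		Input: "Event shapes are now more accurate.",
		// fable, onyx, and nova were dropped; pick one of the remaining voices.
		Voice: openai.AudioSpeechNewParamsVoiceCoral,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("speech.mp3")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	io.Copy(out, resp.Body)
}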

chatcompletion.go

Lines changed: 6 additions & 9 deletions
@@ -180,7 +180,7 @@ type ChatCompletion struct {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service

@@ -282,7 +282,7 @@ func (r *ChatCompletionChoiceLogprobs) UnmarshalJSON(data []byte) error {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service

@@ -547,9 +547,6 @@ const (
 ChatCompletionAudioParamVoiceBallad ChatCompletionAudioParamVoice = "ballad"
 ChatCompletionAudioParamVoiceCoral ChatCompletionAudioParamVoice = "coral"
 ChatCompletionAudioParamVoiceEcho ChatCompletionAudioParamVoice = "echo"
-ChatCompletionAudioParamVoiceFable ChatCompletionAudioParamVoice = "fable"
-ChatCompletionAudioParamVoiceOnyx ChatCompletionAudioParamVoice = "onyx"
-ChatCompletionAudioParamVoiceNova ChatCompletionAudioParamVoice = "nova"
 ChatCompletionAudioParamVoiceSage ChatCompletionAudioParamVoice = "sage"
 ChatCompletionAudioParamVoiceShimmer ChatCompletionAudioParamVoice = "shimmer"
 ChatCompletionAudioParamVoiceVerse ChatCompletionAudioParamVoice = "verse"

@@ -577,7 +574,7 @@ type ChatCompletionChunk struct {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service

@@ -794,7 +791,7 @@ func (r *ChatCompletionChunkChoiceLogprobs) UnmarshalJSON(data []byte) error {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service

@@ -2227,7 +2224,7 @@ type ChatCompletionNewParams struct {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service

@@ -2429,7 +2426,7 @@ func (u ChatCompletionNewParamsResponseFormatUnion) GetType() *string {
 // - If set to 'auto', then the request will be processed with the service tier
 // configured in the Project settings. Unless otherwise configured, the Project
 // will use 'default'.
-// - If set to 'default', then the requset will be processed with the standard
+// - If set to 'default', then the request will be processed with the standard
 // pricing and performance for the selected model.
 // - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 // 'priority', then the request will be processed with the corresponding service
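
The corrected doc comment spells out the four service tier behaviors ('auto', 'default', 'flex', 'priority'). A minimal sketch of a chat completion request that sets the tier explicitly; the ServiceTier constant name and response field are assumptions based on the SDK's naming conventions, since this diff only touches the doc comment:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient()
	completion, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{
		Model: openai.ChatModelGPT4o,
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Summarize this changelog entry in one sentence."),
		},
		// 'auto' defers to the Project setting, 'default' uses standard pricing
		// and performance, 'flex' and 'priority' use the corresponding tiers.
		// Constant name assumed from SDK conventions.
		ServiceTier: openai.ChatCompletionNewParamsServiceTierFlex,
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes the service tier that actually handled the request.
	fmt.Println(completion.ServiceTier, completion.Choices[0].Message.Content)
}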

image.go

Lines changed: 7 additions & 1 deletion
@@ -812,7 +812,7 @@ type ImagesResponseUsage struct {
 InputTokens int64 `json:"input_tokens,required"`
 // The input tokens detailed information for the image generation.
 InputTokensDetails ImagesResponseUsageInputTokensDetails `json:"input_tokens_details,required"`
-// The number of image tokens in the output image.
+// The number of output tokens generated by the model.
 OutputTokens int64 `json:"output_tokens,required"`
 // The total number of tokens (images and text) used for the image generation.
 TotalTokens int64 `json:"total_tokens,required"`

@@ -940,6 +940,9 @@ type ImageEditParams struct {
 // The number of partial images to generate. This parameter is used for streaming
 // responses that return partial images. Value must be between 0 and 3. When set to
 // 0, the response will be a single image sent in one streaming event.
+//
+// Note that the final image may be sent before the full number of partial images
+// are generated if the full image is generated more quickly.
 PartialImages param.Opt[int64] `json:"partial_images,omitzero"`
 // A unique identifier representing your end-user, which can help OpenAI to monitor
 // and detect abuse.

@@ -1130,6 +1133,9 @@ type ImageGenerateParams struct {
 // The number of partial images to generate. This parameter is used for streaming
 // responses that return partial images. Value must be between 0 and 3. When set to
 // 0, the response will be a single image sent in one streaming event.
+//
+// Note that the final image may be sent before the full number of partial images
+// are generated if the full image is generated more quickly.
 PartialImages param.Opt[int64] `json:"partial_images,omitzero"`
 // A unique identifier representing your end-user, which can help OpenAI to monitor
 // and detect abuse.
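
The added note matters for streaming consumers: when generation finishes quickly, the final image event can arrive before the requested number of partial frames, so code should not wait for exactly N partials. A minimal sketch of a streaming generation request that opts into partial images; the streaming method and event field names are assumptions based on the SDK's streaming conventions, and only PartialImages itself appears in this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient()
	// PartialImages accepts 0-3; 0 means a single image in one streaming event.
	stream := client.Images.GenerateStreaming(context.Background(), openai.ImageGenerateParams{
		Model:         openai.ImageModelGPTImage1, // assumed model constant
		Prompt:        "a lighthouse at dusk, watercolor",
		PartialImages: openai.Int(2),
	})
	for stream.Next() {
		event := stream.Current()
		// Treat partial frames as best-effort previews; per the note in this
		// commit, the completed image may arrive before all requested partials.
		fmt.Println("event type:", event.Type)
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}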
