Skip to content

Commit bac7d59

Browse files
authored
fix MaxCompletionTokens typo (#862)
* fix spelling error
* fix lint
* Update chat.go
* Update chat.go
1 parent fdd59d9 commit bac7d59

File tree

3 files changed

+31
-31
lines changed

3 files changed

+31
-31
lines changed

chat.go

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -207,18 +207,18 @@ type ChatCompletionRequest struct {
207207
// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
208208
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
209209
MaxTokens int `json:"max_tokens,omitempty"`
210-
// MaxCompletionsTokens An upper bound for the number of tokens that can be generated for a completion,
210+
// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
211211
// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
212-
MaxCompletionsTokens int `json:"max_completion_tokens,omitempty"`
213-
Temperature float32 `json:"temperature,omitempty"`
214-
TopP float32 `json:"top_p,omitempty"`
215-
N int `json:"n,omitempty"`
216-
Stream bool `json:"stream,omitempty"`
217-
Stop []string `json:"stop,omitempty"`
218-
PresencePenalty float32 `json:"presence_penalty,omitempty"`
219-
ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"`
220-
Seed *int `json:"seed,omitempty"`
221-
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
212+
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
213+
Temperature float32 `json:"temperature,omitempty"`
214+
TopP float32 `json:"top_p,omitempty"`
215+
N int `json:"n,omitempty"`
216+
Stream bool `json:"stream,omitempty"`
217+
Stop []string `json:"stop,omitempty"`
218+
PresencePenalty float32 `json:"presence_penalty,omitempty"`
219+
ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"`
220+
Seed *int `json:"seed,omitempty"`
221+
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
222222
// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
223223
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
224224
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias

chat_test.go

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -100,17 +100,17 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
100100
{
101101
name: "log_probs_unsupported",
102102
in: openai.ChatCompletionRequest{
103-
MaxCompletionsTokens: 1000,
104-
LogProbs: true,
105-
Model: openai.O1Preview,
103+
MaxCompletionTokens: 1000,
104+
LogProbs: true,
105+
Model: openai.O1Preview,
106106
},
107107
expectedError: openai.ErrO1BetaLimitationsLogprobs,
108108
},
109109
{
110110
name: "message_type_unsupported",
111111
in: openai.ChatCompletionRequest{
112-
MaxCompletionsTokens: 1000,
113-
Model: openai.O1Mini,
112+
MaxCompletionTokens: 1000,
113+
Model: openai.O1Mini,
114114
Messages: []openai.ChatCompletionMessage{
115115
{
116116
Role: openai.ChatMessageRoleSystem,
@@ -122,8 +122,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
122122
{
123123
name: "tool_unsupported",
124124
in: openai.ChatCompletionRequest{
125-
MaxCompletionsTokens: 1000,
126-
Model: openai.O1Mini,
125+
MaxCompletionTokens: 1000,
126+
Model: openai.O1Mini,
127127
Messages: []openai.ChatCompletionMessage{
128128
{
129129
Role: openai.ChatMessageRoleUser,
@@ -143,8 +143,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
143143
{
144144
name: "set_temperature_unsupported",
145145
in: openai.ChatCompletionRequest{
146-
MaxCompletionsTokens: 1000,
147-
Model: openai.O1Mini,
146+
MaxCompletionTokens: 1000,
147+
Model: openai.O1Mini,
148148
Messages: []openai.ChatCompletionMessage{
149149
{
150150
Role: openai.ChatMessageRoleUser,
@@ -160,8 +160,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
160160
{
161161
name: "set_top_unsupported",
162162
in: openai.ChatCompletionRequest{
163-
MaxCompletionsTokens: 1000,
164-
Model: openai.O1Mini,
163+
MaxCompletionTokens: 1000,
164+
Model: openai.O1Mini,
165165
Messages: []openai.ChatCompletionMessage{
166166
{
167167
Role: openai.ChatMessageRoleUser,
@@ -178,8 +178,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
178178
{
179179
name: "set_n_unsupported",
180180
in: openai.ChatCompletionRequest{
181-
MaxCompletionsTokens: 1000,
182-
Model: openai.O1Mini,
181+
MaxCompletionTokens: 1000,
182+
Model: openai.O1Mini,
183183
Messages: []openai.ChatCompletionMessage{
184184
{
185185
Role: openai.ChatMessageRoleUser,
@@ -197,8 +197,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
197197
{
198198
name: "set_presence_penalty_unsupported",
199199
in: openai.ChatCompletionRequest{
200-
MaxCompletionsTokens: 1000,
201-
Model: openai.O1Mini,
200+
MaxCompletionTokens: 1000,
201+
Model: openai.O1Mini,
202202
Messages: []openai.ChatCompletionMessage{
203203
{
204204
Role: openai.ChatMessageRoleUser,
@@ -214,8 +214,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
214214
{
215215
name: "set_frequency_penalty_unsupported",
216216
in: openai.ChatCompletionRequest{
217-
MaxCompletionsTokens: 1000,
218-
Model: openai.O1Mini,
217+
MaxCompletionTokens: 1000,
218+
Model: openai.O1Mini,
219219
Messages: []openai.ChatCompletionMessage{
220220
{
221221
Role: openai.ChatMessageRoleUser,
@@ -296,8 +296,8 @@ func TestO1ModelChatCompletions(t *testing.T) {
296296
defer teardown()
297297
server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
298298
_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
299-
Model: openai.O1Preview,
300-
MaxCompletionsTokens: 1000,
299+
Model: openai.O1Preview,
300+
MaxCompletionTokens: 1000,
301301
Messages: []openai.ChatCompletionMessage{
302302
{
303303
Role: openai.ChatMessageRoleUser,

completion.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ import (
77
)
88

99
var (
10-
ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionsTokens") //nolint:lll
10+
ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
1111
ErrCompletionUnsupportedModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
1212
ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
1313
ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll

0 commit comments

Comments (0)