Commit 07791be

feat: remove MaxTokens limitation of o-series
2 parents: 913c83b + 3730e26
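In effect, the client-side reasoning validator no longer rejects o-series requests that set MaxTokens. Below is a minimal sketch of a call that previously failed with ErrReasoningModelMaxTokensDeprecated and now passes client-side validation, assuming this is the sashabaranov/go-openai client with the model constants and error values used in the diffs below; the API key and prompt are placeholders.

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	// Before this commit, MaxTokens > 0 on an o-series model was rejected by the
	// client-side reasoning validator; after it, the request is sent as-is.
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model:     openai.O3Mini,
		MaxTokens: 100,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	})
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
```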

File tree

3 files changed: 112 additions, 115 deletions


chat_stream_test.go

Lines changed: 74 additions & 74 deletions
@@ -934,80 +934,80 @@ func TestCreateChatCompletionStreamWithReasoningModel(t *testing.T) {
     }
 }
 
-func TestCreateChatCompletionStreamReasoningValidatorFails(t *testing.T) {
-    client, _, _ := setupOpenAITestServer()
-
-    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
-        MaxTokens: 100, // This will trigger the validator to fail
-        Model:     openai.O3Mini,
-        Messages: []openai.ChatCompletionMessage{
-            {
-                Role:    openai.ChatMessageRoleUser,
-                Content: "Hello!",
-            },
-        },
-        Stream: true,
-    })
-
-    if stream != nil {
-        t.Error("Expected nil stream when validation fails")
-        stream.Close()
-    }
-
-    if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
-        t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated, got: %v", err)
-    }
-}
-
-func TestCreateChatCompletionStreamO3ReasoningValidatorFails(t *testing.T) {
-    client, _, _ := setupOpenAITestServer()
-
-    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
-        MaxTokens: 100, // This will trigger the validator to fail
-        Model:     openai.O3,
-        Messages: []openai.ChatCompletionMessage{
-            {
-                Role:    openai.ChatMessageRoleUser,
-                Content: "Hello!",
-            },
-        },
-        Stream: true,
-    })
-
-    if stream != nil {
-        t.Error("Expected nil stream when validation fails")
-        stream.Close()
-    }
-
-    if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
-        t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O3, got: %v", err)
-    }
-}
-
-func TestCreateChatCompletionStreamO4MiniReasoningValidatorFails(t *testing.T) {
-    client, _, _ := setupOpenAITestServer()
-
-    stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
-        MaxTokens: 100, // This will trigger the validator to fail
-        Model:     openai.O4Mini,
-        Messages: []openai.ChatCompletionMessage{
-            {
-                Role:    openai.ChatMessageRoleUser,
-                Content: "Hello!",
-            },
-        },
-        Stream: true,
-    })
-
-    if stream != nil {
-        t.Error("Expected nil stream when validation fails")
-        stream.Close()
-    }
-
-    if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
-        t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O4Mini, got: %v", err)
-    }
-}
+// func TestCreateChatCompletionStreamReasoningValidatorFails(t *testing.T) {
+//     client, _, _ := setupOpenAITestServer()
+//
+//     stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
+//         MaxTokens: 100, // This will trigger the validator to fail
+//         Model:     openai.O3Mini,
+//         Messages: []openai.ChatCompletionMessage{
+//             {
+//                 Role:    openai.ChatMessageRoleUser,
+//                 Content: "Hello!",
+//             },
+//         },
+//         Stream: true,
+//     })
+//
+//     if stream != nil {
+//         t.Error("Expected nil stream when validation fails")
+//         stream.Close()
+//     }
+//
+//     if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
+//         t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated, got: %v", err)
+//     }
+//}
+//
+// func TestCreateChatCompletionStreamO3ReasoningValidatorFails(t *testing.T) {
+//     client, _, _ := setupOpenAITestServer()
+//
+//     stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
+//         MaxTokens: 100, // This will trigger the validator to fail
+//         Model:     openai.O3,
+//         Messages: []openai.ChatCompletionMessage{
+//             {
+//                 Role:    openai.ChatMessageRoleUser,
+//                 Content: "Hello!",
+//             },
+//         },
+//         Stream: true,
+//     })
+//
+//     if stream != nil {
+//         t.Error("Expected nil stream when validation fails")
+//         stream.Close()
+//     }
+//
+//     if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
+//         t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O3, got: %v", err)
+//     }
+//}
+//
+// func TestCreateChatCompletionStreamO4MiniReasoningValidatorFails(t *testing.T) {
+//     client, _, _ := setupOpenAITestServer()
+//
+//     stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
+//         MaxTokens: 100, // This will trigger the validator to fail
+//         Model:     openai.O4Mini,
+//         Messages: []openai.ChatCompletionMessage{
+//             {
+//                 Role:    openai.ChatMessageRoleUser,
+//                 Content: "Hello!",
+//             },
+//         },
+//         Stream: true,
+//     })
+//
+//     if stream != nil {
+//         t.Error("Expected nil stream when validation fails")
+//         stream.Close()
+//     }
+//
+//     if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
+//         t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O4Mini, got: %v", err)
+//     }
+//}
 
 func compareChatStreamResponseChoices(c1, c2 openai.ChatCompletionStreamChoice) bool {
     if c1.Index != c2.Index {

chat_test.go

Lines changed: 38 additions & 38 deletions
@@ -74,44 +74,44 @@ func TestChatCompletionRequestWithRequestBodyModifier(t *testing.T) {
     checks.NoError(t, err)
 }
 
-func TestO1ModelsChatCompletionsDeprecatedFields(t *testing.T) {
-    tests := []struct {
-        name          string
-        in            openai.ChatCompletionRequest
-        expectedError error
-    }{
-        {
-            name: "o1-preview_MaxTokens_deprecated",
-            in: openai.ChatCompletionRequest{
-                MaxTokens: 5,
-                Model:     openai.O1Preview,
-            },
-            expectedError: openai.ErrReasoningModelMaxTokensDeprecated,
-        },
-        {
-            name: "o1-mini_MaxTokens_deprecated",
-            in: openai.ChatCompletionRequest{
-                MaxTokens: 5,
-                Model:     openai.O1Mini,
-            },
-            expectedError: openai.ErrReasoningModelMaxTokensDeprecated,
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            config := openai.DefaultConfig("whatever")
-            config.BaseURL = "http://localhost/v1"
-            client := openai.NewClientWithConfig(config)
-            ctx := context.Background()
-
-            _, err := client.CreateChatCompletion(ctx, tt.in)
-            checks.HasError(t, err)
-            msg := fmt.Sprintf("CreateChatCompletion should return wrong model error, returned: %s", err)
-            checks.ErrorIs(t, err, tt.expectedError, msg)
-        })
-    }
-}
+// func TestO1ModelsChatCompletionsDeprecatedFields(t *testing.T) {
+//     tests := []struct {
+//         name          string
+//         in            openai.ChatCompletionRequest
+//         expectedError error
+//     }{
+//         {
+//             name: "o1-preview_MaxTokens_deprecated",
+//             in: openai.ChatCompletionRequest{
+//                 MaxTokens: 5,
+//                 Model:     openai.O1Preview,
+//             },
+//             expectedError: openai.ErrReasoningModelMaxTokensDeprecated,
+//         },
+//         {
+//             name: "o1-mini_MaxTokens_deprecated",
+//             in: openai.ChatCompletionRequest{
+//                 MaxTokens: 5,
+//                 Model:     openai.O1Mini,
+//             },
+//             expectedError: openai.ErrReasoningModelMaxTokensDeprecated,
+//         },
+//     }
+//
+//     for _, tt := range tests {
+//         t.Run(tt.name, func(t *testing.T) {
+//             config := openai.DefaultConfig("whatever")
+//             config.BaseURL = "http://localhost/v1"
+//             client := openai.NewClientWithConfig(config)
+//             ctx := context.Background()
+//
+//             _, err := client.CreateChatCompletion(ctx, tt.in)
+//             checks.HasError(t, err)
+//             msg := fmt.Sprintf("CreateChatCompletion should return wrong model error, returned: %s", err)
+//             checks.ErrorIs(t, err, tt.expectedError, msg)
+//         })
+//     }
+//}
 
 func ptrOf[T any](v T) *T {
     return &v

reasoning_validator.go

Lines changed: 0 additions & 3 deletions
@@ -55,9 +55,6 @@ func (v *ReasoningValidator) Validate(request ChatCompletionRequest) error {
 
 // validateReasoningModelParams checks reasoning model parameters.
 func (v *ReasoningValidator) validateReasoningModelParams(request ChatCompletionRequest) error {
-    if request.MaxTokens > 0 {
-        return ErrReasoningModelMaxTokensDeprecated
-    }
     if request.LogProbs {
         return ErrReasoningModelLimitationsLogprobs
     }
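For completeness, a small sketch of how the trimmed validator behaves after this change: MaxTokens alone no longer produces an error, while other reasoning-model limitations such as LogProbs (visible in the remaining context above) are still enforced. This assumes NewReasoningValidator is the exported constructor for the ReasoningValidator type whose Validate method appears in the hunk header.

```go
package main

import (
	"errors"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Assumption: NewReasoningValidator() returns a *ReasoningValidator whose
	// Validate method is the one shown in the hunk above.
	v := openai.NewReasoningValidator()

	// MaxTokens on an o-series model: no longer rejected after this commit.
	fmt.Println(v.Validate(openai.ChatCompletionRequest{
		Model:     openai.O3Mini,
		MaxTokens: 100,
	})) // expected: <nil>

	// LogProbs remains a reasoning-model limitation and still fails validation.
	err := v.Validate(openai.ChatCompletionRequest{
		Model:    openai.O3Mini,
		LogProbs: true,
	})
	fmt.Println(errors.Is(err, openai.ErrReasoningModelLimitationsLogprobs)) // expected: true
}
```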
