
Commit 8cc134e

Use openai-go v3.6.1 in the tests (#223)
* Add the latest version of openai-go in the tests
  Signed-off-by: irar2 <[email protected]>
* Typo in function name
  Signed-off-by: irar2 <[email protected]>
* Fixed lint errors
  Signed-off-by: irar2 <[email protected]>
---------
Signed-off-by: irar2 <[email protected]>
1 parent 202efc4 commit 8cc134e

8 files changed: +259 additions, -233 deletions

go.mod

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@ require (
     github.com/llm-d/llm-d-kv-cache-manager v0.3.0-rc1
     github.com/onsi/ginkgo/v2 v2.23.4
     github.com/onsi/gomega v1.37.0
-    github.com/openai/openai-go v0.1.0-beta.10
+    github.com/openai/openai-go/v3 v3.6.1
     github.com/pebbe/zmq4 v1.4.0
     github.com/prometheus/client_golang v1.22.0
     github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
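
Note: openai-go v3 is a new Go major version, so semantic import versioning requires the /v3 suffix in both the go.mod require directive and every import path, which is why each test file below also edits its import block. A minimal, self-contained sketch of the upgraded client in use; the base URL and model name are placeholders, not values from this repo:

package main

import (
    "context"
    "fmt"

    "github.com/openai/openai-go/v3" // the /v3 suffix is mandatory for major version 3
    "github.com/openai/openai-go/v3/option"
)

func main() {
    // Placeholder base URL; the tests point the client at the simulator instead.
    client := openai.NewClient(option.WithBaseURL("http://localhost:8000/v1"))
    resp, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{
        Model: "my-model", // placeholder model name
        Messages: []openai.ChatCompletionMessageParamUnion{
            openai.UserMessage("Hello"),
        },
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(resp.Choices[0].Message.Content)
}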

go.sum

Lines changed: 2 additions & 2 deletions

@@ -85,8 +85,8 @@ github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus
 github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
 github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
 github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
-github.com/openai/openai-go v0.1.0-beta.10 h1:CknhGXe8aXQMRuqg255PFnWzgRY9nEryMxoNIBBM9tU=
-github.com/openai/openai-go v0.1.0-beta.10/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
+github.com/openai/openai-go/v3 v3.6.1 h1:f8J6jhT9wkYnNvHTKR7bxHXSZrSvvcfpHGkmBra04tI=
+github.com/openai/openai-go/v3 v3.6.1/go.mod h1:UOpNxkqC9OdNXNUfpNByKOtB4jAL0EssQXq5p8gO0Xs=
 github.com/pebbe/zmq4 v1.4.0 h1:gO5P92Ayl8GXpPZdYcD62Cwbq0slSBVVQRIXwGSJ6eQ=
 github.com/pebbe/zmq4 v1.4.0/go.mod h1:nqnPueOapVhE2wItZ0uOErngczsJdLOGkebMxaO8r48=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=

pkg/llm-d-inference-sim/failures_test.go

Lines changed: 7 additions & 7 deletions

@@ -25,7 +25,7 @@ import (
 
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    "github.com/openai/openai-go"
+    "github.com/openai/openai-go/v3"
 
     "github.com/llm-d/llm-d-inference-sim/pkg/common"
     openaiserverapi "github.com/llm-d/llm-d-inference-sim/pkg/openai-server-api"
@@ -134,7 +134,7 @@ var _ = Describe("Failures", func() {
     })
 
     It("should always return an error response for chat completions", func() {
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         _, err := openaiClient.Chat.Completions.New(ctx, params)
         Expect(err).To(HaveOccurred())
 
@@ -147,7 +147,7 @@ var _ = Describe("Failures", func() {
     })
 
     It("should always return an error response for text completions", func() {
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         _, err := openaiClient.Chat.Completions.New(ctx, params)
         Expect(err).To(HaveOccurred())
 
@@ -173,7 +173,7 @@ var _ = Describe("Failures", func() {
     })
 
     It("should return only rate limit errors", func() {
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         _, err := openaiClient.Chat.Completions.New(ctx, params)
         Expect(err).To(HaveOccurred())
 
@@ -199,7 +199,7 @@ var _ = Describe("Failures", func() {
     })
 
     It("should return only specified error types", func() {
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
 
         // Make multiple requests to verify we get the expected error types
         for i := 0; i < 10; i++ {
@@ -230,7 +230,7 @@ var _ = Describe("Failures", func() {
     })
 
     It("should never return errors and behave like random mode", func() {
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         resp, err := openaiClient.Chat.Completions.New(ctx, params)
         Expect(err).ToNot(HaveOccurred())
         Expect(resp.Choices).To(HaveLen(1))
@@ -250,7 +250,7 @@ var _ = Describe("Failures", func() {
         }, nil)
         Expect(err).ToNot(HaveOccurred())
 
-        openaiClient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         _, err = openaiClient.Chat.Completions.New(ctx, params)
         Expect(err).To(HaveOccurred())
 
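
Note: these tests assert only that an error occurred. For reference, a hedged sketch of how a caller could inspect the structured error the v3 client returns; the *openai.Error type and the errors.As pattern are the library's documented error handling, while the helper itself is hypothetical and not part of this commit:

package errcheck

import (
    "errors"

    "github.com/openai/openai-go/v3"
)

// statusOf is a hypothetical helper (not in this commit): it extracts the
// HTTP status code from an API error returned by the openai-go v3 client.
func statusOf(err error) (int, bool) {
    var apiErr *openai.Error
    if errors.As(err, &apiErr) {
        return apiErr.StatusCode, true // e.g. 429 for the rate-limit mode exercised above
    }
    return 0, false
}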

pkg/llm-d-inference-sim/lora_test.go

Lines changed: 3 additions & 3 deletions

@@ -23,8 +23,8 @@ import (
 
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    "github.com/openai/openai-go"
-    "github.com/openai/openai-go/option"
+    "github.com/openai/openai-go/v3"
+    "github.com/openai/openai-go/v3/option"
 
     "github.com/llm-d/llm-d-inference-sim/pkg/common"
     vllmapi "github.com/llm-d/llm-d-inference-sim/pkg/vllm-api"
@@ -41,7 +41,7 @@ var _ = Describe("LoRAs", func() {
         Expect(err).NotTo(HaveOccurred())
 
         // Request to lora3
-        openaiclient, params := getOpenAIClentAndChatParams(client, "lora3", userMessage, false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, "lora3", userMessage, false)
         resp, err := openaiclient.Chat.Completions.New(ctx, params)
         Expect(err).ToNot(HaveOccurred())
 

pkg/llm-d-inference-sim/metrics_test.go

Lines changed: 4 additions & 4 deletions

@@ -32,8 +32,8 @@ import (
     "github.com/llm-d/llm-d-inference-sim/pkg/common"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    "github.com/openai/openai-go"
-    "github.com/openai/openai-go/option"
+    "github.com/openai/openai-go/v3"
+    "github.com/openai/openai-go/v3/option"
 )
 
 const (
@@ -73,7 +73,7 @@ var _ = Describe("Simulator metrics", Ordered, func() {
         client, err := startServerWithArgs(ctx, common.ModeRandom, args, nil)
         Expect(err).NotTo(HaveOccurred())
 
-        openaiclient, params := getOpenAIClentAndChatParams(client, modelName, userMessage, false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, modelName, userMessage, false)
 
         var wg sync.WaitGroup
         wg.Add(1)
@@ -316,7 +316,7 @@ var _ = Describe("Simulator metrics", Ordered, func() {
         client, err := startServerWithArgs(ctx, common.ModeRandom, args, nil)
         Expect(err).NotTo(HaveOccurred())
 
-        openaiclient, params := getOpenAIClentAndChatParams(client, modelName, userMessage, false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, modelName, userMessage, false)
         params.MaxTokens = openai.Int(5)
 
         var reqWg, metricsWg sync.WaitGroup

pkg/llm-d-inference-sim/seed_test.go

Lines changed: 1 addition & 1 deletion

@@ -22,7 +22,7 @@ import (
     "github.com/llm-d/llm-d-inference-sim/pkg/common"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    "github.com/openai/openai-go"
+    "github.com/openai/openai-go/v3"
 )
 
 var _ = Describe("Simulator with seed", func() {

pkg/llm-d-inference-sim/simulator_test.go

Lines changed: 9 additions & 9 deletions

@@ -33,9 +33,9 @@ import (
     "github.com/llm-d/llm-d-kv-cache-manager/pkg/tokenization"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    "github.com/openai/openai-go"
-    "github.com/openai/openai-go/option"
-    "github.com/openai/openai-go/packages/param"
+    "github.com/openai/openai-go/v3"
+    "github.com/openai/openai-go/v3/option"
+    "github.com/openai/openai-go/v3/packages/param"
     "github.com/valyala/fasthttp/fasthttputil"
     "k8s.io/klog/v2"
 )
@@ -163,7 +163,7 @@ var _ = Describe("Simulator", func() {
         client, err := startServer(ctx, mode)
         Expect(err).NotTo(HaveOccurred())
 
-        openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, true)
+        openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, true)
         stream := openaiclient.Chat.Completions.NewStreaming(ctx, params)
         defer func() {
             err := stream.Close()
@@ -264,7 +264,7 @@ var _ = Describe("Simulator", func() {
         client, err := startServer(ctx, mode)
         Expect(err).NotTo(HaveOccurred())
 
-        openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
         numTokens := 0
         // if maxTokens and maxCompletionTokens are passsed
         // maxCompletionTokens is used
@@ -539,7 +539,7 @@ var _ = Describe("Simulator", func() {
         Expect(string(body)).To(ContainSubstring("BadRequestError"))
 
         // Also test with OpenAI client to ensure it gets an error
-        openaiclient, params := getOpenAIClentAndChatParams(client, model, "This is a test message", false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, model, "This is a test message", false)
         params.MaxTokens = openai.Int(8)
 
         _, err = openaiclient.Chat.Completions.New(ctx, params)
@@ -556,7 +556,7 @@ var _ = Describe("Simulator", func() {
         client, err := startServerWithArgs(ctx, common.ModeEcho, args, nil)
         Expect(err).NotTo(HaveOccurred())
 
-        openaiclient, params := getOpenAIClentAndChatParams(client, model, "Hello", false)
+        openaiclient, params := getOpenAIClientAndChatParams(client, model, "Hello", false)
         params.MaxTokens = openai.Int(5)
 
         // Send a request within the context window
@@ -604,7 +604,7 @@ func sendSimpleChatRequest(envs map[string]string, streaming bool) *http.Response {
     client, err := startServerWithArgs(ctx, common.ModeRandom, nil, envs)
     Expect(err).NotTo(HaveOccurred())
 
-    openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, streaming)
+    openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, streaming)
     var httpResp *http.Response
     resp, err := openaiclient.Chat.Completions.New(ctx, params, option.WithResponseInto(&httpResp))
     Expect(err).NotTo(HaveOccurred())
@@ -616,7 +616,7 @@ func sendSimpleChatRequest(envs map[string]string, streaming bool) *http.Response {
     return httpResp
 }
 
-func getOpenAIClentAndChatParams(client option.HTTPClient, model string, message string,
+func getOpenAIClientAndChatParams(client option.HTTPClient, model string, message string,
     streaming bool) (openai.Client, openai.ChatCompletionNewParams) {
     openaiclient := openai.NewClient(
         option.WithBaseURL(baseURL),
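
The final hunk cuts off inside the renamed helper. A sketch of how such a helper plausibly completes under v3, assuming the test file's imports and its baseURL constant; the option.WithHTTPClient wiring and the StreamOptions handling are assumptions, not the commit's verbatim body, though the API calls used (openai.NewClient, openai.UserMessage, param.NewOpt) exist in openai-go v3:

func getOpenAIClientAndChatParams(client option.HTTPClient, model string, message string,
    streaming bool) (openai.Client, openai.ChatCompletionNewParams) {
    openaiclient := openai.NewClient(
        option.WithBaseURL(baseURL),
        option.WithHTTPClient(client)) // assumed: inject the test's in-memory HTTP client
    params := openai.ChatCompletionNewParams{
        Model: openai.ChatModel(model),
        Messages: []openai.ChatCompletionMessageParamUnion{
            openai.UserMessage(message),
        },
    }
    if streaming {
        // assumed: streaming tests ask for usage stats in the final chunk
        params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
            IncludeUsage: param.NewOpt(true),
        }
    }
    return openaiclient, params
}

Streaming callers then pass the params to openaiclient.Chat.Completions.NewStreaming(ctx, params), as the first hunk above shows, and iterate the stream with its Next/Current methods before Close.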
