@@ -33,9 +33,9 @@ import (
3333 "github.com/llm-d/llm-d-kv-cache-manager/pkg/tokenization"
3434 . "github.com/onsi/ginkgo/v2"
3535 . "github.com/onsi/gomega"
36- "github.com/openai/openai-go"
37- "github.com/openai/openai-go/option"
38- "github.com/openai/openai-go/packages/param"
36+ "github.com/openai/openai-go/v3 "
37+ "github.com/openai/openai-go/v3/ option"
38+ "github.com/openai/openai-go/v3/ packages/param"
3939 "github.com/valyala/fasthttp/fasthttputil"
4040 "k8s.io/klog/v2"
4141)
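
Note on the hunk above: the only import-level change is the openai-go SDK's major-version bump to its /v3 module path; the call sites below are untouched. As a minimal sketch of constructing the client the way these tests do, assuming the v3 constructor is call-compatible with earlier majors (the base URL here is a placeholder, not the suite's actual address):

	package main

	import (
		"github.com/openai/openai-go/v3"
		"github.com/openai/openai-go/v3/option"
	)

	func main() {
		// Point the client at a local endpoint instead of api.openai.com.
		client := openai.NewClient(
			option.WithBaseURL("http://localhost:8000/v1"), // placeholder endpoint
		)
		_ = client // the tests go on to issue chat completions with it
	}
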
@@ -163,7 +163,7 @@ var _ = Describe("Simulator", func() {
 			client, err := startServer(ctx, mode)
 			Expect(err).NotTo(HaveOccurred())
 
-			openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, true)
+			openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, true)
 			stream := openaiclient.Chat.Completions.NewStreaming(ctx, params)
 			defer func() {
 				err := stream.Close()
@@ -264,7 +264,7 @@ var _ = Describe("Simulator", func() {
 			client, err := startServer(ctx, mode)
 			Expect(err).NotTo(HaveOccurred())
 
-			openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, false)
+			openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
 			numTokens := 0
 			// if maxTokens and maxCompletionTokens are passed
 			// maxCompletionTokens is used
@@ -539,7 +539,7 @@ var _ = Describe("Simulator", func() {
 			Expect(string(body)).To(ContainSubstring("BadRequestError"))
 
 			// Also test with OpenAI client to ensure it gets an error
-			openaiclient, params := getOpenAIClentAndChatParams(client, model, "This is a test message", false)
+			openaiclient, params := getOpenAIClientAndChatParams(client, model, "This is a test message", false)
 			params.MaxTokens = openai.Int(8)
 
 			_, err = openaiclient.Chat.Completions.New(ctx, params)
@@ -556,7 +556,7 @@ var _ = Describe("Simulator", func() {
 			client, err := startServerWithArgs(ctx, common.ModeEcho, args, nil)
 			Expect(err).NotTo(HaveOccurred())
 
-			openaiclient, params := getOpenAIClentAndChatParams(client, model, "Hello", false)
+			openaiclient, params := getOpenAIClientAndChatParams(client, model, "Hello", false)
 			params.MaxTokens = openai.Int(5)
 
 			// Send a request within the context window
@@ -604,7 +604,7 @@ func sendSimpleChatRequest(envs map[string]string, streaming bool) *http.Response {
 	client, err := startServerWithArgs(ctx, common.ModeRandom, nil, envs)
 	Expect(err).NotTo(HaveOccurred())
 
-	openaiclient, params := getOpenAIClentAndChatParams(client, model, userMessage, streaming)
+	openaiclient, params := getOpenAIClientAndChatParams(client, model, userMessage, streaming)
 	var httpResp *http.Response
 	resp, err := openaiclient.Chat.Completions.New(ctx, params, option.WithResponseInto(&httpResp))
 	Expect(err).NotTo(HaveOccurred())
@@ -616,7 +616,7 @@ func sendSimpleChatRequest(envs map[string]string, streaming bool) *http.Response {
 	return httpResp
 }
 
-func getOpenAIClentAndChatParams(client option.HTTPClient, model string, message string,
+func getOpenAIClientAndChatParams(client option.HTTPClient, model string, message string,
 	streaming bool) (openai.Client, openai.ChatCompletionNewParams) {
 	openaiclient := openai.NewClient(
 		option.WithBaseURL(baseURL),
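
The remaining hunks are mechanical renames: every call site of the misspelled helper getOpenAIClentAndChatParams now uses the corrected getOpenAIClientAndChatParams. For reference, a self-contained sketch of the request pattern those call sites exercise, assuming the v3 request types match earlier majors (endpoint, model name, and message are placeholders):

	package main

	import (
		"context"
		"fmt"

		"github.com/openai/openai-go/v3"
		"github.com/openai/openai-go/v3/option"
	)

	func main() {
		client := openai.NewClient(option.WithBaseURL("http://localhost:8000/v1")) // placeholder endpoint
		params := openai.ChatCompletionNewParams{
			Model: "my-model", // placeholder model name
			Messages: []openai.ChatCompletionMessageParamUnion{
				openai.UserMessage("Hello"),
			},
			MaxTokens: openai.Int(5), // mirrors the context-window test above
		}
		resp, err := client.Chat.Completions.New(context.Background(), params)
		if err != nil {
			panic(err)
		}
		fmt.Println(resp.Choices[0].Message.Content)
	}
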