Skip to content

Commit f835c9e

Browse files
committed
re-base the changes
Signed-off-by: Sergey Marunich <[email protected]>
1 parent 772ecdc commit f835c9e

File tree

1 file changed

+9
-9
lines changed

1 file changed

+9
-9
lines changed

pkg/llm-d-inference-sim/simulator_test.go

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ import (
3131
. "github.com/onsi/gomega"
3232
"github.com/openai/openai-go"
3333
"github.com/openai/openai-go/option"
34+
"github.com/openai/openai-go/packages/param"
3435
"github.com/valyala/fasthttp/fasthttputil"
3536
"k8s.io/klog/v2"
3637
)
@@ -43,8 +44,7 @@ const invalidMaxTokensErrMsg = "Max completion tokens and max tokens should be p
4344
var userMsgTokens int64
4445

4546
func startServer(ctx context.Context, mode string) (*http.Client, error) {
46-
// Disable failure injection for tests by default
47-
return startServerWithArgs(ctx, mode, []string{"cmd", "--model", model, "--mode", mode, "--failure-injection-rate", "0"})
47+
return startServerWithArgs(ctx, mode, nil)
4848
}
4949

5050
func startServerWithArgs(ctx context.Context, mode string, args []string) (*http.Client, error) {
@@ -56,7 +56,7 @@ func startServerWithArgs(ctx context.Context, mode string, args []string) (*http
5656
if args != nil {
5757
os.Args = args
5858
} else {
59-
os.Args = []string{"cmd", "--model", model, "--mode", mode, "--failure-injection-rate", "0"}
59+
os.Args = []string{"cmd", "--model", model, "--mode", mode}
6060
}
6161
logger := klog.Background()
6262

@@ -183,7 +183,7 @@ var _ = Describe("Simulator", func() {
183183
OfString: openai.String(userMessage),
184184
},
185185
Model: openai.CompletionNewParamsModel(model),
186-
StreamOptions: openai.ChatCompletionStreamOptionsParam{IncludeUsage: openai.Bool(true)},
186+
StreamOptions: openai.ChatCompletionStreamOptionsParam{IncludeUsage: param.NewOpt(true)},
187187
}
188188
stream := openaiclient.Completions.NewStreaming(ctx, params)
189189
defer func() {
@@ -246,11 +246,11 @@ var _ = Describe("Simulator", func() {
246246
// if maxTokens and maxCompletionTokens are passed
247247
// maxCompletionTokens is used
248248
if maxTokens != 0 {
249-
params.MaxTokens = openai.Int(int64(maxTokens))
249+
params.MaxTokens = param.NewOpt(int64(maxTokens))
250250
numTokens = maxTokens
251251
}
252252
if maxCompletionTokens != 0 {
253-
params.MaxCompletionTokens = openai.Int(int64(maxCompletionTokens))
253+
params.MaxCompletionTokens = param.NewOpt(int64(maxCompletionTokens))
254254
numTokens = maxCompletionTokens
255255
}
256256
resp, err := openaiclient.Chat.Completions.New(ctx, params)
@@ -329,7 +329,7 @@ var _ = Describe("Simulator", func() {
329329
}
330330
numTokens := 0
331331
if maxTokens != 0 {
332-
params.MaxTokens = openai.Int(int64(maxTokens))
332+
params.MaxTokens = param.NewOpt(int64(maxTokens))
333333
numTokens = maxTokens
334334
}
335335
resp, err := openaiclient.Completions.New(ctx, params)
@@ -444,7 +444,7 @@ var _ = Describe("Simulator", func() {
444444
openai.UserMessage("This is a test message"),
445445
},
446446
Model: model,
447-
MaxTokens: openai.Int(8),
447+
MaxTokens: param.NewOpt(int64(8)),
448448
})
449449

450450
Expect(err).To(HaveOccurred())
@@ -471,7 +471,7 @@ var _ = Describe("Simulator", func() {
471471
openai.UserMessage("Hello"),
472472
},
473473
Model: model,
474-
MaxTokens: openai.Int(5),
474+
MaxTokens: param.NewOpt(int64(5)),
475475
})
476476

477477
Expect(err).NotTo(HaveOccurred())

0 commit comments

Comments
 (0)