Skip to content

Commit 99bdec4

Browse files
committed
Move common simulator tests helper functions to test_utils.go, use same model name in all tests, refactoring in server start functions
Signed-off-by: Maya Barnea <[email protected]>
1 parent 59bb8dd commit 99bdec4

File tree

9 files changed

+751
-694
lines changed

9 files changed

+751
-694
lines changed

pkg/llm-d-inference-sim/failures_test.go

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -126,15 +126,15 @@ var _ = Describe("Failures", func() {
126126
BeforeEach(func() {
127127
ctx = context.Background()
128128
var err error
129-
client, err = startServerWithArgs(ctx, "", []string{
130-
"cmd", "--model", model,
129+
client, err = startServerWithArgs(ctx, []string{
130+
"cmd", "--model", testModel,
131131
"--failure-injection-rate", "100",
132-
}, nil)
132+
})
133133
Expect(err).ToNot(HaveOccurred())
134134
})
135135

136136
It("should always return an error response for chat completions", func() {
137-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
137+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
138138
_, err := openaiClient.Chat.Completions.New(ctx, params)
139139
Expect(err).To(HaveOccurred())
140140

@@ -147,7 +147,7 @@ var _ = Describe("Failures", func() {
147147
})
148148

149149
It("should always return an error response for text completions", func() {
150-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
150+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
151151
_, err := openaiClient.Chat.Completions.New(ctx, params)
152152
Expect(err).To(HaveOccurred())
153153

@@ -164,16 +164,16 @@ var _ = Describe("Failures", func() {
164164
BeforeEach(func() {
165165
ctx = context.Background()
166166
var err error
167-
client, err = startServerWithArgs(ctx, "", []string{
168-
"cmd", "--model", model,
167+
client, err = startServerWithArgs(ctx, []string{
168+
"cmd", "--model", testModel,
169169
"--failure-injection-rate", "100",
170170
"--failure-types", common.FailureTypeRateLimit,
171-
}, nil)
171+
})
172172
Expect(err).ToNot(HaveOccurred())
173173
})
174174

175175
It("should return only rate limit errors", func() {
176-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
176+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
177177
_, err := openaiClient.Chat.Completions.New(ctx, params)
178178
Expect(err).To(HaveOccurred())
179179

@@ -182,24 +182,24 @@ var _ = Describe("Failures", func() {
182182
Expect(ok).To(BeTrue())
183183
Expect(openaiError.StatusCode).To(Equal(429))
184184
Expect(openaiError.Type).To(Equal(openaiserverapi.ErrorCodeToType(429)))
185-
Expect(strings.Contains(openaiError.Message, model)).To(BeTrue())
185+
Expect(strings.Contains(openaiError.Message, testModel)).To(BeTrue())
186186
})
187187
})
188188

189189
Context("with multiple specific failure types", func() {
190190
BeforeEach(func() {
191191
ctx = context.Background()
192192
var err error
193-
client, err = startServerWithArgs(ctx, "", []string{
194-
"cmd", "--model", model,
193+
client, err = startServerWithArgs(ctx, []string{
194+
"cmd", "--model", testModel,
195195
"--failure-injection-rate", "100",
196196
"--failure-types", common.FailureTypeInvalidAPIKey, common.FailureTypeServerError,
197-
}, nil)
197+
})
198198
Expect(err).ToNot(HaveOccurred())
199199
})
200200

201201
It("should return only specified error types", func() {
202-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
202+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
203203

204204
// Make multiple requests to verify we get the expected error types
205205
for i := 0; i < 10; i++ {
@@ -222,35 +222,35 @@ var _ = Describe("Failures", func() {
222222
BeforeEach(func() {
223223
ctx = context.Background()
224224
var err error
225-
client, err = startServerWithArgs(ctx, "", []string{
226-
"cmd", "--model", model,
225+
client, err = startServerWithArgs(ctx, []string{
226+
"cmd", "--model", testModel,
227227
"--failure-injection-rate", "0",
228-
}, nil)
228+
})
229229
Expect(err).ToNot(HaveOccurred())
230230
})
231231

232232
It("should never return errors and behave like random mode", func() {
233-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
233+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
234234
resp, err := openaiClient.Chat.Completions.New(ctx, params)
235235
Expect(err).ToNot(HaveOccurred())
236236
Expect(resp.Choices).To(HaveLen(1))
237237
Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty())
238-
Expect(resp.Model).To(Equal(model))
238+
Expect(resp.Model).To(Equal(testModel))
239239
})
240240
})
241241

242242
Context("testing all predefined failure types", func() {
243243
DescribeTable("should return correct error for each failure type",
244244
func(failureType string, expectedStatusCode int, expectedErrorType string) {
245245
ctx := context.Background()
246-
client, err := startServerWithArgs(ctx, "", []string{
247-
"cmd", "--model", model,
246+
client, err := startServerWithArgs(ctx, []string{
247+
"cmd", "--model", testModel,
248248
"--failure-injection-rate", "100",
249249
"--failure-types", failureType,
250-
}, nil)
250+
})
251251
Expect(err).ToNot(HaveOccurred())
252252

253-
openaiClient, params := getOpenAIClientAndChatParams(client, model, userMessage, false)
253+
openaiClient, params := getOpenAIClientAndChatParams(client, testModel, testUserMessage, false)
254254
_, err = openaiClient.Chat.Completions.New(ctx, params)
255255
Expect(err).To(HaveOccurred())
256256

pkg/llm-d-inference-sim/lora_test.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,22 +34,22 @@ var _ = Describe("LoRAs", func() {
3434
Context("LoRAs config and load", func() {
3535
It("Should config, load and load LoRAs correctly", func() {
3636
ctx := context.TODO()
37-
client, err := startServerWithArgs(ctx, "",
38-
[]string{"cmd", "--model", model, "--mode", common.ModeEcho,
37+
client, err := startServerWithArgs(ctx,
38+
[]string{"cmd", "--model", testModel, "--mode", common.ModeEcho,
3939
"--lora-modules", "{\"name\":\"lora3\",\"path\":\"/path/to/lora3\"}",
40-
"{\"name\":\"lora4\",\"path\":\"/path/to/lora4\"}"}, nil)
40+
"{\"name\":\"lora4\",\"path\":\"/path/to/lora4\"}"})
4141
Expect(err).NotTo(HaveOccurred())
4242

4343
// Request to lora3
44-
openaiclient, params := getOpenAIClientAndChatParams(client, "lora3", userMessage, false)
44+
openaiclient, params := getOpenAIClientAndChatParams(client, "lora3", testUserMessage, false)
4545
resp, err := openaiclient.Chat.Completions.New(ctx, params)
4646
Expect(err).ToNot(HaveOccurred())
4747

4848
Expect(resp.Choices).ShouldNot(BeEmpty())
4949
Expect(string(resp.Object)).To(Equal(chatCompletionObject))
5050

5151
msg := resp.Choices[0].Message.Content
52-
Expect(msg).Should(Equal(userMessage))
52+
Expect(msg).Should(Equal(testUserMessage))
5353

5454
// Unknown model, should return 404
5555
params.Model = "lora1"
@@ -88,7 +88,7 @@ var _ = Describe("LoRAs", func() {
8888
Expect(string(resp.Object)).To(Equal(chatCompletionObject))
8989

9090
msg = resp.Choices[0].Message.Content
91-
Expect(msg).Should(Equal(userMessage))
91+
Expect(msg).Should(Equal(testUserMessage))
9292

9393
// Unload lora3
9494
payload = map[string]string{

0 commit comments

Comments
 (0)