Skip to content

Commit b9776a7

Browse files
committed
refactor: streamline Copilot model fetching and remove unused streaming option
1 parent f3d35b3 commit b9776a7

File tree

4 files changed

+34
-92
lines changed

4 files changed

+34
-92
lines changed

src/api/providers/__tests__/copilot.spec.ts

Lines changed: 0 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,6 @@ describe("CopilotHandler", () => {
106106
mockOptions = {
107107
copilotModelId: "gpt-4",
108108
apiModelId: "gpt-4",
109-
openAiStreamingEnabled: true,
110109
}
111110

112111
// Mock successful authentication
@@ -344,47 +343,6 @@ describe("CopilotHandler", () => {
344343
expect(chunk.cacheReadTokens).toBe(2)
345344
})
346345

347-
it("should handle non-streaming requests", async () => {
348-
const nonStreamingHandler = new CopilotHandler({
349-
...mockOptions,
350-
openAiStreamingEnabled: false,
351-
})
352-
353-
const stream = nonStreamingHandler.createMessage(systemPrompt, messages)
354-
const chunks: any[] = []
355-
for await (const chunk of stream) {
356-
chunks.push(chunk)
357-
}
358-
359-
const textChunks = chunks.filter((chunk) => chunk.type === "text")
360-
expect(textChunks).toHaveLength(1)
361-
expect(textChunks[0].text).toBe("Test Copilot response")
362-
363-
const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
364-
expect(usageChunks).toHaveLength(1)
365-
})
366-
367-
it("should add max_tokens when includeMaxTokens is enabled", async () => {
368-
const handlerWithMaxTokens = new CopilotHandler({
369-
...mockOptions,
370-
includeMaxTokens: true,
371-
modelMaxTokens: 4096,
372-
})
373-
374-
const stream = handlerWithMaxTokens.createMessage(systemPrompt, messages)
375-
const chunks: any[] = []
376-
for await (const chunk of stream) {
377-
chunks.push(chunk)
378-
}
379-
380-
expect(mockCreate).toHaveBeenCalledWith(
381-
expect.objectContaining({
382-
max_completion_tokens: 4096,
383-
}),
384-
expect.any(Object),
385-
)
386-
})
387-
388346
it("should handle authentication failures gracefully", async () => {
389347
mockAuthenticator.getApiKey.mockRejectedValueOnce(new Error("Auth failed"))
390348

src/api/providers/copilot.ts

Lines changed: 20 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -122,55 +122,32 @@ export class CopilotHandler extends BaseProvider implements SingleCompletionHand
122122
"X-Initiator": initiator,
123123
}
124124

125-
if (this.options.openAiStreamingEnabled ?? true) {
126-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
127-
model: modelId,
128-
temperature: this.options.modelTemperature ?? 0,
129-
messages: convertedMessages,
130-
stream: true as const,
131-
stream_options: { include_usage: true },
132-
}
133-
134-
// Add max_tokens if needed
135-
this.addMaxTokensIfNeeded(requestOptions, modelInfo)
136-
137-
const stream = await this.client.chat.completions.create(requestOptions, {
138-
headers,
139-
})
125+
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
126+
model: modelId,
127+
temperature: this.options.modelTemperature ?? 0,
128+
messages: convertedMessages,
129+
stream: true as const,
130+
stream_options: { include_usage: true },
131+
max_completion_tokens: modelInfo.maxTokens,
132+
}
140133

141-
for await (const chunk of stream) {
142-
const delta = chunk.choices?.[0]?.delta
143-
if (delta?.content) {
144-
yield {
145-
type: "text",
146-
text: delta.content,
147-
}
148-
}
134+
const stream = await this.client.chat.completions.create(requestOptions, {
135+
headers,
136+
})
149137

150-
// Handle usage information
151-
if (chunk.usage) {
152-
yield this.processUsageMetrics(chunk.usage)
138+
for await (const chunk of stream) {
139+
const delta = chunk.choices?.[0]?.delta
140+
if (delta?.content) {
141+
yield {
142+
type: "text",
143+
text: delta.content,
153144
}
154145
}
155-
} else {
156-
// Non-streaming implementation
157-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
158-
model: modelId,
159-
temperature: this.options.modelTemperature ?? 0,
160-
messages: convertedMessages,
161-
}
162-
163-
this.addMaxTokensIfNeeded(requestOptions, modelInfo)
164-
165-
const response = await this.client.chat.completions.create(requestOptions, {
166-
headers,
167-
})
168146

169-
yield {
170-
type: "text",
171-
text: response.choices[0]?.message.content || "",
147+
// Handle usage information
148+
if (chunk.usage) {
149+
yield this.processUsageMetrics(chunk.usage)
172150
}
173-
yield this.processUsageMetrics(response.usage)
174151
}
175152
}
176153

src/core/webview/__tests__/webviewMessageHandler.spec.ts

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,14 @@ vi.mock("../../../api/providers/fetchers/copilot")
66

77
import { webviewMessageHandler } from "../webviewMessageHandler"
88
import type { ClineProvider } from "../ClineProvider"
9-
import { getModels } from "../../../api/providers/fetchers/modelCache"
9+
import { flushModels, getModels } from "../../../api/providers/fetchers/modelCache"
1010
import { getCopilotModels, CopilotAuthenticator } from "../../../api/providers/fetchers/copilot"
1111
import type { ModelRecord } from "../../../shared/api"
1212

1313
const mockGetModels = getModels as Mock<typeof getModels>
1414
const mockGetCopilotModels = getCopilotModels as Mock<typeof getCopilotModels>
1515
const mockCopilotAuthenticator = CopilotAuthenticator as any
16+
const mockFlushModels = flushModels as Mock<typeof flushModels>
1617

1718
// Mock ClineProvider
1819
const mockClineProvider = {
@@ -560,21 +561,22 @@ describe("webviewMessageHandler - requestCopilotModels", () => {
560561
}
561562

562563
// Mock getCopilotModels to return mock models
563-
mockGetCopilotModels.mockResolvedValue(mockCopilotModels)
564+
mockGetModels.mockResolvedValue(mockCopilotModels)
564565

565566
await webviewMessageHandler(mockClineProvider, {
566567
type: "requestCopilotModels",
567568
})
568569

569-
expect(mockGetCopilotModels).toHaveBeenCalledTimes(1)
570+
expect(mockFlushModels).toHaveBeenCalledTimes(1)
571+
expect(mockGetModels).toHaveBeenCalledTimes(1)
570572
expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
571573
type: "copilotModels",
572574
copilotModels: mockCopilotModels,
573575
})
574576
})
575577

576578
it("handles errors when fetching Copilot models", async () => {
577-
mockGetCopilotModels.mockRejectedValue(new Error("Authentication failed"))
579+
mockGetModels.mockRejectedValue(new Error("Authentication failed"))
578580

579581
// Spy on console.error
580582
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {})
@@ -583,7 +585,8 @@ describe("webviewMessageHandler - requestCopilotModels", () => {
583585
type: "requestCopilotModels",
584586
})
585587

586-
expect(mockGetCopilotModels).toHaveBeenCalledTimes(1)
588+
expect(mockFlushModels).toHaveBeenCalledTimes(1)
589+
expect(mockGetModels).toHaveBeenCalledTimes(1)
587590
expect(consoleSpy).toHaveBeenCalledWith("Failed to fetch Copilot models:", expect.any(Error))
588591
expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
589592
type: "copilotModels",

src/core/webview/message-handle/copilot/requestCopilotModels.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { MessageHandlerStrategy, MessageHandlerContext } from "../types"
2-
import { getCopilotModels } from "../../../../api/providers/fetchers/copilot"
32
import { ModelRecord } from "../../../../shared/api"
3+
import { flushModels, getModels } from "../../../../api/providers/fetchers/modelCache"
44

55
/**
66
* Strategy for handling requestCopilotModels message
@@ -10,7 +10,11 @@ export class RequestCopilotModelsStrategy implements MessageHandlerStrategy {
1010
const { provider } = context
1111

1212
try {
13-
const copilotModels = await getCopilotModels()
13+
await flushModels("copilot")
14+
15+
const copilotModels = await getModels({
16+
provider: "copilot",
17+
})
1418
provider.postMessageToWebview({
1519
type: "copilotModels",
1620
copilotModels,

0 commit comments

Comments (0)