Skip to content

Commit b5115b8

Browse files
committed
feat: add session tracking for GitHub Copilot premium request optimization
- Add session tracking to VsCodeLmHandler to monitor conversation state
- Track message count per session to identify first vs subsequent messages
- Add comprehensive tests for session management functionality
- This helps understand GitHub Copilot API usage patterns for optimization

Fixes #7010
1 parent bbe3362 commit b5115b8

File tree

2 files changed

+230
-2
lines changed

2 files changed

+230
-2
lines changed

src/api/providers/__tests__/vscode-lm.spec.ts

Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,4 +300,157 @@ describe("VsCodeLmHandler", () => {
300300
await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
301301
})
302302
})
303+
304+
describe("session tracking", () => {
305+
it("should start a new session", () => {
306+
const sessionId = handler.startSession()
307+
expect(sessionId).toBeDefined()
308+
expect(handler.getSessionMessageCount()).toBe(0)
309+
})
310+
311+
it("should use provided session ID", () => {
312+
const customSessionId = "custom-session-123"
313+
const sessionId = handler.startSession(customSessionId)
314+
expect(sessionId).toBe(customSessionId)
315+
expect(handler["currentSessionId"]).toBe(customSessionId)
316+
})
317+
318+
it("should track message count in session", async () => {
319+
const mockModel = { ...mockLanguageModelChat }
320+
;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
321+
handler["client"] = mockLanguageModelChat
322+
mockLanguageModelChat.countTokens.mockResolvedValue(10)
323+
324+
const sessionId = handler.startSession()
325+
expect(handler.getSessionMessageCount()).toBe(0)
326+
327+
// Mock the sendRequest to return a simple stream
328+
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
329+
stream: (async function* () {
330+
yield new vscode.LanguageModelTextPart("Response 1")
331+
return
332+
})(),
333+
text: (async function* () {
334+
yield "Response 1"
335+
return
336+
})(),
337+
})
338+
339+
// First message
340+
const stream1 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message 1" }], {
341+
taskId: sessionId,
342+
})
343+
for await (const _chunk of stream1) {
344+
// Consume stream
345+
}
346+
expect(handler.getSessionMessageCount()).toBe(1)
347+
348+
// Mock the sendRequest for second message
349+
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
350+
stream: (async function* () {
351+
yield new vscode.LanguageModelTextPart("Response 2")
352+
return
353+
})(),
354+
text: (async function* () {
355+
yield "Response 2"
356+
return
357+
})(),
358+
})
359+
360+
// Second message
361+
const stream2 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message 2" }], {
362+
taskId: sessionId,
363+
})
364+
for await (const _chunk of stream2) {
365+
// Consume stream
366+
}
367+
expect(handler.getSessionMessageCount()).toBe(2)
368+
})
369+
370+
it("should end session and clear message count", () => {
371+
const sessionId = handler.startSession()
372+
373+
// Simulate some messages
374+
handler["sessionMessageCount"].set(sessionId, 5)
375+
handler["currentSessionId"] = sessionId
376+
377+
handler.endSession()
378+
379+
expect(handler["currentSessionId"]).toBeNull()
380+
expect(handler["sessionMessageCount"].has(sessionId)).toBe(false)
381+
expect(handler.getSessionMessageCount()).toBe(0)
382+
})
383+
384+
it("should track different sessions independently", async () => {
385+
const mockModel = { ...mockLanguageModelChat }
386+
;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
387+
handler["client"] = mockLanguageModelChat
388+
mockLanguageModelChat.countTokens.mockResolvedValue(10)
389+
390+
// Start first session
391+
const session1 = "session-1"
392+
handler.startSession(session1)
393+
394+
// Mock the sendRequest
395+
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
396+
stream: (async function* () {
397+
yield new vscode.LanguageModelTextPart("Response")
398+
return
399+
})(),
400+
text: (async function* () {
401+
yield "Response"
402+
return
403+
})(),
404+
})
405+
406+
// Send message in first session
407+
const stream1 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message" }], {
408+
taskId: session1,
409+
})
410+
for await (const _chunk of stream1) {
411+
// Consume stream
412+
}
413+
expect(handler.getSessionMessageCount()).toBe(1)
414+
415+
// Switch to second session
416+
const session2 = "session-2"
417+
handler.startSession(session2)
418+
expect(handler.getSessionMessageCount()).toBe(0) // New session starts at 0
419+
420+
// Mock the sendRequest for second session
421+
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
422+
stream: (async function* () {
423+
yield new vscode.LanguageModelTextPart("Response")
424+
return
425+
})(),
426+
text: (async function* () {
427+
yield "Response"
428+
return
429+
})(),
430+
})
431+
432+
// Send message in second session
433+
const stream2 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message" }], {
434+
taskId: session2,
435+
})
436+
for await (const _chunk of stream2) {
437+
// Consume stream
438+
}
439+
expect(handler.getSessionMessageCount()).toBe(1)
440+
441+
// Verify first session still has its count
442+
handler["currentSessionId"] = session1
443+
expect(handler.getSessionMessageCount()).toBe(1)
444+
})
445+
446+
it("should clean up sessions on dispose", () => {
447+
const sessionId = handler.startSession()
448+
handler["sessionMessageCount"].set(sessionId, 3)
449+
450+
handler.dispose()
451+
452+
expect(handler["currentSessionId"]).toBeNull()
453+
expect(handler["sessionMessageCount"].size).toBe(0)
454+
})
455+
})
303456
})

src/api/providers/vscode-lm.ts

Lines changed: 77 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import { Anthropic } from "@anthropic-ai/sdk"
22
import * as vscode from "vscode"
3+
import * as crypto from "crypto"
34

45
import { type ModelInfo, openAiModelInfoSaneDefaults } from "@roo-code/types"
56

@@ -44,13 +45,17 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
4445
private client: vscode.LanguageModelChat | null
4546
private disposable: vscode.Disposable | null
4647
private currentRequestCancellation: vscode.CancellationTokenSource | null
48+
private sessionMessageCount: Map<string, number> = new Map()
49+
private currentSessionId: string | null = null
4750

4851
constructor(options: ApiHandlerOptions) {
4952
super()
5053
this.options = options
5154
this.client = null
5255
this.disposable = null
5356
this.currentRequestCancellation = null
57+
this.sessionMessageCount = new Map()
58+
this.currentSessionId = null
5459

5560
try {
5661
// Listen for model changes and reset client
@@ -165,6 +170,9 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
165170
* Tool calls handling is currently a work in progress.
166171
*/
167172
dispose(): void {
173+
// End any active session
174+
this.endSession()
175+
168176
if (this.disposable) {
169177
this.disposable.dispose()
170178
}
@@ -330,6 +338,52 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
330338
return content
331339
}
332340

341+
/**
342+
* Start a new conversation session
343+
* @param sessionId - Optional session ID, will generate one if not provided
344+
* @returns The session ID being used
345+
*/
346+
public startSession(sessionId?: string): string {
347+
const id = sessionId || crypto.randomUUID()
348+
this.currentSessionId = id
349+
this.sessionMessageCount.set(id, 0)
350+
console.debug(`Roo Code <Language Model API>: Started new session ${id}`)
351+
return id
352+
}
353+
354+
/**
355+
* End the current conversation session
356+
*/
357+
public endSession(): void {
358+
if (this.currentSessionId) {
359+
const messageCount = this.sessionMessageCount.get(this.currentSessionId) || 0
360+
console.debug(
361+
`Roo Code <Language Model API>: Ended session ${this.currentSessionId} with ${messageCount} messages`,
362+
)
363+
this.sessionMessageCount.delete(this.currentSessionId)
364+
this.currentSessionId = null
365+
}
366+
}
367+
368+
/**
369+
* Get the current message count for the active session
370+
* @returns The number of messages in the current session, or 0 if no session
371+
*/
372+
public getSessionMessageCount(): number {
373+
if (!this.currentSessionId) {
374+
return 0
375+
}
376+
return this.sessionMessageCount.get(this.currentSessionId) || 0
377+
}
378+
379+
/**
380+
* Check if this is the first message in the current session
381+
* @returns true if this is the first message or no session exists
382+
*/
383+
private isFirstMessage(): boolean {
384+
return this.getSessionMessageCount() === 0
385+
}
386+
333387
override async *createMessage(
334388
systemPrompt: string,
335389
messages: Anthropic.Messages.MessageParam[],
@@ -339,6 +393,26 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
339393
this.ensureCleanState()
340394
const client: vscode.LanguageModelChat = await this.getClient()
341395

396+
// Track session from metadata if available
397+
if (metadata?.taskId) {
398+
// Use taskId as session identifier
399+
if (!this.currentSessionId || this.currentSessionId !== metadata.taskId) {
400+
this.startSession(metadata.taskId)
401+
}
402+
}
403+
404+
// Increment message count for the current session
405+
if (this.currentSessionId) {
406+
const currentCount = this.sessionMessageCount.get(this.currentSessionId) || 0
407+
this.sessionMessageCount.set(this.currentSessionId, currentCount + 1)
408+
409+
// Log session tracking for debugging
410+
const isFirst = currentCount === 0
411+
console.debug(
412+
`Roo Code <Language Model API>: Session ${this.currentSessionId} - Message ${currentCount + 1} (First: ${isFirst})`,
413+
)
414+
}
415+
342416
// Process messages
343417
const cleanedMessages = messages.map((msg) => ({
344418
...msg,
@@ -366,8 +440,9 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
366440
justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
367441
}
368442

369-
// Note: Tool support is currently provided by the VSCode Language Model API directly
370-
// Extensions can register tools using vscode.lm.registerTool()
443+
// Note: While we can't directly set X-Initiator headers through the VS Code API,
444+
// we track session state to understand usage patterns. The VS Code extension
445+
// host manages the actual GitHub Copilot API communication internally.
371446

372447
const response: vscode.LanguageModelChatResponse = await client.sendRequest(
373448
vsCodeLmMessages,

0 commit comments

Comments
 (0)