From b5115b8f572864f49018a728e8354a6edaf2e9d8 Mon Sep 17 00:00:00 2001
From: Roo Code
Date: Tue, 12 Aug 2025 17:58:36 +0000
Subject: [PATCH] feat: add session tracking for GitHub Copilot premium request optimization

- Add session tracking to VsCodeLmHandler to monitor conversation state
- Track message count per session to identify first vs subsequent messages
- Add comprehensive tests for session management functionality
- This helps understand GitHub Copilot API usage patterns for optimization

Fixes #7010
---
 src/api/providers/__tests__/vscode-lm.spec.ts | 153 ++++++++++++++++++
 src/api/providers/vscode-lm.ts                |  79 ++++++++-
 2 files changed, 230 insertions(+), 2 deletions(-)

diff --git a/src/api/providers/__tests__/vscode-lm.spec.ts b/src/api/providers/__tests__/vscode-lm.spec.ts
index afb349e5e09..b78db2a55d4 100644
--- a/src/api/providers/__tests__/vscode-lm.spec.ts
+++ b/src/api/providers/__tests__/vscode-lm.spec.ts
@@ -300,4 +300,157 @@ describe("VsCodeLmHandler", () => {
 			await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
 		})
 	})
+
+	describe("session tracking", () => {
+		it("should start a new session", () => {
+			const sessionId = handler.startSession()
+			expect(sessionId).toBeDefined()
+			expect(handler.getSessionMessageCount()).toBe(0)
+		})
+
+		it("should use provided session ID", () => {
+			const customSessionId = "custom-session-123"
+			const sessionId = handler.startSession(customSessionId)
+			expect(sessionId).toBe(customSessionId)
+			expect(handler["currentSessionId"]).toBe(customSessionId)
+		})
+
+		it("should track message count in session", async () => {
+			const mockModel = { ...mockLanguageModelChat }
+			;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
+			handler["client"] = mockLanguageModelChat
+			mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+			const sessionId = handler.startSession()
+			expect(handler.getSessionMessageCount()).toBe(0)
+
+			// Mock the sendRequest to return a simple stream
+			mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+				stream: (async function* () {
+					yield new vscode.LanguageModelTextPart("Response 1")
+					return
+				})(),
+				text: (async function* () {
+					yield "Response 1"
+					return
+				})(),
+			})
+
+			// First message
+			const stream1 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message 1" }], {
+				taskId: sessionId,
+			})
+			for await (const _chunk of stream1) {
+				// Consume stream
+			}
+			expect(handler.getSessionMessageCount()).toBe(1)
+
+			// Mock the sendRequest for second message
+			mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+				stream: (async function* () {
+					yield new vscode.LanguageModelTextPart("Response 2")
+					return
+				})(),
+				text: (async function* () {
+					yield "Response 2"
+					return
+				})(),
+			})
+
+			// Second message
+			const stream2 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message 2" }], {
+				taskId: sessionId,
+			})
+			for await (const _chunk of stream2) {
+				// Consume stream
+			}
+			expect(handler.getSessionMessageCount()).toBe(2)
+		})
+
+		it("should end session and clear message count", () => {
+			const sessionId = handler.startSession()
+
+			// Simulate some messages
+			handler["sessionMessageCount"].set(sessionId, 5)
+			handler["currentSessionId"] = sessionId
+
+			handler.endSession()
+
+			expect(handler["currentSessionId"]).toBeNull()
+			expect(handler["sessionMessageCount"].has(sessionId)).toBe(false)
+			expect(handler.getSessionMessageCount()).toBe(0)
+		})
+
+		it("should track different sessions independently", async () => {
+			const mockModel = { ...mockLanguageModelChat }
+			;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
+			handler["client"] = mockLanguageModelChat
+			mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+			// Start first session
+			const session1 = "session-1"
+			handler.startSession(session1)
+
+			// Mock the sendRequest
+			mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+				stream: (async function* () {
+					yield new vscode.LanguageModelTextPart("Response")
+					return
+				})(),
+				text: (async function* () {
+					yield "Response"
+					return
+				})(),
+			})
+
+			// Send message in first session
+			const stream1 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message" }], {
+				taskId: session1,
+			})
+			for await (const _chunk of stream1) {
+				// Consume stream
+			}
+			expect(handler.getSessionMessageCount()).toBe(1)
+
+			// Switch to second session
+			const session2 = "session-2"
+			handler.startSession(session2)
+			expect(handler.getSessionMessageCount()).toBe(0) // New session starts at 0
+
+			// Mock the sendRequest for second session
+			mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+				stream: (async function* () {
+					yield new vscode.LanguageModelTextPart("Response")
+					return
+				})(),
+				text: (async function* () {
+					yield "Response"
+					return
+				})(),
+			})
+
+			// Send message in second session
+			const stream2 = handler.createMessage("System prompt", [{ role: "user" as const, content: "Message" }], {
+				taskId: session2,
+			})
+			for await (const _chunk of stream2) {
+				// Consume stream
+			}
+			expect(handler.getSessionMessageCount()).toBe(1)
+
+			// Verify first session still has its count
+			handler["currentSessionId"] = session1
+			expect(handler.getSessionMessageCount()).toBe(1)
+		})
+
+		it("should clean up sessions on dispose", () => {
+			const sessionId = handler.startSession()
+			handler["sessionMessageCount"].set(sessionId, 3)
+
+			handler.dispose()
+
+			expect(handler["currentSessionId"]).toBeNull()
+			expect(handler["sessionMessageCount"].size).toBe(0)
+		})
+	})
 })
diff --git a/src/api/providers/vscode-lm.ts b/src/api/providers/vscode-lm.ts
index d8a492f772c..09ef45d2e64 100644
--- a/src/api/providers/vscode-lm.ts
+++ b/src/api/providers/vscode-lm.ts
@@ -1,5 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import * as vscode from "vscode"
+import * as crypto from "crypto"
 
 import { type ModelInfo, openAiModelInfoSaneDefaults } from "@roo-code/types"
 
@@ -44,6 +45,8 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 	private client: vscode.LanguageModelChat | null
 	private disposable: vscode.Disposable | null
 	private currentRequestCancellation: vscode.CancellationTokenSource | null
+	private sessionMessageCount: Map<string, number> = new Map()
+	private currentSessionId: string | null = null
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -51,6 +54,8 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 		this.client = null
 		this.disposable = null
 		this.currentRequestCancellation = null
+		this.sessionMessageCount = new Map()
+		this.currentSessionId = null
 
 		try {
 			// Listen for model changes and reset client
@@ -165,6 +170,9 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 	 * Tool calls handling is currently a work in progress.
 	 */
 	dispose(): void {
+		// End any active session
+		this.endSession()
+
 		if (this.disposable) {
 			this.disposable.dispose()
 		}
@@ -330,6 +338,52 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 		return content
 	}
 
+	/**
+	 * Start a new conversation session
+	 * @param sessionId - Optional session ID, will generate one if not provided
+	 * @returns The session ID being used
+	 */
+	public startSession(sessionId?: string): string {
+		const id = sessionId || crypto.randomUUID()
+		this.currentSessionId = id
+		this.sessionMessageCount.set(id, 0)
+		console.debug(`Roo Code <Language Model API>: Started new session ${id}`)
+		return id
+	}
+
+	/**
+	 * End the current conversation session
+	 */
+	public endSession(): void {
+		if (this.currentSessionId) {
+			const messageCount = this.sessionMessageCount.get(this.currentSessionId) || 0
+			console.debug(
+				`Roo Code <Language Model API>: Ended session ${this.currentSessionId} with ${messageCount} messages`,
+			)
+			this.sessionMessageCount.delete(this.currentSessionId)
+			this.currentSessionId = null
+		}
+	}
+
+	/**
+	 * Get the current message count for the active session
+	 * @returns The number of messages in the current session, or 0 if no session
+	 */
+	public getSessionMessageCount(): number {
+		if (!this.currentSessionId) {
+			return 0
+		}
+		return this.sessionMessageCount.get(this.currentSessionId) || 0
+	}
+
+	/**
+	 * Check if this is the first message in the current session
+	 * @returns true if this is the first message or no session exists
+	 */
+	private isFirstMessage(): boolean {
+		return this.getSessionMessageCount() === 0
+	}
+
 	override async *createMessage(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
@@ -339,6 +393,26 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 		this.ensureCleanState()
 		const client: vscode.LanguageModelChat = await this.getClient()
 
+		// Track session from metadata if available
+		if (metadata?.taskId) {
+			// Use taskId as session identifier
+			if (!this.currentSessionId || this.currentSessionId !== metadata.taskId) {
+				this.startSession(metadata.taskId)
+			}
+		}
+
+		// Increment message count for the current session
+		if (this.currentSessionId) {
+			const currentCount = this.sessionMessageCount.get(this.currentSessionId) || 0
+			this.sessionMessageCount.set(this.currentSessionId, currentCount + 1)
+
+			// Log session tracking for debugging
+			const isFirst = currentCount === 0
+			console.debug(
+				`Roo Code <Language Model API>: Session ${this.currentSessionId} - Message ${currentCount + 1} (First: ${isFirst})`,
+			)
+		}
+
 		// Process messages
 		const cleanedMessages = messages.map((msg) => ({
 			...msg,
@@ -366,8 +440,9 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 			justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
 		}
 
-		// Note: Tool support is currently provided by the VSCode Language Model API directly
-		// Extensions can register tools using vscode.lm.registerTool()
+		// Note: While we can't directly set X-Initiator headers through the VS Code API,
+		// we track session state to understand usage patterns. The VS Code extension
+		// host manages the actual GitHub Copilot API communication internally.
 		const response: vscode.LanguageModelChatResponse = await client.sendRequest(
 			vsCodeLmMessages,
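
Usage sketch (not part of the patch): the snippet below illustrates how calling code might exercise the session-tracking surface this change adds to VsCodeLmHandler. Only startSession, getSessionMessageCount, endSession, and the metadata.taskId handling in createMessage come from the patch; the helper name runTaskTurn, the import path, the placeholder system prompt, and the assumption that text chunks carry type "text" are illustrative assumptions.

// Illustrative driver only; runTaskTurn and the import path are assumptions.
import { VsCodeLmHandler } from "./vscode-lm"

async function runTaskTurn(handler: VsCodeLmHandler, taskId: string, userText: string): Promise<string> {
	// createMessage() reuses the session keyed by metadata.taskId (starting it on
	// first use) and increments its message counter before streaming the reply.
	const stream = handler.createMessage(
		"You are a helpful assistant.", // placeholder system prompt
		[{ role: "user", content: userText }],
		{ taskId },
	)

	let reply = ""
	for await (const chunk of stream) {
		// Accumulate only the text chunks; usage chunks are ignored here.
		if (chunk.type === "text") {
			reply += chunk.text
		}
	}

	// 1 after the first turn, 2 after the second, and so on; a pre-send count of 0
	// is what isFirstMessage() uses to flag a session's opening request.
	console.debug(`Messages in session ${taskId}: ${handler.getSessionMessageCount()}`)
	return reply
}

// When the task finishes, handler.endSession() logs the final count and clears the entry.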