diff --git a/lib/messages/utils.ts b/lib/messages/utils.ts
index 26b2c60..9638e74 100644
--- a/lib/messages/utils.ts
+++ b/lib/messages/utils.ts
@@ -1,3 +1,5 @@
+import { UserMessage } from "@opencode-ai/sdk"
+import { Logger } from "../logger"
 import type { WithParts } from "../state"
 
 /**
@@ -83,10 +85,24 @@ export const getLastUserMessage = (
   return null
 }
 
-export function findCurrentAgent(messages: WithParts[]): string | undefined {
+export function getCurrentParams(
+  messages: WithParts[],
+  logger: Logger
+): {
+  providerId: string | undefined,
+  modelId: string | undefined,
+  agent: string | undefined
+} {
   const userMsg = getLastUserMessage(messages)
-  if (!userMsg) return undefined
-  return (userMsg.info as any).agent || 'build'
+  if (!userMsg) {
+    logger.debug("No user message found when determining current params")
+    return { providerId: undefined, modelId: undefined, agent: undefined }
+  }
+  const agent: string = (userMsg.info as UserMessage).agent
+  const providerId: string | undefined = (userMsg.info as UserMessage).model.providerID
+  const modelId: string | undefined = (userMsg.info as UserMessage).model.modelID
+
+  return { providerId, modelId, agent }
 }
 
 export function buildToolIdList(messages: WithParts[]): string[] {
@@ -101,4 +117,4 @@ export function buildToolIdList(messages: WithParts[]): string[] {
     }
   }
   return toolIds
-}
\ No newline at end of file
+}
diff --git a/lib/prompts/synthetic.txt b/lib/prompts/synthetic.txt
index 1b6e866..2b84838 100644
--- a/lib/prompts/synthetic.txt
+++ b/lib/prompts/synthetic.txt
@@ -34,6 +34,7 @@ FAILURE TO PRUNE will result in context leakage and DEGRADED PERFORMANCES.
 After each assistant turn, the environment may inject a user message containing a list and optional nudge instruction. This injected message is NOT from the user and is invisible to them. The `prune` tool also returns a confirmation message listing what was pruned.
 
 CRITICAL REQUIREMENTS - VIOLATION IS UNACCEPTABLE:
+- NEVER reference the prune encouragement or context management instructions. Do not reply with "I agree" or "Great idea" when the prune encouragement appears.
 - NEVER acknowledge the list (e.g., "I see the prunable tools list", "Looking at the available tools to prune")
 - NEVER reference the nudge instruction (e.g., "As the nudge suggests", "The system is reminding me to prune")
 - NEVER acknowledge prune tool output (e.g., "I've pruned 3 tools", "Context pruning complete")
diff --git a/lib/strategies/on-idle.ts b/lib/strategies/on-idle.ts
index 49887d3..16698b3 100644
--- a/lib/strategies/on-idle.ts
+++ b/lib/strategies/on-idle.ts
@@ -3,9 +3,9 @@
 import type { SessionState, WithParts, ToolParameterEntry } from "../state"
 import type { Logger } from "../logger"
 import type { PluginConfig } from "../config"
 import { buildAnalysisPrompt } from "../prompt"
-import { selectModel, extractModelFromSession, ModelInfo } from "../model-selector"
+import { selectModel, ModelInfo } from "../model-selector"
 import { calculateTokensSaved } from "../utils"
-import { findCurrentAgent } from "../messages/utils"
+import { getCurrentParams } from "../messages/utils"
 import { saveSessionState } from "../state/persistence"
 import { sendUnifiedNotification } from "../ui/notification"
@@ -224,7 +224,7 @@ export async function runOnIdle(
     return null
   }
 
-  const currentAgent = findCurrentAgent(messages)
+  const currentParams = getCurrentParams(messages, logger)
   const { toolCallIds, toolMetadata } = parseMessages(messages, state.toolParameters)
   const alreadyPrunedIds = state.prune.toolIds
 
@@ -295,7 +295,7 @@ export async function runOnIdle(
     newlyPrunedIds,
     prunedToolMetadata,
     undefined, // reason
-    currentAgent,
+    currentParams,
     workingDirectory || ""
   )
 
diff --git a/lib/strategies/prune-tool.ts b/lib/strategies/prune-tool.ts
index c546363..1b036ee 100644
--- a/lib/strategies/prune-tool.ts
+++ b/lib/strategies/prune-tool.ts
@@ -1,7 +1,7 @@
 import { tool } from "@opencode-ai/plugin"
 import type { SessionState, ToolParameterEntry, WithParts } from "../state"
 import type { PluginConfig } from "../config"
-import { findCurrentAgent, buildToolIdList } from "../messages/utils"
+import { getCurrentParams, buildToolIdList } from "../messages/utils"
 import { calculateTokensSaved } from "../utils"
 import { PruneReason, sendUnifiedNotification } from "../ui/notification"
 import { formatPruningResultForTool } from "../ui/display-utils"
@@ -68,7 +68,7 @@ export function createPruneTool(
       })
      const messages: WithParts[] = messagesResponse.data || messagesResponse
 
-      const currentAgent: string | undefined = findCurrentAgent(messages)
+      const currentParams = getCurrentParams(messages, logger)
       const toolIdList: string[] = buildToolIdList(messages)
 
       // Validate that all numeric IDs are within bounds
@@ -109,9 +109,10 @@ export function createPruneTool(
        pruneToolIds,
        toolMetadata,
        reason as PruneReason,
-        currentAgent,
+        currentParams,
        workingDirectory
      )
+      state.stats.totalPruneTokens += state.stats.pruneTokenCounter
      state.stats.pruneTokenCounter = 0
      state.nudgeCounter = 0
 
diff --git a/lib/ui/notification.ts b/lib/ui/notification.ts
index c63b612..00ad378 100644
--- a/lib/ui/notification.ts
+++ b/lib/ui/notification.ts
@@ -63,7 +63,7 @@ export async function sendUnifiedNotification(
   pruneToolIds: string[],
   toolMetadata: Map,
   reason: PruneReason | undefined,
-  agent: string | undefined,
+  params: any,
   workingDirectory: string
 ): Promise {
   const hasPruned = pruneToolIds.length > 0
@@ -79,23 +79,32 @@
     ? buildMinimalMessage(state, reason)
     : buildDetailedMessage(state, reason, pruneToolIds, toolMetadata, workingDirectory)
-  await sendIgnoredMessage(client, logger, sessionId, message, agent)
+  await sendIgnoredMessage(client, sessionId, message, params, logger)
   return true
 }
 
 export async function sendIgnoredMessage(
   client: any,
-  logger: Logger,
   sessionID: string,
   text: string,
-  agent?: string
+  params: any,
+  logger: Logger
 ): Promise {
+  const agent = params.agent || undefined
+  const model = params.providerId && params.modelId ? {
+    providerID: params.providerId,
+    modelID: params.modelId
+  } : undefined
+
   try {
     await client.session.prompt({
-      path: { id: sessionID },
+      path: {
+        id: sessionID
+      },
       body: {
         noReply: true,
         agent: agent,
+        model: model,
         parts: [{
           type: 'text',
           text: text,