diff --git a/index.ts b/index.ts index d7c06ba..1ccb82e 100644 --- a/index.ts +++ b/index.ts @@ -1,14 +1,14 @@ import type { Plugin } from "@opencode-ai/plugin" import { getConfig } from "./lib/config" import { Logger } from "./lib/logger" -import { Janitor } from "./lib/janitor" +import { createJanitorContext } from "./lib/core/janitor" import { checkForUpdates } from "./lib/version-checker" import { createPluginState } from "./lib/state" import { installFetchWrapper } from "./lib/fetch-wrapper" import { createPruningTool } from "./lib/pruning-tool" import { createEventHandler, createChatParamsHandler } from "./lib/hooks" -import { createToolTracker } from "./lib/synth-instruction" -import { loadPrompt } from "./lib/prompt" +import { createToolTracker } from "./lib/api-formats/synth-instruction" +import { loadPrompt } from "./lib/core/prompt" const plugin: Plugin = (async (ctx) => { const { config, migrations } = getConfig(ctx) @@ -26,16 +26,18 @@ const plugin: Plugin = (async (ctx) => { const logger = new Logger(config.debug) const state = createPluginState() - const janitor = new Janitor( + const janitorCtx = createJanitorContext( ctx.client, state, logger, - config.protectedTools, - config.model, - config.showModelErrorToasts, - config.strictModelSelection, - config.pruning_summary, - ctx.directory + { + protectedTools: config.protectedTools, + model: config.model, + showModelErrorToasts: config.showModelErrorToasts ?? true, + strictModelSelection: config.strictModelSelection ?? 
false, + pruningSummary: config.pruning_summary, + workingDirectory: ctx.directory + } ) // Create tool tracker and load prompts for synthetic instruction injection @@ -85,10 +87,10 @@ const plugin: Plugin = (async (ctx) => { } return { - event: createEventHandler(ctx.client, janitor, logger, config, toolTracker), + event: createEventHandler(ctx.client, janitorCtx, logger, config, toolTracker), "chat.params": createChatParamsHandler(ctx.client, state, logger), tool: config.strategies.onTool.length > 0 ? { - prune: createPruningTool(ctx.client, janitor, config, toolTracker), + prune: createPruningTool(ctx.client, janitorCtx, config, toolTracker), } : undefined, } }) satisfies Plugin diff --git a/lib/synth-instruction.ts b/lib/api-formats/synth-instruction.ts similarity index 100% rename from lib/synth-instruction.ts rename to lib/api-formats/synth-instruction.ts diff --git a/lib/config.ts b/lib/config.ts index 2d0500b..74e173e 100644 --- a/lib/config.ts +++ b/lib/config.ts @@ -30,7 +30,7 @@ export interface ConfigResult { const defaultConfig: PluginConfig = { enabled: true, debug: false, - protectedTools: ['task', 'todowrite', 'todoread', 'prune'], + protectedTools: ['task', 'todowrite', 'todoread', 'prune', 'batch'], showModelErrorToasts: true, strictModelSelection: false, pruning_summary: 'detailed', diff --git a/lib/core/janitor.ts b/lib/core/janitor.ts new file mode 100644 index 0000000..726170c --- /dev/null +++ b/lib/core/janitor.ts @@ -0,0 +1,481 @@ +import { z } from "zod" +import type { Logger } from "../logger" +import type { PruningStrategy } from "../config" +import type { PluginState } from "../state" +import { buildAnalysisPrompt } from "./prompt" +import { selectModel, extractModelFromSession } from "../model-selector" +import { estimateTokensBatch, formatTokenCount } from "../tokenizer" +import { saveSessionState } from "../state/persistence" +import { ensureSessionRestored } from "../state" +import { + sendUnifiedNotification, + type 
NotificationContext +} from "../ui/notification" + +export interface SessionStats { + totalToolsPruned: number + totalTokensSaved: number + totalGCTokens: number +} + +export interface GCStats { + tokensCollected: number + toolsDeduped: number +} + +export interface PruningResult { + prunedCount: number + tokensSaved: number + llmPrunedIds: string[] + toolMetadata: Map + sessionStats: SessionStats +} + +export interface PruningOptions { + reason?: string + trigger: 'idle' | 'tool' +} + +export interface JanitorConfig { + protectedTools: string[] + model?: string + showModelErrorToasts: boolean + strictModelSelection: boolean + pruningSummary: "off" | "minimal" | "detailed" + workingDirectory?: string +} + +export interface JanitorContext { + client: any + state: PluginState + logger: Logger + config: JanitorConfig + notificationCtx: NotificationContext +} + +// ============================================================================ +// Context factory +// ============================================================================ + +export function createJanitorContext( + client: any, + state: PluginState, + logger: Logger, + config: JanitorConfig +): JanitorContext { + return { + client, + state, + logger, + config, + notificationCtx: { + client, + logger, + config: { + pruningSummary: config.pruningSummary, + workingDirectory: config.workingDirectory + } + } + } +} + +// ============================================================================ +// Public API +// ============================================================================ + +export async function runOnIdle( + ctx: JanitorContext, + sessionID: string, + strategies: PruningStrategy[] +): Promise { + return runWithStrategies(ctx, sessionID, strategies, { trigger: 'idle' }) +} + +export async function runOnTool( + ctx: JanitorContext, + sessionID: string, + strategies: PruningStrategy[], + reason?: string +): Promise { + return runWithStrategies(ctx, sessionID, strategies, { trigger: 'tool', 
reason }) +} + +// ============================================================================ +// Core pruning logic +// ============================================================================ + +async function runWithStrategies( + ctx: JanitorContext, + sessionID: string, + strategies: PruningStrategy[], + options: PruningOptions +): Promise { + const { client, state, logger, config } = ctx + + try { + if (strategies.length === 0) { + return null + } + + // Ensure persisted state is restored before processing + await ensureSessionRestored(state, sessionID, logger) + + const [sessionInfoResponse, messagesResponse] = await Promise.all([ + client.session.get({ path: { id: sessionID } }), + client.session.messages({ path: { id: sessionID }, query: { limit: 100 } }) + ]) + + const sessionInfo = sessionInfoResponse.data + const messages = messagesResponse.data || messagesResponse + + if (!messages || messages.length < 3) { + return null + } + + const currentAgent = findCurrentAgent(messages) + const { toolCallIds, toolOutputs, toolMetadata } = parseMessages(messages, state.toolParameters) + + const alreadyPrunedIds = state.prunedIds.get(sessionID) ?? [] + const unprunedToolCallIds = toolCallIds.filter(id => !alreadyPrunedIds.includes(id)) + + // Get pending GC stats (accumulated since last notification) + const gcPending = state.gcPending.get(sessionID) ?? 
null + + // If nothing to analyze and no GC activity, exit early + if (unprunedToolCallIds.length === 0 && !gcPending) { + return null + } + + const candidateCount = unprunedToolCallIds.filter(id => { + const metadata = toolMetadata.get(id) + return !metadata || !config.protectedTools.includes(metadata.tool) + }).length + + // PHASE 1: LLM ANALYSIS + let llmPrunedIds: string[] = [] + + if (strategies.includes('ai-analysis') && unprunedToolCallIds.length > 0) { + llmPrunedIds = await runLlmAnalysis( + ctx, + sessionID, + sessionInfo, + messages, + unprunedToolCallIds, + alreadyPrunedIds, + toolMetadata, + options + ) + } + + const finalNewlyPrunedIds = llmPrunedIds.filter(id => !alreadyPrunedIds.includes(id)) + + // If AI pruned nothing and no GC activity, nothing to report + if (finalNewlyPrunedIds.length === 0 && !gcPending) { + return null + } + + // PHASE 2: CALCULATE STATS & NOTIFICATION + const tokensSaved = await calculateTokensSaved(finalNewlyPrunedIds, toolOutputs) + + // Get current session stats, initializing with proper defaults + const currentStats = state.stats.get(sessionID) ?? { + totalToolsPruned: 0, + totalTokensSaved: 0, + totalGCTokens: 0 + } + + // Update session stats including GC contribution + const sessionStats: SessionStats = { + totalToolsPruned: currentStats.totalToolsPruned + finalNewlyPrunedIds.length, + totalTokensSaved: currentStats.totalTokensSaved + tokensSaved, + totalGCTokens: currentStats.totalGCTokens + (gcPending?.tokensCollected ?? 
0) + } + state.stats.set(sessionID, sessionStats) + + // Send unified notification (handles all scenarios) + const notificationSent = await sendUnifiedNotification( + ctx.notificationCtx, + sessionID, + { + aiPrunedCount: llmPrunedIds.length, + aiTokensSaved: tokensSaved, + aiPrunedIds: llmPrunedIds, + toolMetadata, + gcPending, + sessionStats + }, + currentAgent + ) + + // Clear pending GC stats after notification (whether sent or not - we've consumed them) + if (gcPending) { + state.gcPending.delete(sessionID) + } + + // If we only had GC activity (no AI pruning), return null but notification was sent + if (finalNewlyPrunedIds.length === 0) { + if (notificationSent) { + logger.info("janitor", `GC-only notification: ~${formatTokenCount(gcPending?.tokensCollected ?? 0)} tokens from ${gcPending?.toolsDeduped ?? 0} deduped tools`, { + trigger: options.trigger + }) + } + return null + } + + // PHASE 3: STATE UPDATE (only if AI pruned something) + const allPrunedIds = [...new Set([...alreadyPrunedIds, ...llmPrunedIds])] + state.prunedIds.set(sessionID, allPrunedIds) + + const sessionName = sessionInfo?.title + saveSessionState(sessionID, new Set(allPrunedIds), sessionStats, logger, sessionName).catch(err => { + logger.error("janitor", "Failed to persist state", { error: err.message }) + }) + + const prunedCount = finalNewlyPrunedIds.length + const keptCount = candidateCount - prunedCount + + const logMeta: Record = { trigger: options.trigger } + if (options.reason) { + logMeta.reason = options.reason + } + if (gcPending) { + logMeta.gcTokens = gcPending.tokensCollected + logMeta.gcTools = gcPending.toolsDeduped + } + + logger.info("janitor", `Pruned ${prunedCount}/${candidateCount} tools, ${keptCount} kept (~${formatTokenCount(tokensSaved)} tokens)`, logMeta) + + return { + prunedCount: finalNewlyPrunedIds.length, + tokensSaved, + llmPrunedIds, + toolMetadata, + sessionStats + } + + } catch (error: any) { + ctx.logger.error("janitor", "Analysis failed", { + error: 
error.message, + trigger: options.trigger + }) + return null + } +} + +// ============================================================================ +// LLM Analysis +// ============================================================================ + +async function runLlmAnalysis( + ctx: JanitorContext, + sessionID: string, + sessionInfo: any, + messages: any[], + unprunedToolCallIds: string[], + alreadyPrunedIds: string[], + toolMetadata: Map, + options: PruningOptions +): Promise { + const { client, state, logger, config } = ctx + + const protectedToolCallIds: string[] = [] + const prunableToolCallIds = unprunedToolCallIds.filter(id => { + const metadata = toolMetadata.get(id) + if (metadata && config.protectedTools.includes(metadata.tool)) { + protectedToolCallIds.push(id) + return false + } + return true + }) + + if (prunableToolCallIds.length === 0) { + return [] + } + + const cachedModelInfo = state.model.get(sessionID) + const sessionModelInfo = extractModelFromSession(sessionInfo, logger) + const currentModelInfo = cachedModelInfo || sessionModelInfo + + const modelSelection = await selectModel(currentModelInfo, logger, config.model, config.workingDirectory) + + logger.info("janitor", `Model: ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, { + source: modelSelection.source + }) + + if (modelSelection.failedModel && config.showModelErrorToasts) { + const skipAi = modelSelection.source === 'fallback' && config.strictModelSelection + try { + await client.tui.showToast({ + body: { + title: skipAi ? "DCP: AI analysis skipped" : "DCP: Model fallback", + message: skipAi + ? 
`${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nAI analysis skipped (strictModelSelection enabled)` + : `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nUsing ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, + variant: "info", + duration: 5000 + } + }) + } catch (toastError: any) { + // Ignore toast errors + } + } + + if (modelSelection.source === 'fallback' && config.strictModelSelection) { + logger.info("janitor", "Skipping AI analysis (fallback model, strictModelSelection enabled)") + return [] + } + + const { generateObject } = await import('ai') + + const sanitizedMessages = replacePrunedToolOutputs(messages, alreadyPrunedIds) + + const analysisPrompt = buildAnalysisPrompt( + prunableToolCallIds, + sanitizedMessages, + alreadyPrunedIds, + protectedToolCallIds, + options.reason + ) + + await logger.saveWrappedContext( + "janitor-shadow", + [{ role: "user", content: analysisPrompt }], + { + sessionID, + modelProvider: modelSelection.modelInfo.providerID, + modelID: modelSelection.modelInfo.modelID, + candidateToolCount: prunableToolCallIds.length, + alreadyPrunedCount: alreadyPrunedIds.length, + protectedToolCount: protectedToolCallIds.length, + trigger: options.trigger, + reason: options.reason + } + ) + + const result = await generateObject({ + model: modelSelection.model, + schema: z.object({ + pruned_tool_call_ids: z.array(z.string()), + reasoning: z.string(), + }), + prompt: analysisPrompt + }) + + const rawLlmPrunedIds = result.object.pruned_tool_call_ids + const llmPrunedIds = rawLlmPrunedIds.filter(id => + prunableToolCallIds.includes(id.toLowerCase()) + ) + + if (llmPrunedIds.length > 0) { + const reasoning = result.object.reasoning.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim() + logger.info("janitor", `LLM reasoning: ${reasoning.substring(0, 200)}${reasoning.length > 200 ? '...' 
: ''}`) + } + + return llmPrunedIds +} + +// ============================================================================ +// Message parsing +// ============================================================================ + +interface ParsedMessages { + toolCallIds: string[] + toolOutputs: Map + toolMetadata: Map +} + +function parseMessages( + messages: any[], + toolParametersCache: Map +): ParsedMessages { + const toolCallIds: string[] = [] + const toolOutputs = new Map() + const toolMetadata = new Map() + + for (const msg of messages) { + if (msg.parts) { + for (const part of msg.parts) { + if (part.type === "tool" && part.callID) { + const normalizedId = part.callID.toLowerCase() + toolCallIds.push(normalizedId) + + const cachedData = toolParametersCache.get(part.callID) || toolParametersCache.get(normalizedId) + const parameters = cachedData?.parameters ?? part.state?.input ?? part.parameters + + toolMetadata.set(normalizedId, { + tool: part.tool, + parameters: parameters + }) + + if (part.state?.status === "completed" && part.state.output) { + toolOutputs.set(normalizedId, part.state.output) + } + } + } + } + } + + return { toolCallIds, toolOutputs, toolMetadata } +} + +function findCurrentAgent(messages: any[]): string | undefined { + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i] + const info = msg.info + if (info?.role === 'user') { + return info.agent || 'build' + } + } + return undefined +} + +// ============================================================================ +// Helpers +// ============================================================================ + +function replacePrunedToolOutputs(messages: any[], prunedIds: string[]): any[] { + if (prunedIds.length === 0) return messages + + const prunedIdsSet = new Set(prunedIds.map(id => id.toLowerCase())) + + return messages.map(msg => { + if (!msg.parts) return msg + + return { + ...msg, + parts: msg.parts.map((part: any) => { + if (part.type === 'tool' && + 
part.callID && + prunedIdsSet.has(part.callID.toLowerCase()) && + part.state?.output) { + return { + ...part, + state: { + ...part.state, + output: '[Output removed to save context - information superseded or no longer needed]' + } + } + } + return part + }) + } + }) +} + +async function calculateTokensSaved(prunedIds: string[], toolOutputs: Map): Promise { + const outputsToTokenize: string[] = [] + + for (const prunedId of prunedIds) { + const output = toolOutputs.get(prunedId) + if (output) { + outputsToTokenize.push(output) + } + } + + if (outputsToTokenize.length > 0) { + const tokenCounts = await estimateTokensBatch(outputsToTokenize) + return tokenCounts.reduce((sum, count) => sum + count, 0) + } + + return 0 +} diff --git a/lib/prompt.ts b/lib/core/prompt.ts similarity index 98% rename from lib/prompt.ts rename to lib/core/prompt.ts index e210284..e7f44d4 100644 --- a/lib/prompt.ts +++ b/lib/core/prompt.ts @@ -2,7 +2,7 @@ import { readFileSync } from "fs" import { join } from "path" export function loadPrompt(name: string, vars?: Record): string { - const filePath = join(__dirname, "prompts", `${name}.txt`) + const filePath = join(__dirname, "..", "prompts", `${name}.txt`) let content = readFileSync(filePath, "utf8").trim() if (vars) { for (const [key, value] of Object.entries(vars)) { diff --git a/lib/core/strategies/deduplication.ts b/lib/core/strategies/deduplication.ts new file mode 100644 index 0000000..685fa06 --- /dev/null +++ b/lib/core/strategies/deduplication.ts @@ -0,0 +1,88 @@ +import { extractParameterKey } from "../../ui/display-utils" +import type { PruningStrategy, StrategyResult, ToolMetadata } from "./types" + +/** + * Deduplication strategy - prunes older tool calls that have identical + * tool name and parameters, keeping only the most recent occurrence. 
+ */ +export const deduplicationStrategy: PruningStrategy = { + name: "deduplication", + + detect( + toolMetadata: Map, + unprunedIds: string[], + protectedTools: string[] + ): StrategyResult { + const signatureMap = new Map() + + const deduplicatableIds = unprunedIds.filter(id => { + const metadata = toolMetadata.get(id) + return !metadata || !protectedTools.includes(metadata.tool) + }) + + for (const id of deduplicatableIds) { + const metadata = toolMetadata.get(id) + if (!metadata) continue + + const signature = createToolSignature(metadata.tool, metadata.parameters) + if (!signatureMap.has(signature)) { + signatureMap.set(signature, []) + } + signatureMap.get(signature)!.push(id) + } + + const prunedIds: string[] = [] + const details = new Map() + + for (const [signature, ids] of signatureMap.entries()) { + if (ids.length > 1) { + const metadata = toolMetadata.get(ids[0])! + const idsToRemove = ids.slice(0, -1) // All except last + prunedIds.push(...idsToRemove) + + details.set(signature, { + toolName: metadata.tool, + parameterKey: extractParameterKey(metadata), + reason: `duplicate (${ids.length} occurrences, kept most recent)`, + duplicateCount: ids.length, + prunedIds: idsToRemove, + keptId: ids[ids.length - 1] + }) + } + } + + return { prunedIds, details } + } +} + +function createToolSignature(tool: string, parameters?: any): string { + if (!parameters) return tool + + const normalized = normalizeParameters(parameters) + const sorted = sortObjectKeys(normalized) + return `${tool}::${JSON.stringify(sorted)}` +} + +function normalizeParameters(params: any): any { + if (typeof params !== 'object' || params === null) return params + if (Array.isArray(params)) return params + + const normalized: any = {} + for (const [key, value] of Object.entries(params)) { + if (value !== undefined && value !== null) { + normalized[key] = value + } + } + return normalized +} + +function sortObjectKeys(obj: any): any { + if (typeof obj !== 'object' || obj === null) return obj 
+ if (Array.isArray(obj)) return obj.map(sortObjectKeys) + + const sorted: any = {} + for (const key of Object.keys(obj).sort()) { + sorted[key] = sortObjectKeys(obj[key]) + } + return sorted +} diff --git a/lib/core/strategies/index.ts b/lib/core/strategies/index.ts new file mode 100644 index 0000000..060bf64 --- /dev/null +++ b/lib/core/strategies/index.ts @@ -0,0 +1,72 @@ +/** + * Strategy runner - executes all enabled pruning strategies and collects results. + */ + +import type { PruningStrategy, StrategyResult, ToolMetadata } from "./types" +import { deduplicationStrategy } from "./deduplication" + +export type { PruningStrategy, StrategyResult, ToolMetadata, StrategyDetail } from "./types" + +/** All available strategies */ +const ALL_STRATEGIES: PruningStrategy[] = [ + deduplicationStrategy, + // Future strategies will be added here: + // errorPruningStrategy, + // writeReadStrategy, + // partialReadStrategy, +] + +export interface RunStrategiesResult { + /** All tool IDs that should be pruned (deduplicated) */ + prunedIds: string[] + /** Results keyed by strategy name */ + byStrategy: Map +} + +/** + * Run all enabled strategies and collect pruned IDs. + * + * @param toolMetadata - Map of tool call ID to metadata + * @param unprunedIds - Tool call IDs not yet pruned (chronological order) + * @param protectedTools - Tool names that should never be pruned + * @param enabledStrategies - Strategy names to run (defaults to all) + */ +export function runStrategies( + toolMetadata: Map, + unprunedIds: string[], + protectedTools: string[], + enabledStrategies?: string[] +): RunStrategiesResult { + const byStrategy = new Map() + const allPrunedIds = new Set() + + // Filter to enabled strategies (or all if not specified) + const strategies = enabledStrategies + ? 
ALL_STRATEGIES.filter(s => enabledStrategies.includes(s.name)) + : ALL_STRATEGIES + + // Track which IDs are still available for each strategy + let remainingIds = unprunedIds + + for (const strategy of strategies) { + const result = strategy.detect(toolMetadata, remainingIds, protectedTools) + + if (result.prunedIds.length > 0) { + byStrategy.set(strategy.name, result) + + // Add to overall pruned set + for (const id of result.prunedIds) { + allPrunedIds.add(id) + } + + // Remove pruned IDs from remaining for next strategy + const prunedSet = new Set(result.prunedIds.map(id => id.toLowerCase())) + remainingIds = remainingIds.filter(id => !prunedSet.has(id.toLowerCase())) + } + } + + return { + prunedIds: Array.from(allPrunedIds), + byStrategy + } +} diff --git a/lib/core/strategies/types.ts b/lib/core/strategies/types.ts new file mode 100644 index 0000000..a013a0d --- /dev/null +++ b/lib/core/strategies/types.ts @@ -0,0 +1,43 @@ +/** + * Common interface for rule-based pruning strategies. + * Each strategy analyzes tool metadata and returns IDs that should be pruned. + */ + +export interface ToolMetadata { + tool: string + parameters?: any +} + +export interface StrategyResult { + /** Tool call IDs that should be pruned */ + prunedIds: string[] + /** Optional details about what was pruned and why */ + details?: Map +} + +export interface StrategyDetail { + toolName: string + parameterKey: string + reason: string + /** Additional info specific to the strategy */ + [key: string]: any +} + +export interface PruningStrategy { + /** Unique identifier for this strategy */ + name: string + + /** + * Analyze tool metadata and determine which tool calls should be pruned. 
+ * + * @param toolMetadata - Map of tool call ID to metadata (tool name + parameters) + * @param unprunedIds - Tool call IDs that haven't been pruned yet (chronological order) + * @param protectedTools - Tool names that should never be pruned + * @returns IDs to prune and optional details + */ + detect( + toolMetadata: Map, + unprunedIds: string[], + protectedTools: string[] + ): StrategyResult +} diff --git a/lib/deduplicator.ts b/lib/deduplicator.ts deleted file mode 100644 index 1d64940..0000000 --- a/lib/deduplicator.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { extractParameterKey } from "./display-utils" - -export interface DuplicateDetectionResult { - duplicateIds: string[] // IDs to prune (older duplicates) - deduplicationDetails: Map -} - -export function detectDuplicates( - toolMetadata: Map, - unprunedToolCallIds: string[], // In chronological order - protectedTools: string[] -): DuplicateDetectionResult { - const signatureMap = new Map() - - const deduplicatableIds = unprunedToolCallIds.filter(id => { - const metadata = toolMetadata.get(id) - return !metadata || !protectedTools.includes(metadata.tool) - }) - - for (const id of deduplicatableIds) { - const metadata = toolMetadata.get(id) - if (!metadata) continue - - const signature = createToolSignature(metadata.tool, metadata.parameters) - if (!signatureMap.has(signature)) { - signatureMap.set(signature, []) - } - signatureMap.get(signature)!.push(id) - } - - const duplicateIds: string[] = [] - const deduplicationDetails = new Map() - - for (const [signature, ids] of signatureMap.entries()) { - if (ids.length > 1) { - const metadata = toolMetadata.get(ids[0])! 
- const idsToRemove = ids.slice(0, -1) // All except last - duplicateIds.push(...idsToRemove) - - deduplicationDetails.set(signature, { - toolName: metadata.tool, - parameterKey: extractParameterKey(metadata), - duplicateCount: ids.length, - prunedIds: idsToRemove, - keptId: ids[ids.length - 1] - }) - } - } - - return { duplicateIds, deduplicationDetails } -} - -function createToolSignature(tool: string, parameters?: any): string { - if (!parameters) return tool - - const normalized = normalizeParameters(parameters) - const sorted = sortObjectKeys(normalized) - return `${tool}::${JSON.stringify(sorted)}` -} - -function normalizeParameters(params: any): any { - if (typeof params !== 'object' || params === null) return params - if (Array.isArray(params)) return params - - const normalized: any = {} - for (const [key, value] of Object.entries(params)) { - if (value !== undefined && value !== null) { - normalized[key] = value - } - } - return normalized -} - -function sortObjectKeys(obj: any): any { - if (typeof obj !== 'object' || obj === null) return obj - if (Array.isArray(obj)) return obj.map(sortObjectKeys) - - const sorted: any = {} - for (const key of Object.keys(obj).sort()) { - sorted[key] = sortObjectKeys(obj[key]) - } - return sorted -} diff --git a/lib/fetch-wrapper/gc-tracker.ts b/lib/fetch-wrapper/gc-tracker.ts new file mode 100644 index 0000000..9119d89 --- /dev/null +++ b/lib/fetch-wrapper/gc-tracker.ts @@ -0,0 +1,77 @@ +import type { PluginState } from "../state" +import type { Logger } from "../logger" + +export function accumulateGCStats( + state: PluginState, + sessionId: string, + prunedIds: string[], + body: any, + logger: Logger +): void { + if (prunedIds.length === 0) return + + const toolOutputs = extractToolOutputsFromBody(body, prunedIds) + const tokensCollected = estimateTokensFromOutputs(toolOutputs) + + const existing = state.gcPending.get(sessionId) ?? 
{ tokensCollected: 0, toolsDeduped: 0 } + + state.gcPending.set(sessionId, { + tokensCollected: existing.tokensCollected + tokensCollected, + toolsDeduped: existing.toolsDeduped + prunedIds.length + }) + + logger.debug("gc-tracker", "Accumulated GC stats", { + sessionId: sessionId.substring(0, 8), + newlyDeduped: prunedIds.length, + tokensThisCycle: tokensCollected, + pendingTotal: state.gcPending.get(sessionId) + }) +} + +function extractToolOutputsFromBody(body: any, prunedIds: string[]): string[] { + const outputs: string[] = [] + const prunedIdSet = new Set(prunedIds.map(id => id.toLowerCase())) + + // OpenAI Chat format + if (body.messages && Array.isArray(body.messages)) { + for (const m of body.messages) { + if (m.role === 'tool' && m.tool_call_id && prunedIdSet.has(m.tool_call_id.toLowerCase())) { + if (typeof m.content === 'string') { + outputs.push(m.content) + } + } + // Anthropic format + if (m.role === 'user' && Array.isArray(m.content)) { + for (const part of m.content) { + if (part.type === 'tool_result' && part.tool_use_id && prunedIdSet.has(part.tool_use_id.toLowerCase())) { + if (typeof part.content === 'string') { + outputs.push(part.content) + } + } + } + } + } + } + + // OpenAI Responses format + if (body.input && Array.isArray(body.input)) { + for (const item of body.input) { + if (item.type === 'function_call_output' && item.call_id && prunedIdSet.has(item.call_id.toLowerCase())) { + if (typeof item.output === 'string') { + outputs.push(item.output) + } + } + } + } + + return outputs +} + +// Character-based approximation (chars / 4) to avoid async tokenizer in fetch path +function estimateTokensFromOutputs(outputs: string[]): number { + let totalChars = 0 + for (const output of outputs) { + totalChars += output.length + } + return Math.round(totalChars / 4) +} diff --git a/lib/fetch-wrapper/gemini.ts b/lib/fetch-wrapper/gemini.ts index d02bbd0..abc1bd6 100644 --- a/lib/fetch-wrapper/gemini.ts +++ b/lib/fetch-wrapper/gemini.ts @@ -4,7 +4,7 @@ 
import { getAllPrunedIds, fetchSessionMessages } from "./types" -import { injectNudgeGemini, injectSynthGemini } from "../synth-instruction" +import { injectNudgeGemini, injectSynthGemini } from "../api-formats/synth-instruction" /** * Handles Google/Gemini format (body.contents array with functionResponse parts). diff --git a/lib/fetch-wrapper/index.ts b/lib/fetch-wrapper/index.ts index 75fd7c8..450e99c 100644 --- a/lib/fetch-wrapper/index.ts +++ b/lib/fetch-wrapper/index.ts @@ -1,12 +1,13 @@ import type { PluginState } from "../state" import type { Logger } from "../logger" import type { FetchHandlerContext, SynthPrompts } from "./types" -import type { ToolTracker } from "../synth-instruction" +import type { ToolTracker } from "../api-formats/synth-instruction" import type { PluginConfig } from "../config" import { handleOpenAIChatAndAnthropic } from "./openai-chat" import { handleGemini } from "./gemini" import { handleOpenAIResponses } from "./openai-responses" -import { detectDuplicates } from "../deduplicator" +import { runStrategies } from "../core/strategies" +import { accumulateGCStats } from "./gc-tracker" export type { FetchHandlerContext, FetchHandlerResult, SynthPrompts } from "./types" @@ -79,7 +80,7 @@ export function installFetchWrapper( } } - // Run deduplication after handlers have populated toolParameters cache + // Run strategies after handlers have populated toolParameters cache const sessionId = state.lastSeenSessionId if (sessionId && state.toolParameters.size > 0) { const toolIds = Array.from(state.toolParameters.keys()) @@ -87,11 +88,18 @@ export function installFetchWrapper( const alreadyPrunedLower = new Set(alreadyPruned.map(id => id.toLowerCase())) const unpruned = toolIds.filter(id => !alreadyPrunedLower.has(id.toLowerCase())) if (unpruned.length > 1) { - const { duplicateIds } = detectDuplicates(state.toolParameters, unpruned, config.protectedTools) - if (duplicateIds.length > 0) { + const result = runStrategies( + 
state.toolParameters, + unpruned, + config.protectedTools + ) + if (result.prunedIds.length > 0) { // Normalize to lowercase to match janitor's ID normalization - const normalizedIds = duplicateIds.map(id => id.toLowerCase()) + const normalizedIds = result.prunedIds.map(id => id.toLowerCase()) state.prunedIds.set(sessionId, [...new Set([...alreadyPruned, ...normalizedIds])]) + + // Track GC activity for the next notification + accumulateGCStats(state, sessionId, result.prunedIds, body, logger) } } } diff --git a/lib/fetch-wrapper/openai-chat.ts b/lib/fetch-wrapper/openai-chat.ts index 9aeb6d0..78b522e 100644 --- a/lib/fetch-wrapper/openai-chat.ts +++ b/lib/fetch-wrapper/openai-chat.ts @@ -5,8 +5,8 @@ import { fetchSessionMessages, getMostRecentActiveSession } from "./types" -import { cacheToolParametersFromMessages } from "../tool-cache" -import { injectNudge, injectSynth } from "../synth-instruction" +import { cacheToolParametersFromMessages } from "../state/tool-cache" +import { injectNudge, injectSynth } from "../api-formats/synth-instruction" /** * Handles OpenAI Chat Completions format (body.messages with role='tool'). diff --git a/lib/fetch-wrapper/openai-responses.ts b/lib/fetch-wrapper/openai-responses.ts index 7741617..b8a1dbd 100644 --- a/lib/fetch-wrapper/openai-responses.ts +++ b/lib/fetch-wrapper/openai-responses.ts @@ -5,8 +5,8 @@ import { fetchSessionMessages, getMostRecentActiveSession } from "./types" -import { cacheToolParametersFromInput } from "../tool-cache" -import { injectNudgeResponses, injectSynthResponses } from "../synth-instruction" +import { cacheToolParametersFromInput } from "../state/tool-cache" +import { injectNudgeResponses, injectSynthResponses } from "../api-formats/synth-instruction" /** * Handles OpenAI Responses API format (body.input array with function_call_output items). 
diff --git a/lib/fetch-wrapper/types.ts b/lib/fetch-wrapper/types.ts index f23baf9..d6cf4ab 100644 --- a/lib/fetch-wrapper/types.ts +++ b/lib/fetch-wrapper/types.ts @@ -1,6 +1,6 @@ import { type PluginState, ensureSessionRestored } from "../state" import type { Logger } from "../logger" -import type { ToolTracker } from "../synth-instruction" +import type { ToolTracker } from "../api-formats/synth-instruction" import type { PluginConfig } from "../config" /** The message used to replace pruned tool output content */ diff --git a/lib/hooks.ts b/lib/hooks.ts index d6d1834..dac0b54 100644 --- a/lib/hooks.ts +++ b/lib/hooks.ts @@ -1,9 +1,10 @@ import type { PluginState } from "./state" import type { Logger } from "./logger" -import type { Janitor } from "./janitor" +import type { JanitorContext } from "./core/janitor" +import { runOnIdle } from "./core/janitor" import type { PluginConfig, PruningStrategy } from "./config" -import type { ToolTracker } from "./synth-instruction" -import { resetToolTrackerCount } from "./synth-instruction" +import type { ToolTracker } from "./api-formats/synth-instruction" +import { resetToolTrackerCount } from "./api-formats/synth-instruction" export async function isSubagentSession(client: any, sessionID: string): Promise { try { @@ -20,7 +21,7 @@ function toolStrategiesCoveredByIdle(onIdle: PruningStrategy[], onTool: PruningS export function createEventHandler( client: any, - janitor: Janitor, + janitorCtx: JanitorContext, logger: Logger, config: PluginConfig, toolTracker?: ToolTracker @@ -40,7 +41,7 @@ export function createEventHandler( } try { - const result = await janitor.runOnIdle(event.properties.sessionID, config.strategies.onIdle) + const result = await runOnIdle(janitorCtx, event.properties.sessionID, config.strategies.onIdle) // Reset nudge counter if idle pruning succeeded and covers tool strategies if (result && result.prunedCount > 0 && toolTracker && config.nudge_freq > 0) { diff --git a/lib/janitor.ts b/lib/janitor.ts 
deleted file mode 100644 index 643beae..0000000 --- a/lib/janitor.ts +++ /dev/null @@ -1,720 +0,0 @@ -import { z } from "zod" -import type { Logger } from "./logger" -import type { PruningStrategy } from "./config" -import type { PluginState } from "./state" -import { buildAnalysisPrompt } from "./prompt" -import { selectModel, extractModelFromSession } from "./model-selector" -import { estimateTokensBatch, formatTokenCount } from "./tokenizer" -import { detectDuplicates } from "./deduplicator" -import { extractParameterKey } from "./display-utils" -import { saveSessionState } from "./state-persistence" -import { ensureSessionRestored } from "./state" - -export interface SessionStats { - totalToolsPruned: number - totalTokensSaved: number -} - -export interface PruningResult { - prunedCount: number - tokensSaved: number - thinkingIds: string[] - deduplicatedIds: string[] - llmPrunedIds: string[] - deduplicationDetails: Map - toolMetadata: Map - sessionStats: SessionStats -} - -export interface PruningOptions { - reason?: string - trigger: 'idle' | 'tool' -} - -export class Janitor { - private prunedIdsState: Map - private statsState: Map - private toolParametersCache: Map - private modelCache: Map - - constructor( - private client: any, - private state: PluginState, - private logger: Logger, - private protectedTools: string[], - private configModel?: string, - private showModelErrorToasts: boolean = true, - private strictModelSelection: boolean = false, - private pruningSummary: "off" | "minimal" | "detailed" = "detailed", - private workingDirectory?: string - ) { - // Bind state references for convenience - this.prunedIdsState = state.prunedIds - this.statsState = state.stats - this.toolParametersCache = state.toolParameters - this.modelCache = state.model - } - - private async sendIgnoredMessage(sessionID: string, text: string, agent?: string) { - try { - await this.client.session.prompt({ - path: { id: sessionID }, - body: { - noReply: true, - agent: agent, - 
parts: [{ - type: 'text', - text: text, - ignored: true - }] - } - }) - } catch (error: any) { - this.logger.error("janitor", "Failed to send notification", { error: error.message }) - } - } - - async runOnIdle(sessionID: string, strategies: PruningStrategy[]): Promise { - return await this.runWithStrategies(sessionID, strategies, { trigger: 'idle' }) - } - - async runForTool( - sessionID: string, - strategies: PruningStrategy[], - reason?: string - ): Promise { - return await this.runWithStrategies(sessionID, strategies, { trigger: 'tool', reason }) - } - - async runWithStrategies( - sessionID: string, - strategies: PruningStrategy[], - options: PruningOptions - ): Promise { - try { - if (strategies.length === 0) { - return null - } - - // Ensure persisted state is restored before processing - await ensureSessionRestored(this.state, sessionID, this.logger) - - const [sessionInfoResponse, messagesResponse] = await Promise.all([ - this.client.session.get({ path: { id: sessionID } }), - this.client.session.messages({ path: { id: sessionID }, query: { limit: 100 } }) - ]) - - const sessionInfo = sessionInfoResponse.data - const messages = messagesResponse.data || messagesResponse - - if (!messages || messages.length < 3) { - return null - } - - let currentAgent: string | undefined = undefined - for (let i = messages.length - 1; i >= 0; i--) { - const msg = messages[i] - const info = msg.info - if (info?.role === 'user') { - currentAgent = info.agent || 'build' - break - } - } - - const toolCallIds: string[] = [] - const toolOutputs = new Map() - const toolMetadata = new Map() - const batchToolChildren = new Map() - let currentBatchId: string | null = null - - for (const msg of messages) { - if (msg.parts) { - for (const part of msg.parts) { - if (part.type === "tool" && part.callID) { - const normalizedId = part.callID.toLowerCase() - toolCallIds.push(normalizedId) - - const cachedData = this.toolParametersCache.get(part.callID) || 
this.toolParametersCache.get(normalizedId) - const parameters = cachedData?.parameters ?? part.state?.input ?? part.parameters - - toolMetadata.set(normalizedId, { - tool: part.tool, - parameters: parameters - }) - - if (part.state?.status === "completed" && part.state.output) { - toolOutputs.set(normalizedId, part.state.output) - } - - if (part.tool === "batch") { - currentBatchId = normalizedId - batchToolChildren.set(normalizedId, []) - } else if (currentBatchId && normalizedId.startsWith('prt_')) { - batchToolChildren.get(currentBatchId)!.push(normalizedId) - } else if (currentBatchId && !normalizedId.startsWith('prt_')) { - currentBatchId = null - } - } - } - } - } - - const alreadyPrunedIds = this.prunedIdsState.get(sessionID) ?? [] - const unprunedToolCallIds = toolCallIds.filter(id => !alreadyPrunedIds.includes(id)) - - if (unprunedToolCallIds.length === 0) { - return null - } - - // PHASE 1: DUPLICATE DETECTION - let deduplicatedIds: string[] = [] - let deduplicationDetails = new Map() - - if (strategies.includes('deduplication')) { - const dedupeResult = detectDuplicates(toolMetadata, unprunedToolCallIds, this.protectedTools) - deduplicatedIds = dedupeResult.duplicateIds - deduplicationDetails = dedupeResult.deduplicationDetails - } - - const candidateCount = unprunedToolCallIds.filter(id => { - const metadata = toolMetadata.get(id) - return !metadata || !this.protectedTools.includes(metadata.tool) - }).length - - // PHASE 2: LLM ANALYSIS - let llmPrunedIds: string[] = [] - - if (strategies.includes('ai-analysis')) { - const protectedToolCallIds: string[] = [] - const prunableToolCallIds = unprunedToolCallIds.filter(id => { - if (deduplicatedIds.includes(id)) return false - - const metadata = toolMetadata.get(id) - if (metadata && this.protectedTools.includes(metadata.tool)) { - protectedToolCallIds.push(id) - return false - } - - return true - }) - - if (prunableToolCallIds.length > 0) { - const cachedModelInfo = this.modelCache.get(sessionID) - const 
sessionModelInfo = extractModelFromSession(sessionInfo, this.logger) - const currentModelInfo = cachedModelInfo || sessionModelInfo - - const modelSelection = await selectModel(currentModelInfo, this.logger, this.configModel, this.workingDirectory) - - this.logger.info("janitor", `Model: ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, { - source: modelSelection.source - }) - - if (modelSelection.failedModel && this.showModelErrorToasts) { - const skipAi = modelSelection.source === 'fallback' && this.strictModelSelection - try { - await this.client.tui.showToast({ - body: { - title: skipAi ? "DCP: AI analysis skipped" : "DCP: Model fallback", - message: skipAi - ? `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nAI analysis skipped (strictModelSelection enabled)` - : `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nUsing ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, - variant: "info", - duration: 5000 - } - }) - } catch (toastError: any) { - } - } - - if (modelSelection.source === 'fallback' && this.strictModelSelection) { - this.logger.info("janitor", "Skipping AI analysis (fallback model, strictModelSelection enabled)") - } else { - const { generateObject } = await import('ai') - - const allPrunedSoFar = [...alreadyPrunedIds, ...deduplicatedIds] - const sanitizedMessages = this.replacePrunedToolOutputs(messages, allPrunedSoFar) - - const analysisPrompt = buildAnalysisPrompt( - prunableToolCallIds, - sanitizedMessages, - allPrunedSoFar, - protectedToolCallIds, - options.reason - ) - - await this.logger.saveWrappedContext( - "janitor-shadow", - [{ role: "user", content: analysisPrompt }], - { - sessionID, - modelProvider: modelSelection.modelInfo.providerID, - modelID: modelSelection.modelInfo.modelID, - candidateToolCount: prunableToolCallIds.length, - alreadyPrunedCount: allPrunedSoFar.length, - protectedToolCount: 
protectedToolCallIds.length, - trigger: options.trigger, - reason: options.reason - } - ) - - const result = await generateObject({ - model: modelSelection.model, - schema: z.object({ - pruned_tool_call_ids: z.array(z.string()), - reasoning: z.string(), - }), - prompt: analysisPrompt - }) - - const rawLlmPrunedIds = result.object.pruned_tool_call_ids - llmPrunedIds = rawLlmPrunedIds.filter(id => - prunableToolCallIds.includes(id.toLowerCase()) - ) - - if (llmPrunedIds.length > 0) { - const reasoning = result.object.reasoning.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim() - this.logger.info("janitor", `LLM reasoning: ${reasoning.substring(0, 200)}${reasoning.length > 200 ? '...' : ''}`) - } - } - } - } - - // PHASE 3: COMBINE & EXPAND - const newlyPrunedIds = [...deduplicatedIds, ...llmPrunedIds] - - if (newlyPrunedIds.length === 0) { - return null - } - - const expandBatchIds = (ids: string[]): string[] => { - const expanded = new Set() - for (const id of ids) { - const normalizedId = id.toLowerCase() - expanded.add(normalizedId) - const children = batchToolChildren.get(normalizedId) - if (children) { - children.forEach(childId => expanded.add(childId)) - } - } - return Array.from(expanded) - } - - const expandedPrunedIds = new Set(expandBatchIds(newlyPrunedIds)) - const expandedLlmPrunedIds = expandBatchIds(llmPrunedIds) - const finalNewlyPrunedIds = Array.from(expandedPrunedIds).filter(id => !alreadyPrunedIds.includes(id)) - const finalPrunedIds = Array.from(expandedPrunedIds) - - // PHASE 4: CALCULATE STATS & NOTIFICATION - const tokensSaved = await this.calculateTokensSaved(finalNewlyPrunedIds, toolOutputs) - - const currentStats = this.statsState.get(sessionID) ?? 
{ totalToolsPruned: 0, totalTokensSaved: 0 } - const sessionStats: SessionStats = { - totalToolsPruned: currentStats.totalToolsPruned + finalNewlyPrunedIds.length, - totalTokensSaved: currentStats.totalTokensSaved + tokensSaved - } - this.statsState.set(sessionID, sessionStats) - - const hasLlmAnalysis = strategies.includes('ai-analysis') - - if (hasLlmAnalysis) { - await this.sendSmartModeNotification( - sessionID, - deduplicatedIds, - deduplicationDetails, - expandedLlmPrunedIds, - toolMetadata, - tokensSaved, - sessionStats, - currentAgent - ) - } else { - await this.sendAutoModeNotification( - sessionID, - deduplicatedIds, - deduplicationDetails, - tokensSaved, - sessionStats, - currentAgent - ) - } - - // PHASE 5: STATE UPDATE - const allPrunedIds = [...new Set([...alreadyPrunedIds, ...finalPrunedIds])] - this.prunedIdsState.set(sessionID, allPrunedIds) - - const sessionName = sessionInfo?.title - saveSessionState(sessionID, new Set(allPrunedIds), sessionStats, this.logger, sessionName).catch(err => { - this.logger.error("janitor", "Failed to persist state", { error: err.message }) - }) - - const prunedCount = finalNewlyPrunedIds.length - const keptCount = candidateCount - prunedCount - const hasBoth = deduplicatedIds.length > 0 && llmPrunedIds.length > 0 - const breakdown = hasBoth ? 
` (${deduplicatedIds.length} duplicate, ${llmPrunedIds.length} llm)` : "" - - const logMeta: Record = { trigger: options.trigger } - if (options.reason) { - logMeta.reason = options.reason - } - - this.logger.info("janitor", `Pruned ${prunedCount}/${candidateCount} tools${breakdown}, ${keptCount} kept (~${formatTokenCount(tokensSaved)} tokens)`, logMeta) - - return { - prunedCount: finalNewlyPrunedIds.length, - tokensSaved, - thinkingIds: [], - deduplicatedIds, - llmPrunedIds: expandedLlmPrunedIds, - deduplicationDetails, - toolMetadata, - sessionStats - } - - } catch (error: any) { - this.logger.error("janitor", "Analysis failed", { - error: error.message, - trigger: options.trigger - }) - return null - } - } - - private shortenPath(input: string): string { - const inPathMatch = input.match(/^(.+) in (.+)$/) - if (inPathMatch) { - const prefix = inPathMatch[1] - const pathPart = inPathMatch[2] - const shortenedPath = this.shortenSinglePath(pathPart) - return `${prefix} in ${shortenedPath}` - } - - return this.shortenSinglePath(input) - } - - private shortenSinglePath(path: string): string { - const homeDir = require('os').homedir() - - if (this.workingDirectory) { - if (path.startsWith(this.workingDirectory + '/')) { - return path.slice(this.workingDirectory.length + 1) - } - if (path === this.workingDirectory) { - return '.' - } - } - - if (path.startsWith(homeDir)) { - path = '~' + path.slice(homeDir.length) - } - - const nodeModulesMatch = path.match(/node_modules\/(@[^\/]+\/[^\/]+|[^\/]+)\/(.*)/) - if (nodeModulesMatch) { - return `${nodeModulesMatch[1]}/${nodeModulesMatch[2]}` - } - - if (this.workingDirectory) { - const workingDirWithTilde = this.workingDirectory.startsWith(homeDir) - ? '~' + this.workingDirectory.slice(homeDir.length) - : null - - if (workingDirWithTilde && path.startsWith(workingDirWithTilde + '/')) { - return path.slice(workingDirWithTilde.length + 1) - } - if (workingDirWithTilde && path === workingDirWithTilde) { - return '.' 
- } - } - - return path - } - - private replacePrunedToolOutputs(messages: any[], prunedIds: string[]): any[] { - if (prunedIds.length === 0) return messages - - const prunedIdsSet = new Set(prunedIds.map(id => id.toLowerCase())) - - return messages.map(msg => { - if (!msg.parts) return msg - - return { - ...msg, - parts: msg.parts.map((part: any) => { - if (part.type === 'tool' && - part.callID && - prunedIdsSet.has(part.callID.toLowerCase()) && - part.state?.output) { - return { - ...part, - state: { - ...part.state, - output: '[Output removed to save context - information superseded or no longer needed]' - } - } - } - return part - }) - } - }) - } - - private async calculateTokensSaved(prunedIds: string[], toolOutputs: Map): Promise { - const outputsToTokenize: string[] = [] - - for (const prunedId of prunedIds) { - const output = toolOutputs.get(prunedId) - if (output) { - outputsToTokenize.push(output) - } - } - - if (outputsToTokenize.length > 0) { - const tokenCounts = await estimateTokensBatch(outputsToTokenize) - return tokenCounts.reduce((sum, count) => sum + count, 0) - } - - return 0 - } - - private buildToolsSummary(prunedIds: string[], toolMetadata: Map): Map { - const toolsSummary = new Map() - - const truncate = (str: string, maxLen: number = 60): string => { - if (str.length <= maxLen) return str - return str.slice(0, maxLen - 3) + '...' 
- } - - for (const prunedId of prunedIds) { - const normalizedId = prunedId.toLowerCase() - const metadata = toolMetadata.get(normalizedId) - if (metadata) { - const toolName = metadata.tool - if (toolName === 'batch') continue - if (!toolsSummary.has(toolName)) { - toolsSummary.set(toolName, []) - } - - const paramKey = extractParameterKey(metadata) - if (paramKey) { - const displayKey = truncate(this.shortenPath(paramKey), 80) - toolsSummary.get(toolName)!.push(displayKey) - } else { - toolsSummary.get(toolName)!.push('(default)') - } - } - } - - return toolsSummary - } - - private groupDeduplicationDetails( - deduplicationDetails: Map - ): Map> { - const grouped = new Map>() - - for (const [_, details] of deduplicationDetails) { - const { toolName, parameterKey, duplicateCount } = details - if (toolName === 'batch') continue - if (!grouped.has(toolName)) { - grouped.set(toolName, []) - } - grouped.get(toolName)!.push({ - count: duplicateCount, - key: this.shortenPath(parameterKey) - }) - } - - return grouped - } - - private formatDeduplicationLines( - grouped: Map>, - indent: string = ' ' - ): string[] { - const lines: string[] = [] - - for (const [toolName, items] of grouped.entries()) { - for (const item of items) { - const removedCount = item.count - 1 - lines.push(`${indent}${toolName}: ${item.key} (${removedCount}ร— duplicate)`) - } - } - - return lines - } - - private formatToolSummaryLines( - toolsSummary: Map, - indent: string = ' ' - ): string[] { - const lines: string[] = [] - - for (const [toolName, params] of toolsSummary.entries()) { - if (params.length === 1) { - lines.push(`${indent}${toolName}: ${params[0]}`) - } else if (params.length > 1) { - lines.push(`${indent}${toolName} (${params.length}):`) - for (const param of params) { - lines.push(`${indent} ${param}`) - } - } - } - - return lines - } - - private async sendMinimalNotification( - sessionID: string, - totalPruned: number, - tokensSaved: number, - sessionStats: SessionStats, - agent?: 
string - ) { - if (totalPruned === 0) return - - const tokensFormatted = formatTokenCount(tokensSaved) - const toolText = totalPruned === 1 ? 'tool' : 'tools' - - let message = `๐Ÿงน DCP: Saved ~${tokensFormatted} tokens (${totalPruned} ${toolText} pruned)` - - if (sessionStats.totalToolsPruned > totalPruned) { - message += ` โ”‚ Session: ~${formatTokenCount(sessionStats.totalTokensSaved)} tokens, ${sessionStats.totalToolsPruned} tools` - } - - await this.sendIgnoredMessage(sessionID, message, agent) - } - - private async sendAutoModeNotification( - sessionID: string, - deduplicatedIds: string[], - deduplicationDetails: Map, - tokensSaved: number, - sessionStats: SessionStats, - agent?: string - ) { - if (deduplicatedIds.length === 0) return - if (this.pruningSummary === 'off') return - - if (this.pruningSummary === 'minimal') { - await this.sendMinimalNotification(sessionID, deduplicatedIds.length, tokensSaved, sessionStats, agent) - return - } - - const tokensFormatted = formatTokenCount(tokensSaved) - const toolText = deduplicatedIds.length === 1 ? 'tool' : 'tools' - let message = `๐Ÿงน DCP: Saved ~${tokensFormatted} tokens (${deduplicatedIds.length} duplicate ${toolText} removed)` - - if (sessionStats.totalToolsPruned > deduplicatedIds.length) { - message += ` โ”‚ Session: ~${formatTokenCount(sessionStats.totalTokensSaved)} tokens, ${sessionStats.totalToolsPruned} tools` - } - message += '\n' - - const grouped = this.groupDeduplicationDetails(deduplicationDetails) - - for (const [toolName, items] of grouped.entries()) { - const totalDupes = items.reduce((sum, item) => sum + (item.count - 1), 0) - message += `\n${toolName} (${totalDupes} duplicate${totalDupes > 1 ? 's' : ''}):\n` - - for (const item of items.slice(0, 5)) { - const dupeCount = item.count - 1 - message += ` ${item.key} (${dupeCount}ร— duplicate)\n` - } - - if (items.length > 5) { - message += ` ... 
and ${items.length - 5} more\n` - } - } - - await this.sendIgnoredMessage(sessionID, message.trim(), agent) - } - - formatPruningResultForTool(result: PruningResult): string { - const lines: string[] = [] - lines.push(`Context pruning complete. Pruned ${result.prunedCount} tool outputs.`) - lines.push('') - - if (result.deduplicatedIds.length > 0 && result.deduplicationDetails.size > 0) { - lines.push(`Duplicates removed (${result.deduplicatedIds.length}):`) - const grouped = this.groupDeduplicationDetails(result.deduplicationDetails) - lines.push(...this.formatDeduplicationLines(grouped)) - lines.push('') - } - - if (result.llmPrunedIds.length > 0) { - lines.push(`Semantically pruned (${result.llmPrunedIds.length}):`) - const toolsSummary = this.buildToolsSummary(result.llmPrunedIds, result.toolMetadata) - lines.push(...this.formatToolSummaryLines(toolsSummary)) - } - - return lines.join('\n').trim() - } - - private async sendSmartModeNotification( - sessionID: string, - deduplicatedIds: string[], - deduplicationDetails: Map, - llmPrunedIds: string[], - toolMetadata: Map, - tokensSaved: number, - sessionStats: SessionStats, - agent?: string - ) { - const totalPruned = deduplicatedIds.length + llmPrunedIds.length - if (totalPruned === 0) return - if (this.pruningSummary === 'off') return - - if (this.pruningSummary === 'minimal') { - await this.sendMinimalNotification(sessionID, totalPruned, tokensSaved, sessionStats, agent) - return - } - - const tokensFormatted = formatTokenCount(tokensSaved) - - let message = `๐Ÿงน DCP: Saved ~${tokensFormatted} tokens (${totalPruned} tool${totalPruned > 1 ? 
's' : ''} pruned)` - - if (sessionStats.totalToolsPruned > totalPruned) { - message += ` โ”‚ Session: ~${formatTokenCount(sessionStats.totalTokensSaved)} tokens, ${sessionStats.totalToolsPruned} tools` - } - message += '\n' - - if (deduplicatedIds.length > 0 && deduplicationDetails) { - message += `\n๐Ÿ“ฆ Duplicates removed (${deduplicatedIds.length}):\n` - const grouped = this.groupDeduplicationDetails(deduplicationDetails) - - for (const [toolName, items] of grouped.entries()) { - message += ` ${toolName}:\n` - for (const item of items) { - const removedCount = item.count - 1 - message += ` ${item.key} (${removedCount}ร— duplicate)\n` - } - } - } - - if (llmPrunedIds.length > 0) { - message += `\n๐Ÿค– LLM analysis (${llmPrunedIds.length}):\n` - const toolsSummary = this.buildToolsSummary(llmPrunedIds, toolMetadata) - - for (const [toolName, params] of toolsSummary.entries()) { - if (params.length > 0) { - message += ` ${toolName} (${params.length}):\n` - for (const param of params) { - message += ` ${param}\n` - } - } - } - - const foundToolNames = new Set(toolsSummary.keys()) - const missingTools = llmPrunedIds.filter(id => { - const normalizedId = id.toLowerCase() - const metadata = toolMetadata.get(normalizedId) - if (metadata?.tool === 'batch') return false - return !metadata || !foundToolNames.has(metadata.tool) - }) - - if (missingTools.length > 0) { - message += ` (${missingTools.length} tool${missingTools.length > 1 ? 
's' : ''} with unknown metadata)\n` - } - } - - await this.sendIgnoredMessage(sessionID, message.trim(), agent) - } -} diff --git a/lib/pruning-tool.ts b/lib/pruning-tool.ts index 5401988..20db977 100644 --- a/lib/pruning-tool.ts +++ b/lib/pruning-tool.ts @@ -1,9 +1,11 @@ import { tool } from "@opencode-ai/plugin" -import type { Janitor } from "./janitor" +import type { JanitorContext } from "./core/janitor" +import { runOnTool } from "./core/janitor" +import { formatPruningResultForTool } from "./ui/notification" import type { PluginConfig } from "./config" -import type { ToolTracker } from "./synth-instruction" -import { resetToolTrackerCount } from "./synth-instruction" -import { loadPrompt } from "./prompt" +import type { ToolTracker } from "./api-formats/synth-instruction" +import { resetToolTrackerCount } from "./api-formats/synth-instruction" +import { loadPrompt } from "./core/prompt" import { isSubagentSession } from "./hooks" /** Tool description for the prune tool, loaded from prompts/tool.txt */ @@ -13,7 +15,7 @@ export const CONTEXT_PRUNING_DESCRIPTION = loadPrompt("tool") * Creates the prune tool definition. * Returns a tool definition that can be passed to the plugin's tool registry. */ -export function createPruningTool(client: any, janitor: Janitor, config: PluginConfig, toolTracker: ToolTracker): ReturnType { +export function createPruningTool(client: any, janitorCtx: JanitorContext, config: PluginConfig, toolTracker: ToolTracker): ReturnType { return tool({ description: CONTEXT_PRUNING_DESCRIPTION, args: { @@ -28,7 +30,8 @@ export function createPruningTool(client: any, janitor: Janitor, config: PluginC return "Pruning is unavailable in subagent sessions. Do not call this tool again. Continue with your current task - if you were in the middle of work, proceed with your next step. If you had just finished, provide your final summary/findings to return to the main agent." 
} - const result = await janitor.runForTool( + const result = await runOnTool( + janitorCtx, ctx.sessionID, config.strategies.onTool, args.reason @@ -48,7 +51,7 @@ export function createPruningTool(client: any, janitor: Janitor, config: PluginC return "No prunable tool outputs found. Context is already optimized." + postPruneGuidance } - return janitor.formatPruningResultForTool(result) + postPruneGuidance + return formatPruningResultForTool(result, janitorCtx.config.workingDirectory) + postPruneGuidance }, }) } diff --git a/lib/state.ts b/lib/state/index.ts similarity index 58% rename from lib/state.ts rename to lib/state/index.ts index 3bdb422..b48c656 100644 --- a/lib/state.ts +++ b/lib/state/index.ts @@ -1,32 +1,17 @@ -import type { SessionStats } from "./janitor" -import type { Logger } from "./logger" -import { loadSessionState } from "./state-persistence" +import type { SessionStats, GCStats } from "../core/janitor" +import type { Logger } from "../logger" +import { loadSessionState } from "./persistence" -/** - * Centralized state management for the DCP plugin. - * All mutable state is stored here and shared across modules. - */ export interface PluginState { - /** Map of session IDs to arrays of pruned tool call IDs */ prunedIds: Map - /** Map of session IDs to session statistics */ stats: Map - /** Cache of tool call IDs to their parameters */ + gcPending: Map toolParameters: Map - /** Cache of session IDs to their model info */ model: Map - /** - * Maps Google/Gemini tool positions to OpenCode tool call IDs for correlation. 
- * Key: sessionID, Value: Map where positionKey is "toolName:index" - */ googleToolCallMapping: Map> - /** Set of session IDs that have been restored from disk */ restoredSessions: Set - /** Set of session IDs we've already checked for subagent status (to avoid redundant API calls) */ checkedSessions: Set - /** Set of session IDs that are subagents (have a parentID) - used to skip fetch wrapper processing */ subagentSessions: Set - /** The most recent session ID seen in chat.params - used to correlate fetch requests */ lastSeenSessionId: string | null } @@ -40,13 +25,11 @@ export interface ModelInfo { modelID: string } -/** - * Creates a fresh plugin state instance. - */ export function createPluginState(): PluginState { return { prunedIds: new Map(), stats: new Map(), + gcPending: new Map(), toolParameters: new Map(), model: new Map(), googleToolCallMapping: new Map(), @@ -78,7 +61,12 @@ export async function ensureSessionRestored( }) } if (!state.stats.has(sessionId)) { - state.stats.set(sessionId, persisted.stats) + const stats: SessionStats = { + totalToolsPruned: persisted.stats.totalToolsPruned, + totalTokensSaved: persisted.stats.totalTokensSaved, + totalGCTokens: persisted.stats.totalGCTokens ?? 
0 + } + state.stats.set(sessionId, stats) } } } diff --git a/lib/state-persistence.ts b/lib/state/persistence.ts similarity index 96% rename from lib/state-persistence.ts rename to lib/state/persistence.ts index 384e610..b394ef2 100644 --- a/lib/state-persistence.ts +++ b/lib/state/persistence.ts @@ -8,8 +8,8 @@ import * as fs from "fs/promises"; import { existsSync } from "fs"; import { homedir } from "os"; import { join } from "path"; -import type { SessionStats } from "./janitor"; -import type { Logger } from "./logger"; +import type { SessionStats } from "../core/janitor"; +import type { Logger } from "../logger"; export interface PersistedSessionState { sessionName?: string; diff --git a/lib/tool-cache.ts b/lib/state/tool-cache.ts similarity index 97% rename from lib/tool-cache.ts rename to lib/state/tool-cache.ts index 669fa0f..aa57b4b 100644 --- a/lib/tool-cache.ts +++ b/lib/state/tool-cache.ts @@ -1,4 +1,4 @@ -import type { PluginState } from "./state" +import type { PluginState } from "./index" /** * Cache tool parameters from OpenAI Chat Completions style messages. 
diff --git a/lib/display-utils.ts b/lib/ui/display-utils.ts similarity index 95% rename from lib/display-utils.ts rename to lib/ui/display-utils.ts index 8006830..6e4e9e2 100644 --- a/lib/display-utils.ts +++ b/lib/ui/display-utils.ts @@ -64,9 +64,6 @@ export function extractParameterKey(metadata: { tool: string, parameters?: any } if (tool === "task" && parameters.description) { return parameters.description } - if (tool === "batch") { - return `${parameters.tool_calls?.length || 0} parallel tools` - } const paramStr = JSON.stringify(parameters) if (paramStr === '{}' || paramStr === '[]' || paramStr === 'null') { diff --git a/lib/ui/notification.ts b/lib/ui/notification.ts new file mode 100644 index 0000000..7ea5772 --- /dev/null +++ b/lib/ui/notification.ts @@ -0,0 +1,295 @@ +import type { Logger } from "../logger" +import type { SessionStats, GCStats, PruningResult } from "../core/janitor" +import { formatTokenCount } from "../tokenizer" +import { extractParameterKey } from "./display-utils" + +export type PruningSummaryLevel = "off" | "minimal" | "detailed" + +export interface NotificationConfig { + pruningSummary: PruningSummaryLevel + workingDirectory?: string +} + +export interface NotificationContext { + client: any + logger: Logger + config: NotificationConfig +} + +export interface NotificationData { + aiPrunedCount: number + aiTokensSaved: number + aiPrunedIds: string[] + toolMetadata: Map + gcPending: GCStats | null + sessionStats: SessionStats | null +} + +export async function sendIgnoredMessage( + ctx: NotificationContext, + sessionID: string, + text: string, + agent?: string +): Promise { + try { + await ctx.client.session.prompt({ + path: { id: sessionID }, + body: { + noReply: true, + agent: agent, + parts: [{ + type: 'text', + text: text, + ignored: true + }] + } + }) + } catch (error: any) { + ctx.logger.error("notification", "Failed to send notification", { error: error.message }) + } +} + +export async function sendUnifiedNotification( + ctx: 
NotificationContext, + sessionID: string, + data: NotificationData, + agent?: string +): Promise { + const hasAiPruning = data.aiPrunedCount > 0 + const hasGcActivity = data.gcPending && data.gcPending.toolsDeduped > 0 + + if (!hasAiPruning && !hasGcActivity) { + return false + } + + if (ctx.config.pruningSummary === 'off') { + return false + } + + const message = ctx.config.pruningSummary === 'minimal' + ? buildMinimalMessage(data) + : buildDetailedMessage(data, ctx.config.workingDirectory) + + await sendIgnoredMessage(ctx, sessionID, message, agent) + return true +} + +function buildMinimalMessage(data: NotificationData): string { + const hasAiPruning = data.aiPrunedCount > 0 + const hasGcActivity = data.gcPending && data.gcPending.toolsDeduped > 0 + + if (hasAiPruning) { + const gcTokens = hasGcActivity ? data.gcPending!.tokensCollected : 0 + const totalSaved = formatTokenCount(data.aiTokensSaved + gcTokens) + const toolText = data.aiPrunedCount === 1 ? 'tool' : 'tools' + + let cycleStats = `${data.aiPrunedCount} ${toolText}` + if (hasGcActivity) { + cycleStats += `, ~${formatTokenCount(data.gcPending!.tokensCollected)} 🗑️` + } + + let message = `🧹 DCP: ~${totalSaved} saved (${cycleStats})` + message += buildSessionSuffix(data.sessionStats, data.aiPrunedCount) + + return message + } else { + const tokensCollected = formatTokenCount(data.gcPending!.tokensCollected) + + let message = `🗑️ DCP: ~${tokensCollected} collected` + message += buildSessionSuffix(data.sessionStats, 0) + + return message + } +} + +function buildDetailedMessage(data: NotificationData, workingDirectory?: string): string { + const hasAiPruning = data.aiPrunedCount > 0 + const hasGcActivity = data.gcPending && data.gcPending.toolsDeduped > 0 + + let message: string + + if (hasAiPruning) { + const gcTokens = hasGcActivity ? data.gcPending!.tokensCollected : 0 + const totalSaved = formatTokenCount(data.aiTokensSaved + gcTokens) + const toolText = data.aiPrunedCount === 1 ? 
'tool' : 'tools' + + let cycleStats = `${data.aiPrunedCount} ${toolText}` + if (hasGcActivity) { + cycleStats += `, ~${formatTokenCount(data.gcPending!.tokensCollected)} 🗑️` + } + + message = `🧹 DCP: ~${totalSaved} saved (${cycleStats})` + message += buildSessionSuffix(data.sessionStats, data.aiPrunedCount) + message += '\n' + + message += `\n🤖 LLM analysis (${data.aiPrunedIds.length}):\n` + const toolsSummary = buildToolsSummary(data.aiPrunedIds, data.toolMetadata, workingDirectory) + + for (const [toolName, params] of toolsSummary.entries()) { + if (params.length > 0) { + message += ` ${toolName} (${params.length}):\n` + for (const param of params) { + message += ` ${param}\n` + } + } + } + + const foundToolNames = new Set(toolsSummary.keys()) + const missingTools = data.aiPrunedIds.filter(id => { + const normalizedId = id.toLowerCase() + const metadata = data.toolMetadata.get(normalizedId) + return !metadata || !foundToolNames.has(metadata.tool) + }) + + if (missingTools.length > 0) { + message += ` (${missingTools.length} tool${missingTools.length > 1 ? 
's' : ''} with unknown metadata)\n` + } + } else { + const tokensCollected = formatTokenCount(data.gcPending!.tokensCollected) + + message = `🗑️ DCP: ~${tokensCollected} collected` + message += buildSessionSuffix(data.sessionStats, 0) + } + + return message.trim() +} + +function buildSessionSuffix(sessionStats: SessionStats | null, currentAiPruned: number): string { + if (!sessionStats) { + return '' + } + + if (sessionStats.totalToolsPruned <= currentAiPruned) { + return '' + } + + const totalSaved = sessionStats.totalTokensSaved + sessionStats.totalGCTokens + let suffix = ` │ Session: ~${formatTokenCount(totalSaved)} (${sessionStats.totalToolsPruned} tools` + + if (sessionStats.totalGCTokens > 0) { + suffix += `, ~${formatTokenCount(sessionStats.totalGCTokens)} 🗑️` + } + + suffix += ')' + return suffix +} + +export function formatPruningResultForTool( + result: PruningResult, + workingDirectory?: string +): string { + const lines: string[] = [] + lines.push(`Context pruning complete. 
Pruned ${result.prunedCount} tool outputs.`) + lines.push('') + + if (result.llmPrunedIds.length > 0) { + lines.push(`Semantically pruned (${result.llmPrunedIds.length}):`) + const toolsSummary = buildToolsSummary(result.llmPrunedIds, result.toolMetadata, workingDirectory) + lines.push(...formatToolSummaryLines(toolsSummary)) + } + + return lines.join('\n').trim() +} + +export function buildToolsSummary( + prunedIds: string[], + toolMetadata: Map, + workingDirectory?: string +): Map { + const toolsSummary = new Map() + + for (const prunedId of prunedIds) { + const normalizedId = prunedId.toLowerCase() + const metadata = toolMetadata.get(normalizedId) + if (metadata) { + const toolName = metadata.tool + if (!toolsSummary.has(toolName)) { + toolsSummary.set(toolName, []) + } + + const paramKey = extractParameterKey(metadata) + if (paramKey) { + const displayKey = truncate(shortenPath(paramKey, workingDirectory), 80) + toolsSummary.get(toolName)!.push(displayKey) + } else { + toolsSummary.get(toolName)!.push('(default)') + } + } + } + + return toolsSummary +} + +export function formatToolSummaryLines( + toolsSummary: Map, + indent: string = ' ' +): string[] { + const lines: string[] = [] + + for (const [toolName, params] of toolsSummary.entries()) { + if (params.length === 1) { + lines.push(`${indent}${toolName}: ${params[0]}`) + } else if (params.length > 1) { + lines.push(`${indent}${toolName} (${params.length}):`) + for (const param of params) { + lines.push(`${indent} ${param}`) + } + } + } + + return lines +} + +function truncate(str: string, maxLen: number = 60): string { + if (str.length <= maxLen) return str + return str.slice(0, maxLen - 3) + '...' 
+} + +function shortenPath(input: string, workingDirectory?: string): string { + const inPathMatch = input.match(/^(.+) in (.+)$/) + if (inPathMatch) { + const prefix = inPathMatch[1] + const pathPart = inPathMatch[2] + const shortenedPath = shortenSinglePath(pathPart, workingDirectory) + return `${prefix} in ${shortenedPath}` + } + + return shortenSinglePath(input, workingDirectory) +} + +function shortenSinglePath(path: string, workingDirectory?: string): string { + const homeDir = require('os').homedir() + + if (workingDirectory) { + if (path.startsWith(workingDirectory + '/')) { + return path.slice(workingDirectory.length + 1) + } + if (path === workingDirectory) { + return '.' + } + } + + if (path.startsWith(homeDir)) { + path = '~' + path.slice(homeDir.length) + } + + const nodeModulesMatch = path.match(/node_modules\/(@[^\/]+\/[^\/]+|[^\/]+)\/(.*)/) + if (nodeModulesMatch) { + return `${nodeModulesMatch[1]}/${nodeModulesMatch[2]}` + } + + if (workingDirectory) { + const workingDirWithTilde = workingDirectory.startsWith(homeDir) + ? '~' + workingDirectory.slice(homeDir.length) + : null + + if (workingDirWithTilde && path.startsWith(workingDirWithTilde + '/')) { + return path.slice(workingDirWithTilde.length + 1) + } + if (workingDirWithTilde && path === workingDirWithTilde) { + return '.' 
+ } + } + + return path +} diff --git a/package-lock.json b/package-lock.json index 2845ba8..1953c96 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,7 +11,7 @@ "dependencies": { "@ai-sdk/openai-compatible": "^1.0.27", "@opencode-ai/sdk": "latest", - "@tarquinen/opencode-auth-provider": "^0.1.6", + "@tarquinen/opencode-auth-provider": "^0.1.7", "ai": "^5.0.98", "gpt-tokenizer": "^3.4.0", "jsonc-parser": "^3.3.1", @@ -1923,9 +1923,9 @@ "license": "MIT" }, "node_modules/@tarquinen/opencode-auth-provider": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/@tarquinen/opencode-auth-provider/-/opencode-auth-provider-0.1.6.tgz", - "integrity": "sha512-P1r318UtXAnkLodcVNpEX0PZP1wOhsvJTP4aX3LB958HCKc3JNz1JZecCeggBtEtHlz/NVLkGWiG5M5YCWCTDQ==", + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/@tarquinen/opencode-auth-provider/-/opencode-auth-provider-0.1.7.tgz", + "integrity": "sha512-FH1QEyoirr2e8b48Z6HrjioIZIZUIM9zOpYmku1ad+c4Nv70F37fSWhcObyIdZo4Ly3OntpKPWjadyRhd/kQcg==", "license": "MIT", "dependencies": { "@aws-sdk/credential-providers": "^3.936.0", diff --git a/package.json b/package.json index e5c8014..ba731fa 100644 --- a/package.json +++ b/package.json @@ -40,7 +40,7 @@ "dependencies": { "@ai-sdk/openai-compatible": "^1.0.27", "@opencode-ai/sdk": "latest", - "@tarquinen/opencode-auth-provider": "^0.1.6", + "@tarquinen/opencode-auth-provider": "^0.1.7", "ai": "^5.0.98", "gpt-tokenizer": "^3.4.0", "jsonc-parser": "^3.3.1",