Commit 80220dd

fix: restore onIdle LLM analysis and remove dead code
- Restore model-selector.ts for LLM model selection with provider fallback
- Restore runLlmAnalysis() and replacePrunedToolOutputs() in janitor.ts
- Restore buildAnalysisPrompt() and minimizeMessages() in prompt.ts
- Restore pruning.txt prompt template
- Restore ModelInfo interface and state.model map for model caching
- Add model caching back to hooks.ts chat.params handler
- Remove dead code: getNumericIdsForActual, getNumericId, getAllMappings, hasMapping, getNextId
1 parent 86c564d commit 80220dd
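
For orientation, the ModelInfo interface and state.model map mentioned above are not shown in this diff, but their shape can be inferred from the hooks.ts hunk that writes the cache and the janitor.ts hunk that reads it. A hypothetical TypeScript reconstruction (names and placement assumed, not taken from the actual source files):

// Hypothetical reconstruction; the real definitions live in files
// outside this diff (likely under lib/state). Inferred from:
//   hooks.ts:   state.model.set(sessionId, { providerID, modelID })
//   janitor.ts: state.model.get(sessionID)
export interface ModelInfo {
  providerID: string
  modelID: string
}

// Assumed slice of PluginState: session ID -> last model used in that session.
export interface PluginStateModelSlice {
  model: Map<string, ModelInfo>
}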

File tree

8 files changed: +508, -40 lines changed


lib/api-formats/prunable-list.ts

Lines changed: 1 addition & 18 deletions
@@ -11,7 +11,7 @@
  */
 
 import { extractParameterKey } from '../ui/display-utils'
-import { getOrCreateNumericId, getNumericId } from '../state/id-mapping'
+import { getOrCreateNumericId } from '../state/id-mapping'
 
 export interface ToolMetadata {
   tool: string
@@ -107,23 +107,6 @@ export function buildEndInjection(
   return parts.join('\n\n')
 }
 
-/**
- * Gets the numeric IDs for a list of actual tool call IDs.
- * Used when the prune tool needs to show what was pruned.
- */
-export function getNumericIdsForActual(
-  sessionId: string,
-  actualIds: string[]
-): number[] {
-  return actualIds
-    .map(id => getNumericId(sessionId, id))
-    .filter((id): id is number => id !== undefined)
-}
-
-// ============================================================================
-// Injection Functions
-// ============================================================================
-
 // ============================================================================
 // OpenAI Chat / Anthropic Format
 // ============================================================================
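
The surviving import, getOrCreateNumericId, is the heart of the numeric-aliasing scheme this file depends on: each session maps long tool call IDs to small, stable integers. A minimal sketch of that contract, with an invented allocation scheme (the real module is lib/state/id-mapping and is not shown in this diff):

// Sketch only: storage layout and allocation order are assumptions.
const idMaps = new Map<string, Map<string, number>>()

function getOrCreateNumericId(sessionId: string, actualId: string): number {
  let map = idMaps.get(sessionId)
  if (!map) {
    map = new Map()
    idMaps.set(sessionId, map)
  }
  const existing = map.get(actualId)
  if (existing !== undefined) return existing
  const next = map.size + 1 // assumed: sequential per-session IDs
  map.set(actualId, next)
  return next
}

// The same actual ID always yields the same alias within a session:
// getOrCreateNumericId('s1', 'call_abc') === getOrCreateNumericId('s1', 'call_abc')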

lib/core/janitor.ts

Lines changed: 159 additions & 3 deletions
@@ -1,6 +1,9 @@
+import { z } from "zod"
 import type { Logger } from "../logger"
 import type { PruningStrategy } from "../config"
 import type { PluginState } from "../state"
+import { buildAnalysisPrompt } from "./prompt"
+import { selectModel, extractModelFromSession } from "../model-selector"
 import { estimateTokensBatch, formatTokenCount } from "../tokenizer"
 import { saveSessionState } from "../state/persistence"
 import { ensureSessionRestored } from "../state"
@@ -142,9 +145,21 @@ async function runWithStrategies(
     return !metadata || !config.protectedTools.includes(metadata.tool)
   }).length
 
-  // For onIdle, we currently don't have AI analysis implemented
-  // This is a placeholder for future idle pruning strategies
-  const llmPrunedIds: string[] = []
+  // PHASE 1: LLM ANALYSIS
+  let llmPrunedIds: string[] = []
+
+  if (strategies.includes('ai-analysis') && unprunedToolCallIds.length > 0) {
+    llmPrunedIds = await runLlmAnalysis(
+      ctx,
+      sessionID,
+      sessionInfo,
+      messages,
+      unprunedToolCallIds,
+      alreadyPrunedIds,
+      toolMetadata,
+      options
+    )
+  }
 
   const finalNewlyPrunedIds = llmPrunedIds.filter(id => !alreadyPrunedIds.includes(id))
 
@@ -237,6 +252,147 @@
   }
 }
 
+// ============================================================================
+// LLM Analysis
+// ============================================================================
+
+async function runLlmAnalysis(
+  ctx: JanitorContext,
+  sessionID: string,
+  sessionInfo: any,
+  messages: any[],
+  unprunedToolCallIds: string[],
+  alreadyPrunedIds: string[],
+  toolMetadata: Map<string, { tool: string, parameters?: any }>,
+  options: PruningOptions
+): Promise<string[]> {
+  const { client, state, logger, config } = ctx
+
+  const protectedToolCallIds: string[] = []
+  const prunableToolCallIds = unprunedToolCallIds.filter(id => {
+    const metadata = toolMetadata.get(id)
+    if (metadata && config.protectedTools.includes(metadata.tool)) {
+      protectedToolCallIds.push(id)
+      return false
+    }
+    return true
+  })
+
+  if (prunableToolCallIds.length === 0) {
+    return []
+  }
+
+  const cachedModelInfo = state.model.get(sessionID)
+  const sessionModelInfo = extractModelFromSession(sessionInfo, logger)
+  const currentModelInfo = cachedModelInfo || sessionModelInfo
+
+  const modelSelection = await selectModel(currentModelInfo, logger, config.model, config.workingDirectory)
+
+  logger.info("janitor", `Model: ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, {
+    source: modelSelection.source
+  })
+
+  if (modelSelection.failedModel && config.showModelErrorToasts) {
+    const skipAi = modelSelection.source === 'fallback' && config.strictModelSelection
+    try {
+      await client.tui.showToast({
+        body: {
+          title: skipAi ? "DCP: AI analysis skipped" : "DCP: Model fallback",
+          message: skipAi
+            ? `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nAI analysis skipped (strictModelSelection enabled)`
+            : `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nUsing ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`,
+          variant: "info",
+          duration: 5000
+        }
+      })
+    } catch (toastError: any) {
+      // Ignore toast errors
+    }
+  }
+
+  if (modelSelection.source === 'fallback' && config.strictModelSelection) {
+    logger.info("janitor", "Skipping AI analysis (fallback model, strictModelSelection enabled)")
+    return []
+  }
+
+  const { generateObject } = await import('ai')
+
+  const sanitizedMessages = replacePrunedToolOutputs(messages, alreadyPrunedIds)
+
+  const analysisPrompt = buildAnalysisPrompt(
+    prunableToolCallIds,
+    sanitizedMessages,
+    alreadyPrunedIds,
+    protectedToolCallIds,
+    options.reason
+  )
+
+  await logger.saveWrappedContext(
+    "janitor-shadow",
+    [{ role: "user", content: analysisPrompt }],
+    {
+      sessionID,
+      modelProvider: modelSelection.modelInfo.providerID,
+      modelID: modelSelection.modelInfo.modelID,
+      candidateToolCount: prunableToolCallIds.length,
+      alreadyPrunedCount: alreadyPrunedIds.length,
+      protectedToolCount: protectedToolCallIds.length,
+      trigger: options.trigger,
+      reason: options.reason
+    }
+  )
+
+  const result = await generateObject({
+    model: modelSelection.model,
+    schema: z.object({
+      pruned_tool_call_ids: z.array(z.string()),
+      reasoning: z.string(),
+    }),
+    prompt: analysisPrompt
+  })
+
+  const rawLlmPrunedIds = result.object.pruned_tool_call_ids
+  const llmPrunedIds = rawLlmPrunedIds.filter(id =>
+    prunableToolCallIds.includes(id.toLowerCase())
+  )
+
+  if (llmPrunedIds.length > 0) {
+    const reasoning = result.object.reasoning.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim()
+    logger.info("janitor", `LLM reasoning: ${reasoning.substring(0, 200)}${reasoning.length > 200 ? '...' : ''}`)
+  }
+
+  return llmPrunedIds
+}
+
+function replacePrunedToolOutputs(messages: any[], prunedIds: string[]): any[] {
+  if (prunedIds.length === 0) return messages
+
+  const prunedIdsSet = new Set(prunedIds.map(id => id.toLowerCase()))
+
+  return messages.map(msg => {
+    if (!msg.parts) return msg
+
+    return {
+      ...msg,
+      parts: msg.parts.map((part: any) => {
+        if (part.type === 'tool' &&
+            part.callID &&
+            prunedIdsSet.has(part.callID.toLowerCase()) &&
+            part.state?.output) {
+          return {
+            ...part,
+            state: {
+              ...part.state,
+              output: '[Output removed to save context - information superseded or no longer needed]'
+            }
+          }
+        }
+        return part
+      })
+    }
+  })
+}
+
 // ============================================================================
 // Message parsing
 // ============================================================================
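
The core of runLlmAnalysis is the structured-output call: generateObject from the Vercel AI SDK (the 'ai' package) constrains the model's reply to a zod schema, so no free-text parsing is needed. A standalone sketch of just that step, using the schema from the diff (model selection, logging, and message sanitization elided; the real code imports 'ai' dynamically):

import { generateObject, type LanguageModel } from 'ai'
import { z } from 'zod'

const pruneSchema = z.object({
  pruned_tool_call_ids: z.array(z.string()),
  reasoning: z.string(),
})

// Sketch: `model` would come from selectModel() in the real flow.
async function analyzeForPruning(model: LanguageModel, prompt: string): Promise<string[]> {
  const result = await generateObject({ model, schema: pruneSchema, prompt })
  // result.object is typed as { pruned_tool_call_ids: string[], reasoning: string }
  return result.object.pruned_tool_call_ids
}

Note that the diff's validation step lowercases each model-returned ID before checking membership in prunableToolCallIds, so a match requires the candidate IDs to already be lowercase.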

lib/core/prompt.ts

Lines changed: 127 additions & 0 deletions
@@ -11,3 +11,130 @@ export function loadPrompt(name: string, vars?: Record<string, string>): string
   }
   return content
 }
+
+function minimizeMessages(messages: any[], alreadyPrunedIds?: string[], protectedToolCallIds?: string[]): any[] {
+  const prunedIdsSet = alreadyPrunedIds ? new Set(alreadyPrunedIds.map(id => id.toLowerCase())) : new Set()
+  const protectedIdsSet = protectedToolCallIds ? new Set(protectedToolCallIds.map(id => id.toLowerCase())) : new Set()
+
+  return messages.map(msg => {
+    const minimized: any = {
+      role: msg.info?.role
+    }
+
+    if (msg.parts) {
+      minimized.parts = msg.parts
+        .filter((part: any) => {
+          if (part.type === 'step-start' || part.type === 'step-finish') {
+            return false
+          }
+          return true
+        })
+        .map((part: any) => {
+          if (part.type === 'text') {
+            if (part.ignored) {
+              return null
+            }
+            return {
+              type: 'text',
+              text: part.text
+            }
+          }
+
+          // TODO: This should use the opencode normalized system instead of per provider settings
+          if (part.type === 'reasoning') {
+            // Calculate encrypted content size if present
+            let encryptedContentLength = 0
+            if (part.metadata?.openai?.reasoningEncryptedContent) {
+              encryptedContentLength = part.metadata.openai.reasoningEncryptedContent.length
+            } else if (part.metadata?.anthropic?.signature) {
+              encryptedContentLength = part.metadata.anthropic.signature.length
+            } else if (part.metadata?.google?.thoughtSignature) {
+              encryptedContentLength = part.metadata.google.thoughtSignature.length
+            }
+
+            return {
+              type: 'reasoning',
+              text: part.text,
+              textLength: part.text?.length || 0,
+              encryptedContentLength,
+              ...(part.time && { time: part.time }),
+              ...(part.metadata && { metadataKeys: Object.keys(part.metadata) })
+            }
+          }
+
+          if (part.type === 'tool') {
+            const callIDLower = part.callID?.toLowerCase()
+            const isAlreadyPruned = prunedIdsSet.has(callIDLower)
+            const isProtected = protectedIdsSet.has(callIDLower)
+
+            let displayCallID = part.callID
+            if (isAlreadyPruned) {
+              displayCallID = '<already-pruned>'
+            } else if (isProtected) {
+              displayCallID = '<protected>'
+            }
+
+            const toolPart: any = {
+              type: 'tool',
+              toolCallID: displayCallID,
+              tool: part.tool
+            }
+
+            if (part.state?.output) {
+              toolPart.output = part.state.output
+            }
+
+            if (part.state?.input) {
+              const input = part.state.input
+
+              if (input.filePath && (part.tool === 'write' || part.tool === 'edit' || part.tool === 'multiedit' || part.tool === 'patch')) {
+                toolPart.input = input
+              }
+              else if (input.filePath) {
+                toolPart.input = { filePath: input.filePath }
+              }
+              else if (input.tool_calls && Array.isArray(input.tool_calls)) {
+                toolPart.input = {
+                  batch_summary: `${input.tool_calls.length} tool calls`,
+                  tools: input.tool_calls.map((tc: any) => tc.tool)
+                }
+              }
+              else {
+                toolPart.input = input
+              }
+            }
+
+            return toolPart
+          }
+
+          return null
+        })
+        .filter(Boolean)
+    }
+
+    return minimized
+  }).filter(msg => {
+    return msg.parts && msg.parts.length > 0
+  })
+}
+
+export function buildAnalysisPrompt(
+  unprunedToolCallIds: string[],
+  messages: any[],
+  alreadyPrunedIds?: string[],
+  protectedToolCallIds?: string[],
+  reason?: string
+): string {
+  const minimizedMessages = minimizeMessages(messages, alreadyPrunedIds, protectedToolCallIds)
+  const messagesJson = JSON.stringify(minimizedMessages, null, 2).replace(/\\n/g, '\n')
+
+  const reasonContext = reason
+    ? `\nContext: The AI has requested pruning with the following reason: "${reason}"\nUse this context to inform your decisions about what is most relevant to keep.`
+    : ''
+
+  return loadPrompt("pruning", {
+    reason_context: reasonContext,
+    available_tool_call_ids: unprunedToolCallIds.join(", "),
+    session_history: messagesJson
+  })
+}
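
To make the minimization concrete, here is an illustrative fixture and the shape minimizeMessages reduces it to, assuming the opencode message-part layout used throughout this diff (minimizeMessages is module-private, so this is for reading, not importing):

// Input: one assistant message with a step marker, a text part, and a read tool call.
const messages = [
  {
    info: { role: 'assistant' },
    parts: [
      { type: 'step-start' },                        // dropped by the filter
      { type: 'text', text: 'Reading the config' },  // kept as { type, text }
      {
        type: 'tool',
        callID: 'call_A1',
        tool: 'read',
        state: { input: { filePath: 'src/app.ts' }, output: '...file body...' }
      }
    ]
  }
]

// Minimized output: 'read' is not a write/edit tool, so only filePath survives
// from the input, while the tool output is kept verbatim for the LLM to judge:
// [
//   {
//     role: 'assistant',
//     parts: [
//       { type: 'text', text: 'Reading the config' },
//       { type: 'tool', toolCallID: 'call_A1', tool: 'read',
//         output: '...file body...', input: { filePath: 'src/app.ts' } }
//     ]
//   }
// ]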

lib/hooks.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@ export function createChatParamsHandler(
6565
return async (input: any, _output: any) => {
6666
const sessionId = input.sessionID
6767
let providerID = (input.provider as any)?.info?.id || input.provider?.id
68+
const modelID = input.model?.id
6869

6970
if (!providerID && input.message?.model?.providerID) {
7071
providerID = input.message.model.providerID
@@ -89,6 +90,14 @@ export function createChatParamsHandler(
8990
}
9091
}
9192

93+
// Cache model info for the session (used by janitor for model selection)
94+
if (providerID && modelID) {
95+
state.model.set(sessionId, {
96+
providerID: providerID,
97+
modelID: modelID
98+
})
99+
}
100+
92101
// Build position-based mapping for Gemini (which loses tool call IDs in native format)
93102
if (providerID === 'google' || providerID === 'google-vertex') {
94103
try {
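
Taken together with the janitor.ts hunk above, the cache has a simple write/read contract; a condensed view (not new code, just the two sides of the diff placed side by side):

// Write side: chat.params handler (lib/hooks.ts)
state.model.set(sessionId, { providerID, modelID })

// Read side: runLlmAnalysis (lib/core/janitor.ts), cache first, session fallback
const currentModelInfo =
  state.model.get(sessionID) || extractModelFromSession(sessionInfo, logger)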
