
Commit d0df97f

refactor: remove redundant chat.params fetch wrapper, add direct janitor-shadow logging
Parent: 0024e53 · Commit: d0df97f

2 files changed: +27 additions, −140 deletions


index.ts

Lines changed: 9 additions & 139 deletions
@@ -206,11 +206,12 @@ const plugin: Plugin = (async (ctx) => {
          }
        } else if (shouldLogAllRequests) {
          // Log requests with NO tool messages (e.g., janitor's shadow inference)
-         // Detect if this is a janitor request by checking the prompt content
-         const isJanitorRequest = body.messages.length === 1 &&
-           body.messages[0]?.role === 'user' &&
-           typeof body.messages[0]?.content === 'string' &&
-           body.messages[0].content.includes('conversation analyzer that identifies obsolete tool outputs')
+         // Detect if this is a janitor request by checking any message for the janitor prompt
+         // Note: AI SDK may add system messages for JSON schema, so we check all messages
+         const isJanitorRequest = body.messages.some((m: any) =>
+           typeof m.content === 'string' &&
+           m.content.includes('conversation analyzer that identifies obsolete tool outputs')
+         )

          const sessionId = isJanitorRequest ? "janitor-shadow" : "global"

@@ -331,140 +332,9 @@ const plugin: Plugin = (async (ctx) => {
      // Skip pruning for subagent sessions
      if (await isSubagentSession(ctx.client, sessionId, logger)) return

-     logger.debug("chat.params", "Wrapping fetch for session", {
-       sessionID: sessionId,
-       hasFetch: !!(output.options as any).fetch,
-       fetchType: (output.options as any).fetch ? typeof (output.options as any).fetch : "none"
-     })
-
-     // Get the existing fetch - this might be from auth provider or globalThis
-     const existingFetch = (output.options as any).fetch ?? globalThis.fetch
-
-     logger.debug("chat.params", "Existing fetch captured", {
-       sessionID: sessionId,
-       isGlobalFetch: existingFetch === globalThis.fetch
-     })
-
-     // Wrap the existing fetch with our pruning logic
-     ;(output.options as any).fetch = async (fetchInput: any, init?: any) => {
-       logger.info("pruning-fetch", "🔥 FETCH WRAPPER CALLED!", {
-         sessionId,
-         url: typeof fetchInput === 'string' ? fetchInput.substring(0, 100) : 'URL object'
-       })
-       logger.debug("pruning-fetch", "Request intercepted", { sessionId })
-
-       // Retrieve the list of pruned tool call IDs from state
-       const prunedIds = await stateManager.get(sessionId)
-       logger.debug("pruning-fetch", "Retrieved pruned IDs", {
-         sessionId,
-         prunedCount: prunedIds.length,
-         prunedIds: prunedIds.length > 0 ? prunedIds : undefined
-       })
-
-       // Parse the request body once if possible for logging, caching, and filtering
-       let parsedBody: any | undefined
-       if (init?.body && typeof init.body === 'string') {
-         try {
-           parsedBody = JSON.parse(init.body)
-         } catch (e) {
-           // Ignore parse errors; we'll skip caching/filtering in this case
-         }
-       }
-
-       if (parsedBody?.messages) {
-         const toolMessages = parsedBody.messages.filter((m: any) => m.role === 'tool') || []
-         logger.debug("pruning-fetch", "Request body before filtering", {
-           sessionId,
-           totalMessages: parsedBody.messages.length,
-           toolMessages: toolMessages.length,
-           toolCallIds: toolMessages.map((m: any) => m.tool_call_id)
-         })
-
-         // Capture tool call parameters from assistant messages so Janitor toast metadata stays rich
-         cacheToolParameters(parsedBody.messages, "pruning-fetch")
-       }
-
-       // Reset the count for this request
-       let prunedThisRequest = 0
-
-       // Only attempt filtering if there are pruned IDs and a request body exists
-       if (prunedIds.length > 0 && init?.body) {
-         let body = parsedBody
-
-         if (!body && typeof init.body === 'string') {
-           try {
-             body = JSON.parse(init.body)
-           } catch (error: any) {
-             logger.error("pruning-fetch", "Failed to parse/filter request body", {
-               sessionId,
-               error: error.message,
-               stack: error.stack
-             })
-             return existingFetch(fetchInput, init)
-           }
-         }
-
-         if (body?.messages && Array.isArray(body.messages)) {
-           const originalMessageCount = body.messages.length
-
-           // Replace tool response messages whose tool_call_id is in the pruned list
-           // with a short placeholder message instead of removing them entirely.
-           // This preserves the message structure and avoids API validation errors.
-           body.messages = body.messages.map((m: any) => {
-             if (m.role === 'tool' && prunedIds.includes(m.tool_call_id)) {
-               prunedThisRequest++
-               return {
-                 ...m,
-                 content: '[Output removed to save context - information superseded or no longer needed]'
-               }
-             }
-             return m
-           })
-
-           if (prunedThisRequest > 0) {
-             logger.info("pruning-fetch", "Replaced pruned tool messages", {
-               sessionId,
-               totalMessages: originalMessageCount,
-               replacedCount: prunedThisRequest,
-               prunedIds
-             })
-
-             // Log remaining tool messages
-             const remainingToolMessages = body.messages.filter((m: any) => m.role === 'tool')
-             logger.debug("pruning-fetch", "Tool messages after replacement", {
-               sessionId,
-               totalToolCount: remainingToolMessages.length,
-               toolCallIds: remainingToolMessages.map((m: any) => m.tool_call_id)
-             })
-
-             // Save wrapped context to file if debug is enabled
-             await logger.saveWrappedContext(
-               sessionId,
-               body.messages,
-               {
-                 url: typeof fetchInput === 'string' ? fetchInput : 'URL object',
-                 totalMessages: originalMessageCount,
-                 replacedCount: prunedThisRequest,
-                 prunedIds,
-                 wrapper: 'session-specific'
-               }
-             )
-
-             // Update the request body with modified messages
-             init.body = JSON.stringify(body)
-             parsedBody = body
-           } else {
-             logger.debug("pruning-fetch", "No messages replaced", {
-               sessionId,
-               messageCount: originalMessageCount
-             })
-           }
-         }
-       }
-
-       // Call the EXISTING fetch (which might be from auth provider) with potentially modified body
-       return existingFetch(fetchInput, init)
-     }
+     // Note: Pruning is handled by the global fetch wrapper (lines 95-239)
+     // which intercepts all AI requests and replaces pruned tool outputs.
+     // The global wrapper uses case-insensitive matching and queries all sessions.
    },
  }
}) satisfies Plugin
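
The removed per-session wrapper duplicated work the plugin already does globally. For orientation, here is a minimal sketch of the kind of global fetch wrapper the note above refers to: it intercepts outgoing requests, replaces pruned tool outputs with a placeholder (matching IDs case-insensitively across all sessions), and forwards the request. `getPrunedIdsForAllSessions` and `PLACEHOLDER` are assumed names for illustration, not the plugin's actual API.

```typescript
// Sketch only: assumed helper that collects pruned tool-call IDs across every tracked session.
declare function getPrunedIdsForAllSessions(): Promise<string[]>

const originalFetch = globalThis.fetch

// Same placeholder text the session-specific wrapper used.
const PLACEHOLDER =
  '[Output removed to save context - information superseded or no longer needed]'

globalThis.fetch = (async (input: RequestInfo | URL, init?: RequestInit) => {
  // Only touch string JSON bodies we can parse; otherwise pass the request through untouched.
  if (init?.body && typeof init.body === 'string') {
    try {
      const body = JSON.parse(init.body)
      if (Array.isArray(body?.messages)) {
        // Case-insensitive matching against IDs gathered from all sessions.
        const prunedIds = new Set(
          (await getPrunedIdsForAllSessions()).map((id) => id.toLowerCase())
        )
        body.messages = body.messages.map((m: any) =>
          m.role === 'tool' &&
          typeof m.tool_call_id === 'string' &&
          prunedIds.has(m.tool_call_id.toLowerCase())
            ? { ...m, content: PLACEHOLDER }
            : m
        )
        init = { ...init, body: JSON.stringify(body) }
      }
    } catch {
      // Unparseable body: forward the request unchanged.
    }
  }
  return originalFetch(input, init)
}) as typeof fetch
```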

lib/janitor.ts

Lines changed: 18 additions & 1 deletion
@@ -332,14 +332,31 @@ export class Janitor {
    // Lazy import - only load the 2.8MB ai package when actually needed
    const { generateObject } = await import('ai')

+   // Build the prompt for analysis
+   const analysisPrompt = buildAnalysisPrompt(prunableToolCallIds, sanitizedMessages, this.protectedTools, allPrunedSoFar, protectedToolCallIds)
+
+   // Save janitor shadow context directly (auth providers may bypass globalThis.fetch)
+   await this.logger.saveWrappedContext(
+     "janitor-shadow",
+     [{ role: "user", content: analysisPrompt }],
+     {
+       sessionID,
+       modelProvider: modelSelection.modelInfo.providerID,
+       modelID: modelSelection.modelInfo.modelID,
+       candidateToolCount: prunableToolCallIds.length,
+       alreadyPrunedCount: allPrunedSoFar.length,
+       protectedToolCount: protectedToolCallIds.length
+     }
+   )
+
    // Analyze which tool calls are obsolete
    const result = await generateObject({
      model: modelSelection.model,
      schema: z.object({
        pruned_tool_call_ids: z.array(z.string()),
        reasoning: z.string(),
      }),
-     prompt: buildAnalysisPrompt(prunableToolCallIds, sanitizedMessages, this.protectedTools, allPrunedSoFar, protectedToolCallIds)
+     prompt: analysisPrompt
    })

    // Filter LLM results to only include IDs that were actually candidates
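
For readers unfamiliar with the structured-output call the janitor uses, here is a minimal, self-contained sketch of the `generateObject` + zod pattern together with the candidate filtering mentioned in the last context line. The model choice and function name are illustrative assumptions; the plugin resolves its model dynamically via `modelSelection`.

```typescript
import { generateObject } from 'ai'
import { openai } from '@ai-sdk/openai'
import { z } from 'zod'

// Sketch: ask the model which tool calls are obsolete, then keep only IDs
// that were genuinely prunable candidates.
async function analyzeObsoleteToolCalls(
  analysisPrompt: string,
  prunableToolCallIds: string[]
): Promise<string[]> {
  const result = await generateObject({
    model: openai('gpt-4o-mini'), // assumed model; the plugin selects one at runtime
    schema: z.object({
      pruned_tool_call_ids: z.array(z.string()),
      reasoning: z.string(),
    }),
    prompt: analysisPrompt,
  })

  // Drop any IDs the model invented or that were protected from pruning.
  const candidates = new Set(prunableToolCallIds)
  return result.object.pruned_tool_call_ids.filter((id) => candidates.has(id))
}
```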
