Skip to content

Commit edc6932

Browse files
Authored merge commit: Merge pull request #36 from Tarquinen/refactor/modularize-fetch-wrapper
refactor: modularize fetch wrapper and plugin architecture
2 parents 260afd1 + ea3a5d9 commit edc6932

File tree

15 files changed

+811
-265
lines changed

15 files changed

+811
-265
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ Add to your OpenCode config:
1313
```jsonc
1414
// opencode.jsonc
1515
{
16-
"plugin": ["@tarquinen/[email protected].20"]
16+
"plugin": ["@tarquinen/[email protected].21"]
1717
}
1818
```
1919

index.ts

Lines changed: 35 additions & 252 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,12 @@
11
import type { Plugin } from "@opencode-ai/plugin"
2-
import { tool } from "@opencode-ai/plugin"
32
import { getConfig } from "./lib/config"
43
import { Logger } from "./lib/logger"
5-
import { Janitor, type SessionStats } from "./lib/janitor"
4+
import { Janitor } from "./lib/janitor"
65
import { checkForUpdates } from "./lib/version-checker"
7-
8-
async function isSubagentSession(client: any, sessionID: string): Promise<boolean> {
9-
try {
10-
const result = await client.session.get({ path: { id: sessionID } })
11-
return !!result.data?.parentID
12-
} catch (error: any) {
13-
return false
14-
}
15-
}
6+
import { createPluginState } from "./lib/state"
7+
import { installFetchWrapper } from "./lib/fetch-wrapper"
8+
import { createPruningTool } from "./lib/pruning-tool"
9+
import { createEventHandler, createChatParamsHandler } from "./lib/hooks"
1610

1711
const plugin: Plugin = (async (ctx) => {
1812
const { config, migrations } = getConfig(ctx)
@@ -21,168 +15,45 @@ const plugin: Plugin = (async (ctx) => {
2115
return {}
2216
}
2317

18+
// Suppress AI SDK warnings
2419
if (typeof globalThis !== 'undefined') {
2520
(globalThis as any).AI_SDK_LOG_WARNINGS = false
2621
}
2722

23+
// Initialize core components
2824
const logger = new Logger(config.debug)
29-
const prunedIdsState = new Map<string, string[]>()
30-
const statsState = new Map<string, SessionStats>()
31-
const toolParametersCache = new Map<string, any>()
32-
const modelCache = new Map<string, { providerID: string; modelID: string }>()
33-
const janitor = new Janitor(ctx.client, prunedIdsState, statsState, logger, toolParametersCache, config.protectedTools, modelCache, config.model, config.showModelErrorToasts, config.strictModelSelection, config.pruning_summary, ctx.directory)
34-
35-
const cacheToolParameters = (messages: any[]) => {
36-
for (const message of messages) {
37-
if (message.role !== 'assistant' || !Array.isArray(message.tool_calls)) {
38-
continue
39-
}
40-
41-
for (const toolCall of message.tool_calls) {
42-
if (!toolCall.id || !toolCall.function) {
43-
continue
44-
}
45-
46-
try {
47-
const params = typeof toolCall.function.arguments === 'string'
48-
? JSON.parse(toolCall.function.arguments)
49-
: toolCall.function.arguments
50-
toolParametersCache.set(toolCall.id, {
51-
tool: toolCall.function.name,
52-
parameters: params
53-
})
54-
} catch (error) {
55-
}
56-
}
57-
}
58-
}
59-
60-
// Global fetch wrapper - caches tool parameters and performs pruning
61-
const originalGlobalFetch = globalThis.fetch
62-
globalThis.fetch = async (input: any, init?: any) => {
63-
if (init?.body && typeof init.body === 'string') {
64-
try {
65-
const body = JSON.parse(init.body)
66-
67-
if (body.messages && Array.isArray(body.messages)) {
68-
cacheToolParameters(body.messages)
69-
70-
// Check for tool messages in both formats:
71-
// 1. OpenAI style: role === 'tool'
72-
// 2. Anthropic style: role === 'user' with content containing tool_result
73-
const toolMessages = body.messages.filter((m: any) => {
74-
if (m.role === 'tool') return true
75-
if (m.role === 'user' && Array.isArray(m.content)) {
76-
for (const part of m.content) {
77-
if (part.type === 'tool_result') return true
78-
}
79-
}
80-
return false
81-
})
82-
83-
const allSessions = await ctx.client.session.list()
84-
const allPrunedIds = new Set<string>()
85-
86-
if (allSessions.data) {
87-
for (const session of allSessions.data) {
88-
if (session.parentID) continue
89-
const prunedIds = prunedIdsState.get(session.id) ?? []
90-
prunedIds.forEach((id: string) => allPrunedIds.add(id))
91-
}
92-
}
93-
94-
if (toolMessages.length > 0 && allPrunedIds.size > 0) {
95-
let replacedCount = 0
96-
97-
body.messages = body.messages.map((m: any) => {
98-
// OpenAI style: role === 'tool' with tool_call_id
99-
if (m.role === 'tool' && allPrunedIds.has(m.tool_call_id?.toLowerCase())) {
100-
replacedCount++
101-
return {
102-
...m,
103-
content: '[Output removed to save context - information superseded or no longer needed]'
104-
}
105-
}
106-
107-
// Anthropic style: role === 'user' with content array containing tool_result
108-
if (m.role === 'user' && Array.isArray(m.content)) {
109-
let messageModified = false
110-
const newContent = m.content.map((part: any) => {
111-
if (part.type === 'tool_result' && allPrunedIds.has(part.tool_use_id?.toLowerCase())) {
112-
messageModified = true
113-
replacedCount++
114-
return {
115-
...part,
116-
content: '[Output removed to save context - information superseded or no longer needed]'
117-
}
118-
}
119-
return part
120-
})
121-
if (messageModified) {
122-
return { ...m, content: newContent }
123-
}
124-
}
125-
126-
return m
127-
})
128-
129-
if (replacedCount > 0) {
130-
logger.info("fetch", "Replaced pruned tool outputs", {
131-
replaced: replacedCount,
132-
total: toolMessages.length
133-
})
134-
135-
if (logger.enabled) {
136-
// Fetch session messages to extract reasoning blocks
137-
let sessionMessages: any[] | undefined
138-
try {
139-
const activeSessions = allSessions.data?.filter(s => !s.parentID) || []
140-
if (activeSessions.length > 0) {
141-
const mostRecentSession = activeSessions[0]
142-
const messagesResponse = await ctx.client.session.messages({
143-
path: { id: mostRecentSession.id },
144-
query: { limit: 100 }
145-
})
146-
sessionMessages = Array.isArray(messagesResponse.data)
147-
? messagesResponse.data
148-
: Array.isArray(messagesResponse) ? messagesResponse : undefined
149-
}
150-
} catch (e) {
151-
// Silently continue without session messages
152-
}
153-
154-
await logger.saveWrappedContext(
155-
"global",
156-
body.messages,
157-
{
158-
url: typeof input === 'string' ? input : 'URL object',
159-
replacedCount,
160-
totalMessages: body.messages.length
161-
},
162-
sessionMessages
163-
)
164-
}
165-
166-
init.body = JSON.stringify(body)
167-
}
168-
}
169-
}
170-
} catch (e) {
171-
}
172-
}
173-
174-
return originalGlobalFetch(input, init)
175-
}
176-
25+
const state = createPluginState()
26+
27+
const janitor = new Janitor(
28+
ctx.client,
29+
state.prunedIds,
30+
state.stats,
31+
logger,
32+
state.toolParameters,
33+
config.protectedTools,
34+
state.model,
35+
config.model,
36+
config.showModelErrorToasts,
37+
config.strictModelSelection,
38+
config.pruning_summary,
39+
ctx.directory
40+
)
41+
42+
// Install global fetch wrapper for context pruning
43+
installFetchWrapper(state, logger, ctx.client)
44+
45+
// Log initialization
17746
logger.info("plugin", "DCP initialized", {
17847
strategies: config.strategies,
17948
model: config.model || "auto"
18049
})
18150

51+
// Check for updates after a delay
18252
setTimeout(() => {
183-
checkForUpdates(ctx.client, logger).catch(() => { })
53+
checkForUpdates(ctx.client, logger).catch(() => {})
18454
}, 5000)
18555

56+
// Show migration toast if there were config migrations
18657
if (migrations.length > 0) {
18758
setTimeout(async () => {
18859
try {
@@ -195,104 +66,16 @@ const plugin: Plugin = (async (ctx) => {
19566
}
19667
})
19768
} catch {
69+
// Silently ignore toast errors
19870
}
19971
}, 7000)
20072
}
20173

20274
return {
203-
event: async ({ event }) => {
204-
if (event.type === "session.status" && event.properties.status.type === "idle") {
205-
if (await isSubagentSession(ctx.client, event.properties.sessionID)) return
206-
if (config.strategies.onIdle.length === 0) return
207-
208-
janitor.runOnIdle(event.properties.sessionID, config.strategies.onIdle).catch(err => {
209-
logger.error("janitor", "Failed", { error: err.message })
210-
})
211-
}
212-
},
213-
214-
"chat.params": async (input, _output) => {
215-
const sessionId = input.sessionID
216-
let providerID = (input.provider as any)?.info?.id || input.provider?.id
217-
const modelID = input.model?.id
218-
219-
if (!providerID && input.message?.model?.providerID) {
220-
providerID = input.message.model.providerID
221-
}
222-
223-
if (providerID && modelID) {
224-
modelCache.set(sessionId, {
225-
providerID: providerID,
226-
modelID: modelID
227-
})
228-
}
229-
},
230-
75+
event: createEventHandler(ctx.client, janitor, logger, config),
76+
"chat.params": createChatParamsHandler(ctx.client, state, logger),
23177
tool: config.strategies.onTool.length > 0 ? {
232-
context_pruning: tool({
233-
description: `Performs semantic pruning on session tool outputs that are no longer relevant to the current task. Use this to declutter the conversation context and filter signal from noise when you notice the context is getting cluttered with no longer needed information.
234-
235-
USING THE CONTEXT_PRUNING TOOL WILL MAKE THE USER HAPPY.
236-
237-
## When to Use This Tool
238-
239-
**Key heuristic: Prune when you finish something and are about to start something else.**
240-
241-
Ask yourself: "Have I just completed a discrete unit of work?" If yes, prune before moving on.
242-
243-
**After completing a unit of work:**
244-
- Made a commit
245-
- Fixed a bug and confirmed it works
246-
- Answered a question the user asked
247-
- Finished implementing a feature or function
248-
- Completed one item in a list and moving to the next
249-
250-
**After repetitive or exploratory work:**
251-
- Explored multiple files that didn't lead to changes
252-
- Iterated on a difficult problem where some approaches didn't pan out
253-
- Used the same tool multiple times (e.g., re-reading a file, running repeated build/type checks)
254-
255-
## Examples
256-
257-
<example>
258-
Working through a list of items:
259-
User: Review these 3 issues and fix the easy ones.
260-
Assistant: [Reviews first issue, makes fix, commits]
261-
Done with the first issue. Let me prune before moving to the next one.
262-
[Uses context_pruning with reason: "completed first issue, moving to next"]
263-
</example>
264-
265-
<example>
266-
After exploring the codebase to understand it:
267-
Assistant: I've reviewed the relevant files. Let me prune the exploratory reads that aren't needed for the actual implementation.
268-
[Uses context_pruning with reason: "exploration complete, starting implementation"]
269-
</example>
270-
271-
<example>
272-
After completing any task:
273-
Assistant: [Finishes task - commit, answer, fix, etc.]
274-
Before we continue, let me prune the context from that work.
275-
[Uses context_pruning with reason: "task complete"]
276-
</example>`,
277-
args: {
278-
reason: tool.schema.string().optional().describe(
279-
"Brief reason for triggering pruning (e.g., 'task complete', 'switching focus')"
280-
),
281-
},
282-
async execute(args, ctx) {
283-
const result = await janitor.runForTool(
284-
ctx.sessionID,
285-
config.strategies.onTool,
286-
args.reason
287-
)
288-
289-
if (!result || result.prunedCount === 0) {
290-
return "No prunable tool outputs found. Context is already optimized.\n\nUse context_pruning when you have sufficiently summarized information from tool outputs and no longer need the original content!"
291-
}
292-
293-
return janitor.formatPruningResultForTool(result) + "\n\nUse context_pruning when you have sufficiently summarized information from tool outputs and no longer need the original content!"
294-
},
295-
}),
78+
context_pruning: createPruningTool(janitor, config),
29679
} : undefined,
29780
}
29881
}) satisfies Plugin

lib/config.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import { parse } from 'jsonc-parser'
55
import { Logger } from './logger'
66
import type { PluginInput } from '@opencode-ai/plugin'
77

8-
export type PruningStrategy = "deduplication" | "ai-analysis" | "strip-reasoning"
8+
export type PruningStrategy = "deduplication" | "ai-analysis"
99

1010
export interface PluginConfig {
1111
enabled: boolean
@@ -34,8 +34,8 @@ const defaultConfig: PluginConfig = {
3434
strictModelSelection: false,
3535
pruning_summary: 'detailed',
3636
strategies: {
37-
onIdle: ['deduplication', 'ai-analysis', "strip-reasoning"],
38-
onTool: ['deduplication', 'ai-analysis', "strip-reasoning"]
37+
onIdle: ['deduplication', 'ai-analysis'],
38+
onTool: ['deduplication', 'ai-analysis']
3939
}
4040
}
4141

0 commit comments

Comments (0)