Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 18 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,27 @@ Automatically reduces token usage in OpenCode by removing obsolete tool outputs

![DCP in action](dcp-demo.png)

## Pruning Strategies

DCP implements two complementary strategies:

**Deduplication** — Fast, zero-cost pruning that identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs instantly with no LLM calls.

**AI Analysis** — Uses a language model to semantically analyze conversation context and identify tool outputs that are no longer relevant to the current task. More thorough but incurs LLM cost.

## Installation

Add to your OpenCode config (`~/.config/opencode/opencode.json` or `.opencode/opencode.json`):
Add to your OpenCode config:

```json
```jsonc
// opencode.jsonc
{
"plugin": ["@tarquinen/opencode-dcp"]
"plugins": ["@tarquinen/opencode-dcp@0.3.17"]
}
```

When a new version is available, DCP shows a toast notification. To update, change the version number in your config.

Restart OpenCode. The plugin will automatically start optimizing your sessions.

> **Note:** A project-level `plugin` array completely overrides the global one—if you use both configs, list every plugin you want in the project config.
Expand All @@ -36,6 +47,7 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
| `debug` | `false` | Log to `~/.config/opencode/logs/dcp/` |
| `model` | (session) | Model for analysis (e.g., `"anthropic/claude-haiku-4-5"`) |
| `showModelErrorToasts` | `true` | Show notifications on model fallback |
| `strictModelSelection` | `false` | Only run AI analysis with session or configured model (disables fallback models) |
| `pruning_summary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
| `protectedTools` | `["task", "todowrite", "todoread", "context_pruning"]` | Tools that are never pruned |
| `strategies.onIdle` | `["deduplication", "ai-analysis"]` | Strategies for automatic pruning |
Expand All @@ -54,13 +66,11 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
}
```

Settings merge: **Defaults** → **Global** → **Project**. Restart OpenCode after changes.
### Config Precedence

### Version Pinning
Settings are merged in order: **Defaults** → **Global** (`~/.config/opencode/dcp.jsonc`) → **Project** (`.opencode/dcp.jsonc`). Each level overrides the previous, so project settings take priority over global, which takes priority over defaults.

```json
{ "plugin": ["@tarquinen/[email protected]"] }
```
Restart OpenCode after making config changes.

## License

Expand Down
52 changes: 6 additions & 46 deletions index.ts
Original file line number Diff line number Diff line change
@@ -1,45 +1,36 @@
// index.ts - Main plugin entry point for Dynamic Context Pruning
import type { Plugin } from "@opencode-ai/plugin"
import { tool } from "@opencode-ai/plugin"
import { getConfig } from "./lib/config"
import { Logger } from "./lib/logger"
import { Janitor, type SessionStats } from "./lib/janitor"
import { checkForUpdates } from "./lib/version-checker"

/**
* Checks if a session is a subagent (child session)
* Subagent sessions should skip pruning operations
*/
/**
 * Checks whether a session is a subagent (child) session.
 *
 * A session is considered a subagent when the OpenCode API reports a
 * `parentID` for it; the plugin skips pruning for such sessions.
 *
 * @param client - OpenCode SDK client; only `client.session.get` is used here
 * @param sessionID - ID of the session to inspect
 * @returns `true` if the session has a parent (is a subagent), `false` otherwise.
 *          Fails open: any lookup error is treated as "not a subagent" so
 *          pruning continues rather than being blocked by a transient failure.
 */
async function isSubagentSession(client: any, sessionID: string): Promise<boolean> {
    try {
        const result = await client.session.get({ path: { id: sessionID } })
        return !!result.data?.parentID
    } catch {
        // Fail open: on error, assume it's a top-level session and continue.
        return false
    }
}

const plugin: Plugin = (async (ctx) => {
const { config, migrations } = getConfig(ctx)

// Exit early if plugin is disabled
if (!config.enabled) {
return {}
}

// Suppress AI SDK warnings about responseFormat (harmless for our use case)
if (typeof globalThis !== 'undefined') {
(globalThis as any).AI_SDK_LOG_WARNINGS = false
}

// Logger uses ~/.config/opencode/logs/dcp/ for consistent log location
const logger = new Logger(config.debug)
const prunedIdsState = new Map<string, string[]>()
const statsState = new Map<string, SessionStats>()
const toolParametersCache = new Map<string, any>() // callID -> parameters
const modelCache = new Map<string, { providerID: string; modelID: string }>() // sessionID -> model info
const janitor = new Janitor(ctx.client, prunedIdsState, statsState, logger, toolParametersCache, config.protectedTools, modelCache, config.model, config.showModelErrorToasts, config.pruning_summary, ctx.directory)
const toolParametersCache = new Map<string, any>()
const modelCache = new Map<string, { providerID: string; modelID: string }>()
const janitor = new Janitor(ctx.client, prunedIdsState, statsState, logger, toolParametersCache, config.protectedTools, modelCache, config.model, config.showModelErrorToasts, config.strictModelSelection, config.pruning_summary, ctx.directory)

const cacheToolParameters = (messages: any[]) => {
for (const message of messages) {
Expand All @@ -61,45 +52,37 @@ const plugin: Plugin = (async (ctx) => {
parameters: params
})
} catch (error) {
// Ignore JSON parse errors for individual tool calls
}
}
}
}

// Global fetch wrapper that both caches tool parameters AND performs pruning
// This works because all providers ultimately call globalThis.fetch
// Global fetch wrapper - caches tool parameters and performs pruning
const originalGlobalFetch = globalThis.fetch
globalThis.fetch = async (input: any, init?: any) => {
if (init?.body && typeof init.body === 'string') {
try {
const body = JSON.parse(init.body)
if (body.messages && Array.isArray(body.messages)) {
// Cache tool parameters for janitor metadata
cacheToolParameters(body.messages)

// Check for tool messages that might need pruning
const toolMessages = body.messages.filter((m: any) => m.role === 'tool')

// Collect all pruned IDs across all sessions (excluding subagents)
// This is safe because tool_call_ids are globally unique
const allSessions = await ctx.client.session.list()
const allPrunedIds = new Set<string>()

if (allSessions.data) {
for (const session of allSessions.data) {
if (session.parentID) continue // Skip subagent sessions
if (session.parentID) continue
const prunedIds = prunedIdsState.get(session.id) ?? []
prunedIds.forEach((id: string) => allPrunedIds.add(id))
}
}

// Only process tool message replacement if there are tool messages and pruned IDs
if (toolMessages.length > 0 && allPrunedIds.size > 0) {
let replacedCount = 0

body.messages = body.messages.map((m: any) => {
// Normalize ID to lowercase for case-insensitive matching
if (m.role === 'tool' && allPrunedIds.has(m.tool_call_id?.toLowerCase())) {
replacedCount++
return {
Expand All @@ -116,7 +99,6 @@ const plugin: Plugin = (async (ctx) => {
total: toolMessages.length
})

// Save wrapped context to file if debug is enabled
if (logger.enabled) {
await logger.saveWrappedContext(
"global",
Expand All @@ -129,13 +111,11 @@ const plugin: Plugin = (async (ctx) => {
)
}

// Update the request body with modified messages
init.body = JSON.stringify(body)
}
}
}
} catch (e) {
// Ignore parse errors and fall through to original fetch
}
}

Expand All @@ -147,10 +127,8 @@ const plugin: Plugin = (async (ctx) => {
model: config.model || "auto"
})

// Check for updates on launch (fire and forget)
checkForUpdates(ctx.client, logger).catch(() => { })

// Show migration toast if config was migrated (delayed to not overlap with version toast)
if (migrations.length > 0) {
setTimeout(async () => {
try {
Expand All @@ -163,42 +141,27 @@ const plugin: Plugin = (async (ctx) => {
}
})
} catch {
// Silently fail - toast is non-critical
}
}, 7000) // 7s delay to show after version toast (6s) completes
}, 7000)
}

return {
/**
* Event Hook: Triggers janitor analysis when session becomes idle
*/
event: async ({ event }) => {
if (event.type === "session.status" && event.properties.status.type === "idle") {
// Skip pruning for subagent sessions
if (await isSubagentSession(ctx.client, event.properties.sessionID)) return

// Skip if no idle strategies configured
if (config.strategies.onIdle.length === 0) return

// Fire and forget the janitor - don't block the event handler
janitor.runOnIdle(event.properties.sessionID, config.strategies.onIdle).catch(err => {
logger.error("janitor", "Failed", { error: err.message })
})
}
},

/**
* Chat Params Hook: Caches model info for janitor
*/
"chat.params": async (input, _output) => {
const sessionId = input.sessionID

// Cache model information for this session so janitor can access it
// The provider.id is actually nested at provider.info.id (not in SDK types)
let providerID = (input.provider as any)?.info?.id || input.provider?.id
const modelID = input.model?.id

// If provider.id is not available, try to get it from the message
if (!providerID && input.message?.model?.providerID) {
providerID = input.message.model.providerID
}
Expand All @@ -211,9 +174,6 @@ const plugin: Plugin = (async (ctx) => {
}
},

/**
* Tool Hook: Exposes context_pruning tool to AI (if configured)
*/
tool: config.strategies.onTool.length > 0 ? {
context_pruning: tool({
description: `Performs semantic pruning on session tool outputs that are no longer relevant to the current task. Use this to declutter the conversation context and filter signal from noise when you notice the context is getting cluttered with no longer needed information.
Expand Down
Loading