diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml
index f9c7804..e665cfc 100644
--- a/.github/workflows/pr-checks.yml
+++ b/.github/workflows/pr-checks.yml
@@ -2,9 +2,7 @@ name: PR Checks
on:
pull_request:
- branches: [main, master]
- push:
- branches: [main, master]
+ branches: [master, dev]
jobs:
validate:
diff --git a/README.md b/README.md
index 6992da2..965f5f2 100644
--- a/README.md
+++ b/README.md
@@ -23,19 +23,15 @@ Restart OpenCode. The plugin will automatically start optimizing your sessions.
## How Pruning Works
-DCP uses two complementary techniques:
+DCP uses multiple strategies to reduce context size:
-**Automatic Deduplication** — Silently identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs on every request with zero LLM cost.
+**Deduplication** — Identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs automatically on every request with zero LLM cost.
-**AI Analysis** — Uses a language model to semantically analyze conversation context and identify tool outputs that are no longer relevant to the current task.
+**Prune Thinking Blocks** — Removes LLM thinking/reasoning blocks from the conversation history.
-## Context Pruning Tool
+**On Idle Analysis** — Uses a language model to semantically analyze conversation context during idle periods and identify tool outputs that are no longer relevant.
-When `strategies.onTool` is enabled, DCP exposes a `prune` tool to Opencode that the AI can call to trigger pruning on demand.
-
-Adjust `nudgeFreq` to control how aggressively the AI is prompted to prune — lower values trigger reminders sooner and more often.
-
-## How It Works
+**Prune Tool** — Exposes a `prune` tool that the AI can call to manually trigger pruning when it determines context cleanup is needed.
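+
+Each strategy is toggled independently in `dcp.jsonc` (see Configuration below). For example, a config that enables the prune tool alongside thinking-block pruning might look like:
+
+```jsonc
+{
+  "strategies": {
+    "pruneThinkingBlocks": { "enabled": true },
+    "pruneTool": { "enabled": true }
+  }
+}
+```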
Your session history is never modified. DCP replaces pruned outputs with a placeholder before sending requests to your LLM.
@@ -49,35 +45,67 @@ LLM providers like Anthropic and OpenAI cache prompts based on exact prefix matc
DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.jsonc`), created automatically on first run.
-### Options
-
-| Option | Default | Description |
-|--------|---------|-------------|
-| `enabled` | `true` | Enable/disable the plugin |
-| `debug` | `false` | Log to `~/.config/opencode/logs/dcp/` |
-| `model` | (session) | Model for analysis (e.g., `"anthropic/claude-haiku-4-5"`) |
-| `showModelErrorToasts` | `true` | Show notifications on model fallback |
-| `showUpdateToasts` | `true` | Show notifications when a new version is available |
-| `strictModelSelection` | `false` | Only run AI analysis with session or configured model (disables fallback models) |
-| `pruningSummary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
-| `nudgeFreq` | `10` | How often to remind AI to prune (lower = more frequent) |
-| `protectedTools` | `["task", "todowrite", "todoread", "prune", "batch", "write", "edit"]` | Tools that are never pruned |
-| `strategies.onIdle` | `["ai-analysis"]` | Strategies for automatic pruning |
-| `strategies.onTool` | `["ai-analysis"]` | Strategies when AI calls `prune` |
-
-**Strategies:** `"ai-analysis"` uses LLM to identify prunable outputs. Empty array disables that trigger. Deduplication runs automatically on every request.
+
+<details>
+<summary>Default Configuration (click to expand)</summary>
+
```jsonc
{
+ // Enable or disable the plugin
"enabled": true,
+ // Enable debug logging to ~/.config/opencode/logs/dcp/
+ "debug": false,
+ // Show toast notifications when a new version is available
+ "showUpdateToasts": true,
+ // Summary display: "off", "minimal", or "detailed"
+ "pruningSummary": "detailed",
+ // Strategies for pruning tokens from chat history
"strategies": {
- "onIdle": ["ai-analysis"],
- "onTool": ["ai-analysis"]
- },
- "protectedTools": ["task", "todowrite", "todoread", "prune", "batch", "write", "edit"]
+ // Remove duplicate tool calls (same tool with same arguments)
+ "deduplication": {
+ "enabled": true,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ },
+ // Remove thinking/reasoning LLM blocks
+ "pruneThinkingBlocks": {
+ "enabled": false
+ },
+ // (Legacy) Run an LLM to analyze what tool calls are no longer relevant on idle
+ "onIdle": {
+ "enabled": false,
+ // Override model for analysis (format: "provider/model")
+ // "model": "anthropic/claude-haiku-4-5",
+ // Show toast notifications when model selection fails
+ "showModelErrorToasts": true,
+ // When true, fallback models are not permitted
+ "strictModelSelection": false,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ },
+ // Exposes a prune tool to your LLM to call when it determines pruning is necessary
+ "pruneTool": {
+ "enabled": false,
+ // Additional tools to protect from pruning
+ "protectedTools": [],
+ // Nudge the LLM to use the prune tool (every N tool results)
+ "nudge": {
+ "enabled": true,
+ "frequency": 10
+ }
+ }
+ }
}
```
+
+</details>
+
+### Protected Tools
+
+By default, these tools are always protected from pruning across all strategies:
+`task`, `todowrite`, `todoread`, `prune`, `batch`, `write`, `edit`
+
+The `protectedTools` arrays in each strategy add to this default list.
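+
+For example, to additionally protect a hypothetical `webfetch` tool from deduplication only, a project config could add:
+
+```jsonc
+{
+  "strategies": {
+    "deduplication": { "protectedTools": ["webfetch"] }
+  }
+}
+```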
+
### Config Precedence
Settings are merged in order: **Defaults** → **Global** (`~/.config/opencode/dcp.jsonc`) → **Project** (`.opencode/dcp.jsonc`). Each level overrides the previous, so project settings take priority over global, which takes priority over defaults.
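+
+For example, if the global config enables the prune tool and the project config only overrides the nudge frequency, the merged result keeps both changes:
+
+```jsonc
+// ~/.config/opencode/dcp.jsonc (global)
+{ "strategies": { "pruneTool": { "enabled": true } } }
+
+// .opencode/dcp.jsonc (project)
+{ "strategies": { "pruneTool": { "nudge": { "frequency": 5 } } } }
+
+// Effective: pruneTool enabled, nudge enabled (default), frequency 5
+```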
diff --git a/lib/config.ts b/lib/config.ts
index 6352076..3dfdba2 100644
--- a/lib/config.ts
+++ b/lib/config.ts
@@ -21,10 +21,15 @@ export interface OnIdle {
protectedTools: string[]
}
+export interface PruneToolNudge {
+ enabled: boolean
+ frequency: number
+}
+
export interface PruneTool {
enabled: boolean
protectedTools: string[]
- nudgeFrequency: number
+ nudge: PruneToolNudge
}
export interface PluginConfig {
@@ -68,7 +73,9 @@ export const VALID_CONFIG_KEYS = new Set([
'strategies.pruneTool',
'strategies.pruneTool.enabled',
'strategies.pruneTool.protectedTools',
- 'strategies.pruneTool.nudgeFrequency',
+ 'strategies.pruneTool.nudge',
+ 'strategies.pruneTool.nudge.enabled',
+ 'strategies.pruneTool.nudge.frequency',
])
// Extract all key paths from a config object for validation
@@ -160,8 +167,13 @@ function validateConfigTypes(config: Record): ValidationError[] {
if (strategies.pruneTool.protectedTools !== undefined && !Array.isArray(strategies.pruneTool.protectedTools)) {
errors.push({ key: 'strategies.pruneTool.protectedTools', expected: 'string[]', actual: typeof strategies.pruneTool.protectedTools })
}
- if (strategies.pruneTool.nudgeFrequency !== undefined && typeof strategies.pruneTool.nudgeFrequency !== 'number') {
- errors.push({ key: 'strategies.pruneTool.nudgeFrequency', expected: 'number', actual: typeof strategies.pruneTool.nudgeFrequency })
+ if (strategies.pruneTool.nudge) {
+ if (strategies.pruneTool.nudge.enabled !== undefined && typeof strategies.pruneTool.nudge.enabled !== 'boolean') {
+ errors.push({ key: 'strategies.pruneTool.nudge.enabled', expected: 'boolean', actual: typeof strategies.pruneTool.nudge.enabled })
+ }
+ if (strategies.pruneTool.nudge.frequency !== undefined && typeof strategies.pruneTool.nudge.frequency !== 'number') {
+ errors.push({ key: 'strategies.pruneTool.nudge.frequency', expected: 'number', actual: typeof strategies.pruneTool.nudge.frequency })
+ }
}
}
}
@@ -226,10 +238,10 @@ const defaultConfig: PluginConfig = {
protectedTools: [...DEFAULT_PROTECTED_TOOLS]
},
pruneThinkingBlocks: {
- enabled: true
+ enabled: false
},
onIdle: {
- enabled: true,
+ enabled: false,
showModelErrorToasts: true,
strictModelSelection: false,
protectedTools: [...DEFAULT_PROTECTED_TOOLS]
@@ -237,7 +249,10 @@ const defaultConfig: PluginConfig = {
pruneTool: {
enabled: false,
protectedTools: [...DEFAULT_PROTECTED_TOOLS],
- nudgeFrequency: 10
+ nudge: {
+ enabled: true,
+ frequency: 10
+ }
}
}
}
@@ -309,11 +324,11 @@ function createDefaultConfig(): void {
},
// Remove thinking/reasoning LLM blocks
"pruneThinkingBlocks": {
- "enabled": true
+ "enabled": false
},
- // Run an LLM to analyze what tool calls are no longer relevant on idle
+ // (Legacy) Run an LLM to analyze what tool calls are no longer relevant on idle
"onIdle": {
- "enabled": true,
+ "enabled": false,
// Override model for analysis (format: "provider/model")
// "model": "anthropic/claude-haiku-4-5",
// Show toast notifications when model selection fails
@@ -328,8 +343,11 @@ function createDefaultConfig(): void {
"enabled": false,
// Additional tools to protect from pruning
"protectedTools": [],
- // How often to nudge the AI to prune (every N tool results, 0 = disabled)
- "nudgeFrequency": 10
+ // Nudge the LLM to use the prune tool (every N tool results)
+ "nudge": {
+ "enabled": true,
+ "frequency": 10
+ }
}
}
}
@@ -401,7 +419,10 @@ function mergeStrategies(
...(override.pruneTool?.protectedTools ?? [])
])
],
- nudgeFrequency: override.pruneTool?.nudgeFrequency ?? base.pruneTool.nudgeFrequency
+ nudge: {
+ enabled: override.pruneTool?.nudge?.enabled ?? base.pruneTool.nudge.enabled,
+ frequency: override.pruneTool?.nudge?.frequency ?? base.pruneTool.nudge.frequency
+ }
}
}
}
@@ -421,7 +442,8 @@ function deepCloneConfig(config: PluginConfig): PluginConfig {
},
pruneTool: {
...config.strategies.pruneTool,
- protectedTools: [...config.strategies.pruneTool.protectedTools]
+ protectedTools: [...config.strategies.pruneTool.protectedTools],
+ nudge: { ...config.strategies.pruneTool.nudge }
}
}
}
diff --git a/lib/hooks.ts b/lib/hooks.ts
index 92abeb2..b3dc9da 100644
--- a/lib/hooks.ts
+++ b/lib/hooks.ts
@@ -18,7 +18,8 @@ export function createChatMessageTransformHandler(
output: { messages: WithParts[] }
) => {
checkSession(state, logger, output.messages);
- syncToolCache(state, logger, output.messages);
+ syncToolCache(state, config, logger, output.messages);
+
deduplicate(state, logger, config, output.messages)
diff --git a/lib/messages/prune.ts b/lib/messages/prune.ts
index e4a7fe9..cca70d1 100644
--- a/lib/messages/prune.ts
+++ b/lib/messages/prune.ts
@@ -3,8 +3,10 @@ import type { Logger } from "../logger"
import type { PluginConfig } from "../config"
import { buildToolIdList } from "../utils"
import { getLastUserMessage, extractParameterKey } from "./utils"
+import { loadPrompt } from "../prompt"
const PRUNED_TOOL_OUTPUT_REPLACEMENT = '[Output removed to save context - information superseded or no longer needed]'
+const NUDGE_STRING = loadPrompt("nudge")
const buildPrunableToolsList = (
state: SessionState,
@@ -45,6 +47,12 @@ export const insertPruneToolContext = (
const prunableToolsList = buildPrunableToolsList(state, config, logger, messages)
+ let nudgeString = ""
+ if (config.strategies.pruneTool.nudge.enabled && state.nudgeCounter >= config.strategies.pruneTool.nudge.frequency) {
+ logger.info("Inserting prune nudge message")
+ nudgeString = "\n" + NUDGE_STRING
+ }
+
const userMessage: WithParts = {
info: {
id: "msg_01234567890123456789012345",
@@ -63,7 +71,7 @@ export const insertPruneToolContext = (
sessionID: lastUserMessage.info.sessionID,
messageID: "msg_01234567890123456789012345",
type: "text",
- text: prunableToolsList,
+ text: prunableToolsList + nudgeString,
}
]
}
diff --git a/lib/state/state.ts b/lib/state/state.ts
index 3e727a0..6682d6f 100644
--- a/lib/state/state.ts
+++ b/lib/state/state.ts
@@ -38,7 +38,8 @@ export function createSessionState(): SessionState {
pruneTokenCounter: 0,
totalPruneTokens: 0,
},
- toolParameters: new Map()
+ toolParameters: new Map(),
+ nudgeCounter: 0
}
}
@@ -52,6 +53,7 @@ export function resetSessionState(state: SessionState): void {
totalPruneTokens: 0,
}
state.toolParameters.clear()
+ state.nudgeCounter = 0
}
export async function ensureSessionInitialized(
diff --git a/lib/state/tool-cache.ts b/lib/state/tool-cache.ts
index 215f9a5..d86005f 100644
--- a/lib/state/tool-cache.ts
+++ b/lib/state/tool-cache.ts
@@ -1,5 +1,6 @@
import type { SessionState, ToolStatus, WithParts } from "./index"
import type { Logger } from "../logger"
+import type { PluginConfig } from "../config"
const MAX_TOOL_CACHE_SIZE = 500
@@ -10,6 +11,7 @@ const MAX_TOOL_CACHE_SIZE = 500
*/
export async function syncToolCache(
state: SessionState,
+ config: PluginConfig,
logger: Logger,
messages: WithParts[],
): Promise<void> {
@@ -30,6 +32,10 @@ export async function syncToolCache(
error: part.state.status === "error" ? part.state.error : undefined,
}
)
+
+ if (!config.strategies.pruneTool.protectedTools.includes(part.tool)) {
+ state.nudgeCounter++
+ }
}
}
diff --git a/lib/state/types.ts b/lib/state/types.ts
index b0b6b69..750ca38 100644
--- a/lib/state/types.ts
+++ b/lib/state/types.ts
@@ -28,4 +28,5 @@ export interface SessionState {
prune: Prune
stats: SessionStats
toolParameters: Map
+ nudgeCounter: number
}
diff --git a/lib/strategies/prune-tool.ts b/lib/strategies/prune-tool.ts
index c48af54..08e680c 100644
--- a/lib/strategies/prune-tool.ts
+++ b/lib/strategies/prune-tool.ts
@@ -102,6 +102,7 @@ export function createPruneTool(
)
state.stats.totalPruneTokens += state.stats.pruneTokenCounter
state.stats.pruneTokenCounter = 0
+ state.nudgeCounter = 0
return formatPruningResultForTool(
pruneToolIds,