5 changes: 3 additions & 2 deletions README.md
@@ -13,7 +13,7 @@ Add to your OpenCode config:
```jsonc
// opencode.jsonc
{
"plugin": ["@tarquinen/opencode-dcp@0.4.2"],
"plugin": ["@tarquinen/opencode-dcp"],
"experimental": {
"primary_tools": ["prune"]
}
@@ -22,7 +22,7 @@ Add to your OpenCode config:

The `experimental.primary_tools` setting ensures the `prune` tool is only available to the primary agent (not subagents).

When a new version is available, DCP will show a toast notification. Update by changing the version number in your config.
DCP automatically updates itself in the background when new versions are available. You'll see a toast notification when an update is downloaded—just restart OpenCode to apply it. To disable auto-updates, set `"autoUpdate": false` in your DCP config.
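For example, opting out while keeping the update notifications is a two-line change in your DCP config (a minimal sketch, using the global config path described below):

```jsonc
// ~/.config/opencode/dcp.jsonc
{
  // Still show a toast when a new version exists...
  "showUpdateToasts": true,
  // ...but don't download it automatically
  "autoUpdate": false
}
```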

Restart OpenCode. The plugin will automatically start optimizing your sessions.

@@ -63,6 +63,7 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
| `model` | (session) | Model for analysis (e.g., `"anthropic/claude-haiku-4-5"`) |
| `showModelErrorToasts` | `true` | Show notifications on model fallback |
| `showUpdateToasts` | `true` | Show notifications when a new version is available |
| `autoUpdate` | `true` | Automatically download new versions (restart to apply) |
| `strictModelSelection` | `false` | Only run AI analysis with session or configured model (disables fallback models) |
| `pruning_summary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
| `nudge_freq` | `10` | How often to remind AI to prune (lower = more frequent) |
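Putting a few of the options above together, a project-level override might look like the following sketch (values are illustrative rather than recommendations; the model string is the example from the table):

```jsonc
// .opencode/dcp.jsonc (project-level DCP config)
{
  "model": "anthropic/claude-haiku-4-5", // model used for AI analysis
  "autoUpdate": true,                    // download new versions in the background
  "pruning_summary": "minimal",          // "off", "minimal", or "detailed"
  "nudge_freq": 5                        // lower = more frequent prune reminders
}
```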
5 changes: 4 additions & 1 deletion index.ts
@@ -53,7 +53,10 @@ const plugin: Plugin = (async (ctx) => {

// Check for updates after a delay
setTimeout(() => {
checkForUpdates(ctx.client, logger, config.showUpdateToasts ?? true).catch(() => { })
checkForUpdates(ctx.client, logger, {
showToast: config.showUpdateToasts ?? true,
autoUpdate: config.autoUpdate ?? true
}).catch(() => { })
}, 5000)

// Show migration toast if there were config migrations
7 changes: 7 additions & 0 deletions lib/config.ts
@@ -14,6 +14,7 @@ export interface PluginConfig {
model?: string
showModelErrorToasts?: boolean
showUpdateToasts?: boolean
autoUpdate?: boolean
strictModelSelection?: boolean
pruning_summary: "off" | "minimal" | "detailed"
nudge_freq: number
@@ -34,6 +35,7 @@ const defaultConfig: PluginConfig = {
protectedTools: ['task', 'todowrite', 'todoread', 'prune', 'batch', 'edit', 'write'],
showModelErrorToasts: true,
showUpdateToasts: true,
autoUpdate: true,
strictModelSelection: false,
pruning_summary: 'detailed',
nudge_freq: 10,
@@ -50,6 +52,7 @@ const VALID_CONFIG_KEYS = new Set([
'model',
'showModelErrorToasts',
'showUpdateToasts',
'autoUpdate',
'strictModelSelection',
'pruning_summary',
'nudge_freq',
@@ -115,6 +118,8 @@ function createDefaultConfig(): void {
"showModelErrorToasts": true,
// Show toast notifications when a new version is available
"showUpdateToasts": true,
// Automatically update to new versions (restart required to apply)
"autoUpdate": true,
// Only run AI analysis with session model or configured model (disables fallback models)
"strictModelSelection": false,
// AI analysis strategies (deduplication runs automatically on every request)
@@ -205,6 +210,7 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
model: globalConfig.model ?? config.model,
showModelErrorToasts: globalConfig.showModelErrorToasts ?? config.showModelErrorToasts,
showUpdateToasts: globalConfig.showUpdateToasts ?? config.showUpdateToasts,
autoUpdate: globalConfig.autoUpdate ?? config.autoUpdate,
strictModelSelection: globalConfig.strictModelSelection ?? config.strictModelSelection,
strategies: mergeStrategies(config.strategies, globalConfig.strategies as any),
pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary,
@@ -237,6 +243,7 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
model: projectConfig.model ?? config.model,
showModelErrorToasts: projectConfig.showModelErrorToasts ?? config.showModelErrorToasts,
showUpdateToasts: projectConfig.showUpdateToasts ?? config.showUpdateToasts,
autoUpdate: projectConfig.autoUpdate ?? config.autoUpdate,
strictModelSelection: projectConfig.strictModelSelection ?? config.strictModelSelection,
strategies: mergeStrategies(config.strategies, projectConfig.strategies as any),
pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary,
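Reading the two merge blocks above in order, project-level values take precedence over global ones, which in turn override the built-in defaults (each `??` falls back to whatever has been merged so far). For instance, assuming the global file sets `"autoUpdate": true`, a project file like this hypothetical sketch makes the effective value `false`, while `model` still comes from the global file:

```jsonc
// .opencode/dcp.jsonc (project-level file; its keys win over the global file and the defaults)
{
  "autoUpdate": false
}
```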
40 changes: 18 additions & 22 deletions lib/prompts/synthetic.txt
@@ -1,30 +1,26 @@
<system-reminder>
The <prunable-tools> list and any pruning nudges are injected by the environment as assistant messages, use this information when deciding what to prune.
</system-reminder>

<instruction name=context_management_protocol>
You are operating in a context-constrained environment. You must actively manage your context window using the `prune` tool.
<instruction name=context_management_protocol policy_level=critical>

## Prune Early, Prune Often
Every tool call adds to your context debt. You MUST pay this down regularly by pruning. Do not wait until context is "full" - by then it's too late. Evaluate what can be pruned after every few tool calls.
ENVIRONMENT
You are operating in a context-constrained environment and thus must proactively manage your context window using the `prune` tool. A <prunable-tools> list is injected by the environment as an assistant message; use this information when deciding what to prune.

## When to Prune (Triggers)
You SHOULD use the prune tool when ANY of these are true:
- You just completed a task or sub-task
- You read files that turned out to be unhelpful or only partially useful
- You have gathered enough information to answer a question or make a decision
- You ran commands whose output you have already processed
- Newer tool outputs have made older ones obsolete
- You are about to start a new phase of work
PRUNE EARLY, PRUNE OFTEN - BUT PRUNE METHODICALLY
Every tool call adds to your context debt. You MUST pay this down regularly and stay on top of context accumulation by pruning. Evaluate what SHOULD be pruned before jumping the gun.

When in doubt, prune. It is better to prune aggressively than to run out of context.
WHEN TO PRUNE? THE THREE SCENARIOS TO CONSIDER
1. TASK COMPLETION: When work is done, quietly prune the tools that aren't needed anymore
2. NOISE REMOVAL: If outputs are irrelevant, unhelpful, or superseded by newer info, prune IMMEDIATELY. No distillation - gun it down
3. CONTEXT CONSOLIDATION: When pruning context that is valuable to the task at hand, you MUST ALWAYS distill key findings into your narrative BEFORE pruning. Be surgical and strategic in what you extract. THINK: high signal, low noise

## Three Pruning Modes
Apply the correct mode for each situation:
You WILL use the `prune` tool when ANY of these are true:
- Task or sub-task is complete
- You are about to start a new phase of work
- You have distilled enough information in your messages to prune related tools
- Context contains tool outputs that are unhelpful, noisy, or made obsolete by newer outputs

1. TASK COMPLETION: When work is done, prune the tools used. No distillation needed - just state the task is complete.
2. NOISE REMOVAL: If outputs are irrelevant, unhelpful, or outdated (superseded by newer info), prune IMMEDIATELY. No distillation - just cut it out.
3. CONTEXT CONSOLIDATION: When pruning useful research, you MUST distill key findings into your narrative *before* pruning. Extract only what matters (e.g., a specific function signature from a large file).
NOTES
When in doubt, prune out. Prune often yet remain strategic about it.
FAILURE TO PRUNE will result in context leakage and DEGRADED PERFORMANCE.

FAILURE TO PRUNE will result in context overflow and degraded performance.
</instruction>
</system-reminder>
105 changes: 93 additions & 12 deletions lib/version-checker.ts
@@ -1,6 +1,8 @@
import { readFileSync } from 'fs'
import { join, dirname } from 'path'
import { fileURLToPath } from 'url'
import { spawn } from 'child_process'
import { homedir } from 'os'

export const PACKAGE_NAME = '@tarquinen/opencode-dcp'
export const NPM_REGISTRY_URL = `https://registry.npmjs.org/${PACKAGE_NAME}/latest`
@@ -50,7 +52,65 @@ export function isOutdated(local: string, remote: string): boolean {
return false
}

export async function checkForUpdates(client: any, logger?: { info: (component: string, message: string, data?: any) => void }, showToast: boolean = true): Promise<void> {
export async function performUpdate(targetVersion: string, logger?: { info: (component: string, message: string, data?: any) => void }): Promise<boolean> {
// OpenCode installs packages to ~/.cache/opencode/node_modules/
const cacheDir = join(homedir(), '.cache', 'opencode')
const packageSpec = `${PACKAGE_NAME}@${targetVersion}`

logger?.info("version", "Starting auto-update", { targetVersion, cacheDir })

return new Promise((resolve) => {
let resolved = false

const proc = spawn('npm', ['install', '--legacy-peer-deps', packageSpec], {
cwd: cacheDir,
stdio: 'pipe'
})

let stderr = ''
proc.stderr?.on('data', (data) => {
stderr += data.toString()
})

proc.on('close', (code) => {
if (resolved) return
resolved = true
clearTimeout(timeoutId)
if (code === 0) {
logger?.info("version", "Auto-update succeeded", { targetVersion })
resolve(true)
} else {
logger?.info("version", "Auto-update failed", { targetVersion, code, stderr: stderr.slice(0, 500) })
resolve(false)
}
})

proc.on('error', (err) => {
if (resolved) return
resolved = true
clearTimeout(timeoutId)
logger?.info("version", "Auto-update error", { targetVersion, error: err.message })
resolve(false)
})

// Timeout after 60 seconds
const timeoutId = setTimeout(() => {
if (resolved) return
resolved = true
proc.kill()
logger?.info("version", "Auto-update timed out", { targetVersion })
resolve(false)
}, 60000)
})
}

export async function checkForUpdates(
client: any,
logger?: { info: (component: string, message: string, data?: any) => void },
options: { showToast?: boolean; autoUpdate?: boolean } = {}
): Promise<void> {
const { showToast = true, autoUpdate = false } = options

try {
const local = getLocalVersion()
const npm = await getNpmVersion()
@@ -65,20 +125,41 @@ export async function checkForUpdates(client: any, logger?: { info: (component:
return
}

logger?.info("version", "Update available", { local, npm })
logger?.info("version", "Update available", { local, npm, autoUpdate })

if (!showToast) {
return
}
if (autoUpdate) {
// Attempt auto-update
const success = await performUpdate(npm, logger)

await client.tui.showToast({
body: {
title: "DCP: Update available",
message: `v${local} → v${npm}\nUpdate opencode.jsonc: ${PACKAGE_NAME}@${npm}`,
variant: "info",
duration: 6000
if (success && showToast) {
await client.tui.showToast({
body: {
title: "DCP: Updated!",
message: `v${local} → v${npm}\nRestart OpenCode to apply`,
variant: "success",
duration: 6000
}
})
} else if (!success && showToast) {
await client.tui.showToast({
body: {
title: "DCP: Update failed",
message: `v${local} → v${npm}\nManual: npm install ${PACKAGE_NAME}@${npm}`,
variant: "warning",
duration: 6000
}
})
}
})
} else if (showToast) {
await client.tui.showToast({
body: {
title: "DCP: Update available",
message: `v${local} → v${npm}`,
variant: "info",
duration: 6000
}
})
}
} catch {
}
}