Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ Add to your OpenCode config:
```jsonc
// opencode.jsonc
{
"plugins": ["@tarquinen/[email protected].19"]
"plugin": ["@tarquinen/[email protected].20"]
}
```

Expand Down Expand Up @@ -48,7 +48,7 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
| `pruning_summary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
| `protectedTools` | `["task", "todowrite", "todoread", "context_pruning"]` | Tools that are never pruned |
| `strategies.onIdle` | `["deduplication", "ai-analysis"]` | Strategies for automatic pruning |
| `strategies.onTool` | `["deduplication"]` | Strategies when AI calls `context_pruning` |
| `strategies.onTool` | `["deduplication", "ai-analysis"]` | Strategies when AI calls `context_pruning` |

**Strategies:** `"deduplication"` (fast, zero LLM cost) and `"ai-analysis"` (maximum savings). Empty array disables that trigger.

Expand All @@ -57,7 +57,7 @@ DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.j
"enabled": true,
"strategies": {
"onIdle": ["deduplication", "ai-analysis"],
"onTool": ["deduplication"]
"onTool": ["deduplication", "ai-analysis"]
},
"protectedTools": ["task", "todowrite", "todoread", "context_pruning"]
}
Expand Down
21 changes: 20 additions & 1 deletion index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -100,14 +100,33 @@ const plugin: Plugin = (async (ctx) => {
})

if (logger.enabled) {
// Fetch session messages to extract reasoning blocks
let sessionMessages: any[] | undefined
try {
const activeSessions = allSessions.data?.filter(s => !s.parentID) || []
if (activeSessions.length > 0) {
const mostRecentSession = activeSessions[0]
const messagesResponse = await ctx.client.session.messages({
path: { id: mostRecentSession.id },
query: { limit: 100 }
})
sessionMessages = Array.isArray(messagesResponse.data)
? messagesResponse.data
: Array.isArray(messagesResponse) ? messagesResponse : undefined
}
} catch (e) {
// Silently continue without session messages
}

await logger.saveWrappedContext(
"global",
body.messages,
{
url: typeof input === 'string' ? input : 'URL object',
replacedCount,
totalMessages: body.messages.length
}
},
sessionMessages
)
}

Expand Down
4 changes: 2 additions & 2 deletions lib/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ const defaultConfig: PluginConfig = {
pruning_summary: 'detailed',
strategies: {
onIdle: ['deduplication', 'ai-analysis'],
onTool: ['deduplication']
onTool: ['deduplication', 'ai-analysis']
}
}

Expand Down Expand Up @@ -114,7 +114,7 @@ function createDefaultConfig(): void {
// Strategies to run when session goes idle
"onIdle": ["deduplication", "ai-analysis"],
// Strategies to run when AI calls context_pruning tool
"onTool": ["deduplication"]
"onTool": ["deduplication", "ai-analysis"]
},
// Summary display: "off", "minimal", or "detailed"
"pruning_summary": "detailed",
Expand Down
52 changes: 50 additions & 2 deletions lib/logger.ts
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,42 @@ export class Logger {
return result
}

async saveWrappedContext(sessionID: string, messages: any[], metadata: any) {
/**
 * Walks the given session messages and collects every part of type
 * "reasoning" into a flat summary record: originating message id/role,
 * the reasoning text and its length, the size of the provider's opaque
 * encrypted payload (if any), timing info, and which metadata keys were
 * present on the part.
 */
private extractReasoningBlocks(sessionMessages: any[]): any[] {
  const collected: any[] = []

  for (const msg of sessionMessages) {
    if (!msg.parts) continue

    for (const part of msg.parts) {
      if (part.type !== "reasoning") continue

      // Providers stash their opaque reasoning payload in different
      // metadata slots; first truthy match wins (openai, then
      // anthropic, then google), otherwise the length is reported as 0.
      const meta = part.metadata
      const encrypted =
        meta?.openai?.reasoningEncryptedContent ||
        meta?.anthropic?.signature ||
        meta?.google?.thoughtSignature

      collected.push({
        messageId: msg.id,
        messageRole: msg.role,
        text: part.text,
        textLength: part.text?.length || 0,
        encryptedContentLength: encrypted ? encrypted.length : 0,
        time: part.time,
        hasMetadata: !!meta,
        metadataKeys: meta ? Object.keys(meta) : []
      })
    }
  }

  return collected
}

async saveWrappedContext(sessionID: string, messages: any[], metadata: any, sessionMessages?: any[]) {
if (!this.enabled) return

try {
Expand Down Expand Up @@ -197,11 +232,24 @@ export class Logger {
}
}
} else {
// Extract reasoning blocks from session messages if available
const reasoningBlocks = sessionMessages
? this.extractReasoningBlocks(sessionMessages)
: []

content = {
timestamp: new Date().toISOString(),
sessionID,
metadata,
messages
messages,
...(reasoningBlocks.length > 0 && {
reasoning: {
count: reasoningBlocks.length,
totalTextCharacters: reasoningBlocks.reduce((sum, b) => sum + b.textLength, 0),
totalEncryptedCharacters: reasoningBlocks.reduce((sum, b) => sum + b.encryptedContentLength, 0),
blocks: reasoningBlocks
}
})
}
}

Expand Down
3 changes: 2 additions & 1 deletion lib/model-selector.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ export interface ModelInfo {

export const FALLBACK_MODELS: Record<string, string> = {
openai: 'gpt-5-mini',
anthropic: 'claude-haiku-4-5',
anthropic: 'claude-haiku-4-5', //This model isn't broken in opencode-auth-provider
google: 'gemini-2.5-flash',
deepseek: 'deepseek-chat',
xai: 'grok-4-fast',
Expand All @@ -28,6 +28,7 @@ const PROVIDER_PRIORITY = [
'opencode'
];

// TODO: some anthropic provided models aren't supported by the opencode-auth-provider package, so this provides a temporary workaround
const SKIP_PROVIDERS = ['github-copilot', 'anthropic'];

export interface ModelSelectionResult {
Expand Down
22 changes: 22 additions & 0 deletions lib/prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,28 @@ function minimizeMessages(messages: any[], alreadyPrunedIds?: string[], protecte
}
}

// TODO: This should use the opencode normalized system instead of per provider settings
if (part.type === 'reasoning') {
// Calculate encrypted content size if present
let encryptedContentLength = 0
if (part.metadata?.openai?.reasoningEncryptedContent) {
encryptedContentLength = part.metadata.openai.reasoningEncryptedContent.length
} else if (part.metadata?.anthropic?.signature) {
encryptedContentLength = part.metadata.anthropic.signature.length
} else if (part.metadata?.google?.thoughtSignature) {
encryptedContentLength = part.metadata.google.thoughtSignature.length
}

return {
type: 'reasoning',
text: part.text,
textLength: part.text?.length || 0,
encryptedContentLength,
...(part.time && { time: part.time }),
...(part.metadata && { metadataKeys: Object.keys(part.metadata) })
}
}

if (part.type === 'tool') {
const callIDLower = part.callID?.toLowerCase()
const isAlreadyPruned = prunedIdsSet.has(callIDLower)
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "@tarquinen/opencode-dcp",
"version": "0.3.19",
"version": "0.3.20",
"type": "module",
"description": "OpenCode plugin that optimizes token usage by pruning obsolete tool outputs from conversation context",
"main": "./dist/index.js",
Expand Down