Skip to content

Commit 3a9a9a0

Browse files
committed
feat: add synthetic instruction injection for better context_pruning usage
Integrates PR #33 features into the modular architecture: - Add synth-instruction.ts for injecting instructions into user messages - Add nudge injection every N tool results (configurable via nudge_freq) - Add synthetic.txt prompt teaching AI to narrate findings before pruning - Add nudge.txt prompt reminding AI to prune when appropriate - Add nudge_freq config option (default: 5, 0 to disable) - Update fetch-wrapper to inject synth/nudge when onTool strategies enabled - Copy prompt files to dist during build
1 parent c474053 commit 3a9a9a0

File tree

9 files changed

+214
-6
lines changed

9 files changed

+214
-6
lines changed

index.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,8 +39,8 @@ const plugin: Plugin = (async (ctx) => {
3939
ctx.directory
4040
)
4141

42-
// Install global fetch wrapper for context pruning
43-
installFetchWrapper(state, logger, ctx.client)
42+
// Install global fetch wrapper for context pruning and synth injection
43+
installFetchWrapper(state, logger, ctx.client, config)
4444

4545
// Log initialization
4646
logger.info("plugin", "DCP initialized", {

lib/config.ts

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ export interface PluginConfig {
1515
showModelErrorToasts?: boolean
1616
strictModelSelection?: boolean
1717
pruning_summary: "off" | "minimal" | "detailed"
18+
/** Nudge frequency: inject reminder every N tool results (0 = disabled) */
19+
nudge_freq: number
1820
strategies: {
1921
onIdle: PruningStrategy[]
2022
onTool: PruningStrategy[]
@@ -33,6 +35,7 @@ const defaultConfig: PluginConfig = {
3335
showModelErrorToasts: true,
3436
strictModelSelection: false,
3537
pruning_summary: 'detailed',
38+
nudge_freq: 5,
3639
strategies: {
3740
onIdle: ['deduplication', 'ai-analysis'],
3841
onTool: ['deduplication', 'ai-analysis']
@@ -47,6 +50,7 @@ const VALID_CONFIG_KEYS = new Set([
4750
'showModelErrorToasts',
4851
'strictModelSelection',
4952
'pruning_summary',
53+
'nudge_freq',
5054
'strategies'
5155
])
5256

@@ -196,7 +200,8 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
196200
showModelErrorToasts: globalConfig.showModelErrorToasts ?? config.showModelErrorToasts,
197201
strictModelSelection: globalConfig.strictModelSelection ?? config.strictModelSelection,
198202
strategies: mergeStrategies(config.strategies, globalConfig.strategies as any),
199-
pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary
203+
pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary,
204+
nudge_freq: globalConfig.nudge_freq ?? config.nudge_freq
200205
}
201206
logger.info('config', 'Loaded global config', { path: configPaths.global })
202207
}
@@ -226,7 +231,8 @@ export function getConfig(ctx?: PluginInput): ConfigResult {
226231
showModelErrorToasts: projectConfig.showModelErrorToasts ?? config.showModelErrorToasts,
227232
strictModelSelection: projectConfig.strictModelSelection ?? config.strictModelSelection,
228233
strategies: mergeStrategies(config.strategies, projectConfig.strategies as any),
229-
pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary
234+
pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary,
235+
nudge_freq: projectConfig.nudge_freq ?? config.nudge_freq
230236
}
231237
logger.info('config', 'Loaded project config (overrides global)', { path: configPaths.project })
232238
}

lib/fetch-wrapper/index.ts

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,23 @@
11
import type { PluginState } from "../state"
22
import type { Logger } from "../logger"
3+
import type { PluginConfig } from "../config"
34
import type { FetchHandlerContext } from "./types"
45
import { handleOpenAIChatAndAnthropic } from "./openai-chat"
56
import { handleGemini } from "./gemini"
67
import { handleOpenAIResponses } from "./openai-responses"
8+
import { loadPrompt } from "../prompt"
9+
import { injectSynth, injectNudge } from "../synth-instruction"
710

811
export type { FetchHandlerContext, FetchHandlerResult } from "./types"
912

13+
// Load prompts once at module level
14+
const SYNTH_INSTRUCTION = loadPrompt("synthetic")
15+
const NUDGE_INSTRUCTION = loadPrompt("nudge")
16+
1017
/**
1118
* Creates a wrapped global fetch that intercepts API calls and performs
1219
* context pruning on tool outputs that have been marked for removal.
20+
* Also injects synthetic instructions and periodic nudges when tool strategies are enabled.
1321
*
1422
* Supports four API formats:
1523
* 1. OpenAI Chat Completions (body.messages with role='tool')
@@ -20,7 +28,8 @@ export type { FetchHandlerContext, FetchHandlerResult } from "./types"
2028
export function installFetchWrapper(
2129
state: PluginState,
2230
logger: Logger,
23-
client: any
31+
client: any,
32+
config: PluginConfig
2433
): () => void {
2534
const originalGlobalFetch = globalThis.fetch
2635

@@ -40,6 +49,25 @@ export function installFetchWrapper(
4049
// Try each format handler in order
4150
// OpenAI Chat Completions & Anthropic style (body.messages)
4251
if (body.messages && Array.isArray(body.messages)) {
52+
// Inject synth instructions if tool strategies are enabled
53+
if (config.strategies.onTool.length > 0) {
54+
// Inject periodic nudge every N tool results
55+
if (config.nudge_freq > 0) {
56+
if (injectNudge(body.messages, state.toolTracker, NUDGE_INSTRUCTION, config.nudge_freq)) {
57+
logger.debug("fetch", "Injected tool-result nudge", {
58+
toolResultCount: state.toolTracker.toolResultCount
59+
})
60+
modified = true
61+
}
62+
}
63+
64+
// Inject synthInstruction for the context_pruning tool
65+
if (injectSynth(body.messages, SYNTH_INSTRUCTION)) {
66+
logger.debug("fetch", "Injected synthInstruction")
67+
modified = true
68+
}
69+
}
70+
4371
const result = await handleOpenAIChatAndAnthropic(body, ctx, inputUrl)
4472
if (result.modified) {
4573
modified = true

lib/prompt.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,11 @@
1+
import { readFileSync } from "fs"
2+
import { join } from "path"
3+
4+
export function loadPrompt(name: string): string {
5+
const filePath = join(__dirname, "prompts", `${name}.txt`)
6+
return readFileSync(filePath, "utf8").trim()
7+
}
8+
19
function minimizeMessages(messages: any[], alreadyPrunedIds?: string[], protectedToolCallIds?: string[]): any[] {
210
const prunedIdsSet = alreadyPrunedIds ? new Set(alreadyPrunedIds.map(id => id.toLowerCase())) : new Set()
311
const protectedIdsSet = protectedToolCallIds ? new Set(protectedToolCallIds.map(id => id.toLowerCase())) : new Set()

lib/prompts/nudge.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
<instruction name=agent_nudge>
2+
You have accumulated several tool outputs. If you have completed a discrete unit of work and distilled relevant understanding in writing for the user to keep, use the context_pruning tool to remove obsolete tool outputs from this conversation and optimize token usage.
3+
</instruction>

lib/prompts/synthetic.txt

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
<instruction name=signal_management>
2+
After calling a series of tools, YOU ALWAYS HAVE TO distill signals from their results in your next message. This ensures traceability of your reasoning, informs the user of your findings, and is PARAMOUNT to best context window management practices.
3+
4+
THIS IS NON-NEGOTIABLE - YOU ARE EXPECTED TO RESPECT THIS INSTRUCTION THROUGHOUT THE CONVERSATION
5+
</instruction>
6+
7+
<instruction name=context_window_management>
8+
A strong constraint we have in this environment is the context window size. To help keep the conversation focused and clear from the noise, you must use the `context_pruning` tool: at opportune moments, and in an effective manner.
9+
</instruction>
10+
11+
<instruction name=context_pruning>
12+
To effectively manage conversation context, you MUST ALWAYS narrate your findings AS YOU DISCOVER THEM, BEFORE calling any `context_pruning` tool. No tool result (read, bash, grep, webfetch, etc.) should be left unexplained. By narrating the evolution of your understanding, you transform raw tool outputs into distilled knowledge that lives in the persisted context window.
13+
14+
Tools are VOLATILE - Once this distilled knowledge is in your reply, you can safely use the `context_pruning` tool to declutter the conversation.
15+
16+
WHEN TO USE `context_pruning`:
17+
- After you complete a discrete unit of work (e.g. confirming a hypothesis, or closing out one branch of investigation).
18+
- After exploratory bursts of tool calls that led you to a clear conclusion. (or to noise)
19+
- Before starting a new phase of work where old tool outputs are no longer needed to inform your next actions.
20+
21+
CRITICAL:
22+
You must ALWAYS narrate your findings in a message BEFORE using the `context_pruning` tool. Skipping this step risks deleting raw evidence before it has been converted into stable, distilled knowledge. This harms your performance, wastes user time, and undermines effective use of the context window.
23+
24+
EXAMPLE WORKFLOW:
25+
1. You call several tools (read, bash, grep...) to investigate a bug.
26+
2. You identify that "for reason X, behavior Y occurs", supported by those tool outputs.
27+
3. In your next message, you EXPLICITLY narrate:
28+
- What you did (which tools, what you were looking for).
29+
- What you found (the key facts / signals).
30+
- What you concluded (how this affects the task or next step).
31+
>YOU MUST ALWAYS THINK HIGH SIGNAL LOW NOISE FOR THIS NARRATION
32+
4. ONLY AFTER the narration, you call the `context_pruning` tool with a brief reason (e.g. "exploration for bug X complete; moving on to next bug").
33+
</instruction>

lib/state.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
import type { SessionStats } from "./janitor"
2+
import type { ToolTracker } from "./synth-instruction"
3+
import { createToolTracker } from "./synth-instruction"
24

35
/**
46
* Centralized state management for the DCP plugin.
@@ -18,6 +20,8 @@ export interface PluginState {
1820
* Key: sessionID, Value: Map<positionKey, toolCallId> where positionKey is "toolName:index"
1921
*/
2022
googleToolCallMapping: Map<string, Map<string, string>>
23+
/** Tracks tool results for nudge injection */
24+
toolTracker: ToolTracker
2125
}
2226

2327
export interface ToolParameterEntry {
@@ -40,5 +44,6 @@ export function createPluginState(): PluginState {
4044
toolParameters: new Map(),
4145
model: new Map(),
4246
googleToolCallMapping: new Map(),
47+
toolTracker: createToolTracker(),
4348
}
4449
}

lib/synth-instruction.ts

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
export interface ToolTracker {
2+
seenToolResultIds: Set<string>
3+
toolResultCount: number
4+
}
5+
6+
export function createToolTracker(): ToolTracker {
7+
return {
8+
seenToolResultIds: new Set(),
9+
toolResultCount: 0
10+
}
11+
}
12+
13+
function countToolResults(messages: any[], tracker: ToolTracker): number {
14+
let newCount = 0
15+
16+
for (const m of messages) {
17+
if (m.role === 'tool' && m.tool_call_id) {
18+
const id = String(m.tool_call_id).toLowerCase()
19+
if (!tracker.seenToolResultIds.has(id)) {
20+
tracker.seenToolResultIds.add(id)
21+
newCount++
22+
}
23+
} else if (m.role === 'user' && Array.isArray(m.content)) {
24+
for (const part of m.content) {
25+
if (part.type === 'tool_result' && part.tool_use_id) {
26+
const id = String(part.tool_use_id).toLowerCase()
27+
if (!tracker.seenToolResultIds.has(id)) {
28+
tracker.seenToolResultIds.add(id)
29+
newCount++
30+
}
31+
}
32+
}
33+
}
34+
}
35+
36+
tracker.toolResultCount += newCount
37+
return newCount
38+
}
39+
40+
/**
41+
* Counts new tool results and injects nudge instruction every N tool results.
42+
* Returns true if injection happened.
43+
*/
44+
export function injectNudge(
45+
messages: any[],
46+
tracker: ToolTracker,
47+
nudgeText: string,
48+
nudgeFreq: number = 5
49+
): boolean {
50+
const prevCount = tracker.toolResultCount
51+
const newCount = countToolResults(messages, tracker)
52+
53+
if (newCount > 0) {
54+
// Check if we crossed a multiple of nudgeFreq
55+
const prevBucket = Math.floor(prevCount / nudgeFreq)
56+
const newBucket = Math.floor(tracker.toolResultCount / nudgeFreq)
57+
if (newBucket > prevBucket) {
58+
// Inject at the END of messages so it's in immediate context
59+
return appendNudge(messages, nudgeText)
60+
}
61+
}
62+
return false
63+
}
64+
65+
export function isIgnoredUserMessage(msg: any): boolean {
66+
if (!msg || msg.role !== 'user') {
67+
return false
68+
}
69+
70+
// Skip ignored or synthetic messages
71+
if (msg.ignored || msg.info?.ignored || msg.synthetic) {
72+
return true
73+
}
74+
75+
if (Array.isArray(msg.content) && msg.content.length > 0) {
76+
const allPartsIgnored = msg.content.every((part: any) => part?.ignored)
77+
if (allPartsIgnored) {
78+
return true
79+
}
80+
}
81+
82+
return false
83+
}
84+
85+
/**
86+
* Appends a nudge message at the END of the messages array as a new user message.
87+
* This ensures it's in the model's immediate context, not buried in old messages.
88+
*/
89+
function appendNudge(messages: any[], nudgeText: string): boolean {
90+
messages.push({
91+
role: 'user',
92+
content: nudgeText,
93+
synthetic: true
94+
})
95+
return true
96+
}
97+
98+
export function injectSynth(messages: any[], instruction: string): boolean {
99+
// Find the last user message that is not ignored
100+
for (let i = messages.length - 1; i >= 0; i--) {
101+
const msg = messages[i]
102+
if (msg.role === 'user' && !isIgnoredUserMessage(msg)) {
103+
// Avoid double-injecting the same instruction
104+
if (typeof msg.content === 'string') {
105+
if (msg.content.includes(instruction)) {
106+
return false
107+
}
108+
msg.content = msg.content + '\n\n' + instruction
109+
} else if (Array.isArray(msg.content)) {
110+
const alreadyInjected = msg.content.some(
111+
(part: any) => part?.type === 'text' && typeof part.text === 'string' && part.text.includes(instruction)
112+
)
113+
if (alreadyInjected) {
114+
return false
115+
}
116+
msg.content.push({
117+
type: 'text',
118+
text: instruction
119+
})
120+
}
121+
return true
122+
}
123+
}
124+
return false
125+
}

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
"scripts": {
1010
"clean": "rm -rf dist",
1111
"build": "npm run clean && tsc",
12-
"postbuild": "rm -rf dist/logs",
12+
"postbuild": "rm -rf dist/logs && cp -r lib/prompts dist/lib/prompts",
1313
"prepublishOnly": "npm run build",
1414
"dev": "opencode plugin dev",
1515
"typecheck": "tsc --noEmit",

0 commit comments

Comments
 (0)