Skip to content

Commit 2886029

Browse files
committed
feat: add reasoning model support for synthetic message injection
Reasoning models expect their encrypted reasoning parts to appear in assistant messages, and we cannot generate those parts ourselves. This adds detection via the chat.params hook and appends a synthetic user message (<system-context-injection/>) to properly close the assistant turn when a reasoning model is active.
1 parent 16b86c7 commit 2886029

File tree

4 files changed

+53
-0
lines changed

4 files changed

+53
-0
lines changed

index.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import type { Plugin } from "@opencode-ai/plugin"
2+
import type { Model } from "@opencode-ai/sdk"
23
import { getConfig } from "./lib/config"
34
import { Logger } from "./lib/logger"
45
import { loadPrompt } from "./lib/prompt"
@@ -26,6 +27,22 @@ const plugin: Plugin = (async (ctx) => {
2627
})
2728

2829
return {
30+
"chat.params": async (
31+
input: { sessionID: string; agent: string; model: Model; provider: any; message: any },
32+
_output: { temperature: number; topP: number; options: Record<string, any> },
33+
) => {
34+
const isReasoning = input.model.capabilities?.reasoning ?? false
35+
if (state.isReasoningModel !== isReasoning) {
36+
logger.info(
37+
`Reasoning model status changed: ${state.isReasoningModel} -> ${isReasoning}`,
38+
{
39+
modelId: input.model.id,
40+
providerId: input.model.providerID,
41+
},
42+
)
43+
}
44+
state.isReasoningModel = isReasoning
45+
},
2946
"experimental.chat.system.transform": async (
3047
_input: unknown,
3148
output: { system: string[] },

lib/messages/prune.ts

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,14 @@ Context management was just performed. Do not use the ${toolName} again. A fresh
4848

4949
const SYNTHETIC_MESSAGE_ID = "msg_01234567890123456789012345"
5050
const SYNTHETIC_PART_ID = "prt_01234567890123456789012345"
51+
const SYNTHETIC_USER_MESSAGE_ID = "msg_01234567890123456789012346"
52+
const SYNTHETIC_USER_PART_ID = "prt_01234567890123456789012346"
53+
54+
// Content for the synthetic user message appended after the assistant message for reasoning models.
55+
// This is required because reasoning models expect their reasoning parts in assistant messages,
56+
// and we cannot generate those encrypted/proprietary parts. By closing the assistant turn with
57+
// a user message, the model sees a complete conversation structure.
58+
const REASONING_MODEL_USER_MESSAGE_CONTENT = "<system-context-injection/>"
5159

5260
const buildPrunableToolsList = (
5361
state: SessionState,
@@ -158,6 +166,31 @@ export const insertPruneToolContext = (
158166
}
159167

160168
messages.push(assistantMessage)
169+
170+
// For reasoning models, append a synthetic user message to close the assistant turn.
171+
// This is required because reasoning models expect their reasoning parts in the last
172+
// assistant message, which we cannot generate. The user message signals a complete turn.
173+
if (state.isReasoningModel) {
174+
const userMessage: WithParts = {
175+
info: {
176+
id: SYNTHETIC_USER_MESSAGE_ID,
177+
sessionID: assistantInfo.sessionID,
178+
role: "user",
179+
time: { created: Date.now() + 1 },
180+
} as any, // Using 'as any' because we're creating a minimal synthetic message
181+
parts: [
182+
{
183+
id: SYNTHETIC_USER_PART_ID,
184+
sessionID: assistantInfo.sessionID,
185+
messageID: SYNTHETIC_USER_MESSAGE_ID,
186+
type: "text",
187+
text: REASONING_MODEL_USER_MESSAGE_CONTENT,
188+
},
189+
],
190+
}
191+
messages.push(userMessage)
192+
logger.debug("Appended synthetic user message for reasoning model")
193+
}
161194
}
162195

163196
export const prune = (

lib/state/state.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ export function createSessionState(): SessionState {
5555
lastToolPrune: false,
5656
lastCompaction: 0,
5757
currentTurn: 0,
58+
isReasoningModel: false,
5859
}
5960
}
6061

@@ -73,6 +74,7 @@ export function resetSessionState(state: SessionState): void {
7374
state.lastToolPrune = false
7475
state.lastCompaction = 0
7576
state.currentTurn = 0
77+
state.isReasoningModel = false
7678
}
7779

7880
export async function ensureSessionInitialized(

lib/state/types.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,4 +34,5 @@ export interface SessionState {
3434
lastToolPrune: boolean
3535
lastCompaction: number
3636
currentTurn: number // Current turn count derived from step-start parts
37+
isReasoningModel: boolean // Whether the current model has reasoning capabilities
3738
}

0 commit comments

Comments
 (0)