Skip to content

Commit e1af388

Browse files
committed
feat(openai-native): always send instructions with Responses API; remove developer/system role injection from input
- Always set top-level `instructions` for every Responses API call (not just when using `previous_response_id`)
- Remove embedding the system prompt as a developer role message in `input` for Responses API
- Keep SDK request builder gating: only include `temperature` when the model supports it
- Update provider tests to assert `instructions` presence and absence of developer/system role in input

feat(ui): gate TemperatureControl by model capability

- Only render the Advanced Settings temperature control when `selectedModelInfo.supportsTemperature !== false`
- Avoids surfacing a no-op UI for models that ignore temperature (e.g., o-series / certain GPT-5)
- All related UI tests pass

Rationale:
- Responses API treats roles in `input` as free-form; the only standardized way to set system behavior is the top-level `instructions` field.
- UI should reflect model capabilities to prevent user confusion; backend already omits temperature when unsupported, now the UI matches that behavior.
1 parent c2ec3d9 commit e1af388

File tree

6 files changed

+6654
-32
lines changed

6 files changed

+6654
-32
lines changed

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -219,12 +219,9 @@ describe("OpenAiNativeHandler", () => {
219219
const body1 = (mockFetch.mock.calls[0][1] as any).body as string
220220
const parsedBody = JSON.parse(body1)
221221
expect(parsedBody.model).toBe("gpt-5-2025-08-07")
222-
// Now using structured format with content arrays
222+
expect(parsedBody.instructions).toBe("You are a helpful assistant.")
223+
// Now using structured format with content arrays (no system prompt in input; it's provided via `instructions`)
223224
expect(parsedBody.input).toEqual([
224-
{
225-
role: "developer",
226-
content: [{ type: "input_text", text: "You are a helpful assistant." }],
227-
},
228225
{
229226
role: "user",
230227
content: [{ type: "input_text", text: "Hello!" }],
@@ -872,11 +869,8 @@ describe("OpenAiNativeHandler", () => {
872869

873870
// Verify first request sends full conversation in structured format
874871
let firstCallBody = JSON.parse(mockFetch.mock.calls[0][1].body)
872+
expect(firstCallBody.instructions).toBe(systemPrompt)
875873
expect(firstCallBody.input).toEqual([
876-
{
877-
role: "developer",
878-
content: [{ type: "input_text", text: systemPrompt }],
879-
},
880874
{
881875
role: "user",
882876
content: [{ type: "input_text", text: "Hello" }],
@@ -1224,11 +1218,8 @@ describe("GPT-5 streaming event coverage (additional)", () => {
12241218
const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
12251219
expect(requestBody).toMatchObject({
12261220
model: "codex-mini-latest",
1221+
instructions: "You are a helpful coding assistant.",
12271222
input: [
1228-
{
1229-
role: "developer",
1230-
content: [{ type: "input_text", text: "You are a helpful coding assistant." }],
1231-
},
12321223
{
12331224
role: "user",
12341225
content: [{ type: "input_text", text: "Write a hello world function" }],
@@ -1321,13 +1312,10 @@ describe("GPT-5 streaming event coverage (additional)", () => {
13211312
chunks.push(chunk)
13221313
}
13231314

1324-
// Verify the request body includes full conversation in structured format
1315+
// Verify the request body includes full conversation in structured format (without embedding system prompt)
13251316
const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
1317+
expect(requestBody.instructions).toBe("You are a helpful assistant.")
13261318
expect(requestBody.input).toEqual([
1327-
{
1328-
role: "developer",
1329-
content: [{ type: "input_text", text: "You are a helpful assistant." }],
1330-
},
13311319
{
13321320
role: "user",
13331321
content: [{ type: "input_text", text: "First question" }],

src/api/providers/openai-native.ts

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -214,9 +214,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
214214
input: formattedInput,
215215
stream: true,
216216
store: metadata?.store !== false, // Default to true unless explicitly set to false
217-
// Always include instructions (system prompt) when using previous_response_id
218-
// This ensures the system prompt stays up-to-date even if it changes (e.g., mode switch)
219-
...(requestPreviousResponseId && { instructions: systemPrompt }),
217+
// Always include instructions (system prompt) for Responses API.
218+
// Unlike Chat Completions, system/developer roles in input have no special semantics here.
219+
// The official way to set system behavior is the top-level `instructions` field.
220+
instructions: systemPrompt,
220221
...(reasoningEffort && {
221222
reasoning: {
222223
effort: reasoningEffort,
@@ -316,11 +317,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
316317
// This supports both text and images
317318
const formattedMessages: any[] = []
318319

319-
// Add system prompt as developer message
320-
formattedMessages.push({
321-
role: "developer",
322-
content: [{ type: "input_text", text: systemPrompt }],
323-
})
320+
// Do NOT embed the system prompt as a developer message in the Responses API input.
321+
// The Responses API treats roles as free-form; use the top-level `instructions` field instead.
324322

325323
// Process each message
326324
for (const message of messages) {

src/core/task/Task.ts

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -579,6 +579,18 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
579579

580580
public async overwriteClineMessages(newMessages: ClineMessage[]) {
581581
this.clineMessages = newMessages
582+
583+
// If deletion or history truncation leaves a condense_context as the last message,
584+
// ensure the next API call suppresses previous_response_id so the condensed context is respected.
585+
try {
586+
const last = this.clineMessages.at(-1)
587+
if (last && last.type === "say" && last.say === "condense_context") {
588+
this.skipPrevResponseIdOnce = true
589+
}
590+
} catch {
591+
// non-fatal
592+
}
593+
582594
restoreTodoListForTask(this)
583595
await this.saveClineMessages()
584596
}
@@ -2633,4 +2645,4 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
26332645
public get taskAsk(): ClineMessage | undefined {
26342646
return this.idleAsk || this.resumableAsk || this.interactiveAsk
26352647
}
2636-
}
2648+
}

src/core/task/__tests__/Task.spec.ts

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1615,6 +1615,69 @@ describe("Cline", () => {
16151615
})
16161616
})
16171617

1618+
describe("Conversation continuity after condense and deletion", () => {
1619+
it("should set suppressPreviousResponseId when last message is condense_context", async () => {
1620+
// Arrange: create task
1621+
const task = new Task({
1622+
provider: mockProvider,
1623+
apiConfiguration: mockApiConfig,
1624+
task: "initial task",
1625+
startTask: false,
1626+
})
1627+
1628+
// Ensure provider state returns required fields for attemptApiRequest
1629+
mockProvider.getState = vi.fn().mockResolvedValue({
1630+
apiConfiguration: mockApiConfig,
1631+
})
1632+
1633+
// Simulate deletion that leaves a condense_context as the last message
1634+
const condenseMsg = {
1635+
ts: Date.now(),
1636+
type: "say" as const,
1637+
say: "condense_context" as const,
1638+
contextCondense: {
1639+
summary: "summarized",
1640+
cost: 0.001,
1641+
prevContextTokens: 1200,
1642+
newContextTokens: 400,
1643+
},
1644+
}
1645+
await task.overwriteClineMessages([condenseMsg])
1646+
1647+
// Spy and return a minimal successful stream to exercise attemptApiRequest
1648+
const mockStream = {
1649+
async *[Symbol.asyncIterator]() {
1650+
yield { type: "text", text: "ok" }
1651+
},
1652+
async next() {
1653+
return { done: true, value: { type: "text", text: "ok" } }
1654+
},
1655+
async return() {
1656+
return { done: true, value: undefined }
1657+
},
1658+
async throw(e: any) {
1659+
throw e
1660+
},
1661+
[Symbol.asyncDispose]: async () => {},
1662+
} as AsyncGenerator<ApiStreamChunk>
1663+
1664+
const createMessageSpy = vi.spyOn(task.api, "createMessage").mockReturnValue(mockStream)
1665+
1666+
// Act: initiate an API request
1667+
const iterator = task.attemptApiRequest(0)
1668+
await iterator.next() // read first chunk to ensure call happened
1669+
1670+
// Assert: metadata includes suppressPreviousResponseId set to true
1671+
expect(createMessageSpy).toHaveBeenCalled()
1672+
const callArgs = createMessageSpy.mock.calls[0]
1673+
// Args: [systemPrompt, cleanConversationHistory, metadata]
1674+
const metadata = callArgs?.[2]
1675+
expect(metadata?.suppressPreviousResponseId).toBe(true)
1676+
1677+
// The skip flag should be reset after the call
1678+
expect((task as any).skipPrevResponseIdOnce).toBe(false)
1679+
})
1680+
})
16181681
describe("abortTask", () => {
16191682
it("should set abort flag and emit TaskAborted event", async () => {
16201683
const task = new Task({

0 commit comments

Comments (0)