
Commit 52c6b8b

refactor: improve code quality based on PR review
- Remove console.log statements from Task.ts (use comments instead)
- Fix method naming confusion in openai-native.ts:
  - Renamed formatStructuredInput to formatFullConversation (clearer purpose)
  - Renamed prepareStructuredInput to prepareResponsesApiInput (clearer purpose)
- Add proper TypeScript types for GPT-5 metadata (new types.ts file)
- Fix error message inconsistency (GPT-5 -> Responses API)
- Extract magic string 'gpt-5' to constant GPT5_MODEL_PREFIX
- Remove dead code (isResponsesApiModel method that always returned true)
- Improve JSDoc for store parameter with detailed explanation
- Update tests to use new method names

All tests passing (30/30)
1 parent 03cf158 commit 52c6b8b

5 files changed: +100 additions, −49 deletions

src/api/index.ts

Lines changed: 5 additions & 2 deletions
@@ -53,8 +53,11 @@ export interface ApiHandlerCreateMessageMetadata {
 	 */
 	suppressPreviousResponseId?: boolean
 	/**
-	 * Controls whether the response should be stored for 30 days (OpenAI Responses API).
-	 * Defaults to true if not specified.
+	 * Controls whether the response should be stored for 30 days in OpenAI's Responses API.
+	 * When true (default), responses are stored and can be referenced in future requests
+	 * using the previous_response_id for efficient conversation continuity.
+	 * Set to false to opt out of response storage for privacy or compliance reasons.
+	 * @default true
 	 */
 	store?: boolean
 }
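
For illustration only, a minimal sketch of how a caller could opt out of response storage via this flag. The import path and the call site are assumptions; only the `store` and `taskId` fields come from the interface shown in the diff above.

import type { ApiHandlerCreateMessageMetadata } from "../../api" // import path assumed

// Opt out of OpenAI's 30-day response storage for a privacy-sensitive task.
// Omitting `store` keeps the default of true, which enables conversation
// continuity via previous_response_id.
const metadata: ApiHandlerCreateMessageMetadata = {
	taskId: "task-privacy-1", // illustrative value
	store: false,
}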

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 8 additions & 4 deletions
@@ -928,10 +928,14 @@ describe("OpenAiNativeHandler", () => {

 		// Test with metadata that has previousResponseId
 		// @ts-expect-error - private method
-		const { formattedInput, previousResponseId } = gpt5Handler.prepareStructuredInput(systemPrompt, messages, {
-			taskId: "task1",
-			previousResponseId: "resp_123",
-		})
+		const { formattedInput, previousResponseId } = gpt5Handler.prepareResponsesApiInput(
+			systemPrompt,
+			messages,
+			{
+				taskId: "task1",
+				previousResponseId: "resp_123",
+			},
+		)

 		expect(previousResponseId).toBe("resp_123")
 		expect(formattedInput).toEqual([

src/api/providers/openai-native.ts

Lines changed: 24 additions & 21 deletions
@@ -28,6 +28,9 @@ export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>

 // GPT-5 specific types

+// Constants for model identification
+const GPT5_MODEL_PREFIX = "gpt-5"
+
 export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
@@ -155,7 +158,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		}

 		// Format input and capture continuity id
-		const { formattedInput, previousResponseId } = this.prepareStructuredInput(systemPrompt, messages, metadata)
+		const { formattedInput, previousResponseId } = this.prepareResponsesApiInput(systemPrompt, messages, metadata)
 		const requestPreviousResponseId = effectivePreviousResponseId || previousResponseId

 		// Create a new promise for this request's response ID
@@ -222,7 +225,9 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			...(model.info.supportsTemperature !== false && {
 				temperature:
 					this.options.modelTemperature ??
-					(model.id.startsWith("gpt-5") ? GPT5_DEFAULT_TEMPERATURE : OPENAI_NATIVE_DEFAULT_TEMPERATURE),
+					(model.id.startsWith(GPT5_MODEL_PREFIX)
+						? GPT5_DEFAULT_TEMPERATURE
+						: OPENAI_NATIVE_DEFAULT_TEMPERATURE),
 			}),
 			// Explicitly include the calculated max output tokens.
 			// Use the per-request reserved output computed by Roo (params.maxTokens from getModelParams).
@@ -261,7 +266,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			if (is400Error && requestBody.previous_response_id && isPreviousResponseError) {
 				// Log the error and retry without the previous_response_id
 				console.warn(
-					`[GPT-5] Previous response ID not found (${requestBody.previous_response_id}), retrying without it`,
+					`[Responses API] Previous response ID not found (${requestBody.previous_response_id}), retrying without it`,
 				)

 				// Remove the problematic previous_response_id and retry
@@ -301,8 +306,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		}
 	}

-	private formatStructuredInput(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): any {
-		// Format the conversation for the Responses API using structured format
+	private formatFullConversation(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): any {
+		// Format the entire conversation history for the Responses API using structured format
 		// This supports both text and images
 		const formattedMessages: any[] = []

@@ -526,7 +531,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 	}

 	/**
-	 * Prepares the structured input and conversation continuity parameters for a Responses API call.
+	 * Prepares the input and conversation continuity parameters for a Responses API call.
+	 * Decides whether to send full conversation or just the latest message based on previousResponseId.
 	 *
 	 * - If a `previousResponseId` is available (either from metadata or the handler's state),
 	 *   it formats only the most recent user message for the input and returns the response ID
@@ -535,7 +541,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 	 *
 	 * @returns An object containing the formatted input and the previous response ID (if used).
 	 */
-	private prepareStructuredInput(
+	private prepareResponsesApiInput(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,
@@ -560,7 +566,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			return { formattedInput: [], previousResponseId }
 		} else {
 			// Format full conversation history (returns an array of structured messages)
-			const formattedInput = this.formatStructuredInput(systemPrompt, messages)
+			const formattedInput = this.formatFullConversation(systemPrompt, messages)
 			return { formattedInput }
 		}
 	}
@@ -1108,10 +1114,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		return info.reasoningEffort as ReasoningEffortWithMinimal | undefined
 	}

-	private isResponsesApiModel(modelId: string): boolean {
-		// ALL models now use the Responses API
-		return true
-	}
+	// Removed isResponsesApiModel method as ALL models now use the Responses API

 	override getModel() {
 		const modelId = this.options.apiModelId
@@ -1126,18 +1129,18 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			modelId: id,
 			model: info,
 			settings: this.options,
-			defaultTemperature: id.startsWith("gpt-5") ? GPT5_DEFAULT_TEMPERATURE : OPENAI_NATIVE_DEFAULT_TEMPERATURE,
+			defaultTemperature: id.startsWith(GPT5_MODEL_PREFIX)
+				? GPT5_DEFAULT_TEMPERATURE
+				: OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 		})

-		// For models using the Responses API (GPT-5 and Codex Mini), ensure we support reasoning effort
-		if (this.isResponsesApiModel(id)) {
-			const effort =
-				(this.options.reasoningEffort as ReasoningEffortWithMinimal | undefined) ??
-				(info.reasoningEffort as ReasoningEffortWithMinimal | undefined)
+		// For models using the Responses API, ensure we support reasoning effort
+		const effort =
+			(this.options.reasoningEffort as ReasoningEffortWithMinimal | undefined) ??
+			(info.reasoningEffort as ReasoningEffortWithMinimal | undefined)

-		if (effort) {
-			;(params.reasoning as any) = { reasoning_effort: effort }
-		}
+		if (effort) {
+			;(params.reasoning as any) = { reasoning_effort: effort }
 		}

 		// The o3 models are named like "o3-mini-[reasoning-effort]", which are
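
As a reading aid, here is a minimal sketch of the branch prepareResponsesApiInput takes, based on its JSDoc in the diff above. The helper name and parameter shapes are illustrative, not part of the commit:

// Sketch: with a previous_response_id the Responses API already holds the
// earlier turns server-side, so only the newest user message is sent;
// otherwise the full formatted conversation goes out.
interface PreparedInput {
	formattedInput: unknown[]
	previousResponseId?: string
}

function sketchPrepareInput(
	previousResponseId: string | undefined,
	latestUserMessage: unknown,
	fullConversation: unknown[],
): PreparedInput {
	if (previousResponseId) {
		// Continuity path: send only the latest user message plus the id.
		return { formattedInput: [latestUserMessage], previousResponseId }
	}
	// No continuity id: send the whole formatted conversation history.
	return { formattedInput: fullConversation }
}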

src/core/task/Task.ts

Lines changed: 26 additions & 22 deletions
@@ -99,6 +99,7 @@ import { getMessagesSinceLastSummary, summarizeConversation } from "../condense"
 import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
 import { restoreTodoListForTask } from "../tools/updateTodoListTool"
 import { AutoApprovalHandler } from "./AutoApprovalHandler"
+import { Gpt5Metadata, ClineMessageWithMetadata } from "./types"

 const MAX_EXPONENTIAL_BACKOFF_SECONDS = 600 // 10 minutes

@@ -711,9 +712,8 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			this.emit(RooCodeEventName.TaskIdle, this.taskId)
 		}

-		console.log(`[Task#${this.taskId}] pWaitFor askResponse(${type}) -> blocking`)
+		// Wait for askResponse to be set
 		await pWaitFor(() => this.askResponse !== undefined || this.lastMessageTs !== askTs, { interval: 100 })
-		console.log(`[Task#${this.taskId}] pWaitFor askResponse(${type}) -> unblocked (${this.askResponse})`)

 		if (this.lastMessageTs !== askTs) {
 			// Could happen if we send multiple asks in a row i.e. with
@@ -1012,7 +1012,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		let imageBlocks: Anthropic.ImageBlockParam[] = formatResponse.imageBlocks(images)

-		console.log(`[subtasks] task ${this.taskId}.${this.instanceId} starting`)
+		// Task starting

 		await this.initiateTaskLoop([
 			{
@@ -1048,7 +1048,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	private async resumeTaskFromHistory() {
-		console.log(`[Task#${this.taskId}] Resuming task from history`)
+		// Resuming task from history

 		if (this.enableTaskBridge) {
 			try {
@@ -1068,10 +1068,13 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		// Check for any stored GPT-5 response IDs in the message history
 		const gpt5Messages = modifiedClineMessages.filter(
-			(m) => m.type === "say" && (m as any).say === "text" && (m as any).metadata?.gpt5?.previous_response_id,
+			(m) =>
+				m.type === "say" &&
+				m.say === "text" &&
+				(m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id,
 		)
 		if (gpt5Messages.length > 0) {
-			const lastGpt5Message = gpt5Messages[gpt5Messages.length - 1] as any
+			const lastGpt5Message = gpt5Messages[gpt5Messages.length - 1] as ClineMessage & ClineMessageWithMetadata
 		}

 		// Remove any resume messages that may have been added before
@@ -1303,13 +1306,13 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		await this.overwriteApiConversationHistory(modifiedApiConversationHistory)

-		console.log(`[subtasks] task ${this.taskId}.${this.instanceId} resuming from history item`)
+		// Task resuming from history item

 		await this.initiateTaskLoop(newUserContent)
 	}

 	public dispose(): void {
-		console.log(`[Task] disposing task ${this.taskId}.${this.instanceId}`)
+		// Disposing task

 		// Stop waiting for child task completion.
 		if (this.pauseInterval) {
@@ -1372,7 +1375,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	public async abortTask(isAbandoned = false) {
-		console.log(`[subtasks] aborting task ${this.taskId}.${this.instanceId}`)
+		// Aborting task

 		// Will stop any autonomously running promises.
 		if (isAbandoned) {
@@ -1612,7 +1615,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			// lastMessage.ts = Date.now() DO NOT update ts since it is used as a key for virtuoso list
 			lastMessage.partial = false
 			// instead of streaming partialMessage events, we do a save and post like normal to persist to disk
-			console.log("updating partial message", lastMessage)
+			// updating partial message
 			// await this.saveClineMessages()
 		}

@@ -1713,7 +1716,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		}

 		if (this.abort) {
-			console.log(`aborting stream, this.abandoned = ${this.abandoned}`)
+			// Aborting stream

 			if (!this.abandoned) {
 				// Only need to gracefully abort if this instance
@@ -2140,18 +2143,16 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					this.clineMessages,
 					(m) =>
 						m.type === "say" &&
-						(m as any).say === "text" &&
-						(m as any).metadata?.gpt5?.previous_response_id,
+						m.say === "text" &&
+						!!(m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id,
 				)
 				if (idx !== -1) {
 					// Use the previous_response_id from the last assistant message for this request
-					previousResponseId = ((this.clineMessages[idx] as any).metadata.gpt5.previous_response_id ||
-						undefined) as string | undefined
+					const message = this.clineMessages[idx] as ClineMessage & ClineMessageWithMetadata
+					previousResponseId = message.metadata?.gpt5?.previous_response_id
 				}
 			} else if (this.skipPrevResponseIdOnce) {
-				console.log(
-					`[Task#${this.taskId}] Skipping previous_response_id due to recent condense operation - will send full conversation context`,
-				)
+				// Skipping previous_response_id due to recent condense operation - will send full conversation context
 			}
 		} catch (error) {
 			console.error(`[Task#${this.taskId}] Error retrieving GPT-5 response ID:`, error)
@@ -2327,17 +2328,20 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			const lastResponseId: string | undefined = (this.api as any)?.getLastResponseId?.()
 			const idx = findLastIndex(
 				this.clineMessages,
-				(m) => m.type === "say" && (m as any).say === "text" && m.partial !== true,
+				(m) => m.type === "say" && m.say === "text" && m.partial !== true,
 			)
 			if (idx !== -1) {
-				const msg = this.clineMessages[idx] as any
-				msg.metadata = msg.metadata ?? {}
-				msg.metadata.gpt5 = {
+				const msg = this.clineMessages[idx] as ClineMessage & ClineMessageWithMetadata
+				if (!msg.metadata) {
+					msg.metadata = {}
+				}
+				const gpt5Metadata: Gpt5Metadata = {
 					...(msg.metadata.gpt5 ?? {}),
 					previous_response_id: lastResponseId,
 					instructions: this.lastUsedInstructions,
 					reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
 				}
+				msg.metadata.gpt5 = gpt5Metadata
 			}
 		} catch (error) {
 			console.error(`[Task#${this.taskId}] Error persisting GPT-5 metadata:`, error)
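
For reference, a hedged example of the metadata object the block above persists onto the last complete "say"/"text" message. The field names come from Gpt5Metadata in the new types.ts; the values are invented for illustration:

import type { Gpt5Metadata } from "./types"

// Illustrative values only — real values come from the API response.
const exampleGpt5Metadata: Gpt5Metadata = {
	previous_response_id: "resp_123",
	instructions: "<system prompt in effect when the response was generated>",
	reasoning_summary: "<model reasoning summary, if reasoning mode was enabled>",
}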

src/core/task/types.ts

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+/**
+ * Type definitions for Task-related metadata
+ */
+
+/**
+ * GPT-5 specific metadata stored with assistant messages
+ * for maintaining conversation continuity across requests
+ */
+export interface Gpt5Metadata {
+	/**
+	 * The response ID from the previous GPT-5 API response
+	 * Used to maintain conversation continuity in subsequent requests
+	 */
+	previous_response_id?: string
+
+	/**
+	 * The system instructions/prompt used for this response
+	 * Stored to track what instructions were active when the response was generated
+	 */
+	instructions?: string
+
+	/**
+	 * The reasoning summary from GPT-5's reasoning process
+	 * Contains the model's internal reasoning if reasoning mode was enabled
+	 */
+	reasoning_summary?: string
+}
+
+/**
+ * Extended ClineMessage type with GPT-5 metadata
+ */
+export interface ClineMessageWithMetadata {
+	metadata?: {
+		gpt5?: Gpt5Metadata
+		[key: string]: any
+	}
+}
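
A possible usage sketch: a type guard that narrows a message to one carrying GPT-5 continuity metadata, in place of the inline casts used in Task.ts. The guard itself and the ClineMessage import path are assumptions, not part of this commit:

import type { ClineMessage } from "@roo-code/types" // import path assumed
import type { ClineMessageWithMetadata } from "./types"

// Narrow to messages that actually carry a previous_response_id.
function hasGpt5ResponseId(m: ClineMessage): m is ClineMessage & ClineMessageWithMetadata {
	return typeof (m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id === "string"
}

// Example (hypothetical call site):
// const idx = findLastIndex(
// 	clineMessages,
// 	(m) => m.type === "say" && m.say === "text" && hasGpt5ResponseId(m),
// )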
