Commit 516f7c3
refactor: improve code quality based on PR review
- Remove console.log statements from Task.ts (use comments instead)
- Fix method naming confusion in openai-native.ts:
  - Renamed formatStructuredInput to formatFullConversation (clearer purpose)
  - Renamed prepareStructuredInput to prepareResponsesApiInput (clearer purpose)
- Add proper TypeScript types for GPT-5 metadata (new types.ts file)
- Fix error message inconsistency (GPT-5 -> Responses API)
- Extract magic string 'gpt-5' to constant GPT5_MODEL_PREFIX
- Remove dead code (isResponsesApiModel method that always returned true)
- Improve JSDoc for store parameter with detailed explanation
- Update tests to use new method names

All tests passing (30/30)
1 parent: aa6ddbd · commit: 516f7c3

File tree

5 files changed: +98 −51 lines


src/api/index.ts

Lines changed: 5 additions & 2 deletions

@@ -53,8 +53,11 @@ export interface ApiHandlerCreateMessageMetadata {
 	 */
 	suppressPreviousResponseId?: boolean
 	/**
-	 * Controls whether the response should be stored for 30 days (OpenAI Responses API).
-	 * Defaults to true if not specified.
+	 * Controls whether the response should be stored for 30 days in OpenAI's Responses API.
+	 * When true (default), responses are stored and can be referenced in future requests
+	 * using the previous_response_id for efficient conversation continuity.
+	 * Set to false to opt out of response storage for privacy or compliance reasons.
+	 * @default true
 	 */
 	store?: boolean
 }
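Note: the expanded JSDoc above pairs with previousResponseId for conversation continuity. A minimal sketch of a call site that opts out of storage (the relevant fields are mirrored inline here and their optionality is assumed; the call site itself is hypothetical, not part of this commit):

    // Mirror of the relevant fields from ApiHandlerCreateMessageMetadata above.
    interface ApiHandlerCreateMessageMetadata {
        taskId: string
        previousResponseId?: string
        suppressPreviousResponseId?: boolean
        store?: boolean
    }

    const metadata: ApiHandlerCreateMessageMetadata = {
        taskId: "task1",
        store: false, // opt out of OpenAI's 30-day server-side response storage
    }
    // With store: false there is no stored response to reference later, so
    // follow-up requests cannot rely on previous_response_id and must resend
    // the full conversation history.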

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 8 additions & 4 deletions

@@ -928,10 +928,14 @@ describe("OpenAiNativeHandler", () => {

 		// Test with metadata that has previousResponseId
 		// @ts-expect-error - private method
-		const { formattedInput, previousResponseId } = gpt5Handler.prepareStructuredInput(systemPrompt, messages, {
-			taskId: "task1",
-			previousResponseId: "resp_123",
-		})
+		const { formattedInput, previousResponseId } = gpt5Handler.prepareResponsesApiInput(
+			systemPrompt,
+			messages,
+			{
+				taskId: "task1",
+				previousResponseId: "resp_123",
+			},
+		)

 		expect(previousResponseId).toBe("resp_123")
 		expect(formattedInput).toEqual([
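Aside: the `@ts-expect-error` comment is what lets the spec reach a private method; a self-contained illustration of the pattern with a toy class (not from this codebase):

    class Greeter {
        private greet(name: string): string {
            return `hello ${name}`
        }
    }

    const g = new Greeter()
    // @ts-expect-error - private method, deliberately accessed in tests only;
    // TypeScript's `private` is erased at runtime, so the call still works.
    const out: string = g.greet("world")
    console.log(out) // "hello world"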

src/api/providers/openai-native.ts

Lines changed: 24 additions & 21 deletions

@@ -28,6 +28,9 @@ export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>

 // GPT-5 specific types

+// Constants for model identification
+const GPT5_MODEL_PREFIX = "gpt-5"
+
 export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI

@@ -155,7 +158,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 		}

 		// Format input and capture continuity id
-		const { formattedInput, previousResponseId } = this.prepareStructuredInput(systemPrompt, messages, metadata)
+		const { formattedInput, previousResponseId } = this.prepareResponsesApiInput(systemPrompt, messages, metadata)
 		const requestPreviousResponseId = effectivePreviousResponseId || previousResponseId

 		// Create a new promise for this request's response ID

@@ -222,7 +225,9 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 			...(model.info.supportsTemperature !== false && {
 				temperature:
 					this.options.modelTemperature ??
-					(model.id.startsWith("gpt-5") ? GPT5_DEFAULT_TEMPERATURE : OPENAI_NATIVE_DEFAULT_TEMPERATURE),
+					(model.id.startsWith(GPT5_MODEL_PREFIX)
+						? GPT5_DEFAULT_TEMPERATURE
+						: OPENAI_NATIVE_DEFAULT_TEMPERATURE),
 			}),
 			// Explicitly include the calculated max output tokens.
 			// Use the per-request reserved output computed by Roo (params.maxTokens from getModelParams).

@@ -261,7 +266,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 			if (is400Error && requestBody.previous_response_id && isPreviousResponseError) {
 				// Log the error and retry without the previous_response_id
 				console.warn(
-					`[GPT-5] Previous response ID not found (${requestBody.previous_response_id}), retrying without it`,
+					`[Responses API] Previous response ID not found (${requestBody.previous_response_id}), retrying without it`,
 				)

 				// Remove the problematic previous_response_id and retry

@@ -301,8 +306,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 		}
 	}

-	private formatStructuredInput(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): any {
-		// Format the conversation for the Responses API using structured format
+	private formatFullConversation(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): any {
+		// Format the entire conversation history for the Responses API using structured format
 		// This supports both text and images
 		const formattedMessages: any[] = []

@@ -526,7 +531,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 	}

 	/**
-	 * Prepares the structured input and conversation continuity parameters for a Responses API call.
+	 * Prepares the input and conversation continuity parameters for a Responses API call.
+	 * Decides whether to send full conversation or just the latest message based on previousResponseId.
 	 *
 	 * - If a `previousResponseId` is available (either from metadata or the handler's state),
 	 *   it formats only the most recent user message for the input and returns the response ID

@@ -535,7 +541,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 	 *
 	 * @returns An object containing the formatted input and the previous response ID (if used).
 	 */
-	private prepareStructuredInput(
+	private prepareResponsesApiInput(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,

@@ -560,7 +566,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 			return { formattedInput: [], previousResponseId }
 		} else {
 			// Format full conversation history (returns an array of structured messages)
-			const formattedInput = this.formatStructuredInput(systemPrompt, messages)
+			const formattedInput = this.formatFullConversation(systemPrompt, messages)
 			return { formattedInput }
 		}
 	}

@@ -1108,10 +1114,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 		return info.reasoningEffort as ReasoningEffortWithMinimal | undefined
 	}

-	private isResponsesApiModel(modelId: string): boolean {
-		// ALL models now use the Responses API
-		return true
-	}
+	// Removed isResponsesApiModel method as ALL models now use the Responses API

 	override getModel() {
 		const modelId = this.options.apiModelId

@@ -1126,18 +1129,18 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 			modelId: id,
 			model: info,
 			settings: this.options,
-			defaultTemperature: id.startsWith("gpt-5") ? GPT5_DEFAULT_TEMPERATURE : OPENAI_NATIVE_DEFAULT_TEMPERATURE,
+			defaultTemperature: id.startsWith(GPT5_MODEL_PREFIX)
+				? GPT5_DEFAULT_TEMPERATURE
+				: OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 		})

-		// For models using the Responses API (GPT-5 and Codex Mini), ensure we support reasoning effort
-		if (this.isResponsesApiModel(id)) {
-			const effort =
-				(this.options.reasoningEffort as ReasoningEffortWithMinimal | undefined) ??
-				(info.reasoningEffort as ReasoningEffortWithMinimal | undefined)
+		// For models using the Responses API, ensure we support reasoning effort
+		const effort =
+			(this.options.reasoningEffort as ReasoningEffortWithMinimal | undefined) ??
+			(info.reasoningEffort as ReasoningEffortWithMinimal | undefined)

-			if (effort) {
-				;(params.reasoning as any) = { reasoning_effort: effort }
-			}
+		if (effort) {
+			;(params.reasoning as any) = { reasoning_effort: effort }
 		}

 		// The o3 models are named like "o3-mini-[reasoning-effort]", which are
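For orientation, a condensed sketch of the branching that prepareResponsesApiInput's JSDoc describes (names and shapes simplified; the real method also resolves the ID from handler state and builds structured message objects, not plain strings):

    type Metadata = { previousResponseId?: string; suppressPreviousResponseId?: boolean }

    // Simplified: with a continuity ID the API recalls prior turns server-side,
    // so only the latest user message needs to be sent; without one, the full
    // conversation history is formatted and sent.
    function prepareInput(conversation: string[], metadata?: Metadata) {
        const previousResponseId = metadata?.suppressPreviousResponseId
            ? undefined
            : metadata?.previousResponseId
        if (previousResponseId) {
            return { formattedInput: conversation.slice(-1), previousResponseId }
        }
        return { formattedInput: conversation }
    }

    console.log(prepareInput(["first ask", "follow-up"], { previousResponseId: "resp_123" }))
    // -> { formattedInput: ["follow-up"], previousResponseId: "resp_123" }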

src/core/task/Task.ts

Lines changed: 24 additions & 24 deletions

@@ -102,6 +102,7 @@ import { getMessagesSinceLastSummary, summarizeConversation } from "../condense"
 import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
 import { restoreTodoListForTask } from "../tools/updateTodoListTool"
 import { AutoApprovalHandler } from "./AutoApprovalHandler"
+import { Gpt5Metadata, ClineMessageWithMetadata } from "./types"

 const MAX_EXPONENTIAL_BACKOFF_SECONDS = 600 // 10 minutes
 const DEFAULT_USAGE_COLLECTION_TIMEOUT_MS = 5000 // 5 seconds

@@ -773,14 +774,9 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			}
 		}

-		console.log(
-			`[Task#${this.taskId}] pWaitFor askResponse(${type}) -> blocking (isStatusMutable = ${isStatusMutable}, statusMutationTimeouts = ${statusMutationTimeouts.length})`,
-		)
-
+		// Wait for askResponse to be set
 		await pWaitFor(() => this.askResponse !== undefined || this.lastMessageTs !== askTs, { interval: 100 })

-		console.log(`[Task#${this.taskId}] pWaitFor askResponse(${type}) -> unblocked (${this.askResponse})`)
-
 		if (this.lastMessageTs !== askTs) {
 			// Could happen if we send multiple asks in a row i.e. with
 			// command_output. It's important that when we know an ask could

@@ -1086,7 +1082,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		let imageBlocks: Anthropic.ImageBlockParam[] = formatResponse.imageBlocks(images)

-		console.log(`[subtasks] task ${this.taskId}.${this.instanceId} starting`)
+		// Task starting

 		await this.initiateTaskLoop([
 			{

@@ -1121,7 +1117,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	private async resumeTaskFromHistory() {
-		console.log(`[Task#${this.taskId}] Resuming task from history`)
+		// Resuming task from history

 		if (this.enableTaskBridge) {
 			try {

@@ -1141,10 +1137,13 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		// Check for any stored GPT-5 response IDs in the message history
 		const gpt5Messages = modifiedClineMessages.filter(
-			(m) => m.type === "say" && (m as any).say === "text" && (m as any).metadata?.gpt5?.previous_response_id,
+			(m) =>
+				m.type === "say" &&
+				m.say === "text" &&
+				(m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id,
 		)
 		if (gpt5Messages.length > 0) {
-			const lastGpt5Message = gpt5Messages[gpt5Messages.length - 1] as any
+			const lastGpt5Message = gpt5Messages[gpt5Messages.length - 1] as ClineMessage & ClineMessageWithMetadata
 		}

 		// Remove any resume messages that may have been added before

@@ -1376,13 +1375,13 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

 		await this.overwriteApiConversationHistory(modifiedApiConversationHistory)

-		console.log(`[subtasks] task ${this.taskId}.${this.instanceId} resuming from history item`)
+		// Task resuming from history item

 		await this.initiateTaskLoop(newUserContent)
 	}

 	public dispose(): void {
-		console.log(`[Task] disposing task ${this.taskId}.${this.instanceId}`)
+		// Disposing task

 		// Remove all event listeners to prevent memory leaks
 		try {

@@ -1452,7 +1451,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	public async abortTask(isAbandoned = false) {
-		console.log(`[subtasks] aborting task ${this.taskId}.${this.instanceId}`)
+		// Aborting task

 		// Will stop any autonomously running promises.
 		if (isAbandoned) {

@@ -2394,18 +2393,16 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					this.clineMessages,
 					(m) =>
 						m.type === "say" &&
-						(m as any).say === "text" &&
-						(m as any).metadata?.gpt5?.previous_response_id,
+						m.say === "text" &&
+						!!(m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id,
 				)
 				if (idx !== -1) {
 					// Use the previous_response_id from the last assistant message for this request
-					previousResponseId = ((this.clineMessages[idx] as any).metadata.gpt5.previous_response_id ||
-						undefined) as string | undefined
+					const message = this.clineMessages[idx] as ClineMessage & ClineMessageWithMetadata
+					previousResponseId = message.metadata?.gpt5?.previous_response_id
 				}
 			} else if (this.skipPrevResponseIdOnce) {
-				console.log(
-					`[Task#${this.taskId}] Skipping previous_response_id due to recent condense operation - will send full conversation context`,
-				)
+				// Skipping previous_response_id due to recent condense operation - will send full conversation context
 			}
 		} catch (error) {
 			console.error(`[Task#${this.taskId}] Error retrieving GPT-5 response ID:`, error)

@@ -2581,17 +2578,20 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			const lastResponseId: string | undefined = (this.api as any)?.getLastResponseId?.()
 			const idx = findLastIndex(
 				this.clineMessages,
-				(m) => m.type === "say" && (m as any).say === "text" && m.partial !== true,
+				(m) => m.type === "say" && m.say === "text" && m.partial !== true,
 			)
 			if (idx !== -1) {
-				const msg = this.clineMessages[idx] as any
-				msg.metadata = msg.metadata ?? {}
-				msg.metadata.gpt5 = {
+				const msg = this.clineMessages[idx] as ClineMessage & ClineMessageWithMetadata
+				if (!msg.metadata) {
+					msg.metadata = {}
+				}
+				const gpt5Metadata: Gpt5Metadata = {
 					...(msg.metadata.gpt5 ?? {}),
 					previous_response_id: lastResponseId,
 					instructions: this.lastUsedInstructions,
 					reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
 				}
+				msg.metadata.gpt5 = gpt5Metadata
 			}
 		} catch (error) {
 			console.error(`[Task#${this.taskId}] Error persisting GPT-5 metadata:`, error)
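For reference, a sketch of how the persisted metadata might look on a saved message once the block above runs (all values illustrative):

    const persistedMessage = {
        type: "say",
        say: "text",
        metadata: {
            gpt5: {
                previous_response_id: "resp_abc123",           // from getLastResponseId()
                instructions: "<system prompt in effect>",     // this.lastUsedInstructions
                reasoning_summary: "<trimmed reasoning text>", // omitted when empty
            },
        },
    }
    console.log(JSON.stringify(persistedMessage, null, 2))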

src/core/task/types.ts

Lines changed: 37 additions & 0 deletions

@@ -0,0 +1,37 @@
+/**
+ * Type definitions for Task-related metadata
+ */
+
+/**
+ * GPT-5 specific metadata stored with assistant messages
+ * for maintaining conversation continuity across requests
+ */
+export interface Gpt5Metadata {
+	/**
+	 * The response ID from the previous GPT-5 API response
+	 * Used to maintain conversation continuity in subsequent requests
+	 */
+	previous_response_id?: string
+
+	/**
+	 * The system instructions/prompt used for this response
+	 * Stored to track what instructions were active when the response was generated
+	 */
+	instructions?: string
+
+	/**
+	 * The reasoning summary from GPT-5's reasoning process
+	 * Contains the model's internal reasoning if reasoning mode was enabled
+	 */
+	reasoning_summary?: string
+}
+
+/**
+ * Extended ClineMessage type with GPT-5 metadata
+ */
+export interface ClineMessageWithMetadata {
+	metadata?: {
+		gpt5?: Gpt5Metadata
+		[key: string]: any
+	}
+}
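A minimal usage sketch for the new types. The narrowing helper is hypothetical (not part of the commit), and ClineMessage is stubbed down to the fields used here; in the real code it comes from the shared types:

    import { ClineMessageWithMetadata } from "./types"

    // Stub of the fields used below; the real ClineMessage type is richer.
    type ClineMessage = { type: "say" | "ask"; say?: string; partial?: boolean }

    // Hypothetical helper that centralizes the cast Task.ts performs inline.
    function hasGpt5ResponseId(m: ClineMessage): m is ClineMessage & ClineMessageWithMetadata {
        return (
            m.type === "say" &&
            m.say === "text" &&
            !!(m as ClineMessageWithMetadata).metadata?.gpt5?.previous_response_id
        )
    }

    // Usage mirroring the filter in resumeTaskFromHistory:
    declare const clineMessages: ClineMessage[]
    const gpt5Messages = clineMessages.filter(hasGpt5ResponseId)
    const last = gpt5Messages[gpt5Messages.length - 1]
    const responseId = last?.metadata?.gpt5?.previous_response_id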
