Commit eb25c45

chore: remove unnecessary type cast
1 parent aecb6b2 commit eb25c45

3 files changed: +81 -54 lines changed

src/api/providers/__tests__/openai.spec.ts

Lines changed: 48 additions & 25 deletions
@@ -7,6 +7,20 @@ import OpenAI from "openai"
 import { Package } from "../../../shared/package"
 import axios from "axios"
 
+type ErrorWithStatus = Error & { status?: number }
+
+function getMockCallsOf(fn: unknown): any[] {
+	const isObj = (v: unknown): v is Record<string, unknown> => typeof v === "object" && v !== null
+	if (isObj(fn) || typeof fn === "function") {
+		const rec = fn as Record<string, unknown>
+		const mock = rec["mock"]
+		if (isObj(mock)) {
+			const calls = mock["calls"]
+			if (Array.isArray(calls)) return calls
+		}
+	}
+	return []
+}
 const mockCreate = vitest.fn()
 const mockResponsesCreate = vitest.fn()
 
@@ -424,9 +438,9 @@ describe("OpenAiHandler", () => {
 		})
 
 		it("should handle rate limiting", async () => {
-			const rateLimitError = new Error("Rate limit exceeded")
+			const rateLimitError: ErrorWithStatus = new Error("Rate limit exceeded")
 			rateLimitError.name = "Error"
-			;(rateLimitError as any).status = 429
+			rateLimitError.status = 429
 			mockCreate.mockRejectedValueOnce(rateLimitError)
 
 			const stream = handler.createMessage("system prompt", testMessages)
@@ -1198,9 +1212,9 @@ describe("OpenAI Compatible - Responses API", () => {
 
 	it("Verbosity (Responses): include when set; if server rejects, retry without it (warn once)", async () => {
 		// First call throws 400 for 'verbosity', second succeeds
-		mockResponsesCreate.mockImplementationOnce((_opts: any) => {
-			const err = new Error("Unsupported parameter: 'verbosity'")
-			;(err as any).status = 400
+		mockResponsesCreate.mockImplementationOnce((_opts: unknown) => {
+			const err: ErrorWithStatus = new Error("Unsupported parameter: 'verbosity'")
+			err.status = 400
 			throw err
 		})
 
@@ -1295,10 +1309,13 @@ describe("OpenAI Compatible - Responses API", () => {
 
 		// Ensure SDK constructor was called with normalized baseURL and 'preview' apiVersion (per requirement)
 		// Note: AzureOpenAI and OpenAI share same mock constructor; inspect last call
-		const ctorCalls = vi.mocked(OpenAI as unknown as any).mock.calls as any[]
-		const lastCtorArgs = ctorCalls[ctorCalls.length - 1]?.[0] || {}
-		expect(lastCtorArgs.baseURL).toBe("https://sample-name.openai.azure.com/openai/v1")
-		expect(lastCtorArgs.apiVersion).toBe("preview")
+		const ctorCalls = getMockCallsOf(OpenAI)
+		const lastCall = ctorCalls[ctorCalls.length - 1]
+		const lastArg0 = Array.isArray(lastCall) ? lastCall[0] : undefined
+		const lastCtorArgs =
+			typeof lastArg0 === "object" && lastArg0 !== null ? (lastArg0 as Record<string, unknown>) : {}
+		expect(lastCtorArgs["baseURL"]).toBe("https://sample-name.openai.azure.com/openai/v1")
+		expect(lastCtorArgs["apiVersion"]).toBe("preview")
 	})
 
 	it("streams Responses API when provider returns AsyncIterable", async () => {
@@ -1461,7 +1478,7 @@ describe("OpenAI Compatible - Responses API (multimodal)", () => {
 					{
 						type: "image" as const,
 						// Minimal Anthropic-style inline image (base64) block
-						source: { media_type: "image/png", data: "BASE64DATA" } as any,
+						source: { type: "base64" as const, media_type: "image/png", data: "BASE64DATA" },
 					},
 				],
 			},
@@ -1478,7 +1495,7 @@ describe("OpenAI Compatible - Responses API (multimodal)", () => {
 
 		// Input should be an array (structured input mode)
 		expect(Array.isArray(args.input)).toBe(true)
-		const arr = args.input as any[]
+		const arr = Array.isArray(args.input) ? args.input : []
 
 		// First element should be Developer preface as input_text
 		expect(arr[0]?.role).toBe("user")
@@ -1537,7 +1554,7 @@ describe("OpenAI Compatible - Responses API (multimodal)", () => {
 					{ type: "text" as const, text: "Look at this" },
 					{
 						type: "image" as const,
-						source: { media_type: "image/jpeg", data: "IMGDATA" } as any,
+						source: { type: "base64" as const, media_type: "image/jpeg", data: "IMGDATA" },
 					},
 				],
 			},
@@ -1648,7 +1665,7 @@ describe("OpenAI Compatible - Responses API conversation continuity", () => {
 		for await (const _ of handler.createMessage(
 			"sys",
 			[{ role: "user", content: [{ type: "text" as const, text: "Turn 2" }] }],
-			{ suppressPreviousResponseId: true } as any,
+			{ taskId: "test", suppressPreviousResponseId: true },
 		)) {
 		}
 
@@ -1668,9 +1685,9 @@ describe("OpenAI Compatible - Responses API parity improvements", () => {
 	it("retries without previous_response_id when server returns 400 'Previous response ... not found' (non-streaming)", async () => {
 		// First call throws 400 for previous_response_id, second succeeds
 		mockResponsesCreate
-			.mockImplementationOnce((_opts: any) => {
-				const err = new Error("Previous response rid-bad not found")
-				;(err as any).status = 400
+			.mockImplementationOnce((_opts: unknown) => {
+				const err: ErrorWithStatus = new Error("Previous response rid-bad not found")
+				err.status = 400
 				throw err
 			})
 			.mockImplementationOnce(async (_opts: any) => {
@@ -1688,7 +1705,7 @@ describe("OpenAI Compatible - Responses API parity improvements", () => {
 		for await (const ch of h.createMessage(
 			"sys",
 			[{ role: "user", content: [{ type: "text" as const, text: "Turn" }] }],
-			{ previousResponseId: "rid-bad" } as any,
+			{ taskId: "test", previousResponseId: "rid-bad" },
 		)) {
 			chunks.push(ch)
 		}
@@ -1709,9 +1726,9 @@ describe("OpenAI Compatible - Responses API parity improvements", () => {
 	it("retries without previous_response_id when server returns 400 (streaming)", async () => {
 		// First call throws, second returns a stream
 		mockResponsesCreate
-			.mockImplementationOnce((_opts: any) => {
-				const err = new Error("Previous response not found")
-				;(err as any).status = 400
+			.mockImplementationOnce((_opts: unknown) => {
				const err: ErrorWithStatus = new Error("Previous response not found")
+				err.status = 400
 				throw err
 			})
 			.mockImplementationOnce(async (_opts: any) => {
@@ -1734,7 +1751,7 @@ describe("OpenAI Compatible - Responses API parity improvements", () => {
 		for await (const ch of h.createMessage(
 			"sys",
 			[{ role: "user", content: [{ type: "text" as const, text: "Hi" }] }],
-			{ previousResponseId: "bad-id" } as any,
+			{ taskId: "test", previousResponseId: "bad-id" },
 		)) {
 			out.push(ch)
 		}
@@ -1884,7 +1901,10 @@ describe("OpenAI Compatible - Responses API minimal input parity (new tests)", (
 		]
 
 		const chunks: any[] = []
-		for await (const ch of handler.createMessage("System Inst", msgs, { previousResponseId: "prev-1" } as any)) {
+		for await (const ch of handler.createMessage("System Inst", msgs, {
+			taskId: "test",
+			previousResponseId: "prev-1",
+		})) {
 			chunks.push(ch)
 		}
 
@@ -1914,12 +1934,15 @@ describe("OpenAI Compatible - Responses API minimal input parity (new tests)", (
 				role: "user",
 				content: [
 					{ type: "text" as const, text: "See" },
-					{ type: "image" as const, source: { media_type: "image/png", data: "IMGDATA" } as any },
+					{
+						type: "image" as const,
+						source: { type: "base64" as const, media_type: "image/png", data: "IMGDATA" },
+					},
 				],
 			},
 		]
 
-		const iter = handler.createMessage("Sys", msgs, { previousResponseId: "prev-2" } as any)
+		const iter = handler.createMessage("Sys", msgs, { taskId: "test", previousResponseId: "prev-2" })
 		for await (const _ of iter) {
 			// consume
 		}
@@ -1928,7 +1951,7 @@ describe("OpenAI Compatible - Responses API minimal input parity (new tests)", (
 		const args = mockResponsesCreate.mock.calls.pop()?.[0]
 		expect(Array.isArray(args.input)).toBe(true)
 
-		const arr = args.input as any[]
+		const arr = Array.isArray(args.input) ? args.input : []
 		expect(arr.length).toBe(1)
 		expect(arr[0]?.role).toBe("user")

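The new `ErrorWithStatus` alias and the `getMockCallsOf` helper give these tests a single typed seam in place of scattered `as any` casts. A minimal standalone sketch of the same construct-and-narrow pattern follows; the `makeHttpError` and `getStatus` helpers are illustrative and not part of this commit:

// An Error that may also carry an HTTP status, without subclassing.
type ErrorWithStatus = Error & { status?: number }

// Construct without `as any`: annotate the binding, then set the optional field.
function makeHttpError(message: string, status: number): ErrorWithStatus {
	const err: ErrorWithStatus = new Error(message)
	err.status = status
	return err
}

// Narrow an unknown caught value before reading `.status`.
function getStatus(e: unknown): number | undefined {
	if (e instanceof Error && "status" in e) {
		const status = (e as ErrorWithStatus).status
		return typeof status === "number" ? status : undefined
	}
	return undefined
}

try {
	throw makeHttpError("Rate limit exceeded", 429)
} catch (e) {
	if (getStatus(e) === 429) console.log("rate limited; backing off")
}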
src/api/providers/openai.ts

Lines changed: 27 additions & 23 deletions
@@ -25,7 +25,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
-import { ResponseCreateParamsNonStreaming } from "openai/resources/responses/responses"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -151,19 +150,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		// Gather model params (centralized: temperature, max tokens, reasoning, verbosity)
-		const modelParams = this.getModel()
-		const {
-			info: modelInfo,
-			reasoning,
-			reasoningEffort,
-			verbosity,
-		} = modelParams as unknown as {
-			id: string
-			info: ModelInfo
-			reasoning?: { reasoning_effort?: "low" | "medium" | "high" }
-			reasoningEffort?: "minimal" | "low" | "medium" | "high"
-			verbosity?: "low" | "medium" | "high"
-		}
+		const { info: modelInfo } = this.getModel()
+		const openAiParams = getModelParams({
+			format: "openai",
+			modelId: this.options.openAiModelId ?? "",
+			model: modelInfo,
+			settings: this.options,
+		})
+		const { reasoning, reasoningEffort, verbosity } = openAiParams
 
 		const modelUrl = this.options.openAiBaseUrl ?? ""
 		const modelId = this.options.openAiModelId ?? ""
@@ -280,7 +274,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			}
 
 			// Always include max_output_tokens for Responses API to cap output length
-			const reservedMax = (modelParams as any)?.maxTokens
+			const reservedMax = openAiParams.maxTokens
 			;(basePayload as Record<string, unknown>).max_output_tokens =
 				this.options.modelMaxTokens || reservedMax || modelInfo.maxTokens
 
@@ -293,7 +287,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 					systemPrompt,
 					messages,
 				})
-				yield* this._yieldResponsesResult(response as unknown, modelInfo)
+				yield* this._yieldResponsesResult(response, modelInfo)
 				return
 			}
 
@@ -320,7 +314,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				}
 			} else {
 				// Some providers may ignore the stream flag and return a complete response
-				yield* this._yieldResponsesResult(maybeStream as unknown, modelInfo)
+				yield* this._yieldResponsesResult(maybeStream, modelInfo)
 			}
 			return
 		}
@@ -521,7 +515,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			} as Anthropic.Messages.MessageParam,
 			/*includeRole*/ true,
 		)
-		const payload: ResponseCreateParamsNonStreaming = {
+		const payload: Record<string, unknown> = {
 			model: model.id,
 			input: formattedInput,
 		}
@@ -552,7 +546,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			payload.max_output_tokens = this.options.modelMaxTokens || modelInfo.maxTokens
 		}
 
-		const response = await this._responsesCreateWithRetries(payload as unknown as Record<string, unknown>, {
+		const response = await this._responsesCreateWithRetries(payload, {
 			usedArrayInput: false,
 			lastUserMessage: undefined,
 			previousId: undefined,
@@ -976,10 +970,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			messages: Anthropic.Messages.MessageParam[]
 		},
 	): Promise<unknown> {
-		const create = (body: Record<string, unknown>) =>
-			(
-				this.client as unknown as { responses: { create: (b: Record<string, unknown>) => Promise<unknown> } }
-			).responses.create(body)
+		const create = (body: Record<string, unknown>) => {
+			const hasResponsesCreate = (
+				obj: unknown,
+			): obj is { responses: { create: (b: Record<string, unknown>) => Promise<unknown> } } => {
+				if (obj == null || typeof obj !== "object") return false
+				const responses = (obj as Record<string, unknown>).responses
+				if (responses == null || typeof responses !== "object") return false
+				return typeof (responses as Record<string, unknown>).create === "function"
+			}
+			if (!hasResponsesCreate(this.client)) {
+				throw new Error("Responses API not available on client")
+			}
+			return this.client.responses.create(body)
+		}
 
 		try {
 			return await create(payload)

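The inlined `hasResponsesCreate` guard replaces a double cast (`as unknown as { responses: ... }`) with a runtime structural check, so a client without a Responses API fails loudly rather than at an undefined property access. The same guard extracted as a standalone sketch; the `ResponsesCapable` name and `callResponses` wrapper are illustrative, not from the commit:

type ResponsesCapable = { responses: { create: (b: Record<string, unknown>) => Promise<unknown> } }

// Structural guard: verifies the property chain at runtime instead of casting it into existence.
function hasResponsesCreate(obj: unknown): obj is ResponsesCapable {
	if (obj == null || typeof obj !== "object") return false
	const responses = (obj as Record<string, unknown>).responses
	if (responses == null || typeof responses !== "object") return false
	return typeof (responses as Record<string, unknown>).create === "function"
}

// After the guard passes, `.responses.create` is fully typed with no cast.
async function callResponses(client: unknown, payload: Record<string, unknown>): Promise<unknown> {
	if (!hasResponsesCreate(client)) throw new Error("Responses API not available on client")
	return client.responses.create(payload)
}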
src/api/transform/responses-stream.ts

Lines changed: 6 additions & 6 deletions
@@ -18,7 +18,7 @@ export async function* handleResponsesStream(
 	for await (const event of stream) {
 		// Surface response.id to callers when available (for conversation continuity)
 		if (isObject(event)) {
-			const resp = (event as Record<string, unknown>).response as unknown
+			const resp = (event as Record<string, unknown>).response
 			if (isObject(resp)) {
 				const rid = (resp as Record<string, unknown>).id
 				if (typeof rid === "string") {
@@ -224,11 +224,11 @@ function isDoneEvent(event: unknown): event is DoneEvent {
 
 function getChoiceDeltaContent(event: unknown): string | undefined {
 	if (!isObject(event)) return undefined
-	const choices = (event as Record<string, unknown>).choices as unknown
+	const choices = (event as Record<string, unknown>).choices
 	if (!Array.isArray(choices) || choices.length === 0) return undefined
-	const first = choices[0] as unknown
+	const first = choices[0]
 	if (!isObject(first)) return undefined
-	const delta = (first as Record<string, unknown>).delta as unknown
+	const delta = (first as Record<string, unknown>).delta
 	if (!isObject(delta)) return undefined
 	const content = (delta as Record<string, unknown>).content
 	if (content == null) return undefined
@@ -237,11 +237,11 @@ function getChoiceDeltaContent(event: unknown): string | undefined {
 
 function extractUsage(event: unknown): ResponseUsage | undefined {
 	if (!isObject(event)) return undefined
-	const resp = (event as Record<string, unknown>).response as unknown
+	const resp = (event as Record<string, unknown>).response
 	if (isObject(resp) && isObject((resp as Record<string, unknown>).usage)) {
 		return (resp as Record<string, unknown>).usage as ResponseUsage
 	}
-	const usage = (event as Record<string, unknown>).usage as unknown
+	const usage = (event as Record<string, unknown>).usage
 	if (isObject(usage)) {
 		return usage as ResponseUsage
 	}

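The `as unknown` deletions in this file are safe because indexing a `Record<string, unknown>` already yields `unknown`; the trailing cast was a no-op. A minimal sketch of the guard-then-index style these helpers rely on; `readResponseId` is an illustrative name, not a function in the file:

// After the `isObject` guard, property access yields `unknown` directly,
// so no `as unknown` cast is needed before the next narrowing step.
const isObject = (v: unknown): v is Record<string, unknown> => typeof v === "object" && v !== null

function readResponseId(event: unknown): string | undefined {
	if (!isObject(event)) return undefined
	const resp = event.response // type: unknown, no cast required
	if (!isObject(resp)) return undefined
	const rid = resp.id // again unknown; narrow before use
	return typeof rid === "string" ? rid : undefined
}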