Skip to content

Commit 3304689

Browse files
committed
refactor: replace type casting with proper TypeScript interfaces for Ark caching
- Replace `(requestOptions as any)` with proper TypeScript interfaces
- Add ArkChatCompletionCreateParamsStreaming and ArkChatCompletionCreateParamsNonStreaming interfaces
- Update OpenAiHandler to use the new typed interfaces
- Update all test files to use proper types
- Maintain full backward compatibility and functionality
- All tests pass (38 test cases) and TypeScript compilation succeeds

Addresses feedback from @Artoria2e5 about using proper OpenAI.Responses interfaces instead of type casting to any.
1 parent 580e8d7 commit 3304689

File tree

3 files changed

+66
-24
lines changed

3 files changed

+66
-24
lines changed

src/api/providers/openai.ts

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,13 @@ import { convertToR1Format } from "../transform/r1-format"
1919
import { convertToSimpleMessages } from "../transform/simple-format"
2020
import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
2121
import { getModelParams } from "../transform/model-params"
22-
import { addArkCaching, extractArkResponseId, getArkCachedTokens } from "../transform/caching/ark"
22+
import {
23+
addArkCaching,
24+
extractArkResponseId,
25+
getArkCachedTokens,
26+
ArkChatCompletionCreateParamsStreaming,
27+
ArkChatCompletionCreateParamsNonStreaming,
28+
} from "../transform/caching/ark"
2329

2430
import { DEFAULT_HEADERS } from "./constants"
2531
import { BaseProvider } from "./base-provider"
@@ -151,7 +157,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
151157

152158
const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
153159

154-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
160+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
155161
model: modelId,
156162
temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
157163
messages: convertedMessages,
@@ -232,7 +238,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
232238
content: systemPrompt,
233239
}
234240

235-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
241+
const requestOptions: ArkChatCompletionCreateParamsNonStreaming = {
236242
model: modelId,
237243
messages: deepseekReasoner
238244
? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
@@ -304,7 +310,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
304310
const model = this.getModel()
305311
const modelInfo = model.info
306312

307-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
313+
const requestOptions: ArkChatCompletionCreateParamsNonStreaming = {
308314
model: model.id,
309315
messages: [{ role: "user", content: prompt }],
310316
}
@@ -340,7 +346,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
340346
if (this.options.openAiStreamingEnabled ?? true) {
341347
const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
342348

343-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
349+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
344350
model: modelId,
345351
messages: [
346352
{
@@ -375,7 +381,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
375381

376382
yield* this.handleStreamResponse(stream, ark)
377383
} else {
378-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
384+
const requestOptions: ArkChatCompletionCreateParamsNonStreaming = {
379385
model: modelId,
380386
messages: [
381387
{
@@ -482,9 +488,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
482488
* O3 family models handle max_tokens separately in handleO3FamilyMessage
483489
*/
484490
private addMaxTokensIfNeeded(
485-
requestOptions:
486-
| OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
487-
| OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming,
491+
requestOptions: ArkChatCompletionCreateParamsStreaming | ArkChatCompletionCreateParamsNonStreaming,
488492
modelInfo: ModelInfo,
489493
): void {
490494
// Only add max_completion_tokens if includeMaxTokens is true

src/api/transform/caching/__tests__/ark.spec.ts

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,19 @@
11
// npx vitest run api/transform/caching/__tests__/ark.spec.ts
22

3-
import { addArkCaching, extractArkResponseId, getArkCachedTokens, hasArkCachedTokens } from "../ark"
3+
import {
4+
addArkCaching,
5+
extractArkResponseId,
6+
getArkCachedTokens,
7+
hasArkCachedTokens,
8+
ArkChatCompletionCreateParamsStreaming,
9+
ArkChatCompletionCreateParamsNonStreaming,
10+
} from "../ark"
411
import OpenAI from "openai"
512

613
describe("Ark Context Caching", () => {
714
describe("addArkCaching", () => {
815
it("should add basic caching configuration", () => {
9-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
16+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
1017
model: "doubao-pro-4k",
1118
messages: [
1219
{ role: "system", content: "You are a helpful assistant." },
@@ -28,7 +35,7 @@ describe("Ark Context Caching", () => {
2835
})
2936

3037
it("should add previous response ID when provided", () => {
31-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
38+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
3239
model: "doubao-pro-4k",
3340
messages: [{ role: "user", content: "Follow up question" }],
3441
stream: true,
@@ -47,7 +54,7 @@ describe("Ark Context Caching", () => {
4754
})
4855

4956
it("should add cache TTL when provided", () => {
50-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
57+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
5158
model: "doubao-pro-4k",
5259
messages: [{ role: "user", content: "Hello!" }],
5360
stream: true,
@@ -66,7 +73,7 @@ describe("Ark Context Caching", () => {
6673
})
6774

6875
it("should add both previous response ID and cache TTL", () => {
69-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
76+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
7077
model: "doubao-pro-4k",
7178
messages: [{ role: "user", content: "Hello!" }],
7279
stream: true,
@@ -87,7 +94,7 @@ describe("Ark Context Caching", () => {
8794
})
8895

8996
it("should work with non-streaming requests", () => {
90-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
97+
const requestOptions: ArkChatCompletionCreateParamsNonStreaming = {
9198
model: "doubao-pro-4k",
9299
messages: [{ role: "user", content: "Hello!" }],
93100
}
@@ -107,7 +114,7 @@ describe("Ark Context Caching", () => {
107114
})
108115

109116
it("should not add optional fields when not provided", () => {
110-
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
117+
const requestOptions: ArkChatCompletionCreateParamsStreaming = {
111118
model: "doubao-pro-4k",
112119
messages: [{ role: "user", content: "Hello!" }],
113120
stream: true,
@@ -323,7 +330,7 @@ describe("Ark Context Caching", () => {
323330
describe("integration scenarios", () => {
324331
it("should handle complete caching workflow", () => {
325332
// First request - no previous response ID
326-
const firstRequest: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
333+
const firstRequest: ArkChatCompletionCreateParamsStreaming = {
327334
model: "doubao-pro-4k",
328335
messages: [{ role: "user", content: "Hello!" }],
329336
stream: true,
@@ -364,7 +371,7 @@ describe("Ark Context Caching", () => {
364371
expect(hasArkCachedTokens(firstResponse.usage)).toBe(false)
365372

366373
// Second request - with previous response ID
367-
const secondRequest: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
374+
const secondRequest: ArkChatCompletionCreateParamsStreaming = {
368375
model: "doubao-pro-4k",
369376
messages: [
370377
{ role: "user", content: "Hello!" },

src/api/transform/caching/ark.ts

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,31 +19,62 @@ export interface ArkCacheOptions {
1919
cacheTtl?: number
2020
}
2121

22+
/**
23+
* Ark-specific caching configuration
24+
*/
25+
export interface ArkCachingConfig {
26+
type: "enabled"
27+
}
28+
29+
/**
30+
* Extended OpenAI request parameters with Ark-specific caching support
31+
*/
32+
export interface ArkChatCompletionCreateParamsStreaming
33+
extends OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming {
34+
/** Ark caching configuration */
35+
caching?: ArkCachingConfig
36+
/** Previous response ID for context continuation */
37+
previous_response_id?: string
38+
/** Cache TTL in seconds */
39+
cache_ttl?: number
40+
}
41+
42+
/**
43+
* Extended OpenAI request parameters with Ark-specific caching support (non-streaming)
44+
*/
45+
export interface ArkChatCompletionCreateParamsNonStreaming
46+
extends OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
47+
/** Ark caching configuration */
48+
caching?: ArkCachingConfig
49+
/** Previous response ID for context continuation */
50+
previous_response_id?: string
51+
/** Cache TTL in seconds */
52+
cache_ttl?: number
53+
}
54+
2255
/**
2356
* Add context caching support for Ark/Volcengine using the Responses API
2457
*
2558
* @param requestOptions - The OpenAI request options to modify
2659
* @param cacheOptions - Ark-specific caching options
2760
*/
2861
export function addArkCaching(
29-
requestOptions:
30-
| OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
31-
| OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming,
62+
requestOptions: ArkChatCompletionCreateParamsStreaming | ArkChatCompletionCreateParamsNonStreaming,
3263
cacheOptions: ArkCacheOptions = {},
3364
): void {
3465
// Enable caching for this request
35-
;(requestOptions as any).caching = {
66+
requestOptions.caching = {
3667
type: "enabled",
3768
}
3869

3970
// If we have a previous response ID, reference it for context continuation
4071
if (cacheOptions.previousResponseId) {
41-
;(requestOptions as any).previous_response_id = cacheOptions.previousResponseId
72+
requestOptions.previous_response_id = cacheOptions.previousResponseId
4273
}
4374

4475
// Set cache TTL (default to 1 hour as recommended in the issue)
4576
if (cacheOptions.cacheTtl) {
46-
;(requestOptions as any).cache_ttl = cacheOptions.cacheTtl
77+
requestOptions.cache_ttl = cacheOptions.cacheTtl
4778
}
4879
}
4980

0 commit comments

Comments (0)