Commit 62c9d5d

test: update getApiMetrics tests for inverted API protocol logic

1 parent 527f36a

1 file changed: src/shared/__tests__/getApiMetrics.spec.ts (9 additions, 9 deletions)
@@ -61,7 +61,7 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheWrites).toBe(5)
 		expect(result.totalCacheReads).toBe(10)
 		expect(result.totalCost).toBe(0.005)
-		expect(result.contextTokens).toBe(315) // 100 + 200 + 5 + 10 (includes cache tokens)
+		expect(result.contextTokens).toBe(300) // 100 + 200 (OpenAI default, no cache tokens)
 	})

 	it("should calculate metrics from multiple api_req_started messages", () => {
@@ -83,7 +83,7 @@
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.008) // 0.005 + 0.003
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (from the last message, includes cache tokens)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})

 	it("should calculate metrics from condense_context messages", () => {
@@ -123,7 +123,7 @@
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.01) // 0.005 + 0.002 + 0.003
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (from the last api_req_started message, includes cache tokens)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})
 })

@@ -243,8 +243,8 @@
 		expect(result.totalCost).toBe(0.005)

 		// The implementation will use the last message that has any tokens
-		// In this case, it's the cacheReads message (10)
-		expect(result.contextTokens).toBe(10) // Only cacheReads from the last message
+		// In this case, it's the message with tokensOut:200 (since the last few messages have no tokensIn/Out)
+		expect(result.contextTokens).toBe(200) // 0 + 200 (from the tokensOut message)
 	})

 	it("should handle non-number values in api_req_started message", () => {
@@ -265,7 +265,7 @@
 		expect(result.totalCost).toBe(0)

 		// The implementation concatenates all token values including cache tokens
-		expect(result.contextTokens).toBe("not-a-numbernot-a-numbernot-a-numbernot-a-number") // tokensIn + tokensOut + cacheWrites + cacheReads
+		expect(result.contextTokens).toBe("not-a-numbernot-a-number") // tokensIn + tokensOut (OpenAI default)
 	})
 })

@@ -279,7 +279,7 @@
 		const result = getApiMetrics(messages)

 		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (includes cache tokens)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})

 	it("should calculate contextTokens from the last condense_context message", () => {
@@ -305,7 +305,7 @@
 		const result = getApiMetrics(messages)

 		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (includes cache tokens)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})

 	it("should handle missing values when calculating contextTokens", () => {
@@ -320,7 +320,7 @@
 		const result = getApiMetrics(messages)

 		// Should handle missing or invalid values
-		expect(result.contextTokens).toBe(15) // 0 + 0 + 5 + 10 (only cache tokens)
+		expect(result.contextTokens).toBe(0) // 0 + 0 (OpenAI default, no cache tokens)

 		// Restore console.error
 		console.error = originalConsoleError
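
For readers tracing the inverted logic, the expectations above are consistent with a contextTokens calculation along the following lines. This is a minimal TypeScript sketch inferred from the test values in this diff, not the repository's actual implementation: the ApiReqInfo shape, the apiProtocol field, and both helper names are assumptions, and only getApiMetrics and contextTokens appear in the tests themselves.

// Hypothetical sketch of the inverted protocol logic, inferred from the test
// expectations in this commit. ApiReqInfo, apiProtocol, contextTokensFrom, and
// lastContextTokens are assumed names, not the real implementation.

interface ApiReqInfo {
	tokensIn?: number
	tokensOut?: number
	cacheWrites?: number
	cacheReads?: number
	apiProtocol?: "anthropic" | "openai"
}

function contextTokensFrom(info: ApiReqInfo): number {
	const tokensIn = info.tokensIn ?? 0
	const tokensOut = info.tokensOut ?? 0
	if (info.apiProtocol === "anthropic") {
		// Anthropic-style protocol: cache tokens count toward the context window,
		// matching the old expectations (e.g. 100 + 200 + 5 + 10 = 315).
		return tokensIn + tokensOut + (info.cacheWrites ?? 0) + (info.cacheReads ?? 0)
	}
	// Inverted default (OpenAI-style): cache tokens are excluded, so the same
	// message now yields 100 + 200 = 300.
	return tokensIn + tokensOut
}

// "Last message that has any tokens": walk backwards and take the first message
// whose context size is non-zero. A trailing cacheReads-only message is skipped
// under the new default, which is why the -243,8 hunk now expects 200 (from the
// tokensOut: 200 message) instead of 10.
function lastContextTokens(infos: ApiReqInfo[]): number {
	for (let i = infos.length - 1; i >= 0; i--) {
		const size = contextTokensFrom(infos[i])
		if (size > 0) {
			return size
		}
	}
	return 0
}

The "not-a-numbernot-a-number" expectation also fits this shape: if the parsed message values are unchecked at runtime, tokensIn + tokensOut degenerates to string concatenation, and dropping the two cache terms is what shrinks the old four-part string to two parts.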
