
Commit c66b76e

fix: correct context token calculation to only include input tokens
- Fixed contextTokens calculation in getApiMetrics to only count tokensIn
- Previously it was incorrectly summing tokensIn + tokensOut + cacheWrites + cacheReads
- This was causing premature context condensing as the context size was inflated
- Updated tests to reflect the correct behavior
- Added proper type checking to handle non-numeric token values

Fixes #6668
1 parent 4e8b174 commit c66b76e
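
To make the inflation concrete, here is a minimal sketch of the before/after arithmetic. The token figures are made up for illustration; only the shape of the calculation comes from the diff below.

```typescript
// Hypothetical usage numbers for a single api_req_started message.
const tokensIn = 50_000 // prompt tokens actually sent: the real context size
const tokensOut = 2_000 // completion tokens
const cacheWrites = 30_000 // tokens written to the prompt cache
const cacheReads = 25_000 // tokens read back from the prompt cache

// Before the fix: everything was summed, inflating the apparent context size.
const before = tokensIn + tokensOut + cacheWrites + cacheReads // 107_000

// After the fix: only input tokens count, with a type check so
// non-numeric values fall back to 0.
const after = typeof tokensIn === "number" ? tokensIn : 0 // 50_000
```

Measured against a context-window threshold, the inflated figure would cross the condense limit at roughly half the true context usage, which is the premature condensing the commit message describes.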

File tree

2 files changed (+15, -18 lines)

- src/shared/__tests__/getApiMetrics.spec.ts
- src/shared/getApiMetrics.ts

src/shared/__tests__/getApiMetrics.spec.ts

Lines changed: 12 additions & 12 deletions

```diff
@@ -61,7 +61,7 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheWrites).toBe(5)
 		expect(result.totalCacheReads).toBe(10)
 		expect(result.totalCost).toBe(0.005)
-		expect(result.contextTokens).toBe(300) // 100 + 200 (OpenAI default, no cache tokens)
+		expect(result.contextTokens).toBe(100) // Only input tokens
 	})

 	it("should calculate metrics from multiple api_req_started messages", () => {
@@ -83,7 +83,7 @@
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.008) // 0.005 + 0.003
-		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+		expect(result.contextTokens).toBe(50) // Only input tokens from last request
 	})

 	it("should calculate metrics from condense_context messages", () => {
@@ -123,7 +123,7 @@
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.01) // 0.005 + 0.002 + 0.003
-		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+		expect(result.contextTokens).toBe(50) // Only input tokens from last request
 	})
 })

@@ -243,8 +243,8 @@
 		expect(result.totalCost).toBe(0.005)

 		// The implementation will use the last message that has any tokens
-		// In this case, it's the message with tokensOut:200 (since the last few messages have no tokensIn/Out)
-		expect(result.contextTokens).toBe(200) // 0 + 200 (from the tokensOut message)
+		// In this case, the last message with tokensIn is the first one with tokensIn:100
+		expect(result.contextTokens).toBe(100) // Only tokensIn from the first message
 	})

 	it("should handle non-number values in api_req_started message", () => {
@@ -264,8 +264,8 @@
 		expect(result.totalCacheReads).toBeUndefined()
 		expect(result.totalCost).toBe(0)

-		// The implementation concatenates all token values including cache tokens
-		expect(result.contextTokens).toBe("not-a-numbernot-a-number") // tokensIn + tokensOut (OpenAI default)
+		// The implementation should only use tokensIn
+		expect(result.contextTokens).toBe(0) // tokensIn is "not-a-number" which evaluates to 0
 	})
 })

@@ -278,8 +278,8 @@

 		const result = getApiMetrics(messages)

-		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+		// Should use the tokensIn from the last api_req_started message
+		expect(result.contextTokens).toBe(50) // Only tokensIn
 	})

 	it("should calculate contextTokens from the last condense_context message", () => {
@@ -304,8 +304,8 @@

 		const result = getApiMetrics(messages)

-		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+		// Should use the tokensIn from the last api_req_started message
+		expect(result.contextTokens).toBe(50) // Only tokensIn
 	})

 	it("should handle missing values when calculating contextTokens", () => {
@@ -320,7 +320,7 @@
 		const result = getApiMetrics(messages)

 		// Should handle missing or invalid values
-		expect(result.contextTokens).toBe(0) // 0 + 0 (OpenAI default, no cache tokens)
+		expect(result.contextTokens).toBe(0) // tokensIn is null, defaults to 0

 		// Restore console.error
 		console.error = originalConsoleError
```
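
For orientation, the payload these expectations parse lives in the text field of an api_req_started message, roughly like this. The field names come from the destructuring in the diff below; the exact message shape is an assumption:

```typescript
// Hypothetical api_req_started payload, JSON-encoded into message.text.
const payload = {
	tokensIn: 50, // prompt tokens: now the only contributor to contextTokens
	tokensOut: 150, // completion tokens: no longer counted
	cacheWrites: 3, // prompt-cache writes: no longer counted
	cacheReads: 7, // prompt-cache reads: no longer counted
	apiProtocol: "anthropic", // still destructured, but no longer branches the calculation
}
const text = JSON.stringify(payload)
```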

src/shared/getApiMetrics.ts

Lines changed: 3 additions & 6 deletions

```diff
@@ -76,12 +76,9 @@ export function getApiMetrics(messages: ClineMessage[]) {
 			const { tokensIn, tokensOut, cacheWrites, cacheReads, apiProtocol } = parsedText

 			// Calculate context tokens based on API protocol
-			if (apiProtocol === "anthropic") {
-				result.contextTokens = (tokensIn || 0) + (tokensOut || 0) + (cacheWrites || 0) + (cacheReads || 0)
-			} else {
-				// For OpenAI (or when protocol is not specified)
-				result.contextTokens = (tokensIn || 0) + (tokensOut || 0)
-			}
+			// Context tokens should only include input tokens (the actual context size)
+			// Output tokens, cache writes, and cache reads are not part of the context
+			result.contextTokens = typeof tokensIn === "number" ? tokensIn : 0
 		} catch (error) {
 			console.error("Error parsing JSON:", error)
 			continue
```
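
A rough, self-contained sketch of the behavior the updated tests pin down. The real getApiMetrics also accumulates totals and handles condense_context messages; this stripped-down version (with a simplified stand-in for ClineMessage) shows only the contextTokens rule:

```typescript
// Simplified stand-in for the real ClineMessage type.
interface ClineMessage {
	say?: string
	text?: string
}

function contextTokensOf(messages: ClineMessage[]): number {
	let contextTokens = 0
	for (const message of messages) {
		if (message.say !== "api_req_started" || !message.text) continue
		try {
			const { tokensIn } = JSON.parse(message.text)
			// Only input tokens represent the context size; tokensOut,
			// cacheWrites, and cacheReads are deliberately ignored.
			contextTokens = typeof tokensIn === "number" ? tokensIn : 0
		} catch (error) {
			console.error("Error parsing JSON:", error)
			continue
		}
	}
	return contextTokens
}

// Mirrors the updated spec: only tokensIn from the last request counts.
const messages: ClineMessage[] = [
	{ say: "api_req_started", text: JSON.stringify({ tokensIn: 100, tokensOut: 200 }) },
	{ say: "api_req_started", text: JSON.stringify({ tokensIn: 50, tokensOut: 150 }) },
]
console.log(contextTokensOf(messages)) // 50, not 200
```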
