24 changes: 12 additions & 12 deletions src/shared/__tests__/getApiMetrics.spec.ts
@@ -61,7 +61,7 @@ describe("getApiMetrics", () => {
expect(result.totalCacheWrites).toBe(5)
expect(result.totalCacheReads).toBe(10)
expect(result.totalCost).toBe(0.005)
-expect(result.contextTokens).toBe(300) // 100 + 200 (OpenAI default, no cache tokens)
+expect(result.contextTokens).toBe(100) // Only input tokens
})

it("should calculate metrics from multiple api_req_started messages", () => {
@@ -83,7 +83,7 @@ describe("getApiMetrics", () => {
expect(result.totalCacheWrites).toBe(8) // 5 + 3
expect(result.totalCacheReads).toBe(17) // 10 + 7
expect(result.totalCost).toBe(0.008) // 0.005 + 0.003
-expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+expect(result.contextTokens).toBe(50) // Only input tokens from last request
})

it("should calculate metrics from condense_context messages", () => {
@@ -123,7 +123,7 @@ describe("getApiMetrics", () => {
expect(result.totalCacheWrites).toBe(8) // 5 + 3
expect(result.totalCacheReads).toBe(17) // 10 + 7
expect(result.totalCost).toBe(0.01) // 0.005 + 0.002 + 0.003
-expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+expect(result.contextTokens).toBe(50) // Only input tokens from last request
})
})

@@ -243,8 +243,8 @@ describe("getApiMetrics", () => {
expect(result.totalCost).toBe(0.005)

// The implementation will use the last message that has any tokens
-// In this case, it's the message with tokensOut:200 (since the last few messages have no tokensIn/Out)
-expect(result.contextTokens).toBe(200) // 0 + 200 (from the tokensOut message)
+// In this case, the last message with tokensIn is the first one with tokensIn:100
+expect(result.contextTokens).toBe(100) // Only tokensIn from the first message
})

it("should handle non-number values in api_req_started message", () => {
@@ -264,8 +264,8 @@ describe("getApiMetrics", () => {
expect(result.totalCacheReads).toBeUndefined()
expect(result.totalCost).toBe(0)

-// The implementation concatenates all token values including cache tokens
-expect(result.contextTokens).toBe("not-a-numbernot-a-number") // tokensIn + tokensOut (OpenAI default)
+// The implementation should only use tokensIn
+expect(result.contextTokens).toBe(0) // tokensIn is "not-a-number" which evaluates to 0
})
})

@@ -278,8 +278,8 @@ describe("getApiMetrics", () => {

const result = getApiMetrics(messages)

-// Should use the values from the last api_req_started message
-expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+// Should use the tokensIn from the last api_req_started message
+expect(result.contextTokens).toBe(50) // Only tokensIn
})

it("should calculate contextTokens from the last condense_context message", () => {
@@ -304,8 +304,8 @@ describe("getApiMetrics", () => {

const result = getApiMetrics(messages)

-// Should use the values from the last api_req_started message
-expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+// Should use the tokensIn from the last api_req_started message
+expect(result.contextTokens).toBe(50) // Only tokensIn
})

it("should handle missing values when calculating contextTokens", () => {
@@ -320,7 +320,7 @@ describe("getApiMetrics", () => {
const result = getApiMetrics(messages)

// Should handle missing or invalid values
-expect(result.contextTokens).toBe(0) // 0 + 0 (OpenAI default, no cache tokens)
+expect(result.contextTokens).toBe(0) // tokensIn is null, defaults to 0

// Restore console.error
console.error = originalConsoleError
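To make the arithmetic behind these expectation changes concrete, here is a minimal sketch in plain TypeScript (the variable names are hypothetical stand-ins for values parsed from the last api_req_started message):

```typescript
// Hypothetical token values from the last api_req_started message.
const tokensIn = 50
const tokensOut = 150
const cacheWrites = 3
const cacheReads = 7

// Old behavior, OpenAI default: input and output tokens were summed.
const oldOpenAi = tokensIn + tokensOut // 200

// Old behavior, Anthropic protocol: cache tokens were summed in as well.
const oldAnthropic = tokensIn + tokensOut + cacheWrites + cacheReads // 210

// New behavior: only the input tokens count, regardless of protocol.
const updated = tokensIn // 50

console.log(oldOpenAi, oldAnthropic, updated)
```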
9 changes: 3 additions & 6 deletions src/shared/getApiMetrics.ts
@@ -76,12 +76,9 @@ export function getApiMetrics(messages: ClineMessage[]) {
const { tokensIn, tokensOut, cacheWrites, cacheReads, apiProtocol } = parsedText

// Calculate context tokens based on API protocol
-if (apiProtocol === "anthropic") {
-	result.contextTokens = (tokensIn || 0) + (tokensOut || 0) + (cacheWrites || 0) + (cacheReads || 0)
-} else {
-	// For OpenAI (or when protocol is not specified)
-	result.contextTokens = (tokensIn || 0) + (tokensOut || 0)
-}
+// Context tokens should only include input tokens (the actual context size)
+// Output tokens, cache writes, and cache reads are not part of the context
+result.contextTokens = typeof tokensIn === "number" ? tokensIn : 0
Contributor (PR author) commented:
Nice fix! I notice you already added a comment explaining why only tokensIn is used. That's exactly what I would have suggested - it makes the intent crystal clear for future maintainers. The type checking for non-numeric values is also a good defensive programming practice.

} catch (error) {
console.error("Error parsing JSON:", error)
continue
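Reduced to its essentials, the updated branch is a single guarded assignment. Below is a self-contained sketch under assumed shapes: ApiReqTokenInfo and contextTokensFrom are hypothetical names, and the real code assigns result.contextTokens inline inside getApiMetrics rather than calling a helper.

```typescript
// Simplified stand-in for the parsed api_req_started payload; the real
// ClineMessage JSON carries more fields than shown here.
interface ApiReqTokenInfo {
	tokensIn?: unknown
	tokensOut?: number
	cacheWrites?: number
	cacheReads?: number
}

// Context tokens include only the input tokens; output tokens and cache
// activity are not part of the live context window. Non-numeric values
// (e.g. the "not-a-number" and null cases in the tests) fall back to 0.
function contextTokensFrom(info: ApiReqTokenInfo): number {
	return typeof info.tokensIn === "number" ? info.tokensIn : 0
}

console.log(contextTokensFrom({ tokensIn: 100, tokensOut: 200 })) // 100
console.log(contextTokensFrom({ tokensIn: "not-a-number" })) // 0
console.log(contextTokensFrom({ tokensIn: null })) // 0
```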