@@ -61,7 +61,7 @@ describe("getApiMetrics", () => {
 			expect(result.totalCacheWrites).toBe(5)
 			expect(result.totalCacheReads).toBe(10)
 			expect(result.totalCost).toBe(0.005)
-			expect(result.contextTokens).toBe(300) // 100 + 200 (OpenAI default, no cache tokens)
+			expect(result.contextTokens).toBe(300) // 100 + 200 (cumulative tokens)
 		})

 		it("should calculate metrics from multiple api_req_started messages", () => {
@@ -83,7 +83,7 @@ describe("getApiMetrics", () => {
 			expect(result.totalCacheWrites).toBe(8) // 5 + 3
 			expect(result.totalCacheReads).toBe(17) // 10 + 7
 			expect(result.totalCost).toBe(0.008) // 0.005 + 0.003
-			expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+			expect(result.contextTokens).toBe(500) // (100 + 200) + (50 + 150) - cumulative
 		})

 		it("should calculate metrics from condense_context messages", () => {
@@ -123,7 +123,7 @@ describe("getApiMetrics", () => {
 			expect(result.totalCacheWrites).toBe(8) // 5 + 3
 			expect(result.totalCacheReads).toBe(17) // 10 + 7
 			expect(result.totalCost).toBe(0.01) // 0.005 + 0.002 + 0.003
-			expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+			expect(result.contextTokens).toBe(700) // 500 (from condense) + 50 + 150
 		})
 	})

@@ -242,9 +242,8 @@ describe("getApiMetrics", () => {
 			expect(result.totalCacheReads).toBe(10)
 			expect(result.totalCost).toBe(0.005)

-			// The implementation will use the last message that has any tokens
-			// In this case, it's the message with tokensOut:200 (since the last few messages have no tokensIn/Out)
-			expect(result.contextTokens).toBe(200) // 0 + 200 (from the tokensOut message)
+			// The cumulative context should be the sum of all tokens
+			expect(result.contextTokens).toBe(300) // 100 + 0 + 0 + 0 + 200 (cumulative)
 		})

 		it("should handle non-number values in api_req_started message", () => {
@@ -264,48 +263,62 @@ describe("getApiMetrics", () => {
 			expect(result.totalCacheReads).toBeUndefined()
 			expect(result.totalCost).toBe(0)

-			// The implementation concatenates all token values including cache tokens
-			expect(result.contextTokens).toBe("not-a-numbernot-a-number") // tokensIn + tokensOut (OpenAI default)
+			// Non-number values should result in 0 context tokens
+			expect(result.contextTokens).toBe(0)
 		})
 	})

 	describe("Context tokens calculation", () => {
-		it("should calculate contextTokens from the last api_req_started message", () => {
+		it("should calculate cumulative contextTokens from all api_req_started messages", () => {
 			const messages: ClineMessage[] = [
 				createApiReqStartedMessage('{"tokensIn":100,"tokensOut":200,"cacheWrites":5,"cacheReads":10}', 1000),
 				createApiReqStartedMessage('{"tokensIn":50,"tokensOut":150,"cacheWrites":3,"cacheReads":7}', 2000),
 			]

 			const result = getApiMetrics(messages)

-			// Should use the values from the last api_req_started message
-			expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+			// Should sum all tokens from all messages
+			expect(result.contextTokens).toBe(500) // (100 + 200) + (50 + 150)
 		})

-		it("should calculate contextTokens from the last condense_context message", () => {
+		it("should reset contextTokens after condense_context message", () => {
 			const messages: ClineMessage[] = [
 				createApiReqStartedMessage('{"tokensIn":100,"tokensOut":200,"cacheWrites":5,"cacheReads":10}', 1000),
 				createCondenseContextMessage(0.002, 500, 1000, 2000),
 			]

 			const result = getApiMetrics(messages)

-			// Should use newContextTokens from the last condense_context message
+			// Should use newContextTokens from the condense_context message
 			expect(result.contextTokens).toBe(500)
 		})

-		it("should prioritize the last message for contextTokens calculation", () => {
+		it("should accumulate tokens after condense_context", () => {
 			const messages: ClineMessage[] = [
-				createCondenseContextMessage(0.002, 500, 1000, 1000),
-				createApiReqStartedMessage('{"tokensIn":100,"tokensOut":200,"cacheWrites":5,"cacheReads":10}', 2000),
-				createCondenseContextMessage(0.003, 400, 800, 3000),
-				createApiReqStartedMessage('{"tokensIn":50,"tokensOut":150,"cacheWrites":3,"cacheReads":7}', 4000),
+				createApiReqStartedMessage('{"tokensIn":100,"tokensOut":200,"cacheWrites":5,"cacheReads":10}', 1000),
+				createCondenseContextMessage(0.002, 500, 1000, 2000),
+				createApiReqStartedMessage('{"tokensIn":50,"tokensOut":150,"cacheWrites":3,"cacheReads":7}', 3000),
 			]

 			const result = getApiMetrics(messages)

-			// Should use the values from the last api_req_started message
-			expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
+			// Should use condense tokens + new tokens after condense
+			expect(result.contextTokens).toBe(700) // 500 + (50 + 150)
+		})
+
+		it("should handle multiple condense_context messages correctly", () => {
+			const messages: ClineMessage[] = [
+				createApiReqStartedMessage('{"tokensIn":100,"tokensOut":200}', 1000),
+				createCondenseContextMessage(0.002, 500, 1000, 2000),
+				createApiReqStartedMessage('{"tokensIn":50,"tokensOut":150}', 3000),
+				createCondenseContextMessage(0.003, 400, 800, 4000),
+				createApiReqStartedMessage('{"tokensIn":25,"tokensOut":75}', 5000),
+			]
+
+			const result = getApiMetrics(messages)
+
+			// Should use the last condense tokens + tokens after it
+			expect(result.contextTokens).toBe(500) // 400 + (25 + 75)
 		})

 		it("should handle missing values when calculating contextTokens", () => {
@@ -320,7 +333,7 @@ describe("getApiMetrics", () => {
 			const result = getApiMetrics(messages)

 			// Should handle missing or invalid values
-			expect(result.contextTokens).toBe(0) // 0 + 0 (OpenAI default, no cache tokens)
+			expect(result.contextTokens).toBe(0)

 			// Restore console.error
 			console.error = originalConsoleError
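For reference, here is a minimal sketch of the cumulative semantics these tests pin down. It is not the real `getApiMetrics` implementation, and the message shapes (`say`, `text`, `contextCondense`) plus the local types are assumptions introduced purely for illustration: each `api_req_started` payload adds its `tokensIn + tokensOut` to a running context total, while a `condense_context` message resets that total to its `newContextTokens`.

```typescript
// Sketch only: these shapes are assumptions for illustration,
// not the actual ClineMessage type from the codebase.
interface CondenseInfo {
	cost: number
	newContextTokens: number
	prevContextTokens: number
}

interface SketchMessage {
	ts: number
	say?: "api_req_started" | "condense_context"
	text?: string // JSON payload for api_req_started messages
	contextCondense?: CondenseInfo
}

interface SketchMetrics {
	totalTokensIn: number
	totalTokensOut: number
	totalCost: number
	contextTokens: number
}

function getApiMetricsSketch(messages: SketchMessage[]): SketchMetrics {
	const result: SketchMetrics = {
		totalTokensIn: 0,
		totalTokensOut: 0,
		totalCost: 0,
		contextTokens: 0,
	}

	for (const message of messages) {
		if (message.say === "api_req_started" && message.text) {
			try {
				const { tokensIn, tokensOut, cost } = JSON.parse(message.text)
				// Non-number values contribute nothing, so contextTokens
				// stays 0 for the "not-a-number" payloads tested above.
				const inTokens = typeof tokensIn === "number" ? tokensIn : 0
				const outTokens = typeof tokensOut === "number" ? tokensOut : 0
				result.totalTokensIn += inTokens
				result.totalTokensOut += outTokens
				result.totalCost += typeof cost === "number" ? cost : 0
				// Cumulative semantics: every request grows the running context.
				result.contextTokens += inTokens + outTokens
			} catch (error) {
				console.error("Failed to parse api_req_started payload:", error)
			}
		} else if (message.say === "condense_context" && message.contextCondense) {
			// A condense resets the running context to the post-condense size;
			// later api_req_started tokens accumulate on top of it.
			result.contextTokens = message.contextCondense.newContextTokens
			result.totalCost += message.contextCondense.cost
		}
	}

	return result
}
```

Because the loop walks `messages` in order, the most recent `condense_context` always establishes the new baseline, which is exactly what the multiple-condense test asserts: 400 + (25 + 75) = 500.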