@@ -61,7 +61,7 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheWrites).toBe(5)
 		expect(result.totalCacheReads).toBe(10)
 		expect(result.totalCost).toBe(0.005)
-		expect(result.contextTokens).toBe(315) // 100 + 200 + 5 + 10
+		expect(result.contextTokens).toBe(300) // 100 + 200 (OpenAI default, no cache tokens)
 	})
 
 	it("should calculate metrics from multiple api_req_started messages", () => {
@@ -83,7 +83,7 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.008) // 0.005 + 0.003
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (from the last message)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})
 
 	it("should calculate metrics from condense_context messages", () => {
@@ -123,7 +123,7 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheWrites).toBe(8) // 5 + 3
 		expect(result.totalCacheReads).toBe(17) // 10 + 7
 		expect(result.totalCost).toBe(0.01) // 0.005 + 0.002 + 0.003
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7 (from the last api_req_started message)
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})
 })
 
@@ -242,9 +242,9 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheReads).toBe(10)
 		expect(result.totalCost).toBe(0.005)
 
-		// The implementation will use the last message with tokens for contextTokens
-		// In this case, it's the cacheReads message
-		expect(result.contextTokens).toBe(10)
+		// The implementation will use the last message that has any tokens
+		// In this case, it's the message with tokensOut: 200 (since the last few messages have no tokensIn/Out)
+		expect(result.contextTokens).toBe(200) // 0 + 200 (from the tokensOut message)
 	})
 
 	it("should handle non-number values in api_req_started message", () => {
@@ -264,8 +264,8 @@ describe("getApiMetrics", () => {
 		expect(result.totalCacheReads).toBeUndefined()
 		expect(result.totalCost).toBe(0)
 
-		// The implementation concatenates string values for contextTokens
-		expect(result.contextTokens).toBe("not-a-numbernot-a-numbernot-a-numbernot-a-number")
+		// The implementation concatenates the string token values (tokensIn + tokensOut only under the OpenAI default)
+		expect(result.contextTokens).toBe("not-a-numbernot-a-number") // tokensIn + tokensOut (OpenAI default)
 	})
 })
 
@@ -279,7 +279,7 @@ describe("getApiMetrics", () => {
 		const result = getApiMetrics(messages)
 
 		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})
 
 	it("should calculate contextTokens from the last condense_context message", () => {
@@ -305,7 +305,7 @@ describe("getApiMetrics", () => {
 		const result = getApiMetrics(messages)
 
 		// Should use the values from the last api_req_started message
-		expect(result.contextTokens).toBe(210) // 50 + 150 + 3 + 7
+		expect(result.contextTokens).toBe(200) // 50 + 150 (OpenAI default, no cache tokens)
 	})
 
 	it("should handle missing values when calculating contextTokens", () => {
@@ -320,7 +320,7 @@ describe("getApiMetrics", () => {
 		const result = getApiMetrics(messages)
 
 		// Should handle missing or invalid values
-		expect(result.contextTokens).toBe(15) // 0 + 0 + 5 + 10
+		expect(result.contextTokens).toBe(0) // 0 + 0 (OpenAI default, no cache tokens)
 
 		// Restore console.error
 		console.error = originalConsoleError
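
The updated assertions all encode the same rule: under the OpenAI-style default, contextTokens is derived from tokensIn + tokensOut only, and cache read/write tokens are no longer folded in. Below is a minimal sketch of that rule for illustration; the names (`TokenUsage`, `includesCacheTokensInContext`, `contextTokensFor`) are assumptions, not the actual getApiMetrics internals.

```typescript
// Hypothetical sketch of the contextTokens rule implied by the updated tests.
interface TokenUsage {
	tokensIn: number
	tokensOut: number
	cacheWrites?: number
	cacheReads?: number
}

function contextTokensFor(usage: TokenUsage, includesCacheTokensInContext: boolean): number {
	// OpenAI-style default: only input + output tokens count toward context size.
	const base = usage.tokensIn + usage.tokensOut
	if (!includesCacheTokensInContext) {
		return base // e.g. 100 + 200 = 300, matching the updated expectation
	}
	// A provider that counts cached tokens toward context would add them back in.
	return base + (usage.cacheWrites ?? 0) + (usage.cacheReads ?? 0) // e.g. 100 + 200 + 5 + 10 = 315
}
```

With the sample values from the first test, the default path yields 100 + 200 = 300, while a provider that counts cache tokens toward context would still arrive at the old 315.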