2 files changed: +10 −3 lines.

First file — callbacks.py (named in the test docstring below):

@@ -751,11 +751,16 @@ def _parse_usage_model(
751751 "reasoning" : "reasoning_tokens" ,
752752 }
753753 normalized_usage = ModelUsage (
754- ** {dataclass_key : parsed_usage .get (mapped_key ) or 0 for mapped_key , dataclass_key in field_mapping .items ()},
754+ ** {
755+ dataclass_key : parsed_usage .get (mapped_key ) or 0
756+ for mapped_key , dataclass_key in field_mapping .items ()
757+ },
755758 )
756759 # input_tokens is the sum of input and cache read tokens.
757760 if normalized_usage .input_tokens and normalized_usage .cache_read_tokens :
758- normalized_usage .input_tokens = max (normalized_usage .input_tokens - normalized_usage .cache_read_tokens , 0 )
761+ normalized_usage .input_tokens = max (
762+ normalized_usage .input_tokens - normalized_usage .cache_read_tokens , 0
763+ )
759764 return normalized_usage
760765
761766
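For context, here is a minimal self-contained sketch of the normalization this hunk reformats. The `reasoning`/`reasoning_tokens` mapping and the `input_tokens`/`cache_read_tokens` fields appear in the hunk; the other provider keys ("input", "output", "cache_read"), the `output_tokens` field, and the `parse_usage` name are assumptions for illustration, not the actual callbacks.py implementation.

from dataclasses import dataclass

@dataclass
class ModelUsage:
    input_tokens: int = 0
    output_tokens: int = 0
    cache_read_tokens: int = 0
    reasoning_tokens: int = 0

def parse_usage(parsed_usage: dict) -> ModelUsage:
    # Map provider keys to ModelUsage fields; missing or None values become 0.
    field_mapping = {
        "input": "input_tokens",            # assumed provider key
        "output": "output_tokens",          # assumed provider key
        "cache_read": "cache_read_tokens",  # assumed provider key
        "reasoning": "reasoning_tokens",    # shown in the hunk
    }
    usage = ModelUsage(
        **{
            dataclass_key: parsed_usage.get(mapped_key) or 0
            for mapped_key, dataclass_key in field_mapping.items()
        }
    )
    # The provider reports input_tokens as input + cache reads, so subtract
    # cache reads to isolate uncached input; clamp at 0 to stay non-negative.
    if usage.input_tokens and usage.cache_read_tokens:
        usage.input_tokens = max(usage.input_tokens - usage.cache_read_tokens, 0)
    return usage

With illustrative numbers, a request whose 120 input tokens include 100 cache reads normalizes to 20 uncached input tokens:

parse_usage({"input": 120, "cache_read": 100, "output": 8})
# -> ModelUsage(input_tokens=20, output_tokens=8, cache_read_tokens=100, reasoning_tokens=0)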
Second file — the test module (path not shown in the excerpt):

@@ -1969,7 +1969,9 @@ def test_no_cache_read_tokens_no_subtraction(mock_client):

     This tests the conditional check before the subtraction in callbacks.py line 757.
     """
-    prompt = ChatPromptTemplate.from_messages([("user", "Normal request without cache")])
+    prompt = ChatPromptTemplate.from_messages(
+        [("user", "Normal request without cache")]
+    )

     # No cache usage - input_tokens should remain as-is
     model = FakeMessagesListChatModel(
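This test exercises the branch where no cache reads are reported, so the subtraction guarded by the conditional is skipped. Using the illustrative parse_usage sketch above (hypothetical names and figures, not the test's actual fixtures):

# No "cache_read" key, so cache_read_tokens stays 0 and the guarded
# subtraction never runs: input_tokens is returned as-is.
parse_usage({"input": 50, "output": 5})
# -> ModelUsage(input_tokens=50, output_tokens=5, cache_read_tokens=0, reasoning_tokens=0)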