
Commit 9bd032a

fix: format

1 parent fc27ff4

2 files changed (+10, -3 lines)

posthog/ai/langchain/callbacks.py

Lines changed: 7 additions & 2 deletions
@@ -751,11 +751,16 @@ def _parse_usage_model(
         "reasoning": "reasoning_tokens",
     }
     normalized_usage = ModelUsage(
-        **{dataclass_key: parsed_usage.get(mapped_key) or 0 for mapped_key, dataclass_key in field_mapping.items()},
+        **{
+            dataclass_key: parsed_usage.get(mapped_key) or 0
+            for mapped_key, dataclass_key in field_mapping.items()
+        },
     )
     # input_tokens is the sum of input and cache read tokens.
     if normalized_usage.input_tokens and normalized_usage.cache_read_tokens:
-        normalized_usage.input_tokens = max(normalized_usage.input_tokens - normalized_usage.cache_read_tokens, 0)
+        normalized_usage.input_tokens = max(
+            normalized_usage.input_tokens - normalized_usage.cache_read_tokens, 0
+        )
     return normalized_usage

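For readers skimming the diff: the reformatted block normalizes provider token usage into a `ModelUsage` dataclass, then corrects for providers that report `input_tokens` as the sum of fresh input and cache-read tokens by subtracting the cached portion, floored at zero. A minimal, self-contained sketch of that logic, where `ModelUsage`, `FIELD_MAPPING`, and `normalize_usage` are trimmed stand-ins for the real definitions in callbacks.py, not the actual API:

```python
from dataclasses import dataclass


@dataclass
class ModelUsage:
    # Trimmed stand-in: the real dataclass in callbacks.py has more fields.
    input_tokens: int = 0
    output_tokens: int = 0
    cache_read_tokens: int = 0


# Assumed subset of the provider-key -> dataclass-field mapping.
FIELD_MAPPING = {
    "input": "input_tokens",
    "output": "output_tokens",
    "cache_read": "cache_read_tokens",
}


def normalize_usage(parsed_usage: dict) -> ModelUsage:
    usage = ModelUsage(
        **{
            dataclass_key: parsed_usage.get(mapped_key) or 0
            for mapped_key, dataclass_key in FIELD_MAPPING.items()
        },
    )
    # input_tokens arrives as fresh input + cache reads, so strip the
    # cached portion, clamping at zero so the count never goes negative.
    if usage.input_tokens and usage.cache_read_tokens:
        usage.input_tokens = max(usage.input_tokens - usage.cache_read_tokens, 0)
    return usage


# Example: 100 reported input tokens, 30 of them served from cache.
print(normalize_usage({"input": 100, "output": 20, "cache_read": 30}))
# -> ModelUsage(input_tokens=70, output_tokens=20, cache_read_tokens=30)
```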
posthog/test/ai/langchain/test_callbacks.py

Lines changed: 3 additions & 1 deletion
@@ -1969,7 +1969,9 @@ def test_no_cache_read_tokens_no_subtraction(mock_client):

     This tests the conditional check before the subtraction in callbacks.py line 757.
     """
-    prompt = ChatPromptTemplate.from_messages([("user", "Normal request without cache")])
+    prompt = ChatPromptTemplate.from_messages(
+        [("user", "Normal request without cache")]
+    )

     # No cache usage - input_tokens should remain as-is
     model = FakeMessagesListChatModel(

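The test being reformatted exercises the guard before that subtraction: when no cache-read tokens are reported, `input_tokens` must pass through unchanged. A hedged sketch of that assertion against the `normalize_usage` stand-in above, rather than the real LangChain callback plumbing with `FakeMessagesListChatModel`:

```python
def test_no_cache_read_tokens_no_subtraction():
    # No cache usage: input_tokens should remain as-is.
    usage = normalize_usage({"input": 100, "output": 20})
    assert usage.input_tokens == 100
    assert usage.cache_read_tokens == 0
```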