@@ -136,11 +136,7 @@ def _normalize_langchain_message(self, message):

     def _extract_token_usage(self, token_usage):
         # type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]]
-        """Extract input, output, and total tokens from various token usage formats.
-
-        Based on LangChain's callback pattern for token tracking:
-        https://python.langchain.com/docs/how_to/llm_token_usage_tracking/
-        """
+        """Extract input, output, and total tokens from various token usage formats."""
         if not token_usage:
             return None, None, None

@@ -149,7 +145,6 @@ def _extract_token_usage(self, token_usage):
         total_tokens = None

         if hasattr(token_usage, "get"):
-            # Dictionary format - common in LangChain callbacks
             input_tokens = token_usage.get("prompt_tokens") or token_usage.get(
                 "input_tokens"
             )
@@ -158,7 +153,6 @@ def _extract_token_usage(self, token_usage):
             )
             total_tokens = token_usage.get("total_tokens")
         else:
-            # Object format - used by some model providers
             input_tokens = getattr(token_usage, "prompt_tokens", None) or getattr(
                 token_usage, "input_tokens", None
             )
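
For context, a minimal sketch of the two token-usage shapes this helper normalizes. The sample values and the `SimpleNamespace` stand-in are illustrative, not taken from the PR; the helper is a method, so the expected results are shown as comments rather than direct calls:

```python
from types import SimpleNamespace

# Dictionary format, as produced by many LangChain callbacks.
usage_dict = {"prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42}

# Object format, as returned by some model providers. SimpleNamespace has no
# .get() method, so it exercises the getattr() branch of the helper.
usage_obj = SimpleNamespace(prompt_tokens=12, completion_tokens=30, total_tokens=42)

# Both shapes should normalize to the same (input, output, total) tuple:
# self._extract_token_usage(usage_dict) -> (12, 30, 42)
# self._extract_token_usage(usage_obj)  -> (12, 30, 42)
```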