Commit 7c0e018

Refactor whitespace and formatting in tracking.py
Parent: b53582c

File tree

1 file changed (+4, −5 lines)


src/agentlab/llm/tracking.py

Lines changed: 4 additions & 5 deletions
```diff
@@ -11,7 +11,6 @@
 import requests
 from langchain_community.callbacks import bedrock_anthropic_callback, openai_info

-
 TRACKER = threading.local()

 ANTHROPHIC_CACHE_PRICING_FACTOR = {
```
```diff
@@ -207,9 +206,7 @@ def update_pricing_tracker(self, raw_response) -> None:
         input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)
         cost = input_tokens * self.input_cost + output_tokens * self.output_cost

-        if hasattr(TRACKER, "instance") and isinstance(
-            TRACKER.instance, LLMTracker
-        ):
+        if hasattr(TRACKER, "instance") and isinstance(TRACKER.instance, LLMTracker):
             TRACKER.instance(input_tokens, output_tokens, cost)

     def get_tokens_counts_from_response(self, response) -> tuple:
```
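The reformatted `isinstance` check reports usage to a tracker registered on the current thread. As a rough illustration of that pattern, here is a minimal sketch of a thread-local tracker; the real `LLMTracker` lives in this file, and the context-manager protocol and field names below are assumptions for illustration, not the repo's implementation.

```python
import threading

# One slot per thread; each thread sees its own `instance` attribute.
TRACKER = threading.local()

class LLMTracker:
    """Sketch: accumulates token counts and cost for LLM calls on this thread."""

    def __init__(self):
        self.input_tokens = 0
        self.output_tokens = 0
        self.cost = 0.0

    def __call__(self, input_tokens: int, output_tokens: int, cost: float):
        # Matches the call shape in the diff: TRACKER.instance(in, out, cost)
        self.input_tokens += input_tokens
        self.output_tokens += output_tokens
        self.cost += cost

    def __enter__(self):
        # Register on the current thread; the pricing code guards with
        # hasattr(TRACKER, "instance") before reporting, so unregistered
        # threads are simply skipped.
        TRACKER.instance = self
        return self

    def __exit__(self, *exc):
        del TRACKER.instance

# Usage sketch: any update_pricing_tracker() call inside the block reports here.
with LLMTracker() as tracker:
    ...  # run agent / LLM calls
```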
```diff
@@ -286,7 +283,9 @@ def get_effective_cost_from_openai_api(self, response):
         usage = getattr(response, "usage", {})
         prompt_token_details = getattr(response, "prompt_tokens_details", {})

-        total_input_tokens = getattr(prompt_token_details, "prompt_tokens", 0)  # Cache read tokens + new input tokens
+        total_input_tokens = getattr(
+            prompt_token_details, "prompt_tokens", 0
+        )  # Cache read tokens + new input tokens
         output_tokens = getattr(usage, "completion_tokens", 0)
         cache_read_tokens = getattr(prompt_token_details, "cached_tokens", 0)
```
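For context on what these counts feed into: per the inline comment, `total_input_tokens` covers both cache-read and fresh input tokens, so an effective cost has to bill cached tokens at a discounted rate (the file keeps such discounts in tables like `ANTHROPHIC_CACHE_PRICING_FACTOR`). A hedged sketch of that arithmetic; the function name, rates, and the `cache_read_factor` discount below are hypothetical, not the repo's values.

```python
def effective_cost(
    total_input_tokens: int,
    output_tokens: int,
    cache_read_tokens: int,
    input_cost: float,          # $ per input token (assumed)
    output_cost: float,         # $ per output token (assumed)
    cache_read_factor: float = 0.5,  # assumed discount on cached input tokens
) -> float:
    # Cached tokens are a subset of total input tokens, so split them out
    # and bill only the fresh remainder at the full input rate.
    fresh_input_tokens = total_input_tokens - cache_read_tokens
    return (
        fresh_input_tokens * input_cost
        + cache_read_tokens * input_cost * cache_read_factor
        + output_tokens * output_cost
    )
```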
