Skip to content

Commit 2d3d96d

Browse files
ShreySatapara and OpenHands Evaluation authored
Fix LiteLLM cost tracking for provider-prefixed models (#2257)
Co-authored-by: OpenHands Evaluation <evaluation@openhands.dev>
1 parent 2b54375 commit 2d3d96d

File tree

2 files changed

+59
-3
lines changed

2 files changed

+59
-3
lines changed

openhands-sdk/openhands/sdk/llm/utils/telemetry.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -271,9 +271,10 @@ def _compute_cost(self, resp: ModelResponse | ResponsesAPIResponse) -> float | N
271271

272272
# move on to litellm cost calculator
273273
# Handle model name properly - if it doesn't contain "/", use as-is
274-
model_parts = self.model_name.split("/")
275-
if len(model_parts) > 1:
276-
extra_kwargs["model"] = "/".join(model_parts[1:])
274+
if "/" in self.model_name:
275+
provider, bare = self.model_name.split("/", 1)
276+
extra_kwargs["model"] = bare
277+
extra_kwargs["custom_llm_provider"] = provider
277278
else:
278279
extra_kwargs["model"] = self.model_name
279280
try:

tests/sdk/llm/test_llm_telemetry.py

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,61 @@ def test_compute_cost_model_name_processing(self, mock_metrics):
332332
# Should strip provider prefix
333333
call_kwargs = mock_cost.call_args[1]
334334
assert call_kwargs["model"] == "gpt-4o-mini"
335+
assert call_kwargs["custom_llm_provider"] == "provider"
336+
337+
def test_compute_cost_passes_provider_to_litellm_cost_calculator(
338+
self, mock_metrics
339+
):
340+
telemetry = Telemetry(
341+
model_name="vertex_ai/claude-sonnet-4-5@20250929",
342+
metrics=mock_metrics,
343+
)
344+
345+
resp = ModelResponse(
346+
id="test-id",
347+
choices=[],
348+
created=1234567890,
349+
model="claude-sonnet-4-5@20250929",
350+
object="chat.completion",
351+
)
352+
353+
with patch(
354+
"openhands.sdk.llm.utils.telemetry.litellm_completion_cost"
355+
) as mock_cost:
356+
mock_cost.return_value = 0.10
357+
telemetry._compute_cost(resp)
358+
359+
mock_cost.assert_called_once()
360+
kwargs = mock_cost.call_args.kwargs
361+
assert kwargs["model"] == "claude-sonnet-4-5@20250929"
362+
assert kwargs["custom_llm_provider"] == "vertex_ai"
363+
364+
def test_compute_cost_passes_provider_to_litellm_cost_calculator_azure(
365+
self, mock_metrics
366+
):
367+
telemetry = Telemetry(
368+
model_name="azure/responses/gpt-5.2-chat",
369+
metrics=mock_metrics,
370+
)
371+
372+
resp = ModelResponse(
373+
id="test-id",
374+
choices=[],
375+
created=1234567890,
376+
model="gpt-5.2-chat",
377+
object="chat.completion",
378+
)
379+
380+
with patch(
381+
"openhands.sdk.llm.utils.telemetry.litellm_completion_cost"
382+
) as mock_cost:
383+
mock_cost.return_value = 0.05
384+
telemetry._compute_cost(resp)
385+
386+
mock_cost.assert_called_once()
387+
kwargs = mock_cost.call_args.kwargs
388+
assert kwargs["model"] == "responses/gpt-5.2-chat"
389+
assert kwargs["custom_llm_provider"] == "azure"
335390

336391

337392
class TestTelemetryLogging:

0 commit comments

Comments (0)