@@ -7,6 +7,7 @@
 
 import httpx
 import openai
+import pydantic
 import pytest
 from dirty_equals import IsNumeric
 from httpx._transports.mock import MockTransport
@@ -25,9 +26,16 @@
 from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
 
 import logfire
-from logfire._internal.utils import suppress_instrumentation
+from logfire._internal.utils import get_version, suppress_instrumentation
 from logfire.testing import TestExporter
 
+pytestmark = [
+    pytest.mark.skipif(
+        get_version(pydantic.__version__) < get_version('2.5'),
+        reason='Requires Pydantic 2.5 or higher to import genai-prices and set operation.cost attribute',
+    ),
+]
+
 
 def request_handler(request: httpx.Request) -> httpx.Response:
     """Used to mock httpx requests
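The new module-level `pytestmark` gates every test in this file on the Pydantic version, since `operation.cost` is only set when genai-prices can be imported. For readers unfamiliar with the pattern, here is a minimal equivalent sketch using `packaging` instead of logfire's internal `get_version` helper (the helper's exact behaviour is an assumption here):

```python
import pydantic
import pytest
from packaging.version import Version

# Skip the whole module on Pydantic < 2.5, where genai-prices (and therefore the
# 'operation.cost' attribute asserted in the snapshots below) is unavailable.
pytestmark = pytest.mark.skipif(
    Version(pydantic.__version__) < Version('2.5'),
    reason='Requires Pydantic 2.5 or higher to import genai-prices and set operation.cost attribute',
)
```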
@@ -423,6 +431,7 @@ def test_sync_chat_completions(instrumented_client: openai.Client, exporter: Tes
 'gen_ai.response.model': 'gpt-4',
 'gen_ai.usage.input_tokens': 2,
 'gen_ai.usage.output_tokens': 1,
+'operation.cost': 0.00012,
 'response_data': (
     {
         'message': {
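The expected `operation.cost` of 0.00012 is consistent with gpt-4's commonly published rates of roughly $30 per million input tokens and $60 per million output tokens; those figures are an assumption here, as the actual number comes from the genai-prices data:

```python
import math

# Illustrative arithmetic only; genai-prices is the source of truth for the rates.
input_tokens, output_tokens = 2, 1
cost = input_tokens * (30 / 1_000_000) + output_tokens * (60 / 1_000_000)
assert math.isclose(cost, 0.00012)  # matches the snapshot's 'operation.cost'
```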
@@ -454,6 +463,7 @@ def test_sync_chat_completions(instrumented_client: openai.Client, exporter: Tes
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 'response_data': {
     'type': 'object',
     'properties': {
@@ -518,6 +528,7 @@ async def test_async_chat_completions(instrumented_async_client: openai.AsyncCli
 'gen_ai.response.model': 'gpt-4',
 'gen_ai.usage.input_tokens': 2,
 'gen_ai.usage.output_tokens': 1,
+'operation.cost': 0.00012,
 'response_data': (
     {
         'message': {
@@ -549,6 +560,7 @@ async def test_async_chat_completions(instrumented_async_client: openai.AsyncCli
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 'response_data': {
     'type': 'object',
     'properties': {
@@ -1410,6 +1422,7 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter)
 'gen_ai.response.model': 'gpt-3.5-turbo-instruct',
 'gen_ai.usage.input_tokens': 2,
 'gen_ai.usage.output_tokens': 1,
+'operation.cost': 5e-06,
 'response_data': {
     'finish_reason': 'stop',
     'text': 'Nine',
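Similarly, 5e-06 for gpt-3.5-turbo-instruct lines up with rates of about $1.50 per million input tokens and $2.00 per million output tokens (again an assumption; the snapshot value is produced by genai-prices):

```python
import math

input_tokens, output_tokens = 2, 1
cost = input_tokens * (1.5 / 1_000_000) + output_tokens * (2.0 / 1_000_000)
assert math.isclose(cost, 5e-06)  # the 'operation.cost' asserted above
```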
@@ -1431,6 +1444,7 @@ def test_completions(instrumented_client: openai.Client, exporter: TestExporter)
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 'response_data': {
     'type': 'object',
     'properties': {
@@ -1933,6 +1947,7 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
 'gen_ai.response.model': 'gpt-3.5-turbo-instruct',
 'gen_ai.usage.input_tokens': 2,
 'gen_ai.usage.output_tokens': 1,
+'operation.cost': 5e-06,
 'response_data': {
     'finish_reason': 'stop',
     'text': 'Nine',
@@ -1954,6 +1969,7 @@ def test_dont_suppress_httpx(exporter: TestExporter) -> None:
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 'response_data': {
     'type': 'object',
     'properties': {
@@ -2038,6 +2054,7 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
 'gen_ai.response.model': 'gpt-3.5-turbo-instruct',
 'gen_ai.usage.input_tokens': 2,
 'gen_ai.usage.output_tokens': 1,
+'operation.cost': 5e-06,
 'response_data': {
     'finish_reason': 'stop',
     'text': 'Nine',
@@ -2059,6 +2076,7 @@ def test_suppress_httpx(exporter: TestExporter) -> None:
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 'response_data': {
     'type': 'object',
     'properties': {
@@ -2306,6 +2324,7 @@ def test_responses_api(exporter: TestExporter) -> None:
 'gen_ai.response.model': 'gpt-4.1-2025-04-14',
 'gen_ai.usage.input_tokens': 65,
 'gen_ai.usage.output_tokens': 17,
+'operation.cost': 0.000266,
 'events': [
     {'event.name': 'gen_ai.system.message', 'content': 'Be nice', 'role': 'system'},
     {
@@ -2335,6 +2354,7 @@ def test_responses_api(exporter: TestExporter) -> None:
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 },
 },
 },
@@ -2359,6 +2379,7 @@ def test_responses_api(exporter: TestExporter) -> None:
 'gen_ai.response.model': 'gpt-4.1-2025-04-14',
 'gen_ai.usage.input_tokens': 43,
 'gen_ai.usage.output_tokens': 21,
+'operation.cost': 0.000254,
 'events': [
     {
         'event.name': 'gen_ai.user.message',
@@ -2399,6 +2420,7 @@ def test_responses_api(exporter: TestExporter) -> None:
 'gen_ai.response.model': {},
 'gen_ai.usage.input_tokens': {},
 'gen_ai.usage.output_tokens': {},
+'operation.cost': {},
 },
 },
 },
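Both responses-API costs are consistent with gpt-4.1 rates of roughly $2 per million input tokens and $8 per million output tokens (assumed figures; genai-prices supplies the real ones):

```python
import math

def estimated_cost(input_tokens: int, output_tokens: int) -> float:
    """Rough gpt-4.1 cost, assuming ~$2 per 1M input and ~$8 per 1M output tokens."""
    return input_tokens * (2 / 1_000_000) + output_tokens * (8 / 1_000_000)

assert math.isclose(estimated_cost(65, 17), 0.000266)  # first response span
assert math.isclose(estimated_cost(43, 21), 0.000254)  # second response span
```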