
Commit 344c893

Move metric tests to separate module
1 parent 4dae8b5 commit 344c893

File tree

4 files changed (+191, -205 lines)


instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_metrics.yaml

Lines changed: 1 addition & 0 deletions
@@ -64,6 +64,7 @@ interactions:
         "finish_reason": "stop"
       }
     ],
+    "service_tier": "default",
     "usage": {
       "prompt_tokens": 12,
       "completion_tokens": 12,

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

Lines changed: 0 additions & 93 deletions
@@ -32,7 +32,6 @@
 from opentelemetry.semconv._incubating.attributes import (
     server_attributes as ServerAttributes,
 )
-from opentelemetry.semconv._incubating.metrics import gen_ai_metrics


 @pytest.mark.vcr()
@@ -881,95 +880,3 @@ def get_current_weather_tool_definition():
             },
         },
     }
-
-
-def assert_all_metric_attributes(data_point):
-    assert GenAIAttributes.GEN_AI_OPERATION_NAME in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
-        == GenAIAttributes.GenAiOperationNameValues.CHAT.value
-    )
-    assert GenAIAttributes.GEN_AI_SYSTEM in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_SYSTEM]
-        == GenAIAttributes.GenAiSystemValues.OPENAI.value
-    )
-    assert GenAIAttributes.GEN_AI_REQUEST_MODEL in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
-        == "gpt-4o-mini"
-    )
-    assert GenAIAttributes.GEN_AI_RESPONSE_MODEL in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL]
-        == "gpt-4o-mini-2024-07-18"
-    )
-
-
-@pytest.mark.vcr()
-@pytest.mark.asyncio()
-async def test_async_chat_completion_metrics(
-    metric_reader, async_openai_client, instrument_with_content
-):
-    llm_model_value = "gpt-4o-mini"
-    messages_value = [{"role": "user", "content": "Say this is a test"}]
-
-    await async_openai_client.chat.completions.create(
-        messages=messages_value, model=llm_model_value, stream=False
-    )
-
-    metrics = metric_reader.get_metrics_data().resource_metrics
-    assert len(metrics) == 1
-
-    metric_data = metrics[0].scope_metrics[0].metrics
-    assert len(metric_data) == 2
-
-    duration_metric = next(
-        (
-            m
-            for m in metric_data
-            if m.name == gen_ai_metrics.GEN_AI_CLIENT_OPERATION_DURATION
-        ),
-        None,
-    )
-    assert duration_metric is not None
-    assert duration_metric.data.data_points[0].sum > 0
-    assert_all_metric_attributes(duration_metric.data.data_points[0])
-
-    token_usage_metric = next(
-        (
-            m
-            for m in metric_data
-            if m.name == gen_ai_metrics.GEN_AI_CLIENT_TOKEN_USAGE
-        ),
-        None,
-    )
-    assert token_usage_metric is not None
-
-    input_token_usage = next(
-        (
-            d
-            for d in token_usage_metric.data.data_points
-            if d.attributes[GenAIAttributes.GEN_AI_TOKEN_TYPE]
-            == GenAIAttributes.GenAiTokenTypeValues.INPUT.value
-        ),
-        None,
-    )
-
-    assert input_token_usage is not None
-    assert input_token_usage.sum == 12
-    assert_all_metric_attributes(input_token_usage)
-
-    output_token_usage = next(
-        (
-            d
-            for d in token_usage_metric.data.data_points
-            if d.attributes[GenAIAttributes.GEN_AI_TOKEN_TYPE]
-            == GenAIAttributes.GenAiTokenTypeValues.COMPLETION.value
-        ),
-        None,
-    )
-
-    assert output_token_usage is not None
-    assert output_token_usage.sum == 12
-    assert_all_metric_attributes(output_token_usage)
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

Lines changed: 0 additions & 112 deletions
@@ -902,115 +902,3 @@ def get_current_weather_tool_definition():
             },
         },
     }
-
-
-def assert_all_metric_attributes(data_point):
-    assert GenAIAttributes.GEN_AI_OPERATION_NAME in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
-        == GenAIAttributes.GenAiOperationNameValues.CHAT.value
-    )
-    assert GenAIAttributes.GEN_AI_SYSTEM in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_SYSTEM]
-        == GenAIAttributes.GenAiSystemValues.OPENAI.value
-    )
-    assert GenAIAttributes.GEN_AI_REQUEST_MODEL in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
-        == "gpt-4o-mini"
-    )
-    assert GenAIAttributes.GEN_AI_RESPONSE_MODEL in data_point.attributes
-    assert (
-        data_point.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL]
-        == "gpt-4o-mini-2024-07-18"
-    )
-    assert "gen_ai.openai.response.system_fingerprint" in data_point.attributes
-    assert (
-        data_point.attributes["gen_ai.openai.response.system_fingerprint"]
-        == "fp_0ba0d124f1"
-    )
-    assert (
-        GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER
-        in data_point.attributes
-    )
-    assert (
-        data_point.attributes[
-            GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER
-        ]
-        == "default"
-    )
-    assert (
-        data_point.attributes[ServerAttributes.SERVER_ADDRESS]
-        == "api.openai.com"
-    )
-
-
-@pytest.mark.vcr()
-def test_chat_completion_metrics(
-    metric_reader, openai_client, instrument_with_content
-):
-    llm_model_value = "gpt-4o-mini"
-    messages_value = [{"role": "user", "content": "Say this is a test"}]
-
-    openai_client.chat.completions.create(
-        messages=messages_value, model=llm_model_value, stream=False
-    )
-
-    metrics = metric_reader.get_metrics_data().resource_metrics
-    assert len(metrics) == 1
-
-    metric_data = metrics[0].scope_metrics[0].metrics
-    assert len(metric_data) == 2
-
-    duration_metric = next(
-        (
-            m
-            for m in metric_data
-            if m.name == gen_ai_metrics.GEN_AI_CLIENT_OPERATION_DURATION
-        ),
-        None,
-    )
-    assert duration_metric is not None
-    assert duration_metric.data.data_points[0].sum > 0
-    assert_all_metric_attributes(duration_metric.data.data_points[0])
-
-    token_usage_metric = next(
-        (
-            m
-            for m in metric_data
-            if m.name == gen_ai_metrics.GEN_AI_CLIENT_TOKEN_USAGE
-        ),
-        None,
-    )
-    assert token_usage_metric is not None
-
-    input_token_usage = next(
-        (
-            d
-            for d in token_usage_metric.data.data_points
-            if d.attributes[GenAIAttributes.GEN_AI_TOKEN_TYPE]
-            == GenAIAttributes.GenAiTokenTypeValues.INPUT.value
-        ),
-        None,
-    )
-    assert input_token_usage is not None
-    assert input_token_usage.sum == 12
-    # assert against buckets [1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864]
-    assert input_token_usage.bucket_counts[2] == 1
-    assert_all_metric_attributes(input_token_usage)
-
-    output_token_usage = next(
-        (
-            d
-            for d in token_usage_metric.data.data_points
-            if d.attributes[GenAIAttributes.GEN_AI_TOKEN_TYPE]
-            == GenAIAttributes.GenAiTokenTypeValues.COMPLETION.value
-        ),
-        None,
-    )
-    assert output_token_usage is not None
-    assert output_token_usage.sum == 5
-    # assert against buckets [1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864]
-    assert output_token_usage.bucket_counts[2] == 1
-    assert_all_metric_attributes(output_token_usage)
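
The new module that receives these tests is the fourth changed file and is not shown in this view. Purely as a condensed sketch of the consolidation, assuming a hypothetical file such as tests/test_chat_metrics.py (name and exact contents not confirmed by this diff), the moved pieces would line up roughly as follows:

# Hypothetical layout of the consolidated metrics test module; the real file
# added by this commit is not visible in this diff view.
import pytest

from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)
from opentelemetry.semconv._incubating.metrics import gen_ai_metrics


def assert_all_metric_attributes(data_point):
    # Shared helper formerly duplicated in test_chat_completions.py and
    # test_async_chat_completions.py.
    assert (
        data_point.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
        == GenAIAttributes.GenAiOperationNameValues.CHAT.value
    )
    assert (
        data_point.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
        == "gpt-4o-mini"
    )


@pytest.mark.vcr()
def test_chat_completion_metrics(
    metric_reader, openai_client, instrument_with_content
):
    # Same flow as the removed sync test: record one chat completion, then
    # check the duration and token-usage histograms.
    openai_client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        model="gpt-4o-mini",
        stream=False,
    )
    metric_data = (
        metric_reader.get_metrics_data()
        .resource_metrics[0]
        .scope_metrics[0]
        .metrics
    )
    duration_metric = next(
        m
        for m in metric_data
        if m.name == gen_ai_metrics.GEN_AI_CLIENT_OPERATION_DURATION
    )
    assert duration_metric.data.data_points[0].sum > 0
    assert_all_metric_attributes(duration_metric.data.data_points[0])

With the metric tests in one place, they can be run in isolation, e.g. pytest tests/test_chat_metrics.py (path hypothetical), without exercising the span-focused chat-completion tests.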
