11from typing import Optional
2+ from uuid import uuid4
23
34import pytest
45from langchain_core .messages import HumanMessage , SystemMessage
56
7+ from opentelemetry .instrumentation .langchain .callback_handler import (
8+ OpenTelemetryLangChainCallbackHandler ,
9+ )
610from opentelemetry .sdk .trace import ReadableSpan
11+ from opentelemetry .sdk .trace import TracerProvider
12+ from opentelemetry .sdk .trace .export import SimpleSpanProcessor
13+ from opentelemetry .sdk .trace .export .in_memory_span_exporter import (
14+ InMemorySpanExporter ,
15+ )
716from opentelemetry .semconv ._incubating .attributes import gen_ai_attributes
17+ from opentelemetry .semconv ._incubating .attributes .azure_attributes import (
18+ AZURE_RESOURCE_PROVIDER_NAMESPACE ,
19+ )
20+ from opentelemetry .semconv ._incubating .attributes .openai_attributes import (
21+ OPENAI_REQUEST_SERVICE_TIER ,
22+ )
823
924
1025# span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model are coming from fixtures defined in conftest.py
@@ -85,7 +100,9 @@ def assert_openai_completion_attributes(
85100 )
86101 assert span .attributes [gen_ai_attributes .GEN_AI_REQUEST_MAX_TOKENS ] == 100
87102 assert span .attributes [gen_ai_attributes .GEN_AI_REQUEST_TEMPERATURE ] == 0.1
88- assert span .attributes ["gen_ai.provider.name" ] == "openai"
103+ assert (
104+ span .attributes [gen_ai_attributes .GEN_AI_PROVIDER_NAME ] == "openai"
105+ )
89106 assert gen_ai_attributes .GEN_AI_RESPONSE_ID in span .attributes
90107 assert span .attributes [gen_ai_attributes .GEN_AI_REQUEST_TOP_P ] == 0.9
91108 assert (
@@ -139,7 +156,10 @@ def assert_bedrock_completion_attributes(
139156 == "us.amazon.nova-lite-v1:0"
140157 )
141158
142- assert span .attributes ["gen_ai.provider.name" ] == "amazon_bedrock"
159+ assert (
160+ span .attributes [gen_ai_attributes .GEN_AI_PROVIDER_NAME ]
161+ == "aws.bedrock"
162+ )
143163 assert span .attributes [gen_ai_attributes .GEN_AI_REQUEST_MAX_TOKENS ] == 100
144164 assert span .attributes [gen_ai_attributes .GEN_AI_REQUEST_TEMPERATURE ] == 0.1
145165
@@ -164,3 +184,51 @@ def assert_bedrock_completion_attributes(
164184 assert (
165185 gen_ai_attributes .GEN_AI_USAGE_OUTPUT_TOKENS not in span .attributes
166186 )
187+
188+
def test_azure_chat_sets_provider_and_server_attributes():
    """An AzureChatOpenAI run should emit a span carrying the azure provider
    name, the server address/port parsed from ``azure_endpoint``, and the
    OpenAI/Azure request attributes taken from the invocation params.
    """
    memory_exporter = InMemorySpanExporter()
    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SimpleSpanProcessor(memory_exporter))
    callback_handler = OpenTelemetryLangChainCallbackHandler(
        tracer_provider.get_tracer(__name__)
    )

    chat_run_id = uuid4()

    # Simulate the start of an Azure chat-model invocation.
    callback_handler.on_chat_model_start(
        serialized={"name": "AzureChatOpenAI"},
        messages=[],
        run_id=chat_run_id,
        tags=None,
        parent_run_id=None,
        metadata={"ls_model_name": "gpt-4o"},
        invocation_params={
            "params": {
                "model": "gpt-4o",
                "azure_endpoint": "https://example.openai.azure.com/",
                "service_tier": "default",
                "n": 2,
            }
        },
    )
    callback_handler.span_manager.end_span(chat_run_id)

    span = memory_exporter.get_finished_spans()[0]
    attrs = span.attributes

    assert span.name == "chat gpt-4o"
    assert attrs[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4o"
    assert attrs[gen_ai_attributes.GEN_AI_PROVIDER_NAME] == "azure.ai.openai"
    # NOTE(review): address/port are presumably derived from the
    # azure_endpoint URL (443 being the https default) — confirm in handler.
    assert attrs["server.address"] == "example.openai.azure.com"
    assert attrs["server.port"] == 443
    assert attrs[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] == 2
    assert attrs[OPENAI_REQUEST_SERVICE_TIER] == "default"
    assert (
        attrs[AZURE_RESOURCE_PROVIDER_NAMESPACE]
        == "Microsoft.CognitiveServices"
    )