Skip to content

Commit f406488

Browse files
committed
Addressed Aaron's comments
1 parent fe74ddc commit f406488

File tree

6 files changed

+47
-40
lines changed

6 files changed

+47
-40
lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
"""
16-
Langchain instrumentation supporting `ChatOpenAI`, it can be enabled by
16+
Langchain instrumentation supporting `ChatOpenAI` and `ChatBedrock`; it can be enabled by
1717
using ``LangChainInstrumentor``.
1818
1919
Usage
@@ -38,6 +38,7 @@
3838

3939
from typing import Any, Callable, Collection, Optional
4040

41+
from langchain_core.callbacks import BaseCallbackHandler # type: ignore
4142
from wrapt import wrap_function_wrapper # type: ignore
4243

4344
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
@@ -108,7 +109,7 @@ def __init__(
108109
def __call__(
109110
self,
110111
wrapped: Callable[..., None],
111-
instance: Any,
112+
instance: BaseCallbackHandler,
112113
args: tuple[Any, ...],
113114
kwargs: dict[str, Any],
114115
):

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -63,11 +63,11 @@ def on_chat_model_start(
6363
params = kwargs
6464

6565
request_model = "unknown"
66-
for model_tag in ("model", "model_id", "model_name", "ls_model_name"):
67-
if (model := kwargs.get(model_tag)) is not None:
68-
request_model = model
69-
break
70-
elif (model := (params or {}).get(model_tag)) is not None:
66+
for model_tag in (
67+
"model_name", # ChatOpenAI
68+
"model_id", # ChatBedrock
69+
):
70+
if (model := (params or {}).get(model_tag)) is not None:
7171
request_model = model
7272
break
7373
elif (model := (metadata or {}).get(model_tag)) is not None:
@@ -102,11 +102,13 @@ def on_chat_model_start(
102102
seed = params.get("seed")
103103
if seed is not None:
104104
span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)
105+
# ChatOpenAI
105106
temperature = params.get("temperature")
106107
if temperature is not None:
107108
span.set_attribute(
108109
GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
109110
)
111+
# ChatOpenAI
110112
max_tokens = params.get("max_completion_tokens")
111113
if max_tokens is not None:
112114
span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
@@ -115,18 +117,20 @@ def on_chat_model_start(
115117
provider = metadata.get("ls_provider")
116118
if provider is not None:
117119
span.set_attribute("gen_ai.provider.name", provider)
120+
# ChatBedrock
118121
temperature = metadata.get("ls_temperature")
119122
if temperature is not None:
120123
span.set_attribute(
121124
GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
122125
)
126+
# ChatBedrock
123127
max_tokens = metadata.get("ls_max_tokens")
124128
if max_tokens is not None:
125129
span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
126130

127131
def on_llm_end(
128132
self,
129-
response: LLMResult, # type: ignore
133+
response: LLMResult,
130134
*,
131135
run_id: UUID,
132136
parent_run_id: UUID | None,
@@ -145,21 +149,23 @@ def on_llm_end(
145149
chat_generation, "generation_info", None
146150
)
147151
if generation_info is not None:
148-
finish_reason = generation_info.get("finish_reason")
152+
finish_reason = generation_info.get(
153+
"finish_reason", "unknown"
154+
)
149155
if finish_reason is not None:
150-
finish_reasons.append(str(finish_reason) or "error")
156+
finish_reasons.append(str(finish_reason))
151157
if chat_generation.message:
152158
if (
153159
generation_info is None
154160
and chat_generation.message.response_metadata
155161
):
156162
finish_reason = (
157163
chat_generation.message.response_metadata.get(
158-
"stopReason"
164+
"stopReason", "unknown"
159165
)
160166
)
161-
if finish_reason is not None and span.is_recording():
162-
finish_reasons.append(finish_reason or "error")
167+
if finish_reason is not None:
168+
finish_reasons.append(str(finish_reason))
163169
if chat_generation.message.usage_metadata:
164170
input_tokens = (
165171
chat_generation.message.usage_metadata.get(

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,10 +96,9 @@ def end_span(self, run_id: UUID) -> None:
9696
for child_id in state.children:
9797
child_state = self.spans.get(child_id)
9898
if child_state:
99-
# Always end child spans as OpenTelemetry spans don't expose end_time directly
10099
child_state.span.end()
101-
# Always end the span as OpenTelemetry spans don't expose end_time directly
102100
state.span.end()
101+
del self.spans[run_id]
103102

104103
def get_span(self, run_id: UUID) -> Optional[Span]:
105104
state = self.spans.get(run_id)

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ interactions:
4242
host:
4343
- api.openai.com
4444
user-agent:
45-
- OpenAI/Python 1.105.0
45+
- OpenAI/Python 1.106.1
4646
x-stainless-arch:
4747
- arm64
4848
x-stainless-async:
@@ -52,7 +52,7 @@ interactions:
5252
x-stainless-os:
5353
- MacOS
5454
x-stainless-package-version:
55-
- 1.105.0
55+
- 1.106.1
5656
x-stainless-raw-response:
5757
- 'true'
5858
x-stainless-retry-count:
@@ -67,9 +67,9 @@ interactions:
6767
body:
6868
string: |-
6969
{
70-
"id": "chatcmpl-CBmRYRvfmoAG6EmqDOH4IwkfN02MJ",
70+
"id": "chatcmpl-CCAQbtjsmG2294sQ6utRc16OQWeol",
7171
"object": "chat.completion",
72-
"created": 1756923860,
72+
"created": 1757016057,
7373
"model": "gpt-3.5-turbo-0125",
7474
"choices": [
7575
{
@@ -104,13 +104,13 @@ interactions:
104104
}
105105
headers:
106106
CF-RAY:
107-
- 9797488a68f42b63-LAX
107+
- 97a01376ad4d2af1-LAX
108108
Connection:
109109
- keep-alive
110110
Content-Type:
111111
- application/json
112112
Date:
113-
- Wed, 03 Sep 2025 18:24:21 GMT
113+
- Thu, 04 Sep 2025 20:00:57 GMT
114114
Server:
115115
- cloudflare
116116
Set-Cookie: test_set_cookie
@@ -130,27 +130,27 @@ interactions:
130130
- '822'
131131
openai-organization: test_openai_org_id
132132
openai-processing-ms:
133-
- '783'
133+
- '282'
134134
openai-project:
135-
- proj_3o0Aqh32nPiGbrex8BJtPTCm
135+
- proj_GLiYlAc06hF0Fm06IMReZLy4
136136
openai-version:
137137
- '2020-10-01'
138138
x-envoy-upstream-service-time:
139-
- '787'
139+
- '287'
140140
x-ratelimit-limit-requests:
141-
- '5000'
141+
- '10000'
142142
x-ratelimit-limit-tokens:
143-
- '2000000'
143+
- '200000'
144144
x-ratelimit-remaining-requests:
145-
- '4999'
145+
- '9999'
146146
x-ratelimit-remaining-tokens:
147-
- '1999982'
147+
- '199982'
148148
x-ratelimit-reset-requests:
149-
- 12ms
149+
- 8.64s
150150
x-ratelimit-reset-tokens:
151-
- 0s
151+
- 5ms
152152
x-request-id:
153-
- req_c1bd8705b06b4e9180a5d8340e44785c
153+
- req_0e343602788d4f33869d09afcc7d4819
154154
status:
155155
code: 200
156156
message: OK

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -30,15 +30,16 @@ interactions:
3030
YXBwbGljYXRpb24vanNvbg==
3131
User-Agent:
3232
- !!binary |
33-
Qm90bzMvMS40MC4yMiBtZC9Cb3RvY29yZSMxLjQwLjIyIHVhLzIuMSBvcy9tYWNvcyMyNC42LjAg
34-
bWQvYXJjaCNhcm02NCBsYW5nL3B5dGhvbiMzLjEzLjUgbWQvcHlpbXBsI0NQeXRob24gbS9aLGIs
35-
RCBjZmcvcmV0cnktbW9kZSNsZWdhY3kgQm90b2NvcmUvMS40MC4yMg==
33+
Qm90bzMvMS40MC4yNCBtZC9Cb3RvY29yZSMxLjQwLjI0IHVhLzIuMSBvcy9tYWNvcyMyNC42LjAg
34+
bWQvYXJjaCNhcm02NCBsYW5nL3B5dGhvbiMzLjEzLjUgbWQvcHlpbXBsI0NQeXRob24gbS9iLFos
35+
RCBjZmcvcmV0cnktbW9kZSNsZWdhY3kgQm90b2NvcmUvMS40MC4yNCB4LWNsaWVudC1mcmFtZXdv
36+
cms6bGFuZ2NoYWluLWF3cw==
3637
X-Amz-Date:
3738
- !!binary |
38-
MjAyNTA5MDRUMDIyNzM4Wg==
39+
MjAyNTA5MDRUMjAwMDU4Wg==
3940
amz-sdk-invocation-id:
4041
- !!binary |
41-
MTMwMjBiMWUtZDhkOC00NTNkLWI1ZjYtY2U5Yjk5ZWQ4Zjg4
42+
MGQ5MTVjMDUtNzM3YS00OTQwLWIzM2ItMzYwMGIzZGIzYzMy
4243
amz-sdk-request:
4344
- !!binary |
4445
YXR0ZW1wdD0x
@@ -51,7 +52,7 @@ interactions:
5152
string: |-
5253
{
5354
"metrics": {
54-
"latencyMs": 435
55+
"latencyMs": 416
5556
},
5657
"output": {
5758
"message": {
@@ -78,11 +79,11 @@ interactions:
7879
Content-Type:
7980
- application/json
8081
Date:
81-
- Thu, 04 Sep 2025 02:27:38 GMT
82+
- Thu, 04 Sep 2025 20:00:58 GMT
8283
Set-Cookie: test_set_cookie
8384
openai-organization: test_openai_org_id
8485
x-amzn-RequestId:
85-
- 38e8f9b0-89f9-4d78-8da4-bba3948f1889
86+
- 9fd6d377-fc60-4b28-ab3b-a5c723b218c2
8687
status:
8788
code: 200
8889
message: OK

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def fixture_chat_openai_gpt_3_5_turbo_model():
3434

3535
@pytest.fixture(scope="function", name="us_amazon_nova_lite_v1_0")
3636
def fixture_us_amazon_nova_lite_v1_0():
37-
llm_model_value = "arn:aws:bedrock:us-west-2:906383545488:inference-profile/us.amazon.nova-lite-v1:0"
37+
llm_model_value = "us.amazon.nova-lite-v1:0"
3838
llm = ChatBedrock(
3939
model_id=llm_model_value,
4040
client=boto3.client(

0 commit comments

Comments
 (0)