Skip to content

Commit 44f4c1d

Browse files
committed
Switch tests and samples to gpt-4.1 and refresh VCR cassettes
1 parent c626a5e commit 44f4c1d

File tree

6 files changed

+38
-39
lines changed

6 files changed

+38
-39
lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
# Required OpenAI API key
44
OPENAI_API_KEY=sk-YOUR_API_KEY
55

6-
# Optional: override default model (defaults to gpt-4o-mini)
7-
# OPENAI_MODEL=gpt-4o-mini
6+
# Optional: override default model (defaults to gpt-4.1)
7+
# OPENAI_MODEL=gpt-4.1
88

99
# OTLP exporter configuration (update for your collector)
1010
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ class PlannerState(TypedDict):
184184

185185

186186
def _model_name() -> str:
187-
return os.getenv("OPENAI_MODEL", "gpt-4o-mini")
187+
return os.getenv("OPENAI_MODEL", "gpt-4.1")
188188

189189

190190
def _create_llm(
Lines changed: 24 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ interactions:
1212
"role": "user"
1313
}
1414
],
15-
"model": "gpt-3.5-turbo",
15+
"model": "gpt-4.1",
1616
"frequency_penalty": 0.5,
1717
"max_completion_tokens": 100,
1818
"presence_penalty": 0.5,
@@ -36,13 +36,13 @@ interactions:
3636
connection:
3737
- keep-alive
3838
content-length:
39-
- '316'
39+
- '310'
4040
content-type:
4141
- application/json
4242
host:
4343
- api.openai.com
4444
user-agent:
45-
- OpenAI/Python 1.106.1
45+
- OpenAI/Python 1.109.1
4646
x-stainless-arch:
4747
- arm64
4848
x-stainless-async:
@@ -52,25 +52,25 @@ interactions:
5252
x-stainless-os:
5353
- MacOS
5454
x-stainless-package-version:
55-
- 1.106.1
55+
- 1.109.1
5656
x-stainless-raw-response:
5757
- 'true'
5858
x-stainless-retry-count:
5959
- '0'
6060
x-stainless-runtime:
6161
- CPython
6262
x-stainless-runtime-version:
63-
- 3.13.5
63+
- 3.11.14
6464
method: POST
6565
uri: https://api.openai.com/v1/chat/completions
6666
response:
6767
body:
6868
string: |-
6969
{
70-
"id": "chatcmpl-CCAQbtjsmG2294sQ6utRc16OQWeol",
70+
"id": "chatcmpl-CTB9d2RMw2N2EBwVMAnYR45AvBvud",
7171
"object": "chat.completion",
72-
"created": 1757016057,
73-
"model": "gpt-3.5-turbo-0125",
72+
"created": 1761070425,
73+
"model": "gpt-4.1-2025-04-14",
7474
"choices": [
7575
{
7676
"index": 0,
@@ -100,17 +100,17 @@ interactions:
100100
}
101101
},
102102
"service_tier": "default",
103-
"system_fingerprint": null
103+
"system_fingerprint": "fp_e24a1fec47"
104104
}
105105
headers:
106106
CF-RAY:
107-
- 97a01376ad4d2af1-LAX
107+
- 9922bb0f79e7a9f8-SEA
108108
Connection:
109109
- keep-alive
110110
Content-Type:
111111
- application/json
112112
Date:
113-
- Thu, 04 Sep 2025 20:00:57 GMT
113+
- Tue, 21 Oct 2025 18:13:46 GMT
114114
Server:
115115
- cloudflare
116116
Set-Cookie: test_set_cookie
@@ -127,30 +127,32 @@ interactions:
127127
cf-cache-status:
128128
- DYNAMIC
129129
content-length:
130-
- '822'
130+
- '833'
131131
openai-organization: test_openai_org_id
132132
openai-processing-ms:
133-
- '282'
133+
- '353'
134134
openai-project:
135-
- proj_GLiYlAc06hF0Fm06IMReZLy4
135+
- proj_g22EI7wYA0CcaIteeCvlmSng
136136
openai-version:
137137
- '2020-10-01'
138138
x-envoy-upstream-service-time:
139-
- '287'
139+
- '514'
140+
x-openai-proxy-wasm:
141+
- v0.1
140142
x-ratelimit-limit-requests:
141-
- '10000'
143+
- '500'
142144
x-ratelimit-limit-tokens:
143-
- '200000'
145+
- '30000'
144146
x-ratelimit-remaining-requests:
145-
- '9999'
147+
- '499'
146148
x-ratelimit-remaining-tokens:
147-
- '199982'
149+
- '29982'
148150
x-ratelimit-reset-requests:
149-
- 8.64s
151+
- 120ms
150152
x-ratelimit-reset-tokens:
151-
- 5ms
153+
- 36ms
152154
x-request-id:
153-
- req_0e343602788d4f33869d09afcc7d4819
155+
- req_e6d40dc7ffc44d3e929f2883bdd89612
154156
status:
155157
code: 200
156158
message: OK

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ interactions:
4646
authorization:
4747
- Bearer test_openai_api_key
4848
method: POST
49-
uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/arn%3Aaws%3Abedrock%3Aus-west-2%3A906383545488%3Ainference-profile%2Fus.amazon.nova-lite-v1%3A0/converse
49+
uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/us.amazon.nova-lite-v1%3A0/converse
5050
response:
5151
body:
5252
string: |-

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,10 @@
1919
)
2020

2121

22-
@pytest.fixture(scope="function", name="chat_openai_gpt_3_5_turbo_model")
23-
def fixture_chat_openai_gpt_3_5_turbo_model():
22+
@pytest.fixture(scope="function", name="chat_openai_gpt_4_1_model")
23+
def fixture_chat_openai_gpt_4_1_model():
2424
llm = ChatOpenAI(
25-
model="gpt-3.5-turbo",
25+
model="gpt-4.1",
2626
temperature=0.1,
2727
max_tokens=100,
2828
top_p=0.9,

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -21,17 +21,17 @@
2121
)
2222

2323

24-
# span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model are coming from fixtures defined in conftest.py
24+
# span_exporter, start_instrumentation, chat_openai_gpt_4_1_model are coming from fixtures defined in conftest.py
2525
@pytest.mark.vcr()
26-
def test_chat_openai_gpt_3_5_turbo_model_llm_call(
27-
span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model
26+
def test_chat_openai_gpt_4_1_model_llm_call(
27+
span_exporter, start_instrumentation, chat_openai_gpt_4_1_model
2828
):
2929
messages = [
3030
SystemMessage(content="You are a helpful assistant!"),
3131
HumanMessage(content="What is the capital of France?"),
3232
]
3333

34-
response = chat_openai_gpt_3_5_turbo_model.invoke(messages)
34+
response = chat_openai_gpt_4_1_model.invoke(messages)
3535
assert response.content == "The capital of France is Paris."
3636

3737
# verify spans
@@ -87,15 +87,12 @@ def test_gemini(span_exporter, start_instrumentation, gemini):
8787
def assert_openai_completion_attributes(
8888
span: ReadableSpan, response: Optional
8989
):
90-
assert span.name == "chat gpt-3.5-turbo"
90+
assert span.name == "chat gpt-4.1"
9191
assert span.attributes[gen_ai_attributes.GEN_AI_OPERATION_NAME] == "chat"
92-
assert (
93-
span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL]
94-
== "gpt-3.5-turbo"
95-
)
92+
assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4.1"
9693
assert (
9794
span.attributes[gen_ai_attributes.GEN_AI_RESPONSE_MODEL]
98-
== "gpt-3.5-turbo-0125"
95+
== "gpt-4.1-2025-04-14"
9996
)
10097
assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS] == 100
10198
assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE] == 0.1

0 commit comments

Comments (0)