Commit 730f0eb

remove globals from test
1 parent 9bd128e commit 730f0eb

tests/integrations/langchain/test_langchain.py

Lines changed: 87 additions & 6 deletions
@@ -962,9 +962,6 @@ def test_langchain_message_role_normalization_units():
 
 def test_langchain_llm_exception_captured(sentry_init, capture_events):
     """Test that exceptions during LLM execution are properly captured with full context."""
-    global llm_type
-    llm_type = "openai-chat"
-
     sentry_init(
         integrations=[LangchainIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
@@ -980,9 +977,6 @@ def test_langchain_llm_exception_captured(sentry_init, capture_events):
         ]
     )
 
-    global stream_result_mock
-    stream_result_mock = Mock(side_effect=RuntimeError("LLM service unavailable"))
-
     llm = MockOpenAI(
         model_name="gpt-3.5-turbo",
         temperature=0,
@@ -1234,3 +1228,90 @@ def test_langchain_exception_span_cleanup(sentry_init, capture_events):
     for span in errored_spans:
         assert "timestamp" in span
         assert span["timestamp"] > span.get("start_timestamp", 0)
+
+
+def test_langchain_callback_error_handler(sentry_init, capture_events):
+    """Test that the callback error handlers properly capture exceptions."""
+    from langchain_core.outputs import LLMResult
+
+    sentry_init(
+        integrations=[LangchainIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
+
+    run_id = "12345678-1234-1234-1234-123456789012"
+    serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
+    prompts = ["Test prompt"]
+
+    with start_transaction(name="test_callback_error"):
+        callback.on_llm_start(
+            serialized=serialized,
+            prompts=prompts,
+            run_id=run_id,
+            invocation_params={"model": "gpt-3.5-turbo"},
+        )
+
+        test_exception = RuntimeError("API Error")
+        callback.on_llm_error(error=test_exception, run_id=run_id)
+
+    assert len(events) >= 1
+
+    error_events = [e for e in events if e.get("level") == "error"]
+    assert len(error_events) > 0
+
+    error_event = error_events[0]
+    assert "exception" in error_event
+
+    exception = error_event["exception"]["values"][0]
+    assert exception["type"] == "RuntimeError"
+    assert exception["value"] == "API Error"
+
+    transaction_events = [e for e in events if e.get("type") == "transaction"]
+    if transaction_events:
+        transaction_event = transaction_events[0]
+        assert transaction_event["contexts"]["trace"]["status"] == "error"
+
+
+def test_langchain_chat_model_error_handler(sentry_init, capture_events):
+    """Test that chat model errors are properly captured."""
+    from langchain_core.messages import HumanMessage
+
+    sentry_init(
+        integrations=[LangchainIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
+
+    run_id = "87654321-4321-4321-4321-210987654321"
+    serialized = {"_type": "openai-chat", "model_name": "gpt-4"}
+    messages = [[HumanMessage(content="Test message")]]
+
+    with start_transaction(name="test_chat_model_error"):
+        callback.on_chat_model_start(
+            serialized=serialized,
+            messages=messages,
+            run_id=run_id,
+            invocation_params={"model": "gpt-4", "temperature": 0.7},
+        )
+
+        test_exception = ValueError("Chat model rate limit exceeded")
+        callback.on_chat_model_error(error=test_exception, run_id=run_id)
+
+    assert len(events) >= 1
+
+    error_events = [e for e in events if e.get("level") == "error"]
+    assert len(error_events) > 0
+
+    error_event = error_events[0]
+    assert "exception" in error_event
+
+    exception = error_event["exception"]["values"][0]
+    assert exception["type"] == "ValueError"
+    assert exception["value"] == "Chat model rate limit exceeded"

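For context, the new tests replace the old pattern of mutating module-level globals (llm_type, stream_result_mock) with direct use of the callback object: instantiate SentryLangchainCallback, then drive its lifecycle hooks with an explicit run_id. Below is a minimal sketch of that pattern, assuming the imports used in this test file (SentryLangchainCallback from sentry_sdk.integrations.langchain, start_transaction from sentry_sdk); drive_error_path is an illustrative name, not SDK API.

import uuid

from sentry_sdk import start_transaction
from sentry_sdk.integrations.langchain import SentryLangchainCallback

def drive_error_path():
    # Illustrative helper (hypothetical name): exercises one run's error path
    # without touching any module-level global state.
    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
    run_id = uuid.uuid4()  # the callback tracks each LangChain run by its run_id

    with start_transaction(name="sketch"):
        # Open the LLM span for this run...
        callback.on_llm_start(
            serialized={"_type": "openai-chat", "model_name": "gpt-3.5-turbo"},
            prompts=["Test prompt"],
            run_id=run_id,
            invocation_params={"model": "gpt-3.5-turbo"},
        )
        # ...then report a failure for the same run; the handler captures the
        # exception and closes the span, and the tests above assert that the
        # surrounding transaction ends with status "error".
        callback.on_llm_error(error=RuntimeError("API Error"), run_id=run_id)

Because the run_id ties on_llm_start and on_llm_error together, each test can exercise the error path in isolation, which is what made the global llm_type and stream_result_mock variables unnecessary.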