Skip to content

Commit cbd5c72

Browse files
committed
simplify tests
1 parent 7695b91 commit cbd5c72

File tree

1 file changed

+0
-79
lines changed

1 file changed

+0
-79
lines changed

tests/integrations/langchain/test_langchain.py

Lines changed: 0 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -1724,9 +1724,6 @@ def test_langchain_response_model_extraction(
17241724
llm_output_model,
17251725
expected_model,
17261726
):
1727-
from langchain_core.outputs import LLMResult
1728-
from langchain_core.messages import AIMessageChunk
1729-
17301727
sentry_init(
17311728
integrations=[LangchainIntegration(include_prompts=True)],
17321729
traces_sample_rate=1.0,
@@ -1792,79 +1789,3 @@ def test_langchain_response_model_extraction(
17921789
assert llm_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model
17931790
else:
17941791
assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {})
1795-
1796-
1797-
@pytest.mark.parametrize(
    "missing_attribute",
    [
        "message",
        "response_metadata",
        "generation_info",
        "llm_output",
    ],
)
def test_langchain_response_model_extraction_missing_attributes(
    sentry_init,
    capture_events,
    missing_attribute,
):
    """Response-model extraction must not crash (and must record no model)
    when the LLM result is missing one of the attributes the integration
    inspects: the generation's ``message``, the message's
    ``response_metadata``, the generation's ``generation_info``, or the
    response's ``llm_output``.

    Parametrized over which attribute is absent; in every case the span
    must simply omit ``SPANDATA.GEN_AI_RESPONSE_MODEL``.
    """
    # NOTE: the original also did a function-local
    # ``from langchain_core.messages import AIMessageChunk`` here, but the
    # name was never used in this test — removed as dead code.
    sentry_init(
        integrations=[LangchainIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

    run_id = "test-missing-attr-uuid"
    serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
    prompts = ["Test prompt"]

    with start_transaction():
        callback.on_llm_start(
            serialized=serialized,
            prompts=prompts,
            run_id=run_id,
            invocation_params={"model": "gpt-3.5-turbo"},
        )

        generation = Mock()
        generation.text = "Test response"

        # Build a generation whose mocked sub-objects return None for any
        # model lookup; drop the attribute under test entirely so the
        # integration has to cope with its absence.
        if missing_attribute != "message":
            message_mock = Mock()
            message_mock.response_metadata.get.return_value = None
            if missing_attribute == "response_metadata":
                delattr(message_mock, "response_metadata")
            generation.message = message_mock

        if missing_attribute != "generation_info":
            generation_info_mock = Mock()
            generation_info_mock.get.return_value = None
            generation.generation_info = generation_info_mock

        response = Mock()
        response.generations = [[generation]]

        if missing_attribute != "llm_output":
            llm_output_mock = Mock()
            llm_output_mock.get.return_value = None
            response.llm_output = llm_output_mock

        # Must not raise even though a model source is missing.
        callback.on_llm_end(response=response, run_id=run_id)

    assert len(events) > 0
    tx = events[0]
    assert tx["type"] == "transaction"

    llm_spans = [
        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
    ]
    assert len(llm_spans) > 0

    llm_span = llm_spans[0]

    # No usable model attribute anywhere -> the span must omit the field.
    assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {})

0 commit comments

Comments (0)