@@ -1686,3 +1686,185 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e
16861686 assert "List item" in input_data or "Single string query" in input_data , (
16871687 f"Expected input text in serialized data: { input_data } "
16881688 )
1689+
1690+
@pytest.mark.parametrize(
    "response_metadata_model,generation_info_model,llm_output_model,expected_model",
    [
        # Single-source cases: whichever source is populated wins.
        ("model-from-metadata", None, None, "model-from-metadata"),
        (None, "model-from-generation-info", None, "model-from-generation-info"),
        (None, None, "model-from-llm-output", "model-from-llm-output"),
        # Precedence cases: response_metadata > generation_info > llm_output.
        (
            "model-from-metadata",
            "model-from-generation-info",
            None,
            "model-from-metadata",
        ),
        ("model-from-metadata", None, "model-from-llm-output", "model-from-metadata"),
        (
            None,
            "model-from-generation-info",
            "model-from-llm-output",
            "model-from-generation-info",
        ),
        (
            "model-from-metadata",
            "model-from-generation-info",
            "model-from-llm-output",
            "model-from-metadata",
        ),
        # No source carries a model name: no response model is recorded.
        (None, None, None, None),
    ],
)
def test_langchain_response_model_extraction(
    sentry_init,
    capture_events,
    response_metadata_model,
    generation_info_model,
    llm_output_model,
    expected_model,
):
    """Check extraction of the response model name from an LLM result.

    The model name may appear in (highest precedence first)
    ``message.response_metadata``, ``generation.generation_info``, or
    ``response.llm_output``.  When none of the three carries it, the span
    must not get a GEN_AI_RESPONSE_MODEL attribute.
    """
    from langchain_core.messages import AIMessageChunk

    sentry_init(
        integrations=[LangchainIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

    run_id = "test-response-model-uuid"
    serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
    prompts = ["Test prompt"]

    with start_transaction():
        callback.on_llm_start(
            serialized=serialized,
            prompts=prompts,
            run_id=run_id,
            invocation_params={"model": "gpt-3.5-turbo"},
        )

        # Populate each source only when the parametrized value is present;
        # an absent source is an empty dict, so .get("model_name") -> None.
        response_metadata = {}
        if response_metadata_model is not None:
            response_metadata["model_name"] = response_metadata_model

        generation_info = {}
        if generation_info_model is not None:
            generation_info["model_name"] = generation_info_model

        llm_output = {}
        if llm_output_model is not None:
            llm_output["model_name"] = llm_output_model

        # Real AIMessageChunk so response_metadata behaves like production data.
        message = AIMessageChunk(
            content="Test response",
            response_metadata=response_metadata,
        )

        generation = Mock()
        generation.text = "Test response"
        generation.message = message
        generation.generation_info = generation_info

        response = Mock()
        response.generations = [[generation]]
        response.llm_output = llm_output

        callback.on_llm_end(response=response, run_id=run_id)

    assert len(events) > 0
    tx = events[0]
    assert tx["type"] == "transaction"

    llm_spans = [
        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
    ]
    assert len(llm_spans) > 0

    llm_span = llm_spans[0]

    if expected_model is not None:
        assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"]
        assert llm_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model
    else:
        assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {})
1795+
1796+
@pytest.mark.parametrize(
    "missing_attribute",
    [
        "message",
        "response_metadata",
        "generation_info",
        "llm_output",
    ],
)
def test_langchain_response_model_extraction_missing_attributes(
    sentry_init,
    capture_events,
    missing_attribute,
):
    """The integration must tolerate responses lacking any single attribute
    it reads for model extraction, and record no GEN_AI_RESPONSE_MODEL.

    NOTE: Mock auto-creates attributes on first access, so simply *not*
    assigning an attribute does not make it missing — the access would
    return a truthy child Mock instead of raising.  We therefore ``del``
    the attribute, which per unittest.mock's documented deletion semantics
    makes subsequent access raise AttributeError (and getattr(..., default)
    fall back).
    """
    sentry_init(
        integrations=[LangchainIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

    run_id = "test-missing-attr-uuid"
    serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
    prompts = ["Test prompt"]

    with start_transaction():
        callback.on_llm_start(
            serialized=serialized,
            prompts=prompts,
            run_id=run_id,
            invocation_params={"model": "gpt-3.5-turbo"},
        )

        generation = Mock()
        generation.text = "Test response"

        if missing_attribute == "message":
            # Actually remove the attribute (see docstring).
            del generation.message
        else:
            message_mock = Mock()
            message_mock.response_metadata.get.return_value = None
            if missing_attribute == "response_metadata":
                del message_mock.response_metadata
            generation.message = message_mock

        if missing_attribute == "generation_info":
            del generation.generation_info
        else:
            generation_info_mock = Mock()
            generation_info_mock.get.return_value = None
            generation.generation_info = generation_info_mock

        response = Mock()
        response.generations = [[generation]]

        if missing_attribute == "llm_output":
            del response.llm_output
        else:
            llm_output_mock = Mock()
            llm_output_mock.get.return_value = None
            response.llm_output = llm_output_mock

        callback.on_llm_end(response=response, run_id=run_id)

    assert len(events) > 0
    tx = events[0]
    assert tx["type"] == "transaction"

    llm_spans = [
        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
    ]
    assert len(llm_spans) > 0

    llm_span = llm_spans[0]

    # With the attribute genuinely absent, no model name can be found.
    assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {})