
Commit c3eada8 ("simplify test")

Parent: 245b495

1 file changed: +44 additions, -25 deletions


tests/integrations/langchain/test_langchain.py

Lines changed: 44 additions & 25 deletions
@@ -593,53 +593,72 @@ def test_langchain_callback_list_existing_callback(sentry_init):
 
 def test_tools_integration_in_spans(sentry_init, capture_events):
     """Test that tools are properly set on spans in actual LangChain integration."""
+    global llm_type
+    llm_type = "openai-chat"
+
     sentry_init(
         integrations=[LangchainIntegration(include_prompts=False)],
         traces_sample_rate=1.0,
     )
     events = capture_events()
 
-    # Create a simple mock LLM that supports tools
-    from langchain_core.language_models.llms import LLM
-
-    class MockLLMWithTools(LLM):
-        def _call(self, prompt, stop=None, run_manager=None, **kwargs):
-            return "Mock response"
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            ("system", "You are a helpful assistant"),
+            ("user", "{input}"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),
+        ]
+    )
 
-        @property
-        def _llm_type(self):
-            return "mock_llm"
+    global stream_result_mock
+    stream_result_mock = Mock(
+        side_effect=[
+            [
+                ChatGenerationChunk(
+                    type="ChatGenerationChunk",
+                    message=AIMessageChunk(content="Simple response"),
+                ),
+            ]
+        ]
+    )
 
-    # Mock tools for testing
-    test_tools = [
-        {"name": "search", "description": "Search tool"},
-        {"name": "calculator", "description": "Math tool"},
-    ]
+    llm = MockOpenAI(
+        model_name="gpt-3.5-turbo",
+        temperature=0,
+        openai_api_key="badkey",
+    )
+    agent = create_openai_tools_agent(llm, [get_word_length], prompt)
+    agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)
 
     with start_transaction():
-        llm = MockLLMWithTools()
-        # Simulate a call with tools
-        llm.invoke("test prompt", config={"tools": test_tools})
+        list(agent_executor.stream({"input": "Hello"}))
 
-    # Check that events were captured
+    # Check that events were captured and contain tools data
     if events:
         tx = events[0]
         spans = tx.get("spans", [])
 
         # Look for spans that should have tools data
+        tools_found = False
         for span in spans:
             span_data = span.get("data", {})
             if SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in span_data:
+                tools_found = True
                 tools_data = span_data[SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS]
                 # Verify tools are in the expected format
                 assert isinstance(tools_data, (str, list))  # Could be serialized
                 if isinstance(tools_data, str):
-                    # If serialized as string, should contain tool names
-                    assert "search" in tools_data
-                    assert "calculator" in tools_data
+                    # If serialized as string, should contain tool name
+                    assert "get_word_length" in tools_data
                 else:
                     # If still a list, verify structure
-                    assert len(tools_data) == 2
-                    names = [tool.get("name") for tool in tools_data]
-                    assert "search" in names
-                    assert "calculator" in names
+                    assert len(tools_data) >= 1
+                    names = [
+                        tool.get("name")
+                        for tool in tools_data
+                        if isinstance(tool, dict)
+                    ]
+                    assert "get_word_length" in names
+
+        # Ensure we found at least one span with tools data
+        assert tools_found, "No spans found with tools data"
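
Note: the simplified test body leans on module-level fixtures defined earlier in test_langchain.py that are not part of this diff: MockOpenAI, get_word_length, stream_result_mock, and llm_type. The sketch below shows one way such helpers could be defined, assuming a ChatOpenAI subclass whose _stream yields whatever the mocked callable returns and a plain @tool-decorated function; the definitions are inferred from how the diff uses these names, not code shown in this commit.

    from unittest.mock import Mock

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk
    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI

    # Assumed module-level state; tests rebind these via `global` before running the agent.
    stream_result_mock = Mock()
    llm_type = "openai-chat"


    class MockOpenAI(ChatOpenAI):
        """Stand-in for ChatOpenAI that streams whatever stream_result_mock returns."""

        def _stream(self, *args, **kwargs):
            for chunk in stream_result_mock():
                yield chunk

        @property
        def _llm_type(self):
            return llm_type


    @tool
    def get_word_length(word: str) -> int:
        """Returns the length of a word."""
        return len(word)

With helpers along these lines, the agent's streamed LLM call never touches the network: stream_result_mock supplies the ChatGenerationChunk sequence configured in the test, and get_word_length gives the agent a real tool whose name the span-data assertions can look for.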

0 commit comments
