diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py
index cebd5d132..26b398c5c 100644
--- a/agentops/instrumentation/__init__.py
+++ b/agentops/instrumentation/__init__.py
@@ -125,6 +125,11 @@ class InstrumentorConfig(TypedDict):
"class_name": "SmolagentsInstrumentor",
"min_version": "1.0.0",
},
+ "langgraph": {
+ "module_name": "agentops.instrumentation.agentic.langgraph",
+ "class_name": "LanggraphInstrumentor",
+ "min_version": "0.2.0",
+ },
}
# Combine all target packages for monitoring
diff --git a/agentops/instrumentation/agentic/langgraph/__init__.py b/agentops/instrumentation/agentic/langgraph/__init__.py
new file mode 100644
index 000000000..8d39d3a9c
--- /dev/null
+++ b/agentops/instrumentation/agentic/langgraph/__init__.py
@@ -0,0 +1,3 @@
+from agentops.instrumentation.agentic.langgraph.instrumentation import LanggraphInstrumentor
+
+__all__ = ["LanggraphInstrumentor"]
diff --git a/agentops/instrumentation/agentic/langgraph/attributes.py b/agentops/instrumentation/agentic/langgraph/attributes.py
new file mode 100644
index 000000000..b9f4962ae
--- /dev/null
+++ b/agentops/instrumentation/agentic/langgraph/attributes.py
@@ -0,0 +1,54 @@
+from typing import Any, Dict, List, Optional
+import json
+
+
+def ensure_no_none_values(attributes: Dict[str, Any]) -> Dict[str, Any]:
+    """Drop None-valued entries so they are never set as span attributes."""
+    return {k: v for k, v in attributes.items() if v is not None}
+
+
+def set_graph_attributes(span: Any, graph_nodes: Optional[List[str]] = None, graph_edges: Optional[List[str]] = None) -> None:
+    """Record graph structure (node list and "source->target" edges) on a span."""
+ if graph_nodes:
+ span.set_attribute("langgraph.graph.nodes", json.dumps(graph_nodes))
+ span.set_attribute("langgraph.graph.node_count", len(graph_nodes))
+
+ for i, node in enumerate(graph_nodes):
+ span.set_attribute(f"langgraph.node.{i}.name", node)
+ span.set_attribute(f"langgraph.node.{i}.type", "unknown")
+
+ if graph_edges:
+ span.set_attribute("langgraph.graph.edges", json.dumps(graph_edges))
+ span.set_attribute("langgraph.graph.edge_count", len(graph_edges))
+
+ for i, edge in enumerate(graph_edges):
+ parts = edge.split("->")
+ if len(parts) == 2:
+ span.set_attribute(f"langgraph.edge.{i}.source", parts[0])
+ span.set_attribute(f"langgraph.edge.{i}.target", parts[1])
+
+
+def extract_messages_from_input(input_data: Any) -> list:
+    """Return the message list from a graph input dict, or [] if absent."""
+ if isinstance(input_data, dict) and "messages" in input_data:
+ return input_data["messages"]
+ return []
+
+
+def extract_messages_from_output(output_data: Any) -> list:
+    """Return the message list from a graph output dict, or [] if absent."""
+ if isinstance(output_data, dict) and "messages" in output_data:
+ return output_data["messages"]
+ return []
+
+
+def get_message_content(message: Any) -> str:
+    """Return a message's content as a string, or "" when it has none."""
+ if hasattr(message, "content"):
+ return str(message.content)
+ return ""
+
+
+def get_message_role(message: Any) -> str:
+    """Best-effort role lookup: explicit role, then LangChain type, then class name."""
+    if hasattr(message, "role"):
+        return message.role
+    if hasattr(message, "type"):
+        return message.type
+    # Every object has __class__, so this is the unconditional fallback,
+    # e.g. HumanMessage -> "human".
+    return message.__class__.__name__.replace("Message", "").lower()
diff --git a/agentops/instrumentation/agentic/langgraph/instrumentation.py b/agentops/instrumentation/agentic/langgraph/instrumentation.py
new file mode 100644
index 000000000..8f778eee4
--- /dev/null
+++ b/agentops/instrumentation/agentic/langgraph/instrumentation.py
@@ -0,0 +1,598 @@
+from functools import wraps
+from typing import Any, Callable, Collection, Dict, Optional, Tuple
+import json
+import inspect
+
+from opentelemetry import trace
+from opentelemetry.trace import SpanKind, Status, StatusCode, get_tracer
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+from wrapt import wrap_function_wrapper
+
+from agentops.semconv import (
+ SpanAttributes,
+ WorkflowAttributes,
+ MessageAttributes,
+)
+from .attributes import (
+ ensure_no_none_values,
+ set_graph_attributes,
+ extract_messages_from_input,
+ get_message_content,
+ get_message_role,
+)
+
+import sys
+
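+# langgraph's import chain expects typing_extensions; if it is missing, stub it
+# with a MagicMock so this module can still be imported. This is a best-effort
+# guard only: the mock does not provide real typing_extensions behavior.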
+if "typing_extensions" not in sys.modules:
+ from unittest import mock
+
+ sys.modules["typing_extensions"] = mock.MagicMock()
+
+
+class LanggraphInstrumentor(BaseInstrumentor):
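+    """Instrumentor for LangGraph: wraps StateGraph construction and compilation
+    and Pregel invoke/stream to emit AgentOps workflow, node, and LLM spans."""
+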
+ def __init__(self, config: Optional[Dict[str, Any]] = None):
+ super().__init__()
+ self.config = config or {}
+ self._tracer = None
+
+ def instrumentation_dependencies(self) -> Collection[str]:
+ return ["langgraph >= 0.0.1"]
+
+ def _instrument(self, **kwargs):
+ tracer_provider = kwargs.get("tracer_provider")
+ self._tracer = get_tracer("agentops.instrumentation.agentic.langgraph", "0.1.0", tracer_provider)
+
+ # Initialize context variable for tracking graph executions
+ import contextvars
+
+ self._current_graph_execution = contextvars.ContextVar("current_graph_execution", default=None)
+
+ wrap_function_wrapper("langgraph.graph.state", "StateGraph.__init__", self._wrap_state_graph_init)
+
+ wrap_function_wrapper("langgraph.graph.state", "StateGraph.compile", self._wrap_state_graph_compile)
+
+ wrap_function_wrapper("langgraph.pregel", "Pregel.invoke", self._wrap_invoke)
+
+ wrap_function_wrapper("langgraph.pregel", "Pregel.stream", self._wrap_stream)
+
+ wrap_function_wrapper("langgraph.graph.state", "StateGraph.add_node", self._wrap_add_node)
+
+ def _uninstrument(self, **kwargs):
+ unwrap("langgraph.graph.state", "StateGraph.__init__")
+ unwrap("langgraph.graph.state", "StateGraph.compile")
+ unwrap("langgraph.pregel", "Pregel.invoke")
+ unwrap("langgraph.pregel", "Pregel.stream")
+ unwrap("langgraph.graph.state", "StateGraph.add_node")
+
+ def _wrap_state_graph_init(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
+ if not self._tracer:
+ return wrapped(*args, **kwargs)
+
+ with self._tracer.start_as_current_span("langgraph.graph.initialize", kind=SpanKind.INTERNAL) as span:
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
+ WorkflowAttributes.WORKFLOW_TYPE: "graph_initialization",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: "Graph Initialization",
+ }
+ )
+ )
+
+ try:
+ result = wrapped(*args, **kwargs)
+
+ instance._langgraph_instrumented = True
+ instance._langgraph_nodes = []
+ instance._langgraph_edges = []
+
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+
+ def _wrap_state_graph_compile(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
+ if not self._tracer:
+ return wrapped(*args, **kwargs)
+
+ with self._tracer.start_as_current_span("langgraph.graph.compile", kind=SpanKind.INTERNAL) as span:
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
+ WorkflowAttributes.WORKFLOW_TYPE: "graph_compilation",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: "Graph Compilation",
+ SpanAttributes.LLM_SYSTEM: "langgraph",
+ }
+ )
+ )
+
+ try:
+ result = wrapped(*args, **kwargs)
+
+ nodes = []
+ edges = []
+
+ if hasattr(instance, "nodes"):
+ nodes = list(instance.nodes.keys()) if hasattr(instance.nodes, "keys") else []
+
+ if hasattr(instance, "edges") and hasattr(instance.edges, "items"):
+ for source, targets in instance.edges.items():
+ if isinstance(targets, dict):
+ for target in targets.values():
+ edges.append(f"{source}->{target}")
+ elif isinstance(targets, list):
+ for target in targets:
+ edges.append(f"{source}->{target}")
+
+ set_graph_attributes(span, nodes, edges)
+
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+
+ def _wrap_invoke(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
+ if not self._tracer:
+ return wrapped(*args, **kwargs)
+
+ current_span = trace.get_current_span()
+        # NonRecordingSpan has no .name, so use getattr to avoid AttributeError
+        if current_span and getattr(current_span, "name", None) == "langgraph.workflow.execute":
+ return wrapped(*args, **kwargs)
+
+ with self._tracer.start_as_current_span("langgraph.workflow.execute", kind=SpanKind.INTERNAL) as span:
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
+ WorkflowAttributes.WORKFLOW_TYPE: "langgraph_invoke",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: "Workflow Execution",
+ SpanAttributes.LLM_REQUEST_STREAMING: False,
+ "langgraph.execution.mode": "invoke",
+ }
+ )
+ )
+
+ execution_state = {"executed_nodes": [], "message_count": 0, "final_response": None}
+
+ # Set the current execution state in context
+ token = self._current_graph_execution.set(execution_state)
+
+ try:
+ input_data = args[0] if args else kwargs.get("input", {})
+ messages = extract_messages_from_input(input_data)
+ if messages:
+ execution_state["message_count"] = len(messages)
+ for i, msg in enumerate(messages[:3]):
+ content = get_message_content(msg)
+ role = get_message_role(msg)
+ if content:
+ span.set_attribute(f"gen_ai.prompt.{i}.content", content[:500])
+ span.set_attribute(f"gen_ai.prompt.{i}.role", role)
+
+ result = wrapped(*args, **kwargs)
+
+ # Extract execution information from result
+ if isinstance(result, dict):
+ # Check for messages in result
+ if "messages" in result:
+ output_messages = result["messages"]
+ if isinstance(output_messages, list):
+ # Count all messages in the result
+ total_messages = len([msg for msg in output_messages if hasattr(msg, "content")])
+ execution_state["message_count"] = total_messages
+
+ if output_messages:
+ # Find the last non-tool message
+ for msg in reversed(output_messages):
+ if hasattr(msg, "content") and not hasattr(msg, "tool_call_id"):
+ content = get_message_content(msg)
+ if content:
+ execution_state["final_response"] = content
+ span.set_attribute("gen_ai.response.0.content", content[:500])
+ break
+
+ # Capture final execution state before returning
+ final_executed_nodes = list(execution_state["executed_nodes"]) # Copy the list
+ final_node_count = len(final_executed_nodes)
+ final_message_count = execution_state["message_count"]
+ final_response = execution_state["final_response"]
+
+ span.set_status(Status(StatusCode.OK))
+
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ "langgraph.graph.executed_nodes": json.dumps(final_executed_nodes),
+ "langgraph.graph.node_execution_count": final_node_count,
+ "langgraph.graph.message_count": final_message_count,
+ "langgraph.graph.final_response": final_response,
+ "langgraph.graph.status": "success",
+ }
+ )
+ )
+
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+ finally:
+ # Reset the context
+ self._current_graph_execution.reset(token)
+
+ def _wrap_stream(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
+ if not self._tracer:
+ return wrapped(*args, **kwargs)
+
+ current_span = trace.get_current_span()
+        if current_span and getattr(current_span, "name", None) == "langgraph.workflow.execute":
+ return wrapped(*args, **kwargs)
+
+ span = self._tracer.start_span("langgraph.workflow.execute", kind=SpanKind.INTERNAL)
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
+ WorkflowAttributes.WORKFLOW_TYPE: "langgraph_stream",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: "Workflow Stream",
+ SpanAttributes.LLM_REQUEST_STREAMING: True,
+ "langgraph.execution.mode": "stream",
+ }
+ )
+ )
+
+ execution_state = {"executed_nodes": [], "message_count": 0, "chunk_count": 0, "final_response": None}
+
+ # Set the current execution state in context
+ token = self._current_graph_execution.set(execution_state)
+
+ try:
+ # Extract input messages
+ input_data = args[0] if args else kwargs.get("input", {})
+ messages = extract_messages_from_input(input_data)
+ if messages:
+ execution_state["message_count"] = len(messages)
+ for i, msg in enumerate(messages[:3]):
+ content = get_message_content(msg)
+ role = get_message_role(msg)
+ if content:
+ span.set_attribute(f"gen_ai.prompt.{i}.content", content[:500])
+ span.set_attribute(f"gen_ai.prompt.{i}.role", role)
+
+ stream_gen = wrapped(*args, **kwargs)
+
+ def stream_wrapper():
+ try:
+ for chunk in stream_gen:
+ execution_state["chunk_count"] += 1
+
+ if isinstance(chunk, dict):
+ for key in chunk:
+ # Track node executions (excluding special keys)
+ if (
+ key not in ["__start__", "__end__", "__interrupt__"]
+ and key not in execution_state["executed_nodes"]
+ ):
+ execution_state["executed_nodes"].append(key)
+
+ # Track messages in the chunk value
+ chunk_value = chunk[key]
+ if isinstance(chunk_value, dict):
+ # Check for messages in the chunk value
+ if "messages" in chunk_value:
+ msg_list = chunk_value["messages"]
+ if isinstance(msg_list, list):
+ execution_state["message_count"] += len(msg_list)
+ for msg in msg_list:
+ content = get_message_content(msg)
+ if content:
+ execution_state["final_response"] = content
+ elif key == "messages" and isinstance(chunk_value, list):
+ # Sometimes messages might be directly in the chunk
+ execution_state["message_count"] += len(chunk_value)
+ for msg in chunk_value:
+ content = get_message_content(msg)
+ if content:
+ execution_state["final_response"] = content
+
+ yield chunk
+
+ # Capture final execution state before ending
+ final_executed_nodes = list(execution_state["executed_nodes"])
+ final_node_count = len(final_executed_nodes)
+ final_message_count = execution_state["message_count"]
+ final_chunk_count = execution_state["chunk_count"]
+ final_response = execution_state["final_response"]
+
+ span.set_status(Status(StatusCode.OK))
+
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ "langgraph.graph.executed_nodes": json.dumps(final_executed_nodes),
+ "langgraph.graph.node_execution_count": final_node_count,
+ "langgraph.graph.message_count": final_message_count,
+ "langgraph.graph.total_chunks": final_chunk_count,
+ "langgraph.graph.final_response": final_response,
+ "langgraph.graph.status": "success",
+ }
+ )
+ )
+
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+ finally:
+ span.end()
+
+ return stream_wrapper()
+
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ span.end()
+ raise
+ finally:
+ # Reset the context
+ self._current_graph_execution.reset(token)
+
+ def _wrap_add_node(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
+ if not self._tracer:
+ return wrapped(*args, **kwargs)
+
+ # Get node name and function
+ if args:
+ key = args[0]
+ action = args[1] if len(args) > 1 else kwargs.get("action")
+ else:
+ key = kwargs.get("key")
+ action = kwargs.get("action")
+
+ if not action:
+ return wrapped(*args, **kwargs)
+
+ # Create wrapped node function that instruments LLM calls
+ def create_wrapped_node(original_func):
+ if inspect.iscoroutinefunction(original_func):
+
+ @wraps(original_func)
+ async def wrapped_node_async(state):
+ # Track node execution in parent graph span
+ self._track_node_execution(key)
+
+ # Check if this node contains an LLM call
+ is_llm_node = self._detect_llm_node(original_func)
+
+ if is_llm_node:
+ with self._tracer.start_as_current_span("langgraph.node.execute", kind=SpanKind.CLIENT) as span:
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "llm",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: f"Node: {key}",
+ SpanAttributes.LLM_SYSTEM: "langgraph",
+ "langgraph.node.name": key,
+ }
+ )
+ )
+
+ try:
+ # Call the original function
+ result = await original_func(state)
+
+ # Extract LLM information from the result
+ self._extract_llm_info_from_result(span, state, result)
+
+ span.set_status(Status(StatusCode.OK))
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+ else:
+ # Non-LLM node, just execute normally
+ return await original_func(state)
+ else:
+
+ @wraps(original_func)
+ def wrapped_node_sync(state):
+ # Track node execution in parent graph span
+ self._track_node_execution(key)
+
+ # Check if this node contains an LLM call
+ is_llm_node = self._detect_llm_node(original_func)
+
+ if is_llm_node:
+ with self._tracer.start_as_current_span("langgraph.node.execute", kind=SpanKind.CLIENT) as span:
+ span.set_attributes(
+ ensure_no_none_values(
+ {
+ SpanAttributes.AGENTOPS_SPAN_KIND: "llm",
+ SpanAttributes.AGENTOPS_ENTITY_NAME: f"Node: {key}",
+                                    SpanAttributes.LLM_SYSTEM: "langgraph",
+                                    "langgraph.node.name": key,
+ }
+ )
+ )
+
+ try:
+ # Call the original function
+ result = original_func(state)
+
+ # Extract LLM information from the result
+ self._extract_llm_info_from_result(span, state, result)
+
+ span.set_status(Status(StatusCode.OK))
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+ else:
+ # Non-LLM node, just execute normally
+ return original_func(state)
+
+ return wrapped_node_sync
+
+        return wrapped_node_async
+
+ # Wrap the action function
+ wrapped_action = create_wrapped_node(action)
+
+ # Call the original add_node with the wrapped action
+ if args and len(args) > 1:
+ new_args = (args[0], wrapped_action) + args[2:]
+ return wrapped(*new_args, **kwargs)
+ else:
+ kwargs["action"] = wrapped_action
+ return wrapped(*args, **kwargs)
+
+ def _track_node_execution(self, node_name: str) -> None:
+ """Track node execution in the active graph span."""
+ # Use context variable to track the current execution
+ if hasattr(self, "_current_graph_execution"):
+ execution_state = self._current_graph_execution.get()
+ if execution_state and node_name not in execution_state["executed_nodes"]:
+ execution_state["executed_nodes"].append(node_name)
+
+ def _detect_llm_node(self, func: Callable) -> bool:
+ """Detect if a node function contains LLM calls."""
+ try:
+ # Get the source code of the function
+ source = inspect.getsource(func)
+
+ # Check for common LLM patterns
+ llm_patterns = [
+ "ChatOpenAI",
+ "ChatAnthropic",
+ "ChatGoogleGenerativeAI",
+ ".invoke(",
+ ".ainvoke(",
+ ".stream(",
+ ".astream(",
+ "llm.",
+ "model.",
+ "chat.",
+ ]
+
+ for pattern in llm_patterns:
+ if pattern in source:
+ return True
+
+ # Check if function has 'llm' or 'model' in its local variables
+ if hasattr(func, "__code__"):
+ local_vars = func.__code__.co_varnames
+ if any(var in ["llm", "model", "chat"] for var in local_vars):
+ return True
+
+        except Exception:
+            # If the source can't be inspected, fall through and
+            # treat the node as a non-LLM node
+            pass
+
+ return False
+
+ def _extract_llm_info_from_result(self, span: Any, state: Dict, result: Any) -> None:
+ """Extract LLM information from the node execution result."""
+ try:
+ # Extract messages from state
+ if isinstance(state, dict) and "messages" in state:
+ messages = state["messages"]
+ # Set input messages
+ for i, msg in enumerate(messages[-5:]): # Last 5 messages as context
+ if hasattr(msg, "content"):
+ span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), str(msg.content)[:1000])
+ if hasattr(msg, "role"):
+ span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), msg.role)
+ elif hasattr(msg, "type"):
+ span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), msg.type)
+
+ # Extract messages from result
+ if isinstance(result, dict) and "messages" in result:
+ output_messages = result["messages"]
+ if output_messages:
+ last_msg = output_messages[-1] if isinstance(output_messages, list) else output_messages
+
+ # Extract model information from message if available
+ if hasattr(last_msg, "response_metadata"):
+ metadata = last_msg.response_metadata
+ if isinstance(metadata, dict):
+ if "model_name" in metadata:
+ span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, metadata["model_name"])
+ span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, metadata["model_name"])
+ elif "model" in metadata:
+ span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, metadata["model"])
+ span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, metadata["model"])
+
+ # Token usage
+ if "token_usage" in metadata:
+ usage = metadata["token_usage"]
+ if isinstance(usage, dict):
+ if "prompt_tokens" in usage:
+ span.set_attribute(
+ SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage["prompt_tokens"]
+ )
+ if "completion_tokens" in usage:
+ span.set_attribute(
+ SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage["completion_tokens"]
+ )
+ if "total_tokens" in usage:
+ span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage["total_tokens"])
+
+ # Response ID
+ if "id" in metadata and metadata["id"] is not None:
+ span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, metadata["id"])
+
+ # Finish reason
+ if "finish_reason" in metadata:
+ span.set_attribute(
+ MessageAttributes.COMPLETION_FINISH_REASON.format(i=0), metadata["finish_reason"]
+ )
+
+ # Content
+ if hasattr(last_msg, "content"):
+ span.set_attribute(
+ MessageAttributes.COMPLETION_CONTENT.format(i=0), str(last_msg.content)[:1000]
+ )
+ if hasattr(last_msg, "role"):
+ span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), last_msg.role)
+
+ # Check for tool calls
+ if hasattr(last_msg, "tool_calls") and last_msg.tool_calls:
+ for j, tool_call in enumerate(last_msg.tool_calls[:5]):
+ if hasattr(tool_call, "name"):
+ span.set_attribute(
+ MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j), tool_call.name
+ )
+ if hasattr(tool_call, "args"):
+ span.set_attribute(
+ MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j),
+ json.dumps(tool_call.args)[:500],
+ )
+ if hasattr(tool_call, "id"):
+ span.set_attribute(
+ MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j), tool_call.id
+ )
+
+ # Additional attributes from message
+ if hasattr(last_msg, "id") and last_msg.id is not None:
+ span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, last_msg.id)
+
+ # Usage information might be on the message itself
+ if hasattr(last_msg, "usage_metadata"):
+ usage = last_msg.usage_metadata
+ if hasattr(usage, "input_tokens"):
+ span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.input_tokens)
+ if hasattr(usage, "output_tokens"):
+ span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.output_tokens)
+ if hasattr(usage, "total_tokens"):
+ span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens)
+ except Exception:
+ # Don't fail the span if we can't extract info
+ pass
diff --git a/agentops/instrumentation/agentic/langgraph/version.py b/agentops/instrumentation/agentic/langgraph/version.py
new file mode 100644
index 000000000..3dc1f76bc
--- /dev/null
+++ b/agentops/instrumentation/agentic/langgraph/version.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/docs/mint.json b/docs/mint.json
index 9d0bfe746..53fb02bed 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -160,6 +160,7 @@
"v2/integrations/google_adk",
"v2/integrations/google_generative_ai",
"v2/integrations/langchain",
+ "v2/integrations/langgraph",
"v2/integrations/llamaindex",
"v2/integrations/litellm",
"v2/integrations/mem0",
diff --git a/docs/v2/examples/langgraph.mdx b/docs/v2/examples/langgraph.mdx
new file mode 100644
index 000000000..4d2fae20d
--- /dev/null
+++ b/docs/v2/examples/langgraph.mdx
@@ -0,0 +1,254 @@
+---
+title: 'LangGraph'
+description: 'LangGraph Integration with AgentOps'
+---
+{/* SOURCE_FILE: examples/langgraph/langgraph_example.ipynb */}
+
+_View Notebook on GitHub_
+
+# LangGraph Integration with AgentOps
+
+This example demonstrates how to use LangGraph with AgentOps for comprehensive observability of your graph-based agent workflows.
+
+LangGraph is a framework for building stateful, multi-step applications with LLMs. AgentOps automatically instruments LangGraph to track:
+- Graph compilation and structure
+- Node executions and transitions
+- Tool usage within the graph
+- LLM calls made by agents
+- Complete execution flow with timing
+
+
+
+## Installation
+
+<CodeGroup>
+ ```bash pip
+ pip install agentops langchain-openai langgraph python-dotenv
+ ```
+ ```bash poetry
+ poetry add agentops langchain-openai langgraph python-dotenv
+ ```
+ ```bash uv
+ uv add agentops langchain-openai langgraph python-dotenv
+ ```
+</CodeGroup>
+
+## Setup
+
+First, let's import the necessary libraries and initialize AgentOps:
+
+
+```python
+import os
+from typing import Annotated, Literal, TypedDict
+from langgraph.graph import StateGraph, END
+from langgraph.graph.message import add_messages
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
+from langchain_core.tools import tool
+import agentops
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+# Initialize AgentOps - this enables automatic instrumentation
+agentops.init(os.getenv("AGENTOPS_API_KEY"), auto_start_session=False)
+trace = agentops.start_trace("langgraph_example")
+```
+
+## Define Tools
+
+Let's create some simple tools that our agent can use:
+
+
+```python
+@tool
+def get_weather(location: str) -> str:
+ """Get the weather for a given location."""
+ # Simulated weather data
+ weather_data = {
+ "New York": "Sunny, 72°F",
+ "London": "Cloudy, 60°F",
+ "Tokyo": "Rainy, 65°F",
+ "Paris": "Partly cloudy, 68°F",
+ "Sydney": "Clear, 75°F"
+ }
+ return weather_data.get(location, f"Weather data not available for {location}")
+
+@tool
+def calculate(expression: str) -> str:
+ """Evaluate a mathematical expression."""
+ try:
+        result = eval(expression)  # demo only: eval is unsafe on untrusted input
+ return f"The result is: {result}"
+ except Exception as e:
+ return f"Error calculating expression: {str(e)}"
+
+# Collect tools for binding to the model
+tools = [get_weather, calculate]
+```
+
+## Define Agent State
+
+In LangGraph, we need to define the state that will be passed between nodes:
+
+
+```python
+class AgentState(TypedDict):
+ messages: Annotated[list, add_messages]
+```
+
+## Create the Model and Node Functions
+
+We'll create a model with tool binding and define the functions that will be our graph nodes:
+
+
+```python
+# Create model with tool binding
+model = ChatOpenAI(temperature=0, model="gpt-4o-mini").bind_tools(tools)
+
+def should_continue(state: AgentState) -> Literal["tools", "end"]:
+ """Determine if we should continue to tools or end."""
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ # If the LLM wants to use tools, continue to the tools node
+ if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+ return "tools"
+ # Otherwise, we're done
+ return "end"
+
+def call_model(state: AgentState):
+ """Call the language model."""
+ messages = state["messages"]
+ response = model.invoke(messages)
+ return {"messages": [response]}
+
+def call_tools(state: AgentState):
+ """Execute the tool calls requested by the model."""
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ tool_messages = []
+ for tool_call in last_message.tool_calls:
+ tool_name = tool_call["name"]
+ tool_args = tool_call["args"]
+
+ # Find and execute the requested tool
+        for tool_obj in tools:
+            if tool_obj.name == tool_name:
+                result = tool_obj.invoke(tool_args)
+ tool_messages.append(
+ ToolMessage(
+ content=str(result),
+ tool_call_id=tool_call["id"]
+ )
+ )
+ break
+
+ return {"messages": tool_messages}
+```
+
+## Build the Graph
+
+Now let's construct the LangGraph workflow:
+
+
+```python
+# Create the graph
+workflow = StateGraph(AgentState)
+
+# Add nodes
+workflow.add_node("agent", call_model)
+workflow.add_node("tools", call_tools)
+
+# Set the entry point
+workflow.set_entry_point("agent")
+
+# Add conditional edges
+workflow.add_conditional_edges(
+ "agent",
+ should_continue,
+ {
+ "tools": "tools",
+ "end": END
+ }
+)
+
+# Add edge from tools back to agent
+workflow.add_edge("tools", "agent")
+
+# Compile the graph
+app = workflow.compile()
+```
+
+## Run Examples
+
+Let's test our agent with different queries that require tool usage:
+
+
+```python
+# Example 1: Weather query
+print("Example 1: Weather Query")
+print("=" * 50)
+
+messages = [HumanMessage(content="What's the weather in New York and Tokyo?")]
+result = app.invoke({"messages": messages})
+
+final_message = result["messages"][-1]
+print(f"Response: {final_message.content}\n")
+```
+
+
+```python
+# Example 2: Math calculation
+print("Example 2: Math Calculation")
+print("=" * 50)
+
+messages = [HumanMessage(content="Calculate 25 * 4 + 10")]
+result = app.invoke({"messages": messages})
+
+final_message = result["messages"][-1]
+print(f"Response: {final_message.content}\n")
+```
+
+
+```python
+# Example 3: Combined query
+print("Example 3: Combined Query")
+print("=" * 50)
+
+messages = [HumanMessage(content="What's the weather in Paris? Also calculate 100/5")]
+result = app.invoke({"messages": messages})
+
+final_message = result["messages"][-1]
+print(f"Response: {final_message.content}\n")
+```
+
+## View in AgentOps Dashboard
+
+After running this notebook, you can view the traces in your AgentOps dashboard. You'll see:
+
+1. **Graph Compilation**: The structure of your LangGraph with nodes and edges
+2. **Execution Flow**: How the graph executed, including:
+ - Agent node calls
+ - Tool node executions
+ - State transitions
+3. **LLM Calls**: Each model call with prompts and completions
+4. **Tool Usage**: Which tools were called and their results
+5. **Timing Information**: How long each step took
+
+The instrumentation captures the full context of your LangGraph application automatically!
+
+
+```python
+print("✅ Check your AgentOps dashboard for comprehensive traces!")
+print("🔍 You'll see the graph structure, execution flow, and all LLM/tool calls.")
+agentops.end_trace(trace)
+```
\ No newline at end of file
diff --git a/docs/v2/integrations/langgraph.mdx b/docs/v2/integrations/langgraph.mdx
new file mode 100644
index 000000000..5cf86f143
--- /dev/null
+++ b/docs/v2/integrations/langgraph.mdx
@@ -0,0 +1,237 @@
+---
+title: LangGraph
+description: "Track and analyze your LangGraph workflows with AgentOps"
+---
+
+[LangGraph](https://github.com/langchain-ai/langgraph) is a framework for building stateful, multi-step LLM applications as graphs. AgentOps automatically instruments LangGraph to provide comprehensive observability into your graph-based agent workflows.
+
+## Core Concepts
+
+LangGraph enables you to build complex agentic workflows as graphs with:
+- **Nodes**: Individual steps in your workflow (agents, tools, functions)
+- **Edges**: Connections between nodes that define flow
+- **State**: Shared data that flows through the graph
+- **Conditional Edges**: Dynamic routing based on state or outputs
+- **Cycles**: Support for iterative workflows and feedback loops (see the sketch below)
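+
+The sketch below wires these concepts together without any LLM, so the structure is easy to see. It is a minimal illustration: the `steps` counter, node names, and the two-pass loop are made up for this example and are not part of the AgentOps API.
+
+```python
+from typing import Annotated, TypedDict
+from langgraph.graph import StateGraph, END
+from langgraph.graph.message import add_messages
+
+class AgentState(TypedDict):
+    messages: Annotated[list, add_messages]  # state shared by every node
+    steps: int
+
+def agent(state: AgentState):
+    # Placeholder node: a real graph would call an LLM here
+    return {"steps": state["steps"] + 1}
+
+def tools(state: AgentState):
+    # Placeholder node: a real graph would execute tool calls here
+    return {"messages": []}
+
+def route(state: AgentState) -> str:
+    # Conditional edge: loop through tools twice, then finish
+    return "tools" if state["steps"] < 2 else "end"
+
+graph = StateGraph(AgentState)
+graph.add_node("agent", agent)
+graph.add_node("tools", tools)
+graph.set_entry_point("agent")
+graph.add_conditional_edges("agent", route, {"tools": "tools", "end": END})
+graph.add_edge("tools", "agent")  # the cycle: tools feeds back into agent
+app = graph.compile()
+
+print(app.invoke({"messages": [], "steps": 0}))  # expect steps == 2 in the final state
+```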
+
+## Installation
+
+Install AgentOps and LangGraph along with LangChain dependencies:
+
+<CodeGroup>
+ ```bash pip
+ pip install agentops langgraph langchain-openai python-dotenv
+ ```
+ ```bash poetry
+ poetry add agentops langgraph langchain-openai python-dotenv
+ ```
+ ```bash uv
+ uv add agentops langgraph langchain-openai python-dotenv
+ ```
+</CodeGroup>
+
+## Setting Up API Keys
+
+You'll need API keys for AgentOps and your LLM provider:
+- **OPENAI_API_KEY**: From the [OpenAI Platform](https://platform.openai.com/api-keys)
+- **AGENTOPS_API_KEY**: From your [AgentOps Dashboard](https://app.agentops.ai/)
+
+Set these as environment variables or in a `.env` file.
+
+<CodeGroup>
+ ```bash Export to CLI
+ export OPENAI_API_KEY="your_openai_api_key_here"
+ export AGENTOPS_API_KEY="your_agentops_api_key_here"
+ ```
+ ```txt Set in .env file
+ OPENAI_API_KEY="your_openai_api_key_here"
+ AGENTOPS_API_KEY="your_agentops_api_key_here"
+ ```
+</CodeGroup>
+
+Then load them in your Python code:
+```python
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+```
+
+## Usage
+
+Initialize AgentOps at the beginning of your application to automatically track all LangGraph operations:
+
+```python
+import agentops
+from typing import Annotated, Literal, TypedDict
+from langgraph.graph import StateGraph, END
+from langgraph.graph.message import add_messages
+from langchain_openai import ChatOpenAI
+
+# Initialize AgentOps
+agentops.init()
+
+# Define your graph state
+class AgentState(TypedDict):
+ messages: Annotated[list, add_messages]
+
+# Create your LLM
+model = ChatOpenAI(temperature=0)
+
+# Define nodes
+def agent_node(state: AgentState):
+ messages = state["messages"]
+ response = model.invoke(messages)
+ return {"messages": [response]}
+
+# Build the graph
+workflow = StateGraph(AgentState)
+workflow.add_node("agent", agent_node)
+workflow.set_entry_point("agent")
+workflow.add_edge("agent", END)
+
+# Compile and run
+app = workflow.compile()
+result = app.invoke({"messages": [{"role": "user", "content": "Hello!"}]})
+```
+
+## What Gets Tracked
+
+AgentOps automatically captures (a sketch of the resulting span attributes follows this list):
+
+- **Graph Structure**: Nodes, edges, and entry points during compilation
+- **Execution Flow**: The path taken through your graph
+- **Node Executions**: Each node execution with inputs and outputs
+- **LLM Calls**: All language model interactions within nodes
+- **Tool Usage**: Any tools called within your graph
+- **State Changes**: How state evolves through the workflow
+- **Timing Information**: Duration of each node and total execution time
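+
+These show up as attributes on the spans the instrumentor emits. The attribute keys below come from the instrumentation source; the values are illustrative:
+
+```python
+# Attributes on the "langgraph.workflow.execute" span (example values)
+workflow_attributes = {
+    "langgraph.execution.mode": "invoke",  # or "stream"
+    "langgraph.graph.executed_nodes": '["agent", "tools"]',
+    "langgraph.graph.node_execution_count": 2,
+    "langgraph.graph.message_count": 4,
+    "langgraph.graph.final_response": "The result is: 110",
+    "langgraph.graph.status": "success",
+}
+
+# Attributes on the "langgraph.graph.compile" span (example values)
+compile_attributes = {
+    "langgraph.graph.nodes": '["agent", "tools"]',
+    "langgraph.graph.node_count": 2,
+    "langgraph.graph.edges": '["tools->agent"]',
+    "langgraph.graph.edge_count": 1,
+}
+```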
+
+## Advanced Example
+
+Here's a more complex example with conditional routing and tools:
+
+```python
+import agentops
+from typing import Annotated, Literal, TypedDict
+from langgraph.graph import StateGraph, END
+from langgraph.graph.message import add_messages
+from langchain_openai import ChatOpenAI
+from langchain_core.tools import tool
+
+# Initialize AgentOps
+agentops.init()
+
+# Define tools
+@tool
+def search(query: str) -> str:
+ """Search for information."""
+ return f"Search results for: {query}"
+
+@tool
+def calculate(expression: str) -> str:
+ """Evaluate a mathematical expression."""
+    try:
+        # Demo only: eval is unsafe on untrusted input
+        return str(eval(expression))
+    except Exception:
+        return "Error in calculation"
+
+# Configure model with tools
+tools = [search, calculate]
+model = ChatOpenAI(temperature=0).bind_tools(tools)
+
+# Define state
+class AgentState(TypedDict):
+ messages: Annotated[list, add_messages]
+
+# Define conditional logic
+def should_continue(state: AgentState) -> Literal["tools", "end"]:
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+ return "tools"
+ return "end"
+
+# Define nodes
+def call_model(state: AgentState):
+ messages = state["messages"]
+ response = model.invoke(messages)
+ return {"messages": [response]}
+
+def call_tools(state: AgentState):
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ tool_responses = []
+ for tool_call in last_message.tool_calls:
+ # Execute the appropriate tool
+ if tool_call["name"] == "search":
+ result = search.invoke(tool_call["args"])
+ elif tool_call["name"] == "calculate":
+ result = calculate.invoke(tool_call["args"])
+
+ tool_responses.append({
+ "role": "tool",
+ "content": result,
+ "tool_call_id": tool_call["id"]
+ })
+
+ return {"messages": tool_responses}
+
+# Build the graph
+workflow = StateGraph(AgentState)
+workflow.add_node("agent", call_model)
+workflow.add_node("tools", call_tools)
+workflow.set_entry_point("agent")
+workflow.add_conditional_edges(
+ "agent",
+ should_continue,
+ {
+ "tools": "tools",
+ "end": END
+ }
+)
+workflow.add_edge("tools", "agent")
+
+# Compile and run
+app = workflow.compile()
+result = app.invoke({
+ "messages": [{"role": "user", "content": "Search for AI news and calculate 25*4"}]
+})
+```
+
+## Dashboard Insights
+
+In your AgentOps dashboard, you'll see:
+
+1. **Graph Visualization**: Visual representation of your compiled graph
+2. **Execution Trace**: Step-by-step flow through nodes
+3. **Node Metrics**: Performance data for each node
+4. **LLM Analytics**: Token usage and costs across all model calls
+5. **Tool Usage**: Which tools were called and their results
+6. **Error Tracking**: Any failures in node execution
+
+## Examples
+
+<CardGroup cols={1}>
+  <Card title="LangGraph Example" href="/v2/examples/langgraph">
+    Complete example showing agent workflows with tools
+  </Card>
+</CardGroup>
+
+## Best Practices
+
+1. **Initialize Early**: Call `agentops.init()` before creating your graph
+2. **Use Descriptive Names**: Name your nodes clearly for better traces
+3. **Handle Errors**: Implement error handling in your nodes (see the sketch below)
+4. **Monitor State Size**: Large states can impact performance
+5. **Leverage Conditional Edges**: Use them for dynamic workflows
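+
+For point 3, one pattern is to catch failures inside a node and turn them into state the graph can route on. A minimal sketch, reusing `model` and `AgentState` from the examples above (the `safe_agent` name and error-message format are illustrative):
+
+```python
+def safe_agent(state: AgentState):
+    """Catch model failures so one bad call doesn't abort the whole run."""
+    try:
+        response = model.invoke(state["messages"])
+        return {"messages": [response]}
+    except Exception as exc:
+        # Uncaught exceptions are still recorded on the node span by the
+        # instrumentor; returning an error message keeps the workflow alive.
+        return {"messages": [{"role": "assistant", "content": f"Error: {exc}"}]}
+```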
+
\ No newline at end of file
diff --git a/examples/langgraph/langgraph_example.ipynb b/examples/langgraph/langgraph_example.ipynb
new file mode 100644
index 000000000..cd21fe5a2
--- /dev/null
+++ b/examples/langgraph/langgraph_example.ipynb
@@ -0,0 +1,336 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# LangGraph Integration with AgentOps\n",
+ "\n",
+ "This example demonstrates how to use LangGraph with AgentOps for comprehensive observability of your graph-based agent workflows.\n",
+ "\n",
+ "LangGraph is a framework for building stateful, multi-step applications with LLMs. AgentOps automatically instruments LangGraph to track:\n",
+ "- Graph compilation and structure\n",
+ "- Node executions and transitions\n",
+ "- Tool usage within the graph\n",
+ "- LLM calls made by agents\n",
+ "- Complete execution flow with timing"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install agentops langgraph langchain-openai python-dotenv"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setup\n",
+ "\n",
+ "First, let's import the necessary libraries and initialize AgentOps:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from typing import Annotated, Literal, TypedDict\n",
+ "from langgraph.graph import StateGraph, END\n",
+ "from langgraph.graph.message import add_messages\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain_core.messages import HumanMessage, AIMessage, ToolMessage\n",
+ "from langchain_core.tools import tool\n",
+ "import agentops\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "# Load environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Initialize AgentOps - this enables automatic instrumentation\n",
+ "agentops.init(os.getenv(\"AGENTOPS_API_KEY\"), auto_start_session=False)\n",
+ "trace = agentops.start_trace(\"langgraph_example\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Define Tools\n",
+ "\n",
+ "Let's create some simple tools that our agent can use:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@tool\n",
+ "def get_weather(location: str) -> str:\n",
+ " \"\"\"Get the weather for a given location.\"\"\"\n",
+ " # Simulated weather data\n",
+ " weather_data = {\n",
+ " \"New York\": \"Sunny, 72°F\",\n",
+ " \"London\": \"Cloudy, 60°F\", \n",
+ " \"Tokyo\": \"Rainy, 65°F\",\n",
+ " \"Paris\": \"Partly cloudy, 68°F\",\n",
+ " \"Sydney\": \"Clear, 75°F\"\n",
+ " }\n",
+ " return weather_data.get(location, f\"Weather data not available for {location}\")\n",
+ "\n",
+ "@tool\n",
+ "def calculate(expression: str) -> str:\n",
+ " \"\"\"Evaluate a mathematical expression.\"\"\"\n",
+ " try:\n",
+ " result = eval(expression)\n",
+ " return f\"The result is: {result}\"\n",
+ " except Exception as e:\n",
+ " return f\"Error calculating expression: {str(e)}\"\n",
+ "\n",
+ "# Collect tools for binding to the model\n",
+ "tools = [get_weather, calculate]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Define Agent State\n",
+ "\n",
+ "In LangGraph, we need to define the state that will be passed between nodes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class AgentState(TypedDict):\n",
+ " messages: Annotated[list, add_messages]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create the Model and Node Functions\n",
+ "\n",
+ "We'll create a model with tool binding and define the functions that will be our graph nodes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create model with tool binding\n",
+ "model = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").bind_tools(tools)\n",
+ "\n",
+ "def should_continue(state: AgentState) -> Literal[\"tools\", \"end\"]:\n",
+ " \"\"\"Determine if we should continue to tools or end.\"\"\"\n",
+ " messages = state[\"messages\"]\n",
+ " last_message = messages[-1]\n",
+ " \n",
+ " # If the LLM wants to use tools, continue to the tools node\n",
+ " if hasattr(last_message, \"tool_calls\") and last_message.tool_calls:\n",
+ " return \"tools\"\n",
+ " # Otherwise, we're done\n",
+ " return \"end\"\n",
+ "\n",
+ "def call_model(state: AgentState):\n",
+ " \"\"\"Call the language model.\"\"\"\n",
+ " messages = state[\"messages\"]\n",
+ " response = model.invoke(messages)\n",
+ " return {\"messages\": [response]}\n",
+ "\n",
+ "def call_tools(state: AgentState):\n",
+ " \"\"\"Execute the tool calls requested by the model.\"\"\"\n",
+ " messages = state[\"messages\"]\n",
+ " last_message = messages[-1]\n",
+ " \n",
+ " tool_messages = []\n",
+ " for tool_call in last_message.tool_calls:\n",
+ " tool_name = tool_call[\"name\"]\n",
+ " tool_args = tool_call[\"args\"]\n",
+ " \n",
+ " # Find and execute the requested tool\n",
+ " for tool in tools:\n",
+ " if tool.name == tool_name:\n",
+ " result = tool.invoke(tool_args)\n",
+ " tool_messages.append(\n",
+ " ToolMessage(\n",
+ " content=str(result),\n",
+ " tool_call_id=tool_call[\"id\"]\n",
+ " )\n",
+ " )\n",
+ " break\n",
+ " \n",
+ " return {\"messages\": tool_messages}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Build the Graph\n",
+ "\n",
+ "Now let's construct the LangGraph workflow:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the graph\n",
+ "workflow = StateGraph(AgentState)\n",
+ "\n",
+ "# Add nodes\n",
+ "workflow.add_node(\"agent\", call_model)\n",
+ "workflow.add_node(\"tools\", call_tools)\n",
+ "\n",
+ "# Set the entry point\n",
+ "workflow.set_entry_point(\"agent\")\n",
+ "\n",
+ "# Add conditional edges\n",
+ "workflow.add_conditional_edges(\n",
+ " \"agent\",\n",
+ " should_continue,\n",
+ " {\n",
+ " \"tools\": \"tools\",\n",
+ " \"end\": END\n",
+ " }\n",
+ ")\n",
+ "\n",
+ "# Add edge from tools back to agent\n",
+ "workflow.add_edge(\"tools\", \"agent\")\n",
+ "\n",
+ "# Compile the graph\n",
+ "app = workflow.compile()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Run Examples\n",
+ "\n",
+ "Let's test our agent with different queries that require tool usage:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Example 1: Weather query\n",
+ "print(\"Example 1: Weather Query\")\n",
+ "print(\"=\" * 50)\n",
+ "\n",
+ "messages = [HumanMessage(content=\"What's the weather in New York and Tokyo?\")]\n",
+ "result = app.invoke({\"messages\": messages})\n",
+ "\n",
+ "final_message = result[\"messages\"][-1]\n",
+ "print(f\"Response: {final_message.content}\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Example 2: Math calculation\n",
+ "print(\"Example 2: Math Calculation\")\n",
+ "print(\"=\" * 50)\n",
+ "\n",
+ "messages = [HumanMessage(content=\"Calculate 25 * 4 + 10\")]\n",
+ "result = app.invoke({\"messages\": messages})\n",
+ "\n",
+ "final_message = result[\"messages\"][-1]\n",
+ "print(f\"Response: {final_message.content}\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Example 3: Combined query\n",
+ "print(\"Example 3: Combined Query\")\n",
+ "print(\"=\" * 50)\n",
+ "\n",
+ "messages = [HumanMessage(content=\"What's the weather in Paris? Also calculate 100/5\")]\n",
+ "result = app.invoke({\"messages\": messages})\n",
+ "\n",
+ "final_message = result[\"messages\"][-1]\n",
+ "print(f\"Response: {final_message.content}\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## View in AgentOps Dashboard\n",
+ "\n",
+ "After running this notebook, you can view the traces in your AgentOps dashboard. You'll see:\n",
+ "\n",
+ "1. **Graph Compilation**: The structure of your LangGraph with nodes and edges\n",
+ "2. **Execution Flow**: How the graph executed, including:\n",
+ " - Agent node calls\n",
+ " - Tool node executions\n",
+ " - State transitions\n",
+ "3. **LLM Calls**: Each ChatGPT call with prompts and completions\n",
+ "4. **Tool Usage**: Which tools were called and their results\n",
+ "5. **Timing Information**: How long each step took\n",
+ "\n",
+ "The instrumentation captures the full context of your LangGraph application automatically!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"✅ Check your AgentOps dashboard for comprehensive traces!\")\n",
+ "print(\"🔍 You'll see the graph structure, execution flow, and all LLM/tool calls.\")\n",
+ "agentops.end_trace(trace)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/langgraph/langgraph_example.py b/examples/langgraph/langgraph_example.py
new file mode 100644
index 000000000..2a42b84f6
--- /dev/null
+++ b/examples/langgraph/langgraph_example.py
@@ -0,0 +1,118 @@
+import os
+from typing import Annotated, Literal, TypedDict
+from langgraph.graph import StateGraph, END
+from langgraph.graph.message import add_messages
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import HumanMessage, ToolMessage
+from langchain_core.tools import tool
+import agentops
+from dotenv import load_dotenv
+
+load_dotenv()
+
+agentops.init(os.getenv("AGENTOPS_API_KEY"))
+
+
+@tool
+def get_weather(location: str) -> str:
+ """Get the weather for a given location."""
+ weather_data = {
+ "New York": "Sunny, 72°F",
+ "London": "Cloudy, 60°F",
+ "Tokyo": "Rainy, 65°F",
+ "Paris": "Partly cloudy, 68°F",
+ "Sydney": "Clear, 75°F",
+ }
+ return weather_data.get(location, f"Weather data not available for {location}")
+
+
+@tool
+def calculate(expression: str) -> str:
+ """Evaluate a mathematical expression."""
+ try:
+        result = eval(expression)  # demo only: eval is unsafe on untrusted input
+ return f"The result is: {result}"
+ except Exception as e:
+ return f"Error calculating expression: {str(e)}"
+
+
+tools = [get_weather, calculate]
+
+
+class AgentState(TypedDict):
+ messages: Annotated[list, add_messages]
+
+
+model = ChatOpenAI(temperature=0, model="gpt-4o-mini").bind_tools(tools)
+
+
+def should_continue(state: AgentState) -> Literal["tools", "end"]:
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+ return "tools"
+ return "end"
+
+
+def call_model(state: AgentState):
+ messages = state["messages"]
+ response = model.invoke(messages)
+ return {"messages": [response]}
+
+
+def call_tools(state: AgentState):
+ messages = state["messages"]
+ last_message = messages[-1]
+
+ tool_messages = []
+ for tool_call in last_message.tool_calls:
+ tool_name = tool_call["name"]
+ tool_args = tool_call["args"]
+
+ for tool_obj in tools:
+ if tool_obj.name == tool_name:
+ result = tool_obj.invoke(tool_args)
+ tool_messages.append(ToolMessage(content=str(result), tool_call_id=tool_call["id"]))
+ break
+
+ return {"messages": tool_messages}
+
+
+workflow = StateGraph(AgentState)
+
+workflow.add_node("agent", call_model)
+workflow.add_node("tools", call_tools)
+
+workflow.set_entry_point("agent")
+
+workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", "end": END})
+
+workflow.add_edge("tools", "agent")
+
+app = workflow.compile()
+
+
+def run_example():
+ print("=== LangGraph + AgentOps Example ===\n")
+
+ queries = [
+ "What's the weather in New York and Tokyo?",
+ "Calculate 25 * 4 + 10",
+ "What's the weather in Paris? Also calculate 100/5",
+ ]
+
+ for query in queries:
+ print(f"Query: {query}")
+ print("-" * 40)
+
+ messages = [HumanMessage(content=query)]
+ result = app.invoke({"messages": messages})
+
+ final_message = result["messages"][-1]
+ print(f"Response: {final_message.content}\n")
+
+
+if __name__ == "__main__":
+ run_example()
+ print("✅ Check your AgentOps dashboard for the trace!")