@@ -104,7 +104,7 @@ Each tool generated by the SDK can be represented as a LangGraph node. The
104
104
node's functionality would encapsulate the execution of the corresponding tool.
105
105
106
106
```python
107
- from toolbox_sdk import ToolboxClient
107
+ from toolbox_langchain_sdk import ToolboxClient
108
108
from langgraph.graph import StateGraph, MessagesState
109
109
from langgraph.prebuilt import ToolNode
110
110
@@ -116,7 +116,7 @@ def call_model(state: MessagesState):
116
116
return {"messages": [response]}
117
117
118
118
model = ChatVertexAI()
119
- builder = StateGraph()
119
+ builder = StateGraph(MessagesState)
120
120
tool_node = ToolNode(tools)
121
121
122
122
builder.add_node("agent", call_model)
@@ -131,7 +131,9 @@ from a tool can then be fed back into the LLM for further processing or
131
131
decision-making.
132
132
133
133
```python
134
+ from typing import Literal
134
135
from langgraph.graph import END, START
136
+ from langchain_core.messages import HumanMessage
135
137
136
138
# Define the function that determines whether to continue or not
137
139
def should_continue(state: MessagesState) -> Literal["tools", END]:
@@ -151,6 +153,10 @@ builder.add_conditional_edges(
151
153
builder.add_edge("tools", "agent")
152
154
153
155
graph = builder.compile()
156
+
157
+ graph.invoke(
158
+ {"messages": [HumanMessage(content="Do something with the tools")]},
159
+ )
154
160
```
155
161
156
162
## Manual usage
0 commit comments