Commit c836e50
LangChain examples for testing, sending with OTLP, and a full chatbot demo.

It turns out LangChain uses the v1beta1 prediction service client under the hood directly, so we should probably instrument that after all instead of the main wrapper API. It also has a streaming option we should try to support, and it has ainvoke() for asyncio.
1 parent b2f6b32 commit c836e50
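The streaming and asyncio entry points mentioned in the message are part of LangChain's standard Runnable interface on ChatVertexAI. A minimal sketch of the call shapes the instrumentation would need to cover (illustrative only, not part of this commit; assumes Vertex AI credentials are already configured):

    import asyncio

    from langchain_google_vertexai import ChatVertexAI

    model = ChatVertexAI(model="gemini-1.5-flash")

    # Synchronous streaming: chunks arrive incrementally as message chunks.
    for chunk in model.stream("Tell me a short joke."):
        print(chunk.content, end="", flush=True)
    print()

    # Asyncio: ainvoke() awaits the full response.
    async def ask() -> None:
        response = await model.ainvoke("Tell me another joke.")
        print(response.content)

    asyncio.run(ask())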

File tree

5 files changed: +1337 −1 lines changed

Lines changed: 259 additions & 0 deletions
@@ -0,0 +1,259 @@
# /// script
# dependencies = [
#   "langchain-core",
#   "langchain-google-vertexai>=2.0.9",
#   "langgraph>0.2.27",
#   "opentelemetry-distro",
#   "opentelemetry-exporter-otlp-proto-grpc",
#   "opentelemetry-instrumentation-vertexai",
# ]
#
# [tool.uv.sources]
# opentelemetry-instrumentation-vertexai = { git = "https://github.com/aabmass/opentelemetry-python-contrib.git", subdirectory = "instrumentation-genai/opentelemetry-instrumentation-vertexai", branch = "vertex-langgraph" }
#
# ///

# https://python.langchain.com/docs/tutorials/chatbot

# Load auto instrumentation
from opentelemetry.instrumentation.auto_instrumentation import sitecustomize  # noqa: F401, I001

from typing import Sequence

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_vertexai import ChatVertexAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict

from opentelemetry import trace


def main() -> None:
    model = ChatVertexAI(model="gemini-1.5-flash")

    # Define a new graph
    workflow = StateGraph(state_schema=MessagesState)

    # Define the function that calls the model
    def call_model(state: MessagesState):
        response = model.invoke(state["messages"])
        return {"messages": response}

    # Define the (single) node in the graph
    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    # Add memory
    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc123"}}

    query = "Hi! I'm Bob."

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()  # output contains all messages in state

    query = "What's my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc234"}}

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc123"}}

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You talk like a pirate. Answer all questions to the best of your ability.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    workflow = StateGraph(state_schema=MessagesState)

    def call_model(state: MessagesState):
        # highlight-start
        prompt = prompt_template.invoke(state)
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": response}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc345"}}
    query = "Hi! I'm Jim."

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    query = "What is my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    # highlight-next-line
    class State(TypedDict):
        # highlight-next-line
        messages: Annotated[Sequence[BaseMessage], add_messages]
        # highlight-next-line
        language: str

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        prompt = prompt_template.invoke(state)
        response = model.invoke(prompt)
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc456"}}
    query = "Hi! I'm Bob."
    language = "Spanish"

    input_messages = [HumanMessage(query)]
    output = app.invoke(
        # highlight-next-line
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    query = "What is my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages},
        config,
    )
    output["messages"][-1].pretty_print()

    trimmer = trim_messages(
        max_tokens=65,
        strategy="last",
        token_counter=model,
        include_system=True,
        allow_partial=False,
        start_on="human",
    )

    messages = [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="hi! I'm bob"),
        AIMessage(content="hi!"),
        HumanMessage(content="I like vanilla ice cream"),
        AIMessage(content="nice"),
        HumanMessage(content="whats 2 + 2"),
        AIMessage(content="4"),
        HumanMessage(content="thanks"),
        AIMessage(content="no problem!"),
        HumanMessage(content="having fun?"),
        AIMessage(content="yes!"),
    ]

    trimmer.invoke(messages)

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        # highlight-start
        trimmed_messages = trimmer.invoke(state["messages"])
        prompt = prompt_template.invoke(
            {"messages": trimmed_messages, "language": state["language"]}
        )
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc567"}}
    query = "What is my name?"
    language = "English"

    # highlight-next-line
    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc678"}}
    query = "What math problem did I ask?"
    language = "English"

    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc789"}}
    query = "Hi I'm Todd, please tell me a joke."
    language = "English"

    input_messages = [HumanMessage(query)]
    # highlight-next-line
    for chunk, metadata in app.stream(
        {"messages": input_messages, "language": language},
        config,
        # highlight-next-line
        stream_mode="messages",
    ):
        if isinstance(chunk, AIMessage):  # Filter to just model responses
            print(chunk.content, end="|")


with trace.get_tracer(__name__).start_as_current_span("demo-root-span"):
    main()
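The script relies on the auto-instrumentation sitecustomize import to set up the OTLP export. If that import is dropped, a tracer provider can be wired up manually instead; a minimal sketch, assuming the SDK is available (opentelemetry-distro pulls it in) and using the gRPC OTLP exporter already listed in the script dependencies:

    from opentelemetry import trace
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    # Export spans over OTLP/gRPC; the exporter honors
    # OTEL_EXPORTER_OTLP_ENDPOINT and otherwise defaults to localhost:4317.
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(provider)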
