Commit 4d81f54

LangChain examples for testing
and send with OTLP, plus a full chatbot demo --- So... it turns out LangChain uses the v1beta1 prediction service client under the hood directly, so we should probably instrument that after all instead of the main wrapper API. It also has a streaming option that we should try to support as well, and it has ainvoke() for asyncio.
1 parent b2f6b32 commit 4d81f54
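
For context on the commit message: a minimal sketch of what instrumenting the v1beta1 client directly could look like, assuming wrapt as the hook mechanism and PredictionServiceClient.generate_content as the hook point. This is an illustration only, not the final instrumentation:

import wrapt

from opentelemetry import trace


def _traced_generate_content(wrapped, instance, args, kwargs):
    # Span around each low-level generate_content call, so LangChain's
    # direct use of the v1beta1 client is captured even when the main
    # wrapper API is bypassed.
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("vertexai.generate_content"):
        return wrapped(*args, **kwargs)


wrapt.wrap_function_wrapper(
    "google.cloud.aiplatform_v1beta1.services.prediction_service.client",
    "PredictionServiceClient.generate_content",
    _traced_generate_content,
)

A real implementation would also need to wrap the streaming variant and the async client, per the commit message.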

File tree

10 files changed: 2011 additions, 1 deletion

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
.venv
docker-compose.yaml
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
3.13

instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/langchain/README.md

Whitespace-only changes.
Lines changed: 241 additions & 0 deletions
@@ -0,0 +1,241 @@
# https://python.langchain.com/docs/tutorials/chatbot

from typing import Sequence

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_vertexai import ChatVertexAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict

from opentelemetry import trace


def main() -> None:
    model = ChatVertexAI(model="gemini-1.5-flash")

    # Define a new graph
    workflow = StateGraph(state_schema=MessagesState)

    # Define the function that calls the model
    def call_model(state: MessagesState):
        response = model.invoke(state["messages"])
        return {"messages": response}

    # Define the (single) node in the graph
    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    # Add memory
    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc123"}}

    query = "Hi! I'm Bob."

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()  # output contains all messages in state

    query = "What's my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc234"}}

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc123"}}

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You talk like a pirate. Answer all questions to the best of your ability.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    workflow = StateGraph(state_schema=MessagesState)

    def call_model(state: MessagesState):
        # highlight-start
        prompt = prompt_template.invoke(state)
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": response}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc345"}}
    query = "Hi! I'm Jim."

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    query = "What is my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    # highlight-next-line
    class State(TypedDict):
        # highlight-next-line
        messages: Annotated[Sequence[BaseMessage], add_messages]
        # highlight-next-line
        language: str

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        prompt = prompt_template.invoke(state)
        response = model.invoke(prompt)
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc456"}}
    query = "Hi! I'm Bob."
    language = "Spanish"

    input_messages = [HumanMessage(query)]
    output = app.invoke(
        # highlight-next-line
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    query = "What is my name?"

    input_messages = [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages},
        config,
    )
    output["messages"][-1].pretty_print()

    trimmer = trim_messages(
        max_tokens=65,
        strategy="last",
        token_counter=model,
        include_system=True,
        allow_partial=False,
        start_on="human",
    )

    messages = [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="hi! I'm bob"),
        AIMessage(content="hi!"),
        HumanMessage(content="I like vanilla ice cream"),
        AIMessage(content="nice"),
        HumanMessage(content="whats 2 + 2"),
        AIMessage(content="4"),
        HumanMessage(content="thanks"),
        AIMessage(content="no problem!"),
        HumanMessage(content="having fun?"),
        AIMessage(content="yes!"),
    ]

    trimmer.invoke(messages)

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        # highlight-start
        trimmed_messages = trimmer.invoke(state["messages"])
        prompt = prompt_template.invoke(
            {"messages": trimmed_messages, "language": state["language"]}
        )
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc567"}}
    query = "What is my name?"
    language = "English"

    # highlight-next-line
    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc678"}}
    query = "What math problem did I ask?"
    language = "English"

    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc789"}}
    query = "Hi I'm Todd, please tell me a joke."
    language = "English"

    input_messages = [HumanMessage(query)]
    # highlight-next-line
    for chunk, metadata in app.stream(
        {"messages": input_messages, "language": language},
        config,
        # highlight-next-line
        stream_mode="messages",
    ):
        if isinstance(chunk, AIMessage):  # Filter to just model responses
            print(chunk.content, end="|")


with trace.get_tracer(__name__).start_as_current_span("demo-root-span"):
    main()
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
services:
  app:
    build:
      dockerfile_inline: |
        FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
        RUN apt-get update && apt-get install -y git

        WORKDIR app/
        COPY . /app
        RUN uv sync --frozen --no-dev
        ENV PATH="/app/.venv/bin:$PATH"
        ENTRYPOINT []
        CMD ["opentelemetry-instrument", "python", "chatbot.py"]
    volumes:
      - ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcol:4317
      - OTEL_SERVICE_NAME=langgraph-chatbot-demo
      - OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
      - OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true

      - GOOGLE_CLOUD_PROJECT
      - GOOGLE_CLOUD_QUOTA_PROJECT
      - GOOGLE_APPLICATION_CREDENTIALS
    depends_on:
      - otelcol

  otelcol:
    image: otel/opentelemetry-collector-contrib:0.118.0
    volumes:
      - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
      - ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
    environment:
      - GOOGLE_CLOUD_PROJECT
      - GOOGLE_CLOUD_QUOTA_PROJECT
      - GOOGLE_APPLICATION_CREDENTIALS
    # If the collector does not have permission to read the mounted volumes, set
    # USERID=$(id -u) to run the container as the current user
    user: $USERID

volumes:
  logs:
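
To try the demo, exporting GOOGLE_CLOUD_PROJECT and GOOGLE_APPLICATION_CREDENTIALS and then running something like docker compose up --build should be enough: the app container runs chatbot.py under opentelemetry-instrument and sends OTLP to the otelcol service defined above.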
Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
# pylint: skip-file
from langchain_google_vertexai import ChatVertexAI

# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
from opentelemetry import _events, _logs, trace

# from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
#     OTLPLogExporter,
# )
# from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
#     OTLPSpanExporter,
# )
from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,
    ConsoleSpanExporter,
)

# configure tracing
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(ConsoleSpanExporter())
)

# configure logging and events
_logs.set_logger_provider(LoggerProvider())
# _logs.get_logger_provider().add_log_record_processor(
#     BatchLogRecordProcessor(OTLPLogExporter())
# )
_events.set_event_logger_provider(EventLoggerProvider())

# instrument VertexAI
VertexAIInstrumentor().instrument()


def main():
    model = ChatVertexAI(
        model="gemini-1.5-flash", temperature=0.2, max_output_tokens=20
    )
    res = model.invoke("Hello, world!")
    print(res)


if __name__ == "__main__":
    main()
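
Not part of this commit, but relevant to the commit message: ChatVertexAI also exposes asyncio and streaming through LangChain's standard Runnable interface (ainvoke() and astream()), which the instrumentation would eventually need to cover. A quick sketch of those call paths, assuming the same gemini-1.5-flash model as above:

import asyncio

from langchain_google_vertexai import ChatVertexAI


async def demo() -> None:
    model = ChatVertexAI(model="gemini-1.5-flash")

    # asyncio entry point mentioned in the commit message
    res = await model.ainvoke("Hello, world!")
    print(res.content)

    # streaming: response chunks arrive incrementally
    async for chunk in model.astream("Tell me a short joke"):
        print(chunk.content, end="", flush=True)


asyncio.run(demo())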
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: "0.0.0.0:4317"

extensions:
  googleclientauth:
    project: ${GOOGLE_CLOUD_PROJECT}
    quota_project: ${GOOGLE_CLOUD_QUOTA_PROJECT}
    scopes:
      - "https://www.googleapis.com/auth/trace.append"
      - "https://www.googleapis.com/auth/cloud-platform"

processors:
  resource:
    attributes:
      - key: gcp.project_id
        value: ${GOOGLE_CLOUD_PROJECT}
        action: insert

exporters:
  googlecloud:
    project: ${GOOGLE_CLOUD_PROJECT}
    log:
      default_log_name: "collector-otlp-logs"
  otlp:
    endpoint: https://telemetry.us-central1.rep.googleapis.com:443
    auth:
      authenticator: googleclientauth

service:
  extensions: [googleclientauth]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [resource]
      exporters: [otlp]
    logs:
      receivers: [otlp]
      processors: [resource]
      exporters: [googlecloud]
Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
[project]
name = "langgraph-chatbot-demo"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.9"
dependencies = [
    "langchain-core>=0.3.31",
    "langchain-google-vertexai>=2.0.7",
    "langgraph>0.2.27",
    "opentelemetry-distro>=0.50b0",
    "opentelemetry-exporter-otlp-proto-grpc>=1.29.0",
    "opentelemetry-instrumentation-vertexai",
]

[tool.uv.sources]
opentelemetry-instrumentation-vertexai = { git = "https://github.com/aabmass/opentelemetry-python-contrib.git", subdirectory = "instrumentation-genai/opentelemetry-instrumentation-vertexai", branch = "vertex-langgraph" }
