
Commit cf3eea7

Langchain examples for testing
and send with OTLP, plus a full chatbot demo --- It turns out LangChain uses the v1beta1 prediction service client directly under the hood, so we should probably instrument that after all instead of the main wrapper API. It also has a streaming option that we should try to support, and an ainvoke() method for asyncio.
1 parent b2f6b32 commit cf3eea7
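
For reference, the streaming and async entry points called out above are part of LangChain's standard Runnable interface on `ChatVertexAI`. Below is a minimal sketch of the call shapes an instrumentation would have to cover; the model name and prompts are placeholders, and it assumes `langchain-google-vertexai` is installed and Application Default Credentials are configured.

```python
# Sketch only: the invoke/stream/ainvoke call shapes instrumentation would
# need to cover. Prompts and the model name are placeholders.
import asyncio

from langchain_google_vertexai import ChatVertexAI

model = ChatVertexAI(model="gemini-1.5-flash")

# Plain synchronous call; per the commit message this goes through the
# v1beta1 prediction service client under the hood.
print(model.invoke("Hello!").content)

# Streaming: content arrives in chunks, so a span has to cover the whole stream.
for chunk in model.stream("Tell me a short joke"):
    print(chunk.content, end="", flush=True)
print()


# Async variant (ainvoke) used from asyncio code.
async def ask() -> None:
    response = await model.ainvoke("Hello from asyncio!")
    print(response.content)


asyncio.run(ask())
```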

File tree

10 files changed: +2088, -1 lines changed

Lines changed: 2 additions & 0 deletions

.venv
docker-compose.yaml
Lines changed: 1 addition & 0 deletions

3.13
Lines changed: 33 additions & 0 deletions

This sample contains part of the LangGraph chatbot demo taken from
https://python.langchain.com/docs/tutorials/chatbot, running with OTel instrumentation. It
sends traces and logs to the OTel Collector, which forwards them to GCP. Docker Compose
wraps everything up to make it easy to run.

## Running the example

I recommend running in Cloud Shell; it's super simple. You will see GenAI spans in the Trace
explorer right away. Make sure the Vertex AI and Trace APIs are enabled in the project.

### Cloud Shell or GCE

```sh
git clone --branch=vertex-langgraph https://github.com/aabmass/opentelemetry-python-contrib.git
cd opentelemetry-python-contrib/instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/langgraph-chatbot-demo
docker compose up --build --abort-on-container-exit
```

### Locally with Application Default Credentials

```sh
git clone --branch=vertex-langgraph https://github.com/aabmass/opentelemetry-python-contrib.git
cd opentelemetry-python-contrib/instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/langgraph-chatbot-demo

# Export the credentials to the `GOOGLE_APPLICATION_CREDENTIALS` environment variable so they
# are available inside the Docker containers
export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.config/gcloud/application_default_credentials.json
# Let the collector read the mounted config
export USERID="$(id -u)"
# Specify the project ID
export GOOGLE_CLOUD_PROJECT=<your project id>
docker compose up --build --abort-on-container-exit
```
Lines changed: 245 additions & 0 deletions

# https://python.langchain.com/docs/tutorials/chatbot

from os import environ
from typing import Sequence

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_vertexai import ChatVertexAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict

from opentelemetry import trace


def main() -> None:
    model = ChatVertexAI(
        model="gemini-1.5-flash",
        project=environ.get("GOOGLE_CLOUD_PROJECT", None),
    )

    # # Define a new graph
    # workflow = StateGraph(state_schema=MessagesState)

    # # Define the function that calls the model
    # def call_model(state: MessagesState):
    #     response = model.invoke(state["messages"])
    #     return {"messages": response}

    # # Define the (single) node in the graph
    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # # Add memory
    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc123"}}

    # query = "Hi! I'm Bob."

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][
    #     -1
    # ].pretty_print()  # output contains all messages in state

    # query = "What's my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # config = {"configurable": {"thread_id": "abc234"}}

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # config = {"configurable": {"thread_id": "abc123"}}

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # prompt_template = ChatPromptTemplate.from_messages(
    #     [
    #         (
    #             "system",
    #             "You talk like a pirate. Answer all questions to the best of your ability.",
    #         ),
    #         MessagesPlaceholder(variable_name="messages"),
    #     ]
    # )

    # workflow = StateGraph(state_schema=MessagesState)

    # def call_model(state: MessagesState):
    #     # highlight-start
    #     prompt = prompt_template.invoke(state)
    #     response = model.invoke(prompt)
    #     # highlight-end
    #     return {"messages": response}

    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc345"}}
    # query = "Hi! I'm Jim."

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # query = "What is my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    # # highlight-next-line
    class State(TypedDict):
        # highlight-next-line
        messages: Annotated[Sequence[BaseMessage], add_messages]
        # highlight-next-line
        language: str

    # workflow = StateGraph(state_schema=State)

    # def call_model(state: State):
    #     prompt = prompt_template.invoke(state)
    #     response = model.invoke(prompt)
    #     return {"messages": [response]}

    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc456"}}
    # query = "Hi! I'm Bob."
    # language = "Spanish"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke(
    #     # highlight-next-line
    #     {"messages": input_messages, "language": language},
    #     config,
    # )
    # output["messages"][-1].pretty_print()

    # query = "What is my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke(
    #     {"messages": input_messages},
    #     config,
    # )
    # output["messages"][-1].pretty_print()

    trimmer = trim_messages(
        max_tokens=65,
        strategy="last",
        token_counter=model,
        include_system=True,
        allow_partial=False,
        start_on="human",
    )

    messages = [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="hi! I'm bob"),
        AIMessage(content="hi!"),
        HumanMessage(content="I like vanilla ice cream"),
        AIMessage(content="nice"),
        HumanMessage(content="whats 2 + 2"),
        AIMessage(content="4"),
        HumanMessage(content="thanks"),
        AIMessage(content="no problem!"),
        HumanMessage(content="having fun?"),
        AIMessage(content="yes!"),
    ]

    trimmer.invoke(messages)

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        # highlight-start
        trimmed_messages = trimmer.invoke(state["messages"])
        prompt = prompt_template.invoke(
            {"messages": trimmed_messages, "language": state["language"]}
        )
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    config = {"configurable": {"thread_id": "abc567"}}
    query = "What is my name?"
    language = "English"

    # highlight-next-line
    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc678"}}
    query = "What math problem did I ask?"
    language = "English"

    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

    config = {"configurable": {"thread_id": "abc789"}}
    query = "Hi I'm Todd, please tell me a joke."
    language = "English"

    input_messages = [HumanMessage(query)]
    # highlight-next-line
    for chunk, metadata in app.stream(
        {"messages": input_messages, "language": language},
        config,
        # highlight-next-line
        stream_mode="messages",
    ):
        if isinstance(chunk, AIMessage):  # Filter to just model responses
            print(chunk.content, end="|")


with trace.get_tracer(__name__).start_as_current_span("demo-root-span"):
    main()
Lines changed: 43 additions & 0 deletions

services:
  app:
    build:
      dockerfile_inline: |
        FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
        RUN apt-get update && apt-get install -y git

        WORKDIR app/
        COPY pyproject.toml uv.lock /app
        RUN uv sync --frozen --no-dev
        ENV PATH="/app/.venv/bin:$PATH"
        COPY . /app
        ENTRYPOINT []
        CMD ["opentelemetry-instrument", "python", "chatbot.py"]
    volumes:
      - ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcol:4317
      - OTEL_SERVICE_NAME=langgraph-chatbot-demo
      - OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
      - OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true

      - GOOGLE_CLOUD_PROJECT
      - GOOGLE_CLOUD_QUOTA_PROJECT
      - GOOGLE_APPLICATION_CREDENTIALS
    depends_on:
      - otelcol

  otelcol:
    image: otel/opentelemetry-collector-contrib:0.118.0
    volumes:
      - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
      - ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
    environment:
      - GOOGLE_CLOUD_PROJECT
      - GOOGLE_CLOUD_QUOTA_PROJECT
      - GOOGLE_APPLICATION_CREDENTIALS
    # If the collector does not have permission to read the mounted volumes, set
    # USERID=$(id -u) to run the container as the current user
    user: $USERID

volumes:
  logs:
Lines changed: 48 additions & 0 deletions

# pylint: skip-file
from langchain_google_vertexai import ChatVertexAI

# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
from opentelemetry import _events, _logs, trace

# from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
#     OTLPLogExporter,
# )
# from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
#     OTLPSpanExporter,
# )
from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,
    ConsoleSpanExporter,
)

# configure tracing
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(ConsoleSpanExporter())
)

# configure logging and events
_logs.set_logger_provider(LoggerProvider())
# _logs.get_logger_provider().add_log_record_processor(
#     BatchLogRecordProcessor(OTLPLogExporter())
# )
_events.set_event_logger_provider(EventLoggerProvider())

# instrument VertexAI
VertexAIInstrumentor().instrument()


def main():
    model = ChatVertexAI(
        model="gemini-1.5-flash", temperature=0.2, max_output_tokens=20
    )
    res = model.invoke("Hello, world!")
    print(res)


if __name__ == "__main__":
    main()
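
The script above exports spans to the console and leaves the OTLP exporters commented out. Below is a minimal sketch of how those commented-out pieces could be wired up to send traces and logs over OTLP instead; it assumes the `opentelemetry-exporter-otlp-proto-grpc` package is installed, that a collector is reachable at the default `OTEL_EXPORTER_OTLP_ENDPOINT`, and that `BatchLogRecordProcessor` is imported from `opentelemetry.sdk._logs.export` (an import not shown in the original file).

```python
# Sketch: swap the console exporter for OTLP and attach the (beta) log
# exporter that is commented out above. Assumes opentelemetry-exporter-otlp-proto-grpc
# is installed and an OTLP collector is reachable.
from opentelemetry import _events, _logs, trace
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

# traces -> OTLP
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter())
)

# logs and events -> OTLP (beta APIs, matching the commented-out imports above)
_logs.set_logger_provider(LoggerProvider())
_logs.get_logger_provider().add_log_record_processor(
    BatchLogRecordProcessor(OTLPLogExporter())
)
_events.set_event_logger_provider(EventLoggerProvider())
```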
