Skip to content

Commit c32596d

Browse files
committed
Langchain examples for testing
and send with otlp and full chatbot demo --- So... turns out Langchain uses the v1beta1 prediction service client under the hood directly.. So we should probably instrument that after all instead of the main wrapper API. It also has a streaming option so we should try to support that as well, and it has ainvoke() for asyncio.
1 parent 9e4de00 commit c32596d

File tree

8 files changed

+1901
-0
lines changed

8 files changed

+1901
-0
lines changed
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
.venv
2+
docker-compose.yaml
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
3.13
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
This sample contains part of the LangGraph chatbot demo taken from
2+
https://python.langchain.com/docs/tutorials/chatbot, running with OTel instrumentation. It
3+
sends traces and logs to the OTel collector which sends them to GCP. Docker compose wraps
4+
everything to make it easy to run.
5+
6+
## Running the example
7+
8+
I recommend running in Cloud Shell; it's the simplest option. You will see GenAI spans in trace
9+
explorer right away. Make sure the Vertex and Trace APIs are enabled in the project.
10+
11+
### Cloud Shell or GCE
12+
13+
```sh
14+
git clone --branch=vertex-langgraph https://github.com/aabmass/opentelemetry-python-contrib.git
15+
cd opentelemetry-python-contrib/instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/langgraph-chatbot-demo
16+
docker compose up --build --abort-on-container-exit
17+
```
18+
19+
### Locally with Application Default Credentials
20+
21+
```sh
22+
git clone --branch=vertex-langgraph https://github.com/aabmass/opentelemetry-python-contrib.git
23+
cd opentelemetry-python-contrib/instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/langgraph-chatbot-demo
24+
25+
# Export the credentials to `GOOGLE_APPLICATION_CREDENTIALS` environment variable so it is
26+
# available inside the docker containers
27+
export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.config/gcloud/application_default_credentials.json
28+
# Let the collector read the mounted config file
29+
export USERID="$(id -u)"
30+
# Specify the project ID
31+
export GOOGLE_CLOUD_PROJECT=<your project id>
32+
docker compose up --build --abort-on-container-exit
33+
```
Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
# https://python.langchain.com/docs/tutorials/chatbot
2+
3+
from os import environ
4+
from typing import Sequence
5+
6+
from langchain_core.messages import (
7+
AIMessage,
8+
BaseMessage,
9+
HumanMessage,
10+
SystemMessage,
11+
trim_messages,
12+
)
13+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
14+
from langchain_google_vertexai import ChatVertexAI
15+
from langgraph.checkpoint.memory import MemorySaver
16+
from langgraph.graph import START, StateGraph
17+
from langgraph.graph.message import add_messages
18+
from typing_extensions import Annotated, TypedDict
19+
20+
from opentelemetry import trace
21+
22+
23+
def main() -> None:
    """Run the LangGraph chatbot demo against Vertex AI (Gemini).

    Builds a single-node LangGraph workflow around ChatVertexAI, seeds it
    with a canned conversation history, then asks two follow-up questions in
    two separate conversation threads and pretty-prints each answer.

    Reads GOOGLE_CLOUD_PROJECT from the environment (falls back to ambient
    credentials when unset). Intended to run under opentelemetry-instrument
    so the model calls emit GenAI spans.
    """
    model = ChatVertexAI(
        model="gemini-1.5-flash",
        # None lets the client resolve the project from ambient credentials.
        project=environ.get("GOOGLE_CLOUD_PROJECT", None),
    )

    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    class State(TypedDict):
        # add_messages appends new messages to state instead of replacing.
        messages: Annotated[Sequence[BaseMessage], add_messages]
        language: str

    # Keep only the most recent messages that fit the token budget so the
    # prompt stays small; always retain the system message.
    trimmer = trim_messages(
        max_tokens=200,
        strategy="last",
        token_counter=model,
        include_system=True,
        allow_partial=False,
        start_on="human",
    )

    # Canned history that the demo questions refer back to.
    messages = [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="hi! I'm bob"),
        AIMessage(content="hi!"),
        HumanMessage(content="I like vanilla ice cream"),
        AIMessage(content="nice"),
        HumanMessage(content="whats 2 + 2"),
        AIMessage(content="4"),
        HumanMessage(content="thanks"),
        AIMessage(content="no problem!"),
        HumanMessage(content="having fun?"),
        AIMessage(content="yes!"),
    ]

    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        # Trim history, render the prompt, and call the model once.
        trimmed_messages = trimmer.invoke(state["messages"])
        prompt = prompt_template.invoke(
            {"messages": trimmed_messages, "language": state["language"]}
        )
        response = model.invoke(prompt)
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    # MemorySaver checkpoints per-thread state, so each thread_id below is an
    # independent conversation.
    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

    language = "English"

    def ask(thread_id: str, query: str) -> None:
        """Send one query in the given conversation thread and print the reply."""
        config = {"configurable": {"thread_id": thread_id}}
        output = app.invoke(
            {"messages": messages + [HumanMessage(query)], "language": language},
            config,
        )
        output["messages"][-1].pretty_print()

    ask("abc567", "What is my name?")
    ask("abc678", "What math problem did I ask?")
103+
104+
105+
# Open a root span so both demo conversations land in a single trace.
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("demo-root-span"):
    main()
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
services:
2+
app:
3+
build:
4+
dockerfile_inline: |
5+
FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
6+
RUN apt-get update && apt-get install -y git
7+
8+
WORKDIR app/
9+
COPY pyproject.toml uv.lock /app/
10+
RUN uv sync --frozen --no-dev
11+
ENV PATH="/app/.venv/bin:$PATH"
12+
COPY . /app
13+
ENTRYPOINT []
14+
CMD ["opentelemetry-instrument", "python", "chatbot.py"]
15+
volumes:
16+
- ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
17+
environment:
18+
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcol:4317
19+
- OTEL_SERVICE_NAME=langgraph-chatbot-demo
20+
- OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
21+
- OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
22+
23+
- GOOGLE_CLOUD_PROJECT
24+
- GOOGLE_CLOUD_QUOTA_PROJECT
25+
- GOOGLE_APPLICATION_CREDENTIALS
26+
depends_on:
27+
- otelcol
28+
29+
otelcol:
30+
image: otel/opentelemetry-collector-contrib:0.118.0
31+
volumes:
32+
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
33+
- ${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:${GOOGLE_APPLICATION_CREDENTIALS:-/dev/null}:ro
34+
environment:
35+
- GOOGLE_CLOUD_PROJECT
36+
- GOOGLE_CLOUD_QUOTA_PROJECT
37+
- GOOGLE_APPLICATION_CREDENTIALS
38+
# If the collector does not have permission to read the mounted volumes, set
39+
# USERID=$(id -u) to run the container as the current user
40+
user: $USERID
41+
42+
volumes:
43+
logs:
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
receivers:
2+
otlp:
3+
protocols:
4+
grpc:
5+
endpoint: "0.0.0.0:4317"
6+
7+
extensions:
8+
googleclientauth:
9+
project: ${GOOGLE_CLOUD_PROJECT}
10+
quota_project: ${GOOGLE_CLOUD_QUOTA_PROJECT}
11+
scopes:
12+
- "https://www.googleapis.com/auth/trace.append"
13+
- "https://www.googleapis.com/auth/cloud-platform"
14+
15+
processors:
16+
resource:
17+
attributes:
18+
- key: gcp.project_id
19+
value: ${GOOGLE_CLOUD_PROJECT}
20+
action: insert
21+
22+
exporters:
23+
googlecloud:
24+
project: ${GOOGLE_CLOUD_PROJECT}
25+
log:
26+
default_log_name: "collector-otlp-logs"
27+
otlp:
28+
endpoint: https://telemetry.us-central1.rep.googleapis.com:443
29+
auth:
30+
authenticator: googleclientauth
31+
32+
service:
33+
extensions: [googleclientauth]
34+
pipelines:
35+
traces:
36+
receivers: [otlp]
37+
processors: [resource]
38+
exporters: [otlp]
39+
logs:
40+
receivers: [otlp]
41+
processors: [resource]
42+
exporters: [googlecloud]
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
[project]
2+
name = "langgraph-chatbot-demo"
3+
version = "0.1.0"
4+
description = "LangGraph chatbot demo instrumented with OpenTelemetry, exporting to GCP"
5+
readme = "README.md"
6+
requires-python = ">=3.9"
7+
dependencies = [
8+
"langchain-core>=0.3.31",
9+
"langchain-google-vertexai>=2.0.7",
10+
"langgraph>=0.2.27",
11+
"opentelemetry-distro>=0.50b0",
12+
"opentelemetry-exporter-otlp-proto-grpc>=1.29.0",
13+
"opentelemetry-instrumentation-vertexai",
14+
]
15+
16+
[tool.uv.sources]
17+
opentelemetry-instrumentation-vertexai = { git = "https://github.com/aabmass/opentelemetry-python-contrib.git", subdirectory = "instrumentation-genai/opentelemetry-instrumentation-vertexai", branch = "vertex-langgraph" }
18+
19+
[dependency-groups]
20+
dev = [
21+
"ruff>=0.9.2",
22+
]

0 commit comments

Comments
 (0)