
Commit 513553f

committed: added tests (testing agent and llm application)
1 parent 264ea9d commit 513553f

File tree

3 files changed (+131, -1 lines)


instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 11 additions & 1 deletion
@@ -114,7 +114,17 @@ def vcr_config():
        "decode_compressed_response": True,
        "before_record_response": scrub_aws_credentials,
    }
-
+
+@pytest.fixture(scope="session")
+def instrument_langchain(reader, tracer_provider):
+    langchain_instrumentor = LangChainInstrumentor()
+    langchain_instrumentor.instrument(
+        tracer_provider=tracer_provider
+    )
+
+    yield
+
+    langchain_instrumentor.uninstrument()

@pytest.fixture(scope="function")
def instrument_no_content(
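
A minimal usage sketch (not part of this commit) of how a test could request the new session-scoped fixture; it assumes the in-memory span_exporter fixture that the tests below already rely on, and makes no claim about which operations this instrumentation records for a given runnable:

from langchain_core.runnables import RunnableLambda


def test_with_session_instrumentation(instrument_langchain, span_exporter):
    # instrument_langchain has already called LangChainInstrumentor().instrument()
    # with the session tracer_provider before this test body runs, and it only
    # uninstruments when the whole session ends.
    RunnableLambda(lambda text: text.upper()).invoke("hello")

    # Whatever spans were recorded for the call above are available here.
    spans = span_exporter.get_finished_spans()
    assert isinstance(spans, tuple)
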
Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
import json
import os
from typing import Tuple

import pytest
from langchain import hub
from langchain_aws import ChatBedrock
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.tools import DuckDuckGoSearchResults

import boto3
from langchain_aws import BedrockLLM

from langchain.chains import LLMChain, SequentialChain
from langchain.prompts import PromptTemplate
from opentelemetry.trace import SpanKind


@pytest.mark.vcr
def test_sequential_chain(instrument_legacy, span_exporter, log_exporter):
    bedrock_client = boto3.client(
        service_name='bedrock-runtime',
        region_name='us-west-2',  # Replace with your region
    )

    llm = BedrockLLM(
        client=bedrock_client,
        model_id="anthropic.claude-v2",
        model_kwargs={
            "max_tokens_to_sample": 500,
            "temperature": 0.7,
        },
    )
    synopsis_template = """You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.

Title: {title}
Era: {era}
Playwright: This is a synopsis for the above play:"""  # noqa: E501
    synopsis_prompt_template = PromptTemplate(
        input_variables=["title", "era"], template=synopsis_template
    )
    synopsis_chain = LLMChain(
        llm=llm, prompt=synopsis_prompt_template, output_key="synopsis", name="synopsis"
    )

    template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.

Play Synopsis:
{synopsis}
Review from a New York Times play critic of the above play:"""  # noqa: E501
    prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
    review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review")

    overall_chain = SequentialChain(
        chains=[synopsis_chain, review_chain],
        input_variables=["era", "title"],
        # Here we return multiple variables
        output_variables=["synopsis", "review"],
        verbose=True,
    )
    overall_chain.invoke(
        {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
    )

    spans = span_exporter.get_finished_spans()

    langchain_spans = [
        span for span in spans
        if span.name.startswith("chain ")
    ]

    assert [
        "chain synopsis",
        "chain LLMChain",
        "chain SequentialChain",
    ] == [span.name for span in langchain_spans]

    synopsis_span = next(span for span in spans if span.name == "chain synopsis")
    review_span = next(span for span in spans if span.name == "chain LLMChain")
    overall_span = next(span for span in spans if span.name == "chain SequentialChain")

    assert synopsis_span.kind == SpanKind.INTERNAL
    assert "gen_ai.prompt" in synopsis_span.attributes
    assert "gen_ai.completion" in synopsis_span.attributes

    # The gen_ai.* attributes hold str()-ified dicts, so single quotes are
    # normalized to double quotes before parsing them as JSON.
    synopsis_prompt = json.loads(synopsis_span.attributes["gen_ai.prompt"].replace("'", "\""))
    synopsis_completion = json.loads(synopsis_span.attributes["gen_ai.completion"].replace("'", "\""))

    assert synopsis_prompt == {
        "title": "Tragedy at sunset on the beach",
        "era": "Victorian England",
    }
    assert "synopsis" in synopsis_completion

    assert review_span.kind == SpanKind.INTERNAL
    assert "gen_ai.prompt" in review_span.attributes
    assert "gen_ai.completion" in review_span.attributes

    review_prompt = json.loads(review_span.attributes["gen_ai.prompt"].replace("'", "\""))
    review_completion = json.loads(review_span.attributes["gen_ai.completion"].replace("'", "\""))

    assert "title" in review_prompt
    assert "era" in review_prompt
    assert "synopsis" in review_prompt
    assert "review" in review_completion

    assert overall_span.kind == SpanKind.INTERNAL
    assert "gen_ai.prompt" in overall_span.attributes
    assert "gen_ai.completion" in overall_span.attributes

    overall_prompt = json.loads(overall_span.attributes["gen_ai.prompt"].replace("'", "\""))
    overall_completion = json.loads(overall_span.attributes["gen_ai.completion"].replace("'", "\""))

    assert overall_prompt == {
        "title": "Tragedy at sunset on the beach",
        "era": "Victorian England",
    }
    assert "synopsis" in overall_completion
    assert "review" in overall_completion
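
The unused agent imports at the top of the file (hub, ChatBedrock, AgentExecutor, create_tool_calling_agent, DuckDuckGoSearchResults) correspond to the "testing agent" half of the commit message. A companion test would plausibly look like the sketch below; the hub prompt name, model id, and span assertion are illustrative assumptions, not something this diff shows.

@pytest.mark.vcr
def test_tool_calling_agent(instrument_legacy, span_exporter, log_exporter):
    llm = ChatBedrock(
        model_id="anthropic.claude-3-sonnet-20240229-v1:0",  # assumed model id
        region_name="us-west-2",
    )
    tools = [DuckDuckGoSearchResults()]
    prompt = hub.pull("hwchase17/openai-tools-agent")  # assumed hub prompt

    agent = create_tool_calling_agent(llm, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools)
    agent_executor.invoke({"input": "What is OpenTelemetry?"})

    # AgentExecutor is itself a chain, so it is expected to surface under the
    # same "chain <name>" convention asserted in test_sequential_chain above.
    spans = span_exporter.get_finished_spans()
    assert any(span.name.startswith("chain ") for span in spans)
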

tox.ini

Lines changed: 5 additions & 0 deletions
@@ -8,6 +8,11 @@ envlist =
; Environments are organized by individual package, allowing
; for specifying supported Python versions per package.

+; instrumentation-langchain
+py3{9,10,11,12,13}-test-instrumentation-langchain-{oldest,latest}
+pypy3-test-instrumentation-langchain-{oldest,latest}
+lint-instrumentation-langchain
+
; instrumentation-openai
py3{9,10,11,12,13}-test-instrumentation-openai-v2-{oldest,latest}
pypy3-test-instrumentation-openai-v2-{oldest,latest}
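
With these envlist entries in place, the new environments can be selected in the usual way, for example tox -e py312-test-instrumentation-langchain-latest or tox -e lint-instrumentation-langchain, assuming the matching [testenv] command and dependency definitions for instrumentation-langchain exist elsewhere in tox.ini (this hunk only registers the environment names).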