Commit 5d7d17d

removed more than 15 local vars from testing file and changed return logic for init file

1 parent 4ecaf95 · commit 5d7d17d

3 files changed (+59, -54 lines)

ai_agent_instrumentation/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain_v2/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -60,3 +60,4 @@ def __call__(
                 return None

         instance.add_handler(self.callback_handler, True)
+        return None
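
The added trailing `return None` makes every exit path of `__call__` explicit instead of letting control fall off the end of the function. For orientation, a sketch of how the whole wrapper plausibly reads after this commit, assuming the usual wrapt-style callback-manager init wrapper; only the lines shown in the diff above are confirmed, the class name and loop around them are reconstructed:

class _BaseCallbackManagerInitWrapper:
    """Hypothetical wrapper name; the committed source may differ."""

    def __init__(self, callback_handler):
        self.callback_handler = callback_handler

    def __call__(self, wrapped, instance, args, kwargs):
        wrapped(*args, **kwargs)
        for handler in instance.inheritable_handlers:
            if isinstance(handler, type(self.callback_handler)):
                # Our handler is already registered: exit early.
                return None

        instance.add_handler(self.callback_handler, True)
        return None  # added by this commit so all paths return explicitly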

ai_agent_instrumentation/opentelemetry-instrumentation-langchain-v2/tests/test_chains.py

Lines changed: 54 additions & 47 deletions
@@ -14,48 +14,68 @@
 from opentelemetry.trace import SpanKind


-@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
-def test_sequential_chain(instrument_langchain, span_exporter):
-    span_exporter.clear()
-
-    session = boto3.Session(region_name="us-west-2")
-
-    bedrock_client = session.client(service_name="bedrock-runtime", region_name="us-west-2")
-
-    llm = BedrockLLM(
+def create_bedrock_llm(region="us-west-2"):
+    """Create and return a BedrockLLM instance."""
+    session = boto3.Session(region_name=region)
+    bedrock_client = session.client(service_name="bedrock-runtime", region_name=region)
+    return BedrockLLM(
         client=bedrock_client,
         model_id="anthropic.claude-v2",
-        model_kwargs={
-            "max_tokens_to_sample": 500,
-            "temperature": 0.7,
-        },
+        model_kwargs={"max_tokens_to_sample": 500, "temperature": 0.7},
     )
-    synopsis_template = """You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.
+
+
+def create_chains(llm):
+    """Create and return the synopsis chain, review chain, and overall chain."""
+
+    synopsis_prompt = PromptTemplate(
+        input_variables=["title", "era"],
+        template="""You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.

 Title: {title}
 Era: {era}
-Playwright: This is a synopsis for the above play:"""  # noqa: E501
-    synopsis_prompt_template = PromptTemplate(input_variables=["title", "era"], template=synopsis_template)
-    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt_template, output_key="synopsis", name="synopsis")
+Playwright: This is a synopsis for the above play:""",  # noqa: E501
+    )
+    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt, output_key="synopsis", name="synopsis")

-    template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
+    review_prompt = PromptTemplate(
+        input_variables=["synopsis"],
+        template="""You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.

 Play Synopsis:
 {synopsis}
-Review from a New York Times play critic of the above play:"""  # noqa: E501
-    prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
-    review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review")
+Review from a New York Times play critic of the above play:""",  # noqa: E501
+    )
+    review_chain = LLMChain(llm=llm, prompt=review_prompt, output_key="review")

     overall_chain = SequentialChain(
         chains=[synopsis_chain, review_chain],
         input_variables=["era", "title"],
         output_variables=["synopsis", "review"],
         verbose=True,
     )
-    overall_chain.invoke({"title": "Tragedy at sunset on the beach", "era": "Victorian England"})

-    spans = span_exporter.get_finished_spans()
+    return overall_chain
+
+
+def validate_span(span, expected_kind, expected_attrs):
+    """Validate a span against expected values."""
+    assert span.kind == expected_kind
+    for attr in expected_attrs:
+        assert attr in span.attributes
+    return ast.literal_eval(span.attributes["gen_ai.prompt"]), ast.literal_eval(span.attributes["gen_ai.completion"])
+

+@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
+def test_sequential_chain(instrument_langchain, span_exporter):
+    span_exporter.clear()
+
+    llm = create_bedrock_llm()
+    chain = create_chains(llm)
+    input_data = {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    chain.invoke(input_data)
+
+    spans = span_exporter.get_finished_spans()
     langchain_spans = [span for span in spans if span.name.startswith("chain ")]

     assert [
@@ -68,36 +88,23 @@ def test_sequential_chain(instrument_langchain, span_exporter):
     review_span = next(span for span in spans if span.name == "chain LLMChain")
     overall_span = next(span for span in spans if span.name == "chain SequentialChain")

-    assert synopsis_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in synopsis_span.attributes
-    assert "gen_ai.completion" in synopsis_span.attributes
-
-    synopsis_prompt = ast.literal_eval(synopsis_span.attributes["gen_ai.prompt"])
-    synopsis_completion = ast.literal_eval(synopsis_span.attributes["gen_ai.completion"])
-
-    assert synopsis_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    synopsis_prompt, synopsis_completion = validate_span(
+        synopsis_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert synopsis_prompt == input_data
     assert "synopsis" in synopsis_completion

-    assert review_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in review_span.attributes
-    assert "gen_ai.completion" in review_span.attributes
-    print("Raw completion value:", repr(synopsis_span.attributes["gen_ai.completion"]))
-
-    review_prompt = ast.literal_eval(review_span.attributes["gen_ai.prompt"])
-    review_completion = ast.literal_eval(review_span.attributes["gen_ai.completion"])
-
+    review_prompt, review_completion = validate_span(
+        review_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
     assert "title" in review_prompt
     assert "era" in review_prompt
     assert "synopsis" in review_prompt
     assert "review" in review_completion

-    assert overall_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in overall_span.attributes
-    assert "gen_ai.completion" in overall_span.attributes
-
-    overall_prompt = ast.literal_eval(overall_span.attributes["gen_ai.prompt"])
-    overall_completion = ast.literal_eval(overall_span.attributes["gen_ai.completion"])
-
-    assert overall_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    overall_prompt, overall_completion = validate_span(
+        overall_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert overall_prompt == input_data
     assert "synopsis" in overall_completion
     assert "review" in overall_completion

ai_agent_instrumentation/opentelemetry-instrumentation-langchain-v2/tests/test_langgraph_agent.py

Lines changed: 4 additions & 7 deletions
@@ -56,9 +56,9 @@ def calculate(state: State):
         print(f" Attributes: {span.attributes}")
         print("---")

-    expected_spans = {"chain LangGraph", "chain calculate", "chat anthropic.claude-3-haiku-20240307-v1:0"}
-
-    assert expected_spans == {span.name for span in spans}
+    assert {"chain LangGraph", "chain calculate", "chat anthropic.claude-3-haiku-20240307-v1:0"} == {
+        span.name for span in spans
+    }

     llm_span = next(span for span in spans if span.name == "chat anthropic.claude-3-haiku-20240307-v1:0")
     calculate_task_span = next(span for span in spans if span.name == "chain calculate")
@@ -115,8 +115,7 @@ def calculate(state: State):

     langgraph = workflow.compile()

-    user_request = "What's 5 + 5?"
-    await langgraph.ainvoke(input={"request": user_request})
+    await langgraph.ainvoke(input={"request": "What's 5 + 5?"})
     spans = span_exporter.get_finished_spans()

     assert set(["chain LangGraph", "chain calculate", "chat anthropic.claude-3-haiku-20240307-v1:0"]) == {
@@ -147,8 +146,6 @@ def build_graph():

     graph = build_graph()

-    from opentelemetry import trace
-
     assert trace.get_current_span() == INVALID_SPAN

     graph.invoke({"result": "init"})
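
Deleting the in-function `from opentelemetry import trace` only works if `trace` is already bound at module scope, since the assertion two lines below still uses it. A sketch of the assumed import layout at the top of the test file (not shown in this diff):

# Assumed module-level imports; the commit removes the redundant
# in-function import, so these presumably already exist at the top.
from opentelemetry import trace
from opentelemetry.trace import INVALID_SPAN

# With no span active, the API returns INVALID_SPAN, which is what the
# test asserts before invoking the compiled graph.
assert trace.get_current_span() == INVALID_SPAN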
