 from opentelemetry.trace import SpanKind


-@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
-def test_sequential_chain(instrument_langchain, span_exporter):
-    span_exporter.clear()
-
-    session = boto3.Session(region_name="us-west-2")
-
-    bedrock_client = session.client(service_name="bedrock-runtime", region_name="us-west-2")
-
-    llm = BedrockLLM(
+def create_bedrock_llm(region="us-west-2"):
+    """Create and return a BedrockLLM instance."""
+    session = boto3.Session(region_name=region)
+    bedrock_client = session.client(service_name="bedrock-runtime", region_name=region)
+    return BedrockLLM(
         client=bedrock_client,
         model_id="anthropic.claude-v2",
-        model_kwargs={
-            "max_tokens_to_sample": 500,
-            "temperature": 0.7,
-        },
+        model_kwargs={"max_tokens_to_sample": 500, "temperature": 0.7},
     )
-    synopsis_template = """You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.
+
+
+def create_chains(llm):
+    """Create and return the synopsis chain, review chain, and overall chain."""
+
+    synopsis_prompt = PromptTemplate(
+        input_variables=["title", "era"],
+        template="""You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.

 Title: {title}
 Era: {era}
-Playwright: This is a synopsis for the above play:"""  # noqa: E501
-    synopsis_prompt_template = PromptTemplate(input_variables=["title", "era"], template=synopsis_template)
-    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt_template, output_key="synopsis", name="synopsis")
+Playwright: This is a synopsis for the above play:""",  # noqa: E501
+    )
+    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt, output_key="synopsis", name="synopsis")

-    template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
+    review_prompt = PromptTemplate(
+        input_variables=["synopsis"],
+        template="""You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.

 Play Synopsis:
 {synopsis}
-Review from a New York Times play critic of the above play:"""  # noqa: E501
-    prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
-    review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review")
+Review from a New York Times play critic of the above play:""",  # noqa: E501
+    )
+    review_chain = LLMChain(llm=llm, prompt=review_prompt, output_key="review")

     overall_chain = SequentialChain(
         chains=[synopsis_chain, review_chain],
         input_variables=["era", "title"],
         output_variables=["synopsis", "review"],
         verbose=True,
     )
-    overall_chain.invoke({"title": "Tragedy at sunset on the beach", "era": "Victorian England"})

-    spans = span_exporter.get_finished_spans()
+    return overall_chain
+
+
+def validate_span(span, expected_kind, expected_attrs):
+    """Validate a span against expected values."""
+    assert span.kind == expected_kind
+    for attr in expected_attrs:
+        assert attr in span.attributes
+    return ast.literal_eval(span.attributes["gen_ai.prompt"]), ast.literal_eval(span.attributes["gen_ai.completion"])
+

+@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
+def test_sequential_chain(instrument_langchain, span_exporter):
+    span_exporter.clear()
+
+    llm = create_bedrock_llm()
+    chain = create_chains(llm)
+    input_data = {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    chain.invoke(input_data)
+
+    spans = span_exporter.get_finished_spans()
     langchain_spans = [span for span in spans if span.name.startswith("chain ")]

     assert [
@@ -68,36 +88,23 @@ def test_sequential_chain(instrument_langchain, span_exporter):
     review_span = next(span for span in spans if span.name == "chain LLMChain")
     overall_span = next(span for span in spans if span.name == "chain SequentialChain")

-    assert synopsis_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in synopsis_span.attributes
-    assert "gen_ai.completion" in synopsis_span.attributes
-
-    synopsis_prompt = ast.literal_eval(synopsis_span.attributes["gen_ai.prompt"])
-    synopsis_completion = ast.literal_eval(synopsis_span.attributes["gen_ai.completion"])
-
-    assert synopsis_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    synopsis_prompt, synopsis_completion = validate_span(
+        synopsis_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert synopsis_prompt == input_data
     assert "synopsis" in synopsis_completion

-    assert review_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in review_span.attributes
-    assert "gen_ai.completion" in review_span.attributes
-    print("Raw completion value:", repr(synopsis_span.attributes["gen_ai.completion"]))
-
-    review_prompt = ast.literal_eval(review_span.attributes["gen_ai.prompt"])
-    review_completion = ast.literal_eval(review_span.attributes["gen_ai.completion"])
-
+    review_prompt, review_completion = validate_span(
+        review_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
     assert "title" in review_prompt
     assert "era" in review_prompt
     assert "synopsis" in review_prompt
     assert "review" in review_completion

-    assert overall_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in overall_span.attributes
-    assert "gen_ai.completion" in overall_span.attributes
-
-    overall_prompt = ast.literal_eval(overall_span.attributes["gen_ai.prompt"])
-    overall_completion = ast.literal_eval(overall_span.attributes["gen_ai.completion"])
-
-    assert overall_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    overall_prompt, overall_completion = validate_span(
+        overall_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert overall_prompt == input_data
     assert "synopsis" in overall_completion
     assert "review" in overall_completion