 from opentelemetry.trace import SpanKind


-@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
-def test_sequential_chain(instrument_langchain, span_exporter):
-    span_exporter.clear()
-
-    session = boto3.Session(region_name="us-west-2")
-
-    bedrock_client = session.client(service_name="bedrock-runtime", region_name="us-west-2")
-
-    llm = BedrockLLM(
+def create_bedrock_llm(region="us-west-2"):
+    """Create and return a BedrockLLM instance."""
+    session = boto3.Session(region_name=region)
+    bedrock_client = session.client(service_name="bedrock-runtime", region_name=region)
+    return BedrockLLM(
         client=bedrock_client,
         model_id="anthropic.claude-v2",
-        model_kwargs={
-            "max_tokens_to_sample": 500,
-            "temperature": 0.7,
-        },
+        model_kwargs={"max_tokens_to_sample": 500, "temperature": 0.7},
     )
-    synopsis_template = """You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.
+
+
+def create_chains(llm):
29+ """Create and return the synopsis chain, review chain, and overall chain."""
+
+    synopsis_prompt = PromptTemplate(
+        input_variables=["title", "era"],
+        template="""You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.

 Title: {title}
 Era: {era}
-Playwright: This is a synopsis for the above play:"""  # noqa: E501
-    synopsis_prompt_template = PromptTemplate(input_variables=["title", "era"], template=synopsis_template)
-    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt_template, output_key="synopsis", name="synopsis")
+Playwright: This is a synopsis for the above play:""",  # noqa: E501
+    )
+    synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt, output_key="synopsis", name="synopsis")

-    template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
+    review_prompt = PromptTemplate(
+        input_variables=["synopsis"],
+        template="""You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.

 Play Synopsis:
 {synopsis}
-Review from a New York Times play critic of the above play:"""  # noqa: E501
-    prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
-    review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review")
+Review from a New York Times play critic of the above play:""",  # noqa: E501
+    )
+    review_chain = LLMChain(llm=llm, prompt=review_prompt, output_key="review")

     overall_chain = SequentialChain(
         chains=[synopsis_chain, review_chain],
         input_variables=["era", "title"],
         output_variables=["synopsis", "review"],
         verbose=True,
     )
-    overall_chain.invoke({"title": "Tragedy at sunset on the beach", "era": "Victorian England"})

-    spans = span_exporter.get_finished_spans()
+    return overall_chain
+
+
+def validate_span(span, expected_kind, expected_attrs):
62+ """Validate a span against expected values."""
+    assert span.kind == expected_kind
+    for attr in expected_attrs:
+        assert attr in span.attributes
+    return ast.literal_eval(span.attributes["gen_ai.prompt"]), ast.literal_eval(span.attributes["gen_ai.completion"])
+

+@pytest.mark.vcr(filter_headers=["Authorization", "X-Amz-Date", "X-Amz-Security-Token"], record_mode="once")
+def test_sequential_chain(instrument_langchain, span_exporter):
+    span_exporter.clear()
+
+    llm = create_bedrock_llm()
+    chain = create_chains(llm)
+    input_data = {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    chain.invoke(input_data)
+
+    spans = span_exporter.get_finished_spans()
     langchain_spans = [span for span in spans if span.name.startswith("chain ")]

     assert [
@@ -68,36 +88,23 @@ def test_sequential_chain(instrument_langchain, span_exporter):
     review_span = next(span for span in spans if span.name == "chain LLMChain")
     overall_span = next(span for span in spans if span.name == "chain SequentialChain")

-    assert synopsis_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in synopsis_span.attributes
-    assert "gen_ai.completion" in synopsis_span.attributes
-
-    synopsis_prompt = ast.literal_eval(synopsis_span.attributes["gen_ai.prompt"])
-    synopsis_completion = ast.literal_eval(synopsis_span.attributes["gen_ai.completion"])
-
-    assert synopsis_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    synopsis_prompt, synopsis_completion = validate_span(
+        synopsis_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert synopsis_prompt == input_data
     assert "synopsis" in synopsis_completion

-    assert review_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in review_span.attributes
-    assert "gen_ai.completion" in review_span.attributes
-    print("Raw completion value:", repr(synopsis_span.attributes["gen_ai.completion"]))
-
-    review_prompt = ast.literal_eval(review_span.attributes["gen_ai.prompt"])
-    review_completion = ast.literal_eval(review_span.attributes["gen_ai.completion"])
-
+    review_prompt, review_completion = validate_span(
+        review_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
     assert "title" in review_prompt
     assert "era" in review_prompt
     assert "synopsis" in review_prompt
     assert "review" in review_completion

-    assert overall_span.kind == SpanKind.INTERNAL
-    assert "gen_ai.prompt" in overall_span.attributes
-    assert "gen_ai.completion" in overall_span.attributes
-
-    overall_prompt = ast.literal_eval(overall_span.attributes["gen_ai.prompt"])
-    overall_completion = ast.literal_eval(overall_span.attributes["gen_ai.completion"])
-
-    assert overall_prompt == {"title": "Tragedy at sunset on the beach", "era": "Victorian England"}
+    overall_prompt, overall_completion = validate_span(
+        overall_span, SpanKind.INTERNAL, ["gen_ai.prompt", "gen_ai.completion"]
+    )
+    assert overall_prompt == input_data
     assert "synopsis" in overall_completion
     assert "review" in overall_completion