@@ -16,43 +16,7 @@ def _render_prompt(template_config: dict) -> str:
 
     chat_cfg = template_config.get("chat")
     if chat_cfg:
-        parts: list[str] = []
-
-        system_items = chat_cfg.get("system", [])
-        logger.debug("Processing system messages for prompt rendering", extra={"system_items": system_items})
-        if isinstance(system_items, list):
-            system_texts = [
-                item["text"].strip()
-                for item in system_items
-                if isinstance(item, dict) and "text" in item and item["text"].strip()
-            ]
-            if system_texts:
-                parts.append("\n".join(system_texts))
-
-        role_prefix = {
-            "user": "Human: ",
-            "assistant": "Assistant: ",
-        }
-
-        logger.debug("Processing chat messages for prompt rendering", extra={"messages": chat_cfg.get("messages", [])})
-
-        for msg in chat_cfg.get("messages", []):
-            role = (msg.get("role") or "").lower()
-            prefix = role_prefix.get(role)
-            if not prefix:
-                continue
-
-            content_items = msg.get("content", [])
-            content_texts = [
-                item["text"].strip()
-                for item in content_items
-                if isinstance(item, dict) and "text" in item and item["text"].strip()
-            ]
-
-            if content_texts:
-                parts.append(prefix + "\n".join(content_texts))
-
-        return "\n\n".join(parts)
+        return parse_system_message(chat_cfg)
 
     text_cfg = template_config.get("text")
     if isinstance(text_cfg, dict) and "text" in text_cfg:
@@ -67,6 +31,46 @@ def _render_prompt(template_config: dict) -> str:
     raise PromptLoadError(f"Unsupported prompt configuration. Keys: {list(template_config.keys())}")
 
 
+def parse_system_message(chat_cfg: dict) -> str:
+    parts: list[str] = []
+
+    system_items = chat_cfg.get("system", [])
+    logger.debug("Processing system messages for prompt rendering", extra={"system_items": system_items})
+    if isinstance(system_items, list):
+        system_texts = [
+            item["text"].strip()
+            for item in system_items
+            if isinstance(item, dict) and "text" in item and item["text"].strip()
+        ]
+        if system_texts:
+            parts.append("\n".join(system_texts))
+
+    role_prefix = {
+        "user": "Human: ",
+        "assistant": "Assistant: ",
+    }
+
+    logger.debug("Processing chat messages for prompt rendering", extra={"messages": chat_cfg.get("messages", [])})
+
+    for msg in chat_cfg.get("messages", []):
+        role = (msg.get("role") or "").lower()
+        prefix = role_prefix.get(role)
+        if not prefix:
+            continue
+
+        content_items = msg.get("content", [])
+        content_texts = [
+            item["text"].strip()
+            for item in content_items
+            if isinstance(item, dict) and "text" in item and item["text"].strip()
+        ]
+
+        if content_texts:
+            parts.append(prefix + "\n".join(content_texts))
+
+    return "\n\n".join(parts)
+
+
 def load_prompt(prompt_name: str, prompt_version: str = None) -> str:
     """
     Load a prompt template from Amazon Bedrock Prompt Management.
@@ -97,10 +101,6 @@ def load_prompt(prompt_name: str, prompt_version: str = None) -> str:
     response = client.get_prompt(promptIdentifier=prompt_id)
 
     template_config = response["variants"][0]["templateConfiguration"]
-    # TODO: derive actual inference config then pass it along with prompt text to the retrieve_and_generate call
-    # so that all settings from the prompt management are applied directly from the cdk
-    # inference_config = response["variants"][0]["inferenceConfiguration"]
-
     prompt_text = _render_prompt(template_config)
    actual_version = response.get("version", "DRAFT")
 
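
For reference, a minimal sketch of what the extracted parse_system_message helper renders, using a hand-built chat template configuration in the shape this code expects from Bedrock Prompt Management (the dict and its values below are illustrative, not a real get_prompt response):

chat_cfg = {
    "system": [{"text": "You are a concise assistant."}],
    "messages": [
        {"role": "user", "content": [{"text": "Summarize {{document}} in one line."}]},
        {"role": "assistant", "content": [{"text": "Here is the summary:"}]},
    ],
}

print(parse_system_message(chat_cfg))
# You are a concise assistant.
#
# Human: Summarize {{document}} in one line.
#
# Assistant: Here is the summary:

Messages with unrecognized roles, and content items without non-empty text, are skipped, so the rendered prompt contains only the system text and the Human/Assistant turns that actually carry content.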