Skip to content

Commit 49f12d0

Browse files
feat: Use Markdown in Prompt Response
1 parent e39b31b commit 49f12d0

File tree

3 files changed

+46
-41
lines changed

3 files changed

+46
-41
lines changed

packages/cdk/resources/BedrockPromptSettings.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,10 @@ export class BedrockPromptSettings extends Construct {
7272

7373
const file = files.find(file => file.startsWith(`${type}Prompt`))!
7474

75+
if (!file) {
76+
throw new Error(`No prompt file found for type: ${type}`)
77+
}
78+
7579
const text = fs.readFileSync(file, "utf-8")
7680

7781
return {text, filename: file}

packages/slackBotFunction/app/services/prompt_loader.py

Lines changed: 41 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -16,43 +16,7 @@ def _render_prompt(template_config: dict) -> str:
1616

1717
chat_cfg = template_config.get("chat")
1818
if chat_cfg:
19-
parts: list[str] = []
20-
21-
system_items = chat_cfg.get("system", [])
22-
logger.debug("Processing system messages for prompt rendering", extra={"system_items": system_items})
23-
if isinstance(system_items, list):
24-
system_texts = [
25-
item["text"].strip()
26-
for item in system_items
27-
if isinstance(item, dict) and "text" in item and item["text"].strip()
28-
]
29-
if system_texts:
30-
parts.append("\n".join(system_texts))
31-
32-
role_prefix = {
33-
"user": "Human: ",
34-
"assistant": "Assistant: ",
35-
}
36-
37-
logger.debug("Processing chat messages for prompt rendering", extra={"messages": chat_cfg.get("messages", [])})
38-
39-
for msg in chat_cfg.get("messages", []):
40-
role = (msg.get("role") or "").lower()
41-
prefix = role_prefix.get(role)
42-
if not prefix:
43-
continue
44-
45-
content_items = msg.get("content", [])
46-
content_texts = [
47-
item["text"].strip()
48-
for item in content_items
49-
if isinstance(item, dict) and "text" in item and item["text"].strip()
50-
]
51-
52-
if content_texts:
53-
parts.append(prefix + "\n".join(content_texts))
54-
55-
return "\n\n".join(parts)
19+
return parse_system_message(chat_cfg)
5620

5721
text_cfg = template_config.get("text")
5822
if isinstance(text_cfg, dict) and "text" in text_cfg:
@@ -67,6 +31,46 @@ def _render_prompt(template_config: dict) -> str:
6731
raise PromptLoadError(f"Unsupported prompt configuration. Keys: {list(template_config.keys())}")
6832

6933

34+
def parse_system_message(chat_cfg: dict) -> str:
    """Render a Bedrock chat-style prompt configuration as plain text.

    Any system messages are joined first, followed by each user/assistant
    message prefixed with "Human: " / "Assistant: ". Sections are separated
    by blank lines. Messages with other roles, or entries without a
    non-blank "text" value, are silently skipped.

    Args:
        chat_cfg: The "chat" template configuration dict, expected to hold
            optional "system" and "messages" lists of {"text": ...} items.

    Returns:
        The rendered prompt text (empty string when nothing usable exists).
    """

    def _clean_texts(items) -> list:
        # Keep only dict entries carrying a non-blank "text" value, stripped.
        return [
            entry["text"].strip()
            for entry in items
            if isinstance(entry, dict) and "text" in entry and entry["text"].strip()
        ]

    rendered = []

    sys_items = chat_cfg.get("system", [])
    logger.debug("Processing system messages for prompt rendering", extra={"system_items": sys_items})
    # Only a list-shaped "system" section is considered; anything else is ignored.
    if isinstance(sys_items, list):
        sys_texts = _clean_texts(sys_items)
        if sys_texts:
            rendered.append("\n".join(sys_texts))

    # Roles outside this mapping (e.g. "tool") are dropped on purpose.
    prefixes = {
        "user": "Human: ",
        "assistant": "Assistant: ",
    }

    logger.debug("Processing chat messages for prompt rendering", extra={"messages": chat_cfg.get("messages", [])})

    for message in chat_cfg.get("messages", []):
        prefix = prefixes.get((message.get("role") or "").lower())
        if prefix:
            body_texts = _clean_texts(message.get("content", []))
            if body_texts:
                rendered.append(prefix + "\n".join(body_texts))

    return "\n\n".join(rendered)
72+
73+
7074
def load_prompt(prompt_name: str, prompt_version: str = None) -> str:
7175
"""
7276
Load a prompt template from Amazon Bedrock Prompt Management.
@@ -97,10 +101,6 @@ def load_prompt(prompt_name: str, prompt_version: str = None) -> str:
97101
response = client.get_prompt(promptIdentifier=prompt_id)
98102

99103
template_config = response["variants"][0]["templateConfiguration"]
100-
# TODO: derive actual inference config then pass it along with prompt text to the retrieve_and_generate call
101-
# so that all settings from the prompt management are applied directly from the cdk
102-
# inference_config = response["variants"][0]["inferenceConfiguration"]
103-
104104
prompt_text = _render_prompt(template_config)
105105
actual_version = response.get("version", "DRAFT")
106106

prompts/systemPrompt.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
</Constraints>
2828

2929
<Output>
30+
- Use Markdown formatting; avoid XML tags
3031
- Structured, informative, and tailored to the specific context of the question.
3132
- Provide evidence to support results
3233
- Acknowledge any assumptions or limitations in your knowledge or understanding.

0 commit comments

Comments
 (0)