Skip to content

Commit cd3375f

Browse files
feat: Get ai agent from bedrock
1 parent ee4477c commit cd3375f

File tree

2 files changed

+15
-17
lines changed

2 files changed

+15
-17
lines changed

packages/slackBotFunction/app/services/bedrock.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,10 @@ def query_bedrock(user_query: str, session_id: str = None) -> RetrieveAndGenerat
4242
"type": "KNOWLEDGE_BASE",
4343
"knowledgeBaseConfiguration": {
4444
"knowledgeBaseId": config.KNOWLEDGEBASE_ID,
45-
"modelArn": config.RAG_MODEL_ID,
46-
"retrievalConfiguration": {"vectorSearchConfiguration": {"numberOfResults": 5}},
45+
"modelArn": prompt_template.get("model_id", config.RAG_MODEL_ID),
46+
"retrievalConfiguration": {
47+
"vectorSearchConfiguration": {"numberOfResults": 10, "overrideSearchType": "SEMANTIC"}
48+
},
4749
"generationConfiguration": {
4850
"guardrailConfiguration": {
4951
"guardrailId": config.GUARD_RAIL_ID,
@@ -58,16 +60,6 @@ def query_bedrock(user_query: str, session_id: str = None) -> RetrieveAndGenerat
5860
}
5961
},
6062
},
61-
"orchestrationConfiguration": {
62-
"inferenceConfig": {
63-
"textInferenceConfig": {
64-
**inference_config,
65-
"stopSequences": [
66-
"Human:",
67-
],
68-
}
69-
},
70-
},
7163
},
7264
},
7365
}

packages/slackBotFunction/app/services/prompt_loader.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -92,23 +92,28 @@ def load_prompt(prompt_name: str, prompt_version: str = None) -> dict:
9292

9393
logger.info(
9494
f"Loading prompt {prompt_name}' (ID: {prompt_id})",
95-
extra={"prompt_name": prompt_name, "prompt_id": prompt_id, "prompt_version": prompt_version},
95+
extra={"prompt_version": prompt_version},
9696
)
9797

9898
if is_explicit_version:
9999
response = client.get_prompt(promptIdentifier=prompt_id, promptVersion=selected_version)
100100
else:
101101
response = client.get_prompt(promptIdentifier=prompt_id)
102102

103+
logger.info("Prompt Found", extra={"prompt": response})
104+
105+
variant = response["variants"][0]
106+
103107
# Extract and render the prompt template
104-
template_config = response["variants"][0]["templateConfiguration"]
108+
template_config = variant["templateConfiguration"]
105109
prompt_text = _render_prompt(template_config)
106110
actual_version = response.get("version", "DRAFT")
107111

108112
# Extract inference configuration with defaults
109113
default_inference = {"temperature": 0, "topP": 1, "maxTokens": 1500}
110-
raw_inference = response["variants"][0].get("inferenceConfiguration", {})
111-
raw_text_config = raw_inference.get("textInferenceConfiguration", {})
114+
model_id = variant.get("modelId", "")
115+
raw_inference = variant.get("inferenceConfiguration", {})
116+
raw_text_config = raw_inference.get("text", {})
112117
inference_config = {**default_inference, **raw_text_config}
113118

114119
logger.info(
@@ -117,10 +122,11 @@ def load_prompt(prompt_name: str, prompt_version: str = None) -> dict:
117122
"prompt_name": prompt_name,
118123
"prompt_id": prompt_id,
119124
"version_used": actual_version,
125+
"model_id": model_id,
120126
**inference_config,
121127
},
122128
)
123-
return {"prompt_text": prompt_text, "inference_config": inference_config}
129+
return {"prompt_text": prompt_text, "model_id": model_id, "inference_config": inference_config}
124130

125131
except ClientError as e:
126132
error_code = e.response.get("Error", {}).get("Code", "Unknown")

0 commit comments

Comments (0)