Merged

Commits (35)
031881f  pre-test (Avi-Robusta, Oct 12, 2025)
a51bfe0  pre-test refactor (Avi-Robusta, Oct 12, 2025)
ecdeae9  test working version (Avi-Robusta, Oct 12, 2025)
2511ad3  working version (Avi-Robusta, Oct 12, 2025)
e9956f6  refactoring (Avi-Robusta, Oct 16, 2025)
8fcd7ac  working version (Avi-Robusta, Oct 16, 2025)
50d1132  moved date to improve cacheing (Avi-Robusta, Oct 16, 2025)
f959337  small improvements (Avi-Robusta, Oct 16, 2025)
a83ef67  added py tests for new runbooks prompt (Avi-Robusta, Oct 16, 2025)
c83a6b7  added runbook eval (Avi-Robusta, Oct 16, 2025)
1776035  fix runbook eval (Avi-Robusta, Oct 19, 2025)
dbd7074  eval to use mock supabase data (Avi-Robusta, Oct 19, 2025)
67bdfd5  remove time changes (Avi-Robusta, Oct 19, 2025)
d30e342  refactoring (Avi-Robusta, Oct 19, 2025)
8daa403  fix runbook eval (Avi-Robusta, Oct 19, 2025)
c7014ad  fix runbook prompt (Avi-Robusta, Oct 19, 2025)
f853b3a  refactoring and cleaning (Avi-Robusta, Oct 19, 2025)
19e3ff9  refactoring and cleaning (Avi-Robusta, Oct 19, 2025)
4ca5cb4  removed unneeded file (Avi-Robusta, Oct 19, 2025)
6349c2d  Merge branch 'master' into robusta-runbooks (Avi-Robusta, Oct 19, 2025)
f7b0131  test fix (Avi-Robusta, Oct 19, 2025)
e7ba549  runbook pytest fix (Avi-Robusta, Oct 20, 2025)
2601c5c  runbook pytest fix (Avi-Robusta, Oct 20, 2025)
f385bf9  runbook pytest removed, moved runbooks to user prompt (Avi-Robusta, Oct 20, 2025)
481fd78  Merge branch 'master' into robusta-runbooks (Avi-Robusta, Oct 22, 2025)
271a421  pr changes (Avi-Robusta, Oct 23, 2025)
173bd89  added comment and fixed test (Avi-Robusta, Oct 26, 2025)
0da9362  Merge branch 'master' into robusta-runbooks (Avi-Robusta, Oct 26, 2025)
f256489  pr change (Avi-Robusta, Oct 27, 2025)
80bfb34  Merge branch 'master' into robusta-runbooks (Avi-Robusta, Oct 27, 2025)
879c873  pr change (Avi-Robusta, Oct 27, 2025)
5a077ee  Merge branch 'robusta-runbooks' of https://github.com/robusta-dev/hol… (Avi-Robusta, Oct 27, 2025)
9d9575b  pr change (Avi-Robusta, Oct 27, 2025)
f887e6d  pr change (Avi-Robusta, Oct 27, 2025)
00cf43c  Merge branch 'master' into robusta-runbooks (Avi-Robusta, Oct 27, 2025)
5 changes: 2 additions & 3 deletions holmes/config.py
@@ -223,10 +223,9 @@ def __get_cluster_name() -> Optional[str]:

         return None

-    @staticmethod
-    def get_runbook_catalog() -> Optional[RunbookCatalog]:
+    def get_runbook_catalog(self) -> Optional[RunbookCatalog]:
         # TODO(mainred): besides the built-in runbooks, we need to allow the user to bring their own runbooks
-        runbook_catalog = load_runbook_catalog()
+        runbook_catalog = load_runbook_catalog(dal=self.dal)
        return runbook_catalog

     def create_console_tool_executor(
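Making get_runbook_catalog an instance method gives it access to self.dal, so the catalog can include runbooks stored in Supabase alongside the built-in ones. A minimal sketch of what the loader's new signature could look like; the merge logic, the _load_builtin_catalog helper, and the dal.get_runbooks() call are assumptions, not the PR's actual implementation:

from typing import Optional

from holmes.plugins.runbooks import RunbookCatalog


def load_runbook_catalog(dal=None) -> Optional[RunbookCatalog]:
    # Load the runbooks shipped with Holmes (built-in catalog).
    catalog = _load_builtin_catalog()  # hypothetical internal helper
    # When a DAL is supplied, merge account-level runbooks into the catalog.
    if dal is not None:
        catalog.runbooks.extend(dal.get_runbooks())  # hypothetical DAL method
    return catalog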
52 changes: 40 additions & 12 deletions holmes/core/conversations.py
@@ -1,7 +1,6 @@
 from typing import Dict, List, Optional

 import sentry_sdk
-
 from holmes.config import Config
 from holmes.core.models import (
     ToolCallConversationResult,
@@ -10,9 +9,10 @@
 )
 from holmes.plugins.prompts import load_and_render_prompt
 from holmes.core.tool_calling_llm import ToolCallingLLM
+from holmes.plugins.runbooks import RunbookCatalog
 from holmes.utils.global_instructions import (
     Instructions,
-    add_global_instructions_to_user_prompt,
+    add_runbooks_to_user_prompt,
 )

 DEFAULT_TOOL_SIZE = 10000
@@ -64,6 +64,7 @@ def build_issue_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """
     This function generates a list of messages for issue conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -120,8 +121,10 @@ def build_issue_chat_messages(
     tools_for_investigation = issue_chat_request.investigation_result.tools

     if not conversation_history or len(conversation_history) == 0:
-        user_prompt = add_global_instructions_to_user_prompt(
-            user_prompt, global_instructions
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )

         number_of_tools_for_investigation = len(tools_for_investigation)  # type: ignore
@@ -134,6 +137,7 @@ def build_issue_chat_messages(
                 "issue": issue_chat_request.issue_type,
                 "toolsets": ai.tool_executor.toolsets,
                 "cluster_name": config.cluster_name,
+                "runbooks_enabled": True if runbooks else False,
             },
         )
         messages = [
messages = [
Expand All @@ -154,6 +158,7 @@ def build_issue_chat_messages(
"issue": issue_chat_request.issue_type,
"toolsets": ai.tool_executor.toolsets,
"cluster_name": config.cluster_name,
"runbooks_enabled": True if runbooks else False,
}
system_prompt_without_tools = load_and_render_prompt(
template_path, template_context_without_tools
@@ -187,6 +192,7 @@ def build_issue_chat_messages(
             "issue": issue_chat_request.issue_type,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_with_truncated_tools = load_and_render_prompt(
             template_path, truncated_template_context
@@ -202,8 +208,10 @@ def build_issue_chat_messages(
             },
         ]

-        user_prompt = add_global_instructions_to_user_prompt(
-            user_prompt, global_instructions
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )

         conversation_history.append(
@@ -228,6 +236,7 @@ def build_issue_chat_messages(
             "issue": issue_chat_request.issue_type,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_without_tools = load_and_render_prompt(
             template_path, template_context_without_tools
@@ -251,6 +260,7 @@ def build_issue_chat_messages(
             "issue": issue_chat_request.issue_type,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_with_truncated_tools = load_and_render_prompt(
             template_path, template_context
@@ -267,6 +277,7 @@ def add_or_update_system_prompt(
     ai: ToolCallingLLM,
     config: Config,
     additional_system_prompt: Optional[str] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """Either add the system prompt or replace an existing system prompt.
     As a 'defensive' measure, this code will only replace an existing system prompt if it is the
@@ -278,10 +289,10 @@ def add_or_update_system_prompt(
     context = {
         "toolsets": ai.tool_executor.toolsets,
         "cluster_name": config.cluster_name,
+        "runbooks_enabled": True if runbooks else False,
     }

     system_prompt = load_and_render_prompt(template_path, context)
-
     if additional_system_prompt:
         system_prompt = system_prompt + "\n" + additional_system_prompt

@@ -311,6 +322,7 @@ def build_chat_messages(
     config: Config,
     global_instructions: Optional[Instructions] = None,
     additional_system_prompt: Optional[str] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ) -> List[dict]:
     """
     This function generates a list of messages for general chat conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -370,9 +382,15 @@ def build_chat_messages(
         ai=ai,
         config=config,
         additional_system_prompt=additional_system_prompt,
+        runbooks=runbooks,
     )

-    ask = add_global_instructions_to_user_prompt(ask, global_instructions)
+    ask = add_runbooks_to_user_prompt(
+        user_prompt=ask,
+        runbook_catalog=runbooks,
+        global_instructions=global_instructions,
+    )
+
     conversation_history.append(  # type: ignore
         {
             "role": "user",
@@ -404,6 +422,7 @@ def build_workload_health_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """
     This function generates a list of messages for workload health conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -462,8 +481,10 @@ def build_workload_health_chat_messages(
     resource = workload_health_chat_request.resource

     if not conversation_history or len(conversation_history) == 0:
-        user_prompt = add_global_instructions_to_user_prompt(
-            user_prompt, global_instructions
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )

         number_of_tools_for_workload = len(tools_for_workload)  # type: ignore
@@ -476,6 +497,7 @@ def build_workload_health_chat_messages(
                 "resource": resource,
                 "toolsets": ai.tool_executor.toolsets,
                 "cluster_name": config.cluster_name,
+                "runbooks_enabled": True if runbooks else False,
             },
         )
         messages = [
@@ -496,6 +518,7 @@ def build_workload_health_chat_messages(
             "resource": resource,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_without_tools = load_and_render_prompt(
             template_path, template_context_without_tools
@@ -529,6 +552,7 @@ def build_workload_health_chat_messages(
             "resource": resource,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_with_truncated_tools = load_and_render_prompt(
             template_path, truncated_template_context
@@ -544,8 +568,10 @@ def build_workload_health_chat_messages(
             },
         ]

-        user_prompt = add_global_instructions_to_user_prompt(
-            user_prompt, global_instructions
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )

         conversation_history.append(
@@ -570,6 +596,7 @@ def build_workload_health_chat_messages(
             "resource": resource,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_without_tools = load_and_render_prompt(
             template_path, template_context_without_tools
@@ -593,6 +620,7 @@ def build_workload_health_chat_messages(
             "resource": resource,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbooks else False,
         }
         system_prompt_with_truncated_tools = load_and_render_prompt(
             template_path, template_context
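Every call site above swaps add_global_instructions_to_user_prompt for add_runbooks_to_user_prompt, and every system-prompt context gains a boolean runbooks_enabled flag. Inferred from the call sites in this diff, the helper's signature looks roughly like the sketch below; the body is illustrative only, and the attribute names on RunbookCatalog and Instructions are assumptions rather than the PR's actual implementation:

from typing import List, Optional

from holmes.plugins.runbooks import RunbookCatalog
from holmes.utils.global_instructions import Instructions


def add_runbooks_to_user_prompt(
    user_prompt: str,
    runbook_catalog: Optional[RunbookCatalog] = None,
    global_instructions: Optional[Instructions] = None,
    issue_instructions: Optional[List[str]] = None,
    resource_instructions=None,
) -> str:
    # List the available runbooks so the model can decide which one to fetch.
    if runbook_catalog:
        user_prompt += "\n# Available runbooks:\n"
        for entry in runbook_catalog.runbooks:  # attribute name assumed
            user_prompt += f"* {entry}\n"
    # Preserve the old behavior of appending account-wide global instructions.
    if global_instructions and global_instructions.instructions:
        for instruction in global_instructions.instructions:
            user_prompt += f"* {instruction}\n"
    return user_prompt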
37 changes: 17 additions & 20 deletions holmes/core/investigation.py
@@ -1,14 +1,16 @@
 import logging
 from typing import Optional


 from holmes.common.env_vars import HOLMES_POST_PROCESSING_PROMPT
 from holmes.config import Config
 from holmes.core.investigation_structured_output import process_response_into_sections
 from holmes.core.issue import Issue
 from holmes.core.models import InvestigateRequest, InvestigationResult
 from holmes.core.supabase_dal import SupabaseDal
 from holmes.core.tracing import DummySpan, SpanType
-from holmes.utils.global_instructions import add_global_instructions_to_user_prompt
+from holmes.plugins.runbooks import RunbookCatalog
+from holmes.utils.global_instructions import add_runbooks_to_user_prompt

 from holmes.core.investigation_structured_output import (
     DEFAULT_SECTIONS,
@@ -25,6 +27,7 @@ def investigate_issues(
     config: Config,
     model: Optional[str] = None,
     trace_span=DummySpan(),
+    runbooks: Optional[RunbookCatalog] = None,
 ) -> InvestigationResult:
     context = dal.get_issue_data(investigate_request.context.get("robusta_issue_id"))

@@ -60,6 +63,7 @@ def investigate_issues(
         global_instructions=global_instructions,
         sections=investigate_request.sections,
         trace_span=trace_span,
+        runbooks=runbooks,
     )

     (text_response, sections) = process_response_into_sections(investigation.result)
@@ -95,18 +99,11 @@ def get_investigation_context(
         raw=raw_data,
     )

-    runbooks = ai.runbook_manager.get_instructions_for_issue(issue)
+    issue_instructions = ai.runbook_manager.get_instructions_for_issue(issue)

-    instructions = dal.get_resource_instructions(
+    resource_instructions = dal.get_resource_instructions(
         "alert", investigate_request.context.get("issue_type")
     )
-    if instructions is not None and instructions.instructions:
-        runbooks.extend(instructions.instructions)
-    if instructions is not None and len(instructions.documents) > 0:
-        docPrompts = []
-        for document in instructions.documents:
-            docPrompts.append(f"* fetch information from this URL: {document.url}\n")
-        runbooks.extend(docPrompts)

     # This section is about setting vars to request the LLM to return structured output.
     # It does not mean that Holmes will not return structured sections for investigation as it is
@@ -131,6 +128,7 @@ def get_investigation_context(
     else:
         logging.info("Structured output is disabled for this request")

+    runbook_catalog = config.get_runbook_catalog()
     system_prompt = load_and_render_prompt(
         investigate_request.prompt_template,
         {
@@ -139,21 +137,20 @@ def get_investigation_context(
             "structured_output": request_structured_output_from_llm,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbook_catalog else False,
         },
     )

     user_prompt = ""
-    if runbooks:
-        for runbook_str in runbooks:
-            user_prompt += f"* {runbook_str}\n"
-
-    user_prompt = f'My instructions to check \n"""{user_prompt}"""'
-
     global_instructions = dal.get_global_instructions_for_account()
-    user_prompt = add_global_instructions_to_user_prompt(
-        user_prompt, global_instructions
+    user_prompt = add_runbooks_to_user_prompt(
+        user_prompt=user_prompt,
+        runbook_catalog=runbook_catalog,
+        global_instructions=global_instructions,
+        issue_instructions=issue_instructions,
+        resource_instructions=resource_instructions,
     )

-    user_prompt = f"{user_prompt}\n This is context from the issue {issue.raw}"
+    user_prompt = f"{user_prompt}\n #This is context from the issue:\n{issue.raw}"

-    return ai, system_prompt, user_prompt, response_format, sections, runbooks
+    return ai, system_prompt, user_prompt, response_format, sections, issue_instructions
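The inline loop deleted above, which bulleted issue runbooks and turned resource documents into "fetch information from this URL" prompts, presumably moves inside the shared helper now that issue_instructions and resource_instructions are passed explicitly. A hedged guess at that branch of the helper, assuming ResourceInstructions keeps the instructions and documents fields seen in the removed code:

# Illustrative fragment only; field names follow the code removed above.
if resource_instructions is not None:
    for instruction in resource_instructions.instructions:
        user_prompt += f"* {instruction}\n"
    for document in resource_instructions.documents:
        user_prompt += f"* fetch information from this URL: {document.url}\n"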
2 changes: 1 addition & 1 deletion holmes/core/prompt.py
@@ -57,7 +57,7 @@ def build_initial_ask_messages(
     system_prompt_template = "builtin://generic_ask.jinja2"
     template_context = {
         "toolsets": tool_executor.toolsets,
-        "runbooks": runbooks or {},
+        "runbooks_enabled": True if runbooks else False,
         "system_prompt_additions": system_prompt_additions or "",
     }
     system_prompt_rendered = load_and_render_prompt(
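The template context now carries only a runbooks_enabled boolean instead of the full runbooks mapping, since the catalog itself travels in the user prompt. A small self-contained example of how a Jinja2 template can branch on that flag; the fragment text is hypothetical and not quoted from generic_ask.jinja2:

from jinja2 import Template

# Hypothetical prompt fragment; the real template is builtin://generic_ask.jinja2.
fragment = Template(
    "{% if runbooks_enabled %}"
    "A runbook catalog is included in the user message; fetch and follow a matching runbook before improvising."
    "{% else %}"
    "No runbook catalog is available for this session."
    "{% endif %}"
)

print(fragment.render(runbooks_enabled=True))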