Commit 7c1a697

Add default prompt values
Updated all LLM provider classes to fall back to built-in default values for prompt components when the corresponding environment variables are not set.
1 parent 16058ba commit 7c1a697
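
The change applies one pattern throughout: each required os.getenv lookup plus its ValueError guard becomes a single os.getenv call with a fallback default. A minimal sketch of the before/after, using the synthesis system prompt as the example (the default string is the one added in this commit):

    import os

    # Before: a missing variable was fatal.
    system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
    if not system_prompt:
        raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")

    # After: os.getenv returns its second argument when the variable is absent,
    # so the environment variable becomes an optional override.
    system_prompt = os.getenv(
        'LLM_SYNTHESIS_SYSTEM_PROMPT',
        "You are a security expert specializing in code review. "
        "Return ONLY JSON output with no additional text or explanation.")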

7 files changed: +88 -45 lines changed


pr_security_review/llm_providers/anthropic.py

Lines changed: 29 additions & 14 deletions
@@ -99,15 +99,32 @@ def get_skeptical_verification_prompt(self, code_changes: str, findings: List[Di
             for i, finding in enumerate(findings)
         ])
 
-        # Load prompt components from environment variables (required)
-        intro = os.getenv('LLM_SKEPTICAL_VERIFICATION_INTRO')
-        critical_questions = os.getenv('LLM_SKEPTICAL_VERIFICATION_CRITICAL_QUESTIONS')
-        be_critical = os.getenv('LLM_SKEPTICAL_VERIFICATION_BE_CRITICAL')
-        only_confirm = os.getenv('LLM_SKEPTICAL_VERIFICATION_ONLY_CONFIRM')
-        response_format = os.getenv('LLM_SKEPTICAL_VERIFICATION_RESPONSE_FORMAT')
+        # Default values (can be overridden by environment variables)
+        intro = os.getenv('LLM_SKEPTICAL_VERIFICATION_INTRO',
+            "You are a skeptical security auditor tasked with CRITICALLY reviewing and VERIFYING potential vulnerabilities.")
 
-        if not all([intro, critical_questions, be_critical, only_confirm, response_format]):
-            raise ValueError("Required LLM skeptical verification environment variables are not set. Please check your .env file.")
+        critical_questions = os.getenv('LLM_SKEPTICAL_VERIFICATION_CRITICAL_QUESTIONS',
+            "Ask yourself if this is really a vulnerability.")
+
+        be_critical = os.getenv('LLM_SKEPTICAL_VERIFICATION_BE_CRITICAL',
+            "Keep a critical mindset.")
+
+        only_confirm = os.getenv('LLM_SKEPTICAL_VERIFICATION_ONLY_CONFIRM',
+            "Only confirm vulnerabilities you are very sure about.")
+
+        response_format = os.getenv('LLM_SKEPTICAL_VERIFICATION_RESPONSE_FORMAT',
+            """Return ONLY a JSON object with your verification results:
+{
+    "verified_findings": [
+        {
+            "original_index": <index of the original finding, starting from 0>,
+            "is_real_vulnerability": <true/false>,
+            "verification_confidence": <0-100>,
+            "reason": "<why you believe this is or isn't a real vulnerability>"
+        }
+    ],
+    "summary": "<brief summary of your verification>"
+}""")
 
         prompt = f"""{intro}
 
@@ -145,9 +162,8 @@ def verify_findings(self, code_changes: str, initial_result: Dict) -> Tuple[Dict
             return initial_result, CostInfo(0.0, 0, 0, self.model, self.get_provider_name())
 
         try:
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT_ANTHROPIC')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT_ANTHROPIC environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT_ANTHROPIC',
+                "You are a skeptical security auditor. Return ONLY JSON output with no additional text or explanation.")
 
             response = self.client.messages.create(
                 model=self.model,
@@ -265,9 +281,8 @@ def analyze_security(self, code_changes: str, context: str = "") -> Tuple[Dict,
         """
         try:
             # First analysis
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT',
+                "You are a security expert specializing in code review. Return ONLY JSON output with no additional text or explanation.")
 
             response = self.client.messages.create(
                 model=self.model,

pr_security_review/llm_providers/base.py

Lines changed: 44 additions & 9 deletions
@@ -120,16 +120,51 @@ def get_security_prompt(code_changes: str, context: str = "") -> str:
         Returns:
             str: The formatted prompt for security analysis
         """
-        # Load prompt components from environment variables (required)
-        intro = os.getenv('LLM_SECURITY_PROMPT_INTRO')
-        focus_areas = os.getenv('LLM_SECURITY_PROMPT_FOCUS_AREAS')
-        important_notes = os.getenv('LLM_SECURITY_PROMPT_IMPORTANT_NOTES')
-        examples = os.getenv('LLM_SECURITY_PROMPT_EXAMPLES')
-        response_format = os.getenv('LLM_SECURITY_PROMPT_RESPONSE_FORMAT')
-        no_vulns_response = os.getenv('LLM_SECURITY_PROMPT_NO_VULNS_RESPONSE')
+        # Default values (can be overridden by environment variables)
+        intro = os.getenv('LLM_SECURITY_PROMPT_INTRO',
+            "You are a security expert specializing in Ethereum client implementations and blockchain security.")
 
-        if not all([intro, focus_areas, important_notes, examples, response_format, no_vulns_response]):
-            raise ValueError("Required LLM prompt environment variables are not set. Please check your .env file.")
+        focus_areas = os.getenv('LLM_SECURITY_PROMPT_FOCUS_AREAS',
+            "Pay special attention to blockchain-specific vulnerabilities.")
+
+        important_notes = os.getenv('LLM_SECURITY_PROMPT_IMPORTANT_NOTES',
+            "IMPORTANT:\n- Focus on concrete exploitable vulnerabilities.")
+
+        examples = os.getenv('LLM_SECURITY_PROMPT_EXAMPLES',
+            "Examples of concrete vulnerabilities:\n- Gas costs that deviate from EIP specifications.")
+
+        response_format = os.getenv('LLM_SECURITY_PROMPT_RESPONSE_FORMAT',
+            """CRITICAL: Your response must be ONLY the following JSON object, with no additional text, explanation, or markdown formatting:
+{
+    "confidence_score": <use highest confidence from findings, or 100 if no vulnerabilities>,
+    "has_vulnerabilities": <true/false>,
+    "findings": [
+        {
+            "severity": "<HIGH|MEDIUM|LOW>",
+            "description": "<specific vulnerability with exact code location>",
+            "recommendation": "<precise fix required>",
+            "confidence": <0-100, how certain you are about this specific vulnerability>,
+            "detailed_explanation": "<comprehensive explanation of what the issue is>",
+            "impact_explanation": "<what can happen if this vulnerability is exploited>",
+            "detailed_recommendation": "<detailed explanation of how to fix the issue>",
+            "code_example": "<the existing problematic code block, with proposed changes highlighted using html-style comments>",
+            "additional_resources": "<links to documentation or other resources>"
+        }
+    ],
+    "summary": "<only mention concrete vulnerabilities found>"
+}
+
+IMPORTANT: The overall confidence_score should match the highest confidence score from the findings.
+For example, if you find one vulnerability with 90% confidence, the overall confidence_score should also be 90.""")
+
+        no_vulns_response = os.getenv('LLM_SECURITY_PROMPT_NO_VULNS_RESPONSE',
+            """If no clear vulnerabilities are found in the code changes, return:
+{
+    "confidence_score": 100,
+    "has_vulnerabilities": false,
+    "findings": [],
+    "summary": "No concrete vulnerabilities identified in the changed code."
+}""")
 
         # Build the prompt
         prompt = f"""{intro}
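
The default response_format above commits the model to a bare JSON object, so a caller can parse the reply directly. A minimal sketch of how such a reply might be consumed (the function name and checks are illustrative, not part of this commit):

    import json

    def parse_security_reply(raw: str) -> dict:
        # The prompt forbids any text around the JSON object, so a parse
        # failure means the model ignored the format instructions.
        result = json.loads(raw)
        findings = result.get("findings", [])
        # Per the prompt, the overall confidence_score should equal the
        # highest per-finding confidence (or 100 when there are no findings).
        expected = max((f["confidence"] for f in findings), default=100)
        if result.get("confidence_score") != expected:
            raise ValueError("confidence_score does not match the findings")
        return result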

pr_security_review/llm_providers/deepseek.py

Lines changed: 2 additions & 3 deletions
@@ -90,9 +90,8 @@ def analyze_security(self, code_changes: str, context: str = "") -> Tuple[Dict,
         # context = ""
         try:
             # Prepare the messages
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT',
+                "You are a security expert specializing in code review. Return ONLY JSON output with no additional text or explanation.")
 
             messages = [
                 {

pr_security_review/llm_providers/gemini.py

Lines changed: 2 additions & 3 deletions
@@ -84,9 +84,8 @@ def analyze_security(self, code_changes: str, context: str = "") -> Tuple[Dict,
             "response_mime_type": "application/json",
         }
 
-        system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
-        if not system_prompt:
-            raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")
+        system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT',
+            "You are a security expert specializing in code review. Return ONLY JSON output with no additional text or explanation.")
 
         model = genai.GenerativeModel(
             model_name=self.model,

pr_security_review/llm_providers/llama.py

Lines changed: 2 additions & 3 deletions
@@ -95,9 +95,8 @@ def analyze_security(self, code_changes: str, context: str = "") -> Tuple[Dict,
         # context = ""
         try:
             # Prepare the messages
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT',
+                "You are a security expert specializing in code review. Return ONLY JSON output with no additional text or explanation.")
 
             messages = [
                 {

pr_security_review/llm_providers/multi_judge.py

Lines changed: 7 additions & 10 deletions
@@ -96,10 +96,9 @@ def _synthesize_report(self, all_results: Dict[str, Dict], vote_result: Dict, co
         """Use Anthropic to synthesize a combined report from all analyses."""
         anthropic = self.providers['anthropic']
 
-        # Load synthesis intro from environment variable (required)
-        synthesis_intro = os.getenv('LLM_SYNTHESIS_PROMPT_INTRO')
-        if not synthesis_intro:
-            raise ValueError("LLM_SYNTHESIS_PROMPT_INTRO environment variable is not set.")
+        # Load synthesis intro from environment variable (with default)
+        synthesis_intro = os.getenv('LLM_SYNTHESIS_PROMPT_INTRO',
+            "You are a security expert tasked with synthesizing multiple security analyses into a single coherent report.")
 
         # Create a synthesis prompt
         synthesis_prompt = f"""{synthesis_intro}
@@ -124,9 +123,8 @@ def _synthesize_report(self, all_results: Dict[str, Dict], vote_result: Dict, co
             for finding in result['findings']:
                 synthesis_prompt += f" * {finding['severity']}: {finding['description']}\n"
 
-        synthesis_instruction = os.getenv('LLM_SYNTHESIS_PROMPT_INSTRUCTION')
-        if not synthesis_instruction:
-            raise ValueError("LLM_SYNTHESIS_PROMPT_INSTRUCTION environment variable is not set.")
+        synthesis_instruction = os.getenv('LLM_SYNTHESIS_PROMPT_INSTRUCTION',
+            "Please synthesize these analyses into a single, coherent security report. Combine similar findings, use the highest confidence scores where appropriate, and create a unified summary.")
 
         synthesis_prompt += f"""
@@ -159,9 +157,8 @@ def _synthesize_report(self, all_results: Dict[str, Dict], vote_result: Dict, co
 
         try:
             # Use a fresh Claude instance for synthesis to avoid token limit issues
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT_SYNTHESIZE')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT_SYNTHESIZE environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT_SYNTHESIZE',
+                "You are a security expert specializing in synthesizing multiple analyses. Return ONLY JSON output with no additional text or explanation.")
 
             response = anthropic.client.messages.create(
                 model=anthropic.model,

pr_security_review/llm_providers/openai.py

Lines changed: 2 additions & 3 deletions
@@ -95,9 +95,8 @@ def analyze_security(self, code_changes: str, context: str = "") -> Tuple[Dict,
             - CostInfo: Cost information for the request
         """
         try:
-            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT')
-            if not system_prompt:
-                raise ValueError("LLM_SYNTHESIS_SYSTEM_PROMPT environment variable is not set.")
+            system_prompt = os.getenv('LLM_SYNTHESIS_SYSTEM_PROMPT',
+                "You are a security expert specializing in code review. Return ONLY JSON output with no additional text or explanation.")
 
             response = self.client.chat.completions.create(
                 model=self.model,
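
With defaults in place everywhere, a .env entry (or exported variable) now acts as an optional override rather than a hard requirement. One caveat worth noting: os.getenv falls back only when the variable is absent, not when it is set to an empty string. A small illustrative check (the override text is hypothetical):

    import os

    # Hypothetical override: any value set in the environment wins.
    os.environ['LLM_SECURITY_PROMPT_INTRO'] = "You are a security expert reviewing Go code."
    print(os.getenv('LLM_SECURITY_PROMPT_INTRO', "default intro"))  # prints the override

    # Caveat: an empty string is still "set", so the default is NOT used.
    os.environ['LLM_SECURITY_PROMPT_INTRO'] = ""
    print(os.getenv('LLM_SECURITY_PROMPT_INTRO', "default intro"))  # prints an empty line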
