@@ -120,16 +120,51 @@ def get_security_prompt(code_changes: str, context: str = "") -> str:
     Returns:
         str: The formatted prompt for security analysis
     """
-    # Load prompt components from environment variables (required)
-    intro = os.getenv('LLM_SECURITY_PROMPT_INTRO')
-    focus_areas = os.getenv('LLM_SECURITY_PROMPT_FOCUS_AREAS')
-    important_notes = os.getenv('LLM_SECURITY_PROMPT_IMPORTANT_NOTES')
-    examples = os.getenv('LLM_SECURITY_PROMPT_EXAMPLES')
-    response_format = os.getenv('LLM_SECURITY_PROMPT_RESPONSE_FORMAT')
-    no_vulns_response = os.getenv('LLM_SECURITY_PROMPT_NO_VULNS_RESPONSE')
+    # Default values (can be overridden by environment variables)
+    intro = os.getenv('LLM_SECURITY_PROMPT_INTRO',
+        "You are a security expert specializing in Ethereum client implementations and blockchain security.")
 
-    if not all([intro, focus_areas, important_notes, examples, response_format, no_vulns_response]):
-        raise ValueError("Required LLM prompt environment variables are not set. Please check your .env file.")
+    focus_areas = os.getenv('LLM_SECURITY_PROMPT_FOCUS_AREAS',
+        "Pay special attention to Blockchain specific vulnerabilities.")
+
+    important_notes = os.getenv('LLM_SECURITY_PROMPT_IMPORTANT_NOTES',
+        "IMPORTANT:\n - Focus on concrete exploitable vulnerabilities.")
+
+    examples = os.getenv('LLM_SECURITY_PROMPT_EXAMPLES',
+        "Examples of concrete vulnerabilities:\n - Gas costs that deviate from EIP specifications.")
+
+    response_format = os.getenv('LLM_SECURITY_PROMPT_RESPONSE_FORMAT',
+        """CRITICAL: Your response must be ONLY the following JSON object, with no additional text, explanation, or markdown formatting:
+    {
+        "confidence_score": <use highest confidence from findings, or 100 if no vulnerabilities>,
+        "has_vulnerabilities": <true/false>,
+        "findings": [
+            {
+                "severity": "<HIGH|MEDIUM|LOW>",
+                "description": "<specific vulnerability with exact code location>",
+                "recommendation": "<precise fix required>",
+                "confidence": <0-100, how certain you are about this specific vulnerability>,
+                "detailed_explanation": "<comprehensive explanation of what the issue is>",
+                "impact_explanation": "<what can happen if this vulnerability is exploited>",
+                "detailed_recommendation": "<detailed explanation of how to fix the issue>",
+                "code_example": "<the existing problematic code block, with proposed changes highlighted using html-style comments>",
+                "additional_resources": "<links to documentation or other resources>"
+            }
+        ],
+        "summary": "<only mention concrete vulnerabilities found>"
+    }
+
+    IMPORTANT: The overall confidence_score should match the highest confidence score from the findings.
+    For example, if you find one vulnerability with 90% confidence, the overall confidence_score should also be 90.""")
+
+    no_vulns_response = os.getenv('LLM_SECURITY_PROMPT_NO_VULNS_RESPONSE',
+        """If no clear vulnerabilities are found in the code changes, return:
+    {
+        "confidence_score": 100,
+        "has_vulnerabilities": false,
+        "findings": [],
+        "summary": "No concrete vulnerabilities identified in the changed code."
+    }""")
 
     # Build the prompt
     prompt = f"""{intro}
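A minimal sketch of the fallback behavior this hunk introduces, using only the standard-library os module and the LLM_SECURITY_PROMPT_INTRO variable from the diff above (the string values here are illustrative, not the actual defaults):

    import os

    # When the variable is unset, os.getenv returns the second argument,
    # i.e. the built-in default text baked into get_security_prompt.
    os.environ.pop('LLM_SECURITY_PROMPT_INTRO', None)
    assert os.getenv('LLM_SECURITY_PROMPT_INTRO', 'built-in default') == 'built-in default'

    # When the variable is set (exported in the shell or loaded from .env),
    # that value takes precedence over the built-in default.
    os.environ['LLM_SECURITY_PROMPT_INTRO'] = 'custom intro for this deployment'
    assert os.getenv('LLM_SECURITY_PROMPT_INTRO', 'built-in default') == 'custom intro for this deployment'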