-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathassistant.py
More file actions
79 lines (66 loc) · 3.96 KB
/
assistant.py
File metadata and controls
79 lines (66 loc) · 3.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import sys
import json
from llm_interface import LLMInterface
from command_executor import CommandExecutor
from config import LLM_MODEL_NAME, REQUIRE_COMMAND_CONFIRMATION
class PentestAssistant:
    """Interactive pentest-assistant REPL ("Johnny").

    Bridges an LLM (via LLMInterface) to the local shell (via
    CommandExecutor): each user turn is sent to the LLM, which must answer
    with a JSON object containing a conversational 'response' and an
    optional 'command'. Suggested commands are executed locally, gated by
    the REQUIRE_COMMAND_CONFIRMATION config flag.
    """

    # Cap on how many prior turns are replayed into the prompt, to keep it bounded.
    _HISTORY_WINDOW = 10

    def __init__(self):
        self.llm_interface = LLMInterface()
        self.command_executor = CommandExecutor()
        # Rolling transcript of (role, text) pairs; replayed into each prompt
        # so the LLM keeps context across the session.
        # (Previously initialized but never used.)
        self.conversation_history = []
        self._system_message = (
            "You are Johnny, an AI pentesting assistant. Your goal is to help users with security assessments. "
            "You can suggest commands, analyze output, and have a conversation. "
            "For any request that requires information from the user's local system (e.g., listing files, checking IP addresses, reading files), "
            "you MUST use the 'command' field to get the information. Do not try to answer from memory. "
            "ALWAYS respond with a single, valid JSON object. This object MUST contain a 'response' field with your conversational text. "
            "If a command is required, include a 'command' field. If no command is needed, the 'command' field should be omitted.\n\n"
            "Here are some examples of how to respond:\n\n"
            "--- Example 1: User starts a conversation ---\n"
            'User: "Hello there"\n'
            'Assistant: {"response": "Hello! How can I help you with your security assessment today?"}\n\n'
            "--- Example 2: User asks to list files ---\n"
            'User: "Can you list the files in the current directory?"\n'
            'Assistant: {"response": "Of course. Here is the command to list the files:", "command": "ls -la"}\n\n'
            "--- Example 3: User asks a question that does not require a command ---\n"
            'User: "What is nmap?"\n'
            'Assistant: {"response": "Nmap is a free and open-source network scanner used to discover hosts and services on a computer network by sending packets and analyzing the responses."}'
        )

    def _build_prompt(self, user_input):
        """Assemble the system message, recent conversation history, and the
        new user turn into a single prompt string for the LLM."""
        recent = self.conversation_history[-self._HISTORY_WINDOW:]
        history_block = "".join(f"{role}: {text}\n\n" for role, text in recent)
        return (
            f"{self._system_message}\n\n{history_block}"
            f"User: {user_input}\n\nAssistant (must be JSON):"
        )

    def run(self):
        """Main interactive loop: read user input, query the LLM, display the
        reply, and (with confirmation, if configured) execute any suggested
        command. Exits on 'exit'/'quit', Ctrl-C, or end of stdin."""
        print("--- Johnny: Pentest Assistant ---")
        print("Type 'exit' or 'quit' to end the session.")
        while True:
            try:
                user_input = input("\n[User]> ").strip()
                if user_input.lower() in ["exit", "quit"]:
                    print("Exiting assistant. Goodbye!")
                    break

                llm_response = self.llm_interface.query_llm(self._build_prompt(user_input))

                # Defaults cover a malformed/unparseable LLM reply.
                assistant_response = llm_response.get("response", "Sorry, I didn't understand that.")
                command_to_run = llm_response.get("command")

                # Print the conversational part of the response, then record
                # the exchange so later prompts carry context.
                print(f"\n[Johnny]: {assistant_response}")
                self.conversation_history.append(("User", user_input))
                self.conversation_history.append(("Assistant", assistant_response))

                if command_to_run:
                    print(f" > Suggested command: {command_to_run}")
                    # BUG FIX: REQUIRE_COMMAND_CONFIRMATION was imported but
                    # never consulted — LLM-suggested commands executed with
                    # no user approval. Gate execution on the config flag.
                    if REQUIRE_COMMAND_CONFIRMATION:
                        approval = input(" Run this command? [y/N]> ").strip().lower()
                        if approval not in ("y", "yes"):
                            print(" Command skipped.")
                            continue
                    success, output = self.command_executor.execute_command(command_to_run)
                    # BUG FIX: the success flag was previously discarded, so
                    # failed commands printed indistinguishably from successes.
                    if not success:
                        print("\n[Command failed]")
                    print(f"\n[Command Output]:\n{output}")
                elif "error" in llm_response:
                    print(f"\n[Assistant Error]: {llm_response['error']}")
                    print(f"Raw Response: {llm_response.get('raw_response', '')}")
            except (KeyboardInterrupt, EOFError):
                # Ctrl-C or closed stdin: terminate the session cleanly.
                print("\n\nExiting assistant. Goodbye!")
                break
            except Exception as e:
                # ROBUSTNESS FIX: previously any single error killed the whole
                # session (break). Report it and keep the REPL alive instead.
                print(f"\nAn unexpected error occurred in the main loop: {e}")
# Script entry point: start the interactive assistant session.
if __name__ == "__main__":
    PentestAssistant().run()