-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm_interface.py
More file actions
83 lines (69 loc) · 3.42 KB
/
llm_interface.py
File metadata and controls
83 lines (69 loc) · 3.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import requests
import json
from config import LLM_API_URL, LLM_MODEL_NAME
class LLMInterface:
    """Thin client for an Ollama-style ``/api/generate`` endpoint.

    The endpoint is expected to wrap generated text in a ``response`` field;
    that text should itself be a JSON object, which is what ``query_llm``
    parses and returns.
    """

    def __init__(self, api_url: str = LLM_API_URL, model_name: str = LLM_MODEL_NAME):
        # Endpoint and model are configurable per instance; defaults come
        # from the project-level config module.
        self.api_url = api_url
        self.model_name = model_name

    def query_llm(self, prompt: str) -> dict:
        """
        Send ``prompt`` to the LLM and return its parsed JSON response.

        Returns the parsed JSON object on success. On any failure — HTTP
        error, network problem, or LLM output that is not a JSON object —
        returns a dict containing an "error" key (and, where available, a
        "raw_response" key), so callers never need to catch exceptions.
        """
        headers = {"Content-Type": "application/json"}
        data = {
            "model": self.model_name,
            "prompt": prompt,
            "stream": False, # We want the full response at once
            "format": "json" # Request JSON format from Ollama if supported
        }
        try:
            response = requests.post(self.api_url, headers=headers, json=data, timeout=120)
            response.raise_for_status() # Raise an exception for HTTP errors
            # Ollama's /api/generate endpoint returns a JSON object with a
            # 'response' field containing the actual generated text, which
            # might be a JSON string itself.
            full_response = response.json()
            llm_text_response = full_response.get("response", "")
            # Attempt to parse the LLM's text response as JSON.
            try:
                parsed_response = json.loads(llm_text_response)
            except json.JSONDecodeError:
                print(f"Warning: LLM response is not a valid JSON string: {llm_text_response}")
                return {"error": "LLM returned non-JSON response", "raw_response": llm_text_response}
            # BUGFIX: json.loads can legally yield a non-dict value (list,
            # number, string, None). Returning that would violate the
            # declared `-> dict` contract and break callers that probe the
            # result with `in` / `.get`; report it as a structured error.
            if not isinstance(parsed_response, dict):
                print(f"Warning: LLM response is not a JSON object: {llm_text_response}")
                return {"error": "LLM returned non-object JSON", "raw_response": llm_text_response}
            return parsed_response
        except requests.exceptions.RequestException as e:
            print(f"Error communicating with LLM API: {e}")
            return {"error": str(e)}
        except Exception as e:
            # Last-resort guard (e.g. a response body that is not JSON at
            # all, raised by response.json()) so the caller always receives
            # a dict rather than an exception.
            print(f"An unexpected error occurred: {e}")
            return {"error": str(e)}
if __name__ == "__main__":
    # Demo harness: needs a running Ollama server with the configured
    # model pulled (e.g. `ollama run llama3`).
    client = LLMInterface()

    print("Testing LLMInterface with a simple prompt...")
    greeting_prompt = """You are a helpful assistant. Provide a JSON object with a greeting message.
    Example: {"message": "Hello there!"}
    """
    greeting_result = client.query_llm(greeting_prompt)
    print("\nLLM Raw Response:")
    print(greeting_result)
    if "message" not in greeting_result:
        print("\nFailed to get a parsed message.")
    else:
        print(f"\nParsed Message: {greeting_result['message']}")

    print("\nTesting LLMInterface with a command suggestion prompt...")
    suggestion_prompt = """You are a pentesting assistant. The user wants to scan "example.com" for open ports.
    Suggest a command line tool and the command to use, along with a brief explanation.
    Respond only with a JSON object containing "command" and "explanation" fields.
    Example: {"command": "nmap -sV example.com", "explanation": "Scans example.com for open ports and service versions."}
    """
    suggestion_result = client.query_llm(suggestion_prompt)
    print("\nLLM Command Suggestion Response:")
    print(suggestion_result)
    # Both fields must be present before we echo the suggestion back.
    if "command" in suggestion_result and "explanation" in suggestion_result:
        print(f"\nSuggested Command: {suggestion_result['command']}")
        print(f"Explanation: {suggestion_result['explanation']}")
    else:
        print("\nFailed to get a command suggestion.")