1212from .history import log_interaction
1313from .tools import ToolExecutionCancelled
1414
15- # Import prompt_toolkit for better input handling
1615import pathlib
1716from prompt_toolkit import PromptSession
1817from prompt_toolkit .history import FileHistory
1918
2019
21- # Create a simple system prompt instead of using the old prompts module
2220def get_system_prompt ():
2321 return """You are an intelligent shell operations assistant. You help users with:
2422
@@ -36,7 +34,6 @@ def get_system_prompt():
3634Be concise but thorough in your responses."""
3735
3836
39- # Initialize console for rich output
4037console = Console ()
4138
4239
@@ -68,25 +65,18 @@ def add_message(
6865 "execute_shell_command" # Add tool name for compatibility
6966 )
7067
71- # Keep system message separate
7268 if role == "system" :
7369 self .system_message = message_dict
7470 else :
7571 self .messages .append (message_dict )
76-
77- # Maintain context window
7872 if len (self .messages ) > self .max_messages :
7973 self .messages = self .messages [- self .max_messages :]
8074
def get_context_messages(self) -> List[Dict[str, Any]]:
    """Return the conversation formatted for the LLM API.

    Builds a fresh list each call: the stored system message (when one
    has been set) comes first, followed by the buffered conversation
    messages in order.
    """
    # The model must always see the system message first, so prepend it
    # when present; otherwise the context is just the message buffer.
    prefix = [self.system_message] if self.system_message else []
    return prefix + self.messages
@@ -106,35 +96,22 @@ def __init__(self, model_string: str, api_key: str, verbose: bool = False):
10696 self .verbose = verbose
10797 self .context = ConversationContext ()
10898
109- # Create LLM adapter
11099 self .llm_adapter = get_llm_adapter (
111100 model_string = model_string , api_key = api_key , verbose = verbose
112101 )
113102
114- # Initialize command history session
115103 self ._setup_command_history ()
116-
117- # Don't set up custom signal handlers - let prompt_toolkit handle them
118-
119- # Initialize system message
120104 self ._initialize_system_message ()
121105
122106 def _setup_command_history (self ):
123107 """Setup command history with persistent storage."""
124108 try :
125- # Create history directory following InfraGPT conventions
126109 history_dir = pathlib .Path .home () / ".infragpt"
127110 history_dir .mkdir (exist_ok = True )
128-
129- # History file for command-line input
130111 history_file = history_dir / "history"
131112
132- # WARNING: FileHistory writes clear-text inputs to disk.
133- # This is a security risk if users enter secrets at the prompt.
134- # Create PromptSession with FileHistory
113+ # WARNING: FileHistory writes clear-text inputs to disk - security risk if users enter secrets
135114 self .prompt_session = PromptSession (history = FileHistory (str (history_file )))
136-
137- # Best-effort: restrict file permissions to user-only
138115 try :
139116 history_file .chmod (0o600 )
140117 except Exception :
@@ -144,7 +121,6 @@ def _setup_command_history(self):
144121 console .print (f"[dim]Command history: { history_file } [/dim]" )
145122
146123 except Exception as e :
147- # Fallback to no history if setup fails
148124 self .prompt_session = PromptSession ()
149125 if self .verbose :
150126 console .print (
@@ -167,8 +143,6 @@ def run_interactive_session(self):
167143 )
168144
169145 console .print (f"[yellow]Model:[/yellow] [bold]{ self .model_string } [/bold]" )
170-
171- # Validate API key
172146 console .print ("[dim]Validating API key...[/dim]" )
173147 try :
174148 if self .llm_adapter .validate_api_key ():
@@ -180,34 +154,26 @@ def run_interactive_session(self):
180154 console .print (f"[red]✗ API key validation failed: { e } [/red]" )
181155 return
182156
183- # Show initial prompt
184157 console .print ("[bold cyan]What would you like me to help with?[/bold cyan]" )
185158 console .print (
186159 "[dim]Press Ctrl+D to exit, Ctrl+C to interrupt operations[/dim]\n "
187160 )
188161
189162 while True :
190163 try :
191- # Get user input
192164 user_input = self ._get_user_input ()
193165 if not user_input :
194- continue # Go back to prompt for empty input
166+ continue
195167
196- # Check for exit commands
197168 if user_input .lower () in ["exit" , "quit" , "bye" ]:
198169 break
199170
200- # Add user message to context
201171 self .context .add_message ("user" , user_input )
202-
203- # Process with LLM
204172 self ._process_user_input (user_input )
205173
206174 except KeyboardInterrupt :
207- # This shouldn't happen with prompt_toolkit, but handle just in case
208175 continue
209176 except EOFError :
210- # Ctrl+D - exit the application
211177 console .print ("\n [dim]EOF received (Ctrl+D). Exiting...[/dim]" )
212178 break
213179
@@ -218,19 +184,15 @@ def _get_user_input(self) -> str:
218184 try :
219185 return self .prompt_session .prompt ("> " )
220186 except KeyboardInterrupt :
221- # Ctrl+C should just return empty string to continue
222187 return ""
223188 except EOFError :
224- # Ctrl+D - let this propagate for proper exit handling
225189 raise
226190
227191 def _process_user_input (self , user_input : str ):
228192 """Process user input with direct SDK streaming and tool execution."""
229193 try :
230- # Get context messages
231194 messages = self .context .get_context_messages ()
232195
233- # Debug: Show message structure if verbose
234196 if self .verbose :
235197 console .print (f"[dim]Context has { len (messages )} messages[/dim]" )
236198 for i , msg in enumerate (messages ):
@@ -242,21 +204,15 @@ def _process_user_input(self, user_input: str):
242204 f"[dim] { i } : { role } (content: { content_len } chars, tools: { has_tools } , tool_id: { has_tool_id } )[/dim]"
243205 )
244206
245- # Show thinking and stream response
246207 console .print ("\n [dim]Thinking...[/dim]" )
247208
248209 response_content = ""
249210 first_content = True
250211
251- # Stream response using new adapter with interrupt checking
252- # Note: stream_with_tools already handles the complete tool execution cycle
253- # including getting the final response after tool execution
254212 try :
255213 for chunk in self .llm_adapter .stream_with_tools (messages ):
256- # Handle content streaming
257214 if chunk .content :
258215 if first_content :
259- # Clear thinking message and show A: prefix
260216 console .print (
261217 "\033 [1A\033 [K" , end = ""
262218 ) # Move up and clear line
@@ -266,7 +222,6 @@ def _process_user_input(self, user_input: str):
266222 response_content += chunk .content
267223 console .print (chunk .content , end = "" )
268224
269- # Handle finish reason
270225 if chunk .finish_reason :
271226 if self .verbose :
272227 console .print (
@@ -276,23 +231,15 @@ def _process_user_input(self, user_input: str):
276231 console .print ("\n [yellow]Operation cancelled by user.[/yellow]" )
277232 return
278233
279- # Add newline after streaming
280234 if response_content :
281235 console .print ()
282-
283- # Add assistant message to context if we have content
284- if response_content :
285236 self .context .add_message ("assistant" , response_content )
286237
287- # Log interaction
288238 self ._log_interaction (user_input , response_content )
289239
290240 except ToolExecutionCancelled :
291- # User cancelled tool execution - just return to prompt
292- # No need to print anything - the tool already printed a message
293241 return
294242 except KeyboardInterrupt :
295- # Ctrl+C during streaming - just return to prompt (message already printed by LLM adapter)
296243 return
297244 except Exception as e :
298245 console .print (f"[bold red]Error processing input:[/bold red] { e } " )
@@ -304,19 +251,15 @@ def _process_user_input(self, user_input: str):
304251 def _log_interaction (self , user_input : str , response : str ):
305252 """Log the interaction for history. Sensitive fields are excluded explicitly."""
306253 try :
307- # Only safe fields, do not include api_key or any secrets
308254 interaction_data = {
309255 "user_input" : user_input ,
310256 "assistant_response" : response ,
311257 "model" : self .model_string ,
312258 "timestamp" : datetime .utcnow ().isoformat () + "Z" ,
313- # Not persisted: allow-list will drop this; used only for error reporting in history.py
314259 "verbose" : self .verbose ,
315260 }
316- # Note: self.api_key is NOT logged ever
317261 log_interaction ("agent_conversation_v2" , interaction_data )
318262 except Exception as e :
319- # Don't let logging failures interrupt the session
320263 if self .verbose :
321264 console .print (f"[dim]Warning: Could not log interaction: { e } [/dim]" )
322265
0 commit comments