Skip to content

Commit b12eb7b

Browse files
committed
Add custom command to wtf
1 parent 6c3058a commit b12eb7b

File tree

1 file changed

+115
-20
lines changed

1 file changed

+115
-20
lines changed

scripts/wtf.py

Lines changed: 115 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,28 @@
11
from yaspin import yaspin
22

3+
# Start spinner
34
spinner = yaspin()
45
spinner.start()
56

67
import os
78
import platform
89
import re
910
import subprocess
11+
import sys
1012
import time
1113

1214
import platformdirs
1315
import pyperclip
1416
import yaml
1517
from pynput.keyboard import Controller, Key
1618

19+
# Don't let litellm go online here; this slows it down
1720
os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
1821
import litellm
1922

23+
# Define system messages
2024
SYSTEM_MESSAGE = f"""
21-
You are a fast, efficient AI assistant specialized in analyzing terminal history and providing solutions. You are summoned via the wtf command. Your task is to:
25+
You are a fast, efficient terminal assistant. Your task is to:
2226
2327
1. Scan the provided terminal history.
2428
2. Identify the most recent error or issue.
@@ -36,7 +40,7 @@
3640
• Using double quotes around the sed expression to handle single quotes within the command.
3741
• Combining single and double quotes to properly escape characters within the shell command.
3842
- If previous commands attempted to fix the issue and failed, learn from them by proposing a DIFFERENT command.
39-
- Focus on the most recent error, ignoring earlier unrelated commands.
43+
- Focus on the most recent error, ignoring earlier unrelated commands. If the user included a message at the end, focus on helping them.
4044
- If you need more information to confidently fix the problem, ask the user to run wtf again in a moment, then write a command like grep to learn more about the problem.
4145
- The error may be as simple as a spelling error, or as complex as requiring tests to be run, or code to be find-and-replaced.
4246
- Prioritize speed and conciseness in your response. Don't use markdown headings. Don't say more than a sentence or two. Be incredibly concise.
@@ -47,8 +51,70 @@
4751
4852
"""
4953

54+
CUSTOM_MESSAGE_SYSTEM_MESSAGE = f"""
55+
56+
You are a fast, efficient AI assistant for terminal and coding tasks. When summoned, you will:
57+
58+
1. Review the provided terminal history (which may or may not be relevant) and final user query.
59+
2. Determine the most appropriate solution or debugging step to resolve the user's final query.
60+
3. Respond with a brief explanation and a single shell command in a markdown code block.
61+
62+
Rules:
63+
- Provide one logical command (use \ or ^ for multiline).
64+
- Keep explanations concise and place them before the code block.
65+
- Use proper command escaping (e.g., sed with correct quotes).
66+
- Avoid comments in the code block.
67+
- If more info is needed, provide a command to gather it (e.g., grep).
68+
- Focus on the user's FINAL query and ADDRESS NOTHING ELSE, using terminal history for context if relevant.
69+
- For multi-step solutions, explain briefly and provide the first or combined command.
70+
- Prioritize addressing the user's specific request (at the END, after "wtf") efficiently.
71+
72+
User's System: {platform.system()}
73+
CWD: {os.getcwd()}
74+
{"Shell: " + os.environ.get('SHELL') if os.environ.get('SHELL') else ''}
75+
76+
"""
77+
78+
LOCAL_SYSTEM_MESSAGE = f"""
79+
You're a fast AI assistant for terminal issues. You must:
80+
81+
1. Scan terminal history
82+
2. Identify latest error
83+
3. Determine best solution
84+
4. Reply with brief explanation + single shell command in markdown
85+
86+
Rules:
87+
- One logical command (use \ or ^ for multiline)
88+
- Explain briefly, then provide command
89+
- No comments in code
90+
- Proper escaping (e.g., sed with correct quotes)
91+
- If unsure, get more info with a command like grep
92+
- Prioritize speed and conciseness
93+
94+
Example response:
95+
96+
We need to fix the file permissions on config.yml.
97+
```bash
98+
chmod 644 config.yml
99+
```
100+
101+
User's System: {platform.system()}
102+
CWD: {os.getcwd()}
103+
{"Shell: " + os.environ.get('SHELL') if os.environ.get('SHELL') else ''}
104+
105+
Now, it's your turn:
106+
"""
107+
50108

51109
def main():
110+
### GET OPTIONAL CUSTOM MESSAGE
111+
112+
custom_message = None
113+
if len(sys.argv) > 1:
114+
custom_message = "wtf " + " ".join(sys.argv[1:])
115+
116+
### GET TERMINAL HISTORY
117+
52118
keyboard = Controller()
53119
history = None
54120

@@ -246,13 +312,11 @@ def main():
246312
history = history[: -len(command)].strip()
247313
break
248314

249-
commands_to_remove = ["poetry run wtf", "wtf"]
250-
for command in commands_to_remove:
251-
if history.endswith(command):
252-
history = history[: -len(command)].strip()
253-
break
315+
if "wtf" in history:
316+
last_wtf_index = history.rindex("wtf")
317+
history = history[:last_wtf_index]
254318

255-
# Get error context
319+
### GET ERROR CONTEXT
256320

257321
# Regex pattern to extract filename and line number
258322
pattern = r'File "([^"]+)", line (\d+)'
@@ -290,15 +354,7 @@ def get_lines_from_file(filename, line_number):
290354
for entry in result:
291355
history = f"""File: {entry["filename"]}\n{entry["text"]}\n\n""" + history
292356

293-
# print(history)
294-
# print("---")
295-
# time.sleep(10)
296-
297-
# Prepare messages for LLM
298-
messages = [
299-
{"role": "system", "content": SYSTEM_MESSAGE},
300-
{"role": "user", "content": history},
301-
]
357+
### PREPARE FOR LLM
302358

303359
# Get LLM model from profile
304360
default_profile_path = os.path.join(
@@ -314,13 +370,52 @@ def get_lines_from_file(filename, line_number):
314370
else:
315371
model = profile.get("llm", {}).get("model", "gpt-3.5-turbo")
316372
except:
317-
model = "gpt-3.5-turbo"
373+
model = "gpt-4o-mini"
374+
375+
# If they're using a local model (improve this heuristic), use the LOCAL_SYSTEM_MESSAGE
376+
if "ollama" in model or "llama" in model:
377+
system_message = LOCAL_SYSTEM_MESSAGE
378+
else:
379+
system_message = SYSTEM_MESSAGE
380+
381+
# If they passed in a custom message, use the CUSTOM_MESSAGE_SYSTEM_MESSAGE
382+
if custom_message:
383+
system_message = CUSTOM_MESSAGE_SYSTEM_MESSAGE
384+
user_message = (
385+
"Answer THIS question by telling me what code to run:"
386+
+ custom_message
387+
+ "\nMy terminal history, which may or may not be relevant, is this:\n---\n"
388+
+ history
389+
+ "\n---\nPlease now answer THIS question by telling me what code to run: "
390+
+ custom_message
391+
)
392+
else:
393+
user_message = (
394+
history.strip()
395+
+ "\n\nPlease help me resolve the final error above by telling me what code to run in a markdown code block."
396+
)
397+
398+
# So stupid but sometimes fun
399+
# system_message += "\nBe incredibly rude and use swear words, like a joker, like you're some kind of jester. You can say 'Fuck' and 'Fucking'. This is for adults. Be funny. USE ALL LOWER CASE. Act like you're an aggrieved pair programmer to the idiot that got themselves in this situation."
400+
401+
messages = [
402+
{"role": "system", "content": system_message.strip()},
403+
{"role": "user", "content": user_message.strip()},
404+
]
405+
406+
# DEBUGGING: PRINT MESSAGES
407+
408+
# print("---")
409+
# import pprint
410+
# pprint.pprint(messages)
411+
# print("---")
412+
# time.sleep(100)
413+
414+
### PARSE LLM RESPONSE
318415

319-
# Process LLM response
320416
in_code = False
321417
backtick_count = 0
322418
language_buffer = ""
323-
324419
started = False
325420

326421
for chunk in litellm.completion(

0 commit comments

Comments
 (0)