Skip to content

Commit ca90a22

Browse files
committed
Improved CLI, pass through params to LiteLLM
1 parent a9e4178 commit ca90a22

File tree

6 files changed

+46
-11
lines changed

6 files changed

+46
-11
lines changed

interpreter_1/cli.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,11 @@ def _profile_to_arg_params(profile: Profile) -> Dict[str, Dict[str, Any]]:
143143
"default": profile.instructions,
144144
"help": "Appended to default system message",
145145
},
146+
"input": {
147+
"flags": ["--input"],
148+
"default": profile.input,
149+
"help": "Pre-fill first user message",
150+
},
146151
"max_turns": {
147152
"flags": ["--max-turns"],
148153
"type": int,
@@ -237,7 +242,6 @@ def parse_args():
237242

238243
parser.add_argument("--help", "-h", action="store_true", help=argparse.SUPPRESS)
239244
parser.add_argument("--version", action="store_true", help=argparse.SUPPRESS)
240-
parser.add_argument("--input", action="store", help=argparse.SUPPRESS)
241245
parser.add_argument(
242246
"--profiles", action="store_true", help="Open profiles directory"
243247
)

interpreter_1/interpreter.py

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -627,9 +627,16 @@ async def async_respond(self):
627627
else:
628628
api_base = self.api_base
629629
else:
630+
if (
631+
not self.model.startswith("openai/")
632+
and self.provider == "openai"
633+
):
634+
actual_model = "openai/" + self.model
635+
else:
636+
actual_model = self.model
637+
630638
stream = True
631639
api_base = self.api_base
632-
actual_model = self.model
633640

634641
if not self.tool_calling:
635642
system_message += "\n\nPLEASE write code to satisfy the user's request, use ```bash\n...\n``` to run code. You CAN run code."
@@ -641,6 +648,8 @@ async def async_respond(self):
641648
"stream": stream,
642649
"api_base": api_base,
643650
"temperature": self.temperature,
651+
"api_key": self.api_key,
652+
"api_version": self.api_version,
644653
}
645654

646655
if self.tool_calling:
@@ -652,6 +661,12 @@ async def async_respond(self):
652661
if self.debug:
653662
print(params)
654663

664+
if self.debug:
665+
print("Sending request...", params)
666+
import time
667+
668+
time.sleep(3)
669+
655670
raw_response = litellm.completion(**params)
656671

657672
if not stream:

interpreter_1/misc/get_input.py

Lines changed: 20 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@
66
import termios
77

88

9-
async def get_input(placeholder_text=None, placeholder_color: str = "gray") -> str:
9+
async def get_input(
10+
placeholder_text=None, placeholder_color: str = "gray", multiline_support=True
11+
) -> str:
1012
if placeholder_text is None:
1113
common_placeholders = [
1214
"How can I help you?",
@@ -15,7 +17,7 @@ async def get_input(placeholder_text=None, placeholder_color: str = "gray") -> s
1517
'Use """ for multi-line input',
1618
"Psst... try the wtf command",
1719
]
18-
very_rare_placeholders = ["Let's make history together!"]
20+
very_rare_placeholders = [""]
1921

2022
# 69% common, 30% rare, 1% very rare
2123
rand = random.random()
@@ -56,13 +58,15 @@ async def get_input(placeholder_text=None, placeholder_color: str = "gray") -> s
5658

5759
def redraw():
5860
sys.stdout.write("\r\033[K") # Clear line
59-
sys.stdout.write("\r> ")
61+
if multiline_support:
62+
sys.stdout.write("\r> ")
6063
if current_input:
6164
sys.stdout.write("".join(current_input))
6265
elif show_placeholder:
6366
color_code = COLORS.get(placeholder_color.lower(), COLORS["gray"])
6467
sys.stdout.write(f"{color_code}{placeholder_text}{RESET}")
65-
sys.stdout.write("\r> ")
68+
if multiline_support:
69+
sys.stdout.write("\r> ")
6670
sys.stdout.flush()
6771

6872
try:
@@ -74,7 +78,18 @@ def redraw():
7478
if char == "\n":
7579
if current_input:
7680
result = "".join(current_input)
77-
return result
81+
# Multiline support
82+
if multiline_support and result.startswith('"""'):
83+
while True:
84+
print()
85+
extra_input = await get_input(multiline_support=False)
86+
if extra_input.endswith('"""'):
87+
result += extra_input
88+
return result
89+
else:
90+
result += extra_input
91+
else:
92+
return result
7893
else:
7994
redraw()
8095
elif char == "\x7f": # Backspace

interpreter_1/misc/help.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -74,12 +74,12 @@ def help_message():
7474
def help_message():
7575
print(
7676
"""
77-
usage: interpreter [flags]
77+
usage: interpreter [options]
7878
i [prompt]
7979
8080
A modern command-line assistant.
8181
82-
flags:
82+
options:
8383
--model model to use for completion
8484
--provider api provider (e.g. openai, anthropic)
8585
--api-base base url for api requests
@@ -104,7 +104,7 @@ def help_message():
104104
--profiles open profiles directory
105105
--serve start openai-compatible server
106106
107-
example: i want a venv here
107+
example: i want a venv
108108
example: interpreter --model ollama/llama3.2 --serve
109109
example: i -y --input "run pytest, fix errors"
110110
example: cat instructions.txt | i

interpreter_1/misc/welcome.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -301,6 +301,6 @@ def welcome_message():
301301
or: interpreter [options]
302302
303303
Documentation: docs.openinterpreter.com
304-
Run 'interpreter --help' for full options
304+
Run 'interpreter --help' for all options
305305
"""
306306
)

interpreter_1/profiles.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ def __init__(self):
5151
self.messages = [] # List of conversation messages
5252
self.system_message = None # System message override
5353
self.instructions = "" # Additional model instructions
54+
self.input = None # Pre-filled first user message
5455

5556
# Available tools and settings
5657
self.tools = ["interpreter", "editor"] # Enabled tool modules

0 commit comments

Comments (0)