Skip to content

Commit 4dd1930

Browse files
committed
logic stuff done probably
1 parent 3cffb8e commit 4dd1930

File tree

4 files changed

+127
-1
lines changed

4 files changed

+127
-1
lines changed

interpreter/core/core.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,8 @@ def __init__(
7373
skills_path=None,
7474
import_skills=False,
7575
multi_line=False,
76+
contributing_future_conversations=None,
77+
contribute_current_conversation=False
7678
):
7779
# State
7880
self.messages = [] if messages is None else messages
@@ -123,6 +125,9 @@ def __init__(
123125

124126
self.computer.import_skills = import_skills
125127

128+
self.contributing_future_conversations = contributing_future_conversations
129+
self.contribute_current_conversation = contribute_current_conversation
130+
126131
def server(self, *args, **kwargs):
127132
server(self, *args, **kwargs)
128133

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
import os
2+
import json
3+
4+
5+
# Location of the persisted contribution-prompt state:
# ~/.cache/open-interpreter/contribute.json
contribute_cache_path = os.path.expanduser(
    os.path.join("~", ".cache", "open-interpreter", "contribute.json")
)
8+
9+
10+
def ask_user_to_run_contribute():
    """Print a one-time hint telling the user how to opt in to contributing
    the current conversation (via the --contribute_conversation flag)."""
    hint_lines = (
        "---",
        "not contributing current",
        "Run --contribute_conversation to contribute the current conversation!",
    )
    for line in hint_lines:
        print(line)
    print()
15+
16+
17+
def send_past_conversations(interpreter):
    """Contribute all of the user's past conversations.

    NOTE(review): stub — it only announces the action; nothing is actually
    sent yet. The `interpreter` argument is currently unused.
    """
    print("sending all past conversations!")
19+
20+
21+
def set_send_future_conversations(interpreter, should_send_future):
    """Record whether future conversations should be contributed.

    NOTE(review): stub — it only announces the choice; no persistent setting
    is changed yet. The `interpreter` argument is currently unused.
    """
    message = "sending!" if should_send_future else "not sending!"
    print(message)
26+
27+
28+
def ask_user_to_contribute_past():
    """Interactively ask whether to contribute all past conversations.

    Returns True only for a 'y' / 'Y' answer; anything else counts as no.
    """
    print("do you want to contribute all past conversations?")
    return input("(y/n) ").lower() == "y"
32+
33+
34+
def ask_user_to_contribute_future():
    """Interactively ask whether to contribute all future conversations.

    Returns True only for a 'y' / 'Y' answer; anything else counts as no.
    """
    print("do you want to contribute all future conversations?")
    return input("(y/n) ").lower() == "y"
38+
39+
40+
def contribute_conversation_launch_logic(interpreter):
    """Run the per-launch contribution flow.

    Loads the prompt-state cache, then either runs the past/future
    contribution dialog (when the user opted in with the flag), shows the
    one-time opt-in hint, or does nothing if the hint was already shown.
    Persists the updated cache except on the nothing-to-do path.
    """
    contribution_cache = get_contribute_cache_contents()
    already_prompted = contribution_cache["asked_to_run_contribute"]

    # NOTE(review): this reads `interpreter.contribute_conversation` (set by
    # start_terminal_interface), while core.py's __init__ stores a similar
    # flag as `contribute_current_conversation` — confirm which attribute is
    # the intended source of truth.
    if interpreter.contribute_conversation:
        # --contribute-conversation was passed (first launch or later):
        # run the past/future contribution dialog.
        contribute_past_and_future_logic(interpreter, contribution_cache)
    elif already_prompted:
        # Hint was already shown on a previous launch and the user did not
        # opt in this time — nothing changed, so skip the cache write.
        return
    else:
        # First launch without the flag: show the hint exactly once.
        ask_user_to_run_contribute()
        contribution_cache["asked_to_run_contribute"] = True

    write_to_contribution_cache(contribution_cache)
65+
66+
67+
# Mutates `contribution_cache` in place (caller is responsible for persisting it).
def contribute_past_and_future_logic(interpreter, contribution_cache):
    """Ask — at most once ever — about contributing past conversations, then
    ask about future conversations unless already enabled."""
    if not contribution_cache["asked_to_contribute_past"]:
        if ask_user_to_contribute_past():
            send_past_conversations(interpreter)
        # Mark as asked regardless of the answer so we never prompt again.
        contribution_cache["asked_to_contribute_past"] = True

    # Only prompt about future conversations when not already contributing.
    if not interpreter.contributing_future_conversations:
        wants_future = ask_user_to_contribute_future()
        set_send_future_conversations(interpreter, wants_future)
84+
85+
86+
# Returns a {"asked_to_run_contribute": bool, "asked_to_contribute_past": bool}.
# Writes the contribution cache file (creating its directory) if it doesn't exist.
def get_contribute_cache_contents(cache_path=None):
    """Load the contribution prompt-state cache, creating it if missing.

    Args:
        cache_path: Optional override of the cache file location; defaults
            to the module-level ``contribute_cache_path``.

    Returns:
        dict with boolean keys ``asked_to_run_contribute`` and
        ``asked_to_contribute_past``.
    """
    if cache_path is None:
        cache_path = contribute_cache_path

    if not os.path.exists(cache_path):
        default_dict = {
            "asked_to_contribute_past": False,
            "asked_to_run_contribute": False,
        }
        # Bug fix: on a fresh install ~/.cache/open-interpreter does not
        # exist yet, so open() would raise FileNotFoundError.
        parent = os.path.dirname(cache_path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        # "w" instead of "a": we are creating the file's full contents;
        # append mode could leave concatenated (invalid) JSON.
        with open(cache_path, "w") as file:
            json.dump(default_dict, file)
        return default_dict

    with open(cache_path, "r") as file:
        return json.load(file)
101+
102+
103+
# Takes in a {"asked_to_run_contribute": bool, "asked_to_contribute_past": bool}
def write_to_contribution_cache(contribution_cache, cache_path=None):
    """Persist the contribution prompt-state dict as JSON.

    Args:
        contribution_cache: dict with the two prompt-state booleans.
        cache_path: Optional override of the cache file location; defaults
            to the module-level ``contribute_cache_path``.
    """
    if cache_path is None:
        cache_path = contribute_cache_path
    # Robustness: ensure the cache directory exists before writing — this
    # function may run before get_contribute_cache_contents() created it.
    parent = os.path.dirname(cache_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(cache_path, "w") as file:
        json.dump(contribution_cache, file)
107+
108+

interpreter/terminal_interface/start_terminal_interface.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44

55
import pkg_resources
66

7+
from interpreter.terminal_interface.contributing_conversations import contribute_conversation_launch_logic
8+
79
from ..core.core import OpenInterpreter
810
from .conversation_navigator import conversation_navigator
911
from .profiles.profiles import open_storage_dir, profile, reset_profile
@@ -237,6 +239,11 @@ def start_terminal_interface(interpreter):
237239
"help_text": "get Open Interpreter's version number",
238240
"type": bool,
239241
},
242+
{
243+
"name": "contribute_conversation",
244+
"help_text": "let Open Interpreter use the current conversation to train an Open-Source LLM",
245+
"type": bool
246+
}
240247
]
241248

242249
# Check for deprecated flags before parsing arguments
@@ -419,7 +426,9 @@ def start_terminal_interface(interpreter):
419426
if args.server:
420427
interpreter.server()
421428
return
422-
429+
430+
interpreter.contribute_conversation = args.contribute_conversation
431+
423432
validate_llm_settings(interpreter)
424433

425434
interpreter.in_terminal_interface = True

interpreter/terminal_interface/validate_llm_settings.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@
88
import litellm
99
from prompt_toolkit import prompt
1010

11+
from interpreter.terminal_interface.contributing_conversations import contribute_conversation_launch_logic
12+
1113
from .utils.display_markdown_message import display_markdown_message
1214

1315

@@ -32,6 +34,8 @@ def validate_llm_settings(interpreter):
3234
if not os.environ.get("OPENAI_API_KEY") and not interpreter.llm.api_key and not interpreter.llm.api_base:
3335
display_welcome_message_once()
3436

37+
contribute_conversation_launch_logic(interpreter)
38+
3539
display_markdown_message(
3640
"""---
3741
> OpenAI API key not found

0 commit comments

Comments
 (0)