 import os
 import json
+import time
+from typing import List, TypedDict
+
+import pkg_resources
+import requests
+
+from interpreter.terminal_interface.profiles.profiles import write_key_to_profile
+from interpreter.terminal_interface.utils.display_markdown_message import display_markdown_message
 
 
 contribute_cache_path = os.path.join(
     os.path.expanduser("~"), ".cache", "open-interpreter", "contribute.json"
 )
 
 
-def ask_user_to_run_contribute():
-    print("---")
-    print("not contributing current")
-    print("Run --contribute_conversation to contribute the current conversation!")
-    print()
+def display_contribution_message():
+    display_markdown_message(
+"""
+---
+> We're training an open-source language model!
+
+You can help us train it with your past, current, or future conversations by:
+1. Closing out of OpenInterpreter,
+2. Running `interpreter --contribute_conversation`.
+"""
+    )
+    time.sleep(1)
+
+
+def display_contributing_current_message():
+    display_markdown_message(
+"""
+---
+> This conversation will be used to train OpenInterpreter's language model.
+"""
+    )
 
 
 def send_past_conversations(interpreter):
-    print("sending all past conversations!")
+    past_conversations = get_all_conversations(interpreter)
+    if len(past_conversations) > 0:
+        print()
+        print("Sending all previous conversations to OpenInterpreter...")
+        contribute_conversations(past_conversations)
+        print()
 
 
 def set_send_future_conversations(interpreter, should_send_future):
-    if should_send_future:
-        print("sending!")
-    else:
-        print("not sending!")
+    write_key_to_profile("contribute_conversation", should_send_future)
+    display_markdown_message(
+"""
+> OpenInterpreter will contribute all your conversations from now on.
+
+To change this, consult the documentation at [https://unassuminglink.com](https://www.readthefuckingmanual.com).
+"""
+    )
 
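The future-contribution choice only survives restarts because `write_key_to_profile` persists it. A minimal sketch of what that persistence amounts to, assuming the profile is a flat JSON mapping read at startup (an assumption; the real implementation lives in `profiles.py` and may use a different format):

```python
import json


# Hypothetical stand-in for write_key_to_profile, for illustration only.
def write_key_to_profile_sketch(profile_path: str, key: str, value) -> None:
    # Read the existing profile, set the key, and write it back.
    with open(profile_path, "r") as f:
        profile = json.load(f)
    profile[key] = value  # e.g. "contribute_conversation" -> True
    with open(profile_path, "w") as f:
        json.dump(profile, f)
```

On the next launch, a truthy `contribute_conversation` in the profile is presumably what makes `interpreter.will_contribute` take the fast path below.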
 
-def ask_user_to_contribute_past():
-    print("do you want to contribute all past conversations?")
+def user_wants_to_contribute_past():
+    print("Would you like to contribute all past conversations?")
     response = input("(y/n) ")
     return response.lower() == "y"
 
 
-def ask_user_to_contribute_future():
-    print("do you want to contribute all future conversations?")
+def user_wants_to_contribute_future():
+    print("Would you like to contribute all future conversations?")
     response = input("(y/n) ")
     return response.lower() == "y"
 
 
 def contribute_conversation_launch_logic(interpreter):
     contribution_cache = get_contribute_cache_contents()
-    displayed_contribution_message = contribution_cache["asked_to_run_contribute"]
-    contribute_current = interpreter.contribute_conversation
 
-    if displayed_contribution_message:
-        if contribute_current:
-            # second launch, --contribute-conversation.
-            contribute_past_and_future_logic(interpreter, contribution_cache)
-        else:
-            # second launch, no --contribute-conversation.
-            # continue launching as normal!
-            # no need to modify contribution_cache because we're not asking about
-            # past conversations and we've already displayed the contribution message.
-            return
-    else:
-        if contribute_current:
-            # first launch, --contribute-conversation.
-            contribute_past_and_future_logic(interpreter, contribution_cache)
-        else:
-            # first launch, no --contribute-conversation.
-            ask_user_to_run_contribute()
-            contribution_cache["asked_to_run_contribute"] = True
+    if interpreter.will_contribute:
+        contribute_past_and_future_logic(interpreter, contribution_cache)
+    elif not contribution_cache["displayed_contribution_message"]:
+        display_contribution_message()
 
+    # don't show the contribution message again no matter what.
+    contribution_cache["displayed_contribution_message"] = True
     write_to_contribution_cache(contribution_cache)
 
 
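The rewritten gating collapses the old four-branch first/second-launch matrix into two checks. A sketch of the resulting behavior with a hypothetical stub interpreter (illustration only; not part of this diff):

```python
from types import SimpleNamespace

# Launch without --contribute_conversation: will_contribute is falsy, so the
# one-time message is shown on the first run and never again, because the
# cache flag is set unconditionally afterwards.
contribute_conversation_launch_logic(SimpleNamespace(will_contribute=False))

# Launch with --contribute_conversation (or the profile flag set): skips the
# message and goes straight to the past/future prompts. The stub lacks
# conversation_history_path, so this path needs a real interpreter object:
# contribute_conversation_launch_logic(SimpleNamespace(will_contribute=True))
```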
+class ContributionCache(TypedDict):
+    displayed_contribution_message: bool
+    asked_to_contribute_past: bool
+    asked_to_contribute_future: bool
+
+
 # modifies the contribution cache!
-def contribute_past_and_future_logic(interpreter, contribution_cache):
+def contribute_past_and_future_logic(
+    interpreter, contribution_cache: ContributionCache
+):
     if not contribution_cache["asked_to_contribute_past"]:
-        contribute_past = ask_user_to_contribute_past()
-        if contribute_past:
+        if user_wants_to_contribute_past():
             send_past_conversations(interpreter)
+        contribution_cache["asked_to_contribute_past"] = True
 
-    # set asked_to_contribute_past to True no matter what!
-    contribution_cache["asked_to_contribute_past"] = True
-
-    contributing_future = interpreter.contributing_future_conversations
-    if not contributing_future:
-        contribute_future = ask_user_to_contribute_future()
-        if contribute_future:
+    if not contribution_cache["asked_to_contribute_future"]:
+        if user_wants_to_contribute_future():
             set_send_future_conversations(interpreter, True)
-        else:
-            set_send_future_conversations(interpreter, False)
+        contribution_cache["asked_to_contribute_future"] = True
 
+    display_contributing_current_message()
 
-# Returns a {"asked_to_run_contribute": bool, "asked_to_contribute_past": bool}.
+
+# Returns the contents of the contribution cache as a ContributionCache.
 # Writes the contribution cache file if it doesn't already exist.
-def get_contribute_cache_contents():
+def get_contribute_cache_contents() -> ContributionCache:
     if not os.path.exists(contribute_cache_path):
-        default_dict = {
+        default_dict: ContributionCache = {
             "asked_to_contribute_past": False,
-            "asked_to_run_contribute": False,
+            "displayed_contribution_message": False,
+            "asked_to_contribute_future": False,
         }
         with open(contribute_cache_path, "a") as file:
             file.write(json.dumps(default_dict))
         return default_dict
-
-    with open(contribute_cache_path, "r") as file:
-        contribute_cache = json.load(file)
-        return contribute_cache
+    else:
+        with open(contribute_cache_path, "r") as file:
+            contribute_cache = json.load(file)
+            return contribute_cache
 
 
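For reference, the cache this function seeds is a small JSON file. A quick way to inspect it; the values shown are what the launch logic above would leave behind after a first run without `--contribute_conversation` (expected output, not captured):

```python
import json
import os

cache_path = os.path.join(
    os.path.expanduser("~"), ".cache", "open-interpreter", "contribute.json"
)
with open(cache_path) as f:
    print(json.load(f))
# {'asked_to_contribute_past': False,
#  'displayed_contribution_message': True,
#  'asked_to_contribute_future': False}
```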
-# Takes in a {"asked_to_run_contribute": bool, "asked_to_contribute_past": bool}
-def write_to_contribution_cache(contribution_cache):
+# Takes in a ContributionCache and writes it to the contribution cache file.
+def write_to_contribution_cache(contribution_cache: ContributionCache):
     with open(contribute_cache_path, "w") as file:
         json.dump(contribution_cache, file)
 
 
+def get_all_conversations(interpreter) -> List[List]:
+    def is_conversation_path(path: str):
+        _, ext = os.path.splitext(path)
+        return ext == ".json"
+
+    history_path = interpreter.conversation_history_path
+    all_conversations: List[List] = []
+    for mpath in os.listdir(history_path):
+        if not is_conversation_path(mpath):
+            continue
+        full_path = os.path.join(history_path, mpath)
+        with open(full_path, "r") as cfile:
+            conversation = json.load(cfile)
+            all_conversations.append(conversation)
+    return all_conversations
+
+
+def is_list_of_lists(l):
+    return isinstance(l, list) and all([isinstance(e, list) for e in l])
+
+
+def contribute_conversations(conversations: List[List]):
+    if len(conversations) == 0 or len(conversations[0]) == 0:
+        return None
+
+    url = "https://api.openinterpreter.com/v0/conversations/contribute/"
+    version = pkg_resources.get_distribution("open-interpreter").version
+
+    payload = {"conversations": conversations, "oi_version": version}
+
+    assert is_list_of_lists(
+        payload["conversations"]
+    ), "the contribution payload is not a list of lists!"
+
+    try:
+        response = requests.post(url, json=payload)
+        if response.status_code != 200:
+            print(
+                f"Failed to contribute conversation: {response.status_code} {response.text}"
+            )
+            return None
+        else:
+            print("Successfully contributed conversations!")
+    except requests.RequestException as e:
+        print(f"Failed to contribute conversation: {e}")
+        return None
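The wire format is implied by the payload construction above: a JSON object with `conversations` (a list of message lists) and `oi_version`. A hedged usage sketch; the message fields below are an assumption about what the history JSON files contain, not something this diff specifies:

```python
# Hypothetical conversation shape -- the role/content keys are assumed.
example_conversations = [
    [
        {"role": "user", "content": "Plot sin(x) from 0 to 2*pi."},
        {"role": "assistant", "content": "Running matplotlib..."},
    ]
]

# POSTs {"conversations": [...], "oi_version": "<installed version>"} as JSON
# to https://api.openinterpreter.com/v0/conversations/contribute/
contribute_conversations(example_conversations)
```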