|
| 1 | +import os |
| 2 | +import json |
| 3 | +import time |
| 4 | +from typing import List, TypedDict |
| 5 | + |
| 6 | +import pkg_resources |
| 7 | +import requests |
| 8 | + |
| 9 | +from interpreter.terminal_interface.profiles.profiles import write_key_to_profile |
| 10 | +from interpreter.terminal_interface.utils.display_markdown_message import display_markdown_message |
| 11 | + |
| 12 | + |
# On-disk cache recording which one-time contribution prompts/banners the
# user has already seen. Lives under ~/.cache/open-interpreter/.
contribute_cache_path = os.path.join(
    os.path.expanduser("~"),
    ".cache",
    "open-interpreter",
    "contribute.json",
)
| 16 | + |
| 17 | + |
def display_contribution_message():
    """Show the one-time banner inviting the user to contribute conversations."""
    banner = """
---
> We're training an open-source language model!

You can help us train it with your past, current, or future conversations by:
1. Closing out of OpenInterpreter,
2. Running `interpreter --contribute_conversation`.
"""
    display_markdown_message(banner)
    # Brief pause so the banner registers before the session continues.
    time.sleep(1)
| 30 | + |
| 31 | + |
def display_contributing_current_message():
    """Tell the user the current conversation will be used for training."""
    # Fix: the original used an f-string with no placeholders; a plain
    # string literal is equivalent and drops the misleading prefix.
    display_markdown_message(
"""
---
> This conversation will be used to train OpenInterpreter's language model.
"""
    )
| 39 | + |
| 40 | + |
def send_past_conversations(interpreter):
    """Upload every saved conversation from the user's history directory."""
    history = get_all_conversations(interpreter)
    if not history:
        # Nothing saved yet; stay silent.
        return
    print()
    print("Sending all previous conversations to OpenInterpreter...")
    contribute_conversations(history)
    print()
| 48 | + |
| 49 | + |
def set_send_future_conversations(interpreter, should_send_future):
    """Persist the user's choice about contributing future conversations.

    Writes the ``contribute_conversation`` key to the profile.
    Fix: the confirmation message ("will contribute all your conversations
    from now on") was displayed unconditionally, which was wrong when
    ``should_send_future`` is False — it is now shown only when opting in.
    ``interpreter`` is accepted for signature parity but unused here.
    """
    write_key_to_profile("contribute_conversation", should_send_future)
    if should_send_future:
        display_markdown_message(
"""
> OpenInterpreter will contribute all your conversations from now on.

To change this, consult the documentation at [https://unassuminglink.com](https://www.readthefuckingmanual.com).
"""
        )
| 59 | + |
| 60 | + |
def _ask_yes_no(question):
    """Print *question*, read one line from stdin, return True iff it is "y"."""
    print(question)
    response = input("(y/n) ")
    # .strip() tolerates stray whitespace around the answer; comparison is
    # otherwise unchanged ("y"/"Y" accepted, anything else is a no).
    return response.strip().lower() == "y"


def user_wants_to_contribute_past():
    """Ask whether all past conversations should be contributed."""
    return _ask_yes_no("Would you like to contribute all past conversations?")


def user_wants_to_contribute_future():
    """Ask whether all future conversations should be contributed."""
    return _ask_yes_no("Would you like to contribute all future conversations?")
| 71 | + |
| 72 | + |
def contribute_conversation_launch_logic(interpreter):
    """Run the contribution prompts/banner appropriate for this launch.

    If the user launched with ``--contribute_conversation``, run the
    past/future opt-in flow; otherwise show the invitation banner once.
    """
    contribution_cache = get_contribute_cache_contents()

    if interpreter.will_contribute:
        contribute_past_and_future_logic(interpreter, contribution_cache)
    # .get() guards against cache files written by older versions that may
    # lack this key (plain indexing would raise KeyError).
    elif not contribution_cache.get("displayed_contribution_message", False):
        display_contribution_message()

    # Never show the one-time contribution banner again, whichever path ran.
    contribution_cache["displayed_contribution_message"] = True
    write_to_contribution_cache(contribution_cache)
| 84 | + |
| 85 | + |
class ContributionCache(TypedDict):
    """Schema of the JSON cache stored at ``contribute_cache_path``.

    Each flag records whether the corresponding one-time prompt or banner
    has already been shown to the user.
    """

    displayed_contribution_message: bool
    asked_to_contribute_past: bool
    asked_to_contribute_future: bool
| 90 | + |
| 91 | + |
# NOTE: mutates ``contribution_cache`` in place; the caller persists it.
def contribute_past_and_future_logic(
    interpreter, contribution_cache: ContributionCache
):
    """Ask (once each) about contributing past and future conversations.

    Each question is asked only on the first run; an affirmative answer
    triggers its side effect immediately (uploading history / writing the
    profile key). Finishes by announcing that the current conversation
    will be contributed.
    """
    # .get() tolerates cache files from older versions missing these keys.
    if not contribution_cache.get("asked_to_contribute_past", False):
        if user_wants_to_contribute_past():
            send_past_conversations(interpreter)
        contribution_cache["asked_to_contribute_past"] = True

    if not contribution_cache.get("asked_to_contribute_future", False):
        if user_wants_to_contribute_future():
            set_send_future_conversations(interpreter, True)
        contribution_cache["asked_to_contribute_future"] = True

    display_contributing_current_message()
| 107 | + |
| 108 | + |
def get_contribute_cache_contents() -> ContributionCache:
    """Load the contribution cache, creating it with defaults if absent.

    Returns the parsed :class:`ContributionCache`. If the cache file does
    not exist yet, its parent directory and the file are created with all
    flags ``False``. (The original comment described a Tuple return and an
    ``asked_to_run_contribute`` key — neither ever existed.)
    """
    default_dict: ContributionCache = {
        "asked_to_contribute_past": False,
        "displayed_contribution_message": False,
        "asked_to_contribute_future": False,
    }

    if not os.path.exists(contribute_cache_path):
        # Fix: the original opened in append mode ("a") and assumed
        # ~/.cache/open-interpreter already existed; create the directory
        # first and write ("w") the defaults.
        os.makedirs(os.path.dirname(contribute_cache_path), exist_ok=True)
        with open(contribute_cache_path, "w") as file:
            json.dump(default_dict, file)
        return default_dict

    with open(contribute_cache_path, "r") as file:
        contribute_cache = json.load(file)
    # Merge over defaults so caches written by older versions (missing newer
    # keys) never cause KeyErrors downstream.
    return {**default_dict, **contribute_cache}
| 127 | + |
| 128 | + |
def write_to_contribution_cache(contribution_cache: ContributionCache):
    """Overwrite the on-disk cache file with *contribution_cache* as JSON."""
    serialized = json.dumps(contribution_cache)
    with open(contribute_cache_path, "w") as file:
        file.write(serialized)
| 133 | + |
| 134 | + |
def get_all_conversations(interpreter) -> List[List]:
    """Load every ``*.json`` conversation from the interpreter's history dir.

    Returns an empty list when the history directory does not exist.
    """
    history_path = interpreter.conversation_history_path
    if not os.path.exists(history_path):
        return []

    conversations: List[List] = []
    for name in os.listdir(history_path):
        # Only ``.json`` files are conversation records; skip everything else.
        if os.path.splitext(name)[1] != ".json":
            continue
        with open(os.path.join(history_path, name), "r") as handle:
            conversations.append(json.load(handle))
    return conversations
| 151 | + |
| 152 | + |
def is_list_of_lists(l):
    """Return True iff *l* is a list whose elements are all lists.

    An empty list vacuously qualifies.
    """
    # Generator expression avoids materialising an intermediate list
    # inside all() (ruff C419) and short-circuits on the first non-list.
    return isinstance(l, list) and all(isinstance(e, list) for e in l)
| 155 | + |
| 156 | + |
def contribute_conversations(conversations: List[List]):
    """POST *conversations* to the OpenInterpreter contribution endpoint.

    Returns ``None`` in every case; prints a status line on success or
    failure. Silently returns when there is nothing meaningful to send.
    """
    if len(conversations) == 0 or len(conversations[0]) == 0:
        return None

    url = "https://api.openinterpreter.com/v0/conversations/contribute/"
    version = pkg_resources.get_distribution("open-interpreter").version

    payload = {"conversations": conversations, "oi_version": version}

    # Fix: explicit check instead of ``assert`` — asserts are stripped when
    # Python runs with -O, which would have skipped this validation.
    if not is_list_of_lists(payload["conversations"]):
        print("Failed to contribute conversation: the contribution payload is not a list of lists!")
        return None

    try:
        # Fix: a timeout prevents the CLI from hanging forever on a stalled
        # server (requests.post has no default timeout).
        response = requests.post(url, json=payload, timeout=30)
        if response.status_code != 200:
            print(
                f"Failed to contribute conversation: {response.status_code} {response.text}"
            )
            return None
        # Fix: dropped the pointless f-prefix on a placeholder-free string.
        print("Successfully contributed conversations!")
    except requests.RequestException as e:
        print(f"Failed to contribute conversation: {e}")
        return None