diff --git a/README.md b/README.md
index b69b7182..e464c4e6 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,10 @@
+## FOR PLEO
+To build and run (assuming dependencies are already installed), use:
+```
+chmod +x run.sh
+./run.sh
+```
+
An open source implementation of OpenAI's ChatGPT [Code interpreter](https://openai.com/blog/chatgpt-plugins#code-interpreter).
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 3212d273..587c6a76 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -37,16 +37,19 @@ function App() {
);
let [openAIKey, setOpenAIKey] = useLocalStorage("OpenAIKey", "");
-
+ console.log("test")
+ console.error("test2")
+ console.info("test3")
+ console.debug("test4")
let [messages, setMessages] = useState>(
Array.from([
{
- text: "Hello! I'm a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
+ text: "Hello! I'm the Pleo GPT Code and Chat assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
role: "generator",
type: "message",
},
{
- text: "If I get stuck just type 'reset' and I'll restart the kernel.",
+ text: "If I get stuck just type 'reset' and I'll restart the kernel. 2",
role: "generator",
type: "message",
},
@@ -119,8 +122,9 @@ function App() {
const data = await response.json();
const code = data.code;
+
- addMessage({ text: data.text, type: "message", role: "generator" });
+
if (response.status != 200) {
setWaitingForSystem(WaitingStates.Idle);
@@ -128,10 +132,14 @@ function App() {
}
if (!!code) {
+ await injectContext(`EXPERT: \n\n ${data.text} \n\n The code you asked for: \n\n ${data.code} \n\n I will now execute it and get back to you with a result and analysis.`)
submitCode(code);
setWaitingForSystem(WaitingStates.RunningCode);
+ addMessage({ text: data.text, type: "message", role: "generator" });
} else {
+ await injectContext(`EXPERT: \n\n ${data.text} \n\n `)
setWaitingForSystem(WaitingStates.Idle);
+ addMessage({ text: data.text, type: "message", role: "generator" });
}
} catch (error) {
console.error(
@@ -142,20 +150,75 @@ function App() {
};
async function getApiData() {
+
if(document.hidden){
return;
}
-
+ console.log("starting the check")
let response = await fetch(`${Config.API_ADDRESS}/api`);
+ //console.log("response:", response)
let data = await response.json();
- data.results.forEach(function (result: {value: string, type: string}) {
+ for await (const result of data.results) {
+ if (result.value.trim().length === 0) {
+ continue;
+ }
+ if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') {
+ console.error(`INJECTING DATA: ${result.value}`)
+ const chatResponse = await fetch(`${Config.WEB_ADDRESS}/chat`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ prompt: `Please answer my previous question(s) using the following which is the result the python you wrote. If the code was supposed to generate any visuals, make sure to write a description of them. Take into account what parts of the questions you have already answered. The results are coming in as the server completes the execution. Answer the part of the question that fits the results you are given now.
+ [Python code results]:
+ ${result.value}`,
+ model: selectedModel,
+ openAIKey: openAIKey,
+ }),
+ });
+
+ console.error('Response: ', chatResponse)
+ const data = await chatResponse.json();
+
+ addMessage({ text: data.text, type: "message", role: "generator" });
+ setWaitingForSystem(WaitingStates.Idle);
+ } else {
+ addMessage({ text: result.value, type: result.type, role: "system" });
+ setWaitingForSystem(WaitingStates.Idle);
+ }
+ }
+ /*await data.results.forEach(async function (result: {value: string, type: string}) {
if (result.value.trim().length == 0) {
return;
}
- addMessage({ text: result.value, type: result.type, role: "system" });
- setWaitingForSystem(WaitingStates.Idle);
- });
+ if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') {
+ console.error(`INJECTING DATA: ${result.value}`)
+ const chatResponse = await fetch(`${Config.WEB_ADDRESS}/chat`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ prompt: `Please answer my previous question(s) using the following which is the result the python you wrote. If the code was supposed to generate any visuals, make sure to write a description of them. Take into account what parts of the questions you have already answered. The results are coming in as the server completes the execution. Answer the part of the question that fits the results you are given now.
+ [Python code results]:
+ ${result.value}`,
+ model: selectedModel,
+ openAIKey: openAIKey,
+ }),
+ });
+
+ console.error('Response: ', chatResponse)
+ const data = await chatResponse.json();
+
+ addMessage({ text: data.text, type: "message", role: "generator" });
+ setWaitingForSystem(WaitingStates.Idle);
+ } else {
+ addMessage({ text: result.value, type: result.type, role: "system" });
+ setWaitingForSystem(WaitingStates.Idle);
+ }
+ });*/
}
function completeUpload(message: string) {
@@ -176,6 +239,20 @@ function App() {
.catch((error) => console.error("Error:", error));
}
+ async function injectContext(context: string) {
+ await fetch(`${Config.WEB_ADDRESS}/inject-context`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ prompt: context,
+ }),
+ })
+ .then(() => {})
+ .catch((error) => console.error("Error:", error));
+ }
+
function startUpload(_: string) {
setWaitingForSystem(WaitingStates.UploadingFile);
}
diff --git a/gpt_code_ui/main.py b/gpt_code_ui/main.py
index 5683608b..a2c3d4c1 100644
--- a/gpt_code_ui/main.py
+++ b/gpt_code_ui/main.py
@@ -52,7 +52,7 @@ def print_color(text, color="gray"):
def print_banner():
- print("""
+ print(""" PLEO!!!
█▀▀ █▀█ ▀█▀ ▄▄ █▀▀ █▀█ █▀▄ █▀▀
█▄█ █▀▀ ░█░ ░░ █▄▄ █▄█ █▄▀ ██▄
""")
diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index 332986e5..3e300c34 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -41,7 +41,7 @@
class LimitedLengthString:
- def __init__(self, maxlen=2000):
+ def __init__(self, maxlen=20000):
self.data = deque()
self.len = 0
self.maxlen = maxlen
@@ -91,37 +91,48 @@ def inspect_file(filename: str) -> str:
except Exception:
return '' # file reading failed. - Don't want to know why.
+system=f"""Act as a data analyst hereby referred to as EXPERT with ten years of experience in the domain of expense management and general accounting. Your role is to help inexperienced people analyse data about expenses and accounting. Be sure to help the user understand what to focus on and give suggestions on where it would make sense to dig deeper.
-async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
-
- prompt = f"""First, here is a history of what I asked you to do earlier.
- The actual prompt follows after ENDOFHISTORY.
- History:
- {message_buffer.get_string()}
- ENDOFHISTORY.
- Write Python code, in a triple backtick Markdown code block, that does the following:
- {user_prompt}
+ If generating Python code, follow the instructions under [GENERATE_PYTHON_INSTRUCTIONS].
+ [GENERATE_PYTHON_INSTRUCTIONS]
Notes:
First, think step by step what you want to do and write it down in English.
Then generate valid Python code in a code block
- Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment.
+ Make sure all code is valid - it will be run in a Jupyter Python 3 kernel environment.
Define every variable before you use it.
For data munging, you can use
- 'numpy', # numpy==1.24.3
- 'dateparser' #dateparser==1.1.8
- 'pandas', # matplotlib==1.5.3
- 'geopandas' # geopandas==0.13.2
+ 'numpy', # numpy==1.26.2
+ 'dateparser', # dateparser==1.2.0
+ 'pandas', # pandas==1.5.3
+ 'geopandas', # geopandas==0.14.1
For pdf extraction, you can use
'PyPDF2', # PyPDF2==3.0.1
'pdfminer', # pdfminer==20191125
- 'pdfplumber', # pdfplumber==0.9.0
+ 'pdfplumber', # pdfplumber==0.10.3
For data visualization, you can use
- 'matplotlib', # matplotlib==3.7.1
+ 'matplotlib', # matplotlib==3.8.2
Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module.
If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files)
- Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. Replace INSERT_FILENAME_HERE with the actual filename."""
+ Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. Replace INSERT_FILENAME_HERE with the actual filename.
+ """
+async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
+
+
+ prompt = f"""First, here is a history of what I asked you to do earlier.
+ The actual prompt follows after ENDOFHISTORY.
+ History:
+ {message_buffer.get_string()}
+ ENDOFHISTORY.
+ Aiming to help the user in the best possible way with the below [USER_PROMPT] do one of the following:
+ 1. Write Python code, in a triple backtick Markdown code block, that supports what the user is trying to achieve. Use the instructions under [GENERATE_PYTHON_INSTRUCTIONS]
+ 2. Answer the question the user asks.
+ 3. Ask follow-up questions to better understand what the user wants to achieve.
+
+ [USER_PROMPT]
+ {user_prompt}
+ """
if user_openai_key:
openai.api_key = user_openai_key
@@ -130,7 +141,7 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
temperature=0.7,
headers=OPENAI_EXTRA_HEADERS,
messages=[
- # {"role": "system", "content": system},
+ {"role": "system", "content": system},
{"role": "user", "content": prompt},
]
)
@@ -160,6 +171,8 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
except AttributeError:
return None, f"Malformed answer from API: {content}", 500
+ print('CONTENT FROM CODE:' + content)
+
def extract_code(text):
# Match triple backtick blocks first
triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
@@ -173,6 +186,58 @@ def extract_code(text):
return extract_code(content), content.strip(), 200
+async def get_chat(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
+
+ prompt = f"""First, here is a history of what I asked you to do earlier.
+ The actual prompt follows after ENDOFHISTORY.
+ History:
+ {message_buffer.get_string()}
+ ENDOFHISTORY.
+ {user_prompt}
+ DO NOT GENERATE ANY CODE
+ Teacher mode: if the results return a link to a file that was generated, make sure to include the link in your answer.
+ """
+
+ print('PROMPT FROM CHAT:' + prompt)
+
+ if user_openai_key:
+ openai.api_key = user_openai_key
+
+ arguments = dict(
+ temperature=0.7,
+ headers=OPENAI_EXTRA_HEADERS,
+ messages=[
+ {"role": "system", "content": system},
+ {"role": "user", "content": prompt},
+ ]
+ )
+
+ if openai.api_type == 'open_ai':
+ arguments["model"] = model
+ elif openai.api_type == 'azure':
+ arguments["deployment_id"] = model
+ else:
+ return None, f"Error: Invalid OPENAI_PROVIDER: {openai.api_type}", 500
+
+ try:
+ result_GPT = openai.ChatCompletion.create(**arguments)
+
+ if 'error' in result_GPT:
+ raise openai.APIError(code=result_GPT.error.code, message=result_GPT.error.message)
+
+ if result_GPT.choices[0].finish_reason == 'content_filter':
+ raise openai.APIError('Content Filter')
+
+ except openai.OpenAIError as e:
+ return None, f"Error from API: {e}", 500
+
+ try:
+ content = result_GPT.choices[0].message.content
+
+ except AttributeError:
+ return None, f"Malformed answer from API: {content}", 500
+ return content, 200
+
# We know this Flask app is for local use. So we can disable the verbose Werkzeug logger
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@@ -204,8 +269,11 @@ def models():
@app.route('/api/', methods=["GET", "POST"])
def proxy_kernel_manager(path):
if request.method == "POST":
+ print('starting code execution')
resp = requests.post(
f'http://localhost:{KERNEL_APP_PORT}/{path}', json=request.get_json())
+ requestjson = request.get_json()
+ print(f"""started code execution with status: {resp.status_code} {requestjson}""")
else:
resp = requests.get(f'http://localhost:{KERNEL_APP_PORT}/{path}')
@@ -213,7 +281,7 @@ def proxy_kernel_manager(path):
'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers]
-
+
response = Response(resp.content, resp.status_code, headers)
return response
@@ -236,16 +304,16 @@ def download_file():
@app.route('/inject-context', methods=['POST'])
def inject_context():
user_prompt = request.json.get('prompt', '')
-
- # Append all messages to the message buffer for later use
+ print('INJECTING-CONTEXT:' + user_prompt)
message_buffer.append(user_prompt + "\n\n")
-
+ print('message_buffer: ' + message_buffer.get_string())
return jsonify({"result": "success"})
@app.route('/generate', methods=['POST'])
def generate_code():
user_prompt = request.json.get('prompt', '')
+ print('ACTION:' + user_prompt)
user_openai_key = request.json.get('openAIKey', None)
model = request.json.get('model', None)
@@ -257,11 +325,33 @@ def generate_code():
loop.close()
# Append all messages to the message buffer for later use
- message_buffer.append(user_prompt + "\n\n")
+ message_buffer.append('USER: ' + user_prompt + "\n\n")
return jsonify({'code': code, 'text': text}), status
+@app.route('/chat', methods=['POST'])
+def generate_chat():
+    # All of this input comes from the system, not from the user.
+ user_prompt = request.json.get('prompt', '')
+ user_openai_key = request.json.get('openAIKey', None)
+ model = request.json.get('model', None)
+ print('CHAT_TEXT: ' + user_prompt)
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ text, status = loop.run_until_complete(
+ get_chat(user_prompt, user_openai_key, model))
+ loop.close()
+
+ print('CHAT_TEXT: ' + text)
+
+ # Append all messages to the message buffer for later use
+ message_buffer.append('USER: ' + user_prompt + "\n\n")
+ print(f"""RETURNING TO UI: ${status}""")
+ return jsonify({'text': text}), status
+
+
@app.route('/upload', methods=['POST'])
def upload_file():
# check if the post request has the file part
diff --git a/run.sh b/run.sh
new file mode 100755
index 00000000..d7183b19
--- /dev/null
+++ b/run.sh
@@ -0,0 +1 @@
+rm -rf $HOME/Library/Python/3.9/lib/python/site-packages/gpt_code_ui/ && make build && mv build/lib/gpt_code_ui/ $HOME/Library/Python/3.9/lib/python/site-packages/gpt_code_ui/ && make build && python3 build/lib/gpt_code_ui/main.py