Commit 16d2d9f

Added a new Jupyter Notebook example for building a local Open Interpreter server for a custom front end, including a Flask API endpoint and configuration for local and hosted models.
1 parent 6e003f2 commit 16d2d9f

File tree

1 file changed: +119 −0 lines changed

examples/local_server.ipynb

Lines changed: 119 additions & 0 deletions
@@ -0,0 +1,119 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Build a local Open Interpreter server for a custom front end"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from flask import Flask, request, jsonify\n",
    "from interpreter import interpreter\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "app = Flask(__name__)\n",
    "\n",
    "# Configure Open Interpreter\n",
    "\n",
    "## Local Model\n",
    "# interpreter.offline = True\n",
    "# interpreter.llm.model = \"ollama/llama3.1\"\n",
    "# interpreter.llm.api_base = \"http://localhost:11434\"\n",
    "# interpreter.llm.context_window = 4000\n",
    "# interpreter.llm.max_tokens = 3000\n",
    "# interpreter.auto_run = True\n",
    "# interpreter.verbose = True\n",
    "\n",
    "## Hosted Model\n",
    "interpreter.llm.model = \"gpt-4o\"\n",
    "interpreter.llm.context_window = 10000\n",
    "interpreter.llm.max_tokens = 4096\n",
    "interpreter.auto_run = True\n",
    "\n",
    "# Create an endpoint\n",
    "@app.route('/chat', methods=['POST'])\n",
    "def chat():\n",
    "    # Expected payload: {\"prompt\": \"User's message or question\"}\n",
    "    data = request.get_json(silent=True) or {}\n",
    "    prompt = data.get('prompt')\n",
    "\n",
    "    if not prompt:\n",
    "        return jsonify({\"error\": \"No prompt provided\"}), 400\n",
    "\n",
    "    full_response = \"\"\n",
    "    try:\n",
    "        for chunk in interpreter.chat(prompt, stream=True, display=False):\n",
    "            if isinstance(chunk, dict):\n",
    "                if chunk.get(\"type\") == \"message\":\n",
    "                    full_response += chunk.get(\"content\", \"\")\n",
    "            elif isinstance(chunk, str):\n",
    "                # Attempt to parse the string as JSON\n",
    "                try:\n",
    "                    json_chunk = json.loads(chunk)\n",
    "                    full_response += json_chunk.get(\"response\", \"\")\n",
    "                except json.JSONDecodeError:\n",
    "                    # If it's not valid JSON, just add the string\n",
    "                    full_response += chunk\n",
    "    except Exception as e:\n",
    "        return jsonify({\"error\": str(e)}), 500\n",
    "\n",
    "    return jsonify({\"response\": full_response.strip()})\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Print before app.run(), which blocks until the server stops\n",
    "    print(\"Open Interpreter server is running on http://0.0.0.0:5001\")\n",
    "    app.run(host='0.0.0.0', port=5001)"
   ]
  },
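  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note that `app.run()` blocks the notebook kernel while the server is up. If you want to keep using the notebook, one option is to start Flask in a background thread. This is a minimal sketch, assuming the `app` defined above and Flask's development server only."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import threading\n",
    "\n",
    "# Optional sketch: serve in a daemon thread so later cells can still run.\n",
    "# use_reloader=False is required when Flask runs outside the main thread.\n",
    "server_thread = threading.Thread(\n",
    "    target=lambda: app.run(host='0.0.0.0', port=5001, use_reloader=False),\n",
    "    daemon=True,\n",
    ")\n",
    "server_thread.start()\n",
    "print(\"Open Interpreter server is running on http://0.0.0.0:5001\")"
   ]
  },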
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Make a request to the server"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```bash\n",
    "curl -X POST http://localhost:5001/chat \\\n",
    "  -H \"Content-Type: application/json\" \\\n",
    "  -d '{\"prompt\": \"Hello, how are you?\"}'\n",
    "```"
   ]
  },
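  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The same request from Python, roughly as a custom front end would send it. A minimal sketch, assuming the server above is reachable on localhost:5001 and the `requests` package is installed."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "\n",
    "# Minimal client sketch; assumes the server above is running on port 5001\n",
    "r = requests.post(\n",
    "    'http://localhost:5001/chat',\n",
    "    json={'prompt': 'Hello, how are you?'},\n",
    "    timeout=120,  # interpreter runs can take a while\n",
    ")\n",
    "r.raise_for_status()\n",
    "print(r.json()['response'])"
   ]
  }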
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
