
Commit a45e0b3

Add an installation script
1 parent fc10d19 commit a45e0b3

File tree

19 files changed: +2318 -13 lines changed


.github/workflows/build-python.yml

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
# Reusable GitHub Actions workflow for building Python packages

name: Build python package

on:
  workflow_call:
    inputs:
      python-version:
        description: 'Python version'
        required: false
        type: string
        default: '3.x'
      patch-version:
        description: 'Version to patch into pyproject.toml'
        required: false
        type: string

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Set up Python ${{ inputs.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}

      - name: Patch version in pyproject.toml
        if: inputs.patch-version != ''
        run: |
          VERSION="${{ inputs.patch-version }}"
          echo "Updating pyproject.toml version to $VERSION"
          sed -i -E "s/^version = \".*\"/version = \"$VERSION\"/" pyproject.toml

      - name: Install dependencies and uv
        run: |
          python -m pip install --upgrade pip
          pip install --upgrade build
          pip install uv

      - name: Build the package with uv
        run: |
          uv build
          ls -lh dist/

      - name: Upload built distributions as artifact
        uses: actions/upload-artifact@v4
        with:
          name: python-package-dist
          path: dist/*
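
Because this workflow is exposed via `workflow_call`, other workflows in the repository invoke it as a reusable job; the Installation Test workflow below does exactly that with `python-version` only. For the optional `patch-version` input, a caller would look roughly like the following sketch; the `Release` trigger and the `${{ github.ref_name }}` value are illustrative assumptions, not part of this commit.

```yaml
# Hypothetical caller passing both inputs to the reusable build workflow.
# The trigger and version source here are examples only.
name: Release
on:
  push:
    tags: ["v*"]
jobs:
  build:
    uses: ./.github/workflows/build-python.yml
    with:
      python-version: "3.12"
      patch-version: ${{ github.ref_name }}  # a leading "v" would need stripping for PEP 440
```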
.github/workflows/windows-test.yml

Lines changed: 282 additions & 0 deletions
@@ -0,0 +1,282 @@
# .github/workflows/windows-test.yml

name: Installation Test

# on:
#   push:
#     branches: [ main ]
#   pull_request:
#     branches: [ main ]
on:
  push:
    branches:
      - installation-script-2

jobs:
  build:
    uses: ./.github/workflows/build-python.yml
    with:
      python-version: "3.12"

  check-linux:
    needs: build
    runs-on: ubuntu-latest

    services:
      postgres:
        image: pgvector/pgvector:pg16
        env:
          POSTGRES_USER: memmachine
          POSTGRES_PASSWORD: mammachine_password
          POSTGRES_DB: memmachine
          POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
        ports:
          - 5432:5432
        options: >-
          --health-cmd="pg_isready -U memmachine"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5

      ollama:
        image: ollama/ollama
        ports:
          - 11434:11434

    steps:
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: Set up ollama
        run: |
          set -eo pipefail
          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on Ubuntu
        run: |
          set -eo pipefail
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          memmachine-configure << EOF
          y
          CPU
          Ollama
          qwen3:0.6b
          nomic-embed-text
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          http://localhost:11434/v1
          localhost
          8080
          EOF

          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5

          curl -f http://127.0.0.1:8080/v1/sessions

        shell: bash

  check-windows:
    needs: build
    runs-on: windows-latest

    steps:
      - name: Setup Miniconda
        uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: 3.12
          auto-update-conda: true

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: install postgresql
        run: |
          conda install anaconda::postgresql
          conda install -c conda-forge pgvector
          initdb -D postgres_data --encoding=UTF-8 --lc-collate=C --lc-ctype=C
          createuser -s memmachine
          createdb -O memmachine memmachine
          psql -d postgres -c "ALTER USER memmachine WITH PASSWORD 'mammachine_password';"
          psql -d memmachine -U memmachine -c "CREATE EXTENSION IF NOT EXISTS vector;"

      - name: Set up ollama
        shell: bash
        run: |
          set -eo pipefail
          winget install Ollama.Ollama --accept-source-agreements

          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on Windows
        run: |
          set -eo pipefail
          export PYTHONUTF8=1
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          memmachine-configure << EOF
          y

          CPU
          Ollama
          qwen3:0.6b
          nomic-embed-text
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          http://localhost:11434/v1
          localhost
          8080
          EOF

          python -c "import nltk; nltk.download('punkt')"
          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5

          curl -f http://127.0.0.1:8080/v1/sessions

        shell: bash

  check-macos:
    needs: build
    runs-on: macos-latest

    steps:
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: Set up postgresql (pgvector) using Homebrew
        run: |
          set -eo pipefail
          brew update
          brew upgrade

          brew install postgresql@18
          brew install pgvector
          rm -rf /opt/homebrew/var/postgresql@18
          "/opt/homebrew/opt/postgresql@18/bin/initdb" -D /opt/homebrew/var/postgresql@18 --encoding=UTF-8 --lc-collate=C --lc-ctype=C
          brew services start postgresql@18

          export PATH="/opt/homebrew/opt/postgresql@18/bin:$PATH"
          while ! pg_isready -q; do
            sleep 1
          done

          createuser -s memmachine
          createdb -O memmachine memmachine
          psql -d postgres -c "ALTER USER memmachine WITH PASSWORD 'mammachine_password';"
          psql -d memmachine -U memmachine -c "CREATE EXTENSION IF NOT EXISTS vector;"

      - name: Set up ollama
        run: |
          set -eo pipefail
          brew install ollama
          brew services start ollama
          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on macOS
        run: |
          set -eo pipefail
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          memmachine-configure << EOF
          y
          CPU
          OpenAI
          gpt-4o-mini
          text-embedding-3-small
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          sk-test
          localhost
          8080
          EOF
          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5
          curl -f http://127.0.0.1:8080/v1/sessions
        shell: bash
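
For reference, the database answers fed to `memmachine-configure` in the heredocs above (user, password, database, host, and port) correspond to a single connection string. A quick manual check against the same pgvector instance might look like this sketch, assuming `psql` is on the PATH and the Postgres service from this workflow is running:

```bash
# Manual sanity check of the Postgres/pgvector setup used by the smoke tests.
# The DSN is assembled from the values answered in the configure heredoc.
psql "postgresql://memmachine:mammachine_password@localhost:5432/memmachine" \
  -c "SELECT extname FROM pg_extension WHERE extname = 'vector';"
```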

.vscode/launch.json

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Remote Attach",
            "type": "debugpy",
            "request": "attach",
            "connect": {
                "host": "localhost",
                "port": 5678
            },
            "pathMappings": [
                {
                    "localRoot": "${workspaceFolder}",
                    "remoteRoot": "."
                }
            ],
            "justMyCode": false
        }
    ]
}
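
This launch configuration only attaches to an already-running debugpy listener on port 5678; it does not start the server itself. One way to start a process so the configuration can attach is sketched below; it assumes a POSIX shell and that `memmachine-server` (the console script used in the workflows above) resolves to a Python script, which is not the case for the `.exe` entry points on Windows.

```bash
# Start the server under debugpy so VS Code's "Remote Attach" config can connect.
# --wait-for-client pauses the process until the debugger attaches (optional).
pip install debugpy
python -m debugpy --listen localhost:5678 --wait-for-client "$(command -v memmachine-server)"
```

Once attached, `pathMappings` maps `${workspaceFolder}` onto the remote working directory so breakpoints resolve against the local checkout.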

DOCKER_COMPOSE_README.md

Lines changed: 2 additions & 2 deletions
@@ -9,14 +9,14 @@
 ### 1. Configure Environment
 Copy the example environment file and add your OpenAI API key:
 ```bash
-cp sample_configs/env.dockercompose .env
+cp src/memmachine/sample_configs/env.dockercompose .env
 # Edit .env and add your OPENAI_API_KEY
 ```

 ### 2. Configure MemMachine
 Copy the sample configuration file and update it with your settings:
 ```bash
-cp sample_configs/episodic_memory_config.sample configuration.yml
+cp src/memmachine/sample_configs/episodic_memory_config.sample configuration.yml
 # Edit configuration.yml and update:
 # - Replace <YOUR_API_KEY> with your OpenAI API key
 # - Replace <YOUR_PASSWORD_HERE> with your Neo4j password
