Skip to content

Commit 0567c07

Browse files
committed
Add an installation script
1 parent fc10d19 commit 0567c07

File tree

16 files changed

+2105
-13
lines changed

16 files changed

+2105
-13
lines changed

.github/workflows/build-python.yml

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
---
# Reusable GitHub Actions workflow for building Python packages.
#
# Called via `workflow_call` from other workflows. It optionally patches the
# version string in pyproject.toml, builds sdist/wheel with `uv build`, and
# uploads the resulting dist/ contents as the `python-package-dist` artifact.

name: Build python package

on:
  workflow_call:
    inputs:
      python-version:
        description: 'Python version'
        required: false
        type: string
        default: '3.x'
      patch-version:
        # When empty (the default), the patch step below is skipped.
        description: 'Version to patch into pyproject.toml'
        required: false
        type: string

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Set up Python ${{ inputs.python-version }}
        uses: actions/setup-python@v5
        with:
          # Quoted so an empty or numeric-looking expansion stays a string.
          python-version: "${{ inputs.python-version }}"

      - name: Patch version in pyproject.toml
        if: inputs.patch-version != ''
        run: |
          VERSION="${{ inputs.patch-version }}"
          echo "Updating pyproject.toml version to $VERSION"
          # Rewrite the top-level `version = "..."` line in place.
          sed -i -E "s/^version = \".*\"/version = \"$VERSION\"/" pyproject.toml

      - name: Install dependencies and uv
        run: |
          python -m pip install --upgrade pip
          pip install --upgrade build
          pip install uv

      - name: Build the package with uv
        run: |
          uv build
          ls -lh dist/

      - name: Upload built distributions as artifact
        uses: actions/upload-artifact@v4
        with:
          name: python-package-dist
          path: dist/*
Lines changed: 283 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,283 @@
---
# .github/workflows/windows-test.yml
#
# End-to-end installation test. Builds the wheel via the reusable
# build-python.yml workflow, then installs and smoke-tests it on Linux,
# Windows, and macOS runners: configure memmachine against a local
# Postgres (pgvector) and a local model backend, start the server, and
# hit /v1/sessions.
#
# NOTE(review): the password "mammachine_password" looks like a typo for
# "memmachine_password". It is used consistently throughout this file, so
# the workflow works as-is — if it is ever renamed, every occurrence below
# must change in lockstep (Postgres env, psql ALTER USER, and each
# memmachine-configure heredoc).

name: Installation Test

# on:
#   push:
#     branches: [ main ]
#   pull_request:
#     branches: [ main ]
on:
  push:
    branches:
      - installation-script-2

jobs:
  build:
    uses: ./.github/workflows/build-python.yml
    with:
      python-version: "3.12"

  check-linux:
    needs: build
    runs-on: ubuntu-latest

    services:
      postgres:
        image: pgvector/pgvector:pg16
        env:
          POSTGRES_USER: memmachine
          POSTGRES_PASSWORD: mammachine_password
          POSTGRES_DB: memmachine
          POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
        ports:
          # Quoted: colon-separated digit pairs are ambiguous plain scalars.
          - "5432:5432"
        options: >-
          --health-cmd="pg_isready -U memmachine"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5

      ollama:
        image: ollama/ollama
        ports:
          - "11434:11434"

    steps:
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: Set up ollama
        run: |
          set -eo pipefail
          # Wait for the ollama service container to accept connections.
          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on Ubuntu
        run: |
          set -eo pipefail
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          # Scripted answers to the interactive configurator; the exact
          # lines below match the prompt order and must not be reformatted.
          memmachine-configure << EOF
          y
          CPU
          Ollama
          qwen3:0.6b
          nomic-embed-text
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          http://localhost:11434/v1
          localhost
          8080
          EOF

          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5

          curl -f http://127.0.0.1:8080/v1/sessions
          # Stop the background server so the step cannot hang on it.
          kill "$server_pid" || true

        shell: bash

  check-windows:
    needs: build
    runs-on: windows-latest

    steps:
      - name: Setup Miniconda
        uses: conda-incubator/setup-miniconda@v3
        with:
          # Quoted: a bare 3.12 is parsed as a YAML float, not a string.
          python-version: "3.12"
          auto-update-conda: true

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: install postgresql
        run: |
          # conda install -c conda-forge postgresql
          conda install -c conda-forge pgvector
          initdb -D postgres_data --encoding=UTF-8 --lc-collate=C --lc-ctype=C
          pg_ctl -D postgres_data -o "-F" -l logfile start
          createuser -U runneradmin -s memmachine
          createdb -U runneradmin -O memmachine memmachine
          psql -U runneradmin -d postgres -c "ALTER USER memmachine WITH PASSWORD 'mammachine_password';"
          psql -d memmachine -U memmachine -c "CREATE EXTENSION IF NOT EXISTS vector;"

      - name: Set up ollama
        shell: bash
        run: |
          set -eo pipefail
          winget install Ollama.Ollama --accept-source-agreements

          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on Windows
        run: |
          set -eo pipefail
          export PYTHONUTF8=1
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          # The blank line after "y" is a deliberate extra Enter for an
          # additional prompt on Windows — do not remove it.
          memmachine-configure << EOF
          y

          CPU
          Ollama
          qwen3:0.6b
          nomic-embed-text
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          http://localhost:11434/v1
          localhost
          8080
          EOF

          python -c "import nltk; nltk.download('punkt')"
          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5

          curl -f http://127.0.0.1:8080/v1/sessions
          # Stop the background server so the step cannot hang on it.
          kill "$server_pid" || true

        shell: bash

  check-macos:
    needs: build
    runs-on: macos-latest

    steps:
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Download built distributions
        uses: actions/download-artifact@v4
        with:
          name: python-package-dist

      - name: Set up postgresql (pgvector) using Homebrew
        run: |
          set -eo pipefail
          brew update
          brew upgrade

          brew install postgresql@18
          brew install pgvector
          # Re-init the data dir with the collation settings we need.
          rm -rf /opt/homebrew/var/postgresql@18
          "/opt/homebrew/opt/postgresql@18/bin/initdb" -D /opt/homebrew/var/postgresql@18 --encoding=UTF-8 --lc-collate=C --lc-ctype=C
          brew services start postgresql@18

          export PATH="/opt/homebrew/opt/postgresql@18/bin:$PATH"
          while ! pg_isready -q; do
            sleep 1
          done

          createuser -s memmachine
          createdb -O memmachine memmachine
          psql -d postgres -c "ALTER USER memmachine WITH PASSWORD 'mammachine_password';"
          psql -d memmachine -U memmachine -c "CREATE EXTENSION IF NOT EXISTS vector;"

      - name: Set up ollama
        run: |
          set -eo pipefail
          brew install ollama
          brew services start ollama
          until curl -s http://localhost:11434/ > /dev/null; do
            echo "Ollama not up yet, sleeping..."
            sleep 1
          done

          echo "Ollama is up, loading model..."
          curl http://localhost:11434/api/pull -d '{
            "name": "qwen3:0.6b"
          }' -v
          echo "pulled model qwen3:0.6b into ollama"

          curl http://localhost:11434/api/pull -d '{
            "name": "nomic-embed-text"
          }' -v
          echo "pulled model nomic-embed-text into ollama"

      - name: Test install on macOS
        run: |
          set -eo pipefail
          whl_name=$(ls *.whl)
          echo "Installing wheel file: $whl_name"
          python -m pip install $whl_name
          echo "configuring memmachine"
          # NOTE(review): this job configures the OpenAI backend with the
          # placeholder key "sk-test" even though ollama is set up above —
          # presumably intentional for a config-path smoke test; confirm.
          memmachine-configure << EOF
          y
          CPU
          OpenAI
          gpt-4o-mini
          text-embedding-3-small
          localhost
          5432
          memmachine
          mammachine_password
          memmachine
          sk-test
          localhost
          8080
          EOF
          memmachine-nltk-setup
          memmachine-sync-profile-schema

          memmachine-server &
          server_pid=$!
          sleep 5
          curl -f http://127.0.0.1:8080/v1/sessions
          # Stop the background server so the step cannot hang on it.
          kill "$server_pid" || true
        shell: bash

DOCKER_COMPOSE_README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,14 @@
99
### 1. Configure Environment
1010
Copy the example environment file and add your OpenAI API key:
1111
```bash
12-
cp sample_configs/env.dockercompose .env
12+
cp src/memmachine/sample_configs/env.dockercompose .env
1313
# Edit .env and add your OPENAI_API_KEY
1414
```
1515

1616
### 2. Configure MemMachine
1717
Copy the sample configuration file and update it with your settings:
1818
```bash
19-
cp sample_configs/episodic_memory_config.sample configuration.yml
19+
cp src/memmachine/sample_configs/episodic_memory_config.sample configuration.yml
2020
# Edit configuration.yml and update:
2121
# - Replace <YOUR_API_KEY> with your OpenAI API key
2222
# - Replace <YOUR_PASSWORD_HERE> with your Neo4j password

docs/install_guide/install_guide.mdx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,13 +88,13 @@ MemMachine uses a single configuration file, `cfg.yml` to provide login and opti
8888

8989
Create a file named `cfg.yml` in the same directory that you plan on running memmachine from. This file configures the various models and storage options for MemMachine.
9090

91-
There are two examples of `cfg.yml` files in the `sample_configs` directory of the GitHub repository: one for CPU-only installations and another for GPU-enabled setups. Be sure to choose the one that matches your environment.
91+
There are two examples of `cfg.yml` files in the `src/memmachine/sample_configs` directory of the GitHub repository: one for CPU-only installations and another for GPU-enabled setups. Be sure to choose the one that matches your environment.
9292

9393
<AccordionGroup>
9494
<Accordion title="CPU-Only Installations">
9595
You can download this file from our GitHub repository as a template using the following curl command:
9696
```sh
97-
curl -o cfg.yml https://raw.githubusercontent.com/MemMachine/MemMachine/refs/heads/main/sample_configs/episodic_memory_config.cpu.sample
97+
curl -o cfg.yml https://raw.githubusercontent.com/MemMachine/MemMachine/refs/heads/main/src/memmachine/sample_configs/episodic_memory_config.cpu.sample
9898
```
9999
Below is an example of what the CPU-only `config.yml` file should look like:
100100

@@ -172,7 +172,7 @@ prompt:
172172
<Accordion title="GPU-Enabled Installations">
173173
You can download this file from our GitHub repository as a template using the following curl command:
174174
```sh
175-
curl -o cfg.yml https://raw.githubusercontent.com/MemMachine/MemMachine/refs/heads/main/sample_configs/episodic_memory_config.gpu.sample
175+
curl -o cfg.yml https://raw.githubusercontent.com/MemMachine/MemMachine/refs/heads/main/src/memmachine/sample_configs/episodic_memory_config.gpu.sample
176176
```
177177
Below is an example of what the GPU-enabled `config.yml` file should look like:
178178

0 commit comments

Comments
 (0)