@@ -36,18 +36,21 @@ check_docker_compose() {
 }
 check_docker_compose
 
+# check the operating system
+# this is required in case local Ollama is used
+# reference: https://stackoverflow.com/a/68706298
 OS=""
 check_os() {
     unameOut=$(uname -a)
     case "${unameOut}" in
-        *Microsoft*) OS="WSL" ;;  # must be first since Windows subsystem for linux will have Linux in the name too
-        *microsoft*) OS="WSL2" ;; # WARNING: My v2 uses ubuntu 20.4 at the moment slightly different name may not always work
-        Linux*)      OS="Linux" ;;
-        Darwin*)     OS="Mac" ;;
-        CYGWIN*)     OS="Cygwin" ;;
-        MINGW*)      OS="Windows" ;;
-        *Msys)       OS="Windows" ;;
-        *)           OS="UNKNOWN:${unameOut}"
+        *Microsoft*) OS="WSL" ;;  # must be first since WSL will have Linux in the name too
+        *microsoft*) OS="WSL2" ;; # WARNING: my WSL2 uses Ubuntu 20.04 at the moment; a slightly different name may not always work
+        Linux*)      OS="Linux" ;;
+        Darwin*)     OS="Mac" ;;
+        CYGWIN*)     OS="Cygwin" ;;
+        MINGW*)      OS="Windows" ;;
+        *Msys)       OS="Windows" ;;
+        *)           OS="UNKNOWN:${unameOut}"
     esac
 }
 check_os
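
A note on the detection above: on WSL the "microsoft" marker does not appear in the OS name (`uname -s` still prints `Linux`), it appears in the kernel release string, which is why the script matches against the full `uname -a` output. Illustrative shell session (the version string is an example; actual values differ per install):

    uname -s   # Linux  (same on native Linux and on WSL)
    uname -r   # e.g. 5.15.153.1-microsoft-standard-WSL2 on WSL2
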
@@ -71,6 +74,11 @@ MODELS_LIST=""
 LOCAL_OLLAMA_PID=""
 DOCKER_HOST="http://host.docker.internal"
 
+# this is the default network mode, but
+# based on local Ollama & the OS we may set it to `host`
+# https://docs.docker.com/engine/network/#drivers
+DKN_DOCKER_NETWORK_MODE=bridge
+
 # handle command line arguments
 while [ "$#" -gt 0 ]; do
     case $1 in
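
For context, setting `DKN_DOCKER_NETWORK_MODE` only takes effect if the compose file interpolates it; that wiring is not part of this diff. A minimal sketch of the intended effect (a `network_mode: ${DKN_DOCKER_NETWORK_MODE}` key under the compute service in compose.yml is an assumption):

    # hedged example: override the network mode for a single run
    DKN_DOCKER_NETWORK_MODE=host docker compose up -d
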
@@ -90,13 +98,18 @@ while [ "$#" -gt 0 ]; do
         --trace)
             RUST_LOG="none,dkn_compute=trace"
             ;;
+
         -b|--background) START_MODE="BACKGROUND" ;;
+
         -h|--help) docs ;;
+
         *) echo "ERROR: Unknown parameter passed: $1"; exit 1 ;;
     esac
     shift
 done
 
+# check required environment variables
+# we only need the secret key & admin public key
 check_required_env_vars() {
     required_vars="
         DKN_WALLET_SECRET_KEY
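
The rest of `check_required_env_vars` is cut off by the hunk. Based on its comment, it plausibly walks the list and aborts on a missing value; a hedged POSIX-sh sketch (the `DKN_ADMIN_PUBLIC_KEY` name is inferred from the comment, not visible in this diff):

    for v in $required_vars; do    # e.g. DKN_WALLET_SECRET_KEY, DKN_ADMIN_PUBLIC_KEY
        eval "val=\${$v}"          # indirect expansion, POSIX-compatible
        if [ -z "$val" ]; then
            echo "ERROR: $v is not set"
            exit 1
        fi
    done
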
@@ -143,6 +156,7 @@ handle_compute_env() {
         ANTHROPIC_API_KEY
         RUST_LOG
         DKN_MODELS
+        DKN_DOCKER_NETWORK_MODE
     "
     as_pairs $compute_env_vars > /dev/null 2>&1
@@ -163,6 +177,7 @@ handle_ollama_env() {
         OLLAMA_PORT
         OLLAMA_AUTO_PULL
     "
+    # loads env variables (TODO: !)
     as_pairs "$ollama_env_vars" > /dev/null 2>&1
 
     # if there is no ollama model given, do not add any ollama compose profile
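
The code behind that comment falls outside the hunk; a hedged sketch of the kind of check it describes (the pattern match and the early `return` are assumptions):

    case "$DKN_MODELS" in
        *ollama*) ;;        # at least one Ollama model requested, continue
        *)        return ;; # no Ollama models: skip the Ollama setup entirely
    esac
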
@@ -178,7 +193,10 @@ handle_ollama_env() {
         return
     fi
 
-    # check local ollama
+    # check local Ollama first:
+    # if it can be found, try launching it & configure the network so the container can reach localhost
+    # if not, use the Docker Ollama image
+    # if the user explicitly wants the Docker Ollama image, this condition skips the local checks
     if [ "$DOCKER_OLLAMA" = false ]; then
         if command -v ollama > /dev/null 2>&1; then
             # host machine has ollama installed
@@ -203,10 +221,11 @@ handle_ollama_env() {
         curl -s -o /dev/null -w "%{http_code}" ${ollama_url}
     }
 
+    # check if Ollama is already running
     if [ "$(check_ollama_server)" -eq 200 ]; then
         echo "Local Ollama is already up at $ollama_url and running, using it"
-        # Using already running local Ollama
     else
+        # Ollama is not live, so we launch it ourselves
         echo "Local Ollama is not live, running ollama serve"
 
         # `ollama serve` uses `OLLAMA_HOST` variable with both host and port,
@@ -216,7 +235,7 @@ handle_ollama_env() {
         eval "ollama serve >/dev/null 2>&1 &"
         OLLAMA_HOST=$temp_ollama_host
 
-        # we grab the PID of Ollama
+        # grab the PID of Ollama
         temp_pid=$!
 
         # Loop until the server responds with HTTP 200 or the retry limit is reached
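
Only the counter increment and the closing `done` of that loop are visible in the next hunk; the elided part presumably resembles the sketch below (the `sleep` interval and the `MAX_RETRIES` initialization are assumptions):

    RETRY_COUNT=0
    until [ "$(check_ollama_server)" -eq 200 ] || [ "$RETRY_COUNT" -ge "$MAX_RETRIES" ]; do
        sleep 1
        RETRY_COUNT=$((RETRY_COUNT + 1))
    done
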
@@ -228,51 +247,60 @@ handle_ollama_env() {
             RETRY_COUNT=$((RETRY_COUNT + 1))
         done
 
+        # exit with an error if we couldn't launch Ollama
         if [ "$RETRY_COUNT" -ge "$MAX_RETRIES" ]; then
             echo "Local Ollama server failed to start after $MAX_RETRIES attempts."
             echo "You can use the --docker-ollama flag to use the Docker Ollama image instead."
             exit 1
         else
             LOCAL_OLLAMA_PID=$temp_pid
             echo "Local Ollama server is up at $ollama_url and running with PID $LOCAL_OLLAMA_PID"
-            # Using local ollama
         fi
     fi
-        # Depending on the host os, use localhost or host.docker.internal for Ollama host
-        if [ "$OS" = "Mac" ]; then
-            OLLAMA_HOST="http://host.docker.internal"
-        elif [ "$OS" = "Linux" ]; then
+
+        # to use the local Ollama we need to configure the network depending on the host OS:
+        # Windows and Mac work with host.docker.internal,
+        # but Linux requires `host` network mode with `localhost` as the host URL
+        if [ "$OS" = "Linux" ]; then
             OLLAMA_HOST="http://localhost"
+            DKN_DOCKER_NETWORK_MODE=host
+        else
+            OLLAMA_HOST="http://host.docker.internal"
         fi
-        return
     else
+        # although --docker-ollama was not passed, we checked and couldn't find Ollama,
+        # so we will use Docker anyway
+        echo "Ollama is not installed on this machine, will use Docker Ollama service"
         DOCKER_OLLAMA=true
-        echo "Ollama is not installed on this machine, using the Docker ollama instead"
     fi
     fi
 
-    # check for cuda gpu
-    if command -v nvidia-smi > /dev/null 2>&1; then
-        if nvidia-smi > /dev/null 2>&1; then
-            echo "GPU type detected: CUDA"
-            COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cuda"
-            return
-        fi
-    fi
+    # this is a separate if condition rather than an `else`, due to the fallback condition above
+    if [ "$DOCKER_OLLAMA" = true ]; then
+        # check for a CUDA gpu
+        if command -v nvidia-smi > /dev/null 2>&1; then
+            if nvidia-smi > /dev/null 2>&1; then
+                echo "GPU type detected: CUDA"
+                COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cuda"
+            fi
+        # check for a ROCm gpu
+        elif command -v rocminfo > /dev/null 2>&1; then
+            if rocminfo > /dev/null 2>&1; then
+                echo "GPU type detected: ROCM"
+                COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-rocm"
+            fi
+        # otherwise, fall back to CPU
+        else
+            echo "No GPU detected, using CPU"
+            COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cpu"
+        fi
 
-    # check for rocm gpu
-    if command -v rocminfo > /dev/null 2>&1; then
-        if rocminfo > /dev/null 2>&1; then
-            echo "GPU type detected: ROCM"
-            COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-rocm"
-            return
-        fi
+        # use docker internal for the Ollama host
+        OLLAMA_HOST=$DOCKER_HOST
+        DKN_DOCKER_NETWORK_MODE=bridge
     fi
 
-    # if there are no local ollama and gpu, use docker-compose with cpu profile
-    echo "No GPU found, using ollama-cpu"
-    COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cpu"
-    OLLAMA_HOST=$DOCKER_HOST
+    echo "Ollama host: $OLLAMA_HOST (network mode: $DKN_DOCKER_NETWORK_MODE)"
 }
 handle_ollama_env
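
For reference, `COMPOSE_PROFILES` is a standard Docker Compose environment variable: services tagged with a profile are started only when that profile is listed (untagged services always start). For example, with the profile names used in this script:

    COMPOSE_PROFILES="ollama-cuda" docker compose up -d   # starts the CUDA Ollama service set
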
@@ -298,10 +326,12 @@ echo ""
 echo "Starting in ${START_MODE} mode..."
 echo "Log level: ${RUST_LOG}"
 echo "Models: ${DKN_MODELS}"
+echo "Operating System: ${OS}"
 echo "${COMPOSE_PROFILES}"
 echo ""
 eval "${COMPOSE_UP}"
 
+# grab the exit code of docker compose
 compose_exit_code=$?
 
 # handle docker-compose error
@@ -312,6 +342,7 @@ if [ $compose_exit_code -ne 0 ]; then
 fi
 
 echo "All good! Compute node is up and running."
+echo "You can check logs with: docker compose logs -f compute"
 
 # background/foreground mode
 if [ "$START_MODE" = "FOREGROUND" ]; then