
Commit 7fe02d2

Merge pull request #100 from firstbatchxyz/erhant/fix-network-host [skip ci]
rm redundant vars, rfk start.sh, add network mode
2 parents bebeb97 + aa0da18 commit 7fe02d2

3 files changed: +70 -41 lines changed


.env.example

Lines changed: 0 additions & 2 deletions
@@ -9,8 +9,6 @@ DKN_ADMIN_PUBLIC_KEY=0208ef5e65a9c656a6f92fb2c770d5d5e2ecffe02a6aade19207f75110b
 DKN_MODELS=phi3:3.8b
 
 ## DRIA (optional) ##
-# info | debug | error | none,dkn_compute=debug
-DKN_LOG_LEVEL=info
 # P2P address, you don't need to change this unless you really want this port.
 DKN_P2P_LISTEN_ADDR=/ip4/0.0.0.0/tcp/4001
 # Comma-separated static relay nodes

compose.yml

Lines changed: 2 additions & 2 deletions
@@ -7,7 +7,7 @@ services:
       DKN_WALLET_SECRET_KEY: ${DKN_WALLET_SECRET_KEY}
       DKN_ADMIN_PUBLIC_KEY: ${DKN_ADMIN_PUBLIC_KEY}
       DKN_MODELS: ${DKN_MODELS}
-      RUST_LOG: ${RUST_LOG}
+      RUST_LOG: ${RUST_LOG-none,dkn_compute=info}
       DKN_P2P_LISTEN_ADDR: ${DKN_P2P_LISTEN_ADDR}
       DKN_RELAY_NODES: ${DKN_RELAY_NODES}
       DKN_BOOTSTRAP_NODES: ${DKN_BOOTSTRAP_NODES}
@@ -17,7 +17,7 @@ services:
       OLLAMA_HOST: ${OLLAMA_HOST}
       OLLAMA_PORT: ${OLLAMA_PORT}
       OLLAMA_AUTO_PULL: ${OLLAMA_AUTO_PULL:-true}
-    network_mode: "host"
+    network_mode: ${DKN_DOCKER_NETWORK_MODE:-bridge}
     extra_hosts:
       # for Linux, we need to add this line manually
       - "host.docker.internal:host-gateway"

start.sh

Lines changed: 68 additions & 37 deletions
@@ -36,18 +36,21 @@ check_docker_compose() {
 }
 check_docker_compose
 
+# check the operating system
+# this is required in case local Ollama is used
+# reference: https://stackoverflow.com/a/68706298
 OS=""
 check_os() {
   unameOut=$(uname -a)
   case "${unameOut}" in
-    *Microsoft*) OS="WSL";; #must be first since Windows subsystem for linux will have Linux in the name too
-    *microsoft*) OS="WSL2";; #WARNING: My v2 uses ubuntu 20.4 at the moment slightly different name may not always work
-    Linux*) OS="Linux";;
-    Darwin*) OS="Mac";;
-    CYGWIN*) OS="Cygwin";;
-    MINGW*) OS="Windows";;
-    *Msys) OS="Windows";;
-    *) OS="UNKNOWN:${unameOut}"
+    *Microsoft*) OS="WSL";; # must be first since WSL will have Linux in the name too
+    *microsoft*) OS="WSL2";; # WARNING: tested on WSL2 with Ubuntu 20.04; a slightly different name may not always match
+    Linux*) OS="Linux";;
+    Darwin*) OS="Mac";;
+    CYGWIN*) OS="Cygwin";;
+    MINGW*) OS="Windows";;
+    *Msys) OS="Windows";;
+    *) OS="UNKNOWN:${unameOut}"
   esac
 }
 check_os
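The pattern order matters here: on WSL, `uname -a` contains both "Microsoft" and "Linux", so the `*Microsoft*` arm must come before `Linux*`. A few illustrative inputs and the value check_os would assign (sample strings, not captured outputs):

    # "Linux host 4.4.0-19041-Microsoft ... x86_64 GNU/Linux" -> OS="WSL"
    # "Linux host 5.15.90.1-microsoft-standard-WSL2 ..."      -> OS="WSL2"
    # "Linux host 6.1.0 ... x86_64 GNU/Linux"                 -> OS="Linux"
    # "Darwin host 23.1.0 ... arm64"                          -> OS="Mac"
    check_os
    echo "Detected OS: $OS"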
@@ -71,6 +74,11 @@ MODELS_LIST=""
 LOCAL_OLLAMA_PID=""
 DOCKER_HOST="http://host.docker.internal"
 
+# this is the default network mode, but
+# based on local Ollama & OS we may set it to `host`
+# https://docs.docker.com/engine/network/#drivers
+DKN_DOCKER_NETWORK_MODE=bridge
+
 # handle command line arguments
 while [ "$#" -gt 0 ]; do
   case $1 in
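Since compose.yml now reads `network_mode: ${DKN_DOCKER_NETWORK_MODE:-bridge}`, whatever value start.sh ends up exporting decides the container's network driver. One way to verify what Compose will resolve (an illustrative check, not part of the script):

    # render the effective configuration and inspect the resolved network mode
    DKN_DOCKER_NETWORK_MODE=host docker compose config | grep network_mode
    # with the variable unset, the `:-bridge` default applies
    docker compose config | grep network_mode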
@@ -90,13 +98,18 @@ while [ "$#" -gt 0 ]; do
     --trace)
       RUST_LOG="none,dkn_compute=trace"
       ;;
+
     -b|--background) START_MODE="BACKGROUND" ;;
+
     -h|--help) docs ;;
+
     *) echo "ERROR: Unknown parameter passed: $1"; exit 1 ;;
   esac
   shift
 done
 
+# check required environment variables
+# we only need the secret key & admin public key
 check_required_env_vars() {
   required_vars="
     DKN_WALLET_SECRET_KEY
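For reference, the flags handled above can be combined on one command line; a couple of hypothetical invocations:

    # verbose compute logs, detached from the terminal
    ./start.sh --trace --background
    # equivalent short form for background mode
    ./start.sh --trace -b
    # print the help text and exit
    ./start.sh --help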
@@ -143,6 +156,7 @@ handle_compute_env() {
     ANTHROPIC_API_KEY
     RUST_LOG
     DKN_MODELS
+    DKN_DOCKER_NETWORK_MODE
   "
   as_pairs $compute_env_vars > /dev/null 2>&1
 
@@ -163,6 +177,7 @@ handle_ollama_env() {
     OLLAMA_PORT
     OLLAMA_AUTO_PULL
   "
+  # loads env variables (TODO: !)
   as_pairs "$ollama_env_vars" > /dev/null 2>&1
 
   # if there is no ollama model given, do not add any ollama compose profile
@@ -178,7 +193,10 @@ handle_ollama_env() {
     return
   fi
 
-  # check local ollama
+  # check local ollama first
+  # if it can be found, try launching it & configure network to be able to connect to localhost
+  # if not, use the docker ollama image
+  # if the user explicitly wants to use the docker ollama image, this condition skips the local checks
   if [ "$DOCKER_OLLAMA" = false ]; then
     if command -v ollama >/dev/null 2>&1; then
       # host machine has ollama installed
@@ -203,10 +221,11 @@ handle_ollama_env() {
         curl -s -o /dev/null -w "%{http_code}" ${ollama_url}
       }
 
+      # check if ollama is already running
       if [ "$(check_ollama_server)" -eq 200 ]; then
         echo "Local Ollama is already up at $ollama_url and running, using it"
-        # Using already running local Ollama
       else
+        # ollama is not live, so we launch it ourselves
         echo "Local Ollama is not live, running ollama serve"
 
         # `ollama serve` uses `OLLAMA_HOST` variable with both host and port,
@@ -216,7 +235,7 @@ handle_ollama_env() {
         eval "ollama serve >/dev/null 2>&1 &"
         OLLAMA_HOST=$temp_ollama_host
 
-        # we grab the PID of Ollama
+        # grab the PID of Ollama
         temp_pid=$!
 
         # Loop until the server responds with HTTP 200 or the retry limit is reached
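The loop mentioned in that last context line is elided from this hunk; a minimal sketch of such a poll, assuming a MAX_RETRIES bound and a one-second sleep between attempts (both assumed values):

    # poll until the server answers HTTP 200 or the retry budget runs out
    RETRY_COUNT=0
    MAX_RETRIES=5   # assumed bound
    while [ "$(check_ollama_server)" -ne 200 ] && [ "$RETRY_COUNT" -lt "$MAX_RETRIES" ]; do
      sleep 1       # assumed interval
      RETRY_COUNT=$((RETRY_COUNT + 1))
    done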
@@ -228,51 +247,60 @@ handle_ollama_env() {
           RETRY_COUNT=$((RETRY_COUNT + 1))
         done
 
+        # exit with an error if we couldn't launch Ollama
         if [ "$RETRY_COUNT" -ge "$MAX_RETRIES" ]; then
           echo "Local Ollama server failed to start after $MAX_RETRIES attempts."
           echo "You can use the --docker-ollama flag to use the Docker Ollama image instead."
           exit 1
         else
           LOCAL_OLLAMA_PID=$temp_pid
           echo "Local Ollama server is up at $ollama_url and running with PID $LOCAL_OLLAMA_PID"
-          # Using local ollama
         fi
       fi
-      # Depending on the host os, use localhost or host.docker.internal for Ollama host
-      if [ "$OS" = "Mac" ]; then
-        OLLAMA_HOST="http://host.docker.internal"
-      elif [ "$OS" = "Linux" ]; then
+
+      # to use the local Ollama, we need to configure the network depending on the host OS:
+      # Windows and Mac should work with host.docker.internal alright,
+      # but Linux requires `host` network mode with `localhost` as the host URL
+      if [ "$OS" = "Linux" ]; then
         OLLAMA_HOST="http://localhost"
+        DKN_DOCKER_NETWORK_MODE=host
+      else
+        OLLAMA_HOST="http://host.docker.internal"
       fi
-      return
     else
+      # although --docker-ollama was not passed, we checked and couldn't find Ollama,
+      # so we will use Docker anyway
+      echo "Ollama is not installed on this machine, will use Docker Ollama service"
       DOCKER_OLLAMA=true
-      echo "Ollama is not installed on this machine, using the Docker ollama instead"
     fi
   fi
 
-  # check for cuda gpu
-  if command -v nvidia-smi >/dev/null 2>&1; then
-    if nvidia-smi >/dev/null 2>&1; then
-      echo "GPU type detected: CUDA"
-      COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cuda"
-      return
-    fi
-  fi
+  # this is in a separate if condition rather than `else`, due to the fallback condition above
+  if [ "$DOCKER_OLLAMA" = true ]; then
+    # check for cuda gpu
+    if command -v nvidia-smi >/dev/null 2>&1; then
+      if nvidia-smi >/dev/null 2>&1; then
+        echo "GPU type detected: CUDA"
+        COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cuda"
+      fi
+    # check for rocm gpu
+    elif command -v rocminfo >/dev/null 2>&1; then
+      if rocminfo >/dev/null 2>&1; then
+        echo "GPU type detected: ROCM"
+        COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-rocm"
+      fi
+    # otherwise, fall back to cpu
+    else
+      echo "No GPU detected, using CPU"
+      COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cpu"
+    fi
 
-  # check for rocm gpu
-  if command -v rocminfo >/dev/null 2>&1; then
-    if rocminfo >/dev/null 2>&1; then
-      echo "GPU type detected: ROCM"
-      COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-rocm"
-      return
-    fi
+    # use docker internal for the Ollama host
+    OLLAMA_HOST=$DOCKER_HOST
+    DKN_DOCKER_NETWORK_MODE=bridge
   fi
 
-  # if there are no local ollama and gpu, use docker-compose with cpu profile
-  echo "No GPU found, using ollama-cpu"
-  COMPOSE_PROFILES="$COMPOSE_PROFILES ollama-cpu"
-  OLLAMA_HOST=$DOCKER_HOST
+  echo "Ollama host: $OLLAMA_HOST (network mode: $DKN_DOCKER_NETWORK_MODE)"
 }
 handle_ollama_env
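COMPOSE_PROFILES is the standard Docker Compose mechanism for toggling optional services, so the profile name appended above decides which Ollama flavor (CUDA, ROCm, or CPU) is started. An illustrative manual equivalent:

    # start only the services tagged with the `ollama-cpu` profile (plus untagged ones)
    COMPOSE_PROFILES="ollama-cpu" docker compose up -d
    # the --profile flag is equivalent
    docker compose --profile ollama-cpu up -d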
@@ -298,10 +326,12 @@ echo ""
 echo "Starting in ${START_MODE} mode..."
 echo "Log level: ${RUST_LOG}"
 echo "Models: ${DKN_MODELS}"
+echo "Operating System: ${OS}"
 echo "${COMPOSE_PROFILES}"
 echo ""
 eval "${COMPOSE_UP}"
 
+# grab the exit code of docker compose
 compose_exit_code=$?
 
 # handle docker-compose error
@@ -312,6 +342,7 @@ if [ $compose_exit_code -ne 0 ]; then
 fi
 
 echo "All good! Compute node is up and running."
+echo "You can check logs with: docker compose logs -f compute"
 
 # background/foreground mode
 if [ "$START_MODE" = "FOREGROUND" ]; then
