@@ -29,8 +29,6 @@ services:
         condition: service_completed_successfully
       realtime:
         condition: service_healthy
-      ollama:
-        condition: service_healthy
     healthcheck:
       test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3000']
       interval: 90s
@@ -99,6 +97,8 @@ services:
 
   # Ollama with GPU support (default)
   ollama:
+    profiles:
+      - gpu
     image: ollama/ollama:latest
     pull_policy: always
     volumes:
@@ -120,7 +120,7 @@ services:
               count: all
               capabilities: [gpu]
     healthcheck:
-      test: ['CMD', 'curl', '-f', 'http://localhost:11434/']
+      test: ['CMD', 'ollama', 'list']
       interval: 10s
       timeout: 5s
       retries: 5
@@ -144,12 +144,16 @@ services:
       - OLLAMA_HOST=0.0.0.0:11434
     command: 'serve'
     healthcheck:
-      test: ['CMD', 'curl', '-f', 'http://localhost:11434/']
+      test: ['CMD', 'ollama', 'list']
       interval: 10s
       timeout: 5s
       retries: 5
       start_period: 30s
     restart: unless-stopped
+    networks:
+      default:
+        aliases:
+          - ollama
 
   # Helper container to pull models automatically
   model-setup:
@@ -160,13 +164,11 @@ services:
       - ollama_data:/root/.ollama
     environment:
       - OLLAMA_HOST=ollama:11434
-    depends_on:
-      ollama:
-        condition: service_healthy
+    entrypoint: ["/bin/sh", "-lc"]
     command: >
       sh -c "
       echo 'Waiting for Ollama to be ready...' &&
-      sleep 10 &&
+      until ollama list >/dev/null 2>&1; do echo 'Waiting for Ollama...'; sleep 2; done &&
       echo 'Pulling gemma3:4b model (recommended starter model)...' &&
       ollama pull gemma3:4b &&
       echo 'Model setup complete! You can now use gemma3:4b in Sim.' &&
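
Usage sketch (the compose file name docker-compose.ollama.yml is an assumption here; substitute the file this diff applies to). With the new profiles entry, the GPU-backed ollama service only starts when the gpu profile is enabled, and the CPU variant now publishes the network alias ollama so dependent services resolve the same hostname whichever variant runs:

# Start the default stack; the gpu-profiled ollama service is skipped
docker compose -f docker-compose.ollama.yml up -d

# Opt in to the GPU-backed Ollama service instead
docker compose -f docker-compose.ollama.yml --profile gpu up -d

# With the gpu profile active, run the same check the new healthcheck performs
docker compose -f docker-compose.ollama.yml --profile gpu exec ollama ollama list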