From 26cda893f042d12eb1c27ea743e285261778bca8 Mon Sep 17 00:00:00 2001
From: kat
Date: Fri, 6 Jun 2025 15:12:20 +0200
Subject: [PATCH 1/2] Add ollama service to docker compose

---
 README.md          | 33 ++++++++++++++++++++++++---------
 docker-compose.yml | 17 +++++++++++++----
 2 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index d623e8b..1ed61c7 100644
--- a/README.md
+++ b/README.md
@@ -47,18 +47,24 @@ Choose models based on your system capabilities:
 | **Chat** | `phi3:mini` | ~2.3GB | 4GB | Low-resource systems |
 
+### Installation Options
+
+Choose your preferred installation method:
+
+### Option 1: Direct Installation
 
-### Prerequisites (Required for Both Installation Methods)
+**Prerequisite: Ollama (for local AI models)**
+
+Install Ollama:
 
-**1. Install Ollama** (for local AI models):
 ```bash
 # macOS
 brew install ollama
 
 # Or download from https://ollama.com
 ```
 
+Start Ollama and install the required models:
 
-**2. Start Ollama and install required models**:
 ```bash
 ollama serve
 
@@ -69,11 +75,7 @@
 ollama pull nomic-embed-text
 ollama pull qwen3:14b
 ```
 
-### Installation Options
-
-Choose your preferred installation method:
-### Option 1: Direct Installation
 
 **Additional Prerequisites:**
 - Python 3.8+
@@ -106,7 +108,10 @@
 
 ### Option 2: Docker Installation
 
-**Additional Prerequisites:**
+With this option, you don't need to install Ollama separately; it is started
+automatically by Docker Compose.
+
+**Prerequisites:**
 - Docker and Docker Compose
 
 **Installation Steps:**
@@ -122,7 +127,17 @@
    ```bash
    docker-compose up
    ```
 
-3. **Open your browser** to `http://localhost:8501`
+3. **Install models**:
+
+   ```bash
+   # embedding model
+   docker exec -it ollama ollama pull nomic-embed-text
+
+   # chat model
+   docker exec -it ollama ollama pull qwen3:14b
+   ```
+
+4. **Open your browser** to `http://localhost:8501`
 
 ## 📖 How to Use
diff --git a/docker-compose.yml b/docker-compose.yml
index 3bf846b..f3d1f63 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -11,10 +11,19 @@ services:
       - "8501:8501"
     environment:
       # Configure Ollama connection (both env vars for compatibility)
-      - OLLAMA_HOST=http://host.docker.internal:11434
-      - OLLAMA_BASE_URL=http://host.docker.internal:11434
+      - OLLAMA_HOST=ollama:11434
+      - OLLAMA_BASE_URL=http://ollama:11434
     restart: unless-stopped
     volumes:
       - ./data:/app/data  # For persistent data storage
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
\ No newline at end of file
+
+  ollama:
+    image: docker.io/ollama/ollama:latest
+    container_name: ollama
+    pull_policy: always
+    tty: true
+    restart: always
+    environment:
+      - OLLAMA_KEEP_ALIVE=24h
+      - OLLAMA_HOST=0.0.0.0
+      - OLLAMA_PORT=11434
\ No newline at end of file

From 9c9cc245ca303c22bb95cf18d2da1954a2131e58 Mon Sep 17 00:00:00 2001
From: krasch
Date: Fri, 6 Jun 2025 15:48:33 +0200
Subject: [PATCH 2/2] Add volume and GPU configuration for ollama service

---
 README.md              |  4 ++++
 docker-compose.gpu.yml | 10 ++++++++++
 docker-compose.yml     |  7 ++++++-
 docker.env.example     |  8 --------
 4 files changed, 20 insertions(+), 9 deletions(-)
 create mode 100644 docker-compose.gpu.yml
 delete mode 100644 docker.env.example

diff --git a/README.md b/README.md
index 1ed61c7..54623b9 100644
--- a/README.md
+++ b/README.md
@@ -124,7 +124,11 @@ automatically by Docker Compose.
 
 2. **Start with Docker Compose**:
    ```bash
+   # if you don't have a GPU
    docker-compose up
+
+   # if you have an NVIDIA GPU
+   docker compose -f docker-compose.yml -f docker-compose.gpu.yml up
    ```
 
 3. **Install models**:
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
new file mode 100644
index 0000000..7f5be24
--- /dev/null
+++ b/docker-compose.gpu.yml
@@ -0,0 +1,10 @@
+services:
+  ollama:
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities:
+                - gpu
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index f3d1f63..48c8697 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -26,4 +26,9 @@ services:
     environment:
       - OLLAMA_KEEP_ALIVE=24h
       - OLLAMA_HOST=0.0.0.0
-      - OLLAMA_PORT=11434
\ No newline at end of file
+      - OLLAMA_PORT=11434
+    volumes:
+      - ollama:/root/.ollama
+
+volumes:
+  ollama: {}
diff --git a/docker.env.example b/docker.env.example
deleted file mode 100644
index 75f04ba..0000000
--- a/docker.env.example
+++ /dev/null
@@ -1,8 +0,0 @@
-# Ollama Configuration
-# Set the base URL for your Ollama instance
-OLLAMA_BASE_URL=http://localhost:11434
-
-# Examples for different setups:
-# Local Ollama on host: http://host.docker.internal:11434
-# Remote Ollama server: http://your-ollama-server:11434
-# Docker network Ollama: http://ollama-container:11434
\ No newline at end of file
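
A quick smoke test for the resulting stack (a sketch; it assumes the `ollama`
container name and the published ports defined in the compose files above):

```bash
# start the stack in the background (add -f docker-compose.gpu.yml on NVIDIA machines)
docker compose up -d

# the pulled models should show up inside the named volume
docker exec -it ollama ollama list

# the app should answer on the published Streamlit port
curl -sI http://localhost:8501 | head -n 1
```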