
Commit a05fc8b (parent: e7152fc)

hcastc00/change to dnp (#2)

* Update dappnode_package.json
* Update dappnode_package.json
* Update docker-compose.yml
* Update dappnode_package.json
* Update docker-compose.yml
* Update docker-compose.yml
* Fixing my dyslexia

6 files changed, 18 insertions(+), 22 deletions(-)


dappnode_package.json
Lines changed: 7 additions & 5 deletions

@@ -1,18 +1,20 @@
 {
-  "name": "ollama-openwebui.public.dappnode.eth",
+  "name": "ollama-openwebui.dnp.dappnode.eth",
   "version": "0.1.0",
   "shortDescription": "Local AI chat interface with Ollama and Open WebUI",
-  "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with AMD ROCm support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n\nAccess Open WebUI at http://ollama-openwebui.public.dappnode:8080",
+  "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with AMD ROCm support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n\nAccess Open WebUI at http://ollama-openwebui.dappnode:8080",
   "type": "service",
   "mainService": "webui",
   "author": "DAppNode Community",
   "license": "GPL-3.0",
-  "categories": ["AI"],
+  "categories": [
+    "AI"
+  ],
   "links": {
     "homepage": "https://github.com/open-webui/open-webui",
-    "ui": "http://ollama-openwebui.public.dappnode:8080"
+    "ui": "http://ollama-openwebui.dappnode:8080"
   },
   "architectures": [
     "linux/amd64"
   ]
-}
+}
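
With the rename applied, the package UI should come up under the new hostname. A quick sanity check from a machine connected to the DAppNode (a sketch; it assumes the package is installed and DAppNode's DNS resolves *.dappnode names):

    # Expect an HTTP 200 from Open WebUI at the renamed address
    curl -I http://ollama-openwebui.dappnode:8080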

docker-compose.yml
Lines changed: 2 additions & 8 deletions

@@ -4,7 +4,7 @@ services:
   webui:
     build:
       context: webui
-    container_name: openwebui.ollama-openwebui.public.dappnode.eth
+    container_name: openwebui.ollama-openwebui.dnp.dappnode.eth
     ports:
       - "8080:8080/tcp"
     environment:
@@ -19,16 +19,12 @@ services:
   ollama:
     build:
       context: ollama
-    container_name: ollama.ollama-openwebui.public.dappnode.eth
+    container_name: ollama.ollama-openwebui.dnp.dappnode.eth
     ports:
       - "11434:11434/tcp"
     volumes:
       - "ollama:/root/.ollama"
     restart: unless-stopped
-    # -------------------------------------------------
-    # 👉 Added environment variables for logging &
-    #    Prometheus-style metrics
-    # -------------------------------------------------
     environment:
       # Show token-throughput and other debug info in the container logs
       - OLLAMA_LOG_LEVEL=debug
@@ -41,8 +37,6 @@ services:
 
       # OPTIONAL – Turn off outbound telemetry if you only want local metrics
       - OLLAMA_TELEMETRY=0
-      # -------------------------------------------------
-
 volumes:
   ollama: {}
   webui: {}
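
Since the ollama service publishes port 11434, you can confirm it is running under the new container name and watch the debug output enabled above. A minimal check from the DAppNode host (a sketch; /api/tags is Ollama's standard endpoint for listing local models):

    # List locally available models through the published port
    curl http://localhost:11434/api/tags

    # Follow the debug-level logs enabled by OLLAMA_LOG_LEVEL
    docker logs -f ollama.ollama-openwebui.dnp.dappnode.eth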

package_variants/amd/dappnode_package.json
Lines changed: 2 additions & 2 deletions

@@ -1,9 +1,9 @@
 {
-  "name": "ollama-amd-openwebui.public.dappnode.eth",
+  "name": "ollama-amd-openwebui.dnp.dappnode.eth",
   "version": "0.1.0",
   "links": {
     "homepage": "https://github.com/open-webui/open-webui",
-    "ui": "http://ollama-openwebui.public.dappnode:8080"
+    "ui": "http://ollama-openwebui.dappnode:8080"
   },
   "warnings": {
     "onInstall": "This package requires an AMD GPU with ROCm support. The Ollama service will expose GPU devices /dev/kfd and /dev/dri from the host system."

package_variants/amd/docker-compose.yml
Lines changed: 2 additions & 2 deletions

@@ -6,7 +6,7 @@ services:
       context: webui
       args:
         WEBUI_VERSION: main
-    container_name: openwebui-amd.ollama-openwebui.public.dappnode.eth
+    container_name: openwebui-amd.ollama-openwebui.dnp.dappnode.eth
 
   ollama:
     build:
@@ -16,4 +16,4 @@ services:
     devices:
       - /dev/kfd
       - /dev/dri
-    container_name: ollama-amd.ollama-openwebui.public.dappnode.eth
+    container_name: ollama-amd.ollama-openwebui.dnp.dappnode.eth
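
Because the AMD variant maps the host's ROCm device nodes straight into the container, those nodes must exist before install. A quick pre-flight check on the DAppNode host (a sketch; exact device permissions vary by distro and kernel):

    # Verify the ROCm device nodes the compose file passes through
    ls -l /dev/kfd /dev/dri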

package_variants/nvidia/dappnode_package.json
Lines changed: 3 additions & 3 deletions

@@ -1,9 +1,9 @@
 {
-  "name": "ollama-nvidia-openwebui.public.dappnode.eth",
+  "name": "ollama-nvidia-openwebui.dnp.dappnode.eth",
   "version": "0.1.0",
   "links": {
     "homepage": "https://github.com/open-webui/open-webui",
-    "ui": "http://ollama-nvidia-openwebui.public.dappnode:8080"
+    "ui": "http://ollama-nvidia-openwebui.dappnode:8080"
   },
   "warnings": {
     "onInstall": "This package requires an Nvidia GPU with CUDA support."
@@ -12,4 +12,4 @@
     "linux/amd64",
     "linux/arm64"
   ]
-}
+}

package_variants/nvidia/docker-compose.yml
Lines changed: 2 additions & 2 deletions

@@ -6,7 +6,7 @@ services:
       context: webui
       args:
         WEBUI_VERSION: main
-    container_name: openwebui-nvidia.ollama-openwebui.public.dappnode.eth
+    container_name: openwebui-nvidia.ollama-openwebui.dnp.dappnode.eth
   ollama:
     deploy:
       resources:
@@ -19,4 +19,4 @@ services:
       context: ollama
       args:
         OLLAMA_VERSION: 0.12.6
-    container_name: ollama-nvidia.ollama-openwebui.public.dappnode.eth
+    container_name: ollama-nvidia.ollama-openwebui.dnp.dappnode.eth
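
To confirm the CUDA variant still sees the GPU after the rename, you can probe inside the renamed container. A sketch only: it assumes the host has the NVIDIA container toolkit configured and that the image built from context: ollama bundles nvidia-smi:

    # GPU visibility check inside the renamed Ollama container
    docker exec ollama-nvidia.ollama-openwebui.dnp.dappnode.eth nvidia-smi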
