Skip to content

Commit 074bff3

Browse files
Merge pull request #837 from gabsprogrammer/codex/settings-reliability-polish
fix(settings): harden loading and polish status view
2 parents 54020d8 + 6d29c22 commit 074bff3

File tree

3 files changed

+715
-188
lines changed

3 files changed

+715
-188
lines changed

dream-server/extensions/services/dashboard-api/main.py

Lines changed: 213 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,10 @@
1515
"""
1616

1717
import asyncio
18+
import json
1819
import logging
1920
import os
21+
import re
2022
import socket
2123
import shutil
2224
import time
@@ -28,7 +30,7 @@
2830
from fastapi.middleware.cors import CORSMiddleware
2931

3032
# --- Local modules ---
31-
from config import SERVICES, DATA_DIR, SIDEBAR_ICONS, MANIFEST_ERRORS
33+
from config import SERVICES, DATA_DIR, INSTALL_DIR, SIDEBAR_ICONS, MANIFEST_ERRORS
3234
from models import (
3335
GPUInfo, ServiceStatus, DiskUsage, ModelInfo, BootstrapStatus,
3436
FullStatus, PortCheckRequest,
@@ -75,13 +77,181 @@ def set(self, key: str, value: object, ttl: float):
7577
_GPU_CACHE_TTL = 3.0
7678
_STATUS_CACHE_TTL = 2.0
7779
_STORAGE_CACHE_TTL = 30.0
80+
_SETTINGS_SUMMARY_CACHE_TTL = 5.0
7881
_SERVICE_POLL_INTERVAL = 10.0 # background health check interval
7982

8083
# --- Router imports ---
8184
from routers import workflows, features, setup, updates, agents, privacy, extensions, gpu as gpu_router, resources
8285

8386
logger = logging.getLogger(__name__)
8487

88+
89+
def _resolve_install_root() -> Path:
    """Return the Dream Server install root, preferring the host bind mount."""
    mounted = Path("/dream-server")
    # Inside the container the host install is bind-mounted at /dream-server;
    # outside (or in dev) fall back to the configured install directory.
    return mounted if mounted.exists() else Path(INSTALL_DIR)
94+
95+
96+
def _read_installed_version() -> str:
    """Best-effort lookup of the installed Dream Server version.

    Checks, in order: the ``DREAM_VERSION`` entry in the install root's
    ``.env`` file, a bare ``.version`` file, and the release metadata in
    ``manifest.json``. Falls back to the FastAPI app's own declared version
    when nothing on disk is readable.
    """
    install_root = _resolve_install_root()

    env_file = install_root / ".env"
    if env_file.exists():
        try:
            # Read UTF-8 explicitly (matches _read_install_date); a corrupt
            # file raises UnicodeDecodeError, which must not escape this
            # best-effort probe — the original only caught OSError.
            for line in env_file.read_text(encoding="utf-8").splitlines():
                if line.startswith("DREAM_VERSION="):
                    # Strip surrounding quotes: DREAM_VERSION="1.2.3" / '1.2.3'.
                    return line.split("=", 1)[1].strip().strip("\"'")
        except (OSError, UnicodeDecodeError):
            pass

    version_file = install_root / ".version"
    if version_file.exists():
        try:
            raw = version_file.read_text(encoding="utf-8").strip()
            if raw:
                return raw
        except (OSError, UnicodeDecodeError):
            pass

    manifest_file = install_root / "manifest.json"
    if manifest_file.exists():
        try:
            data = json.loads(manifest_file.read_text(encoding="utf-8"))
            version = (
                data.get("release", {}).get("version")
                or data.get("dream_version")
                or data.get("manifestVersion")
            )
            if version:
                return str(version)
        except (OSError, ValueError, AttributeError):
            # ValueError covers json.JSONDecodeError and UnicodeDecodeError;
            # AttributeError guards a non-dict "release" entry.
            pass

    # Nothing usable on disk — report the API's own declared version.
    return app.version
131+
132+
133+
def _normalize_timestamp_precision(timestamp: str) -> str:
134+
match = re.match(r"^(.*?\.\d{6})\d+(.*)$", timestamp)
135+
if match:
136+
return f"{match.group(1)}{match.group(2)}"
137+
return timestamp
138+
139+
140+
def _read_install_date() -> Optional[str]:
    """Best-effort ISO timestamp for when Dream Server was installed.

    Prefers the "# Generated by ... on <timestamp>" banner near the top of
    the install root's .env file; otherwise falls back to the mtime of the
    first install artifact found. Returns None when nothing is available.
    """
    install_root = _resolve_install_root()
    env_file = install_root / ".env"

    if env_file.exists():
        try:
            # The generator banner lives within the first few lines of .env.
            header = env_file.read_text(encoding="utf-8").splitlines()[:8]
        except OSError:
            header = []
        for line in header:
            if not (line.startswith("# Generated by ") and " on " in line):
                continue
            raw = line.split(" on ", 1)[1].strip()
            normalized = _normalize_timestamp_precision(raw)
            try:
                return datetime.fromisoformat(normalized).isoformat()
            except ValueError:
                # Unparseable timestamp — surface it verbatim rather than lose it.
                return raw

    # No banner: use the modification time of the first install artifact found.
    for artifact in (env_file, install_root / ".version", install_root / "manifest.json"):
        if not artifact.exists():
            continue
        try:
            mtime = artifact.stat().st_mtime
        except OSError:
            continue
        return datetime.fromtimestamp(mtime, tz=timezone.utc).isoformat()

    return None
168+
169+
170+
def _infer_tier(gpu_info) -> str:
171+
if not gpu_info:
172+
return "Unknown"
173+
174+
vram_gb = gpu_info.memory_total_mb / 1024
175+
if gpu_info.memory_type == "unified" and gpu_info.gpu_backend == "amd":
176+
return "Strix Halo 90+" if vram_gb >= 90 else "Strix Halo Compact"
177+
if vram_gb >= 80:
178+
return "Professional"
179+
if vram_gb >= 24:
180+
return "Prosumer"
181+
if vram_gb >= 16:
182+
return "Standard"
183+
if vram_gb >= 8:
184+
return "Entry"
185+
return "Minimal"
186+
187+
188+
def _serialize_gpu(gpu_info) -> Optional[dict]:
189+
if not gpu_info:
190+
return None
191+
192+
gpu_count = 1
193+
gpu_count_env = os.environ.get("GPU_COUNT", "")
194+
if gpu_count_env.isdigit():
195+
gpu_count = int(gpu_count_env)
196+
elif " × " in gpu_info.name:
197+
try:
198+
gpu_count = int(gpu_info.name.rsplit(" × ", 1)[-1])
199+
except ValueError:
200+
pass
201+
elif " + " in gpu_info.name:
202+
gpu_count = gpu_info.name.count(" + ") + 1
203+
204+
gpu_data = {
205+
"name": gpu_info.name,
206+
"vramUsed": round(gpu_info.memory_used_mb / 1024, 1),
207+
"vramTotal": round(gpu_info.memory_total_mb / 1024, 1),
208+
"utilization": gpu_info.utilization_percent,
209+
"temperature": gpu_info.temperature_c,
210+
"memoryType": gpu_info.memory_type,
211+
"backend": gpu_info.gpu_backend,
212+
"gpu_count": gpu_count,
213+
"memoryLabel": "VRAM Partition" if gpu_info.memory_type == "unified" else "VRAM",
214+
}
215+
if gpu_info.power_w is not None:
216+
gpu_data["powerDraw"] = gpu_info.power_w
217+
return gpu_data
218+
219+
220+
def _serialize_model(model_info) -> Optional[dict]:
221+
if not model_info:
222+
return None
223+
return {
224+
"name": model_info.name,
225+
"contextLength": model_info.context_length,
226+
}
227+
228+
229+
def _serialize_services(service_statuses: list[ServiceStatus], uptime: int) -> list[dict]:
    """Shape live service-health records for the settings payload.

    Uptime is reported only for services currently marked healthy; all
    others get None so the UI can show them as unavailable.
    """
    serialized = []
    for svc in service_statuses:
        serialized.append({
            "name": svc.name,
            "status": svc.status,
            "port": svc.external_port,
            "uptime": uptime if svc.status == "healthy" else None,
        })
    return serialized
239+
240+
241+
def _fallback_services() -> list[dict]:
    """Static service list built from config, for when no live health data is cached."""
    def _port(cfg: dict) -> int:
        # Prefer the externally-mapped port; fall back to the internal one.
        return cfg.get("external_port", cfg.get("port", 0))

    # Services without any exposed port are omitted from the dashboard.
    return [
        {
            "name": cfg.get("name", service_id),
            "status": "unknown",
            "port": _port(cfg),
            "uptime": None,
        }
        for service_id, cfg in SERVICES.items()
        if _port(cfg)
    ]
254+
85255
# --- App ---
86256

87257
app = FastAPI(
@@ -501,6 +671,48 @@ def _compute_storage():
501671
return result
502672

503673

674+
@app.get("/api/settings/summary")
async def api_settings_summary(api_key: str = Depends(verify_api_key)):
    """Fast settings payload that avoids slow live service probes on first load."""
    hit = _cache.get("settings_summary")
    if hit is not None:
        return hit

    # Each probe is a blocking call; run them concurrently on worker threads
    # and await the whole batch.
    gpu, model, uptime, cpu, ram = await asyncio.gather(
        asyncio.to_thread(get_gpu_info),
        asyncio.to_thread(get_model_info),
        asyncio.to_thread(get_uptime),
        asyncio.to_thread(get_cpu_metrics),
        asyncio.to_thread(get_ram_metrics),
    )

    # Prefer the background poller's cached health data; fall back to a
    # static list from config so the UI always has something to render.
    live = get_cached_services()
    if live is None:
        services_payload = _fallback_services()
    else:
        services_payload = _serialize_services(live, uptime)

    payload = {
        "version": _read_installed_version(),
        "install_date": _read_install_date(),
        "tier": _infer_tier(gpu),
        "uptime": uptime,
        "cpu": cpu,
        "ram": ram,
        "gpu": _serialize_gpu(gpu),
        "model": _serialize_model(model),
        "services": services_payload,
        "system": {
            "uptime": uptime,
            "hostname": os.environ.get("HOSTNAME", "dream-server"),
        },
        "manifest_errors": MANIFEST_ERRORS,
    }
    _cache.set("settings_summary", payload, _SETTINGS_SUMMARY_CACHE_TTL)
    return payload
714+
715+
504716
# --- Service Health Polling ---
505717

506718
async def _get_services() -> list[ServiceStatus]:

0 commit comments

Comments
 (0)