-
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathconfig.py
More file actions
768 lines (676 loc) · 32.5 KB
/
config.py
File metadata and controls
768 lines (676 loc) · 32.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
"""Config controller — settings, preferences, secrets, version, serial ports."""
from __future__ import annotations
import asyncio
import hmac
import logging
import os
import tempfile
import threading
from collections.abc import Iterator
from contextlib import contextmanager, suppress
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from esphome import yaml_util
from esphome.const import __version__ as esphome_version
from esphome.core import CORE
from esphome.helpers import get_bool_env
from esphome.storage_json import StorageJSON, ext_storage_path
from esphome.util import get_serial_ports
from ..constants import DEFAULT_INGRESS_PORT
from ..constants import __version__ as server_version
from ..helpers.api import CommandError, api_command
from ..helpers.auth import hash_password
from ..helpers.json import JSONDecodeError, dumps_indent, loads
from ..models import ErrorCode, Label, UserPreferences
if TYPE_CHECKING:
from ..device_builder import DeviceBuilder
_LOGGER = logging.getLogger(__name__)

# Sentinel YAML filename assigned to ``CORE.config_path`` (see
# ``DashboardSettings.parse_args``) so esphome's relative-path helpers
# resolve against the dashboard's config dir.
_DASHBOARD_SENTINEL_FILE = "___DASHBOARD_SENTINEL___.yaml"
# JSON sidecar in the config dir holding per-device metadata entries
# (keyed by YAML filename) plus the reserved keys below.
_METADATA_FILE = ".device-builder.json"
# Reserved top-level sidecar keys — underscore-prefixed so device-entry
# scans (e.g. ``delete_label_cascade``) can skip them.
_PREFS_KEY = "_preferences"
_LABELS_KEY = "_labels"
# ---------------------------------------------------------------------------
# Settings
# ---------------------------------------------------------------------------


@dataclass
class DashboardSettings:
    """Application settings parsed from CLI args and environment."""

    # Directory holding the device YAML configs (``args.configuration``).
    config_dir: Path = field(default_factory=Path)
    # ``config_dir.resolve()`` — anchor for the ``rel_path`` traversal
    # check; populated by ``parse_args``.
    absolute_config_dir: Path | None = None
    username: str = ""
    # Hash of the configured password (via ``hash_password``); only
    # meaningful when ``using_password`` is True.
    password_hash: bytes = field(default_factory=bytes)
    using_password: bool = False
    on_ha_addon: bool = False
    log_level: str = "info"
    port: int = 6052
    host: str = "0.0.0.0"
    ingress_port: int = DEFAULT_INGRESS_PORT
    ingress_host: str = ""
    # In dev mode the SPA shell is served with ``Cache-Control: no-cache``
    # so a re-deployed wheel isn't masked by a browser-cached
    # ``index.html`` pointing at a now-deleted hashed bundle. In
    # production we let the browser apply its default heuristic; the
    # hashed bundles are still served as ``immutable`` regardless.
    dev_mode: bool = False
    # Hostnames we trust for cross-origin / Host validation in the
    # WebSocket handshake. Carries the legacy
    # ``ESPHOME_TRUSTED_DOMAINS`` semantics from the upstream
    # dashboard, plus a DNS-rebinding-defense Host check:
    #
    # * Origin allowlist - when the browser's Origin header
    #   doesn't match the request's Host (reverse-proxy hostname
    #   mismatch), accept the connection if Origin's hostname is
    #   in this list. Fixes the
    #   "lose-dashboard-access-behind-nginx" papercut.
    # * Host allowlist - reject the request entirely if its Host
    #   header isn't in this list. Defense in depth against DNS
    #   rebinding, on top of the existing per-IP-rate-limited
    #   ``auth/login`` gate.
    #
    # Empty list = both checks disabled (existing strict
    # Origin/Host equality is the only gate; no Host allowlist).
    # ``"*"`` is the explicit "match anything" escape hatch for
    # operators who want to acknowledge the knob without
    # restricting hosts.
    trusted_domains: list[str] = field(default_factory=list)

    def parse_args(self, args: Any) -> None:
        """Parse CLI arguments into settings."""
        self.on_ha_addon = getattr(args, "ha_addon", False)
        # Env-var fallback uses ``ESPHOME_*`` rather than the legacy
        # dashboard's bare ``USERNAME`` / ``PASSWORD``: the bare names
        # collide with login-shell / Windows system vars (``$USERNAME``
        # is the OS user on both), which would silently promote the
        # OS user to the dashboard username when only ``--password``
        # / ``$ESPHOME_PASSWORD`` is set. Intentional divergence from
        # ``esphome/dashboard/settings.py``.
        username = getattr(args, "username", None) or os.getenv("ESPHOME_USERNAME") or ""
        password = getattr(args, "password", None) or os.getenv("ESPHOME_PASSWORD") or ""
        self.username = username
        # Auth is only active when BOTH halves are configured.
        self.using_password = bool(username and password)
        if self.using_password:
            self.password_hash = hash_password(password)
        self.config_dir = Path(args.configuration)
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.absolute_config_dir = self.config_dir.resolve()
        # Ensure secrets.yaml exists (ESPHome fails if !secret references can't find it)
        secrets_path = self.config_dir / "secrets.yaml"
        if not secrets_path.exists():
            secrets_path.write_text(
                "# Secrets — referenced from device configs via !secret\n"
                "# Update these values for your network\n"
                'wifi_ssid: ""\n'
                'wifi_password: ""\n',
                encoding="utf-8",
            )
        self.log_level = getattr(args, "log_level", "info")
        self.port = getattr(args, "port", 6052)
        self.host = getattr(args, "host", "0.0.0.0")
        self.ingress_port = getattr(args, "ingress_port", DEFAULT_INGRESS_PORT)
        self.ingress_host = getattr(args, "ingress_host", "") or ""
        self.dev_mode = bool(getattr(args, "dev", False))
        # ``--trusted-domains a,b,c`` (or ``$ESPHOME_TRUSTED_DOMAINS``).
        # Comma-separated. Lower-cased for the case-insensitive match
        # in the WS handshake. Empty list = both Origin and Host
        # allowlists disabled.
        #
        # Precedence: a CLI flag value of ``None`` (argparse default
        # when ``--trusted-domains`` wasn't passed) means "flag not
        # set, consult the env var"; any string value, including the
        # empty string, is an explicit override and wins over the
        # env var. Lets operators say ``--trusted-domains ""`` to
        # disable the checks even when ``$ESPHOME_TRUSTED_DOMAINS``
        # is set in the environment (e.g. inherited from a parent).
        cli_value = getattr(args, "trusted_domains", None)
        raw_trusted = (
            cli_value if cli_value is not None else os.getenv("ESPHOME_TRUSTED_DOMAINS", "")
        )
        self.trusted_domains = [
            host.strip().lower() for host in raw_trusted.split(",") if host.strip()
        ]
        # Point esphome's CORE at a sentinel file inside the config dir
        # so its path helpers resolve relative to our directory.
        CORE.config_path = self.config_dir / _DASHBOARD_SENTINEL_FILE

    def rel_path(self, *parts: str) -> Path:
        """
        Return a path relative to the config dir, validated against path traversal.
        ``relative_to`` raises ``ValueError`` when ``parts`` resolve outside
        the config dir; we translate that into a ``CommandError`` so the
        WS dispatcher surfaces it as ``INVALID_ARGS`` instead of the
        generic ``INTERNAL_ERROR`` that an unclassified ``ValueError``
        would produce. Single chokepoint for every handler that builds
        a configuration path.
        """
        joined = self.config_dir.joinpath(*parts)
        assert self.absolute_config_dir is not None  # type narrowing
        try:
            joined.resolve().relative_to(self.absolute_config_dir)
        except ValueError as err:
            # ``!r`` quotes + escapes the offending value so embedded
            # CR/LF/control bytes can't break the error string when
            # the frontend echoes it back to the user. ``!r`` *first*,
            # then truncate, so the bound holds even for control-heavy
            # payloads (a single ``\x00`` repr's to 4 chars, so an
            # 80-byte raw value can otherwise blow past 200 chars).
            display = repr("/".join(parts))
            if len(display) > 100:
                display = f"{display[:97]}..."
            raise CommandError(
                ErrorCode.INVALID_ARGS,
                f"Invalid configuration filename: {display}",
            ) from err
        return joined

    @property
    def status_use_mqtt(self) -> bool:
        # Re-read from the environment on every access rather than cached.
        return bool(get_bool_env("ESPHOME_DASHBOARD_USE_MQTT"))

    @property
    def create_ingress_site(self) -> bool:
        """Whether to bind the trusted HA Ingress TCP site alongside the public site."""
        if not self.on_ha_addon:
            return False
        # DISABLE_HA_AUTHENTICATION lets operators force ingress users
        # through the password-gated public port too.
        return not get_bool_env("DISABLE_HA_AUTHENTICATION")

    def check_password(self, username: str, password: str) -> bool:
        """
        Verify *username* and *password* in constant time.
        Returns ``False`` when no password is configured — check
        ``using_password`` separately to know whether the gate is active.
        """
        if not self.using_password:
            return False
        # ``hmac.compare_digest`` on both halves avoids a timing
        # side-channel; evaluate both before combining so the username
        # outcome doesn't short-circuit the password comparison.
        username_ok = hmac.compare_digest(username.encode("utf-8"), self.username.encode("utf-8"))
        password_ok = hmac.compare_digest(self.password_hash, hash_password(password))
        return username_ok and password_ok
# ---------------------------------------------------------------------------
# Metadata persistence (device-builder.json)
# ---------------------------------------------------------------------------
# Several controllers (firmware queue, device CRUD, preferences, IP
# cache) all read-modify-write this file from the executor pool; without
# serialisation two writers landing in the same window would silently
# lose each other's updates.
_METADATA_LOCK = threading.Lock()


@contextmanager
def metadata_transaction(config_dir: Path) -> Iterator[dict[str, Any]]:
    """
    Serialised read-modify-write window over the metadata sidecar.

    The yielded dict is the current on-disk metadata; mutate it in
    place. Exiting the ``with`` block cleanly persists the result
    atomically, while an exception raised inside the block skips the
    save and discards the pending mutation. The module-level lock
    serialises concurrent transactions so updates can't clobber each
    other.
    """
    with _METADATA_LOCK:
        snapshot = _load_metadata(config_dir)
        yield snapshot
        _save_metadata(config_dir, snapshot)
def _load_metadata(config_dir: Path) -> dict[str, Any]:
    """Read the metadata sidecar, returning ``{}`` when missing or corrupt."""
    sidecar = config_dir / _METADATA_FILE
    try:
        # The decoder accepts raw bytes directly (orjson-backed), so skip
        # the read_text → encode round-trip. JSONDecodeError subclasses
        # ValueError, hence the explicit pairing with FileNotFoundError.
        decoded = loads(sidecar.read_bytes())
    except (FileNotFoundError, JSONDecodeError):
        return {}
    return decoded if isinstance(decoded, dict) else {}
def _save_metadata(config_dir: Path, data: dict[str, Any]) -> None:
    """Atomically persist *data* to the metadata sidecar."""
    target = config_dir / _METADATA_FILE
    # Write to a temp file in the same directory, then os.replace: lock-free
    # readers can never observe a partially-written sidecar.
    fd, tmp_name = tempfile.mkstemp(prefix=f"{_METADATA_FILE}.", suffix=".tmp", dir=str(config_dir))
    tmp_path = Path(tmp_name)
    try:
        with os.fdopen(fd, "wb") as handle:
            # ``dumps_indent`` produces bytes, hence binary mode; the
            # on-disk file remains human-readable / diffable.
            handle.write(dumps_indent(data))
        os.replace(tmp_path, target)
    except Exception:
        # Best-effort cleanup of the orphan temp file, then re-raise.
        with suppress(OSError):
            tmp_path.unlink()
        raise
def get_board_id(config_dir: Path, filename: str) -> str:
    """Get the board_id for a device."""
    device_entry = _load_metadata(config_dir).get(filename, {})
    return str(device_entry.get("board_id", ""))
def set_device_metadata(
    config_dir: Path,
    filename: str,
    *,
    board_id: str | None = None,
    friendly_name: str | None = None,
    comment: str | None = None,
    ip: str | None = None,
    expected_config_hash: str | None = None,
    mac_address: str | None = None,
    regen_failed_mtime: float | None = None,
    regen_failed_at: float | None = None,
    build_size_bytes: int | None = None,
    build_size_dir_mtime: int | None = None,
    build_size_info_mtime: int | None = None,
    labels: list[str] | None = None,
) -> None:
    """
    Set metadata fields for a device.

    All parameters follow a "``None`` means leave alone" convention;
    only explicitly-passed fields are touched, inside one
    ``metadata_transaction`` so the update is atomic.

    ``ip`` is the last-known resolved IP — persisted so the address
    cache survives backend restarts. Pass an empty string to leave the
    persisted value unchanged (mDNS clears the in-memory IP whenever a
    device drops off the network, but the cache is still useful).
    ``expected_config_hash`` is the 8-char hex FNV-1a-32 hash of the
    YAML as last successfully compiled — pair it with the mDNS
    ``config_hash`` TXT record (esphome/esphome#16145) to tell whether
    the running firmware matches the compiled config. Passing an empty
    string clears it (e.g. after a YAML edit invalidates the prior
    compile).
    ``mac_address`` is the canonical ``XX:XX:XX:XX:XX:XX`` MAC
    from the mDNS ``mac`` TXT record (normalized at ingest).
    Persisted so the dashboard renders the address immediately on
    startup, before the first mDNS probe response. Passing an
    empty string clears it.
    ``regen_failed_mtime`` is the YAML's mtime when the last
    ``--only-generate`` storage-regen attempt failed; pair it with
    ``regen_failed_at`` (the wall-clock time the failure was
    recorded). Together they let a backend restart skip retrying
    the same broken config (missing ``!secret`` / ``!include`` /
    unreachable git package) — the next attempt only runs when
    the YAML's mtime has actually moved past the cached stamp,
    OR when the cached stamp is older than the controller's
    failure-TTL (so transient external problems eventually get
    re-checked). The two fields are written together by
    :meth:`DevicesController._stamp_regen_failure`; the
    success / archive paths clear them by passing ``0.0`` to
    *both* — clearing only one half leaves the other behind, so
    callers should always touch the pair as a unit.
    ``build_size_bytes`` caches the total size of the per-device
    ``.esphome/build/<name>/`` tree at the freshness pair
    captured by the last walk. The pair is split because each
    half catches a class of compile-time changes the other
    misses: ``build_size_dir_mtime`` moves on entry-set churn
    (PlatformIO atomic-replaces, sibling add/remove),
    ``build_size_info_mtime`` moves on every real ESPHome
    recompile (``write_file_if_changed`` rewrites
    ``build_info.json``). Either side moving counts as stale,
    so a freshly-restarted dashboard re-walks any device whose
    pair drifted from what was persisted. Pass ``0`` for any
    field to clear (used by the archive flow's volatile-field
    scrub).
    ``labels`` is the list of label IDs assigned to this device
    (opaque ``uuid.uuid4().hex`` references into the global
    ``_labels`` catalog). ``None`` leaves the persisted list
    alone; ``[]`` clears it (drops the key entirely so empty
    entries don't bloat the file); a populated list replaces
    the assignments wholesale.
    """
    with metadata_transaction(config_dir) as data:
        entry = data.setdefault(filename, {})
        # Simple write-if-provided fields (no explicit-clear sentinel).
        if board_id is not None:
            entry["board_id"] = board_id
        if friendly_name is not None:
            entry["friendly_name"] = friendly_name
        if comment is not None:
            entry["comment"] = comment
        # ``ip`` deliberately ignores the empty string (see docstring):
        # only a truthy value overwrites the cached address.
        if ip:
            entry["ip"] = ip
        if labels is not None:
            if labels:
                # Copy so later caller-side mutation can't alias the
                # persisted list.
                entry["labels"] = list(labels)
            else:
                entry.pop("labels", None)
        # Tri-state fields: ``None`` means "leave alone", a truthy
        # value writes, an explicit falsy (``""`` / ``0``) clears.
        # The numeric stamps below (``regen_failed_*`` /
        # ``build_size_*``) all carry timestamps or sizes whose
        # legitimate values are strictly positive — ``0`` is
        # therefore safe as the explicit-clear sentinel.
        # Loop over the (key, value) pairs so adding a new
        # tri-state field doesn't bump this function's branch
        # count (ruff PLR0912 caps at 12).
        for key, value in (
            ("expected_config_hash", expected_config_hash),
            ("mac_address", mac_address),
            ("regen_failed_mtime", regen_failed_mtime),
            ("regen_failed_at", regen_failed_at),
            ("build_size_bytes", build_size_bytes),
            ("build_size_dir_mtime", build_size_dir_mtime),
            ("build_size_info_mtime", build_size_info_mtime),
        ):
            if value is None:
                continue
            if value:
                entry[key] = value
            else:
                entry.pop(key, None)
def get_device_metadata(config_dir: Path, filename: str) -> dict[str, Any]:
    """Get all metadata for a device."""
    entry = _load_metadata(config_dir).get(filename, {})
    # Guard against a hand-edited sidecar where the entry isn't a dict.
    if isinstance(entry, dict):
        return entry
    return {}
def get_device_ip(config_dir: Path, filename: str) -> str:
    """Return the last-known resolved IP for a device, or ``""`` if unknown."""
    device_entry = _load_metadata(config_dir).get(filename, {})
    return str(device_entry.get("ip", ""))
def remove_device_metadata(config_dir: Path, filename: str) -> None:
    """Remove metadata for a device."""
    with metadata_transaction(config_dir) as metadata:
        metadata.pop(filename, None)
# Fields on a device-metadata entry whose meaning is tied to the
# *running* firmware / network state, not to the YAML's identity.
# Used by ``clear_volatile_device_metadata`` (and its caller in
# the archive flow) to scrub these on archive while preserving
# the stable identity fields. New volatile fields added here
# must also be added to ``set_device_metadata`` (and any
# accessors) so the cleared / un-cleared shapes stay aligned.
_VOLATILE_DEVICE_METADATA_FIELDS: frozenset[str] = frozenset(
    {
        # Last resolved IP — meaningless once the device is no
        # longer on the network.
        "ip",
        # Last successfully-compiled config hash. Pairs with the
        # mDNS ``config_hash`` TXT record to detect "running
        # firmware out of sync with YAML"; on archive the build
        # dir is wiped so the hash no longer corresponds to any
        # available artifact and would be misleading on the next
        # compile.
        "expected_config_hash",
        # Last MAC observed via mDNS. Stable per device but tied
        # to the running firmware — on archive the YAML may later
        # be redeployed to a different physical board, and a
        # stale persisted MAC would render until the next mDNS
        # announce overwrote it.
        "mac_address",
        # YAML mtime + wall-clock timestamp at the last failed
        # storage-regen attempt. Archive moves the YAML; a future
        # unarchive may put it back with a fresh mtime, so any
        # cached failure stamp would be meaningless. Cleared so
        # the next scan retries the regen.
        "regen_failed_mtime",
        "regen_failed_at",
        # Cached size of the per-device build directory at the
        # freshness pair captured alongside it. Archive wipes the
        # build tree (``_wipe_device_build_dir``) so the cached
        # triple would describe a directory that no longer
        # exists.
        "build_size_bytes",
        "build_size_dir_mtime",
        "build_size_info_mtime",
    }
)
def clear_volatile_device_metadata(config_dir: Path, filename: str) -> None:
    """Drop runtime / observed state fields, keep stable identity fields.

    On archive the dashboard removes the YAML's compile output and the
    StorageJSON sidecar (both build artifacts), but the device-metadata
    entry mixes two kinds of state:

    - Stable identity fields (``board_id``, ``friendly_name``,
      ``comment``) — user-set or YAML-derived, still meaningful on
      unarchive.
    - Volatile fields (``ip``, ``expected_config_hash``, ...) —
      describe the firmware / network state at archive time and go
      stale immediately.

    An earlier shape deleted the whole entry on archive, which closed
    the "future same-name device inherits stale state" risk but also
    lost the identity fields; since the catalog → YAML match key is
    ``board_id``, every archive → unarchive cycle then forced an
    unnecessary re-derive (or user re-pick). This helper keeps identity
    and clears only the volatile set, so unarchive restores user-visible
    state unchanged. Same-name new-device leakage of identity fields is
    acceptable: a new device's create flow derives or supplies its own
    ``board_id``, and friendly_name / comment are user labels its editor
    can overwrite.
    """
    with metadata_transaction(config_dir) as metadata:
        entry = metadata.get(filename)
        if entry is None:
            return
        if not isinstance(entry, dict):
            # A non-dict value is corrupt — left in place it would later
            # break ``set_device_metadata``, which item-assigns into the
            # existing entry. Drop it so the next write starts clean.
            metadata.pop(filename, None)
            return
        for volatile_key in _VOLATILE_DEVICE_METADATA_FIELDS:
            entry.pop(volatile_key, None)
        # An entry left empty (no identity fields were ever set) is
        # removed outright so dead keys don't linger in the sidecar.
        if not entry:
            metadata.pop(filename, None)
def load_preferences(config_dir: Path) -> UserPreferences:
    """Load user preferences, returning defaults for missing fields."""
    stored = _load_metadata(config_dir).get(_PREFS_KEY, {})
    try:
        return UserPreferences.from_dict(stored)
    except Exception:
        # Corrupt / incompatible stored prefs degrade to defaults rather
        # than failing the caller.
        return UserPreferences()
def save_preferences(config_dir: Path, prefs: UserPreferences) -> None:
    """Save user preferences to disk."""
    with metadata_transaction(config_dir) as metadata:
        metadata[_PREFS_KEY] = prefs.to_dict()
def _decode_labels(raw: Any) -> list[Label]:
    """Parse the on-disk ``_labels`` list, dropping malformed entries."""
    if not isinstance(raw, list):
        return []
    decoded: list[Label] = []
    for item in raw:
        if not isinstance(item, dict):
            continue
        try:
            decoded.append(Label.from_dict(item))
        except Exception as err:
            # Labels are advisory, so one malformed hand-edited entry
            # must not take the whole catalog down. Debug level leaves a
            # paper trail for "why did my label disappear?" hunts without
            # WARN-level noise on every load.
            _LOGGER.debug("Skipping malformed label entry %r: %s", item, err)
    return decoded
def load_labels(config_dir: Path) -> list[Label]:
    """
    Load the global label catalog.

    A missing or corrupt ``_labels`` key yields an empty list, and any
    individual entry that fails to round-trip through :class:`Label` is
    skipped silently — labels are advisory metadata, not load-bearing
    state, so one bad entry can't take the whole catalog down.
    """
    raw_catalog = _load_metadata(config_dir).get(_LABELS_KEY, [])
    return _decode_labels(raw_catalog)
def save_labels(config_dir: Path, labels: list[Label]) -> None:
    """Replace the global label catalog atomically."""
    with metadata_transaction(config_dir) as metadata:
        metadata[_LABELS_KEY] = [item.to_dict() for item in labels]
@contextmanager
def labels_transaction(config_dir: Path) -> Iterator[list[Label]]:
    """
    Atomic read-modify-write context for the global label catalog.

    Yields a mutable list of :class:`Label` instances decoded from the
    ``_labels`` key; mutate it in place. A clean exit re-encodes the
    catalog and persists it atomically with the rest of the metadata
    file, while an exception inside the block discards the pending
    mutation. Use this when uniqueness / existence checks must share a
    single transaction with the write — validating inside the lock means
    no concurrent writer can slip in between check and save.
    """
    with metadata_transaction(config_dir) as metadata:
        working_catalog = _decode_labels(metadata.get(_LABELS_KEY))
        yield working_catalog
        metadata[_LABELS_KEY] = [item.to_dict() for item in working_catalog]
def set_device_labels(config_dir: Path, configuration: str, label_ids: list[str]) -> None:
    """
    Replace a device's label assignments atomically.

    *label_ids* is validated against the live catalog inside the same
    metadata transaction as the write, so a concurrent ``labels/delete``
    cascade can't leave the device with a dangling reference. The input
    has set semantics: duplicates are dropped while preserving
    first-seen order. Pass ``[]`` to clear all assignments.

    Raises :class:`ValueError` for non-string entries and for ids absent
    from the catalog; the caller translates that to
    ``CommandError(INVALID_ARGS)`` at the API surface.
    """
    # Reject non-string ids up front. Silently skipping them would let a
    # payload of all-bad types become an effective ``[]`` (clear-all)
    # write — surprising and user-hostile.
    for lid in label_ids:
        if not isinstance(lid, str):
            raise ValueError(f"label_ids must be strings, got {type(lid).__name__}: {lid!r}")
    # dict.fromkeys deduplicates while keeping first-seen order.
    deduped = list(dict.fromkeys(label_ids))
    with metadata_transaction(config_dir) as metadata:
        raw_catalog = metadata.get(_LABELS_KEY, [])
        known: set[str] = set()
        if isinstance(raw_catalog, list):
            for item in raw_catalog:
                if isinstance(item, dict) and isinstance(item.get("id"), str):
                    known.add(item["id"])
        unknown = [lid for lid in deduped if lid not in known]
        if unknown:
            raise ValueError(f"Unknown label id(s): {', '.join(repr(u) for u in unknown)}")
        entry = metadata.setdefault(configuration, {})
        if not isinstance(entry, dict):
            # A non-dict entry shouldn't survive long — overwrite to
            # restore the expected shape rather than crash here.
            entry = {}
            metadata[configuration] = entry
        if deduped:
            entry["labels"] = deduped
        else:
            entry.pop("labels", None)
def delete_label_cascade(config_dir: Path, label_id: str) -> tuple[bool, set[str]]:
    """
    Drop *label_id* from the catalog and every device entry.

    Everything runs inside one ``metadata_transaction`` so the existence
    check, catalog removal, and per-device cleanup share one lock — a
    concurrent writer can't reintroduce the deleted ID, and because the
    existence check works on the *raw* on-disk dict, a corrupt catalog
    entry (one ``Label.from_dict`` would skip) is still removable.

    Returns ``(found, affected)``: *found* is ``True`` when the catalog
    actually held an entry with this id (otherwise the caller raises
    ``NOT_FOUND``), and *affected* is the set of configuration filenames
    whose ``labels`` list changed — callers schedule a per-device
    scanner reload from it so live ``Device`` objects pick up the
    cleaned state without waiting for the next disk-driven scan.
    """
    touched: set[str] = set()
    removed = False
    with metadata_transaction(config_dir) as metadata:
        raw_catalog = metadata.get(_LABELS_KEY)
        if isinstance(raw_catalog, list):
            surviving: list[Any] = []
            for item in raw_catalog:
                if isinstance(item, dict) and item.get("id") == label_id:
                    removed = True
                else:
                    surviving.append(item)
            if removed:
                metadata[_LABELS_KEY] = surviving
        if not removed:
            return False, set()
        for filename, entry in metadata.items():
            # Underscore keys are the reserved non-device entries.
            if filename.startswith("_") or not isinstance(entry, dict):
                continue
            assigned = entry.get("labels")
            if not isinstance(assigned, list) or label_id not in assigned:
                continue
            remaining = [lid for lid in assigned if lid != label_id]
            if remaining:
                entry["labels"] = remaining
            else:
                entry.pop("labels", None)
            touched.add(filename)
    return True, touched
# ---------------------------------------------------------------------------
# ConfigController
# ---------------------------------------------------------------------------
class ConfigController:
    """Manages application configuration, preferences, and system info."""

    def __init__(self, device_builder: DeviceBuilder) -> None:
        # Kept for access to ``settings`` (config_dir, rel_path).
        self._db = device_builder

    @api_command("config/version")
    async def get_version(self, **kwargs: Any) -> dict:
        """Get ESPHome and server version."""
        return {"server_version": server_version, "esphome_version": esphome_version}

    @api_command("config/serial_ports")
    async def get_serial_ports_cmd(self, **kwargs: Any) -> list[dict]:
        """List available serial ports."""
        loop = asyncio.get_running_loop()
        # ``get_serial_ports`` probes the OS — run it off the event loop.
        ports = await loop.run_in_executor(None, get_serial_ports)
        return [
            {"port": p.path, "desc": p.description if p.description != "n/a" else p.path}
            for p in ports
        ]

    @api_command("config/get_preferences")
    async def get_prefs(self, **kwargs: Any) -> UserPreferences:
        """Get user preferences."""
        loop = asyncio.get_running_loop()
        # Disk read happens in the executor to keep the loop responsive.
        return await loop.run_in_executor(None, load_preferences, self._db.settings.config_dir)

    @api_command("config/set_preferences")
    async def set_prefs(self, **kwargs: Any) -> UserPreferences:
        """Update user preferences.
        Accepts partial updates — only provided fields are changed,
        others keep their current values.
        """
        loop = asyncio.get_running_loop()
        config_dir = self._db.settings.config_dir
        # Load current, merge with provided fields, validate, save
        current = await loop.run_in_executor(None, load_preferences, config_dir)
        # Strip the dispatcher-injected kwargs — presumably added by
        # ``api_command``; everything else is treated as a pref field.
        update_fields = {k: v for k, v in kwargs.items() if k not in ("client", "message_id")}
        # Merge into current preferences
        current_dict = current.to_dict()
        current_dict.update(update_fields)
        # Round-trip through ``from_dict`` validates the merged shape
        # before anything is persisted.
        updated = UserPreferences.from_dict(current_dict)
        await loop.run_in_executor(None, save_preferences, config_dir, updated)
        return updated

    @api_command("config/get_secrets")
    async def get_secrets(self, **kwargs: Any) -> list[str]:
        """Get secret key names from secrets.yaml.

        Returns only the key names (sorted), never the secret values.
        """
        loop = asyncio.get_running_loop()

        def _read_secrets() -> list[str]:
            secrets_path = self._db.settings.config_dir / "secrets.yaml"
            if not secrets_path.exists():
                return []
            try:
                # ``yaml_util.load_yaml`` expects a ``Path``, not a
                # ``str`` — passing a string raises ``AttributeError``
                # at ``fname.open(...)`` and the bare except below
                # would silently swallow it, leaving the secrets
                # dropdown permanently empty.
                data = yaml_util.load_yaml(secrets_path)
                return sorted(data.keys()) if isinstance(data, dict) else []
            except Exception:
                # Best-effort: an unparsable secrets.yaml yields an
                # empty list rather than failing the command.
                return []

        return await loop.run_in_executor(None, _read_secrets)

    @api_command("config/get_info")
    async def get_info(self, *, configuration: str, **kwargs: Any) -> dict | None:
        """Get compiled device metadata (StorageJSON) for a configuration.

        Returns ``None`` when no StorageJSON sidecar exists (i.e. the
        configuration was never compiled).
        """
        loop = asyncio.get_running_loop()

        def _load_info() -> dict | None:
            # ``rel_path`` calls ``Path.resolve`` (an ``os.path.abspath``
            # syscall under the hood) and the StorageJSON load below
            # opens the sidecar from disk — both block the event loop
            # if run inline. Do them together inside the executor so
            # a slow filesystem (NFS-mounted config dir, EBS-backed
            # Docker volume) can't stall the dashboard. ``rel_path``
            # raises ``CommandError`` on traversal; the awaited future
            # propagates that out to the WS dispatcher unchanged.
            self._db.settings.rel_path(configuration)
            storage = StorageJSON.load(ext_storage_path(configuration))
            if storage is None:
                return None
            return {
                "name": storage.name,
                "friendly_name": storage.friendly_name,
                "comment": storage.comment,
                "address": storage.address,
                "web_port": storage.web_port,
                "target_platform": storage.target_platform,
                "current_version": storage.esphome_version,
                "deployed_version": storage.firmware_bin_path,
                "loaded_integrations": storage.loaded_integrations,
            }

        return await loop.run_in_executor(None, _load_info)