diff --git a/CODEOWNERS b/CODEOWNERS
index 00583e9024986..1cc5357555dcf 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -472,6 +472,7 @@
/bundles/org.openhab.persistence.mapdb/ @openhab/add-ons-maintainers
/bundles/org.openhab.persistence.mongodb/ @openhab/add-ons-maintainers
/bundles/org.openhab.persistence.rrd4j/ @openhab/add-ons-maintainers
+/bundles/org.openhab.persistence.timescaledb/ @ulbi
/bundles/org.openhab.transform.basicprofiles/ @cweitkamp @J-N-K
/bundles/org.openhab.transform.bin2json/ @paulianttila
/bundles/org.openhab.transform.exec/ @openhab/add-ons-maintainers
diff --git a/bom/openhab-addons/pom.xml b/bom/openhab-addons/pom.xml
index 25eb90310ad7c..05d81faf20d2d 100644
--- a/bom/openhab-addons/pom.xml
+++ b/bom/openhab-addons/pom.xml
@@ -2341,6 +2341,11 @@
       <artifactId>org.openhab.persistence.rrd4j</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.openhab.addons.bundles</groupId>
+      <artifactId>org.openhab.persistence.timescaledb</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.openhab.addons.bundles</groupId>
       <artifactId>org.openhab.transform.basicprofiles</artifactId>
diff --git a/bundles/org.openhab.persistence.timescaledb/AGENTS.md b/bundles/org.openhab.persistence.timescaledb/AGENTS.md
new file mode 100644
index 0000000000000..7680773654a44
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/AGENTS.md
@@ -0,0 +1,312 @@
+# AGENTS.md - TimescaleDB Persistence Development Guide
+
+## Context
+
+You are working on a **native TimescaleDB persistence service** for openHAB.
+TimescaleDB is a time-series extension for PostgreSQL — all standard PostgreSQL JDBC drivers work, but the schema and queries use TimescaleDB-specific features.
+
+**openHAB has no built-in downsampling/aggregation framework.** `FilterCriteria`, `PersistenceStrategy` and `PersistenceItemConfiguration` contain no aggregation concepts. Everything must be implemented inside this service.
+
+---
+
+## Architecture
+
+### Key Classes
+
+| Class | Role |
+|---|---|
+| `TimescaleDBPersistenceService` | Main OSGi service, implements `ModifiablePersistenceService` |
+| `TimescaleDBMapper` | `State` ↔ SQL value conversion (all openHAB item types) |
+| `TimescaleDBSchema` | Schema creation and migration on startup |
+| `TimescaleDBQuery` | SQL query builder for all persistence operations |
+| `TimescaleDBMetadataService` | Reads per-item downsampling config from `MetadataRegistry` |
+| `TimescaleDBDownsampleJob` | Scheduled daily job: aggregates + deletes raw rows in-place |
+
+### OSGi Service Registration
+
+- Service ID: `timescaledb`
+- Implements: `ModifiablePersistenceService` (= `QueryablePersistenceService` + `remove()`)
+- Config PID: `org.openhab.persistence.timescaledb`
+- `OH-INF/addon/addon.xml` required — registers the addon in the openHAB UI (Settings → Add-ons → TimescaleDB). Without it the bundle runs but is invisible to the UI.
+- Config description: `OH-INF/config/timescaledb.xml`
+- `ConfigurationPolicy.REQUIRE` — service does not start without configuration
+- Scheduler: `ThreadPoolManager.getScheduledPool("timescaledb")` (shared pool — never call `shutdownNow()`)
+- Deactivate: `ScheduledFuture.cancel(false)`, then `HikariDataSource.close()`
+- State indicator: `dataSource != null` — no `initialized` boolean
+
+### Dependencies
+
+- JDBC Driver: `org.postgresql:postgresql`
+- Connection pooling: HikariCP (already used in other openHAB bundles)
+- openHAB Core: `org.openhab.core.persistence`, `org.openhab.core.items` (for `MetadataRegistry`)
+
+---
+
+## Database Schema
+
+```sql
+CREATE TABLE item_meta (
+ id SERIAL PRIMARY KEY,
+ name TEXT NOT NULL UNIQUE,
+ label TEXT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE TABLE items (
+ time TIMESTAMPTZ NOT NULL,
+ item_id INTEGER NOT NULL REFERENCES item_meta(id),
+ value DOUBLE PRECISION,
+ string TEXT,
+ unit TEXT, -- stored per row, NOT in item_meta
+ downsampled BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+SELECT create_hypertable('items', 'time');
+CREATE INDEX ON items (item_id, time DESC);
+```
+
+### Why `unit` is per row, not in `item_meta`
+
+A `QuantityType` unit can change over time (sensor reconfiguration, firmware update, etc.). Storing it in `item_meta` would corrupt historical reads. The unit is stored with each measurement and read back from the row when reconstructing `QuantityType` states.
+
+### Why downsampling is in-place (same hypertable)
+
+openHAB reads persisted data directly from the hypertable via `QueryablePersistenceService`. If aggregated data lived in separate views or tables, openHAB would not see it without query-layer changes. In-place replacement (delete raw rows → insert aggregated rows with `downsampled=TRUE`) keeps a single source of truth that openHAB reads transparently.
+
+---
+
+## State Type Mapping
+
+All openHAB item types are fully supported. `TimescaleDBMapper` handles the conversion in both directions.
+
+### Store direction (`toRow`)
+
+| State type | `value` column | `string` column | `unit` column |
+|---|---|---|---|
+| `QuantityType` | numeric | null | unit string (e.g. `"°C"`) |
+| `DecimalType` | numeric | null | null |
+| `OnOffType` | `ON=1.0 / OFF=0.0` | null | null |
+| `OpenClosedType` | `OPEN=1.0 / CLOSED=0.0` | null | null |
+| `PercentType` | 0.0–100.0 | null | null |
+| `UpDownType` | `UP=0.0 / DOWN=1.0` | null | null |
+| `HSBType` | null | `"H,S,B"` | null |
+| `DateTimeType` | null | ISO-8601 string | null |
+| `PointType` | null | `"lat,lon[,alt]"` | null |
+| `PlayPauseType` | null | enum name (`"PLAY"`, `"PAUSE"`, …) | null |
+| `StringListType` | null | comma-separated values | null |
+| `RawType` | null | Base64-encoded bytes | MIME type |
+| `StringType` | null | raw string | null |
+
+### Load direction (`toState`)
+
+`GroupItem` is unwrapped to its base item before dispatch. Item type determines how the row is interpreted:
+
+- `ColorItem` → `HSBType` (parsed from `string`)
+- `DateTimeItem` → `DateTimeType` (parsed from `string`)
+- `LocationItem` → `PointType` (parsed from `string`)
+- `PlayerItem` → `PlayPauseType` (parsed from `string`)
+- `CallItem` → `StringListType` (parsed from `string`)
+- `ImageItem` → `RawType` (Base64-decoded from `string`, MIME type from `unit`)
+- `DimmerItem` / `RollershutterItem` → `PercentType` (**must be checked before `SwitchItem`**)
+- `SwitchItem` → `OnOffType`
+- `ContactItem` → `OpenClosedType`
+- `NumberItem` with `unit != null` → `QuantityType`
+- `NumberItem` without unit → `DecimalType`
+- anything else with `string` → `StringType`
+
+**Critical instanceof ordering in `toRow()`:** `HSBType` before `PercentType` before `DecimalType`
+(because `HSBType extends PercentType extends DecimalType`).
+
+---
+
+## Per-Item Downsampling via Item Metadata
+
+### How to read metadata (same pattern as InfluxDB persistence)
+
+```java
+@Reference
+private MetadataRegistry metadataRegistry;
+
+private Optional<Metadata> getItemMetadata(String itemName) {
+ MetadataKey key = new MetadataKey("timescaledb", itemName);
+ return Optional.ofNullable(metadataRegistry.get(key));
+}
+```
+
+`Metadata` has:
+- `getValue()` → main value string, e.g. `"AVG"`, `"MAX"`, `"MIN"`, `"SUM"`, or `" "` (single space = retention-only, no aggregation — openHAB rejects a truly empty value)
+- `getConfiguration()` → `Map<String, Object>` with keys like `"downsampleInterval"`, `"retainRawDays"`, `"retentionDays"`
+
+### Metadata format (configured by users in .items files)
+
+```java
+Number:Temperature MySensor {
+ timescaledb="AVG" [ downsampleInterval="1h", retainRawDays="5", retentionDays="365" ]
+}
+```
+
+### Parsing the metadata
+
+```java
+public record DownsampleConfig(
+    @Nullable AggregationFunction function, // AVG / MAX / MIN / SUM, null = retention-only
+    @Nullable String sqlInterval,           // validated SQL interval literal, e.g. "1 hour"
+    int retainRawDays,                      // default 5
+    int retentionDays                       // default 0 = disabled
+) {}
+
+public enum AggregationFunction { AVG, MAX, MIN, SUM }
+```
+
+Interval parsing — **validate against an allowlist** (used in SQL string formatting):
+
+| Metadata value | SQL interval literal |
+|---|---|
+| `1m` | `1 minute` |
+| `5m` | `5 minutes` |
+| `15m` | `15 minutes` |
+| `30m` | `30 minutes` |
+| `1h` | `1 hour` |
+| `2h` | `2 hours` |
+| `6h` | `6 hours` |
+| `12h` | `12 hours` |
+| `1d` | `1 day` |
+
+Throw `IllegalArgumentException` for any value not in this list to prevent SQL injection.
+
+---
+
+## Downsampling Job (`TimescaleDBDownsampleJob`)
+
+Runs daily (e.g. via `@Scheduled` or openHAB's `CronScheduler`). For each item that has `timescaledb` metadata with a non-empty aggregation function:
+
+```sql
+-- Step 1: aggregate raw rows older than retainRawDays into buckets
+INSERT INTO items (time, item_id, value, unit, downsampled)
+SELECT
+    time_bucket('<interval>', time) AS time,
+    item_id,
+    <fn>(value) AS value,
+    last(unit, time) AS unit,        -- keep most recent unit in bucket
+    TRUE AS downsampled
+FROM items
+WHERE item_id = ?
+  AND downsampled = FALSE
+  AND time < NOW() - INTERVAL '<retainRawDays> days'
+GROUP BY time_bucket('<interval>', time), item_id
+ON CONFLICT DO NOTHING;
+
+-- Step 2: delete replaced raw rows
+DELETE FROM items
+WHERE item_id = ?
+  AND downsampled = FALSE
+  AND time < NOW() - INTERVAL '<retainRawDays> days';
+
+-- Step 3 (if retentionDays > 0): drop everything older than retention window
+DELETE FROM items
+WHERE item_id = ?
+  AND time < NOW() - INTERVAL '<retentionDays> days';
+```
+
+**Important:**
+- `<interval>`, `<fn>`, `<retainRawDays>`, `<retentionDays>` are formatted into the SQL string — **never from user input directly**. Validate interval against allowlist, validate function against enum. Use `?` for `item_id`.
+- `last(unit, time)` is a TimescaleDB hyperfunction — verify it is available, otherwise use `MAX(unit)` as fallback.
+- Run steps 1+2 in a transaction per item to avoid partial state.
+- Log errors per item and continue (don't abort the entire job on a single-item failure).
+
+---
+
+## Query Implementation
+
+- All item name / date / state lookups use JDBC `PreparedStatement` — no string concatenation for user-controlled values.
+- `time_bucket()` interval is formatted as a string but validated against the allowlist above.
+- `historicState`: `WHERE item_id=? AND time <= ? ORDER BY time DESC LIMIT 1`
+- `getAllStatesBetween`: `WHERE item_id=? AND time BETWEEN ? AND ? ORDER BY time ASC` — returns both raw and downsampled rows.
+- Aggregate queries (`averageSince`, `minSince`, etc.): `WHERE item_id=? AND time >= ?` — operate on all rows including downsampled ones, which is correct.
+
+---
+
+## item_id Caching
+
+Cache `name → item_id` in a `ConcurrentHashMap` to avoid a SELECT on every `store()` call. Invalidate on service restart. Auto-insert into `item_meta` on first `store()` if the item is unknown.
+
+```java
+private final Map<String, Integer> itemIdCache = new ConcurrentHashMap<>();
+
+private int getOrCreateItemId(String name, @Nullable String label) {
+ return itemIdCache.computeIfAbsent(name, n -> fetchOrInsertItemMeta(n, label));
+}
+```
+
+---
+
+## Testing
+
+### Unit Tests (no DB required)
+
+Location: `src/test/java/org/openhab/persistence/timescaledb/internal/`
+
+- `BundleManifestTest` — OSGi Import-Package allowlist + presence of `OH-INF/addon/addon.xml`
+- `TimescaleDBMapperTest` — State ↔ SQL value round-trips for all state types
+- `TimescaleDBMetadataServiceTest` — parsing of metadata values and config keys
+- `TimescaleDBDownsampleJobTest` — SQL generation for aggregation/delete, interval allowlist validation
+
+Run with `mvn test` — last result: **183 tests, 0 failures** (2026-03-13).
+
+### Integration Tests (requires Docker + TimescaleDB)
+
+Tagged `@Tag("integration")`, run automatically via Testcontainers during `mvn test`:
+
+```java
+@Container
+static PostgreSQLContainer<?> db = new PostgreSQLContainer<>("timescale/timescaledb:latest-pg16")
+ .withDatabaseName("openhab_test")
+ .withUsername("openhab")
+ .withPassword("openhab");
+```
+
+Test: schema creation, store/query round-trips, downsampling job result, compression policy creation.
+
+### Performance Tests (requires external DB)
+
+Tagged `@Tag("performance")` — **excluded from `mvn test`** via `performance,external-integration` in pom.xml.
+
+Run explicitly against an external TimescaleDB:
+
+```bash
+HOST=... PORT=5432 DBNAME=openhab USER=openhab PASSWORD=... \
+ mvn test -Dtest=TimescaleDBPerformanceIT \
+ -pl bundles/org.openhab.persistence.timescaledb
+```
+
+See `PERFORMANCE_TESTS.md` for SLOs, scale constants, and the heavy 18-month scenario.
+
+---
+
+## Common Pitfalls
+
+1. **TimescaleDB extension not installed**: check on startup with `SELECT extname FROM pg_extension WHERE extname='timescaledb'`, fail with a clear error if missing.
+2. **`last()` availability**: `last(unit, time)` is a hyperfunction shipped with the TimescaleDB extension itself (it is not available on plain PostgreSQL) — check that the extension is active, fall back to `MAX(unit)` otherwise.
+3. **Compression + INSERT conflict**: compressed chunks are read-only. The downsampling INSERT must target the uncompressed region (data newer than `compressionAfterDays`). Ensure `retainRawDays < compressionAfterDays`.
+4. **Interval allowlist is mandatory**: `time_bucket('1h', time)` is dynamically formatted — any non-allowlisted value must throw before it reaches SQL.
+5. **`ON CONFLICT DO NOTHING`** on the aggregation INSERT: the job may run twice if interrupted; duplicate bucket rows must be prevented.
+6. **`QuantityType` unit changes**: never update `item_meta` with a unit — the unit lives on each row. On read, take the `unit` value from the row.
+
+---
+
+## Relevant openHAB Core APIs
+
+- `org.openhab.core.persistence.ModifiablePersistenceService` — implement this (extends `QueryablePersistenceService`, adds `remove()`)
+- `org.openhab.core.persistence.FilterCriteria` — query parameters passed to `query()`
+- `org.openhab.core.items.MetadataRegistry` — OSGi service, inject via `@Reference`
+- `org.openhab.core.items.Metadata` — `getValue()` + `getConfiguration()` for per-item config
+- `org.openhab.core.items.MetadataKey` — constructed as `new MetadataKey("timescaledb", itemName)`
+- `org.openhab.core.library.types.*` — `QuantityType`, `DecimalType`, `OnOffType`, etc.
+
+## References
+
+- [TimescaleDB docs](https://docs.timescale.com/)
+- [time_bucket()](https://docs.timescale.com/api/latest/hyperfunctions/time_bucket/)
+- [last()](https://docs.timescale.com/api/latest/hyperfunctions/last/)
+- [Compression](https://docs.timescale.com/use-timescale/latest/compression/)
+- InfluxDB persistence (metadata pattern): `bundles/org.openhab.persistence.influxdb/src/main/java/.../InfluxDBMetadataService.java`
+- Existing downsampling logic (Python/MongoDB): `DOWNSAMPLE_IMPLEMENTATION_GUIDE.md` in this bundle
+
diff --git a/bundles/org.openhab.persistence.timescaledb/README.md b/bundles/org.openhab.persistence.timescaledb/README.md
new file mode 100644
index 0000000000000..004feb6e38b13
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/README.md
@@ -0,0 +1,234 @@
+# TimescaleDB Persistence
+
+This service persists and queries openHAB item states using [TimescaleDB](https://www.timescale.com/), a time-series database built on PostgreSQL.
+
+Unlike the generic JDBC persistence (which can also connect to TimescaleDB via the PostgreSQL driver), this service is purpose-built for TimescaleDB and leverages its native time-series features:
+
+- **Hypertables** for automatic time-based partitioning and fast range queries
+- **In-place downsampling** — raw data is aggregated and replaced in the hypertable directly, so openHAB reads aggregated data transparently without any schema changes
+- **Per-item downsampling config** via item metadata (namespace `timescaledb`)
+- **Compression Policies** to automatically compress older data and reduce storage
+- **Retention Policies** to automatically drop data older than a configured threshold
+
+## Prerequisites
+
+- TimescaleDB 2.x installed and running (as a PostgreSQL extension)
+- A database and user created for openHAB
+
+```sql
+CREATE DATABASE openhab;
+CREATE USER openhab WITH PASSWORD 'openhab';
+GRANT ALL PRIVILEGES ON DATABASE openhab TO openhab;
+
+-- Connect to openhab database, then:
+CREATE EXTENSION IF NOT EXISTS timescaledb;
+```
+
+## Database Schema
+
+The service **creates all tables automatically on startup** — no manual DDL required.
+Item states are stored in a single hypertable `items` (columns: `time`, `item_id`, `value`, `string`, `unit`, `downsampled`) and a name-lookup table `item_meta`.
+
+## State Type Mapping
+
+| openHAB Type | `value` column | `string` column | `unit` column |
+|-------------------|--------------------------------|-----------------|------------------------|
+| `DecimalType` | numeric value | — | — |
+| `QuantityType` | numeric value (stripped) | — | unit string, e.g. `°C` |
+| `OnOffType` | `1.0` (ON) / `0.0` (OFF) | — | — |
+| `OpenClosedType` | `1.0` (OPEN) / `0.0` (CLOSED) | — | — |
+| `PercentType` | `0.0`–`100.0` | — | — |
+| `UpDownType` | `0.0` (UP) / `1.0` (DOWN) | — | — |
+| `HSBType` | — | `H,S,B` | — |
+| `DateTimeType` | — | ISO-8601 | — |
+| `StringType` | — | raw string | — |
+
+## Configuration
+
+Configure via `$OPENHAB_CONF/services/timescaledb.cfg` or in the UI under `Settings → Add-ons → TimescaleDB → Configure`.
+
+| Property | Default | Required | Description |
+|------------------------|-----------|:--------:|-----------------------------------------------------------|
+| `url` | | Yes | JDBC URL, e.g. `jdbc:postgresql://localhost:5432/openhab` |
+| `user` | `openhab` | No | Database user |
+| `password` | | Yes | Database password |
+| `chunkInterval` | `7 days` | No | TimescaleDB chunk interval for the hypertable |
+| `retentionDays` | `0` | No | Drop data older than N days. `0` = disabled |
+| `compressionAfterDays` | `0` | No | Compress chunks older than N days. `0` = disabled |
+| `maxConnections` | `5` | No | Maximum DB connections in the pool |
+| `connectTimeout` | `5000` | No | Connection timeout in milliseconds |
+
+## Persistence Configuration
+
+All item- and event-related configuration is defined in `persistence/timescaledb.persist`:
+
+```java
+Strategies {
+ everyMinute : "0 * * * * ?"
+ everyHour : "0 0 * * * ?"
+ everyDay : "0 0 0 * * ?"
+ default = everyChange
+}
+
+Items {
+ * : strategy = everyChange, restoreOnStartup
+ Temperature_* : strategy = everyMinute
+ Energy_* : strategy = everyHour
+}
+```
+
+## Per-Item Downsampling
+
+Downsampling is configured **per item** via item metadata in the `timescaledb` namespace.
+
+### Metadata format
+
+```text
+timescaledb="<function>" [downsampleInterval="<interval>", retainRawDays="<days>", retentionDays="<days>"]
+```
+
+| Metadata key | Values | Description |
+|----------------------|--------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+| value (main) | `AVG`, `MAX`, `MIN`, `SUM`, or `" "` | Aggregation function. Use a single space `" "` for retention-only (no downsampling). openHAB rejects a truly empty value, so a space is required. |
+| `downsampleInterval` | e.g. `1h`, `15m`, `1d` | Time bucket size for aggregation. Required when value is an aggregation function. |
+| `retainRawDays` | integer, default `5` | Keep raw data for N days before replacing with aggregated rows. |
+| `retentionDays` | integer, default `0` | Drop all data (raw + downsampled) older than N days. `0` = off. |
+
+### Configuration in `.items` files
+
+```java
+Number:Temperature Sensor_Temperature_Living "Living Room [%.1f °C]" {
+ timescaledb="AVG" [ downsampleInterval="1h", retainRawDays="5" ]
+}
+
+Number:Power Meter_Power_House "House Power [%.1f W]" {
+ timescaledb="AVG" [ downsampleInterval="15m", retainRawDays="3", retentionDays="365" ]
+}
+
+Number:Energy Meter_Energy_House "House Energy [%.3f kWh]" {
+ timescaledb="SUM" [ downsampleInterval="1h", retainRawDays="7" ]
+}
+
+// Retention-only: no downsampling, just drop data older than 30 days.
+// The value must be a single space " " — openHAB rejects a truly empty string.
+Number:Temperature Sensor_Temp_Outdoor {
+ timescaledb=" " [ retentionDays="30" ]
+}
+```
+
+### Configuration in mainUI
+
+**Downsampling + Retention:**
+
+`Item → Metadata → Add Metadata → Enter namespace "timescaledb"`:
+
+- Value: `AVG`
+- Additional config: `downsampleInterval=1h`, `retainRawDays=5`, `retentionDays=365`
+
+**Retention-only (no downsampling):**
+
+`Item → Metadata → Add Metadata → Enter namespace "timescaledb"`:
+
+- Value: ` ` (a single space — openHAB rejects an empty value)
+- Additional config: `retentionDays=30`
+
+### How in-place downsampling works
+
+The downsampling runs as a scheduled job (daily, at midnight):
+
+```text
+For each item with timescaledb metadata:
+ 1. Parse operation + downsampleInterval from metadata
+ 2. Compute cutoff = NOW() - retainRawDays
+ 3. SELECT time_bucket(interval, time), agg_fn(value), MAX(unit)
+ FROM items
+ WHERE item_id = ? AND downsampled = FALSE AND time < cutoff
+ GROUP BY bucket
+ 4. INSERT aggregated rows with downsampled = TRUE
+ 5. DELETE original rows (downsampled = FALSE, time < cutoff)
+```
+
+This keeps the hypertable as the single source of truth. openHAB reads aggregated and raw data from the same table — no query changes needed.
+
+### Supported intervals
+
+| Metadata value | SQL interval |
+|----------------|---------------|
+| `1m` | `1 minute` |
+| `5m` | `5 minutes` |
+| `15m` | `15 minutes` |
+| `30m` | `30 minutes` |
+| `1h`           | `1 hour`      |
+| `2h`           | `2 hours`     |
+| `6h`           | `6 hours`     |
+| `12h`          | `12 hours`    |
+| `1d`           | `1 day`       |
+
+## Querying from openHAB
+
+The service implements the full `QueryablePersistenceService` interface:
+
+| openHAB Query | TimescaleDB Implementation |
+|--------------------------------------------|---------------------------------------|
+| `historicState(item, timestamp)` | `SELECT … ORDER BY time DESC LIMIT 1` |
+| `averageSince(item, timestamp)` | `AVG(value) WHERE time >= ?` |
+| `sumSince(item, timestamp)` | `SUM(value) WHERE time >= ?` |
+| `minSince(item, timestamp)` | `MIN(value) WHERE time >= ?` |
+| `maxSince(item, timestamp)` | `MAX(value) WHERE time >= ?` |
+| `countSince(item, timestamp)` | `COUNT(*) WHERE time >= ?` |
+| `getAllStatesBetween(item, begin, end)` | Range scan (raw + downsampled) |
+| `removeAllStatesBetween(item, begin, end)` | `DELETE WHERE time BETWEEN ? AND ?` |
+
+## Compression
+
+When `compressionAfterDays > 0`, the service configures automatic chunk compression:
+
+```sql
+ALTER TABLE items SET (
+ timescaledb.compress,
+ timescaledb.compress_segmentby = 'item_id',
+ timescaledb.compress_orderby = 'time DESC'
+);
+SELECT add_compression_policy('items', INTERVAL '30 days');
+```
+
+## Retention
+
+When `retentionDays > 0` (global config), a TimescaleDB retention policy is added:
+
+```sql
+SELECT add_retention_policy('items', INTERVAL '365 days');
+```
+
+Per-item retention (via metadata `retentionDays`) is applied by the daily downsampling job using a targeted DELETE.
+This works independently of downsampling: an item can have `retentionDays` set without any aggregation function
+(use a single space `" "` as the metadata value in that case).
+
+## Grafana Integration
+
+TimescaleDB works natively with the Grafana PostgreSQL data source:
+
+```sql
+-- Raw + downsampled data for a sensor (last 24 h)
+SELECT
+ time_bucket('5 minutes', time) AS time,
+ AVG(value) AS temperature,
+ MAX(unit) AS unit
+FROM items
+JOIN item_meta ON items.item_id = item_meta.id
+WHERE item_meta.name = 'Sensor_Temperature_Living'
+ AND time > NOW() - INTERVAL '24 hours'
+GROUP BY 1
+ORDER BY 1;
+```
+
+## Differences from JDBC Persistence
+
+| Feature | JDBC Persistence | TimescaleDB Persistence |
+|--------------------------------|----------------------|-------------------------|
+| TimescaleDB hypertables | No (plain tables) | Yes |
+| In-place downsampling | No | Yes |
+| Per-item aggregation config | No | Yes (item metadata) |
+| Automatic compression | No | Yes |
+| Retention policies | No | Yes (global + per-item) |
+| Unit stored per measurement | No | Yes |
+| Multiple DB backends | Yes | No (TimescaleDB only) |
+| Schema (one table per item) | Yes | No (single hypertable) |
diff --git a/bundles/org.openhab.persistence.timescaledb/pom.xml b/bundles/org.openhab.persistence.timescaledb/pom.xml
new file mode 100644
index 0000000000000..deb945e822e0a
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/pom.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.openhab.addons.bundles</groupId>
+    <artifactId>org.openhab.addons.reactor.bundles</artifactId>
+    <version>5.2.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>org.openhab.persistence.timescaledb</artifactId>
+
+  <name>openHAB Add-ons :: Bundles :: Persistence Service :: TimescaleDB</name>
+
+  <properties>
+    <bnd.importpackage>!com.codahale.metrics.*,!io.prometheus.*,!org.checkerframework.*,!org.jetbrains.annotations.*,!org.hibernate.*,!waffle.windows.auth.*,!org.osgi.service.jdbc.*,!com.sun.jna.*,!javassist.*</bnd.importpackage>
+    <postgresql.version>42.7.9</postgresql.version>
+    <hikari.version>5.1.0</hikari.version>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.postgresql</groupId>
+      <artifactId>postgresql</artifactId>
+      <version>${postgresql.version}</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.zaxxer</groupId>
+      <artifactId>HikariCP</artifactId>
+      <version>${hikari.version}</version>
+      <scope>compile</scope>
+    </dependency>
+
+    <!-- Test dependencies -->
+    <dependency>
+      <groupId>org.testcontainers</groupId>
+      <artifactId>testcontainers-postgresql</artifactId>
+      <version>2.0.3</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.testcontainers</groupId>
+      <artifactId>testcontainers-junit-jupiter</artifactId>
+      <version>2.0.3</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <version>${mockito.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <excludedGroups>performance,external-integration</excludedGroups>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml b/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml
new file mode 100644
index 0000000000000..1f07a75b88f36
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<features name="org.openhab.persistence.timescaledb-${project.version}"
+	xmlns="http://karaf.apache.org/xmlns/features/v1.6.0">
+	<repository>mvn:org.openhab.core.features.karaf/org.openhab.core.features.karaf.openhab-core/${ohc.version}/xml/features</repository>
+
+	<feature name="openhab-persistence-timescaledb" description="TimescaleDB Persistence" version="${project.version}">
+		<feature>openhab-runtime-base</feature>
+		<bundle start-level="80">mvn:org.openhab.addons.bundles/org.openhab.persistence.timescaledb/${project.version}</bundle>
+	</feature>
+
+</features>
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java
new file mode 100644
index 0000000000000..0fced0219221c
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * Aggregation function used for per-item downsampling.
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+public enum AggregationFunction {
+    AVG, // arithmetic mean of the values in a bucket
+    MAX, // largest value in a bucket
+    MIN, // smallest value in a bucket
+    SUM; // sum of the values in a bucket
+
+    /**
+     * Returns the bare SQL aggregate function name (e.g. {@code AVG}).
+     * Callers append the argument themselves, producing e.g. {@code AVG(value)}.
+     */
+    public String toSql() {
+        return name();
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java
new file mode 100644
index 0000000000000..2f6fa83f498a0
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.util.Map;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Per-item configuration parsed from item metadata (namespace {@code timescaledb}).
+ *
+ * <p>
+ * A config is either a full downsampling config (function + interval + retention) or a
+ * retention-only config (function and sqlInterval are {@code null}). Use {@link #hasDownsampling()}
+ * to distinguish the two cases.
+ *
+ * @param function Aggregation function (AVG, MAX, MIN, SUM), or {@code null} for retention-only.
+ * @param sqlInterval Validated SQL interval literal, e.g. {@code "1 hour"}, or {@code null} for retention-only.
+ * @param retainRawDays Keep raw data for N days before aggregating. Ignored for retention-only configs.
+ * @param retentionDays Drop all data older than N days. 0 = disabled.
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+public record DownsampleConfig(@Nullable AggregationFunction function, @Nullable String sqlInterval, int retainRawDays,
+        int retentionDays) {
+
+    /**
+     * Allowlist mapping from metadata interval strings to SQL interval literals. The values are
+     * interpolated into SQL statements, so only these vetted literals may ever be used.
+     * NOTE: also contains {@code 2h} and {@code 12h} — keep the README/AGENTS interval tables in sync.
+     */
+    public static final Map<String, String> INTERVAL_MAP = Map.of("1m", "1 minute", "5m",
+            "5 minutes", "15m", "15 minutes", "30m", "30 minutes", "1h", "1 hour", "2h", "2 hours", "6h", "6 hours",
+            "12h", "12 hours", "1d", "1 day");
+
+    /**
+     * Rejects the inconsistent state "aggregation function set but no interval", which would
+     * otherwise surface later as a failure while building the downsampling SQL.
+     */
+    public DownsampleConfig {
+        if (function != null && sqlInterval == null) {
+            throw new IllegalArgumentException("sqlInterval is required when an aggregation function is set");
+        }
+    }
+
+    /**
+     * Converts a metadata interval string to its SQL literal.
+     *
+     * @param interval The metadata interval string, e.g. {@code "1h"}.
+     * @return The SQL interval literal.
+     * @throws IllegalArgumentException if the interval is not in the allowlist.
+     */
+    public static String toSqlInterval(String interval) {
+        String sql = INTERVAL_MAP.get(interval);
+        if (sql == null) {
+            throw new IllegalArgumentException(
+                    "Invalid downsampleInterval '" + interval + "'. Allowed: " + INTERVAL_MAP.keySet());
+        }
+        return sql;
+    }
+
+    /**
+     * Returns {@code true} if this config describes a full downsampling run (aggregation + raw-data pruning).
+     * Returns {@code false} for retention-only configs where only the retention DELETE is executed.
+     */
+    public boolean hasDownsampling() {
+        return function != null;
+    }
+
+    /**
+     * Creates a retention-only config: no aggregation, just a periodic DELETE of rows older than
+     * {@code retentionDays} days.
+     *
+     * @param retentionDays Days after which all rows are deleted. Must be > 0.
+     */
+    public static DownsampleConfig retentionOnly(int retentionDays) {
+        if (retentionDays <= 0) {
+            throw new IllegalArgumentException("retentionDays must be > 0, got " + retentionDays);
+        }
+        return new DownsampleConfig(null, null, 0, retentionDays);
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java
new file mode 100644
index 0000000000000..187b69facba96
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.util.List;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+import org.openhab.core.io.console.Console;
+import org.openhab.core.io.console.ConsoleCommandCompleter;
+import org.openhab.core.io.console.StringsCompleter;
+import org.openhab.core.io.console.extensions.AbstractConsoleCommandExtension;
+import org.openhab.core.io.console.extensions.ConsoleCommandExtension;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Reference;
+
+/**
+ * Karaf console commands for the TimescaleDB persistence service.
+ *
+ * <pre>
+ * openhab:timescaledb downsample - run the downsampling job immediately
+ * </pre>
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+@Component(service = ConsoleCommandExtension.class)
+public class TimescaleDBConsoleCommandExtension extends AbstractConsoleCommandExtension
+        implements ConsoleCommandCompleter {
+
+    private static final String CMD_DOWNSAMPLE = "downsample";
+    private static final StringsCompleter CMD_COMPLETER = new StringsCompleter(List.of(CMD_DOWNSAMPLE), false);
+
+    private final TimescaleDBPersistenceService persistenceService;
+
+    @Activate
+    public TimescaleDBConsoleCommandExtension(@Reference TimescaleDBPersistenceService persistenceService) {
+        super("timescaledb", "TimescaleDB persistence commands.");
+        this.persistenceService = persistenceService;
+    }
+
+    @Override
+    public List<String> getUsages() {
+        return List.of(buildCommandUsage(CMD_DOWNSAMPLE, "run the downsampling/retention job immediately"));
+    }
+
+    @Override
+    public @Nullable ConsoleCommandCompleter getCompleter() {
+        return this;
+    }
+
+    @Override
+    public boolean complete(String[] args, int cursorArgumentIndex, int cursorPosition, List<String> candidates) {
+        return CMD_COMPLETER.complete(args, cursorArgumentIndex, cursorPosition, candidates);
+    }
+
+    @Override
+    public void execute(String[] args, Console console) {
+        if (args.length == 1 && CMD_DOWNSAMPLE.equals(args[0])) {
+            console.println("Starting downsampling job...");
+            // Runs synchronously in the console thread; may take a while on large tables.
+            boolean ran = persistenceService.runDownsampleNow();
+            if (ran) {
+                console.println("Downsampling job finished.");
+            } else {
+                console.println("TimescaleDB persistence service is not active — cannot run job.");
+            }
+        } else {
+            printUsage(console);
+        }
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java
new file mode 100644
index 0000000000000..4e01c60fc32ad
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.List;
+
+import javax.sql.DataSource;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Daily scheduled job that performs in-place downsampling for all items
+ * configured with {@code timescaledb} metadata.
+ *
+ * <p>
+ * For each eligible item, the job runs atomically in a single transaction:
+ *
+ * <ol>
+ * <li>INSERT aggregated rows (time_bucket + agg_fn) with {@code downsampled=TRUE}</li>
+ * <li>DELETE original raw rows that have been aggregated</li>
+ * <li>If {@code retentionDays > 0}: DELETE all rows older than the retention window</li>
+ * </ol>
+ *
+ * <p>
+ * <b>Security note:</b> The SQL interval and aggregation function are
+ * formatted into the query string but are validated against an allowlist
+ * ({@link DownsampleConfig#INTERVAL_MAP} and {@link AggregationFunction} enum)
+ * before use. The {@code item_id} is always a JDBC bind parameter.
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+public class TimescaleDBDownsampleJob implements Runnable {
+
+    private final Logger logger = LoggerFactory.getLogger(TimescaleDBDownsampleJob.class);
+
+    /**
+     * INSERT aggregated rows for one item, skipping buckets that already have a downsampled row.
+     * The NOT EXISTS guard makes this statement idempotent at the query level; the trailing
+     * ON CONFLICT DO NOTHING (no conflict target) provides a second line of defence for
+     * concurrent execution. The target-less form is required because TimescaleDB hypertables
+     * do not support column-inference conflict targets. Because the schema uses
+     * UNIQUE(time, item_id, downsampled) and we always insert downsampled=TRUE, this never
+     * silently drops a bucket that merely collides with a raw row at the bucket boundary.
+     * Placeholder: (1) item_id.
+     * Interval and agg-fn are pre-validated strings from the allowlist/enum.
+     */
+    private static final String SQL_INSERT_AGGREGATED_TEMPLATE = """
+            INSERT INTO items (time, item_id, value, unit, downsampled)
+            SELECT bucket, item_id, agg_value, agg_unit, TRUE
+            FROM (
+                SELECT
+                    time_bucket('%s', time) AS bucket,
+                    item_id,
+                    %s(value) AS agg_value,
+                    MAX(unit) AS agg_unit
+                FROM items
+                WHERE item_id = ?
+                  AND downsampled = FALSE
+                  AND time < NOW() - INTERVAL '%d days'
+                GROUP BY time_bucket('%s', time), item_id
+            ) AS new_buckets
+            WHERE NOT EXISTS (
+                SELECT 1
+                FROM items existing
+                WHERE existing.item_id = new_buckets.item_id
+                  AND existing.downsampled = TRUE
+                  AND existing.time = new_buckets.bucket
+            )
+            ON CONFLICT DO NOTHING
+            """;
+
+    /**
+     * DELETE raw rows that were just aggregated.
+     * Placeholder: (1) item_id.
+     */
+    private static final String SQL_DELETE_RAW_TEMPLATE = """
+            DELETE FROM items
+            WHERE item_id = ?
+              AND downsampled = FALSE
+              AND time < NOW() - INTERVAL '%d days'
+            """;
+
+    /**
+     * DELETE all rows (raw + downsampled) outside the per-item retention window.
+     * Placeholder: (1) item_id.
+     */
+    private static final String SQL_DELETE_RETENTION_TEMPLATE = """
+            DELETE FROM items
+            WHERE item_id = ?
+              AND time < NOW() - INTERVAL '%d days'
+            """;
+
+    private final DataSource dataSource;
+    private final TimescaleDBMetadataService metadataService;
+
+    /**
+     * @param dataSource The connection pool.
+     * @param metadataService The metadata parser.
+     */
+    public TimescaleDBDownsampleJob(DataSource dataSource, TimescaleDBMetadataService metadataService) {
+        this.dataSource = dataSource;
+        this.metadataService = metadataService;
+    }
+
+    @Override
+    public void run() {
+        List<String> itemNames = metadataService.getConfiguredItemNames();
+        logger.info("Downsampling job started: {} item(s) to process", itemNames.size());
+
+        int success = 0;
+        int skipped = 0;
+        int failed = 0;
+
+        for (String itemName : itemNames) {
+            var configOpt = metadataService.getDownsampleConfig(itemName);
+            if (configOpt.isEmpty()) {
+                logger.debug("Item '{}': no valid DownsampleConfig — skipping", itemName);
+                skipped++;
+                continue;
+            }
+            DownsampleConfig config = configOpt.get();
+
+            try {
+                downsampleItem(itemName, config);
+                success++;
+            } catch (Exception e) {
+                // Broad catch at the job boundary: one failing item must not abort the whole run.
+                logger.error("Downsampling failed for item '{}': {}", itemName, e.getMessage(), e);
+                failed++;
+            }
+        }
+
+        logger.info("Downsampling job finished: {} succeeded, {} skipped, {} failed", success, skipped, failed);
+    }
+
+    /**
+     * Runs the INSERT/DELETE sequence for one item inside a single transaction,
+     * rolling back on any SQL error so the item's data is never left half-aggregated.
+     */
+    private void downsampleItem(String itemName, DownsampleConfig config) throws SQLException {
+        try (Connection conn = dataSource.getConnection()) {
+            var itemIdOpt = TimescaleDBQuery.findItemId(conn, itemName);
+            if (itemIdOpt.isEmpty()) {
+                logger.debug("Item '{}': not yet known to the database — skipping", itemName);
+                return;
+            }
+            int itemId = itemIdOpt.get();
+            conn.setAutoCommit(false);
+            try {
+                if (config.hasDownsampling()) {
+                    AggregationFunction fn = config.function();
+                    String interval = config.sqlInterval();
+                    if (fn != null && interval != null) {
+                        String sqlInsert = SQL_INSERT_AGGREGATED_TEMPLATE.formatted(interval, fn.toSql(),
+                                config.retainRawDays(), interval);
+                        String sqlDeleteRaw = SQL_DELETE_RAW_TEMPLATE.formatted(config.retainRawDays());
+                        int inserted = executeUpdate(conn, sqlInsert, itemId);
+                        int deleted = executeUpdate(conn, sqlDeleteRaw, itemId);
+                        logger.debug("Item '{}': aggregated {} bucket(s), deleted {} raw row(s)", itemName, inserted,
+                                deleted);
+                    }
+                }
+
+                if (config.retentionDays() > 0) {
+                    String sqlRetention = SQL_DELETE_RETENTION_TEMPLATE.formatted(config.retentionDays());
+                    int dropped = executeUpdate(conn, sqlRetention, itemId);
+                    logger.debug("Item '{}': dropped {} row(s) outside {}d retention window", itemName, dropped,
+                            config.retentionDays());
+                }
+
+                conn.commit();
+            } catch (SQLException e) {
+                conn.rollback();
+                throw e;
+            } finally {
+                // Return the pooled connection in its default auto-commit state.
+                conn.setAutoCommit(true);
+            }
+        }
+    }
+
+    /** Executes a single-parameter update statement binding {@code itemId} at position 1. */
+    private static int executeUpdate(Connection conn, String sql, int itemId) throws SQLException {
+        try (PreparedStatement ps = conn.prepareStatement(sql)) {
+            ps.setInt(1, itemId);
+            return ps.executeUpdate();
+        }
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java
new file mode 100644
index 0000000000000..4bc90b0020236
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.openhab.core.persistence.HistoricItem;
+import org.openhab.core.types.State;
+
+/**
+ * A historic item returned by TimescaleDB queries.
+ *
+ * <p>
+ * Immutable value holder; the stored {@link Instant} is converted to the system
+ * default time zone when the timestamp is read.
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+public class TimescaleDBHistoricItem implements HistoricItem {
+
+    private final String itemName;
+    private final State itemState;
+    private final Instant instant;
+
+    public TimescaleDBHistoricItem(String name, State state, Instant timestamp) {
+        this.itemName = name;
+        this.itemState = state;
+        this.instant = timestamp;
+    }
+
+    @Override
+    public String getName() {
+        return itemName;
+    }
+
+    @Override
+    public State getState() {
+        return itemState;
+    }
+
+    @Override
+    public ZonedDateTime getTimestamp() {
+        return ZonedDateTime.ofInstant(instant, ZoneId.systemDefault());
+    }
+
+    @Override
+    public String toString() {
+        return "TimescaleDBHistoricItem{name='" + itemName + "', state=" + itemState + ", timestamp=" + instant + "}";
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java
new file mode 100644
index 0000000000000..e5b2720f96357
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.util.Base64;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+import org.openhab.core.items.GroupItem;
+import org.openhab.core.items.Item;
+import org.openhab.core.library.items.CallItem;
+import org.openhab.core.library.items.ColorItem;
+import org.openhab.core.library.items.ContactItem;
+import org.openhab.core.library.items.DateTimeItem;
+import org.openhab.core.library.items.DimmerItem;
+import org.openhab.core.library.items.ImageItem;
+import org.openhab.core.library.items.LocationItem;
+import org.openhab.core.library.items.PlayerItem;
+import org.openhab.core.library.items.RollershutterItem;
+import org.openhab.core.library.items.SwitchItem;
+import org.openhab.core.library.types.DateTimeType;
+import org.openhab.core.library.types.DecimalType;
+import org.openhab.core.library.types.HSBType;
+import org.openhab.core.library.types.OnOffType;
+import org.openhab.core.library.types.OpenClosedType;
+import org.openhab.core.library.types.PercentType;
+import org.openhab.core.library.types.PlayPauseType;
+import org.openhab.core.library.types.PointType;
+import org.openhab.core.library.types.QuantityType;
+import org.openhab.core.library.types.RawType;
+import org.openhab.core.library.types.StringListType;
+import org.openhab.core.library.types.StringType;
+import org.openhab.core.library.types.UpDownType;
+import org.openhab.core.types.State;
+import org.openhab.core.types.UnDefType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Converts between openHAB {@link State} objects and the three-column schema
+ * ({@code value DOUBLE PRECISION}, {@code string TEXT}, {@code unit TEXT}).
+ *
+ *
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+@Component(service = TimescaleDBMetadataService.class)
+public class TimescaleDBMetadataService {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBMetadataService.class);
+
+ /** The metadata namespace used by this persistence service. */
+ public static final String METADATA_NAMESPACE = "timescaledb";
+
+ private static final int DEFAULT_RETAIN_RAW_DAYS = 5;
+ private static final int DEFAULT_RETENTION_DAYS = 0;
+
+ private final MetadataRegistry metadataRegistry;
+
+ @Activate
+ public TimescaleDBMetadataService(final @Reference MetadataRegistry metadataRegistry) {
+ this.metadataRegistry = metadataRegistry;
+ }
+
+ /**
+ * Returns the {@link DownsampleConfig} for the given item, or empty if no downsampling
+ * is configured or the metadata cannot be parsed.
+ *
+ * @param itemName The item name.
+ * @return Optional containing the parsed config, or empty.
+ */
+ public Optional getDownsampleConfig(String itemName) {
+ MetadataKey key = new MetadataKey(METADATA_NAMESPACE, itemName);
+ @Nullable
+ Metadata metadata = metadataRegistry.get(key);
+ if (metadata == null) {
+ return Optional.empty();
+ }
+ return parseConfig(itemName, metadata);
+ }
+
+ /**
+ * Returns the names of all items that have a {@code timescaledb} metadata entry,
+ * regardless of whether they configure downsampling, retention-only, or both.
+ *
+ * @return List of item names with any timescaledb metadata.
+ */
+ public List getConfiguredItemNames() {
+ List result = new ArrayList<>();
+ for (Metadata metadata : metadataRegistry.getAll()) {
+ if (!METADATA_NAMESPACE.equals(metadata.getUID().getNamespace())) {
+ continue;
+ }
+ result.add(metadata.getUID().getItemName());
+ }
+ return result;
+ }
+
+ private Optional parseConfig(String itemName, Metadata metadata) {
+ String functionStr = metadata.getValue();
+ if (functionStr.isBlank()) {
+ // No aggregation function — check for retention-only config.
+ // Note: openHAB requires a non-empty metadata value, so use a single space (" ")
+ // in item files and the UI when you only want retention without downsampling.
+ int retentionDays = getInt(metadata.getConfiguration(), "retentionDays", DEFAULT_RETENTION_DAYS);
+ if (retentionDays < 0) {
+ LOGGER.warn("Item '{}': retentionDays must be >= 0, ignoring negative value {}", itemName,
+ retentionDays);
+ return Optional.empty();
+ }
+ if (retentionDays > 0) {
+ LOGGER.debug("Item '{}': retention-only config with retentionDays={}", itemName, retentionDays);
+ return Optional.of(DownsampleConfig.retentionOnly(retentionDays));
+ }
+ return Optional.empty();
+ }
+
+ AggregationFunction function;
+ try {
+ function = AggregationFunction.valueOf(functionStr.trim().toUpperCase());
+ } catch (IllegalArgumentException e) {
+ LOGGER.warn("Item '{}': unknown aggregation function '{}' in timescaledb metadata — skipping", itemName,
+ functionStr);
+ return Optional.empty();
+ }
+
+ var config = metadata.getConfiguration();
+
+ String intervalStr = getString(config, "downsampleInterval", null);
+ if (intervalStr == null || intervalStr.isBlank()) {
+ LOGGER.warn("Item '{}': timescaledb metadata has function '{}' but no downsampleInterval — skipping",
+ itemName, functionStr);
+ return Optional.empty();
+ }
+
+ String sqlInterval;
+ try {
+ sqlInterval = DownsampleConfig.toSqlInterval(intervalStr);
+ } catch (IllegalArgumentException e) {
+ LOGGER.warn("Item '{}': {}", itemName, e.getMessage());
+ return Optional.empty();
+ }
+
+ int retainRawDays = getInt(config, "retainRawDays", DEFAULT_RETAIN_RAW_DAYS);
+ if (retainRawDays < 0) {
+ LOGGER.warn("Item '{}': retainRawDays must be >= 0, using default {}", itemName, DEFAULT_RETAIN_RAW_DAYS);
+ retainRawDays = DEFAULT_RETAIN_RAW_DAYS;
+ }
+ int retentionDays = getInt(config, "retentionDays", DEFAULT_RETENTION_DAYS);
+ if (retentionDays < 0) {
+ LOGGER.warn("Item '{}': retentionDays must be >= 0, using default {}", itemName, DEFAULT_RETENTION_DAYS);
+ retentionDays = DEFAULT_RETENTION_DAYS;
+ }
+
+ DownsampleConfig result = new DownsampleConfig(function, sqlInterval, retainRawDays, retentionDays);
+ LOGGER.debug("Item '{}': parsed DownsampleConfig {}", itemName, result);
+ return Optional.of(result);
+ }
+
+ private static @Nullable String getString(java.util.Map config, String key,
+ @Nullable String defaultValue) {
+ Object val = config.get(key);
+ return val != null ? val.toString() : defaultValue;
+ }
+
+ private static int getInt(java.util.Map config, String key, int defaultValue) {
+ Object val = config.get(key);
+ if (val == null) {
+ return defaultValue;
+ }
+ try {
+ return Integer.parseInt(val.toString());
+ } catch (NumberFormatException e) {
+ LOGGER.warn("Invalid integer value '{}' for metadata key '{}', using default {}", val, key, defaultValue);
+ return defaultValue;
+ }
+ }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java
new file mode 100644
index 0000000000000..a0ff5443f5948
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.time.ZonedDateTime;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+import org.openhab.core.common.ThreadPoolManager;
+import org.openhab.core.config.core.ConfigurableService;
+import org.openhab.core.items.Item;
+import org.openhab.core.items.ItemNotFoundException;
+import org.openhab.core.items.ItemRegistry;
+import org.openhab.core.persistence.FilterCriteria;
+import org.openhab.core.persistence.HistoricItem;
+import org.openhab.core.persistence.ModifiablePersistenceService;
+import org.openhab.core.persistence.PersistenceItemInfo;
+import org.openhab.core.persistence.PersistenceService;
+import org.openhab.core.persistence.QueryablePersistenceService;
+import org.openhab.core.persistence.strategy.PersistenceStrategy;
+import org.openhab.core.types.State;
+import org.openhab.core.types.UnDefType;
+import org.osgi.framework.Constants;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.ConfigurationPolicy;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+
+/**
+ * TimescaleDB persistence service for openHAB.
+ *
+ * <p>
+ * Implements {@link ModifiablePersistenceService} to support store, query, and remove operations
+ * against a TimescaleDB (PostgreSQL extension) hypertable.
+ *
+ * <p>
+ * Item names are cached in-memory ({@code name → item_id}) to avoid a SELECT on every
+ * {@link #store} call. The cache is populated lazily on first store per item.
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+@Component(service = { PersistenceService.class, QueryablePersistenceService.class, ModifiablePersistenceService.class,
+        TimescaleDBPersistenceService.class }, configurationPid = "org.openhab.timescaledb", configurationPolicy = ConfigurationPolicy.REQUIRE, property = Constants.SERVICE_PID
+                + "=org.openhab.timescaledb")
+@ConfigurableService(category = "persistence", label = "TimescaleDB Persistence Service", description_uri = TimescaleDBPersistenceService.CONFIG_URI)
+public class TimescaleDBPersistenceService implements ModifiablePersistenceService {
+
+    static final String CONFIG_URI = "persistence:timescaledb";
+    static final String CONFIGURATION_PID = "org.openhab.timescaledb";
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBPersistenceService.class);
+    private static final String THREAD_POOL_NAME = "timescaledb";
+
+    private static final String SERVICE_ID = "timescaledb";
+    private static final String SERVICE_LABEL = "TimescaleDB";
+
+    // item name → item_id, populated lazily
+    private final Map<String, Integer> itemIdCache = new ConcurrentHashMap<>();
+
+    private final ItemRegistry itemRegistry;
+    private final TimescaleDBMetadataService metadataService;
+
+    private @Nullable HikariDataSource dataSource;
+    private @Nullable ScheduledFuture<?> downsampleJob;
+    private @Nullable TimescaleDBDownsampleJob downsampleJobInstance;
+
+    @Activate
+    public TimescaleDBPersistenceService(final @Reference ItemRegistry itemRegistry,
+            final @Reference TimescaleDBMetadataService metadataService) {
+        this.itemRegistry = itemRegistry;
+        this.metadataService = metadataService;
+    }
+
+    /** Package-private constructor for unit tests — skips OSGi activation, allows injecting a DataSource. */
+    TimescaleDBPersistenceService(ItemRegistry itemRegistry, TimescaleDBMetadataService metadataService,
+            @Nullable HikariDataSource dataSource) {
+        this.itemRegistry = itemRegistry;
+        this.metadataService = metadataService;
+        this.dataSource = dataSource;
+    }
+
+    @Activate
+    public void activate(final Map<String, Object> config) {
+        String url = (String) config.getOrDefault("url", "");
+        if (url.isBlank()) {
+            LOGGER.warn("TimescaleDB persistence not configured: missing 'url'. "
+                    + "Configure org.openhab.timescaledb:url.");
+            return;
+        }
+
+        String user = (String) config.getOrDefault("user", "openhab");
+        String password = (String) config.getOrDefault("password", "");
+        int maxConnections = parseIntConfig(config, "maxConnections", 5);
+        int connectTimeout = parseIntConfig(config, "connectTimeout", 5000);
+        String chunkInterval = (String) config.getOrDefault("chunkInterval", "7 days");
+        int retentionDays = parseIntConfig(config, "retentionDays", 0);
+        int compressionAfterDays = parseIntConfig(config, "compressionAfterDays", 0);
+
+        LOGGER.debug(
+                "Activating TimescaleDB persistence: url={}, user={}, maxConnections={}, "
+                        + "chunkInterval={}, retentionDays={}, compressionAfterDays={}",
+                url, user, maxConnections, chunkInterval, retentionDays, compressionAfterDays);
+
+        HikariDataSource ds;
+        try {
+            ds = createDataSource(url, user, password, maxConnections, connectTimeout);
+        } catch (Exception e) {
+            LOGGER.error("Failed to create TimescaleDB connection pool: {}", e.getMessage(), e);
+            return;
+        }
+        dataSource = ds;
+
+        try (Connection conn = ds.getConnection()) {
+            TimescaleDBSchema.initialize(conn, chunkInterval, compressionAfterDays, retentionDays);
+        } catch (SQLException e) {
+            LOGGER.error("Failed to initialize TimescaleDB schema: {}", e.getMessage(), e);
+            ds.close();
+            dataSource = null;
+            return;
+        }
+
+        if (compressionAfterDays > 0) {
+            LOGGER.warn("TimescaleDB: compressionAfterDays={} is set. Ensure all per-item retainRawDays "
+                    + "are less than compressionAfterDays, otherwise downsampling will attempt to write into "
+                    + "already-compressed (read-only) chunks and cause SQLExceptions.", compressionAfterDays);
+        }
+
+        // Schedule the daily downsampling job via the openHAB shared thread pool
+        TimescaleDBDownsampleJob job = new TimescaleDBDownsampleJob(ds, metadataService);
+        downsampleJobInstance = job;
+        long initialDelay = secondsUntilMidnight();
+        downsampleJob = ThreadPoolManager.getScheduledPool(THREAD_POOL_NAME).scheduleWithFixedDelay(job, initialDelay,
+                TimeUnit.DAYS.toSeconds(1), TimeUnit.SECONDS);
+        LOGGER.info("Downsampling job scheduled: first run in {}s, then every 24h", initialDelay);
+
+        LOGGER.info("TimescaleDB persistence service activated");
+    }
+
+    /**
+     * Triggers the downsampling job immediately in the calling thread.
+     * Intended for use by the Karaf console command for on-demand testing.
+     *
+     * @return {@code true} if the job ran, {@code false} if the service is not yet activated.
+     */
+    public boolean runDownsampleNow() {
+        TimescaleDBDownsampleJob job = downsampleJobInstance;
+        if (job == null) {
+            return false;
+        }
+        job.run();
+        return true;
+    }
+
+    @Deactivate
+    public void deactivate() {
+        LOGGER.debug("Deactivating TimescaleDB persistence service");
+        itemIdCache.clear();
+
+        ScheduledFuture<?> job = downsampleJob;
+        if (job != null) {
+            job.cancel(false);
+            downsampleJob = null;
+        }
+        downsampleJobInstance = null;
+
+        HikariDataSource ds = dataSource;
+        if (ds != null) {
+            ds.close();
+            dataSource = null;
+        }
+        LOGGER.info("TimescaleDB persistence service deactivated");
+    }
+
+    // -------------------------------------------------------------------------
+    // PersistenceService
+    // -------------------------------------------------------------------------
+
+    @Override
+    public String getId() {
+        return SERVICE_ID;
+    }
+
+    @Override
+    public String getLabel(@Nullable Locale locale) {
+        return SERVICE_LABEL;
+    }
+
+    // NOTE(review): verify this method name against the target core version —
+    // some openHAB core versions declare PersistenceService#getDefaultStrategies instead.
+    @Override
+    public List<PersistenceStrategy> getSuggestedStrategies() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public void store(Item item) {
+        store(item, null);
+    }
+
+    @Override
+    public void store(Item item, @Nullable String alias) {
+        store(item, ZonedDateTime.now(), item.getState(), alias);
+    }
+
+    // -------------------------------------------------------------------------
+    // ModifiablePersistenceService (includes QueryablePersistenceService)
+    // -------------------------------------------------------------------------
+
+    @Override
+    public void store(Item item, ZonedDateTime date, State state) {
+        store(item, date, state, null);
+    }
+
+    @Override
+    public void store(Item item, ZonedDateTime date, State state, @Nullable String alias) {
+        if (state instanceof UnDefType) {
+            LOGGER.trace("Skipping store for item '{}': state is UnDefType", item.getName());
+            return;
+        }
+
+        TimescaleDBMapper.Row row = TimescaleDBMapper.toRow(state);
+        if (row == null) {
+            // Warning is already logged by the mapper
+            return;
+        }
+
+        String name = alias != null ? alias : item.getName();
+        @Nullable
+        String label = item.getLabel();
+
+        HikariDataSource ds = dataSource;
+        if (ds == null) {
+            LOGGER.warn("TimescaleDB data source not available — cannot store item '{}'", name);
+            return;
+        }
+
+        try (Connection conn = ds.getConnection()) {
+            int itemId = getOrCreateItemId(conn, name, label);
+            TimescaleDBQuery.insert(conn, itemId, date, row);
+        } catch (SQLException e) {
+            LOGGER.error("Failed to store item '{}': {}", name, e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Per-item statistics are not yet collected; returns an empty set.
+     * Required by {@link QueryablePersistenceService}. TODO: back this with a SQL
+     * aggregate over the items/hypertable (count, earliest, latest per item).
+     */
+    @Override
+    public Set<PersistenceItemInfo> getItemInfo() {
+        return Set.of();
+    }
+
+    @Override
+    public Iterable<HistoricItem> query(FilterCriteria filter) {
+        return query(filter, null);
+    }
+
+    @Override
+    public Iterable<HistoricItem> query(FilterCriteria filter, @Nullable String alias) {
+        String itemName = filter.getItemName();
+        if (itemName == null) {
+            LOGGER.warn("FilterCriteria has no item name — returning empty query result");
+            return Collections.emptyList();
+        }
+
+        String queryName = alias != null ? alias : itemName;
+
+        @Nullable
+        Integer itemId = itemIdCache.get(queryName);
+        if (itemId == null) {
+            HikariDataSource dsFallback = dataSource;
+            if (dsFallback == null) {
+                LOGGER.warn(
+                        "TimescaleDB data source not available while resolving item_id for '{}' — returning empty query result",
+                        queryName);
+                return Collections.emptyList();
+            }
+            try (Connection connFallback = dsFallback.getConnection()) {
+                Optional<Integer> resolved = TimescaleDBQuery.findItemId(connFallback, queryName);
+                if (resolved.isEmpty()) {
+                    LOGGER.debug("Item '{}' not present in TimescaleDB — returning empty query result", queryName);
+                    return Collections.emptyList();
+                }
+                itemId = resolved.get();
+                itemIdCache.put(queryName, itemId);
+            } catch (SQLException e) {
+                LOGGER.error("Failed to resolve item_id for item '{}': {}", queryName, e.getMessage(), e);
+                return Collections.emptyList();
+            }
+        }
+
+        Item item;
+        try {
+            item = itemRegistry.getItem(itemName);
+        } catch (ItemNotFoundException e) {
+            LOGGER.warn("Item '{}' not found in ItemRegistry — returning empty query result", itemName);
+            return Collections.emptyList();
+        }
+
+        HikariDataSource ds = dataSource;
+        if (ds == null) {
+            LOGGER.warn("TimescaleDB data source not available — returning empty query result");
+            return Collections.emptyList();
+        }
+
+        try (Connection conn = ds.getConnection()) {
+            return TimescaleDBQuery.query(conn, item, itemId, filter);
+        } catch (SQLException e) {
+            LOGGER.error("Query failed for item '{}': {}", queryName, e.getMessage(), e);
+            return Collections.emptyList();
+        }
+    }
+
+    @Override
+    public boolean remove(FilterCriteria filter) {
+        String itemName = filter.getItemName();
+        if (itemName == null) {
+            LOGGER.warn("FilterCriteria has no item name — cannot remove data");
+            return false;
+        }
+
+        @Nullable
+        Integer itemId = itemIdCache.get(itemName);
+        if (itemId == null) {
+            HikariDataSource dsFallback = dataSource;
+            if (dsFallback == null) {
+                LOGGER.warn(
+                        "TimescaleDB data source not available while resolving item_id for '{}' — cannot remove data",
+                        itemName);
+                return false;
+            }
+            try (Connection connFallback = dsFallback.getConnection()) {
+                Optional<Integer> resolved = TimescaleDBQuery.findItemId(connFallback, itemName);
+                if (resolved.isEmpty()) {
+                    LOGGER.debug("Item '{}' not present in TimescaleDB — nothing to remove", itemName);
+                    return false;
+                }
+                itemId = resolved.get();
+                itemIdCache.put(itemName, itemId);
+            } catch (SQLException e) {
+                LOGGER.error("Failed to resolve item_id for item '{}': {}", itemName, e.getMessage(), e);
+                return false;
+            }
+        }
+
+        HikariDataSource ds = dataSource;
+        if (ds == null) {
+            LOGGER.warn("TimescaleDB data source not available — cannot remove data for item '{}'", itemName);
+            return false;
+        }
+
+        try (Connection conn = ds.getConnection()) {
+            int deleted = TimescaleDBQuery.remove(conn, itemId, filter);
+            LOGGER.debug("Removed {} row(s) for item '{}'", deleted, itemName);
+            return true;
+        } catch (SQLException e) {
+            LOGGER.error("Failed to remove data for item '{}': {}", itemName, e.getMessage(), e);
+            return false;
+        }
+    }
+
+    // -------------------------------------------------------------------------
+    // Internal helpers
+    // -------------------------------------------------------------------------
+
+    /** Resolves (or creates) the item_id for {@code name}, caching the result. */
+    private int getOrCreateItemId(Connection conn, String name, @Nullable String label) throws SQLException {
+        Integer cached = itemIdCache.get(name);
+        if (cached != null) {
+            return cached;
+        }
+        int id = TimescaleDBQuery.getOrCreateItemId(conn, name, label);
+        itemIdCache.put(name, id);
+        return id;
+    }
+
+    private static HikariDataSource createDataSource(String url, String user, String password, int maxConnections,
+            int connectTimeoutMs) {
+        HikariConfig cfg = new HikariConfig();
+        // Explicitly set the driver class name so HikariCP uses Class.forName() in the
+        // bundle classloader instead of DriverManager.getDriver(). In an OSGi runtime
+        // DriverManager lives in the boot classloader and cannot see the PostgreSQL driver
+        // that is embedded in this bundle, which causes "Failed to get driver instance".
+        cfg.setDriverClassName("org.postgresql.Driver");
+        cfg.setJdbcUrl(url);
+        cfg.setUsername(user);
+        cfg.setPassword(password);
+        cfg.setMaximumPoolSize(maxConnections);
+        cfg.setConnectionTimeout(connectTimeoutMs);
+        cfg.setPoolName("timescaledb-persistence");
+        return new HikariDataSource(cfg);
+    }
+
+    /** Reads an integer config value, falling back to {@code defaultValue} on absence or parse failure. */
+    static int parseIntConfig(Map<String, Object> config, String key, int defaultValue) {
+        Object val = config.get(key);
+        if (val == null) {
+            return defaultValue;
+        }
+        try {
+            return Integer.parseInt(val.toString());
+        } catch (NumberFormatException e) {
+            LOGGER.warn("Invalid integer value '{}' for config key '{}', using default {}", val, key, defaultValue);
+            return defaultValue;
+        }
+    }
+
+    /** Seconds from now until the next local midnight, used as the job's initial delay. */
+    static long secondsUntilMidnight() {
+        ZonedDateTime now = ZonedDateTime.now();
+        ZonedDateTime midnight = now.toLocalDate().plusDays(1).atStartOfDay(now.getZone());
+        return java.time.Duration.between(now, midnight).getSeconds();
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java
new file mode 100644
index 0000000000000..6b046bc09e2db
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ */
+package org.openhab.persistence.timescaledb.internal;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
+import org.openhab.core.items.Item;
+import org.openhab.core.persistence.FilterCriteria;
+import org.openhab.core.persistence.FilterCriteria.Ordering;
+import org.openhab.core.persistence.HistoricItem;
+import org.openhab.core.types.State;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * SQL query builder and executor for all TimescaleDB persistence operations.
+ *
+ * <p>
+ * All user-controlled values (item name, timestamps, state values) are passed as
+ * JDBC {@link PreparedStatement} parameters to prevent SQL injection.
+ * The only dynamically formatted strings are validated enum/allowlist values
+ * (SQL operator, ORDER BY direction).
+ *
+ * @author René Ulbricht - Initial contribution
+ */
+@NonNullByDefault
+public class TimescaleDBQuery {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBQuery.class);
+
+    // --- INSERT ---
+    // ON CONFLICT DO NOTHING (no conflict target) silently discards duplicate writes.
+    // TimescaleDB hypertables do not support column-inference conflict targets, so the target-less
+    // form must be used. The UNIQUE(time, item_id, downsampled) constraint on the table ensures
+    // that a raw row (downsampled=FALSE) and a downsampled row (downsampled=TRUE) at the same
+    // timestamp can coexist and only true duplicates are dropped.
+    private static final String SQL_INSERT = "INSERT INTO items (time, item_id, value, string, unit) VALUES (?, ?, ?, ?, ?) ON CONFLICT DO NOTHING";
+
+    // --- item_meta lookup / insert ---
+    private static final String SQL_SELECT_ITEM_ID = "SELECT id FROM item_meta WHERE name = ?";
+
+    // Upsert form: DO UPDATE (rather than DO NOTHING) guarantees that RETURNING always yields the
+    // row id, even when a concurrent caller inserted the same name first.
+    private static final String SQL_INSERT_ITEM_META = "INSERT INTO item_meta (name, label) VALUES (?, ?) ON CONFLICT (name) DO UPDATE SET label = EXCLUDED.label RETURNING id";
+
+    // --- SELECT base ---
+    // WHERE/ORDER BY/LIMIT clauses are appended dynamically from the FilterCriteria in query().
+    private static final String SQL_SELECT_BASE = "SELECT time, value, string, unit FROM items WHERE item_id = ?";
+
+    // --- DELETE ---
+    // Additional time-range conditions are appended dynamically from the FilterCriteria.
+    private static final String SQL_DELETE_BASE = "DELETE FROM items WHERE item_id = ?";
+
+    private TimescaleDBQuery() {
+        // static utility class - not instantiable
+    }
+
+    /**
+     * Inserts a single item state row.
+     *
+     * <p>Duplicate rows (same time, item_id and downsampled flag) are silently discarded by the
+     * {@code ON CONFLICT DO NOTHING} clause; such discards are reported at debug level so they can
+     * be distinguished from successful writes when troubleshooting.
+     *
+     * @param connection The JDBC connection.
+     * @param itemId The item_id from {@code item_meta}.
+     * @param timestamp The measurement timestamp.
+     * @param row The mapped state row.
+     * @throws SQLException on any database error.
+     */
+    public static void insert(Connection connection, int itemId, ZonedDateTime timestamp, TimescaleDBMapper.Row row)
+            throws SQLException {
+        int affected;
+        try (PreparedStatement ps = connection.prepareStatement(SQL_INSERT)) {
+            ps.setTimestamp(1, Timestamp.from(timestamp.toInstant()));
+            ps.setInt(2, itemId);
+            Double value = row.value();
+            if (value != null) {
+                ps.setDouble(3, value);
+            } else {
+                // numeric column stays NULL for string-only states
+                ps.setNull(3, Types.DOUBLE);
+            }
+            ps.setString(4, row.string());
+            ps.setString(5, row.unit());
+            affected = ps.executeUpdate();
+        }
+        if (affected == 0) {
+            // ON CONFLICT DO NOTHING dropped the row as a duplicate; previously this case was
+            // indistinguishable from a successful write in the logs.
+            LOGGER.debug("Duplicate write discarded for item_id={} at {}", itemId, timestamp);
+        } else {
+            LOGGER.debug("Stored item_id={} at {} value={} string={} unit={}", itemId, timestamp, row.value(),
+                    row.string(), row.unit());
+        }
+    }
+
+    /**
+     * Returns the item_id for the given name, or inserts a new {@code item_meta} row and returns its id.
+     *
+     * @param connection The JDBC connection.
+     * @param name The item name.
+     * @param label The item label (may be null; stored for informational purposes).
+     * @return The item_id.
+     * @throws SQLException on any database error.
+     */
+    public static int getOrCreateItemId(Connection connection, String name, @Nullable String label)
+            throws SQLException {
+        // Fast path: most calls refer to an item that is already registered.
+        Integer existingId = lookupItemId(connection, name);
+        if (existingId != null) {
+            return existingId;
+        }
+        // Slow path: upsert the meta row. ON CONFLICT DO UPDATE makes concurrent registrations safe,
+        // and RETURNING yields the id of the new or pre-existing row either way.
+        try (PreparedStatement ps = connection.prepareStatement(SQL_INSERT_ITEM_META)) {
+            ps.setString(1, name);
+            ps.setString(2, label);
+            try (ResultSet rs = ps.executeQuery()) {
+                if (rs.next()) {
+                    int newId = rs.getInt(1);
+                    LOGGER.debug("Registered new item '{}' with item_id={}", name, newId);
+                    return newId;
+                }
+            }
+        }
+        // Defensive: with the upsert statement above this should be unreachable.
+        throw new SQLException("Failed to get or create item_meta entry for item '" + name + "'");
+    }
+
+    /**
+     * Looks up the item_id for the given name, or {@code null} when the item is not yet registered.
+     */
+    private static @Nullable Integer lookupItemId(Connection connection, String name) throws SQLException {
+        try (PreparedStatement ps = connection.prepareStatement(SQL_SELECT_ITEM_ID)) {
+            ps.setString(1, name);
+            try (ResultSet rs = ps.executeQuery()) {
+                return rs.next() ? rs.getInt(1) : null;
+            }
+        }
+    }
+
+ /**
+ * Queries historic items according to the given filter criteria.
+ *
+ * @param connection The JDBC connection.
+ * @param item The openHAB item (used for state reconstruction).
+ * @param itemId The item_id from {@code item_meta}.
+ * @param filter The filter criteria.
+ * @return An ordered list of matching {@link HistoricItem}s.
+ * @throws SQLException on any database error.
+ */
+ public static List query(Connection connection, Item item, int itemId, FilterCriteria filter)
+ throws SQLException {
+ StringBuilder sql = new StringBuilder(SQL_SELECT_BASE);
+ List