diff --git a/CODEOWNERS b/CODEOWNERS index 00583e9024986..1cc5357555dcf 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -472,6 +472,7 @@ /bundles/org.openhab.persistence.mapdb/ @openhab/add-ons-maintainers /bundles/org.openhab.persistence.mongodb/ @openhab/add-ons-maintainers /bundles/org.openhab.persistence.rrd4j/ @openhab/add-ons-maintainers +/bundles/org.openhab.persistence.timescaledb/ @ulbi /bundles/org.openhab.transform.basicprofiles/ @cweitkamp @J-N-K /bundles/org.openhab.transform.bin2json/ @paulianttila /bundles/org.openhab.transform.exec/ @openhab/add-ons-maintainers diff --git a/bom/openhab-addons/pom.xml b/bom/openhab-addons/pom.xml index 25eb90310ad7c..05d81faf20d2d 100644 --- a/bom/openhab-addons/pom.xml +++ b/bom/openhab-addons/pom.xml @@ -2341,6 +2341,11 @@ org.openhab.persistence.rrd4j ${project.version} + + org.openhab.addons.bundles + org.openhab.persistence.timescaledb + ${project.version} + org.openhab.addons.bundles org.openhab.transform.basicprofiles diff --git a/bundles/org.openhab.persistence.timescaledb/AGENTS.md b/bundles/org.openhab.persistence.timescaledb/AGENTS.md new file mode 100644 index 0000000000000..7680773654a44 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/AGENTS.md @@ -0,0 +1,312 @@ +# AGENTS.md - TimescaleDB Persistence Development Guide + +## Context + +You are working on a **native TimescaleDB persistence service** for openHAB. +TimescaleDB is a time-series extension for PostgreSQL — all standard PostgreSQL JDBC drivers work, but the schema and queries use TimescaleDB-specific features. + +**openHAB has no built-in downsampling/aggregation framework.** `FilterCriteria`, `PersistenceStrategy` and `PersistenceItemConfiguration` contain no aggregation concepts. Everything must be implemented inside this service. 
+ +--- + +## Architecture + +### Key Classes + +| Class | Role | +|---|---| +| `TimescaleDBPersistenceService` | Main OSGi service, implements `ModifiablePersistenceService` | +| `TimescaleDBMapper` | `State` ↔ SQL value conversion (all openHAB item types) | +| `TimescaleDBSchema` | Schema creation and migration on startup | +| `TimescaleDBQuery` | SQL query builder for all persistence operations | +| `TimescaleDBMetadataService` | Reads per-item downsampling config from `MetadataRegistry` | +| `TimescaleDBDownsampleJob` | Scheduled daily job: aggregates + deletes raw rows in-place | + +### OSGi Service Registration + +- Service ID: `timescaledb` +- Implements: `ModifiablePersistenceService` (= `QueryablePersistenceService` + `remove()`) +- Config PID: `org.openhab.persistence.timescaledb` +- `OH-INF/addon/addon.xml` required — registers the addon in the openHAB UI (Settings → Add-ons → TimescaleDB). Without it the bundle runs but is invisible to the UI. +- Config description: `OH-INF/config/timescaledb.xml` +- `ConfigurationPolicy.REQUIRE` — service does not start without configuration +- Scheduler: `ThreadPoolManager.getScheduledPool("timescaledb")` (shared pool — never call `shutdownNow()`) +- Deactivate: `ScheduledFuture.cancel(false)`, then `HikariDataSource.close()` +- State indicator: `dataSource != null` — no `initialized` boolean + +### Dependencies + +- JDBC Driver: `org.postgresql:postgresql` +- Connection pooling: HikariCP (already used in other openHAB bundles) +- openHAB Core: `org.openhab.core.persistence`, `org.openhab.core.items` (for `MetadataRegistry`) + +--- + +## Database Schema + +```sql +CREATE TABLE item_meta ( + id SERIAL PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + label TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE items ( + time TIMESTAMPTZ NOT NULL, + item_id INTEGER NOT NULL REFERENCES item_meta(id), + value DOUBLE PRECISION, + string TEXT, + unit TEXT, -- stored per row, NOT in item_meta + downsampled 
BOOLEAN NOT NULL DEFAULT FALSE +); + +SELECT create_hypertable('items', 'time'); +CREATE INDEX ON items (item_id, time DESC); +``` + +### Why `unit` is per row, not in `item_meta` + +A `QuantityType` unit can change over time (sensor reconfiguration, firmware update, etc.). Storing it in `item_meta` would corrupt historical reads. The unit is stored with each measurement and read back from the row when reconstructing `QuantityType` states. + +### Why downsampling is in-place (same hypertable) + +openHAB reads persisted data directly from the hypertable via `QueryablePersistenceService`. If aggregated data lived in separate views or tables, openHAB would not see it without query-layer changes. In-place replacement (delete raw rows → insert aggregated rows with `downsampled=TRUE`) keeps a single source of truth that openHAB reads transparently. + +--- + +## State Type Mapping + +All openHAB item types are fully supported. `TimescaleDBMapper` handles the conversion in both directions. + +### Store direction (`toRow`) + +| State type | `value` column | `string` column | `unit` column | +|---|---|---|---| +| `QuantityType` | numeric | null | unit string (e.g. `"°C"`) | +| `DecimalType` | numeric | null | null | +| `OnOffType` | `ON=1.0 / OFF=0.0` | null | null | +| `OpenClosedType` | `OPEN=1.0 / CLOSED=0.0` | null | null | +| `PercentType` | 0.0–100.0 | null | null | +| `UpDownType` | `UP=0.0 / DOWN=1.0` | null | null | +| `HSBType` | null | `"H,S,B"` | null | +| `DateTimeType` | null | ISO-8601 string | null | +| `PointType` | null | `"lat,lon[,alt]"` | null | +| `PlayPauseType` | null | enum name (`"PLAY"`, `"PAUSE"`, …) | null | +| `StringListType` | null | comma-separated values | null | +| `RawType` | null | Base64-encoded bytes | MIME type | +| `StringType` | null | raw string | null | + +### Load direction (`toState`) + +`GroupItem` is unwrapped to its base item before dispatch. 
Item type determines how the row is interpreted: + +- `ColorItem` → `HSBType` (parsed from `string`) +- `DateTimeItem` → `DateTimeType` (parsed from `string`) +- `LocationItem` → `PointType` (parsed from `string`) +- `PlayerItem` → `PlayPauseType` (parsed from `string`) +- `CallItem` → `StringListType` (parsed from `string`) +- `ImageItem` → `RawType` (Base64-decoded from `string`, MIME type from `unit`) +- `DimmerItem` / `RollershutterItem` → `PercentType` (**must be checked before `SwitchItem`**) +- `SwitchItem` → `OnOffType` +- `ContactItem` → `OpenClosedType` +- `NumberItem` with `unit != null` → `QuantityType` +- `NumberItem` without unit → `DecimalType` +- anything else with `string` → `StringType` + +**Critical instanceof ordering in `toRow()`:** `HSBType` before `PercentType` before `DecimalType` +(because `HSBType extends PercentType extends DecimalType`). + +--- + +## Per-Item Downsampling via Item Metadata + +### How to read metadata (same pattern as InfluxDB persistence) + +```java +@Reference +private MetadataRegistry metadataRegistry; + +private Optional getItemMetadata(String itemName) { + MetadataKey key = new MetadataKey("timescaledb", itemName); + return Optional.ofNullable(metadataRegistry.get(key)); +} +``` + +`Metadata` has: +- `getValue()` → main value string, e.g. `"AVG"`, `"MAX"`, `"MIN"`, `"SUM"`, or `""` (no aggregation) +- `getConfiguration()` → `Map` with keys like `"downsampleInterval"`, `"retainRawDays"`, `"retentionDays"` + +### Metadata format (configured by users in .items files) + +```java +Number:Temperature MySensor { + timescaledb="AVG" [ downsampleInterval="1h", retainRawDays="5", retentionDays="365" ] +} +``` + +### Parsing the metadata + +```java +public record DownsampleConfig( + AggregationFunction function, // AVG / MAX / MIN / SUM + Duration interval, // e.g. 
Duration.ofHours(1)
+    int retainRawDays,            // default 5
+    int retentionDays             // default 0 = disabled
+) {}
+
+public enum AggregationFunction { AVG, MAX, MIN, SUM }
+```
+
+Interval parsing — **validate against an allowlist** (used in SQL string formatting):
+
+| Metadata value | SQL interval literal |
+|---|---|
+| `1m` | `1 minute` |
+| `5m` | `5 minutes` |
+| `15m` | `15 minutes` |
+| `30m` | `30 minutes` |
+| `1h` | `1 hour` |
+| `2h` | `2 hours` |
+| `6h` | `6 hours` |
+| `12h` | `12 hours` |
+| `1d` | `1 day` |
+
+Throw `IllegalArgumentException` for any value not in this list to prevent SQL injection.
+
+---
+
+## Downsampling Job (`TimescaleDBDownsampleJob`)
+
+Runs daily (e.g. via `@Scheduled` or openHAB's `CronScheduler`). For each item that has `timescaledb` metadata with a non-empty aggregation function:
+
+```sql
+-- Step 1: aggregate raw rows older than retainRawDays into buckets
+INSERT INTO items (time, item_id, value, unit, downsampled)
+SELECT
+    time_bucket('<interval>', time) AS time,
+    item_id,
+    <fn>(value) AS value,
+    last(unit, time) AS unit,  -- keep most recent unit in bucket
+    TRUE AS downsampled
+FROM items
+WHERE item_id = ?
+  AND downsampled = FALSE
+  AND time < NOW() - INTERVAL '<retainRawDays> days'
+GROUP BY time_bucket('<interval>', time), item_id
+ON CONFLICT DO NOTHING;
+
+-- Step 2: delete replaced raw rows
+DELETE FROM items
+WHERE item_id = ?
+  AND downsampled = FALSE
+  AND time < NOW() - INTERVAL '<retainRawDays> days';
+
+-- Step 3 (if retentionDays > 0): drop everything older than retention window
+DELETE FROM items
+WHERE item_id = ?
+  AND time < NOW() - INTERVAL '<retentionDays> days';
+```
+
+**Important:**
+- `<interval>`, `<fn>`, `<retainRawDays>`, `<retentionDays>` are formatted into the SQL string — **never from user input directly**. Validate interval against allowlist, validate function against enum. Use `?` for `item_id`.
+- `last(unit, time)` is a TimescaleDB hyperfunction — verify it is available, otherwise use `MAX(unit)` as fallback.
+- Run steps 1+2 in a transaction per item to avoid partial state. 
+- Log errors per item and continue (don't abort the entire job on a single-item failure).
+
+---
+
+## Query Implementation
+
+- All item name / date / state lookups use JDBC `PreparedStatement` — no string concatenation for user-controlled values.
+- `time_bucket()` interval is formatted as a string but validated against the allowlist above.
+- `historicState`: `WHERE item_id=? AND time <= ? ORDER BY time DESC LIMIT 1`
+- `getAllStatesBetween`: `WHERE item_id=? AND time BETWEEN ? AND ? ORDER BY time ASC` — returns both raw and downsampled rows.
+- Aggregate queries (`averageSince`, `minSince`, etc.): `WHERE item_id=? AND time >= ?` — operate on all rows including downsampled ones, which is correct.
+
+---
+
+## item_id Caching
+
+Cache `name → item_id` in a `ConcurrentHashMap` to avoid a SELECT on every `store()` call. Invalidate on service restart. Auto-insert into `item_meta` on first `store()` if the item is unknown.
+
+```java
+private final Map<String, Integer> itemIdCache = new ConcurrentHashMap<>();
+
+private int getOrCreateItemId(String name, @Nullable String label) {
+    return itemIdCache.computeIfAbsent(name, n -> fetchOrInsertItemMeta(n, label));
+}
+```
+
+---
+
+## Testing
+
+### Unit Tests (no DB required)
+
+Location: `src/test/java/org/openhab/persistence/timescaledb/internal/`
+
+- `BundleManifestTest` — OSGi Import-Package allowlist + presence of `OH-INF/addon/addon.xml`
+- `TimescaleDBMapperTest` — State ↔ SQL value round-trips for all state types
+- `TimescaleDBMetadataServiceTest` — parsing of metadata values and config keys
+- `TimescaleDBDownsampleJobTest` — SQL generation for aggregation/delete, interval allowlist validation
+
+Run with `mvn test` — last result: **183 tests, 0 failures** (2026-03-13). 
+ +### Integration Tests (requires Docker + TimescaleDB) + +Tagged `@Tag("integration")`, run automatically via Testcontainers during `mvn test`: + +```java +@Container +static PostgreSQLContainer db = new PostgreSQLContainer<>("timescale/timescaledb:latest-pg16") + .withDatabaseName("openhab_test") + .withUsername("openhab") + .withPassword("openhab"); +``` + +Test: schema creation, store/query round-trips, downsampling job result, compression policy creation. + +### Performance Tests (requires external DB) + +Tagged `@Tag("performance")` — **excluded from `mvn test`** via `performance,external-integration` in pom.xml. + +Run explicitly against an external TimescaleDB: + +```bash +HOST=... PORT=5432 DBNAME=openhab USER=openhab PASSWORD=... \ + mvn test -Dtest=TimescaleDBPerformanceIT \ + -pl bundles/org.openhab.persistence.timescaledb +``` + +See `PERFORMANCE_TESTS.md` for SLOs, scale constants, and the heavy 18-month scenario. + +--- + +## Common Pitfalls + +1. **TimescaleDB extension not installed**: check on startup with `SELECT extname FROM pg_extension WHERE extname='timescaledb'`, fail with a clear error if missing. +2. **`last()` availability**: `last(unit, time)` requires the TimescaleDB Toolkit — check availability, fall back to `MAX(unit)` otherwise. +3. **Compression + INSERT conflict**: compressed chunks are read-only. The downsampling INSERT must target the uncompressed region (data newer than `compressionAfterDays`). Ensure `retainRawDays < compressionAfterDays`. +4. **Interval allowlist is mandatory**: `time_bucket('1h', time)` is dynamically formatted — any non-allowlisted value must throw before it reaches SQL. +5. **`ON CONFLICT DO NOTHING`** on the aggregation INSERT: the job may run twice if interrupted; duplicate bucket rows must be prevented. +6. **`QuantityType` unit changes**: never update `item_meta` with a unit — the unit lives on each row. On read, take the `unit` value from the row. 
+ +--- + +## Relevant openHAB Core APIs + +- `org.openhab.core.persistence.QueryablePersistenceService` — implement this +- `org.openhab.core.persistence.FilterCriteria` — query parameters passed to `query()` +- `org.openhab.core.items.MetadataRegistry` — OSGi service, inject via `@Reference` +- `org.openhab.core.items.Metadata` — `getValue()` + `getConfiguration()` for per-item config +- `org.openhab.core.items.MetadataKey` — constructed as `new MetadataKey("timescaledb", itemName)` +- `org.openhab.core.library.types.*` — `QuantityType`, `DecimalType`, `OnOffType`, etc. + +## References + +- [TimescaleDB docs](https://docs.timescale.com/) +- [time_bucket()](https://docs.timescale.com/api/latest/hyperfunctions/time_bucket/) +- [last()](https://docs.timescale.com/api/latest/hyperfunctions/last/) +- [Compression](https://docs.timescale.com/use-timescale/latest/compression/) +- InfluxDB persistence (metadata pattern): `bundles/org.openhab.persistence.influxdb/src/main/java/.../InfluxDBMetadataService.java` +- Existing downsampling logic (Python/MongoDB): `DOWNSAMPLE_IMPLEMENTATION_GUIDE.md` in this bundle + diff --git a/bundles/org.openhab.persistence.timescaledb/README.md b/bundles/org.openhab.persistence.timescaledb/README.md new file mode 100644 index 0000000000000..004feb6e38b13 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/README.md @@ -0,0 +1,234 @@ +# TimescaleDB Persistence + +This service persists and queries openHAB item states using [TimescaleDB](https://www.timescale.com/), a time-series database built on PostgreSQL. 
+ +Unlike the generic JDBC persistence (which can also connect to TimescaleDB via the PostgreSQL driver), this service is purpose-built for TimescaleDB and leverages its native time-series features: + +- **Hypertables** for automatic time-based partitioning and fast range queries +- **In-place downsampling** — raw data is aggregated and replaced in the hypertable directly, so openHAB reads aggregated data transparently without any schema changes +- **Per-item downsampling config** via item metadata (namespace `timescaledb`) +- **Compression Policies** to automatically compress older data and reduce storage +- **Retention Policies** to automatically drop data older than a configured threshold + +## Prerequisites + +- TimescaleDB 2.x installed and running (as a PostgreSQL extension) +- A database and user created for openHAB + +```sql +CREATE DATABASE openhab; +CREATE USER openhab WITH PASSWORD 'openhab'; +GRANT ALL PRIVILEGES ON DATABASE openhab TO openhab; + +-- Connect to openhab database, then: +CREATE EXTENSION IF NOT EXISTS timescaledb; +``` + +## Database Schema + +The service **creates all tables automatically on startup** — no manual DDL required. +Item states are stored in a single hypertable `items` (columns: `time`, `item_id`, `value`, `string`, `unit`, `downsampled`) and a name-lookup table `item_meta`. + +## State Type Mapping + +| openHAB Type | `value` column | `string` column | `unit` column | +|-------------------|--------------------------------|-----------------|------------------------| +| `DecimalType` | numeric value | — | — | +| `QuantityType` | numeric value (stripped) | — | unit string, e.g. 
`°C` | +| `OnOffType` | `1.0` (ON) / `0.0` (OFF) | — | — | +| `OpenClosedType` | `1.0` (OPEN) / `0.0` (CLOSED) | — | — | +| `PercentType` | `0.0`–`100.0` | — | — | +| `UpDownType` | `0.0` (UP) / `1.0` (DOWN) | — | — | +| `HSBType` | — | `H,S,B` | — | +| `DateTimeType` | — | ISO-8601 | — | +| `StringType` | — | raw string | — | + +## Configuration + +Configure via `$OPENHAB_CONF/services/timescaledb.cfg` or in the UI under `Settings → Add-ons → TimescaleDB → Configure`. + +| Property | Default | Required | Description | +|------------------------|-----------|:--------:|-----------------------------------------------------------| +| `url` | | Yes | JDBC URL, e.g. `jdbc:postgresql://localhost:5432/openhab` | +| `user` | `openhab` | No | Database user | +| `password` | | Yes | Database password | +| `chunkInterval` | `7 days` | No | TimescaleDB chunk interval for the hypertable | +| `retentionDays` | `0` | No | Drop data older than N days. `0` = disabled | +| `compressionAfterDays` | `0` | No | Compress chunks older than N days. `0` = disabled | +| `maxConnections` | `5` | No | Maximum DB connections in the pool | +| `connectTimeout` | `5000` | No | Connection timeout in milliseconds | + +## Persistence Configuration + +All item- and event-related configuration is defined in `persistence/timescaledb.persist`: + +```java +Strategies { + everyMinute : "0 * * * * ?" + everyHour : "0 0 * * * ?" + everyDay : "0 0 0 * * ?" + default = everyChange +} + +Items { + * : strategy = everyChange, restoreOnStartup + Temperature_* : strategy = everyMinute + Energy_* : strategy = everyHour +} +``` + +## Per-Item Downsampling + +Downsampling is configured **per item** via item metadata in the `timescaledb` namespace. 
+
+### Metadata format
+
+```text
+timescaledb="<function>" [downsampleInterval="<interval>", retainRawDays="<days>", retentionDays="<days>"]
+```
+
+| Metadata key | Values | Description |
+|----------------------|--------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+| value (main) | `AVG`, `MAX`, `MIN`, `SUM`, or `" "` | Aggregation function. Use a single space `" "` for retention-only (no downsampling). openHAB rejects a truly empty value, so a space is required. |
+| `downsampleInterval` | e.g. `1h`, `15m`, `1d` | Time bucket size for aggregation. Required when value is an aggregation function. |
+| `retainRawDays` | integer, default `5` | Keep raw data for N days before replacing with aggregated rows. |
+| `retentionDays` | integer, default `0` | Drop all data (raw + downsampled) older than N days. `0` = off. |
+
+### Configuration in `.items` files
+
+```java
+Number:Temperature Sensor_Temperature_Living "Living Room [%.1f °C]" {
+    timescaledb="AVG" [ downsampleInterval="1h", retainRawDays="5" ]
+}
+
+Number:Power Meter_Power_House "House Power [%.1f W]" {
+    timescaledb="AVG" [ downsampleInterval="15m", retainRawDays="3", retentionDays="365" ]
+}
+
+Number:Energy Meter_Energy_House "House Energy [%.3f kWh]" {
+    timescaledb="SUM" [ downsampleInterval="1h", retainRawDays="7" ]
+}
+
+// Retention-only: no downsampling, just drop data older than 30 days.
+// The value must be a single space " " — openHAB rejects a truly empty string. 
+Number:Temperature Sensor_Temp_Outdoor { + timescaledb=" " [ retentionDays="30" ] +} +``` + +### Configuration in mainUI + +**Downsampling + Retention:** + +`Item → Metadata → Add Metadata → Enter namespace "timescaledb"`: + +- Value: `AVG` +- Additional config: `downsampleInterval=1h`, `retainRawDays=5`, `retentionDays=365` + +**Retention-only (no downsampling):** + +`Item → Metadata → Add Metadata → Enter namespace "timescaledb"`: + +- Value: ` ` (a single space — openHAB rejects an empty value) +- Additional config: `retentionDays=30` + +### How in-place downsampling works + +The downsampling runs as a scheduled job (daily, at midnight): + +```text +For each item with timescaledb metadata: + 1. Parse operation + downsampleInterval from metadata + 2. Compute cutoff = NOW() - retainRawDays + 3. SELECT time_bucket(interval, time), agg_fn(value), MAX(unit) + FROM items + WHERE item_id = ? AND downsampled = FALSE AND time < cutoff + GROUP BY bucket + 4. INSERT aggregated rows with downsampled = TRUE + 5. DELETE original rows (downsampled = FALSE, time < cutoff) +``` + +This keeps the hypertable as the single source of truth. openHAB reads aggregated and raw data from the same table — no query changes needed. 
+ +### Supported intervals + +| Metadata value | SQL interval | +|----------------|---------------| +| `1m` | `1 minute` | +| `5m` | `5 minutes` | +| `15m` | `15 minutes` | +| `30m` | `30 minutes` | +| `1h` | `1 hour` | +| `6h` | `6 hours` | +| `1d` | `1 day` | + +## Querying from openHAB + +The service implements the full `QueryablePersistenceService` interface: + +| openHAB Query | TimescaleDB Implementation | +|--------------------------------------------|---------------------------------------| +| `historicState(item, timestamp)` | `SELECT … ORDER BY time DESC LIMIT 1` | +| `averageSince(item, timestamp)` | `AVG(value) WHERE time >= ?` | +| `sumSince(item, timestamp)` | `SUM(value) WHERE time >= ?` | +| `minSince(item, timestamp)` | `MIN(value) WHERE time >= ?` | +| `maxSince(item, timestamp)` | `MAX(value) WHERE time >= ?` | +| `countSince(item, timestamp)` | `COUNT(*) WHERE time >= ?` | +| `getAllStatesBetween(item, begin, end)` | Range scan (raw + downsampled) | +| `removeAllStatesBetween(item, begin, end)` | `DELETE WHERE time BETWEEN ? AND ?` | + +## Compression + +When `compressionAfterDays > 0`, the service configures automatic chunk compression: + +```sql +ALTER TABLE items SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'item_id', + timescaledb.compress_orderby = 'time DESC' +); +SELECT add_compression_policy('items', INTERVAL '30 days'); +``` + +## Retention + +When `retentionDays > 0` (global config), a TimescaleDB retention policy is added: + +```sql +SELECT add_retention_policy('items', INTERVAL '365 days'); +``` + +Per-item retention (via metadata `retentionDays`) is applied by the daily downsampling job using a targeted DELETE. +This works independently of downsampling: an item can have `retentionDays` set without any aggregation function +(use a single space `" "` as the metadata value in that case). 
+ +## Grafana Integration + +TimescaleDB works natively with the Grafana PostgreSQL data source: + +```sql +-- Raw + downsampled data for a sensor (last 24 h) +SELECT + time_bucket('5 minutes', time) AS time, + AVG(value) AS temperature, + MAX(unit) AS unit +FROM items +JOIN item_meta ON items.item_id = item_meta.id +WHERE item_meta.name = 'Sensor_Temperature_Living' + AND time > NOW() - INTERVAL '24 hours' +GROUP BY 1 +ORDER BY 1; +``` + +## Differences from JDBC Persistence + +| Feature | JDBC Persistence | TimescaleDB Persistence | +|--------------------------------|----------------------|-------------------------| +| TimescaleDB hypertables | No (plain tables) | Yes | +| In-place downsampling | No | Yes | +| Per-item aggregation config | No | Yes (item metadata) | +| Automatic compression | No | Yes | +| Retention policies | No | Yes (global + per-item) | +| Unit stored per measurement | No | Yes | +| Multiple DB backends | Yes | No (TimescaleDB only) | +| Schema (one table per item) | Yes | No (single hypertable) | diff --git a/bundles/org.openhab.persistence.timescaledb/pom.xml b/bundles/org.openhab.persistence.timescaledb/pom.xml new file mode 100644 index 0000000000000..deb945e822e0a --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/pom.xml @@ -0,0 +1,68 @@ + + + + 4.0.0 + + + org.openhab.addons.bundles + org.openhab.addons.reactor.bundles + 5.2.0-SNAPSHOT + + + org.openhab.persistence.timescaledb + + openHAB Add-ons :: Bundles :: Persistence Service :: TimescaleDB + + + !com.codahale.metrics.*,!io.prometheus.*,!org.checkerframework.*,!org.jetbrains.annotations.*,!org.hibernate.*,!waffle.windows.auth.*,!org.osgi.service.jdbc.*,!com.sun.jna.*,!javassist.* + 42.7.9 + 5.1.0 + + + + + org.postgresql + postgresql + ${postgresql.version} + compile + + + com.zaxxer + HikariCP + ${hikari.version} + compile + + + + + org.testcontainers + testcontainers-postgresql + 2.0.3 + test + + + org.testcontainers + testcontainers-junit-jupiter + 2.0.3 + test + + 
+ org.mockito + mockito-core + ${mockito.version} + test + + + + + + + maven-surefire-plugin + + performance,external-integration + + + + + diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml b/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml new file mode 100644 index 0000000000000..1f07a75b88f36 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/feature/feature.xml @@ -0,0 +1,11 @@ + + + mvn:org.openhab.core.features.karaf/org.openhab.core.features.karaf.openhab-core/${ohc.version}/xml/features + + + + openhab-runtime-base + mvn:org.openhab.addons.bundles/org.openhab.persistence.timescaledb/${project.version} + + + diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java new file mode 100644 index 0000000000000..0fced0219221c --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/AggregationFunction.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import org.eclipse.jdt.annotation.NonNullByDefault; + +/** + * Aggregation function used for per-item downsampling. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public enum AggregationFunction { + AVG, + MAX, + MIN, + SUM; + + /** Returns the SQL aggregate function name, e.g. {@code AVG(value)}. 
*/ + public String toSql() { + return name(); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java new file mode 100644 index 0000000000000..2f6fa83f498a0 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/DownsampleConfig.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; + +/** + * Per-item configuration parsed from item metadata (namespace {@code timescaledb}). + * + *

+ * A config is either a full downsampling config (function + interval + retention) or a + * retention-only config (function and sqlInterval are {@code null}). Use {@link #hasDownsampling()} + * to distinguish the two cases. + * + * @param function Aggregation function (AVG, MAX, MIN, SUM), or {@code null} for retention-only. + * @param sqlInterval Validated SQL interval literal, e.g. {@code "1 hour"}, or {@code null} for retention-only. + * @param retainRawDays Keep raw data for N days before aggregating. Ignored for retention-only configs. + * @param retentionDays Drop all data older than N days. 0 = disabled. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public record DownsampleConfig(@Nullable AggregationFunction function, @Nullable String sqlInterval, int retainRawDays, + int retentionDays) { + + /** Allowlist mapping from metadata interval strings to SQL interval literals. */ + public static final java.util.Map INTERVAL_MAP = java.util.Map.of("1m", "1 minute", "5m", + "5 minutes", "15m", "15 minutes", "30m", "30 minutes", "1h", "1 hour", "2h", "2 hours", "6h", "6 hours", + "12h", "12 hours", "1d", "1 day"); + + /** + * Converts a metadata interval string to its SQL literal. + * + * @param interval The metadata interval string, e.g. {@code "1h"}. + * @return The SQL interval literal. + * @throws IllegalArgumentException if the interval is not in the allowlist. + */ + public static String toSqlInterval(String interval) { + String sql = INTERVAL_MAP.get(interval); + if (sql == null) { + throw new IllegalArgumentException( + "Invalid downsampleInterval '" + interval + "'. Allowed: " + INTERVAL_MAP.keySet()); + } + return sql; + } + + /** + * Returns {@code true} if this config describes a full downsampling run (aggregation + raw-data pruning). + * Returns {@code false} for retention-only configs where only the retention DELETE is executed. 
+ */ + public boolean hasDownsampling() { + return function != null; + } + + /** + * Creates a retention-only config: no aggregation, just a periodic DELETE of rows older than + * {@code retentionDays} days. + * + * @param retentionDays Days after which all rows are deleted. Must be > 0. + */ + public static DownsampleConfig retentionOnly(int retentionDays) { + if (retentionDays <= 0) { + throw new IllegalArgumentException("retentionDays must be > 0, got " + retentionDays); + } + return new DownsampleConfig(null, null, 0, retentionDays); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java new file mode 100644 index 0000000000000..187b69facba96 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBConsoleCommandExtension.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.util.List; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.io.console.Console; +import org.openhab.core.io.console.ConsoleCommandCompleter; +import org.openhab.core.io.console.StringsCompleter; +import org.openhab.core.io.console.extensions.AbstractConsoleCommandExtension; +import org.openhab.core.io.console.extensions.ConsoleCommandExtension; +import org.osgi.service.component.annotations.Activate; +import org.osgi.service.component.annotations.Component; +import org.osgi.service.component.annotations.Reference; + +/** + * Karaf console commands for the TimescaleDB persistence service. + * + *

+ * openhab:timescaledb downsample   - run the downsampling job immediately
+ * 
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +@Component(service = ConsoleCommandExtension.class) +public class TimescaleDBConsoleCommandExtension extends AbstractConsoleCommandExtension + implements ConsoleCommandCompleter { + + private static final String CMD_DOWNSAMPLE = "downsample"; + private static final StringsCompleter CMD_COMPLETER = new StringsCompleter(List.of(CMD_DOWNSAMPLE), false); + + private final TimescaleDBPersistenceService persistenceService; + + @Activate + public TimescaleDBConsoleCommandExtension(@Reference TimescaleDBPersistenceService persistenceService) { + super("timescaledb", "TimescaleDB persistence commands."); + this.persistenceService = persistenceService; + } + + @Override + public List getUsages() { + return List.of(buildCommandUsage(CMD_DOWNSAMPLE, "run the downsampling/retention job immediately")); + } + + @Override + public @Nullable ConsoleCommandCompleter getCompleter() { + return this; + } + + @Override + public boolean complete(String[] args, int cursorArgumentIndex, int cursorPosition, List candidates) { + return CMD_COMPLETER.complete(args, cursorArgumentIndex, cursorPosition, candidates); + } + + @Override + public void execute(String[] args, Console console) { + if (args.length == 1 && CMD_DOWNSAMPLE.equals(args[0])) { + console.println("Starting downsampling job..."); + boolean ran = persistenceService.runDownsampleNow(); + if (ran) { + console.println("Downsampling job finished."); + } else { + console.println("TimescaleDB persistence service is not active — cannot run job."); + } + } else { + printUsage(console); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java new file mode 100644 index 0000000000000..4e01c60fc32ad --- /dev/null +++ 
b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJob.java @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.List; + +import javax.sql.DataSource; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Daily scheduled job that performs in-place downsampling for all items + * configured with {@code timescaledb} metadata. + * + *

+ * For each eligible item, the job runs atomically in a single transaction: + *

    + *
  1. INSERT aggregated rows (time_bucket + agg_fn) with {@code downsampled=TRUE}
  2. + *
  3. DELETE original raw rows that have been aggregated
  4. + *
  5. If {@code retentionDays > 0}: DELETE all rows older than the retention window
  6. + *
+ * + *

+ * Security note: The SQL interval and aggregation function are + * formatted into the query string but are validated against an allowlist + * ({@link DownsampleConfig#INTERVAL_MAP} and {@link AggregationFunction} enum) + * before use. The {@code item_id} is always a JDBC bind parameter. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public class TimescaleDBDownsampleJob implements Runnable { + + private final Logger logger = LoggerFactory.getLogger(TimescaleDBDownsampleJob.class); + + /** + * INSERT aggregated rows for one item, skipping buckets that already have a downsampled row. + * The NOT EXISTS guard makes this statement idempotent at the query level; the trailing + * ON CONFLICT DO NOTHING (no conflict target) provides a second line of defence for + * concurrent execution. The target-less form is required because TimescaleDB hypertables + * do not support column-inference conflict targets. Because the schema uses + * UNIQUE(time, item_id, downsampled) and we always insert downsampled=TRUE, this never + * silently drops a bucket that merely collides with a raw row at the bucket boundary. + * Placeholder: (1) item_id. + * Interval and agg-fn are pre-validated strings from the allowlist/enum. + */ + private static final String SQL_INSERT_AGGREGATED_TEMPLATE = """ + INSERT INTO items (time, item_id, value, unit, downsampled) + SELECT bucket, item_id, agg_value, agg_unit, TRUE + FROM ( + SELECT + time_bucket('%s', time) AS bucket, + item_id, + %s(value) AS agg_value, + MAX(unit) AS agg_unit + FROM items + WHERE item_id = ? + AND downsampled = FALSE + AND time < NOW() - INTERVAL '%d days' + GROUP BY time_bucket('%s', time), item_id + ) AS new_buckets + WHERE NOT EXISTS ( + SELECT 1 + FROM items existing + WHERE existing.item_id = new_buckets.item_id + AND existing.downsampled = TRUE + AND existing.time = new_buckets.bucket + ) + ON CONFLICT DO NOTHING + """; + + /** + * DELETE raw rows that were just aggregated. 
+ * Placeholder: (1) item_id. + */ + private static final String SQL_DELETE_RAW_TEMPLATE = """ + DELETE FROM items + WHERE item_id = ? + AND downsampled = FALSE + AND time < NOW() - INTERVAL '%d days' + """; + + /** + * DELETE all rows (raw + downsampled) outside the per-item retention window. + * Placeholder: (1) item_id. + */ + private static final String SQL_DELETE_RETENTION_TEMPLATE = """ + DELETE FROM items + WHERE item_id = ? + AND time < NOW() - INTERVAL '%d days' + """; + + private final DataSource dataSource; + private final TimescaleDBMetadataService metadataService; + + /** + * @param dataSource The connection pool. + * @param metadataService The metadata parser. + */ + public TimescaleDBDownsampleJob(DataSource dataSource, TimescaleDBMetadataService metadataService) { + this.dataSource = dataSource; + this.metadataService = metadataService; + } + + @Override + public void run() { + List itemNames = metadataService.getConfiguredItemNames(); + logger.info("Downsampling job started: {} item(s) to process", itemNames.size()); + + int success = 0; + int skipped = 0; + int failed = 0; + + for (String itemName : itemNames) { + DownsampleConfig config; + { + var configOpt = metadataService.getDownsampleConfig(itemName); + if (configOpt.isEmpty()) { + logger.debug("Item '{}': no valid DownsampleConfig — skipping", itemName); + skipped++; + continue; + } + config = configOpt.get(); + } + + try { + downsampleItem(itemName, config); + success++; + } catch (Exception e) { + logger.error("Downsampling failed for item '{}': {}", itemName, e.getMessage(), e); + failed++; + } + } + + logger.info("Downsampling job finished: {} succeeded, {} skipped, {} failed", success, skipped, failed); + } + + private void downsampleItem(String itemName, DownsampleConfig config) throws SQLException { + try (Connection conn = dataSource.getConnection()) { + var itemIdOpt = TimescaleDBQuery.findItemId(conn, itemName); + if (itemIdOpt.isEmpty()) { + logger.debug("Item '{}': not yet known 
to the database — skipping", itemName); + return; + } + int itemId = itemIdOpt.get(); + conn.setAutoCommit(false); + try { + if (config.hasDownsampling()) { + AggregationFunction fn = config.function(); + String interval = config.sqlInterval(); + if (fn != null && interval != null) { + String sqlInsert = SQL_INSERT_AGGREGATED_TEMPLATE.formatted(interval, fn.toSql(), + config.retainRawDays(), interval); + String sqlDeleteRaw = SQL_DELETE_RAW_TEMPLATE.formatted(config.retainRawDays()); + int inserted = executeUpdate(conn, sqlInsert, itemId); + int deleted = executeUpdate(conn, sqlDeleteRaw, itemId); + logger.debug("Item '{}': aggregated {} bucket(s), deleted {} raw row(s)", itemName, inserted, + deleted); + } + } + + if (config.retentionDays() > 0) { + String sqlRetention = SQL_DELETE_RETENTION_TEMPLATE.formatted(config.retentionDays()); + int dropped = executeUpdate(conn, sqlRetention, itemId); + logger.debug("Item '{}': dropped {} row(s) outside {}d retention window", itemName, dropped, + config.retentionDays()); + } + + conn.commit(); + } catch (SQLException e) { + conn.rollback(); + throw e; + } finally { + conn.setAutoCommit(true); + } + } + } + + private static int executeUpdate(Connection conn, String sql, int itemId) throws SQLException { + try (PreparedStatement ps = conn.prepareStatement(sql)) { + ps.setInt(1, itemId); + return ps.executeUpdate(); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java new file mode 100644 index 0000000000000..4bc90b0020236 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBHistoricItem.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) 
distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.openhab.core.persistence.HistoricItem; +import org.openhab.core.types.State; + +/** + * A historic item returned by TimescaleDB queries. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public class TimescaleDBHistoricItem implements HistoricItem { + + private final String name; + private final State state; + private final Instant timestamp; + + public TimescaleDBHistoricItem(String name, State state, Instant timestamp) { + this.name = name; + this.state = state; + this.timestamp = timestamp; + } + + @Override + public String getName() { + return name; + } + + @Override + public State getState() { + return state; + } + + @Override + public ZonedDateTime getTimestamp() { + return timestamp.atZone(ZoneId.systemDefault()); + } + + @Override + public String toString() { + return "TimescaleDBHistoricItem{name='" + name + "', state=" + state + ", timestamp=" + timestamp + "}"; + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java new file mode 100644 index 0000000000000..e5b2720f96357 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapper.java @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the 
NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.Base64; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.items.GroupItem; +import org.openhab.core.items.Item; +import org.openhab.core.library.items.CallItem; +import org.openhab.core.library.items.ColorItem; +import org.openhab.core.library.items.ContactItem; +import org.openhab.core.library.items.DateTimeItem; +import org.openhab.core.library.items.DimmerItem; +import org.openhab.core.library.items.ImageItem; +import org.openhab.core.library.items.LocationItem; +import org.openhab.core.library.items.PlayerItem; +import org.openhab.core.library.items.RollershutterItem; +import org.openhab.core.library.items.SwitchItem; +import org.openhab.core.library.types.DateTimeType; +import org.openhab.core.library.types.DecimalType; +import org.openhab.core.library.types.HSBType; +import org.openhab.core.library.types.OnOffType; +import org.openhab.core.library.types.OpenClosedType; +import org.openhab.core.library.types.PercentType; +import org.openhab.core.library.types.PlayPauseType; +import org.openhab.core.library.types.PointType; +import org.openhab.core.library.types.QuantityType; +import org.openhab.core.library.types.RawType; +import org.openhab.core.library.types.StringListType; +import org.openhab.core.library.types.StringType; +import org.openhab.core.library.types.UpDownType; +import org.openhab.core.types.State; +import org.openhab.core.types.UnDefType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Converts between openHAB 
{@link State} objects and the three-column schema + * ({@code value DOUBLE PRECISION}, {@code string TEXT}, {@code unit TEXT}). + * + *

+ * Mapping rules (store direction): + *

    + *
  • {@link DecimalType} → value, unit=null, string=null
  • + *
  • {@link QuantityType} → value=numeric, unit=unit-string, string=null
  • + *
  • {@link OnOffType} → value (ON=1.0, OFF=0.0)
  • + *
  • {@link OpenClosedType} → value (OPEN=1.0, CLOSED=0.0)
  • + *
  • {@link PercentType} → value (0.0–100.0)
  • + *
  • {@link UpDownType} → value (UP=0.0, DOWN=1.0)
  • + *
  • {@link HSBType} → string="H,S,B"
  • + *
  • {@link DateTimeType} → string=ISO-8601
  • + *
  • {@link PointType} → string="lat,lon[,alt]"
  • + *
  • {@link PlayPauseType} → string=enum name
  • + *
  • {@link StringListType} → string=comma-separated values
  • + *
  • {@link RawType} → string=Base64-encoded bytes, unit=MIME type
  • + *
  • {@link StringType} → string=raw value
  • + *
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public class TimescaleDBMapper { + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBMapper.class); + + private TimescaleDBMapper() { + // utility class + } + + /** + * SQL row representation of a persisted state. + * + * @param value Numeric value, or null for string-only states. + * @param string String value, or null for numeric states. + * @param unit Unit string for {@link QuantityType}, null otherwise. + */ + public record Row(@Nullable Double value, @Nullable String string, @Nullable String unit) { + } + + /** + * Converts an openHAB {@link State} to a SQL {@link Row}. + * + * @param state The state to convert. + * @return The row to be stored, or null if the state type is unsupported. + */ + public static @Nullable Row toRow(State state) { + if (state instanceof QuantityType qty) { + return new Row(qty.doubleValue(), null, qty.getUnit().toString()); + } else if (state instanceof HSBType hsb) { + // HSBType extends PercentType extends DecimalType — must be checked before both + return new Row(null, hsb.toString(), null); + } else if (state instanceof OnOffType onOff) { + return new Row(onOff == OnOffType.ON ? 1.0 : 0.0, null, null); + } else if (state instanceof OpenClosedType openClosed) { + return new Row(openClosed == OpenClosedType.OPEN ? 1.0 : 0.0, null, null); + } else if (state instanceof PercentType pct) { + return new Row(pct.doubleValue(), null, null); + } else if (state instanceof DecimalType dec) { + return new Row(dec.doubleValue(), null, null); + } else if (state instanceof UpDownType upDown) { + return new Row(upDown == UpDownType.UP ? 
0.0 : 1.0, null, null); + } else if (state instanceof DateTimeType dt) { + return new Row(null, dt.getZonedDateTime(ZoneId.systemDefault()).toString(), null); + } else if (state instanceof PointType point) { + return new Row(null, point.toString(), null); + } else if (state instanceof PlayPauseType playPause) { + return new Row(null, playPause.toString(), null); + } else if (state instanceof StringListType stringList) { + return new Row(null, stringList.toString(), null); + } else if (state instanceof RawType raw) { + return new Row(null, Base64.getEncoder().encodeToString(raw.getBytes()), raw.getMimeType()); + } else if (state instanceof StringType str) { + return new Row(null, str.toString(), null); + } else { + LOGGER.warn("Unsupported state type for TimescaleDB persistence: {}", state.getClass().getSimpleName()); + return null; + } + } + + /** + * Reconstructs an openHAB {@link State} from a SQL row, using the item type for disambiguation. + * + * @param item The openHAB item (used for type-based disambiguation of numeric states). + * @param value The {@code value} column, may be null. + * @param string The {@code string} column, may be null. + * @param unit The {@code unit} column, may be null. + * @return The reconstructed state, or {@link UnDefType#UNDEF} if reconstruction fails. 
+ */ + public static State toState(Item item, @Nullable Double value, @Nullable String string, @Nullable String unit) { + // Unwrap GroupItem to its base item for type dispatch + Item realItem = item; + if (item instanceof GroupItem groupItem) { + Item baseItem = groupItem.getBaseItem(); + if (baseItem != null) { + realItem = baseItem; + } + } + + // QuantityType: unit column present together with a numeric value + if (unit != null && value != null) { + try { + return new QuantityType<>(value + " " + unit); + } catch (IllegalArgumentException e) { + LOGGER.warn("Failed to parse QuantityType for item '{}' with value={} unit={}: {}", item.getName(), + value, unit, e.getMessage()); + return UnDefType.UNDEF; + } + } + + // String-based states + if (string != null) { + if (realItem instanceof ColorItem) { + try { + return new HSBType(string); + } catch (IllegalArgumentException e) { + LOGGER.warn("Failed to parse HSBType for item '{}': {}", item.getName(), e.getMessage()); + return UnDefType.UNDEF; + } + } + if (realItem instanceof DateTimeItem) { + try { + return new DateTimeType(ZonedDateTime.parse(string)); + } catch (Exception e) { + LOGGER.warn("Failed to parse DateTimeType for item '{}': {}", item.getName(), e.getMessage()); + return UnDefType.UNDEF; + } + } + if (realItem instanceof LocationItem) { + try { + return new PointType(string); + } catch (IllegalArgumentException e) { + LOGGER.warn("Failed to parse PointType for item '{}': {}", item.getName(), e.getMessage()); + return UnDefType.UNDEF; + } + } + if (realItem instanceof PlayerItem) { + try { + return PlayPauseType.valueOf(string); + } catch (IllegalArgumentException e) { + LOGGER.warn("Failed to parse PlayPauseType for item '{}': {}", item.getName(), e.getMessage()); + return UnDefType.UNDEF; + } + } + if (realItem instanceof CallItem) { + return new StringListType(string); + } + if (realItem instanceof ImageItem) { + try { + byte[] bytes = Base64.getDecoder().decode(string); + String mimeType = unit != 
null ? unit : "application/octet-stream"; + return new RawType(bytes, mimeType); + } catch (IllegalArgumentException e) { + LOGGER.warn("Failed to decode RawType for item '{}': {}", item.getName(), e.getMessage()); + return UnDefType.UNDEF; + } + } + // StringItem, GenericItem, and anything else with a string value + return new StringType(string); + } + + // Numeric states without unit + if (value != null) { + if (realItem instanceof DimmerItem || realItem instanceof RollershutterItem) { + // DimmerItem extends SwitchItem — must be checked before SwitchItem + return new PercentType((int) Math.round(value)); + } + if (realItem instanceof SwitchItem) { + return value >= 0.5 ? OnOffType.ON : OnOffType.OFF; + } + if (realItem instanceof ContactItem) { + return value >= 0.5 ? OpenClosedType.OPEN : OpenClosedType.CLOSED; + } + // NumberItem, GenericItem, UpDownType stored as decimal, etc. + return new DecimalType(value); + } + + LOGGER.warn("Cannot reconstruct state for item '{}': value=null, string=null, unit=null", item.getName()); + return UnDefType.UNDEF; + } + + /** + * Converts a SQL operator from {@link org.openhab.core.persistence.FilterCriteria.Operator} + * to a JDBC-compatible SQL operator string. + * + * @param operator The filter operator. + * @return The SQL operator string, or null if not supported. 
+ */ + public static @Nullable String toSqlOperator(org.openhab.core.persistence.FilterCriteria.Operator operator) { + return switch (operator) { + case EQ -> "="; + case NEQ -> "<>"; + case LT -> "<"; + case LTE -> "<="; + case GT -> ">"; + case GTE -> ">="; + default -> null; + }; + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataService.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataService.java new file mode 100644 index 0000000000000..2b037a6ad2c9f --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataService.java @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.items.Metadata; +import org.openhab.core.items.MetadataKey; +import org.openhab.core.items.MetadataRegistry; +import org.osgi.service.component.annotations.Activate; +import org.osgi.service.component.annotations.Component; +import org.osgi.service.component.annotations.Reference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Reads and parses per-item downsampling configuration from the {@link MetadataRegistry} + * using the {@code timescaledb} namespace. + * + *

+ * Example item metadata: + * + *

+ * Number:Temperature MySensor {
+ *     timescaledb="AVG" [ downsampleInterval="1h", retainRawDays="5", retentionDays="365" ]
+ * }
+ * 
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +@Component(service = TimescaleDBMetadataService.class) +public class TimescaleDBMetadataService { + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBMetadataService.class); + + /** The metadata namespace used by this persistence service. */ + public static final String METADATA_NAMESPACE = "timescaledb"; + + private static final int DEFAULT_RETAIN_RAW_DAYS = 5; + private static final int DEFAULT_RETENTION_DAYS = 0; + + private final MetadataRegistry metadataRegistry; + + @Activate + public TimescaleDBMetadataService(final @Reference MetadataRegistry metadataRegistry) { + this.metadataRegistry = metadataRegistry; + } + + /** + * Returns the {@link DownsampleConfig} for the given item, or empty if no downsampling + * is configured or the metadata cannot be parsed. + * + * @param itemName The item name. + * @return Optional containing the parsed config, or empty. + */ + public Optional getDownsampleConfig(String itemName) { + MetadataKey key = new MetadataKey(METADATA_NAMESPACE, itemName); + @Nullable + Metadata metadata = metadataRegistry.get(key); + if (metadata == null) { + return Optional.empty(); + } + return parseConfig(itemName, metadata); + } + + /** + * Returns the names of all items that have a {@code timescaledb} metadata entry, + * regardless of whether they configure downsampling, retention-only, or both. + * + * @return List of item names with any timescaledb metadata. 
+ */ + public List getConfiguredItemNames() { + List result = new ArrayList<>(); + for (Metadata metadata : metadataRegistry.getAll()) { + if (!METADATA_NAMESPACE.equals(metadata.getUID().getNamespace())) { + continue; + } + result.add(metadata.getUID().getItemName()); + } + return result; + } + + private Optional parseConfig(String itemName, Metadata metadata) { + String functionStr = metadata.getValue(); + if (functionStr.isBlank()) { + // No aggregation function — check for retention-only config. + // Note: openHAB requires a non-empty metadata value, so use a single space (" ") + // in item files and the UI when you only want retention without downsampling. + int retentionDays = getInt(metadata.getConfiguration(), "retentionDays", DEFAULT_RETENTION_DAYS); + if (retentionDays < 0) { + LOGGER.warn("Item '{}': retentionDays must be >= 0, ignoring negative value {}", itemName, + retentionDays); + return Optional.empty(); + } + if (retentionDays > 0) { + LOGGER.debug("Item '{}': retention-only config with retentionDays={}", itemName, retentionDays); + return Optional.of(DownsampleConfig.retentionOnly(retentionDays)); + } + return Optional.empty(); + } + + AggregationFunction function; + try { + function = AggregationFunction.valueOf(functionStr.trim().toUpperCase()); + } catch (IllegalArgumentException e) { + LOGGER.warn("Item '{}': unknown aggregation function '{}' in timescaledb metadata — skipping", itemName, + functionStr); + return Optional.empty(); + } + + var config = metadata.getConfiguration(); + + String intervalStr = getString(config, "downsampleInterval", null); + if (intervalStr == null || intervalStr.isBlank()) { + LOGGER.warn("Item '{}': timescaledb metadata has function '{}' but no downsampleInterval — skipping", + itemName, functionStr); + return Optional.empty(); + } + + String sqlInterval; + try { + sqlInterval = DownsampleConfig.toSqlInterval(intervalStr); + } catch (IllegalArgumentException e) { + LOGGER.warn("Item '{}': {}", itemName, 
e.getMessage()); + return Optional.empty(); + } + + int retainRawDays = getInt(config, "retainRawDays", DEFAULT_RETAIN_RAW_DAYS); + if (retainRawDays < 0) { + LOGGER.warn("Item '{}': retainRawDays must be >= 0, using default {}", itemName, DEFAULT_RETAIN_RAW_DAYS); + retainRawDays = DEFAULT_RETAIN_RAW_DAYS; + } + int retentionDays = getInt(config, "retentionDays", DEFAULT_RETENTION_DAYS); + if (retentionDays < 0) { + LOGGER.warn("Item '{}': retentionDays must be >= 0, using default {}", itemName, DEFAULT_RETENTION_DAYS); + retentionDays = DEFAULT_RETENTION_DAYS; + } + + DownsampleConfig result = new DownsampleConfig(function, sqlInterval, retainRawDays, retentionDays); + LOGGER.debug("Item '{}': parsed DownsampleConfig {}", itemName, result); + return Optional.of(result); + } + + private static @Nullable String getString(java.util.Map config, String key, + @Nullable String defaultValue) { + Object val = config.get(key); + return val != null ? val.toString() : defaultValue; + } + + private static int getInt(java.util.Map config, String key, int defaultValue) { + Object val = config.get(key); + if (val == null) { + return defaultValue; + } + try { + return Integer.parseInt(val.toString()); + } catch (NumberFormatException e) { + LOGGER.warn("Invalid integer value '{}' for metadata key '{}', using default {}", val, key, defaultValue); + return defaultValue; + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java new file mode 100644 index 0000000000000..a0ff5443f5948 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceService.java @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the 
NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.sql.Connection; +import java.sql.SQLException; +import java.time.ZonedDateTime; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.common.ThreadPoolManager; +import org.openhab.core.config.core.ConfigurableService; +import org.openhab.core.items.Item; +import org.openhab.core.items.ItemNotFoundException; +import org.openhab.core.items.ItemRegistry; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.persistence.HistoricItem; +import org.openhab.core.persistence.ModifiablePersistenceService; +import org.openhab.core.persistence.PersistenceService; +import org.openhab.core.persistence.QueryablePersistenceService; +import org.openhab.core.persistence.strategy.PersistenceStrategy; +import org.openhab.core.types.State; +import org.openhab.core.types.UnDefType; +import org.osgi.framework.Constants; +import org.osgi.service.component.annotations.Activate; +import org.osgi.service.component.annotations.Component; +import org.osgi.service.component.annotations.ConfigurationPolicy; +import org.osgi.service.component.annotations.Deactivate; +import org.osgi.service.component.annotations.Reference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +/** 
+ * TimescaleDB persistence service for openHAB. + * + *

+ * Implements {@link ModifiablePersistenceService} to support store, query, and remove operations + * against a TimescaleDB (PostgreSQL extension) hypertable. + * + *

+ * Item names are cached in-memory ({@code name → item_id}) to avoid a SELECT on every + * {@link #store} call. The cache is populated lazily on first store per item. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +@Component(service = { PersistenceService.class, QueryablePersistenceService.class, ModifiablePersistenceService.class, + TimescaleDBPersistenceService.class }, configurationPid = "org.openhab.timescaledb", configurationPolicy = ConfigurationPolicy.REQUIRE, property = Constants.SERVICE_PID + + "=org.openhab.timescaledb") +@ConfigurableService(category = "persistence", label = "TimescaleDB Persistence Service", description_uri = TimescaleDBPersistenceService.CONFIG_URI) +public class TimescaleDBPersistenceService implements ModifiablePersistenceService { + + static final String CONFIG_URI = "persistence:timescaledb"; + static final String CONFIGURATION_PID = "org.openhab.timescaledb"; + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBPersistenceService.class); + private static final String THREAD_POOL_NAME = "timescaledb"; + + private static final String SERVICE_ID = "timescaledb"; + private static final String SERVICE_LABEL = "TimescaleDB"; + + // item name → item_id, populated lazily + private final Map itemIdCache = new ConcurrentHashMap<>(); + + private final ItemRegistry itemRegistry; + private final TimescaleDBMetadataService metadataService; + + private @Nullable HikariDataSource dataSource; + private @Nullable ScheduledFuture downsampleJob; + private @Nullable TimescaleDBDownsampleJob downsampleJobInstance; + + @Activate + public TimescaleDBPersistenceService(final @Reference ItemRegistry itemRegistry, + final @Reference TimescaleDBMetadataService metadataService) { + this.itemRegistry = itemRegistry; + this.metadataService = metadataService; + } + + /** Package-private constructor for unit tests — skips OSGi activation, allows injecting a DataSource. 
*/ + TimescaleDBPersistenceService(ItemRegistry itemRegistry, TimescaleDBMetadataService metadataService, + @Nullable HikariDataSource dataSource) { + this.itemRegistry = itemRegistry; + this.metadataService = metadataService; + this.dataSource = dataSource; + } + + @Activate + public void activate(final Map config) { + String url = (String) config.getOrDefault("url", ""); + if (url.isBlank()) { + LOGGER.warn("TimescaleDB persistence not configured: missing 'url'. " + + "Configure org.openhab.timescaledb:url."); + return; + } + + String user = (String) config.getOrDefault("user", "openhab"); + String password = (String) config.getOrDefault("password", ""); + int maxConnections = parseIntConfig(config, "maxConnections", 5); + int connectTimeout = parseIntConfig(config, "connectTimeout", 5000); + String chunkInterval = (String) config.getOrDefault("chunkInterval", "7 days"); + int retentionDays = parseIntConfig(config, "retentionDays", 0); + int compressionAfterDays = parseIntConfig(config, "compressionAfterDays", 0); + + LOGGER.debug( + "Activating TimescaleDB persistence: url={}, user={}, maxConnections={}, " + + "chunkInterval={}, retentionDays={}, compressionAfterDays={}", + url, user, maxConnections, chunkInterval, retentionDays, compressionAfterDays); + + HikariDataSource ds; + try { + ds = createDataSource(url, user, password, maxConnections, connectTimeout); + } catch (Exception e) { + LOGGER.error("Failed to create TimescaleDB connection pool: {}", e.getMessage(), e); + return; + } + dataSource = ds; + + try (Connection conn = ds.getConnection()) { + TimescaleDBSchema.initialize(conn, chunkInterval, compressionAfterDays, retentionDays); + } catch (SQLException e) { + LOGGER.error("Failed to initialize TimescaleDB schema: {}", e.getMessage(), e); + ds.close(); + dataSource = null; + return; + } + + if (compressionAfterDays > 0) { + LOGGER.warn("TimescaleDB: compressionAfterDays={} is set. 
Ensure all per-item retainRawDays " + + "are less than compressionAfterDays, otherwise downsampling will attempt to write into " + + "already-compressed (read-only) chunks and cause SQLExceptions.", compressionAfterDays); + } + + // Schedule the daily downsampling job via the openHAB shared thread pool + TimescaleDBDownsampleJob job = new TimescaleDBDownsampleJob(ds, metadataService); + downsampleJobInstance = job; + long initialDelay = secondsUntilMidnight(); + downsampleJob = ThreadPoolManager.getScheduledPool(THREAD_POOL_NAME).scheduleWithFixedDelay(job, initialDelay, + TimeUnit.DAYS.toSeconds(1), TimeUnit.SECONDS); + LOGGER.info("Downsampling job scheduled: first run in {}s, then every 24h", initialDelay); + + LOGGER.info("TimescaleDB persistence service activated"); + } + + /** + * Triggers the downsampling job immediately in the calling thread. + * Intended for use by the Karaf console command for on-demand testing. + * + * @return {@code true} if the job ran, {@code false} if the service is not yet activated. 
+ */ + public boolean runDownsampleNow() { + TimescaleDBDownsampleJob job = downsampleJobInstance; + if (job == null) { + return false; + } + job.run(); + return true; + } + + @Deactivate + public void deactivate() { + LOGGER.debug("Deactivating TimescaleDB persistence service"); + itemIdCache.clear(); + + ScheduledFuture job = downsampleJob; + if (job != null) { + job.cancel(false); + downsampleJob = null; + } + downsampleJobInstance = null; + + HikariDataSource ds = dataSource; + if (ds != null) { + ds.close(); + dataSource = null; + } + LOGGER.info("TimescaleDB persistence service deactivated"); + } + + // ------------------------------------------------------------------------- + // PersistenceService + // ------------------------------------------------------------------------- + + @Override + public String getId() { + return SERVICE_ID; + } + + @Override + public String getLabel(@Nullable Locale locale) { + return SERVICE_LABEL; + } + + @Override + public List getSuggestedStrategies() { + return Collections.emptyList(); + } + + @Override + public void store(Item item) { + store(item, null); + } + + @Override + public void store(Item item, @Nullable String alias) { + store(item, ZonedDateTime.now(), item.getState(), alias); + } + + // ------------------------------------------------------------------------- + // ModifiablePersistenceService (includes QueryablePersistenceService) + // ------------------------------------------------------------------------- + + @Override + public void store(Item item, ZonedDateTime date, State state) { + store(item, date, state, null); + } + + @Override + public void store(Item item, ZonedDateTime date, State state, @Nullable String alias) { + if (state instanceof UnDefType) { + LOGGER.trace("Skipping store for item '{}': state is UnDefType", item.getName()); + return; + } + + TimescaleDBMapper.Row row = TimescaleDBMapper.toRow(state); + if (row == null) { + // Warning is already logged by the mapper + return; + } + + String 
name = alias != null ? alias : item.getName(); + @Nullable + String label = item.getLabel(); + + HikariDataSource ds = dataSource; + if (ds == null) { + LOGGER.warn("TimescaleDB data source not available — cannot store item '{}'", name); + return; + } + + try (Connection conn = ds.getConnection()) { + int itemId = getOrCreateItemId(conn, name, label); + TimescaleDBQuery.insert(conn, itemId, date, row); + } catch (SQLException e) { + LOGGER.error("Failed to store item '{}': {}", name, e.getMessage(), e); + } + } + + @Override + public Iterable query(FilterCriteria filter) { + return query(filter, null); + } + + @Override + public Iterable query(FilterCriteria filter, @Nullable String alias) { + String itemName = filter.getItemName(); + if (itemName == null) { + LOGGER.warn("FilterCriteria has no item name — returning empty query result"); + return Collections.emptyList(); + } + + String queryName = alias != null ? alias : itemName; + + @Nullable + Integer itemId = itemIdCache.get(queryName); + if (itemId == null) { + HikariDataSource dsFallback = dataSource; + if (dsFallback == null) { + LOGGER.warn( + "TimescaleDB data source not available while resolving item_id for '{}' — returning empty query result", + queryName); + return Collections.emptyList(); + } + try (Connection connFallback = dsFallback.getConnection()) { + Optional resolved = TimescaleDBQuery.findItemId(connFallback, queryName); + if (resolved.isEmpty()) { + LOGGER.debug("Item '{}' not present in TimescaleDB — returning empty query result", queryName); + return Collections.emptyList(); + } + itemId = resolved.get(); + itemIdCache.put(queryName, itemId); + } catch (SQLException e) { + LOGGER.error("Failed to resolve item_id for item '{}': {}", queryName, e.getMessage(), e); + return Collections.emptyList(); + } + } + + Item item; + try { + item = itemRegistry.getItem(itemName); + } catch (ItemNotFoundException e) { + LOGGER.warn("Item '{}' not found in ItemRegistry — returning empty query result", 
itemName); + return Collections.emptyList(); + } + + HikariDataSource ds = dataSource; + if (ds == null) { + LOGGER.warn("TimescaleDB data source not available — returning empty query result"); + return Collections.emptyList(); + } + + try (Connection conn = ds.getConnection()) { + return TimescaleDBQuery.query(conn, item, itemId, filter); + } catch (SQLException e) { + LOGGER.error("Query failed for item '{}': {}", queryName, e.getMessage(), e); + return Collections.emptyList(); + } + } + + @Override + public boolean remove(FilterCriteria filter) { + String itemName = filter.getItemName(); + if (itemName == null) { + LOGGER.warn("FilterCriteria has no item name — cannot remove data"); + return false; + } + + @Nullable + Integer itemId = itemIdCache.get(itemName); + if (itemId == null) { + HikariDataSource dsFallback = dataSource; + if (dsFallback == null) { + LOGGER.warn( + "TimescaleDB data source not available while resolving item_id for '{}' — cannot remove data", + itemName); + return false; + } + try (Connection connFallback = dsFallback.getConnection()) { + Optional resolved = TimescaleDBQuery.findItemId(connFallback, itemName); + if (resolved.isEmpty()) { + LOGGER.debug("Item '{}' not present in TimescaleDB — nothing to remove", itemName); + return false; + } + itemId = resolved.get(); + itemIdCache.put(itemName, itemId); + } catch (SQLException e) { + LOGGER.error("Failed to resolve item_id for item '{}': {}", itemName, e.getMessage(), e); + return false; + } + } + + HikariDataSource ds = dataSource; + if (ds == null) { + LOGGER.warn("TimescaleDB data source not available — cannot remove data for item '{}'", itemName); + return false; + } + + try (Connection conn = ds.getConnection()) { + int deleted = TimescaleDBQuery.remove(conn, itemId, filter); + LOGGER.debug("Removed {} row(s) for item '{}'", deleted, itemName); + return true; + } catch (SQLException e) { + LOGGER.error("Failed to remove data for item '{}': {}", itemName, e.getMessage(), e); + return 
false; + } + } + + // ------------------------------------------------------------------------- + // Internal helpers + // ------------------------------------------------------------------------- + + private int getOrCreateItemId(Connection conn, String name, @Nullable String label) throws SQLException { + Integer cached = itemIdCache.get(name); + if (cached != null) { + return cached; + } + int id = TimescaleDBQuery.getOrCreateItemId(conn, name, label); + itemIdCache.put(name, id); + return id; + } + + private static HikariDataSource createDataSource(String url, String user, String password, int maxConnections, + int connectTimeoutMs) { + HikariConfig cfg = new HikariConfig(); + // Explicitly set the driver class name so HikariCP uses Class.forName() in the + // bundle classloader instead of DriverManager.getDriver(). In an OSGi runtime + // DriverManager lives in the boot classloader and cannot see the PostgreSQL driver + // that is embedded in this bundle, which causes "Failed to get driver instance". 
+ cfg.setDriverClassName("org.postgresql.Driver"); + cfg.setJdbcUrl(url); + cfg.setUsername(user); + cfg.setPassword(password); + cfg.setMaximumPoolSize(maxConnections); + cfg.setConnectionTimeout(connectTimeoutMs); + cfg.setPoolName("timescaledb-persistence"); + return new HikariDataSource(cfg); + } + + static int parseIntConfig(Map config, String key, int defaultValue) { + Object val = config.get(key); + if (val == null) { + return defaultValue; + } + try { + return Integer.parseInt(val.toString()); + } catch (NumberFormatException e) { + LOGGER.warn("Invalid integer value '{}' for config key '{}', using default {}", val, key, defaultValue); + return defaultValue; + } + } + + static long secondsUntilMidnight() { + ZonedDateTime now = ZonedDateTime.now(); + ZonedDateTime midnight = now.toLocalDate().plusDays(1).atStartOfDay(now.getZone()); + return java.time.Duration.between(now, midnight).getSeconds(); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java new file mode 100644 index 0000000000000..6b046bc09e2db --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQuery.java @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.items.Item; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.persistence.FilterCriteria.Ordering; +import org.openhab.core.persistence.HistoricItem; +import org.openhab.core.types.State; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * SQL query builder and executor for all TimescaleDB persistence operations. + * + *

+ * All user-controlled values (item name, timestamps, state values) are passed as + * JDBC {@link PreparedStatement} parameters to prevent SQL injection. + * The only dynamically formatted strings are validated enum/allowlist values + * (SQL operator, ORDER BY direction). + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public class TimescaleDBQuery { + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBQuery.class); + + // --- INSERT --- + // ON CONFLICT DO NOTHING (no conflict target) silently discards duplicate writes. + // TimescaleDB hypertables do not support column-inference conflict targets, so the target-less + // form must be used. The UNIQUE(time, item_id, downsampled) constraint on the table ensures + // that a raw row (downsampled=FALSE) and a downsampled row (downsampled=TRUE) at the same + // timestamp can coexist and only true duplicates are dropped. + private static final String SQL_INSERT = "INSERT INTO items (time, item_id, value, string, unit) VALUES (?, ?, ?, ?, ?) ON CONFLICT DO NOTHING"; + + // --- item_meta lookup / insert --- + private static final String SQL_SELECT_ITEM_ID = "SELECT id FROM item_meta WHERE name = ?"; + + private static final String SQL_INSERT_ITEM_META = "INSERT INTO item_meta (name, label) VALUES (?, ?) ON CONFLICT (name) DO UPDATE SET label = EXCLUDED.label RETURNING id"; + + // --- SELECT base --- + private static final String SQL_SELECT_BASE = "SELECT time, value, string, unit FROM items WHERE item_id = ?"; + + // --- DELETE --- + private static final String SQL_DELETE_BASE = "DELETE FROM items WHERE item_id = ?"; + + private TimescaleDBQuery() { + // utility class + } + + /** + * Inserts a single item state row. + * + * @param connection The JDBC connection. + * @param itemId The item_id from {@code item_meta}. + * @param timestamp The measurement timestamp. + * @param row The mapped state row. + * @throws SQLException on any database error. 
+ */ + public static void insert(Connection connection, int itemId, ZonedDateTime timestamp, TimescaleDBMapper.Row row) + throws SQLException { + try (PreparedStatement ps = connection.prepareStatement(SQL_INSERT)) { + ps.setTimestamp(1, Timestamp.from(timestamp.toInstant())); + ps.setInt(2, itemId); + Double value = row.value(); + if (value != null) { + ps.setDouble(3, value); + } else { + ps.setNull(3, Types.DOUBLE); + } + ps.setString(4, row.string()); + ps.setString(5, row.unit()); + ps.executeUpdate(); + } + LOGGER.debug("Stored item_id={} at {} value={} string={} unit={}", itemId, timestamp, row.value(), row.string(), + row.unit()); + } + + /** + * Returns the item_id for the given name, or inserts a new {@code item_meta} row and returns its id. + * + * @param connection The JDBC connection. + * @param name The item name. + * @param label The item label (may be null; stored for informational purposes). + * @return The item_id. + * @throws SQLException on any database error. + */ + public static int getOrCreateItemId(Connection connection, String name, @Nullable String label) + throws SQLException { + // Try SELECT first (fast path for known items) + try (PreparedStatement ps = connection.prepareStatement(SQL_SELECT_ITEM_ID)) { + ps.setString(1, name); + try (ResultSet rs = ps.executeQuery()) { + if (rs.next()) { + return rs.getInt(1); + } + } + } + // Not found: INSERT with ON CONFLICT DO UPDATE so concurrent calls are safe + try (PreparedStatement ps = connection.prepareStatement(SQL_INSERT_ITEM_META)) { + ps.setString(1, name); + ps.setString(2, label); + try (ResultSet rs = ps.executeQuery()) { + if (rs.next()) { + int id = rs.getInt(1); + LOGGER.debug("Registered new item '{}' with item_id={}", name, id); + return id; + } + } + } + throw new SQLException("Failed to get or create item_meta entry for item '" + name + "'"); + } + + /** + * Queries historic items according to the given filter criteria. + * + * @param connection The JDBC connection. 
+ * @param item The openHAB item (used for state reconstruction). + * @param itemId The item_id from {@code item_meta}. + * @param filter The filter criteria. + * @return An ordered list of matching {@link HistoricItem}s. + * @throws SQLException on any database error. + */ + public static List query(Connection connection, Item item, int itemId, FilterCriteria filter) + throws SQLException { + StringBuilder sql = new StringBuilder(SQL_SELECT_BASE); + List params = new ArrayList<>(); + params.add(itemId); + + // Date range filters + ZonedDateTime beginDate = filter.getBeginDate(); + if (beginDate != null) { + sql.append(" AND time >= ?"); + params.add(Timestamp.from(beginDate.toInstant())); + } + ZonedDateTime endDate = filter.getEndDate(); + if (endDate != null) { + sql.append(" AND time <= ?"); + params.add(Timestamp.from(endDate.toInstant())); + } + + // Optional state filter on the value column (numeric states only) + State filterState = filter.getState(); + if (filterState != null) { + TimescaleDBMapper.Row filterRow = TimescaleDBMapper.toRow(filterState); + String sqlOp = TimescaleDBMapper.toSqlOperator(filter.getOperator()); + Double filterValue = filterRow != null ? filterRow.value() : null; + if (filterValue != null && sqlOp != null) { + sql.append(" AND value ").append(sqlOp).append(" ?"); + params.add(filterValue); + } else { + LOGGER.debug("State filter on non-numeric or unsupported state/operator — ignoring"); + } + } + + // ORDER BY + String direction = filter.getOrdering() == Ordering.ASCENDING ? 
"ASC" : "DESC"; + sql.append(" ORDER BY time ").append(direction); + + // Pagination + if (filter.getPageSize() > 0) { + sql.append(" LIMIT ?"); + params.add(filter.getPageSize()); + if (filter.getPageNumber() > 0) { + sql.append(" OFFSET ?"); + params.add((long) filter.getPageNumber() * filter.getPageSize()); + } + } + + LOGGER.debug("Query SQL: {} params={}", sql, params); + + List results = new ArrayList<>(); + try (PreparedStatement ps = connection.prepareStatement(sql.toString())) { + for (int i = 0; i < params.size(); i++) { + ps.setObject(i + 1, params.get(i)); + } + try (ResultSet rs = ps.executeQuery()) { + while (rs.next()) { + Timestamp time = rs.getTimestamp(1); + Double value = (Double) rs.getObject(2); + String string = rs.getString(3); + String unit = rs.getString(4); + + State state = TimescaleDBMapper.toState(item, value, string, unit); + results.add(new TimescaleDBHistoricItem(item.getName(), state, time.toInstant())); + } + } + } + LOGGER.debug("Query returned {} items for item_id={}", results.size(), itemId); + return results; + } + + /** + * Deletes rows matching the filter criteria. + * + * @param connection The JDBC connection. + * @param itemId The item_id from {@code item_meta}. + * @param filter The filter criteria (only date range is evaluated). + * @return The number of deleted rows. + * @throws SQLException on any database error. 
+ */ + public static int remove(Connection connection, int itemId, FilterCriteria filter) throws SQLException { + StringBuilder sql = new StringBuilder(SQL_DELETE_BASE); + List params = new ArrayList<>(); + params.add(itemId); + + ZonedDateTime beginDate = filter.getBeginDate(); + if (beginDate != null) { + sql.append(" AND time >= ?"); + params.add(Timestamp.from(beginDate.toInstant())); + } + ZonedDateTime endDate = filter.getEndDate(); + if (endDate != null) { + sql.append(" AND time <= ?"); + params.add(Timestamp.from(endDate.toInstant())); + } + + LOGGER.debug("Remove SQL: {} params={}", sql, params); + + try (PreparedStatement ps = connection.prepareStatement(sql.toString())) { + for (int i = 0; i < params.size(); i++) { + ps.setObject(i + 1, params.get(i)); + } + int deleted = ps.executeUpdate(); + LOGGER.debug("Deleted {} rows for item_id={}", deleted, itemId); + return deleted; + } + } + + /** + * Looks up the item_id for the given name from {@code item_meta}, without creating a new entry. + * + *

+ * Used by the downsampling job to resolve item names that exist in the DB but are not yet in + * the in-memory cache (e.g. after a bundle restart where no {@code store()} call has been made). + * + * @param connection The JDBC connection. + * @param name The item name. + * @return An {@link java.util.Optional} containing the item_id, or empty if not found. + * @throws SQLException on any database error. + */ + public static java.util.Optional findItemId(Connection connection, String name) throws SQLException { + try (PreparedStatement ps = connection.prepareStatement(SQL_SELECT_ITEM_ID)) { + ps.setString(1, name); + try (ResultSet rs = ps.executeQuery()) { + if (rs.next()) { + return java.util.Optional.of(rs.getInt(1)); + } + } + } + return java.util.Optional.empty(); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchema.java b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchema.java new file mode 100644 index 0000000000000..9e2e228c5777b --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchema.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Creates and migrates the TimescaleDB schema on startup. + * + *

+ * Schema overview:
+ * <ul>
+ * <li>{@code item_meta} — name-to-ID lookup table for items</li>
+ * <li>{@code items} — single hypertable for all item states</li>
+ * </ul>
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault +public class TimescaleDBSchema { + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBSchema.class); + + private static final String SQL_CREATE_ITEM_META = """ + CREATE TABLE IF NOT EXISTS item_meta ( + id SERIAL PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + label TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ) + """; + + private static final String SQL_CREATE_ITEMS = """ + CREATE TABLE IF NOT EXISTS items ( + time TIMESTAMPTZ NOT NULL, + item_id INTEGER NOT NULL REFERENCES item_meta(id), + value DOUBLE PRECISION, + string TEXT, + unit TEXT, + downsampled BOOLEAN NOT NULL DEFAULT FALSE, + CONSTRAINT items_time_item_id_downsampled_ukey UNIQUE (time, item_id, downsampled) + ) + """; + + /** + * Migration: replaces any legacy UNIQUE(time, item_id) constraint with the correct + * UNIQUE(time, item_id, downsampled) constraint so that a raw row and a downsampled row + * at the same timestamp can coexist. For fresh installs the correct constraint is already + * part of CREATE TABLE above, so the DO block is a no-op. 
+ */ + private static final String SQL_MIGRATE_ADD_UNIQUE_CONSTRAINT = """ + DO $$ BEGIN + IF EXISTS ( + SELECT 1 FROM pg_constraint + WHERE conname = 'items_time_item_id_ukey' + ) THEN + ALTER TABLE items DROP CONSTRAINT items_time_item_id_ukey; + END IF; + IF NOT EXISTS ( + SELECT 1 FROM pg_constraint + WHERE conname = 'items_time_item_id_downsampled_ukey' + ) THEN + ALTER TABLE items ADD CONSTRAINT items_time_item_id_downsampled_ukey UNIQUE (time, item_id, downsampled); + END IF; + END $$ + """; + + private static final String SQL_CREATE_HYPERTABLE = "SELECT create_hypertable('items', 'time', if_not_exists => TRUE, chunk_time_interval => ?::INTERVAL)"; + + private static final String SQL_CREATE_INDEX = "CREATE INDEX IF NOT EXISTS items_item_id_time_idx ON items (item_id, time DESC)"; + + private static final String SQL_CHECK_TIMESCALEDB = "SELECT extname FROM pg_extension WHERE extname = 'timescaledb'"; + + private static final String SQL_ENABLE_COMPRESSION = """ + ALTER TABLE items SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'item_id', + timescaledb.compress_orderby = 'time DESC' + ) + """; + + private static final String SQL_ADD_COMPRESSION_POLICY = "SELECT add_compression_policy('items', INTERVAL '%d days', if_not_exists => TRUE)"; + + private static final String SQL_ADD_RETENTION_POLICY = "SELECT add_retention_policy('items', INTERVAL '%d days', if_not_exists => TRUE)"; + + private TimescaleDBSchema() { + // utility class + } + + /** + * Initializes the full schema. Throws {@link SQLException} if the TimescaleDB extension + * is not installed or if any DDL statement fails. + * + * @param connection An open JDBC connection. + * @param chunkInterval Chunk interval for the hypertable, e.g. {@code "7 days"}. + * @param compressionAfterDays Compress chunks older than N days. 0 = disabled. + * @param retentionDays Drop data older than N days via retention policy. 0 = disabled. 
+ * @throws SQLException on any database error, including missing TimescaleDB extension. + */ + public static void initialize(Connection connection, String chunkInterval, int compressionAfterDays, + int retentionDays) throws SQLException { + checkTimescaleDBExtension(connection); + createTables(connection, chunkInterval); + if (compressionAfterDays > 0) { + setupCompression(connection, compressionAfterDays); + } + if (retentionDays > 0) { + setupRetentionPolicy(connection, retentionDays); + } + LOGGER.info("TimescaleDB schema initialized (chunkInterval={}, compression={}d, retention={}d)", chunkInterval, + compressionAfterDays, retentionDays); + } + + private static void checkTimescaleDBExtension(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(SQL_CHECK_TIMESCALEDB)) { + if (!rs.next()) { + throw new SQLException("TimescaleDB extension is not installed in the target database. " + + "Run 'CREATE EXTENSION IF NOT EXISTS timescaledb;' first."); + } + LOGGER.debug("TimescaleDB extension found"); + } + } + + private static void createTables(Connection connection, String chunkInterval) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.execute(SQL_CREATE_ITEM_META); + LOGGER.debug("Table item_meta ready"); + + stmt.execute(SQL_CREATE_ITEMS); + LOGGER.debug("Table items ready"); + } + + try (PreparedStatement ps = connection.prepareStatement(SQL_CREATE_HYPERTABLE)) { + ps.setString(1, chunkInterval); + ps.execute(); + LOGGER.debug("Hypertable configured with chunk interval '{}'", chunkInterval); + } + + try (Statement stmt = connection.createStatement()) { + stmt.execute(SQL_CREATE_INDEX); + LOGGER.debug("Index on (item_id, time DESC) ready"); + + stmt.execute(SQL_MIGRATE_ADD_UNIQUE_CONSTRAINT); + LOGGER.debug("UNIQUE(time, item_id, downsampled) constraint ensured"); + } + } + + private static void setupCompression(Connection connection, int 
compressionAfterDays) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.execute(SQL_ENABLE_COMPRESSION); + stmt.execute(SQL_ADD_COMPRESSION_POLICY.formatted(compressionAfterDays)); + LOGGER.info("Compression policy set: compress after {} days", compressionAfterDays); + } + } + + private static void setupRetentionPolicy(Connection connection, int retentionDays) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.execute(SQL_ADD_RETENTION_POLICY.formatted(retentionDays)); + LOGGER.info("Retention policy set: drop data older than {} days", retentionDays); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/addon/addon.xml b/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/addon/addon.xml new file mode 100644 index 0000000000000..e474782f16464 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/addon/addon.xml @@ -0,0 +1,13 @@ + + + + persistence + TimescaleDB Persistence + This is the persistence add-on for TimescaleDB. + local + + + + diff --git a/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/config/timescaledb.xml b/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/config/timescaledb.xml new file mode 100644 index 0000000000000..792fe97cf6888 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/main/resources/OH-INF/config/timescaledb.xml @@ -0,0 +1,68 @@ + + + + + + + false + + + + + true + + + + url + + JDBC connection URL, e.g. jdbc:postgresql://localhost:5432/openhab + + + + + Database username + openhab + + + + password + + Database password + + + + + TimescaleDB hypertable chunk interval (e.g. "7 days", "1 day") + 7 days + + + + + Drop all data older than N days. 0 = disabled. + 0 + + + + + Compress chunks older than N days. 0 = disabled. 
+ 0 + + + + + Maximum number of database connections in the pool + 5 + + + + + Connection timeout in milliseconds + 5000 + + + + diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/BundleManifestTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/BundleManifestTest.java new file mode 100644 index 0000000000000..c13298d6309c0 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/BundleManifestTest.java @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assumptions.*; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.jar.Manifest; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.Test; + +/** + * Verifies structural requirements of the OSGi bundle to ensure it deploys and integrates + * correctly into the openHAB runtime and UI. + * + *
+ * <ul>
+ * <li>Checks that all {@code Import-Package} entries are available in the openHAB OSGi
+ * runtime (prevents "Unresolved requirement" errors on startup).</li>
+ * <li>Checks that {@code OH-INF/addon/addon.xml} exists (required for the addon to appear
+ * in the openHAB UI and settings).</li>
+ * </ul>
+ * + * @author Contributors to the openHAB project - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +class BundleManifestTest { + + /** + * Package prefixes known to be available in the openHAB OSGi runtime. + * Each entry must end with '.' so that e.g. "org.slf4j." matches both + * the exact package "org.slf4j" and any sub-package "org.slf4j.Logger". + */ + private static final Set KNOWN_OSGI_PACKAGES = Set.of("java.", // Java SE + "javax.", // Java SE extensions + "org.osgi.", // OSGi framework + "org.slf4j.", // SLF4J (embedded in openHAB core) + "org.openhab.", // openHAB core + "org.w3c.dom.", // Java XML DOM + "org.xml.sax.", // Java XML SAX + "org.ietf.jgss.", // Java GSSAPI + "io.micrometer." // Micrometer metrics (embedded in openHAB core) + ); + + @Test + void bundleManifestImportsOnlyPackagesAvailableInOpenHABRuntime() throws IOException { + Path manifestPath = Path.of("target/classes/META-INF/MANIFEST.MF"); + assumeTrue(Files.exists(manifestPath), "MANIFEST.MF not yet generated — run 'mvn compile' first"); + + String importPackageHeader; + try (InputStream in = Files.newInputStream(manifestPath)) { + Manifest manifest = new Manifest(in); + importPackageHeader = manifest.getMainAttributes().getValue("Import-Package"); + } + assertNotNull(importPackageHeader, "Import-Package header must be present in MANIFEST.MF"); + + // Append '.' so that e.g. "org.slf4j" matches prefix "org.slf4j." + // and "org.slf4j.Logger" also matches "org.slf4j.". + List unknownPackages = parsePackageNames(importPackageHeader).stream() + .filter(pkg -> KNOWN_OSGI_PACKAGES.stream().noneMatch(known -> (pkg + ".").startsWith(known))).toList(); + + assertTrue(unknownPackages.isEmpty(), () -> """ + Bundle imports packages not available in the openHAB OSGi runtime: %s + + These are likely optional features of embedded libraries (e.g. GSSAPI auth, \ + bytecode weaving). + Fix: add '!the.package.*' to in pom.xml for each package. 
+ Example: add ',!%s.*' to the existing bnd.importpackage property. + """.formatted(unknownPackages, unknownPackages.get(0))); + } + + @Test + void servicePidFollowsOpenHABConvention() { + assertTrue(TimescaleDBPersistenceService.CONFIGURATION_PID.matches("org\\.openhab\\.[a-z]+"), + "CONFIGURATION_PID must be 'org.openhab.' (no extra segments) so that " + + "timescaledb.cfg is resolved correctly by the openHAB ConfigDispatcher. Got: " + + TimescaleDBPersistenceService.CONFIGURATION_PID); + } + + @Test + void addonXmlExists() { + assertTrue(Files.exists(Path.of("src/main/resources/OH-INF/addon/addon.xml")), + "OH-INF/addon/addon.xml is missing — the addon will not appear in the openHAB UI. " + + "Create src/main/resources/OH-INF/addon/addon.xml."); + } + + /** + * Parses the OSGi {@code Import-Package} header value into a list of package names, + * stripping directives and attributes (e.g. {@code version="[1.0,2)"}). + * + *

+ * The header is comma-separated, but commas also appear inside quoted version ranges + * like {@code version="[1.16,2)"}. This parser handles quoted strings correctly. + */ + static List parsePackageNames(String importPackageHeader) { + List packages = new ArrayList<>(); + int start = 0; + boolean inQuotes = false; + for (int i = 0; i < importPackageHeader.length(); i++) { + char c = importPackageHeader.charAt(i); + if (c == '"') { + inQuotes = !inQuotes; + } else if (c == ',' && !inQuotes) { + addPackageName(packages, importPackageHeader.substring(start, i)); + start = i + 1; + } + } + addPackageName(packages, importPackageHeader.substring(start)); + return packages; + } + + private static void addPackageName(List result, String clause) { + String trimmed = clause.trim(); + if (trimmed.isEmpty()) { + return; + } + // A clause is "package.name;attr=val;directive:=val" — take only the package name part + int semicolon = trimmed.indexOf(';'); + result.add(semicolon >= 0 ? trimmed.substring(0, semicolon).trim() : trimmed); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/DownsampleConfigTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/DownsampleConfigTest.java new file mode 100644 index 0000000000000..bd55cccacb906 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/DownsampleConfigTest.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.Test; + +/** + * Unit tests for {@link DownsampleConfig}. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +class DownsampleConfigTest { + + // --- retentionOnly() guard (Fix 6) --- + + @Test + void retentionOnlyWithPositiveDaysSucceeds() { + DownsampleConfig cfg = DownsampleConfig.retentionOnly(30); + assertFalse(cfg.hasDownsampling()); + assertNull(cfg.function()); + assertNull(cfg.sqlInterval()); + assertEquals(30, cfg.retentionDays()); + assertEquals(0, cfg.retainRawDays()); + } + + @Test + void retentionOnlyWithZeroDaysThrows() { + assertThrows(IllegalArgumentException.class, () -> DownsampleConfig.retentionOnly(0), + "retentionDays=0 must throw — a zero-day retention policy would delete everything immediately"); + } + + @Test + void retentionOnlyWithNegativeDaysThrows() { + assertThrows(IllegalArgumentException.class, () -> DownsampleConfig.retentionOnly(-1), + "Negative retentionDays must throw — would generate dangerous SQL window"); + } + + @Test + void retentionOnlyExceptionMessageMentionsValue() { + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, + () -> DownsampleConfig.retentionOnly(-5)); + String msg = ex.getMessage(); + assertNotNull(msg, "Exception must have a message"); + assertTrue(msg.contains("-5"), "Exception message should include the rejected value"); + } + + // --- toSqlInterval() allowlist --- + + @Test + void toSqlIntervalValidIntervalReturnsLiteral() { + 
assertEquals("1 hour", DownsampleConfig.toSqlInterval("1h")); + assertEquals("1 day", DownsampleConfig.toSqlInterval("1d")); + assertEquals("15 minutes", DownsampleConfig.toSqlInterval("15m")); + } + + @Test + void toSqlIntervalUnknownIntervalThrows() { + assertThrows(IllegalArgumentException.class, () -> DownsampleConfig.toSqlInterval("99x")); + } + + // --- hasDownsampling() --- + + @Test + void hasDownsamplingTrueForFullConfig() { + DownsampleConfig cfg = new DownsampleConfig(AggregationFunction.AVG, "1 hour", 5, 365); + assertTrue(cfg.hasDownsampling()); + } + + @Test + void hasDownsamplingFalseForRetentionOnly() { + DownsampleConfig cfg = DownsampleConfig.retentionOnly(90); + assertFalse(cfg.hasDownsampling()); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBContainerTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBContainerTest.java new file mode 100644 index 0000000000000..6a6b434c0df80 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBContainerTest.java @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.time.ZonedDateTime; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ScheduledFuture; +import java.util.stream.StreamSupport; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.openhab.core.items.GroupItem; +import org.openhab.core.items.ItemRegistry; +import org.openhab.core.items.Metadata; +import org.openhab.core.items.MetadataKey; +import org.openhab.core.items.MetadataRegistry; +import org.openhab.core.library.items.CallItem; +import org.openhab.core.library.items.ColorItem; +import org.openhab.core.library.items.ContactItem; +import org.openhab.core.library.items.DateTimeItem; +import org.openhab.core.library.items.DimmerItem; +import org.openhab.core.library.items.ImageItem; +import org.openhab.core.library.items.LocationItem; +import org.openhab.core.library.items.NumberItem; +import org.openhab.core.library.items.PlayerItem; +import org.openhab.core.library.items.RollershutterItem; +import org.openhab.core.library.items.StringItem; +import org.openhab.core.library.items.SwitchItem; +import org.openhab.core.library.types.DateTimeType; +import 
org.openhab.core.library.types.DecimalType; +import org.openhab.core.library.types.HSBType; +import org.openhab.core.library.types.OnOffType; +import org.openhab.core.library.types.OpenClosedType; +import org.openhab.core.library.types.PercentType; +import org.openhab.core.library.types.PlayPauseType; +import org.openhab.core.library.types.PointType; +import org.openhab.core.library.types.QuantityType; +import org.openhab.core.library.types.RawType; +import org.openhab.core.library.types.StringListType; +import org.openhab.core.library.types.StringType; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.persistence.FilterCriteria.Ordering; +import org.openhab.core.persistence.HistoricItem; +import org.openhab.core.types.State; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; +import org.testcontainers.postgresql.PostgreSQLContainer; +import org.testcontainers.utility.DockerImageName; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +/** + * Integration tests that run against a real TimescaleDB instance via Testcontainers. + * + *

+ * These tests verify end-to-end behavior: schema creation, store/query round-trips for + * all state types, downsampling job execution, and the {@code remove()} operation. + * + *

+ * Tag: {@code integration} — can be excluded from fast unit-test runs with + * {@code mvn test -Dgroups='!integration'}. + * + * @author René Ulbricht - Initial contribution + */ +@Tag("integration") +@Testcontainers +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBContainerTest { + + @Container + static final PostgreSQLContainer DB; + + static { + var container = new PostgreSQLContainer( + DockerImageName.parse("timescale/timescaledb:latest-pg16").asCompatibleSubstituteFor("postgres")); + container.withDatabaseName("openhab_test"); + container.withUsername("openhab"); + container.withPassword("openhab"); + DB = container; + } + + private static HikariDataSource dataSource; + + @BeforeAll + static void setUpDataSource() { + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl(DB.getJdbcUrl()); + cfg.setUsername(DB.getUsername()); + cfg.setPassword(DB.getPassword()); + cfg.setMaximumPoolSize(3); + dataSource = new HikariDataSource(cfg); + } + + @BeforeEach + void initSchema() throws SQLException { + // Drop and recreate for test isolation + try (Connection conn = dataSource.getConnection(); var stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS items CASCADE"); + stmt.execute("DROP TABLE IF EXISTS item_meta CASCADE"); + } + try (Connection conn = dataSource.getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + } + + // ------------------------------------------------------------------ + // Schema + // ------------------------------------------------------------------ + + @Test + @Order(1) + void schemaHypertableexists() throws SQLException { + try (Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn + .prepareStatement("SELECT hypertable_name FROM timescaledb_information.hypertables " + + "WHERE hypertable_name = 'items'"); + ResultSet rs = ps.executeQuery()) { + 
assertTrue(rs.next(), "Hypertable 'items' should exist"); + } + } + + @Test + @Order(2) + void schemaItemmetatableexists() throws SQLException { + try (Connection conn = dataSource.getConnection(); + var stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM item_meta")) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1), "item_meta should be empty after init"); + } + } + + // ------------------------------------------------------------------ + // Store / query round-trips — all state types + // ------------------------------------------------------------------ + + @Test + @Order(10) + void roundTripDecimaltype() throws SQLException { + storeAndVerify("DecimalSensor", new NumberItem("DecimalSensor"), new DecimalType(42.5), + s -> assertInstanceOf(DecimalType.class, s), + s -> assertEquals(42.5, ((DecimalType) s).doubleValue(), 1e-6)); + } + + @Test + @Order(11) + void roundTripQuantitytypeTemperature() throws SQLException { + storeAndVerify("TempSensor", new NumberItem("TempSensor"), new QuantityType<>("23.4 °C"), + s -> assertInstanceOf(QuantityType.class, s), s -> assertEquals("23.4 °C", s.toString())); + } + + @Test + @Order(12) + void roundTripOnofftype() throws SQLException { + storeAndVerify("Switch1", new SwitchItem("Switch1"), OnOffType.ON, s -> assertEquals(OnOffType.ON, s)); + storeAndVerify("Switch2", new SwitchItem("Switch2"), OnOffType.OFF, s -> assertEquals(OnOffType.OFF, s)); + } + + @Test + @Order(13) + void roundTripOpenclosedtype() throws SQLException { + storeAndVerify("Contact1", new ContactItem("Contact1"), OpenClosedType.OPEN, + s -> assertEquals(OpenClosedType.OPEN, s)); + } + + @Test + @Order(14) + void roundTripPercenttype() throws SQLException { + storeAndVerify("Dimmer1", new DimmerItem("Dimmer1"), new PercentType(75), + s -> assertInstanceOf(PercentType.class, s), s -> assertEquals(75, ((PercentType) s).intValue())); + } + + @Test + @Order(15) + void roundTripHsbtype() throws SQLException { + 
storeAndVerify("Color1", new ColorItem("Color1"), new HSBType("120,50,80"), + s -> assertInstanceOf(HSBType.class, s), s -> assertEquals("120,50,80", s.toString())); + } + + @Test + @Order(16) + void roundTripDatetimetype() throws SQLException { + ZonedDateTime dt = ZonedDateTime.parse("2024-06-01T10:00:00+02:00"); + storeAndVerify("DateTime1", new DateTimeItem("DateTime1"), new DateTimeType(dt), + s -> assertInstanceOf(DateTimeType.class, s)); + } + + @Test + @Order(17) + void roundTripStringtype() throws SQLException { + storeAndVerify("String1", new StringItem("String1"), new StringType("hello world"), + s -> assertEquals("hello world", s.toString())); + } + + @Test + @Order(18) + void roundTripPointtypeLocationitem() throws SQLException { + storeAndVerify("Location1", new LocationItem("Location1"), new PointType("52.5200,13.4050,34.0000"), + s -> assertInstanceOf(PointType.class, s), s -> { + PointType pt = (PointType) s; + assertEquals(52.52, pt.getLatitude().doubleValue(), 1e-3); + assertEquals(13.405, pt.getLongitude().doubleValue(), 1e-3); + }); + } + + @Test + @Order(19) + void roundTripPlaypausetypePlayeritem() throws SQLException { + storeAndVerify("Player1", new PlayerItem("Player1"), PlayPauseType.PLAY, + s -> assertEquals(PlayPauseType.PLAY, s)); + storeAndVerify("Player2", new PlayerItem("Player2"), PlayPauseType.PAUSE, + s -> assertEquals(PlayPauseType.PAUSE, s)); + } + + @Test + @Order(20) + void roundTripStringlisttypeCallitem() throws SQLException { + storeAndVerify("Call1", new CallItem("Call1"), new StringListType("Alice", "Bob"), + s -> assertInstanceOf(StringListType.class, s), + s -> assertEquals(new StringListType("Alice", "Bob").toString(), s.toString())); + } + + @Test + @Order(21) + void roundTripRawtypeImageitem() throws SQLException { + byte[] bytes = { (byte) 0x89, 0x50, 0x4E, 0x47 }; // PNG magic bytes + storeAndVerify("Image1", new ImageItem("Image1"), new RawType(bytes, "image/png"), + s -> assertInstanceOf(RawType.class, s), s -> 
{ + RawType raw = (RawType) s; + assertEquals("image/png", raw.getMimeType()); + assertArrayEquals(bytes, raw.getBytes()); + }); + } + + @Test + @Order(22) + void roundTripUpdowntypeRollershutteritem() throws SQLException { + storeAndVerify("Roller2", new RollershutterItem("Roller2"), new PercentType(30), + s -> assertInstanceOf(PercentType.class, s), s -> assertEquals(30, ((PercentType) s).intValue())); + } + + @Test + @Order(23) + void roundTripGroupitemWithnumberbase() throws SQLException { + var baseItem = new NumberItem("GBase"); + var groupItem = new GroupItem("Group1", baseItem); + storeAndVerify("Group1", groupItem, new DecimalType(55.5), s -> assertInstanceOf(DecimalType.class, s), + s -> assertEquals(55.5, ((DecimalType) s).doubleValue(), 1e-6)); + } + + @Test + @Order(24) + void roundTripGroupitemWithswitchbase() throws SQLException { + var baseItem = new SwitchItem("GBase"); + var groupItem = new GroupItem("GroupSwitch1", baseItem); + storeAndVerify("GroupSwitch1", groupItem, OnOffType.ON, s -> assertEquals(OnOffType.ON, s)); + } + + @Test + @Order(25) + void roundTripGroupitemWithcolorbase() throws SQLException { + var baseItem = new ColorItem("GBase"); + var groupItem = new GroupItem("GroupColor1", baseItem); + storeAndVerify("GroupColor1", groupItem, new HSBType("240,100,50"), s -> assertInstanceOf(HSBType.class, s), + s -> assertEquals("240,100,50", s.toString())); + } + + // ------------------------------------------------------------------ + // Query — filtering and ordering + // ------------------------------------------------------------------ + + @Test + @Order(31) + void queryDaterangeReturnsonlyrowsinrange() throws SQLException { + ZonedDateTime t1 = ZonedDateTime.now().minusDays(10); + ZonedDateTime t2 = ZonedDateTime.now().minusDays(5); + ZonedDateTime t3 = ZonedDateTime.now().minusDays(1); + NumberItem item = new NumberItem("RangeSensor"); + + try (Connection conn = dataSource.getConnection()) { + int id = 
TimescaleDBQuery.getOrCreateItemId(conn, "RangeSensor", null); + TimescaleDBQuery.insert(conn, id, t1, new TimescaleDBMapper.Row(1.0, null, null)); + TimescaleDBQuery.insert(conn, id, t2, new TimescaleDBMapper.Row(2.0, null, null)); + TimescaleDBQuery.insert(conn, id, t3, new TimescaleDBMapper.Row(3.0, null, null)); + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("RangeSensor"); + filter.setBeginDate(ZonedDateTime.now().minusDays(7)); + filter.setEndDate(ZonedDateTime.now().minusDays(2)); + + List results = TimescaleDBQuery.query(conn, item, id, filter); + assertEquals(1, results.size(), "Only t2 should be in range"); + assertEquals(2.0, ((DecimalType) results.get(0).getState()).doubleValue(), 1e-6); + } + } + + @Test + @Order(32) + void queryOrderingAscending() throws SQLException { + NumberItem item = new NumberItem("OrderSensor"); + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "OrderSensor", null); + ZonedDateTime base = ZonedDateTime.now().minusHours(3); + TimescaleDBQuery.insert(conn, id, base.plusHours(2), new TimescaleDBMapper.Row(3.0, null, null)); + TimescaleDBQuery.insert(conn, id, base, new TimescaleDBMapper.Row(1.0, null, null)); + TimescaleDBQuery.insert(conn, id, base.plusHours(1), new TimescaleDBMapper.Row(2.0, null, null)); + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("OrderSensor"); + filter.setOrdering(Ordering.ASCENDING); + + List results = TimescaleDBQuery.query(conn, item, id, filter); + assertEquals(3, results.size()); + assertEquals(1.0, ((DecimalType) results.get(0).getState()).doubleValue(), 1e-6); + assertEquals(3.0, ((DecimalType) results.get(2).getState()).doubleValue(), 1e-6); + } + } + + @Test + @Order(33) + void queryPaginationLimitsresults() throws SQLException { + NumberItem item = new NumberItem("PageSensor"); + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, 
"PageSensor", null); + ZonedDateTime base = ZonedDateTime.now().minusHours(10); + for (int i = 0; i < 10; i++) { + TimescaleDBQuery.insert(conn, id, base.plusHours(i), new TimescaleDBMapper.Row((double) i, null, null)); + } + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("PageSensor"); + filter.setOrdering(Ordering.ASCENDING); + filter.setPageSize(3); + filter.setPageNumber(0); + + List page0 = TimescaleDBQuery.query(conn, item, id, filter); + assertEquals(3, page0.size()); + + filter.setPageNumber(1); + List page1 = TimescaleDBQuery.query(conn, item, id, filter); + assertEquals(3, page1.size()); + + // Pages should not overlap + assertNotEquals(page0.get(0).getTimestamp(), page1.get(0).getTimestamp()); + } + } + + // ------------------------------------------------------------------ + // Duplicate timestamp handling + // ------------------------------------------------------------------ + + @Test + @Order(29) + void insertDuplicatetimestampIssilentlyIgnored() throws SQLException { + NumberItem item = new NumberItem("DupSensor"); + ZonedDateTime ts = ZonedDateTime.now().minusHours(1); + + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "DupSensor", null); + + TimescaleDBQuery.insert(conn, id, ts, new TimescaleDBMapper.Row(1.0, null, null)); + // Second write for the same (time, item_id) — must be silently ignored + TimescaleDBQuery.insert(conn, id, ts, new TimescaleDBMapper.Row(99.0, null, null)); + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("DupSensor"); + List results = TimescaleDBQuery.query(conn, item, id, filter); + + assertEquals(1, results.size(), "Duplicate timestamp must be silently ignored, only one row stored"); + assertEquals(1.0, ((DecimalType) results.get(0).getState()).doubleValue(), 1e-6, + "First value must be kept, the duplicate discarded"); + } + } + + // ------------------------------------------------------------------ + // remove + // 
------------------------------------------------------------------ + + @Test + @Order(30) + void removeDeletesbydaterange() throws SQLException { + NumberItem item = new NumberItem("DeleteSensor"); + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "DeleteSensor", null); + ZonedDateTime base = ZonedDateTime.now().minusDays(5); + TimescaleDBQuery.insert(conn, id, base, new TimescaleDBMapper.Row(1.0, null, null)); + TimescaleDBQuery.insert(conn, id, base.plusDays(1), new TimescaleDBMapper.Row(2.0, null, null)); + TimescaleDBQuery.insert(conn, id, base.plusDays(4), new TimescaleDBMapper.Row(3.0, null, null)); + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("DeleteSensor"); + filter.setBeginDate(base.minusMinutes(1)); + filter.setEndDate(base.plusDays(2)); + + int deleted = TimescaleDBQuery.remove(conn, id, filter); + assertEquals(2, deleted, "Should delete the first two rows"); + + FilterCriteria allFilter = new FilterCriteria(); + allFilter.setItemName("DeleteSensor"); + List remaining = TimescaleDBQuery.query(conn, item, id, allFilter); + assertEquals(1, remaining.size(), "One row should remain"); + assertEquals(3.0, ((DecimalType) remaining.get(0).getState()).doubleValue(), 1e-6); + } + } + + // ------------------------------------------------------------------ + // item_id caching + // ------------------------------------------------------------------ + + @Test + @Order(40) + void itemIdCacheSamenamereturnssameid() throws SQLException { + try (Connection conn = dataSource.getConnection()) { + int id1 = TimescaleDBQuery.getOrCreateItemId(conn, "CacheSensor", "label"); + int id2 = TimescaleDBQuery.getOrCreateItemId(conn, "CacheSensor", "label"); + assertEquals(id1, id2, "Same name should always return the same item_id"); + } + } + + // ------------------------------------------------------------------ + // Duplicate / constraint behaviour + // 
------------------------------------------------------------------ + + @Test + @Order(45) + void insertDuplicateRawrowIsSilentlydroppedNoerror() throws SQLException { + // Insert the same (time, item_id) twice as raw rows. + // ON CONFLICT DO NOTHING must swallow the conflict silently — no exception. + ZonedDateTime ts = ZonedDateTime.now().minusHours(1); + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "DupSensor", null); + TimescaleDBQuery.insert(conn, id, ts, new TimescaleDBMapper.Row(1.0, null, null)); + assertDoesNotThrow(() -> TimescaleDBQuery.insert(conn, id, ts, new TimescaleDBMapper.Row(2.0, null, null)), + "Second insert with same (time, item_id) must not throw — ON CONFLICT DO NOTHING"); + } + + // Only one row must exist (the first write wins) + try (Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT COUNT(*), value FROM items WHERE item_id = (SELECT id FROM item_meta WHERE name = 'DupSensor') GROUP BY value"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next(), "At least one row must exist"); + assertEquals(1, rs.getInt(1), "Exactly one raw row must be stored despite two inserts"); + assertEquals(1.0, rs.getDouble(2), 1e-6, "First write must win"); + assertFalse(rs.next(), "No second row must exist"); + } + } + + @Test + @Order(46) + void downsampleRowAtBucketBoundaryCoexistsWithRawrow() throws SQLException { + // Regression test for: downsampled row at exact bucket-boundary timestamp used to + // conflict with the raw row at the same timestamp because the UNIQUE constraint was + // only on (time, item_id). With UNIQUE(time, item_id, downsampled) both rows can + // coexist (one with downsampled=FALSE, one with downsampled=TRUE). + // + // Setup: one raw row exactly at the 2h bucket boundary (e.g. 02:00:00), plus one + // raw row inside the same bucket (02:30:00). 
The downsampling job must produce one + // downsampled row at 02:00:00 WITHOUT silently dropping it due to the raw row there. + + ZonedDateTime bucketBoundary = ZonedDateTime.now().minusDays(2).withHour(2).withMinute(0).withSecond(0) + .withNano(0); + ZonedDateTime insideBucket = bucketBoundary.plusMinutes(30); + + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "BoundarySensor", null); + // raw row exactly on the bucket boundary + TimescaleDBQuery.insert(conn, id, bucketBoundary, new TimescaleDBMapper.Row(10.0, null, null)); + // raw row inside the bucket + TimescaleDBQuery.insert(conn, id, insideBucket, new TimescaleDBMapper.Row(20.0, null, null)); + } + + MetadataRegistry mr = mock(MetadataRegistry.class); + Metadata meta = new Metadata(new MetadataKey("timescaledb", "BoundarySensor"), "AVG", + Map.of("downsampleInterval", "2h", "retainRawDays", "0")); + when(mr.get(new MetadataKey("timescaledb", "BoundarySensor"))).thenReturn(meta); + when(mr.getAll()).thenAnswer(inv -> List.of(meta)); + + int[] itemId = new int[1]; + try (Connection conn = dataSource.getConnection()) { + itemId[0] = TimescaleDBQuery.getOrCreateItemId(conn, "BoundarySensor", null); + } + + new TimescaleDBDownsampleJob(dataSource, new TimescaleDBMetadataService(mr)).run(); + + try (Connection conn = dataSource.getConnection()) { + // The downsampled row at the bucket boundary must exist (avg of 10.0 and 20.0 = 15.0) + try (PreparedStatement ps = conn + .prepareStatement("SELECT value FROM items WHERE item_id = ? 
AND downsampled = TRUE")) { + ps.setInt(1, itemId[0]); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next(), "Downsampled row must exist even when a raw row sits on the bucket boundary"); + assertEquals(15.0, rs.getDouble(1), 1e-6, + "Downsampled value must be the average of both raw rows (10 + 20) / 2 = 15"); + } + } + } + } + + // ------------------------------------------------------------------ + // Downsampling job (integration) + // ------------------------------------------------------------------ + + @Test + @Order(50) + void downsampleJobAggregatesrawrowsanddeletesthem() throws SQLException { + // Seed 6 raw rows: 3 buckets of 2h each, all older than retainRawDays=0 (use 0 for test) + // We use retainRawDays=0 so the job processes all rows immediately (NOW() - 0 days) + // Base is set to :30 past midnight so no raw row lands on a 2h bucket boundary — if a raw row + // shared the exact bucket-boundary timestamp the ON CONFLICT DO NOTHING on the aggregated INSERT + // would silently discard the downsampled row (correct behaviour in production, but not what we + // want to assert here). 
+ ZonedDateTime base = ZonedDateTime.now().minusDays(1).withHour(0).withMinute(30).withSecond(0).withNano(0); + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "DownsampleSensor", null); + + // 3 pairs, each pair falls in a different 2h bucket (00:30/01:30, 02:30/03:30, 04:30/05:30) + for (int bucket = 0; bucket < 3; bucket++) { + for (int offset = 0; offset < 2; offset++) { + TimescaleDBQuery.insert(conn, id, base.plusHours(bucket * 2L + offset), + new TimescaleDBMapper.Row((double) (bucket * 10 + offset), null, null)); + } + } + } + + // Configure metadata for downsampling with retainRawDays=0 + MetadataRegistry metadataRegistry = mock(MetadataRegistry.class); + Metadata meta = new Metadata(new MetadataKey("timescaledb", "DownsampleSensor"), "AVG", + Map.of("downsampleInterval", "2h", "retainRawDays", "0")); + when(metadataRegistry.get(new MetadataKey("timescaledb", "DownsampleSensor"))).thenReturn(meta); + when(metadataRegistry.getAll()).thenAnswer(inv -> List.of(meta)); + + TimescaleDBMetadataService metaService = new TimescaleDBMetadataService(metadataRegistry); + + // Resolve item_id for the job + int[] storedId = new int[1]; + try (Connection conn = dataSource.getConnection()) { + storedId[0] = TimescaleDBQuery.getOrCreateItemId(conn, "DownsampleSensor", null); + } + + TimescaleDBDownsampleJob job = new TimescaleDBDownsampleJob(dataSource, metaService); + job.run(); + + // Verify: raw rows gone, aggregated rows present + try (Connection conn = dataSource.getConnection()) { + int id = storedId[0]; + + // No raw rows should remain (retainRawDays=0 means all raw rows are eligible) + try (PreparedStatement ps = conn + .prepareStatement("SELECT COUNT(*) FROM items WHERE item_id = ? 
AND downsampled = FALSE")) { + ps.setInt(1, id); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1), "No raw rows should remain after downsampling"); + } + } + + // 3 aggregated rows should exist (one per 2h bucket) + try (PreparedStatement ps = conn + .prepareStatement("SELECT COUNT(*) FROM items WHERE item_id = ? AND downsampled = TRUE")) { + ps.setInt(1, id); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1), "3 aggregated rows should exist"); + } + } + } + } + + @Test + @Order(51) + void downsampleJobRetentiondaysDropsolddata() throws SQLException { + ZonedDateTime old = ZonedDateTime.now().minusDays(60); + ZonedDateTime recent = ZonedDateTime.now().minusHours(1); + + try (Connection conn = dataSource.getConnection()) { + int id = TimescaleDBQuery.getOrCreateItemId(conn, "RetentionSensor", null); + TimescaleDBQuery.insert(conn, id, old, new TimescaleDBMapper.Row(1.0, null, null)); + TimescaleDBQuery.insert(conn, id, recent, new TimescaleDBMapper.Row(2.0, null, null)); + } + + MetadataRegistry mr = mock(MetadataRegistry.class); + // retentionDays=30 → the 60-day-old row should be deleted + Metadata meta = new Metadata(new MetadataKey("timescaledb", "RetentionSensor"), "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", "0", "retentionDays", "30")); + when(mr.get(new MetadataKey("timescaledb", "RetentionSensor"))).thenReturn(meta); + when(mr.getAll()).thenAnswer(inv -> List.of(meta)); + + TimescaleDBMetadataService ms = new TimescaleDBMetadataService(mr); + + int[] retentionId = new int[1]; + try (Connection conn = dataSource.getConnection()) { + retentionId[0] = TimescaleDBQuery.getOrCreateItemId(conn, "RetentionSensor", null); + } + + new TimescaleDBDownsampleJob(dataSource, ms).run(); + + try (Connection conn = dataSource.getConnection()) { + FilterCriteria all = new FilterCriteria(); + all.setItemName("RetentionSensor"); + List remaining = 
TimescaleDBQuery.query(conn, new NumberItem("RetentionSensor"), + retentionId[0], all); + // Only the recent row should survive (as a downsampled row) + assertEquals(1, remaining.size(), "Old data should have been dropped by retention policy"); + } + } + + // ------------------------------------------------------------------ + // Compression / Retention policies (schema level) + // ------------------------------------------------------------------ + + @Test + @Order(60) + void schemaCompressionpolicyIsregistered() throws SQLException { + // Re-init schema with compression enabled + try (Connection conn = dataSource.getConnection(); var stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS items CASCADE"); + stmt.execute("DROP TABLE IF EXISTS item_meta CASCADE"); + } + try (Connection conn = dataSource.getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 30, 0); + } + + // Verify compression is enabled on the hypertable + try (Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn + .prepareStatement("SELECT compression_enabled FROM timescaledb_information.hypertables " + + "WHERE hypertable_name = 'items'"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next(), "Hypertable 'items' should exist"); + assertTrue(rs.getBoolean(1), "Compression should be enabled on the hypertable"); + } + // Verify a background policy job was registered (table was freshly recreated, so any job is the compression + // job) + try (Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT COUNT(*) FROM timescaledb_information.jobs WHERE hypertable_name = 'items'"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) > 0, "A compression policy job should be registered"); + } + } + + @Test + @Order(61) + void schemaRetentionpolicyIsregistered() throws SQLException { + try (Connection conn = dataSource.getConnection(); var stmt = conn.createStatement()) 
{ + stmt.execute("DROP TABLE IF EXISTS items CASCADE"); + stmt.execute("DROP TABLE IF EXISTS item_meta CASCADE"); + } + try (Connection conn = dataSource.getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 365); + } + + // Verify a background policy job was registered (table was freshly recreated, so any job is the retention job) + try (Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT COUNT(*) FROM timescaledb_information.jobs WHERE hypertable_name = 'items'"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) > 0, "A retention policy job should be registered"); + } + } + + // ------------------------------------------------------------------ + // TimescaleDBPersistenceService — full lifecycle via real DB + // ------------------------------------------------------------------ + + @Test + @Order(70) + void serviceActivateInitializesschemaandschedulesjob() throws Exception { + MetadataRegistry mr = mock(MetadataRegistry.class); + when(mr.getAll()).thenReturn(Collections.emptyList()); + TimescaleDBPersistenceService service = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mr)); + + service.activate(Map.of("url", DB.getJdbcUrl(), "user", DB.getUsername(), "password", DB.getPassword())); + + var dsField = TimescaleDBPersistenceService.class.getDeclaredField("dataSource"); + dsField.setAccessible(true); + assertNotNull(dsField.get(service), "DataSource must be initialized after activate()"); + + var jobField = TimescaleDBPersistenceService.class.getDeclaredField("downsampleJob"); + jobField.setAccessible(true); + assertNotNull(jobField.get(service), "Downsampling job must be scheduled after activate()"); + + service.deactivate(); + assertNull(dsField.get(service), "DataSource must be null after deactivate()"); + assertNull(jobField.get(service), "Downsampling job must be null after deactivate()"); + } + + @Test + 
@Order(71) + void serviceStoreandqueryViaserviceinterface() throws Exception { + MetadataRegistry mr = mock(MetadataRegistry.class); + when(mr.getAll()).thenReturn(Collections.emptyList()); + ItemRegistry ir = mock(ItemRegistry.class); + NumberItem item = new NumberItem("ServiceSensor"); + when(ir.getItem("ServiceSensor")).thenReturn(item); + + TimescaleDBPersistenceService service = new TimescaleDBPersistenceService(ir, + new TimescaleDBMetadataService(mr)); + service.activate(Map.of("url", DB.getJdbcUrl(), "user", DB.getUsername(), "password", DB.getPassword())); + + try { + service.store(item, ZonedDateTime.now(), new DecimalType(77.7), null); + + FilterCriteria filter = new FilterCriteria(); + filter.setItemName("ServiceSensor"); + List results = StreamSupport + .stream(service.query(filter).spliterator(), false).toList(); + + assertFalse(results.isEmpty(), "Service.query() must return the stored value"); + assertEquals(77.7, ((DecimalType) results.get(0).getState()).doubleValue(), 1e-6); + + assertTrue(service.remove(filter), "Service.remove() must return true for known item"); + } finally { + service.deactivate(); + } + } + + @Test + @Order(72) + @SuppressWarnings("unchecked") + void serviceDeactivateCancelsscheduledfuture() throws Exception { + MetadataRegistry mr = mock(MetadataRegistry.class); + when(mr.getAll()).thenReturn(Collections.emptyList()); + TimescaleDBPersistenceService service = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mr)); + service.activate(Map.of("url", DB.getJdbcUrl(), "user", DB.getUsername(), "password", DB.getPassword())); + + var jobField = TimescaleDBPersistenceService.class.getDeclaredField("downsampleJob"); + jobField.setAccessible(true); + ScheduledFuture job = (ScheduledFuture) jobField.get(service); + assertNotNull(job); + assertFalse(job.isCancelled(), "Job must not be cancelled before deactivate()"); + + service.deactivate(); + assertTrue(job.isCancelled(), "Job must be 
cancelled after deactivate()");
+    }
+
+    // ------------------------------------------------------------------
+    // Helpers
+    // ------------------------------------------------------------------
+
+    @SafeVarargs
+    private void storeAndVerify(String itemName, org.openhab.core.items.Item item, State state,
+            java.util.function.Consumer<State>... assertions) throws SQLException {
+        try (Connection conn = dataSource.getConnection()) {
+            int id = TimescaleDBQuery.getOrCreateItemId(conn, itemName, null);
+            TimescaleDBMapper.Row row = TimescaleDBMapper.toRow(state);
+            assertNotNull(row, "Mapper must produce a row for state type: " + state.getClass().getSimpleName());
+            TimescaleDBQuery.insert(conn, id, ZonedDateTime.now(), row);
+
+            FilterCriteria filter = new FilterCriteria();
+            filter.setItemName(itemName);
+            List<HistoricItem> results = TimescaleDBQuery.query(conn, item, id, filter);
+
+            assertFalse(results.isEmpty(), "Query should return at least one result for item " + itemName);
+            State loadedState = results.get(0).getState();
+            for (var assertion : assertions) {
+                assertion.accept(loadedState);
+            }
+        }
+    }
+}
diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJobTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJobTest.java
new file mode 100644
index 0000000000000..fb410a8322d73
--- /dev/null
+++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleJobTest.java
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2010-2026 Contributors to the openHAB project
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information.
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; + +import javax.sql.DataSource; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.openhab.core.items.Metadata; +import org.openhab.core.items.MetadataKey; +import org.openhab.core.items.MetadataRegistry; + +/** + * Unit tests for {@link TimescaleDBDownsampleJob} using mocked DB connections. + * + *

+ * Verifies:
+ * <ul>
+ * <li>Correct SQL is sent for all interval/function combinations</li>
+ * <li>Both INSERT and DELETE statements are executed within a transaction</li>
+ * <li>Retention DELETE is only sent when retentionDays > 0</li>
+ * <li>Retention-only items (blank value + retentionDays) execute only the retention DELETE</li>
+ * <li>Items not found in item_meta are skipped gracefully without DML</li>
+ * <li>A per-item SQL error does not abort the entire job</li>
+ * </ul>
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBDownsampleJobTest { + + private DataSource dataSource; + private Connection connection; + private PreparedStatement preparedStatement; + private ResultSet resultSet; + private MetadataRegistry registry; + private TimescaleDBMetadataService metadataService; + + @BeforeEach + void setUp() throws SQLException { + dataSource = mock(DataSource.class); + connection = mock(Connection.class); + preparedStatement = mock(PreparedStatement.class); + resultSet = mock(ResultSet.class); + registry = mock(MetadataRegistry.class); + metadataService = new TimescaleDBMetadataService(registry); + + when(dataSource.getConnection()).thenReturn(connection); + // Default: all prepareStatement calls return the shared mock + when(connection.prepareStatement(anyString())).thenReturn(preparedStatement); + when(preparedStatement.executeQuery()).thenReturn(resultSet); + // Default: SELECT id FROM item_meta returns nothing (item not in DB) + when(resultSet.next()).thenReturn(false); + when(preparedStatement.executeUpdate()).thenReturn(1); + } + + @Test + void runSingleitemExecutesinsertanddeleteintransaction() throws SQLException { + stubMetadata("SensorA", "AVG", Map.of("downsampleInterval", "1h", "retainRawDays", "5")); + stubItemNames(List.of("SensorA")); + stubItemIdInDb(connection, "SensorA", 42); + + var job = new TimescaleDBDownsampleJob(dataSource, metadataService); + job.run(); + + // Transaction: setAutoCommit(false), commit, setAutoCommit(true) + verify(connection).setAutoCommit(false); + verify(connection).commit(); + verify(connection, never()).rollback(); + + // DML: setInt(1, 42) called twice (INSERT + DELETE) + verify(preparedStatement, times(2)).setInt(1, 42); + verify(preparedStatement, times(2)).executeUpdate(); + } + + @Test + void runWithretentiondaysExecutesthreestatements() throws SQLException 
{ + stubMetadata("SensorA", "MAX", + Map.of("downsampleInterval", "15m", "retainRawDays", "3", "retentionDays", "90")); + stubItemNames(List.of("SensorA")); + stubItemIdInDb(connection, "SensorA", 7); + + var job = new TimescaleDBDownsampleJob(dataSource, metadataService); + job.run(); + + // 3 DML statements: INSERT, DELETE raw, DELETE retention + verify(preparedStatement, times(3)).setInt(1, 7); + } + + @Test + void runItemnotInDbSkipsWithoutDml() throws SQLException { + stubMetadata("UnknownItem", "AVG", Map.of("downsampleInterval", "1h")); + stubItemNames(List.of("UnknownItem")); + // No stubItemIdInDb → SELECT returns empty (default setUp) + + var job = new TimescaleDBDownsampleJob(dataSource, metadataService); + job.run(); + + // Connection IS opened for the SELECT lookup, but no DML executed + verify(dataSource, atLeastOnce()).getConnection(); + verify(preparedStatement, never()).executeUpdate(); + } + + @Test + void runSqlfailureforoneitemDoesnotabortotheritems() throws SQLException { + stubMetadata("SensorA", "AVG", Map.of("downsampleInterval", "1h")); + stubMetadata("SensorB", "SUM", Map.of("downsampleInterval", "1d")); + stubItemNames(List.of("SensorA", "SensorB")); + + // First connection: SELECT succeeds (item found), INSERT fails + Connection failConn = mock(Connection.class); + PreparedStatement selectPs = mock(PreparedStatement.class); + ResultSet selectRs = mock(ResultSet.class); + when(selectRs.next()).thenReturn(true); + when(selectRs.getInt(1)).thenReturn(1); + when(selectPs.executeQuery()).thenReturn(selectRs); + when(failConn.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(selectPs); + when(failConn.prepareStatement(contains("INSERT"))).thenThrow(new SQLException("simulated error")); + + // Second connection: succeeds + Connection okConn = mock(Connection.class); + PreparedStatement okSelectPs = mock(PreparedStatement.class); + ResultSet okSelectRs = mock(ResultSet.class); + when(okSelectRs.next()).thenReturn(true); + 
when(okSelectRs.getInt(1)).thenReturn(2);
+        when(okSelectPs.executeQuery()).thenReturn(okSelectRs);
+        PreparedStatement okPs = mock(PreparedStatement.class);
+        when(okPs.executeUpdate()).thenReturn(1);
+        when(okConn.prepareStatement(anyString())).thenAnswer(inv -> {
+            String sql = inv.getArgument(0);
+            return sql.contains("SELECT id FROM item_meta") ? okSelectPs : okPs;
+        });
+
+        when(dataSource.getConnection()).thenReturn(failConn).thenReturn(okConn);
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        assertNoException(job); // Must not throw
+
+        // Second item was still processed
+        verify(okConn, atLeastOnce()).prepareStatement(contains("INSERT"));
+    }
+
+    @Test
+    void runNoitemsconfiguredDoesnothing() throws SQLException {
+        when(registry.getAll()).thenReturn(List.of());
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        verify(dataSource, never()).getConnection();
+    }
+
+    @Test
+    void runRetentiononlyitemExecutesonlyretentiondelete() throws SQLException {
+        stubRetentionOnlyItem("SensorRO", 30);
+        stubItemIdInDb(connection, "SensorRO", 42);
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        // Only 1 DML: the retention DELETE
+        verify(preparedStatement, times(1)).setInt(1, 42);
+        verify(preparedStatement, times(1)).executeUpdate();
+        verify(connection).commit();
+        verify(connection, never()).rollback();
+    }
+
+    @Test
+    void runRetentiononlyitemSqlcontainscorrectretentiondays() throws SQLException {
+        stubRetentionOnlyItem("SensorRO", 14);
+
+        PreparedStatement selectPs = mock(PreparedStatement.class);
+        ResultSet selectRs = mock(ResultSet.class);
+        when(selectRs.next()).thenReturn(true);
+        when(selectRs.getInt(1)).thenReturn(42);
+        when(selectPs.executeQuery()).thenReturn(selectRs);
+
+        var capturedSql = new java.util.ArrayList<String>();
+        when(connection.prepareStatement(anyString())).thenAnswer(inv -> {
+            String sql = inv.getArgument(0);
+            if (sql.contains("SELECT id FROM item_meta")) {
+                return selectPs;
+            }
+            capturedSql.add(sql);
+            return preparedStatement;
+        });
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        assertEquals(1, capturedSql.size(), "Retention-only: exactly one DML statement expected");
+        assertTrue(capturedSql.get(0).contains("14 days"), "Retention DELETE must reference retentionDays=14");
+        assertFalse(capturedSql.get(0).contains("INSERT"), "No INSERT for retention-only item");
+        assertFalse(capturedSql.get(0).contains("downsampled = FALSE"), "No raw-delete for retention-only item");
+    }
+
+    @Test
+    void runRetentiononlyitemWithzeroretentiondaysIsSkipped() throws SQLException {
+        // Blank value but retentionDays=0 (or missing) → no DB access at all (invalid config)
+        MetadataKey key = new MetadataKey("timescaledb", "SensorRO");
+        Metadata meta = new Metadata(key, " ", Map.of());
+        when(registry.getAll()).thenReturn(List.of(meta));
+        when(registry.get(key)).thenReturn(meta);
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        verify(dataSource, never()).getConnection();
+    }
+
+    @Test
+    void runRetentiononlyitemNotInDbIsSkipped() throws SQLException {
+        stubRetentionOnlyItem("SensorRO", 30);
+        // No stubItemIdInDb → SELECT returns empty (default setUp)
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        // Connection opened for SELECT but no DML
+        verify(preparedStatement, never()).executeUpdate();
+    }
+
+    @Test
+    void runInsertsqlcontainscorrectintervalandfunction() throws SQLException {
+        stubMetadata("Sensor", "MIN", Map.of("downsampleInterval", "6h", "retainRawDays", "3"));
+        stubItemNames(List.of("Sensor"));
+
+        PreparedStatement selectPs = mock(PreparedStatement.class);
+        ResultSet selectRs = mock(ResultSet.class);
+        when(selectRs.next()).thenReturn(true);
+        when(selectRs.getInt(1)).thenReturn(99);
+        when(selectPs.executeQuery()).thenReturn(selectRs);
+
+        var capturedSql = new java.util.ArrayList<String>();
+        when(connection.prepareStatement(anyString())).thenAnswer(inv -> {
+            String sql = inv.getArgument(0);
+            if (sql.contains("SELECT id FROM item_meta")) {
+                return selectPs;
+            }
+            capturedSql.add(sql);
+            return preparedStatement;
+        });
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        // capturedSql only contains DML (SELECT is handled by selectPs, not the answer)
+        String insertSql = capturedSql.get(0);
+        assertTrue(insertSql.contains("6 hours"), "INSERT should contain interval '6 hours'");
+        assertTrue(insertSql.contains("MIN(value)"), "INSERT should contain aggregation 'MIN(value)'");
+        assertTrue(insertSql.contains("3 days"), "INSERT should reference retainRawDays=3");
+    }
+
+    @Test
+    void runDeletesqlreferencesretainrawdays() throws SQLException {
+        stubMetadata("Sensor", "AVG", Map.of("downsampleInterval", "1h", "retainRawDays", "7"));
+        stubItemNames(List.of("Sensor"));
+
+        PreparedStatement selectPs = mock(PreparedStatement.class);
+        ResultSet selectRs = mock(ResultSet.class);
+        when(selectRs.next()).thenReturn(true);
+        when(selectRs.getInt(1)).thenReturn(5);
+        when(selectPs.executeQuery()).thenReturn(selectRs);
+
+        var capturedSql = new java.util.ArrayList<String>();
+        when(connection.prepareStatement(anyString())).thenAnswer(inv -> {
+            String sql = inv.getArgument(0);
+            if (sql.contains("SELECT id FROM item_meta")) {
+                return selectPs;
+            }
+            capturedSql.add(sql);
+            return preparedStatement;
+        });
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run();
+
+        // capturedSql only contains DML (SELECT is handled by selectPs, not the answer)
+        String deleteSql = capturedSql.get(1);
+        assertTrue(deleteSql.contains("downsampled = FALSE"), "DELETE should only remove raw rows");
+        assertTrue(deleteSql.contains("7 days"), "DELETE should reference retainRawDays=7");
+    }
+
+    @Test
+    void runRollbackonsqlerror() throws SQLException {
+        stubMetadata("SensorA", "AVG", Map.of("downsampleInterval", "1h"));
+        stubItemNames(List.of("SensorA"));
+        stubItemIdInDb(connection, "SensorA", 1);
+
+        // Make the INSERT fail
+        when(connection.prepareStatement(contains("INSERT"))).thenThrow(new SQLException("insert failed"));
+
+        var job = new TimescaleDBDownsampleJob(dataSource, metadataService);
+        job.run(); // should not throw
+
+        verify(connection).rollback();
+    }
+
+    // ------------------------------------------------------------------
+    // Helpers
+    // ------------------------------------------------------------------
+
+    /**
+     * Stubs the SELECT id FROM item_meta query to return the given id for any item name.
+     */
+    private static void stubItemIdInDb(Connection conn, String itemName, int id) throws SQLException {
+        PreparedStatement selectPs = mock(PreparedStatement.class);
+        ResultSet selectRs = mock(ResultSet.class);
+        when(selectRs.next()).thenReturn(true);
+        when(selectRs.getInt(1)).thenReturn(id);
+        when(selectPs.executeQuery()).thenReturn(selectRs);
+        when(conn.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(selectPs);
+    }
+
+    private void stubMetadata(String itemName, String value, Map<String, Object> config) {
+        MetadataKey key = new MetadataKey("timescaledb", itemName);
+        Metadata meta = new Metadata(key, value, config);
+        when(registry.get(key)).thenReturn(meta);
+    }
+
+    private void stubItemNames(List<String> names) {
+        var metaList = names.stream()
+                .map(n -> new Metadata(new MetadataKey("timescaledb", n), "AVG", Map.of("downsampleInterval", "1h")))
+                .toList();
+        when(registry.getAll()).thenAnswer(inv -> metaList);
+        for (String name : names) {
+            MetadataKey key = new MetadataKey("timescaledb", name);
+            if (registry.get(key) == null) {
+                when(registry.get(key)).thenReturn(new Metadata(key, "AVG", Map.of("downsampleInterval", "1h")));
+            }
+        }
+    }
+
+    /**
+     * Stubs registry.getAll() and registry.get() for a retention-only item
+     * (blank metadata value + retentionDays configured).
+ */ + private void stubRetentionOnlyItem(String itemName, int retentionDays) { + MetadataKey key = new MetadataKey("timescaledb", itemName); + Metadata meta = new Metadata(key, " ", Map.of("retentionDays", String.valueOf(retentionDays))); + when(registry.getAll()).thenReturn(List.of(meta)); + when(registry.get(key)).thenReturn(meta); + } + + private static void assertNoException(Runnable r) { + try { + r.run(); + } catch (Exception e) { + org.junit.jupiter.api.Assertions.fail("Expected no exception but got: " + e.getMessage()); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleSemanticsTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleSemanticsTest.java new file mode 100644 index 0000000000000..aec23fe9728f2 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBDownsampleSemanticsTest.java @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.Test; +import org.openhab.core.library.items.ContactItem; +import org.openhab.core.library.items.SwitchItem; +import org.openhab.core.library.types.OnOffType; +import org.openhab.core.library.types.OpenClosedType; + +/** + * Unit tests for the downsampling semantics of Switch (ON/OFF) and Contact (OPEN/CLOSED). + * + *

+ * Both types are stored numerically: ON/OPEN = 1.0, OFF/CLOSED = 0.0.
+ * The DB-side aggregation (MAX, MIN, AVG, SUM) produces a numeric result that
+ * {@link TimescaleDBMapper#toState} reconstructs via threshold: value >= 0.5 → ON/OPEN.
+ *
+ * <p>
+ * Reference sequence for all tests: [OFF, ON, OFF] = [0.0, 1.0, 0.0]
+ * <ul>
+ * <li>MAX = 1.0 → ON / OPEN (was the state ever active?)</li>
+ * <li>MIN = 0.0 → OFF / CLOSED (was the state ever inactive?)</li>
+ * <li>AVG = 0.333 → OFF / CLOSED (minority of the time active)</li>
+ * <li>SUM = 1.0 → ON / OPEN (at least one active event in the bucket)</li>
+ * </ul>
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +class TimescaleDBDownsampleSemanticsTest { + + // ------------------------------------------------------------------ + // Encoding: toRow + // ------------------------------------------------------------------ + + @Test + void encodingSwitchOnIs1() { + var row = TimescaleDBMapper.toRow(OnOffType.ON); + assertNotNull(row); + assertEquals(1.0, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + @Test + void encodingSwitchOffIs0() { + var row = TimescaleDBMapper.toRow(OnOffType.OFF); + assertNotNull(row); + assertEquals(0.0, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + @Test + void encodingContactOpenIs1() { + var row = TimescaleDBMapper.toRow(OpenClosedType.OPEN); + assertNotNull(row); + assertEquals(1.0, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + @Test + void encodingContactClosedIs0() { + var row = TimescaleDBMapper.toRow(OpenClosedType.CLOSED); + assertNotNull(row); + assertEquals(0.0, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + // ------------------------------------------------------------------ + // Switch: MAX — sequence [OFF, ON, OFF] → MAX(0,1,0) = 1.0 → ON + // ------------------------------------------------------------------ + + @Test + void switchMaxOffonoffReturnson() { + double aggregated = max(0.0, 1.0, 0.0); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchMaxAlloffReturnsoff() { + double aggregated = max(0.0, 0.0, 0.0); + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchMaxAllonReturnson() { + double aggregated = max(1.0, 1.0, 1.0); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + 
// ------------------------------------------------------------------ + // Switch: MIN — sequence [OFF, ON, OFF] → MIN(0,1,0) = 0.0 → OFF + // ------------------------------------------------------------------ + + @Test + void switchMinOffonoffReturnsoff() { + double aggregated = min(0.0, 1.0, 0.0); + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchMinAllonReturnson() { + double aggregated = min(1.0, 1.0, 1.0); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchMinAlloffReturnsoff() { + double aggregated = min(0.0, 0.0, 0.0); + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + // ------------------------------------------------------------------ + // Switch: AVG — sequence [OFF, ON, OFF] → AVG(0,1,0) = 0.333 → OFF + // ------------------------------------------------------------------ + + @Test + void switchAvgOffonoffReturnsoff() { + double aggregated = avg(0.0, 1.0, 0.0); // = 0.333 + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchAvgMajorityonReturnson() { + // ON, ON, OFF → AVG = 0.666 → ON + double aggregated = avg(1.0, 1.0, 0.0); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchAvgExactlyhalfReturnson() { + // Boundary: 0.5 exactly → ON (threshold is >= 0.5) + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), 0.5, null, null)); + } + + @Test + void switchAvgJustbelowhalfReturnsoff() { + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), 0.499, null, null)); + } + + // ------------------------------------------------------------------ + // Switch: SUM — sequence [OFF, ON, OFF] → SUM(0,1,0) = 1.0 → ON + // 
------------------------------------------------------------------ + + @Test + void switchSumOffonoffReturnson() { + double aggregated = sum(0.0, 1.0, 0.0); // = 1.0 + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchSumAlloffReturnsoff() { + double aggregated = sum(0.0, 0.0, 0.0); // = 0.0 + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + @Test + void switchSumMultipleonReturnson() { + // Multiple ON events sum to > 1 — still reads as ON + double aggregated = sum(1.0, 1.0, 0.0); // = 2.0 + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(new SwitchItem("s"), aggregated, null, null)); + } + + // ------------------------------------------------------------------ + // Contact: MAX — sequence [CLOSED, OPEN, CLOSED] → MAX(0,1,0) = 1.0 → OPEN + // ------------------------------------------------------------------ + + @Test + void contactMaxClosedopenclosedReturnsopen() { + double aggregated = max(0.0, 1.0, 0.0); + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactMaxAllclosedReturnsclosed() { + double aggregated = max(0.0, 0.0, 0.0); + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactMaxAllopenReturnsopen() { + double aggregated = max(1.0, 1.0, 1.0); + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + // ------------------------------------------------------------------ + // Contact: MIN — sequence [CLOSED, OPEN, CLOSED] → MIN(0,1,0) = 0.0 → CLOSED + // ------------------------------------------------------------------ + + @Test + void contactMinClosedopenclosedReturnsclosed() { + double aggregated = min(0.0, 1.0, 0.0); + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(new 
ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactMinAllopenReturnsopen() { + double aggregated = min(1.0, 1.0, 1.0); + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + // ------------------------------------------------------------------ + // Contact: AVG — sequence [CLOSED, OPEN, CLOSED] → AVG(0,1,0) = 0.333 → CLOSED + // ------------------------------------------------------------------ + + @Test + void contactAvgClosedopenclosedReturnsclosed() { + double aggregated = avg(0.0, 1.0, 0.0); // = 0.333 + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactAvgMajorityopenReturnsopen() { + double aggregated = avg(1.0, 1.0, 0.0); // = 0.666 + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactAvgExactlyhalfReturnsopen() { + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), 0.5, null, null)); + } + + @Test + void contactAvgJustbelowhalfReturnsclosed() { + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(new ContactItem("c"), 0.499, null, null)); + } + + // ------------------------------------------------------------------ + // Contact: SUM — sequence [CLOSED, OPEN, CLOSED] → SUM(0,1,0) = 1.0 → OPEN + // ------------------------------------------------------------------ + + @Test + void contactSumClosedopenclosedReturnsopen() { + double aggregated = sum(0.0, 1.0, 0.0); // = 1.0 + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactSumAllclosedReturnsclosed() { + double aggregated = sum(0.0, 0.0, 0.0); + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + @Test + void contactSumMultipleopenReturnsopen() { + double aggregated 
= sum(1.0, 1.0, 0.0); // = 2.0 + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(new ContactItem("c"), aggregated, null, null)); + } + + // ------------------------------------------------------------------ + // Helpers — simulate DB-side aggregation on a value series + // ------------------------------------------------------------------ + + private static double max(double... values) { + double result = Double.NEGATIVE_INFINITY; + for (double v : values) { + result = Math.max(result, v); + } + return result; + } + + private static double min(double... values) { + double result = Double.POSITIVE_INFINITY; + for (double v : values) { + result = Math.min(result, v); + } + return result; + } + + private static double avg(double... values) { + double sum = 0; + for (double v : values) { + sum += v; + } + return sum / values.length; + } + + private static double sum(double... values) { + double result = 0; + for (double v : values) { + result += v; + } + return result; + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBExternalIT.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBExternalIT.java new file mode 100644 index 0000000000000..3db48d01e039b --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBExternalIT.java @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assumptions.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +/** + * Integration test that verifies {@link TimescaleDBSchema#initialize(Connection, String, int, int)} + * against the external TimescaleDB instance configured via environment variables: + * HOST, PORT, DBNAME, USER, PASSWORD. + * + * Run with: mvn test -Dgroups=external-integration -DexcludedGroups="" -pl bundles/org.openhab.persistence.timescaledb + */ +@Tag("external-integration") +@NonNullByDefault +@SuppressWarnings("null") +class TimescaleDBExternalIT { + + private static @Nullable HikariDataSource dataSource; + + @BeforeAll + static void connect() { + String host = System.getenv("HOST"); + String port = System.getenv("PORT"); + String db = System.getenv("DBNAME"); + String user = System.getenv("USER"); + String pass = System.getenv("PASSWORD"); + + assumeTrue(host != null && !host.isBlank(), "HOST env var not set — skipping external integration tests"); + assumeTrue(db != null && !db.isBlank(), "DBNAME env var not set — skipping external integration tests"); + + String url = "jdbc:postgresql://" + host + ":" + (port != null ? 
port : "5432") + "/" + db; + + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl(url); + cfg.setUsername(user); + cfg.setPassword(pass); + cfg.setMaximumPoolSize(3); + cfg.setConnectionTimeout(5000); + dataSource = new HikariDataSource(cfg); + } + + @AfterAll + static void disconnect() { + if (dataSource != null) { + dataSource.close(); + } + } + + @Test + void schemaInitializeCreateshypertableandindex() throws SQLException { + HikariDataSource ds = dataSource; + assumeTrue(ds != null, "DataSource not initialized — skipping"); + + // Drop and recreate for a clean test run + try (Connection conn = ds.getConnection(); var s = conn.createStatement()) { + s.execute("DROP TABLE IF EXISTS items CASCADE"); + s.execute("DROP TABLE IF EXISTS item_meta CASCADE"); + } + + // This is the method under test — must succeed without any manual DDL + try (Connection conn = ds.getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + + // Verify item_meta table + try (Connection conn = ds.getConnection(); + var s = conn.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM item_meta")) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1), "item_meta should be empty after fresh init"); + } + + // Verify items is a hypertable + try (Connection conn = ds.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT hypertable_name, num_chunks FROM timescaledb_information.hypertables WHERE hypertable_name = 'items'"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next(), "Hypertable 'items' must exist after initialize()"); + assertEquals("items", rs.getString("hypertable_name")); + } + + // Verify composite index + try (Connection conn = ds.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT indexname FROM pg_indexes WHERE tablename = 'items' AND indexname = 'items_item_id_time_idx'"); + ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next(), "Index items_item_id_time_idx must exist after 
initialize()"); + } + } + + @Test + void schemaInitializeIsidempotent() throws SQLException { + HikariDataSource ds = dataSource; + assumeTrue(ds != null, "DataSource not initialized — skipping"); + + // Calling initialize() twice on an existing schema must not throw + try (Connection conn = ds.getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + try (Connection conn = ds.getConnection()) { + assertDoesNotThrow(() -> TimescaleDBSchema.initialize(conn, "7 days", 0, 0), + "initialize() must be idempotent — safe to call on existing schema"); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapperTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapperTest.java new file mode 100644 index 0000000000000..10f4c92362483 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMapperTest.java @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; + +import java.time.ZonedDateTime; +import java.util.Base64; +import java.util.Objects; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.openhab.core.items.GroupItem; +import org.openhab.core.library.items.CallItem; +import org.openhab.core.library.items.ColorItem; +import org.openhab.core.library.items.ContactItem; +import org.openhab.core.library.items.DateTimeItem; +import org.openhab.core.library.items.DimmerItem; +import org.openhab.core.library.items.ImageItem; +import org.openhab.core.library.items.LocationItem; +import org.openhab.core.library.items.NumberItem; +import org.openhab.core.library.items.PlayerItem; +import org.openhab.core.library.items.RollershutterItem; +import org.openhab.core.library.items.StringItem; +import org.openhab.core.library.items.SwitchItem; +import org.openhab.core.library.types.DateTimeType; +import org.openhab.core.library.types.DecimalType; +import org.openhab.core.library.types.HSBType; +import org.openhab.core.library.types.OnOffType; +import org.openhab.core.library.types.OpenClosedType; +import org.openhab.core.library.types.PercentType; +import org.openhab.core.library.types.PlayPauseType; +import org.openhab.core.library.types.PointType; +import org.openhab.core.library.types.QuantityType; +import org.openhab.core.library.types.RawType; +import org.openhab.core.library.types.StringListType; +import org.openhab.core.library.types.StringType; +import 
org.openhab.core.library.types.UpDownType; +import org.openhab.core.persistence.FilterCriteria.Operator; +import org.openhab.core.types.UnDefType; + +/** + * Unit tests for {@link TimescaleDBMapper} — covers all state types in both directions. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +class TimescaleDBMapperTest { + + // ------------------------------------------------------------------ + // toRow — store direction + // ------------------------------------------------------------------ + + @Test + void toRowDecimaltype() { + var row = TimescaleDBMapper.toRow(new DecimalType(42.5)); + assertNotNull(row); + assertEquals(42.5, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + @Test + void toRowQuantitytypeTemperature() { + var row = TimescaleDBMapper.toRow(new QuantityType<>("23.4 °C")); + assertNotNull(row); + assertNotNull(row.value()); + assertEquals(23.4, Objects.requireNonNull(row.value()), 1e-6); + assertNull(row.string()); + assertEquals("°C", row.unit()); + } + + @Test + void toRowOnofftypeOn() { + var row = TimescaleDBMapper.toRow(OnOffType.ON); + assertNotNull(row); + assertEquals(1.0, row.value()); + assertNull(row.string()); + assertNull(row.unit()); + } + + @Test + void toRowOnofftypeOff() { + var row = TimescaleDBMapper.toRow(OnOffType.OFF); + assertNotNull(row); + assertEquals(0.0, row.value()); + } + + @Test + void toRowOpenclosedtypeOpen() { + var row = TimescaleDBMapper.toRow(OpenClosedType.OPEN); + assertNotNull(row); + assertEquals(1.0, row.value()); + } + + @Test + void toRowOpenclosedtypeClosed() { + var row = TimescaleDBMapper.toRow(OpenClosedType.CLOSED); + assertNotNull(row); + assertEquals(0.0, row.value()); + } + + @Test + void toRowPercenttype() { + var row = TimescaleDBMapper.toRow(new PercentType(75)); + assertNotNull(row); + assertEquals(75.0, Objects.requireNonNull(row.value()), 1e-6); + assertNull(row.unit()); + } + 
+ @Test + void toRowUpdowntypeUp() { + var row = TimescaleDBMapper.toRow(UpDownType.UP); + assertNotNull(row); + assertEquals(0.0, row.value()); + } + + @Test + void toRowUpdowntypeDown() { + var row = TimescaleDBMapper.toRow(UpDownType.DOWN); + assertNotNull(row); + assertEquals(1.0, row.value()); + } + + @Test + void toRowHsbtype() { + var row = TimescaleDBMapper.toRow(new HSBType("120,50,80")); + assertNotNull(row); + assertNull(row.value()); + assertEquals("120,50,80", row.string()); + assertNull(row.unit()); + } + + @Test + void toRowDatetimetype() { + ZonedDateTime now = ZonedDateTime.now(); + var row = TimescaleDBMapper.toRow(new DateTimeType(now)); + assertNotNull(row); + assertNull(row.value()); + assertNotNull(row.string()); + assertNull(row.unit()); + } + + @Test + void toRowStringtype() { + var row = TimescaleDBMapper.toRow(new StringType("hello world")); + assertNotNull(row); + assertNull(row.value()); + assertEquals("hello world", row.string()); + assertNull(row.unit()); + } + + @Test + void toRowPointtype() { + var row = TimescaleDBMapper.toRow(new PointType("52.5,13.4,34.0")); + assertNotNull(row); + assertNull(row.value()); + String pointStr = Objects.requireNonNull(row.string()); + assertTrue(pointStr.contains("52.5")); + assertTrue(pointStr.contains("13.4")); + assertNull(row.unit()); + } + + @Test + void toRowPlaypausetypePlay() { + var row = TimescaleDBMapper.toRow(PlayPauseType.PLAY); + assertNotNull(row); + assertNull(row.value()); + assertEquals("PLAY", row.string()); + assertNull(row.unit()); + } + + @Test + void toRowPlaypausetypePause() { + var row = TimescaleDBMapper.toRow(PlayPauseType.PAUSE); + assertNotNull(row); + assertEquals("PAUSE", row.string()); + } + + @Test + void toRowStringlisttype() { + var row = TimescaleDBMapper.toRow(new StringListType("Alice", "Bob", "Charlie")); + assertNotNull(row); + assertNull(row.value()); + assertTrue(Objects.requireNonNull(row.string()).contains("Alice")); + assertNull(row.unit()); + } + + @Test 
+ void toRowRawtype() { + byte[] bytes = { 0x01, 0x02, 0x03 }; + var row = TimescaleDBMapper.toRow(new RawType(bytes, "image/png")); + assertNotNull(row); + assertNull(row.value()); + assertNotNull(row.string()); + assertEquals("image/png", row.unit()); + // Verify round-trip base64 + byte[] decoded = Base64.getDecoder().decode(Objects.requireNonNull(row.string())); + assertArrayEquals(bytes, decoded); + } + + @Test + void toRowUndeftypeReturnsnull() { + var row = TimescaleDBMapper.toRow(UnDefType.UNDEF); + assertNull(row); + } + + // ------------------------------------------------------------------ + // toState — load direction + // ------------------------------------------------------------------ + + @Test + void toStateQuantitytypeFromvalueandunit() { + var item = new NumberItem("TestNumber"); + var state = TimescaleDBMapper.toState(item, 23.4, null, "°C"); + assertInstanceOf(QuantityType.class, state); + assertEquals("23.4 °C", state.toString()); + } + + @Test + void toStateDecimaltypeFromvalue() { + var item = new NumberItem("TestNumber"); + var state = TimescaleDBMapper.toState(item, 42.0, null, null); + assertInstanceOf(DecimalType.class, state); + assertEquals(new DecimalType(42.0), state); + } + + @Test + void toStateOnofftypeFromvalueSwitchitem() { + var item = new SwitchItem("TestSwitch"); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(item, 1.0, null, null)); + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(item, 0.0, null, null)); + } + + @Test + void toStateOpenclosedtypeFromvalue() { + var item = new ContactItem("TestContact"); + assertEquals(OpenClosedType.OPEN, TimescaleDBMapper.toState(item, 1.0, null, null)); + assertEquals(OpenClosedType.CLOSED, TimescaleDBMapper.toState(item, 0.0, null, null)); + } + + @Test + void toStatePercenttypeFromvalueDimmeritem() { + var item = new DimmerItem("TestDimmer"); + var state = TimescaleDBMapper.toState(item, 75.0, null, null); + assertInstanceOf(PercentType.class, state); + assertEquals(75, 
((PercentType) state).intValue()); + } + + @Test + void toStatePercenttypeFromvalueRollershutteritem() { + var item = new RollershutterItem("TestRoller"); + var state = TimescaleDBMapper.toState(item, 50.0, null, null); + assertInstanceOf(PercentType.class, state); + } + + @Test + void toStateHsbtypeFromstring() { + var item = new ColorItem("TestColor"); + var state = TimescaleDBMapper.toState(item, null, "120,50,80", null); + assertInstanceOf(HSBType.class, state); + assertEquals("120,50,80", state.toString()); + } + + @Test + void toStateDatetimetypeFromstring() { + ZonedDateTime dt = ZonedDateTime.parse("2024-01-15T10:30:00+01:00"); + var item = new DateTimeItem("TestDateTime"); + var state = TimescaleDBMapper.toState(item, null, dt.toString(), null); + assertInstanceOf(DateTimeType.class, state); + } + + @Test + void toStateStringtypeFromstringStringitem() { + var item = new StringItem("TestString"); + var state = TimescaleDBMapper.toState(item, null, "hello", null); + assertInstanceOf(StringType.class, state); + assertEquals("hello", state.toString()); + } + + @Test + void toStatePointtypeFromstringLocationitem() { + var item = new LocationItem("TestLocation"); + var state = TimescaleDBMapper.toState(item, null, "52.5200,13.4050,34.0000", null); + assertInstanceOf(PointType.class, state); + PointType point = (PointType) state; + assertEquals(52.52, point.getLatitude().doubleValue(), 1e-3); + assertEquals(13.405, point.getLongitude().doubleValue(), 1e-3); + } + + @Test + void toStatePlaypausetypeFromstringPlayeritem() { + var item = new PlayerItem("TestPlayer"); + assertEquals(PlayPauseType.PLAY, TimescaleDBMapper.toState(item, null, "PLAY", null)); + assertEquals(PlayPauseType.PAUSE, TimescaleDBMapper.toState(item, null, "PAUSE", null)); + } + + @Test + void toStateStringlisttypeFromstringCallitem() { + var item = new CallItem("TestCall"); + var state = TimescaleDBMapper.toState(item, null, "Alice,Bob,Charlie", null); + assertInstanceOf(StringListType.class, 
state); + assertEquals("Alice,Bob,Charlie", state.toString()); + } + + @Test + void toStateRawtypeFrombase64Imageitem() { + byte[] bytes = { 0x01, 0x02, 0x03 }; + String encoded = Base64.getEncoder().encodeToString(bytes); + var item = new ImageItem("TestImage"); + var state = TimescaleDBMapper.toState(item, null, encoded, "image/jpeg"); + assertInstanceOf(RawType.class, state); + RawType raw = (RawType) state; + assertEquals("image/jpeg", raw.getMimeType()); + assertArrayEquals(bytes, raw.getBytes()); + } + + @Test + void toStateRawtypeMissingmimetypeUsesdefault() { + byte[] bytes = { 0x00 }; + String encoded = Base64.getEncoder().encodeToString(bytes); + var item = new ImageItem("TestImage"); + var state = TimescaleDBMapper.toState(item, null, encoded, null); + assertInstanceOf(RawType.class, state); + assertEquals("application/octet-stream", ((RawType) state).getMimeType()); + } + + @Test + void toStateGroupitemWithnumberbaseitemReturnsdecimaltype() { + var baseItem = new NumberItem("Base"); + var groupItem = new GroupItem("TestGroup", baseItem); + var state = TimescaleDBMapper.toState(groupItem, 99.0, null, null); + assertInstanceOf(DecimalType.class, state); + assertEquals(99.0, ((DecimalType) state).doubleValue(), 1e-6); + } + + @Test + void toStateGroupitemWithswitchbaseitemReturnsonofftype() { + var baseItem = new SwitchItem("Base"); + var groupItem = new GroupItem("TestGroup", baseItem); + assertEquals(OnOffType.ON, TimescaleDBMapper.toState(groupItem, 1.0, null, null)); + assertEquals(OnOffType.OFF, TimescaleDBMapper.toState(groupItem, 0.0, null, null)); + } + + @Test + void toStateGroupitemWithcolorbaseitemReturnshsbtype() { + var baseItem = new ColorItem("Base"); + var groupItem = new GroupItem("TestGroup", baseItem); + var state = TimescaleDBMapper.toState(groupItem, null, "240,100,50", null); + assertInstanceOf(HSBType.class, state); + } + + @Test + void toStateAllnullreturnsundef() { + var item = new NumberItem("TestNumber"); + var state = 
TimescaleDBMapper.toState(item, null, null, null); + assertEquals(UnDefType.UNDEF, state); + } + + @Test + void toStateInvalidquantityunitReturnsundef() { + var item = new NumberItem("TestNumber"); + var state = TimescaleDBMapper.toState(item, 10.0, null, "NOT_A_UNIT"); + // Should return UNDEF or fall back gracefully + assertNotNull(state); + } + + @Test + void toStateInvalidhsbReturnsundef() { + var item = new ColorItem("TestColor"); + var state = TimescaleDBMapper.toState(item, null, "not-a-valid-hsb", null); + assertEquals(UnDefType.UNDEF, state); + } + + @Test + void toStateInvalidpointtypeReturnsundef() { + var item = new LocationItem("TestLocation"); + var state = TimescaleDBMapper.toState(item, null, "not-a-valid-point", null); + assertEquals(UnDefType.UNDEF, state); + } + + @Test + void toStateInvalidplaypausetypeReturnsundef() { + var item = new PlayerItem("TestPlayer"); + var state = TimescaleDBMapper.toState(item, null, "INVALID_STATE", null); + assertEquals(UnDefType.UNDEF, state); + } + + @Test + void toStateInvalidbase64Returnsundef() { + var item = new ImageItem("TestImage"); + var state = TimescaleDBMapper.toState(item, null, "!!!not-base64!!!", null); + assertEquals(UnDefType.UNDEF, state); + } + + // ------------------------------------------------------------------ + // toSqlOperator + // ------------------------------------------------------------------ + + @ParameterizedTest + @CsvSource({ "EQ,=", "NEQ,<>", "LT,<", "LTE,<=", "GT,>", "GTE,>=" }) + void toSqlOperatorAllsupportedoperators(Operator op, String expected) { + assertEquals(expected, TimescaleDBMapper.toSqlOperator(op)); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataServiceTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataServiceTest.java new file mode 100644 index 0000000000000..0fa8bb71ae9b3 --- /dev/null 
+++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBMetadataServiceTest.java @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.openhab.core.items.Metadata; +import org.openhab.core.items.MetadataKey; +import org.openhab.core.items.MetadataRegistry; + +/** + * Unit tests for {@link TimescaleDBMetadataService}. 
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBMetadataServiceTest { + + private MetadataRegistry registry; + private TimescaleDBMetadataService service; + + @BeforeEach + void setUp() { + registry = mock(MetadataRegistry.class); + service = new TimescaleDBMetadataService(registry); + } + + // ------------------------------------------------------------------ + // getDownsampleConfig — happy paths + // ------------------------------------------------------------------ + + @ParameterizedTest + @CsvSource({ "AVG,1h,1 hour", "MAX,15m,15 minutes", "MIN,1d,1 day", "SUM,30m,30 minutes" }) + void getDownsampleConfigValidfunctionandinterval(String function, String interval, String expectedSql) { + stubMetadata("MySensor", function, Map.of("downsampleInterval", interval)); + + var config = service.getDownsampleConfig("MySensor"); + + assertTrue(config.isPresent()); + assertEquals(AggregationFunction.valueOf(function), config.get().function()); + assertEquals(expectedSql, config.get().sqlInterval()); + assertEquals(5, config.get().retainRawDays()); // default + assertEquals(0, config.get().retentionDays()); // default + } + + @Test + void getDownsampleConfigCustomretainrawandretentiondays() { + stubMetadata("MySensor", "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", "7", "retentionDays", "365")); + + var config = service.getDownsampleConfig("MySensor").orElseThrow(); + + assertEquals(7, config.retainRawDays()); + assertEquals(365, config.retentionDays()); + } + + @Test + void getDownsampleConfigAllsupportedintervals() { + for (Map.Entry<String, String> entry : DownsampleConfig.INTERVAL_MAP.entrySet()) { + stubMetadata("Item_" + entry.getKey(), "AVG", Map.of("downsampleInterval", entry.getKey())); + var config = service.getDownsampleConfig("Item_" + entry.getKey()); + assertTrue(config.isPresent(), "Should parse interval: " + entry.getKey()); + 
assertEquals(entry.getValue(), config.get().sqlInterval()); + } + } + + // ------------------------------------------------------------------ + // getDownsampleConfig — no / empty metadata + // ------------------------------------------------------------------ + + @Test + void getDownsampleConfigNometadataReturnsempty() { + when(registry.get(new MetadataKey("timescaledb", "Unknown"))).thenReturn(null); + + assertTrue(service.getDownsampleConfig("Unknown").isEmpty()); + } + + @Test + void getDownsampleConfigBlankfunctionWithretentiondaysReturnsretentiononlyconfig() { + // Blank value + retentionDays → retention-only config (no downsampling) + stubMetadata("MySensor", " ", Map.of("retentionDays", "30")); + + Optional<DownsampleConfig> result = service.getDownsampleConfig("MySensor"); + assertTrue(result.isPresent()); + assertFalse(result.get().hasDownsampling()); + assertEquals(30, result.get().retentionDays()); + assertNull(result.get().function()); + assertNull(result.get().sqlInterval()); + } + + @Test + void getDownsampleConfigBlankfunctionWithoutretentiondaysReturnsempty() { + // Blank value + no retentionDays → nothing to do, skip item + stubMetadata("MySensor", " ", Map.of()); + + assertTrue(service.getDownsampleConfig("MySensor").isEmpty()); + } + + // ------------------------------------------------------------------ + // getDownsampleConfig — invalid / unsupported values + // ------------------------------------------------------------------ + + @ParameterizedTest + @ValueSource(strings = { "2h30m", "3m", "1w", "invalid", "" }) + void getDownsampleConfigInvalidintervalReturnsempty(String badInterval) { + if (badInterval.isBlank()) { + // handled by the missing-interval branch + stubMetadata("MySensor", "AVG", Map.of()); + } else { + stubMetadata("MySensor", "AVG", Map.of("downsampleInterval", badInterval)); + } + + assertTrue(service.getDownsampleConfig("MySensor").isEmpty()); + } + + @Test + void getDownsampleConfigInvalidfunctionReturnsempty() { + stubMetadata("MySensor", 
"MEDIAN", Map.of("downsampleInterval", "1h")); + + assertTrue(service.getDownsampleConfig("MySensor").isEmpty()); + } + + @Test + void getDownsampleConfigMissingintervalReturnsempty() { + // Function present but no interval → cannot downsample + stubMetadata("MySensor", "AVG", Map.of()); + + assertTrue(service.getDownsampleConfig("MySensor").isEmpty()); + } + + @Test + void getDownsampleConfigInvalidretainrawdaysUsesdefault() { + stubMetadata("MySensor", "AVG", Map.of("downsampleInterval", "1h", "retainRawDays", "not-a-number")); + + var config = service.getDownsampleConfig("MySensor").orElseThrow(); + assertEquals(5, config.retainRawDays()); // falls back to default + } + + // ------------------------------------------------------------------ + // DownsampleConfig.toSqlInterval — allowlist enforcement + // ------------------------------------------------------------------ + + @Test + void toSqlIntervalInvalidvalueThrowsillegalargument() { + assertThrows(IllegalArgumentException.class, () -> DownsampleConfig.toSqlInterval("99h")); + } + + @ParameterizedTest + @CsvSource({ "1m,1 minute", "5m,5 minutes", "15m,15 minutes", "30m,30 minutes", "1h,1 hour", "6h,6 hours", + "1d,1 day" }) + void toSqlIntervalValidvalues(String input, String expected) { + assertEquals(expected, DownsampleConfig.toSqlInterval(input)); + } + + // ------------------------------------------------------------------ + // getConfiguredItemNames + // ------------------------------------------------------------------ + + @Test + void getConfiguredItemNamesReturnsallTimescaledbitemsRegardlessofvalue() { + Metadata withFunction = metadata("SensorA", "AVG", Map.of("downsampleInterval", "1h")); + Metadata retentionOnly = metadata("SensorB", " ", Map.of("retentionDays", "30")); + Metadata otherNamespace = new Metadata(new MetadataKey("influxdb", "SensorC"), "some", Map.of()); + + when(registry.getAll()).thenAnswer(inv -> List.of(withFunction, retentionOnly, otherNamespace)); + + List names = 
service.getConfiguredItemNames(); + + assertEquals(2, names.size()); + assertTrue(names.contains("SensorA")); + assertTrue(names.contains("SensorB")); + assertFalse(names.contains("SensorC")); + } + + @Test + void getConfiguredItemNamesEmptyregistryReturnsemptylist() { + when(registry.getAll()).thenReturn(List.of()); + assertTrue(service.getConfiguredItemNames().isEmpty()); + } + + // ------------------------------------------------------------------ + // Helpers + // ------------------------------------------------------------------ + + private void stubMetadata(String itemName, String value, Map config) { + MetadataKey key = new MetadataKey("timescaledb", itemName); + Metadata meta = metadata(itemName, value, config); + when(registry.get(key)).thenReturn(meta); + } + + private static Metadata metadata(String itemName, String value, Map config) { + return new Metadata(new MetadataKey("timescaledb", itemName), value, config); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPerformanceIT.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPerformanceIT.java new file mode 100644 index 0000000000000..48da696c42f9e --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPerformanceIT.java @@ -0,0 +1,1494 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assumptions.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.Timeout; +import org.openhab.core.items.Metadata; +import org.openhab.core.items.MetadataKey; +import org.openhab.core.items.MetadataRegistry; +import org.openhab.core.library.items.NumberItem; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.persistence.FilterCriteria.Ordering; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import 
org.slf4j.helpers.MessageFormatter; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +/** + * Performance and scaling tests for the TimescaleDB persistence layer. + * + *

+ * Runs against the external DB configured via environment variables HOST, PORT, DBNAME, USER, PASSWORD. + * All test items use the prefix {@code perf_} to avoid conflicts with production data. + * + *

+ * Scale constants at the top of the class can be increased for full-scale runs. + * Current values are sized for a smoke/verification run (seconds, not minutes). + * + *

+ * Run with: + * + *

+ *   mvn test -Dtest=TimescaleDBPerformanceIT -pl bundles/org.openhab.persistence.timescaledb
+ * 
+ * + * @author René Ulbricht - Initial contribution + * @see PERFORMANCE_TESTS.md for full scenario descriptions and SLOs + */ +@Tag("performance") +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBPerformanceIT { + + // ------------------------------------------------------------------ + // Scale constants — increase for full-scale runs + // ------------------------------------------------------------------ + + /** T-01: number of sequential inserts for baseline latency measurement. */ + static final int BASELINE_INSERTS = 100; + + /** T-02/T-06: concurrent writer threads (full-scale: 600). */ + static final int WRITE_THREADS = 20; + + /** T-02/T-06: sustained-load duration in seconds (full-scale: 600). */ + static final int WRITE_DURATION_SECONDS = 30; + + /** T-03: number of items in the cold-start burst (full-scale: 3,000). */ + static final int BURST_ITEMS = 100; + + /** T-04/T-05: rows pre-seeded for query tests (full-scale: 2,592,000). */ + static final int QUERY_HISTORY_ROWS = 2_000; + + /** T-05: page size for pagination test. */ + static final int PAGE_SIZE = 50; + + /** T-06: concurrent reader threads during mixed-load test. */ + static final int READ_THREADS = 5; + + /** T-07/T-08: items configured for downsampling (full-scale: 3,000). */ + static final int DOWNSAMPLE_ITEMS = 15; + + /** T-07/T-08: raw rows per downsample item (5-min intervals for 1 day = 288). */ + static final int DOWNSAMPLE_ROWS_PER_ITEM = 288; + + /** T-09: items seeded with 6-day-old data for retention test (full-scale: 600). 
*/ + static final int RETENTION_ITEMS = 15; + + // ------------------------------------------------------------------ + // SLO thresholds (milliseconds) — calibrated for smoke scale + remote DB + // ------------------------------------------------------------------ + + static final long SLO_WRITE_P99_MS = 200; + static final long SLO_WRITE_P95_MS = 100; + static final long SLO_QUERY_1H_P99_MS = 100; + static final long SLO_QUERY_7D_P99_MS = 500; + static final long SLO_QUERY_30D_P99_MS = 1_000; + static final long SLO_PAGE_P99_MS = 100; + static final long SLO_BURST_TOTAL_MS = 30_000; + static final long SLO_DOWNSAMPLE_JOB_MS = 120_000; + static final long SLO_RETENTION_MS = 30_000; + static final long SLO_SCHEMA_INIT_MS = 2_000; + static final long SLO_SCHEMA_REINIT_MS = 500; + static final long SLO_QUERY_30D_P99_18M_MS = 1_000; + static final long SLO_DOWNSAMPLE_JOB_18M_MS = 1_800_000; + /** T-01: p50 write latency smoke gate (spec: < 5 ms). */ + static final long SLO_WRITE_P50_MS = 50; + /** T-04: last-24h query p99 smoke gate (spec: < 100 ms). */ + static final long SLO_QUERY_24H_P99_MS = 200; + /** T-06: read p99 smoke gate (spec: < 150 ms). */ + static final long SLO_READ_P99_MS = 300; + /** T-02/T-09: connection acquisition p99 (spec: < 100 ms). */ + static final long SLO_CONN_ACQ_P99_MS = 100; + /** + * T-10: max acquisition time for the fastest poolSize threads under saturation (spec: < 100 ms). Relaxed to 500 ms + * for smoke/CI: with minimumIdle=1 the pool must create 2 new TCP connections on-demand against a remote DB. 
+ */ + static final long SLO_T10_FAST_ACQ_MAX_MS = 500; + + // ------------------------------------------------------------------ + // Shared state + // ------------------------------------------------------------------ + + private static final Logger LOGGER = LoggerFactory.getLogger(TimescaleDBPerformanceIT.class); + + private static @Nullable HikariDataSource dataSource; + + /** Returns the data source, guaranteed non-null during test execution. */ + private static HikariDataSource ds() { + HikariDataSource d = dataSource; + if (d == null) { + throw new IllegalStateException("dataSource not initialized — @BeforeAll did not run"); + } + return d; + } + + @BeforeAll + static void connect() throws SQLException { + // Prefer namespaced variables to avoid collisions with generic HOST/USER shell variables. + String host = firstNonBlankEnv("TIMESCALEDB_HOST", "HOST"); + String port = firstNonBlankEnv("TIMESCALEDB_PORT", "PORT"); + String db = firstNonBlankEnv("TIMESCALEDB_DBNAME", "DBNAME"); + String user = firstNonBlankEnv("TIMESCALEDB_USER", "USER"); + String pass = firstNonBlankEnv("TIMESCALEDB_PASSWORD", "PASSWORD"); + + // Only run when explicit DB coordinates are provided. + assumeTrue(isNonBlank(host) && isNonBlank(db), + "TimescaleDB perf tests require TIMESCALEDB_HOST/TIMESCALEDB_DBNAME (or HOST/DBNAME) — skipping"); + + String nonNullHost = Objects.requireNonNull(host); + String nonNullDb = Objects.requireNonNull(db); + String nonNullPort = Objects.requireNonNullElse(port, "5432"); + String nonNullUser = Objects.requireNonNullElse(user, ""); + String nonNullPass = Objects.requireNonNullElse(pass, ""); + + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl("jdbc:postgresql://" + nonNullHost + ":" + nonNullPort + "/" + nonNullDb); + cfg.setUsername(nonNullUser); + cfg.setPassword(nonNullPass); + cfg.setMaximumPoolSize(Math.max(WRITE_THREADS, 10)); + // Avoid connection storms against pgbouncer/managed DBs during pool startup. 
+ cfg.setMinimumIdle(1); + cfg.setConnectionTimeout(10_000); + applyBoundedStartupTimeouts(cfg); + cfg.setPoolName("perf-test"); + dataSource = new HikariDataSource(cfg); + + // Ensure schema exists + try (Connection conn = ds().getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + } + + @AfterAll + static void cleanup() { + HikariDataSource d = dataSource; + if (d == null) { + return; + } + // Remove all test items and their data + try (Connection conn = d.getConnection(); + PreparedStatement ps = conn.prepareStatement( + "DELETE FROM items WHERE item_id IN (SELECT id FROM item_meta WHERE name LIKE 'perf_%')")) { + ps.setQueryTimeout(30); + int deleted = ps.executeUpdate(); + LOGGER.info("[cleanup] deleted {} perf rows", deleted); + } catch (SQLException e) { + LOGGER.warn("[cleanup] timed out/failed while deleting perf rows: {}", e.getMessage()); + } + try (Connection conn = d.getConnection(); + PreparedStatement ps = conn.prepareStatement("DELETE FROM item_meta WHERE name LIKE 'perf_%'")) { + ps.setQueryTimeout(30); + ps.executeUpdate(); + } catch (SQLException e) { + LOGGER.warn("[cleanup] timed out/failed while deleting perf item metadata: {}", e.getMessage()); + } + try { + d.close(); + } finally { + dataSource = null; + } + } + + // ------------------------------------------------------------------ + // T-01 · Single Write — Baseline Latency + // ------------------------------------------------------------------ + + @Test + @Order(1) + void t01SinglewriteBaselinelatency() throws Exception { + LOGGER.info("=== T-01: Single Write Baseline Latency ==="); + int itemId = getOrCreate("perf_t01_item", null); + + List latencies = new ArrayList<>(BASELINE_INSERTS); + ZonedDateTime base = ZonedDateTime.now().minusMinutes(BASELINE_INSERTS); + + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < BASELINE_INSERTS; i++) { + long t0 = System.nanoTime(); + TimescaleDBQuery.insert(conn, itemId, base.plusSeconds(i), + new 
TimescaleDBMapper.Row((double) i, null, null)); + latencies.add(nanosToMillis(System.nanoTime() - t0)); + } + } + + Latencies l = Latencies.of(latencies); + l.print("T-01"); + + assertTrue(l.p99 <= SLO_WRITE_P99_MS, + String.format("T-01 p99 %d ms exceeds SLO %d ms", l.p99, SLO_WRITE_P99_MS)); + assertTrue(l.p95 <= SLO_WRITE_P95_MS, + String.format("T-01 p95 %d ms exceeds SLO %d ms", l.p95, SLO_WRITE_P95_MS)); + assertTrue(l.p50 <= SLO_WRITE_P50_MS, + String.format("T-01 p50 %d ms exceeds SLO %d ms", l.p50, SLO_WRITE_P50_MS)); + assertTrue(l.max <= SLO_WRITE_P99_MS, + String.format("T-01 max %d ms exceeds SLO %d ms", l.max, SLO_WRITE_P99_MS)); + } + + // ------------------------------------------------------------------ + // T-02 · Sustained Write Throughput + // ------------------------------------------------------------------ + + @Test + @Order(2) + void t02Sustainedwritethroughput() throws Exception { + LOGGER.info("=== T-02: Sustained Write Throughput ({} threads, {}s) ===", WRITE_THREADS, + WRITE_DURATION_SECONDS); + + // Pre-register items + int[] itemIds = new int[WRITE_THREADS]; + for (int i = 0; i < WRITE_THREADS; i++) { + itemIds[i] = getOrCreate("perf_t02_item_" + i, null); + } + + ConcurrentLinkedQueue latencies = new ConcurrentLinkedQueue<>(); + AtomicInteger errors = new AtomicInteger(0); + AtomicInteger totalWrites = new AtomicInteger(0); + ConcurrentLinkedQueue acquireLats = new ConcurrentLinkedQueue<>(); + + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch doneLatch = new CountDownLatch(WRITE_THREADS); + long deadline = System.currentTimeMillis() + WRITE_DURATION_SECONDS * 1_000L; + + ExecutorService pool = Executors.newFixedThreadPool(WRITE_THREADS); + for (int t = 0; t < WRITE_THREADS; t++) { + final int itemId = itemIds[t]; + pool.submit(() -> { + try { + startLatch.await(); + int seq = 0; + while (System.currentTimeMillis() < deadline) { + ZonedDateTime ts = ZonedDateTime.now(); + long t0 = System.nanoTime(); + try 
(Connection conn = ds().getConnection()) { + acquireLats.add(nanosToMillis(System.nanoTime() - t0)); + TimescaleDBQuery.insert(conn, itemId, ts, + new TimescaleDBMapper.Row((double) seq++, null, null)); + } catch (SQLException e) { + acquireLats.add(nanosToMillis(System.nanoTime() - t0)); + errors.incrementAndGet(); + } + latencies.add(nanosToMillis(System.nanoTime() - t0)); + totalWrites.incrementAndGet(); + sleepMs(1_000); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + doneLatch.countDown(); + } + }); + } + + long wallStart = System.currentTimeMillis(); + startLatch.countDown(); + doneLatch.await(WRITE_DURATION_SECONDS + 10, TimeUnit.SECONDS); + pool.shutdown(); + long wallMs = System.currentTimeMillis() - wallStart; + + Latencies l = Latencies.of(new ArrayList<>(latencies)); + double throughput = totalWrites.get() * 1000.0 / wallMs; + LOGGER.info("T-02: {} writes in {} ms → {} writes/s, errors={}", totalWrites.get(), wallMs, throughput, + errors.get()); + l.print("T-02"); + Latencies al = Latencies.of(new ArrayList<>(acquireLats)); + al.print("T-02 acquisition"); + + assertEquals(0, errors.get(), "T-02: no write errors expected"); + assertTrue(l.p95 <= SLO_WRITE_P95_MS, + String.format("T-02 p95 %d ms exceeds SLO %d ms", l.p95, SLO_WRITE_P95_MS)); + assertTrue(l.p99 <= SLO_WRITE_P99_MS, + String.format("T-02 p99 %d ms exceeds SLO %d ms", l.p99, SLO_WRITE_P99_MS)); + assertTrue(al.p99 <= SLO_CONN_ACQ_P99_MS, + String.format("T-02 connection acquisition p99 %d ms exceeds SLO %d ms", al.p99, SLO_CONN_ACQ_P99_MS)); + double expectedThroughput = WRITE_THREADS * 0.8; + assertTrue(throughput >= expectedThroughput, String + .format("T-02: throughput %.1f writes/s below expected %.1f writes/s", throughput, expectedThroughput)); + } + + // ------------------------------------------------------------------ + // T-03 · Write Burst — Cold-Start Spike + // ------------------------------------------------------------------ + + 
@Test + @Order(3) + void t03WriteburstColdstart() throws Exception { + LOGGER.info("=== T-03: Write Burst ({} items) ===", BURST_ITEMS); + + int[] itemIds = new int[BURST_ITEMS]; + for (int i = 0; i < BURST_ITEMS; i++) { + itemIds[i] = getOrCreate("perf_t03_item_" + i, null); + } + + AtomicInteger errors = new AtomicInteger(0); + CountDownLatch latch = new CountDownLatch(BURST_ITEMS); + ExecutorService pool = Executors.newFixedThreadPool(Math.min(BURST_ITEMS, 50)); + ZonedDateTime now = ZonedDateTime.now(); + + long t0 = System.currentTimeMillis(); + for (int i = 0; i < BURST_ITEMS; i++) { + final int itemId = itemIds[i]; + pool.submit(() -> { + try (Connection conn = ds().getConnection()) { + TimescaleDBQuery.insert(conn, itemId, now, new TimescaleDBMapper.Row(1.0, null, null)); + } catch (SQLException e) { + errors.incrementAndGet(); + } finally { + latch.countDown(); + } + }); + } + + latch.await(60, TimeUnit.SECONDS); + pool.shutdown(); + long elapsed = System.currentTimeMillis() - t0; + LOGGER.info("T-03: {} burst inserts in {} ms, errors={}", BURST_ITEMS, elapsed, errors.get()); + + assertEquals(0, errors.get(), "T-03: no burst write errors expected"); + assertTrue(elapsed <= SLO_BURST_TOTAL_MS, + String.format("T-03: burst took %d ms, exceeds SLO %d ms", elapsed, SLO_BURST_TOTAL_MS)); + } + + // ------------------------------------------------------------------ + // T-04 · Query Latency — Single Time-Series + // ------------------------------------------------------------------ + + @Test + @Order(4) + void t04QuerylatencySingletimeseries() throws Exception { + LOGGER.info("=== T-04: Query Latency ({} rows pre-seeded) ===", QUERY_HISTORY_ROWS); + + int itemId = getOrCreate("perf_t04_sensor", null); + NumberItem item = new NumberItem("perf_t04_sensor"); + + // Seed rows spread over 30 days + ZonedDateTime base = ZonedDateTime.now().minusDays(30); + long intervalSeconds = (30L * 24 * 3600) / QUERY_HISTORY_ROWS; + try (Connection conn = ds().getConnection()) { + 
for (int i = 0; i < QUERY_HISTORY_ROWS; i++) { + TimescaleDBQuery.insert(conn, itemId, base.plusSeconds(i * intervalSeconds), + new TimescaleDBMapper.Row((double) i, null, null)); + } + } + LOGGER.info("T-04: seeded {} rows (interval ~{}s)", QUERY_HISTORY_ROWS, intervalSeconds); + + // Measure query latency for different time windows + ZonedDateTime now = ZonedDateTime.now(); + assertQueryWindow("T-04 last-1h", item, itemId, now.minusHours(1), now, SLO_QUERY_1H_P99_MS); + assertQueryWindow("T-04 last-24h", item, itemId, now.minusDays(1), now, SLO_QUERY_24H_P99_MS); + assertQueryWindow("T-04 last-7d", item, itemId, now.minusDays(7), now, SLO_QUERY_7D_P99_MS); + assertQueryWindow("T-04 last-30d", item, itemId, now.minusDays(30), now, SLO_QUERY_30D_P99_MS); + } + + // ------------------------------------------------------------------ + // T-05 · Query Latency — Pagination + // ------------------------------------------------------------------ + + @Test + @Order(5) + void t05QuerylatencyPagination() throws Exception { + LOGGER.info("=== T-05: Pagination (pageSize={}) ===", PAGE_SIZE); + + int itemId = getOrCreate("perf_t05_sensor", null); + NumberItem item = new NumberItem("perf_t05_sensor"); + + int totalRows = PAGE_SIZE * 10; + ZonedDateTime base = ZonedDateTime.now().minusHours(totalRows); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < totalRows; i++) { + TimescaleDBQuery.insert(conn, itemId, base.plusHours(i), + new TimescaleDBMapper.Row((double) i, null, null)); + } + } + + List latencies = new ArrayList<>(); + List allTimestamps = new ArrayList<>(); + + try (Connection conn = ds().getConnection()) { + for (int page = 0; page < 10; page++) { + FilterCriteria f = new FilterCriteria(); + f.setItemName("perf_t05_sensor"); + f.setOrdering(Ordering.ASCENDING); + f.setPageSize(PAGE_SIZE); + f.setPageNumber(page); + + long t0 = System.nanoTime(); + var results = TimescaleDBQuery.query(conn, item, itemId, f); + 
latencies.add(nanosToMillis(System.nanoTime() - t0)); + + assertEquals(PAGE_SIZE, results.size(), + String.format("T-05: page %d should have %d rows", page, PAGE_SIZE)); + + results.forEach(r -> allTimestamps.add(r.getTimestamp())); + } + } + + // Verify no overlapping timestamps across pages + long distinctCount = allTimestamps.stream().distinct().count(); + assertEquals(allTimestamps.size(), distinctCount, "T-05: pages must not overlap"); + + Latencies l = Latencies.of(latencies); + l.print("T-05"); + assertTrue(l.p99 <= SLO_PAGE_P99_MS, String.format("T-05 p99 %d ms exceeds SLO %d ms", l.p99, SLO_PAGE_P99_MS)); + } + + // ------------------------------------------------------------------ + // T-06 · Concurrent Read/Write — Mixed Load + // ------------------------------------------------------------------ + + @Test + @Order(6) + void t06ConcurrentreadwriteMixedload() throws Exception { + LOGGER.info("=== T-06: Mixed Load ({} writers, {} readers, {}s) ===", WRITE_THREADS, READ_THREADS, + WRITE_DURATION_SECONDS); + + // Seed one query item + int queryItemId = getOrCreate("perf_t06_query", null); + NumberItem queryItem = new NumberItem("perf_t06_query"); + ZonedDateTime base = ZonedDateTime.now().minusHours(1); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < 100; i++) { + TimescaleDBQuery.insert(conn, queryItemId, base.plusMinutes(i), + new TimescaleDBMapper.Row((double) i, null, null)); + } + } + + int[] writerItemIds = new int[WRITE_THREADS]; + for (int i = 0; i < WRITE_THREADS; i++) { + writerItemIds[i] = getOrCreate("perf_t06_writer_" + i, null); + } + + ConcurrentLinkedQueue writeLats = new ConcurrentLinkedQueue<>(); + ConcurrentLinkedQueue readLats = new ConcurrentLinkedQueue<>(); + AtomicInteger writeErrors = new AtomicInteger(0); + AtomicInteger readErrors = new AtomicInteger(0); + + long deadline = System.currentTimeMillis() + WRITE_DURATION_SECONDS * 1_000L; + CountDownLatch start = new CountDownLatch(1); + CountDownLatch done = new 
CountDownLatch(WRITE_THREADS + READ_THREADS); + + ExecutorService pool = Executors.newFixedThreadPool(WRITE_THREADS + READ_THREADS); + + // Writers + for (int t = 0; t < WRITE_THREADS; t++) { + final int itemId = writerItemIds[t]; + pool.submit(() -> { + try { + start.await(); + int seq = 0; + while (System.currentTimeMillis() < deadline) { + long t0 = System.nanoTime(); + try (Connection conn = ds().getConnection()) { + TimescaleDBQuery.insert(conn, itemId, ZonedDateTime.now(), + new TimescaleDBMapper.Row((double) seq++, null, null)); + } catch (SQLException e) { + writeErrors.incrementAndGet(); + } + writeLats.add(nanosToMillis(System.nanoTime() - t0)); + sleepMs(1_000); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + done.countDown(); + } + }); + } + + // Readers + for (int t = 0; t < READ_THREADS; t++) { + pool.submit(() -> { + try { + start.await(); + while (System.currentTimeMillis() < deadline) { + FilterCriteria f = new FilterCriteria(); + f.setItemName("perf_t06_query"); + f.setBeginDate(ZonedDateTime.now().minusHours(1)); + long t0 = System.nanoTime(); + try (Connection conn = ds().getConnection()) { + TimescaleDBQuery.query(conn, queryItem, queryItemId, f); + } catch (SQLException e) { + readErrors.incrementAndGet(); + } + readLats.add(nanosToMillis(System.nanoTime() - t0)); + sleepMs(500); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + done.countDown(); + } + }); + } + + start.countDown(); + done.await(WRITE_DURATION_SECONDS + 15, TimeUnit.SECONDS); + pool.shutdown(); + + Latencies wl = Latencies.of(new ArrayList<>(writeLats)); + Latencies rl = Latencies.of(new ArrayList<>(readLats)); + wl.print("T-06 writes"); + rl.print("T-06 reads"); + LOGGER.info("T-06: write errors={}, read errors={}", writeErrors.get(), readErrors.get()); + + assertEquals(0, writeErrors.get(), "T-06: no write errors during mixed load"); + assertEquals(0, readErrors.get(), "T-06: no read 
errors during mixed load"); + assertTrue(wl.p99 <= SLO_WRITE_P99_MS, + String.format("T-06 write p99 %d ms exceeds SLO %d ms", wl.p99, SLO_WRITE_P99_MS)); + assertTrue(rl.p99 <= SLO_READ_P99_MS, + String.format("T-06 read p99 %d ms exceeds SLO %d ms", rl.p99, SLO_READ_P99_MS)); + } + + // ------------------------------------------------------------------ + // T-07 · Downsampling Job — Runtime + // ------------------------------------------------------------------ + + @Test + @Order(7) + void t07DownsamplejobRuntime() throws Exception { + LOGGER.info("=== T-07: Downsampling Job ({} items × {} rows) ===", DOWNSAMPLE_ITEMS, DOWNSAMPLE_ROWS_PER_ITEM); + + // Seed raw data: DOWNSAMPLE_ROWS_PER_ITEM rows per item at 5-min intervals, all > 1 day old + ZonedDateTime base = ZonedDateTime.now().minusDays(2); + int[] ids = new int[DOWNSAMPLE_ITEMS]; + for (int i = 0; i < DOWNSAMPLE_ITEMS; i++) { + ids[i] = getOrCreate("perf_t07_item_" + i, null); + } + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < DOWNSAMPLE_ITEMS; i++) { + for (int r = 0; r < DOWNSAMPLE_ROWS_PER_ITEM; r++) { + TimescaleDBQuery.insert(conn, ids[i], base.plusMinutes(r * 5L), + new TimescaleDBMapper.Row((double) r, null, null)); + } + } + } + LOGGER.info("T-07: seeded {} raw rows", DOWNSAMPLE_ITEMS * DOWNSAMPLE_ROWS_PER_ITEM); + + // Build metadata: all items → AVG, 1h buckets, retainRawDays=0 + MetadataRegistry mr = mock(MetadataRegistry.class); + List metaList = new ArrayList<>(); + for (int i = 0; i < DOWNSAMPLE_ITEMS; i++) { + String name = "perf_t07_item_" + i; + Metadata m = new Metadata(new MetadataKey("timescaledb", name), "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", "0")); + metaList.add(m); + when(mr.get(new MetadataKey("timescaledb", name))).thenReturn(m); + } + when(mr.getAll()).thenAnswer(inv -> metaList); + + TimescaleDBMetadataService ms = new TimescaleDBMetadataService(mr); + Map idMap = new java.util.HashMap<>(); + for (int i = 0; i < DOWNSAMPLE_ITEMS; 
i++) { + idMap.put("perf_t07_item_" + i, ids[i]); + } + + TimescaleDBDownsampleJob job = new TimescaleDBDownsampleJob(ds(), ms); + + long t0 = System.currentTimeMillis(); + job.run(); + long elapsed = System.currentTimeMillis() - t0; + LOGGER.info("T-07: job completed in {} ms", elapsed); + + // Verify: no raw rows remain for any of the test items + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < DOWNSAMPLE_ITEMS; i++) { + int id = ids[i]; + try (PreparedStatement ps = conn + .prepareStatement("SELECT COUNT(*) FROM items WHERE item_id = ? AND downsampled = FALSE")) { + ps.setInt(1, id); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1), String.format("T-07: item %d still has raw rows after job", i)); + } + } + } + + // Verify aggregated rows exist + try (PreparedStatement ps = conn + .prepareStatement("SELECT COUNT(*) FROM items WHERE item_id = ? AND downsampled = TRUE")) { + ps.setInt(1, ids[0]); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) > 0, "T-07: no aggregated rows found for item_0"); + } + } + } + + assertTrue(elapsed <= SLO_DOWNSAMPLE_JOB_MS, + String.format("T-07: job took %d ms, exceeds SLO %d ms", elapsed, SLO_DOWNSAMPLE_JOB_MS)); + } + + // ------------------------------------------------------------------ + // T-08 · Downsampling Job — Correctness Under Concurrent Writes + // ------------------------------------------------------------------ + + @Test + @Order(8) + void t08DownsamplejobCorrectnessunderload() throws Exception { + LOGGER.info("=== T-08: Downsampling Correctness Under Concurrent Writes ==="); + + int itemId = getOrCreate("perf_t08_item", null); + + // Ensure a clean slate so repeated runs on the same DB don't carry over stale data + try (Connection conn = ds().getConnection(); + PreparedStatement ps = conn.prepareStatement("DELETE FROM items WHERE item_id = ?")) { + ps.setInt(1, itemId); + ps.executeUpdate(); + } + + // 
Seed 2-day-old raw data to be aggregated + ZonedDateTime old = ZonedDateTime.now().minusDays(2); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < 50; i++) { + TimescaleDBQuery.insert(conn, itemId, old.plusMinutes(i * 30L), + new TimescaleDBMapper.Row((double) i, null, null)); + } + } + + // Count raw rows before + long rawBefore = countRows(itemId, false); + LOGGER.info("T-08: raw rows before job: {}", rawBefore); + + // Start concurrent writers writing *current* timestamps (not eligible for this job run) + AtomicInteger writeErrors = new AtomicInteger(0); + CountDownLatch writersDone = new CountDownLatch(5); + long deadline = System.currentTimeMillis() + 10_000; + + for (int t = 0; t < 5; t++) { + Thread.ofVirtual().start(() -> { + int seq = 0; + while (System.currentTimeMillis() < deadline) { + try (Connection conn = ds().getConnection()) { + TimescaleDBQuery.insert(conn, itemId, ZonedDateTime.now(), + new TimescaleDBMapper.Row((double) seq, null, null)); + } catch (SQLException e) { + writeErrors.incrementAndGet(); + } + sleepMs(200); + } + writersDone.countDown(); + }); + } + + // Run job while writers are active + MetadataRegistry mr = mock(MetadataRegistry.class); + Metadata m = new Metadata(new MetadataKey("timescaledb", "perf_t08_item"), "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", "0")); + when(mr.get(new MetadataKey("timescaledb", "perf_t08_item"))).thenReturn(m); + when(mr.getAll()).thenAnswer(inv -> List.of(m)); + + new TimescaleDBDownsampleJob(ds(), new TimescaleDBMetadataService(mr)).run(); + + writersDone.await(15, TimeUnit.SECONDS); + + // Old raw rows must be gone + long rawOldAfter = countRowsBefore(itemId, false, ZonedDateTime.now().minusDays(1)); + assertEquals(0, rawOldAfter, "T-08: old raw rows must be gone after job"); + + // Aggregated rows must exist + long agg = countRows(itemId, true); + assertTrue(agg > 0, "T-08: aggregated rows must exist after job"); + + // Recent raw rows must still exist: 
concurrent current writes continue while old data is downsampled. + long rawRecentAfter = countRowsAfter(itemId, false, ZonedDateTime.now().minusMinutes(1)); + assertTrue(rawRecentAfter > 0, + "T-08: recent raw rows must remain after downsampling while concurrent writes are active"); + + // No write errors from concurrent writers + assertEquals(0, writeErrors.get(), "T-08: concurrent writes must not error"); + + // Verify no duplicate aggregated rows (one row per time bucket per item) + try (Connection conn = ds().getConnection(); + PreparedStatement dupPs = conn.prepareStatement( + "SELECT COUNT(*) FROM (SELECT time FROM items WHERE item_id = ? AND downsampled = TRUE GROUP BY time HAVING COUNT(*) > 1) AS dups")) { + dupPs.setInt(1, itemId); + try (ResultSet dupRs = dupPs.executeQuery()) { + assertTrue(dupRs.next()); + assertEquals(0L, dupRs.getLong(1), "T-08: duplicate aggregated rows must not exist"); + } + } + + LOGGER.info("T-08: old raw rows after={}, recent raw rows={}, aggregated={}, write errors={}", rawOldAfter, + rawRecentAfter, agg, writeErrors.get()); + } + + // ------------------------------------------------------------------ + // T-09 · Retention Cleanup + // ------------------------------------------------------------------ + + @Test + @Order(9) + void t09Retentioncleanup() throws Exception { + LOGGER.info("=== T-09: Retention Cleanup ({} items) ===", RETENTION_ITEMS); + + int[] ids = new int[RETENTION_ITEMS]; + for (int i = 0; i < RETENTION_ITEMS; i++) { + ids[i] = getOrCreate("perf_t09_item_" + i, null); + } + + // Seed 6-day-old rows (must be deleted) and 2-day-old rows (must survive with 5d retention) + ZonedDateTime old = ZonedDateTime.now().minusDays(6); + ZonedDateTime recent = ZonedDateTime.now().minusDays(2); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < RETENTION_ITEMS; i++) { + for (int r = 0; r < 10; r++) { + TimescaleDBQuery.insert(conn, ids[i], old.plusHours(r), + new TimescaleDBMapper.Row((double) r, null, 
null)); + TimescaleDBQuery.insert(conn, ids[i], recent.plusHours(r), + new TimescaleDBMapper.Row((double) r, null, null)); + } + } + } + + // Build metadata: retentionDays=5, retainRawDays=0 + MetadataRegistry mr = mock(MetadataRegistry.class); + List metaList = new ArrayList<>(); + for (int i = 0; i < RETENTION_ITEMS; i++) { + String name = "perf_t09_item_" + i; + Metadata meta = new Metadata(new MetadataKey("timescaledb", name), "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", "0", "retentionDays", "5")); + metaList.add(meta); + when(mr.get(new MetadataKey("timescaledb", name))).thenReturn(meta); + } + when(mr.getAll()).thenAnswer(inv -> metaList); + + TimescaleDBMetadataService ms = new TimescaleDBMetadataService(mr); + Map idMap = new java.util.HashMap<>(); + for (int i = 0; i < RETENTION_ITEMS; i++) { + idMap.put("perf_t09_item_" + i, ids[i]); + } + + // Start concurrent writers to validate write latency is not blocked during cleanup + ConcurrentLinkedQueue writeLats = new ConcurrentLinkedQueue<>(); + AtomicInteger writerErrors = new AtomicInteger(0); + int writerCount = 3; + CountDownLatch writersDone = new CountDownLatch(writerCount); + int writerItemId = getOrCreate("perf_t09_writer", null); + AtomicInteger writerSeq = new AtomicInteger(0); + long writerDeadline = System.currentTimeMillis() + SLO_RETENTION_MS + 5_000; + for (int w = 0; w < writerCount; w++) { + Thread.ofVirtual().start(() -> { + while (System.currentTimeMillis() < writerDeadline) { + long wt0 = System.nanoTime(); + try (Connection conn = ds().getConnection()) { + TimescaleDBQuery.insert(conn, writerItemId, + ZonedDateTime.now().plusSeconds(writerSeq.getAndIncrement()), + new TimescaleDBMapper.Row(1.0, null, null)); + } catch (SQLException e) { + writerErrors.incrementAndGet(); + } + writeLats.add(nanosToMillis(System.nanoTime() - wt0)); + sleepMs(100); + } + writersDone.countDown(); + }); + } + + long t0 = System.currentTimeMillis(); + new TimescaleDBDownsampleJob(ds(), 
ms).run(); + long elapsed = System.currentTimeMillis() - t0; + + writersDone.await(SLO_RETENTION_MS / 1000 + 10, TimeUnit.SECONDS); + Latencies wl = Latencies.of(new ArrayList<>(writeLats)); + wl.print("T-09 write during cleanup"); + LOGGER.info("T-09: cleanup completed in {} ms, write errors={}", elapsed, writerErrors.get()); + + // All 6-day-old rows must be gone for every item + try (Connection conn = ds().getConnection()) { + for (int id : ids) { + long old6d = countRowsBefore(id, null, ZonedDateTime.now().minusDays(5)); + assertEquals(0, old6d, String.format("T-09: item_id=%d still has rows older than 5 days", id)); + } + } + + assertTrue(elapsed <= SLO_RETENTION_MS, + String.format("T-09: cleanup took %d ms, exceeds SLO %d ms", elapsed, SLO_RETENTION_MS)); + assertEquals(0, writerErrors.get(), "T-09: no write errors during concurrent cleanup"); + assertTrue(wl.p99 <= SLO_WRITE_P99_MS, + String.format("T-09: write p99 %d ms during cleanup exceeds SLO %d ms", wl.p99, SLO_WRITE_P99_MS)); + } + + // ------------------------------------------------------------------ + // T-10 · Connection Pool Saturation + // ------------------------------------------------------------------ + + @Test + @Order(10) + void t10ConnectionpoolSaturation() throws Exception { + LOGGER.info("=== T-10: Connection Pool Saturation (pool=3, 15 concurrent writers) ==="); + + // Create a separate pool with only 3 connections + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl(ds().getJdbcUrl()); + cfg.setUsername(ds().getUsername()); + cfg.setPassword(ds().getPassword()); + cfg.setMaximumPoolSize(3); + cfg.setMinimumIdle(1); + cfg.setConnectionTimeout(5_000); + applyBoundedStartupTimeouts(cfg); + cfg.setPoolName("perf-saturation"); + + int threadCount = 15; + int itemId = getOrCreate("perf_t10_item", null); + long rowsBefore = countRows(itemId, false); + + HikariDataSource smallPool; + try { + smallPool = new HikariDataSource(cfg); + } catch (Exception initEx) { + assumeTrue(false, "T-10 
skipped: second pool could not be initialised (infrastructure constraint): " + + initEx.getMessage()); + return; // unreachable — satisfies flow analysis + } + + try (HikariDataSource saturatedPool = smallPool) { + ConcurrentLinkedQueue acquisitionTimes = new ConcurrentLinkedQueue<>(); + AtomicInteger timeouts = new AtomicInteger(0); + CountDownLatch latch = new CountDownLatch(threadCount); + CountDownLatch start = new CountDownLatch(1); + + ExecutorService pool = Executors.newFixedThreadPool(threadCount); + for (int t = 0; t < threadCount; t++) { + final int ti = t; + pool.submit(() -> { + try { + start.await(); + long t0 = System.nanoTime(); + try (Connection conn = saturatedPool.getConnection()) { + acquisitionTimes.add(nanosToMillis(System.nanoTime() - t0)); + TimescaleDBQuery.insert(conn, itemId, ZonedDateTime.now().plusSeconds(ti * 10L), + new TimescaleDBMapper.Row(1.0, null, null)); + sleepMs(100); // hold connection briefly + } catch (SQLException e) { + acquisitionTimes.add(nanosToMillis(System.nanoTime() - t0)); + String msg = e.getMessage(); + if (msg != null && msg.contains("timeout")) { + timeouts.incrementAndGet(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + latch.countDown(); + } + }); + } + + start.countDown(); + latch.await(30, TimeUnit.SECONDS); + pool.shutdown(); + + Latencies l = Latencies.of(new ArrayList<>(acquisitionTimes)); + l.print("T-10 acquisition"); + LOGGER.info("T-10: timeouts={} out of {} threads", timeouts.get(), threadCount); + + // The fastest poolSize threads get a connection immediately; check their max acquisition time. + int poolSize = cfg.getMaximumPoolSize(); + List sortedTimes = new ArrayList<>(acquisitionTimes); + Collections.sort(sortedTimes); + long fastMax = sortedTimes.isEmpty() ? 
0L : sortedTimes.get(Math.min(poolSize, sortedTimes.size()) - 1); + assertTrue(fastMax <= SLO_T10_FAST_ACQ_MAX_MS, + String.format("T-10: fastest %d threads max acquisition %d ms too slow (expected <= %d ms)", + poolSize, fastMax, SLO_T10_FAST_ACQ_MAX_MS)); + + // Verify no silent data loss: successful connections must have written their row. + long rowsAfter = countRows(itemId, false); + long expectedRows = rowsBefore + (threadCount - timeouts.get()); + assertTrue(rowsAfter >= expectedRows, String + .format("T-10: silent drop detected — expected >= %d rows but found %d", expectedRows, rowsAfter)); + } + } + + // ------------------------------------------------------------------ + // T-11 · Schema Initialisation Speed & Idempotency + // ------------------------------------------------------------------ + + @Test + @Order(11) + void t11SchemainitSpeedandidempotency() throws Exception { + LOGGER.info("=== T-11: Schema Init Speed & Idempotency ==="); + + // Note: first-init against an empty DB (spec: < 2 s) is not separately timed here because + // TimescaleDBSchema.initialize() was already called in @BeforeAll. Dropping and re-creating + // the schema would be too destructive in a shared integration-test environment. + // The re-init timing below covers the idempotency branch (tables already exist). 
+ + // Repeated init on existing schema + long t0 = System.currentTimeMillis(); + try (Connection conn = ds().getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + long elapsed = System.currentTimeMillis() - t0; + LOGGER.info("T-11: re-init on existing schema: {} ms", elapsed); + + assertTrue(elapsed <= SLO_SCHEMA_REINIT_MS, + String.format("T-11: re-init took %d ms, exceeds SLO %d ms", elapsed, SLO_SCHEMA_REINIT_MS)); + + // Must be idempotent + assertDoesNotThrow(() -> { + try (Connection conn = ds().getConnection()) { + TimescaleDBSchema.initialize(conn, "7 days", 0, 0); + } + }, "T-11: repeated init must not throw"); + } + + // ------------------------------------------------------------------ + // T-12 · Long-Horizon Dataset (18 months) using bulk direct write + // ------------------------------------------------------------------ + + @Test + @Order(12) + @Timeout(value = 45, unit = TimeUnit.MINUTES) + void t12LonghorizonBulkseed18months() throws Exception { + assumeTrue(envFlag("TIMESCALEDB_ENABLE_HEAVY_18M", false), + "T-12 disabled: set TIMESCALEDB_ENABLE_HEAVY_18M=true to run heavy 18-month bulk test"); + + long days = envLong("TIMESCALEDB_T12_DAYS", 548L); + int itemCount = (int) envLong("TIMESCALEDB_T12_ITEM_COUNT", 3_000L); + long totalRows = envLong("TIMESCALEDB_T12_TARGET_ROWS", theoreticalDownsampledRowsFor18MonthProfile(days)); + int segmentRows = (int) envLong("TIMESCALEDB_T12_SEGMENT_ROWS", 1_000_000L); + int seedThreads = (int) envLong("TIMESCALEDB_T12_SEED_THREADS", + Math.max(2, Math.min(12, Runtime.getRuntime().availableProcessors()))); + int itemBatchSize = (int) envLong("TIMESCALEDB_T12_ITEM_BATCH_SIZE", 100L); + int retainRawDays = (int) envLong("TIMESCALEDB_T12_RETAIN_RAW_DAYS", 1L); + int rawWindowDays = (int) envLong("TIMESCALEDB_T12_RAW_WINDOW_DAYS", Math.max(2, retainRawDays + 1)); + long rowsPerItem = Math.max(1L, totalRows / itemCount); + long rawRowsPerItem = Math.max(1L, rawWindowDays * 24L * 12L); + + 
LOGGER.info( + "=== T-12: Heavy 18M server-side dataset (days={}, items={}, model=downsampled, targetRows={}, rows/item={}, rawWindowDays={}, rawRows/item={}, retainRawDays={}, segmentRows={}, seedThreads={}, itemBatchSize={}) ===", + days, itemCount, totalRows, rowsPerItem, rawWindowDays, rawRowsPerItem, retainRawDays, segmentRows, + seedThreads, itemBatchSize); + + int[] ids = new int[itemCount]; + for (int i = 0; i < itemCount; i++) { + ids[i] = getOrCreate("perf_t12_item_" + i, null); + } + + logInfo("T-12 cleanup start: deleting existing rows for {} items", itemCount); + long deletedRows = deleteRowsByIds(ids); + logInfo("T-12 cleanup done: deletedRows={}", deletedRows); + + Instant historicalStart = ZonedDateTime.now().minusDays(days).toInstant(); + long historicalStepSeconds = Math.max(1L, (days * 86_400L) / rowsPerItem); + + List itemBatches = partitionIds(ids, Math.max(1, itemBatchSize)); + long historicalStepsPerBatch = (rowsPerItem + segmentRows - 1L) / segmentRows; + long historicalExpectedSqlBatches = itemBatches.size() * historicalStepsPerBatch; + long historicalTotalRows = rowsPerItem * itemCount; + AtomicLong insertedRows = new AtomicLong(0); + AtomicLong startedExecutions = new AtomicLong(0); + AtomicLong completedExecutions = new AtomicLong(0); + long seedStartNs = System.nanoTime(); + + logInfo("T-12 seed historical start: targetRows={} expectedSqlBatches={} rowsPerItem={} stepSeconds={} segmentRows={} seedThreads={} itemBatchSize={}", + historicalTotalRows, historicalExpectedSqlBatches, rowsPerItem, historicalStepSeconds, segmentRows, + seedThreads, itemBatchSize); + + ExecutorService seedPool = Executors.newFixedThreadPool(Math.max(1, Math.min(seedThreads, itemBatches.size()))); + try { + List> futures = new ArrayList<>(); + for (int[] batch : itemBatches) { + futures.add(seedPool.submit(() -> { + try (Connection conn = ds().getConnection()) { + seedRowsServerSideBatch(conn, "historical", true, batch, historicalStart, rowsPerItem, + 
historicalStepSeconds, segmentRows, insertedRows, historicalTotalRows, + historicalExpectedSqlBatches, seedStartNs, startedExecutions, completedExecutions); + } catch (SQLException e) { + throw new IllegalStateException("Failed to seed historical rows", e); + } + })); + } + for (var future : futures) { + future.get(); + } + } finally { + seedPool.shutdown(); + seedPool.awaitTermination(10, TimeUnit.MINUTES); + } + + long elapsedMs = Math.max(1L, nanosToMillis(System.nanoTime() - seedStartNs)); + long createdTotal = insertedRows.get(); + long remaining = Math.max(0L, historicalTotalRows - createdTotal); + long rate = (createdTotal * 1000L) / elapsedMs; + logInfo("T-12 seed historical done: createdTotal={} remaining={} elapsedMs={} avgRate={} rows/s", createdTotal, + remaining, elapsedMs, rate); + + Instant rawWindowStart = ZonedDateTime.now().minusDays(rawWindowDays).toInstant(); + long rawStepSeconds = 300L; + long rawStepsPerBatch = (rawRowsPerItem + segmentRows - 1L) / segmentRows; + long rawExpectedSqlBatches = itemBatches.size() * rawStepsPerBatch; + long rawTotalRows = rawRowsPerItem * itemCount; + + insertedRows.set(0); + startedExecutions.set(0); + completedExecutions.set(0); + long rawSeedStartNs = System.nanoTime(); + + logInfo("T-12 seed raw-window start: targetRows={} expectedSqlBatches={} rowsPerItem={} stepSeconds={} windowDays={} retainRawDays={}", + rawTotalRows, rawExpectedSqlBatches, rawRowsPerItem, rawStepSeconds, rawWindowDays, retainRawDays); + + ExecutorService rawSeedPool = Executors + .newFixedThreadPool(Math.max(1, Math.min(seedThreads, itemBatches.size()))); + try { + List> rawFutures = new ArrayList<>(); + for (int[] batch : itemBatches) { + rawFutures.add(rawSeedPool.submit(() -> { + try (Connection conn = ds().getConnection()) { + seedRowsServerSideBatch(conn, "raw-window", false, batch, rawWindowStart, rawRowsPerItem, + rawStepSeconds, segmentRows, insertedRows, rawTotalRows, rawExpectedSqlBatches, + rawSeedStartNs, startedExecutions, 
completedExecutions); + } catch (SQLException e) { + throw new IllegalStateException("Failed to seed raw-window rows", e); + } + })); + } + for (var future : rawFutures) { + future.get(); + } + } finally { + rawSeedPool.shutdown(); + rawSeedPool.awaitTermination(10, TimeUnit.MINUTES); + } + + long rawElapsedMs = Math.max(1L, nanosToMillis(System.nanoTime() - rawSeedStartNs)); + long rawCreatedTotal = insertedRows.get(); + long rawRemaining = Math.max(0L, rawTotalRows - rawCreatedTotal); + long rawRate = (rawCreatedTotal * 1000L) / rawElapsedMs; + logInfo("T-12 seed raw-window done: createdTotal={} remaining={} elapsedMs={} avgRate={} rows/s", + rawCreatedTotal, rawRemaining, rawElapsedMs, rawRate); + + long seededDownsampledRows = countRowsByIds(ids, true); + long downsampledRemaining = Math.max(0L, historicalTotalRows - seededDownsampledRows); + logInfo("T-12 seed verify historical: createdDownsampledRows={} targetRows={} remainingRows={}", + seededDownsampledRows, historicalTotalRows, downsampledRemaining); + assertEquals(historicalTotalRows, seededDownsampledRows, + String.format("T-12 historical seed mismatch: expected %d rows, found %d", historicalTotalRows, + seededDownsampledRows)); + + ZonedDateTime cutoffBeforeJob = ZonedDateTime.now().minusDays(retainRawDays); + long rawOldBefore = countRowsByIdsBefore(ids, false, cutoffBeforeJob); + long rawRecentBefore = countRowsByIdsAfter(ids, false, cutoffBeforeJob); + logInfo("T-12 seed verify raw-window: rawOldBefore={} rawRecentBefore={} cutoffDays={}", rawOldBefore, + rawRecentBefore, retainRawDays); + assertTrue(rawOldBefore > 0L, + "T-12 raw-window seed mismatch: expected pending raw rows older than retainRawDays"); + assertTrue(rawRecentBefore > 0L, + "T-12 raw-window seed mismatch: expected recent raw rows within retainRawDays"); + + int writeSamples = (int) envLong("TIMESCALEDB_T12_WRITE_SAMPLES", 500L); + measureHeavyDatasetWriteLatency(ids, writeSamples); + + long downsampleElapsed = 
runHeavyDatasetDownsample(ids, retainRawDays); + logInfo("T-12 downsample SLO check: elapsed={} ms <= {} ms", downsampleElapsed, SLO_DOWNSAMPLE_JOB_18M_MS); + assertTrue(downsampleElapsed <= SLO_DOWNSAMPLE_JOB_18M_MS, String + .format("T-12 downsample took %d ms, exceeds SLO %d ms", downsampleElapsed, SLO_DOWNSAMPLE_JOB_18M_MS)); + + NumberItem queryItem = new NumberItem("perf_t12_item_0"); + ZonedDateTime now = ZonedDateTime.now(); + assertQueryWindow("T-12 last-1h", queryItem, ids[0], now.minusHours(1), now, SLO_QUERY_1H_P99_MS); + assertQueryWindow("T-12 last-7d", queryItem, ids[0], now.minusDays(7), now, SLO_QUERY_7D_P99_MS); + assertQueryWindow("T-12 last-30d", queryItem, ids[0], now.minusDays(30), now, SLO_QUERY_30D_P99_18M_MS); + } + + // ------------------------------------------------------------------ + // Helpers + // ------------------------------------------------------------------ + + private static int getOrCreate(String name, @Nullable String label) throws SQLException { + try (Connection conn = ds().getConnection()) { + return TimescaleDBQuery.getOrCreateItemId(conn, name, label); + } + } + + private static long countRows(int itemId, boolean downsampled) throws SQLException { + try (Connection conn = ds().getConnection(); + PreparedStatement ps = conn + .prepareStatement("SELECT COUNT(*) FROM items WHERE item_id = ? AND downsampled = ?")) { + ps.setInt(1, itemId); + ps.setBoolean(2, downsampled); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } + } + + private static long countRowsBefore(int itemId, @Nullable Boolean downsampled, ZonedDateTime before) + throws SQLException { + String sql = downsampled == null ? "SELECT COUNT(*) FROM items WHERE item_id = ? AND time < ?" + : "SELECT COUNT(*) FROM items WHERE item_id = ? AND time < ? 
AND downsampled = " + downsampled; + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + ps.setInt(1, itemId); + ps.setTimestamp(2, java.sql.Timestamp.from(before.toInstant())); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } + } + + private static long countRowsAfter(int itemId, @Nullable Boolean downsampled, ZonedDateTime after) + throws SQLException { + String sql = downsampled == null ? "SELECT COUNT(*) FROM items WHERE item_id = ? AND time >= ?" + : "SELECT COUNT(*) FROM items WHERE item_id = ? AND time >= ? AND downsampled = " + downsampled; + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + ps.setInt(1, itemId); + ps.setTimestamp(2, java.sql.Timestamp.from(after.toInstant())); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } + } + + private void assertQueryWindow(String label, NumberItem item, int itemId, ZonedDateTime from, ZonedDateTime to, + long sloP99Ms) throws SQLException { + int runs = 10; + List latencies = new ArrayList<>(runs); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < runs; i++) { + FilterCriteria f = new FilterCriteria(); + f.setItemName(item.getName()); + f.setBeginDate(from); + f.setEndDate(to); + f.setOrdering(Ordering.DESCENDING); + long t0 = System.nanoTime(); + TimescaleDBQuery.query(conn, item, itemId, f); + latencies.add(nanosToMillis(System.nanoTime() - t0)); + } + } + Latencies l = Latencies.of(latencies); + l.print(label); + logInfo("{} SLO check: p99={} ms <= {} ms", label, l.p99, sloP99Ms); + assertTrue(l.p99 <= sloP99Ms, String.format("%s p99 %d ms exceeds SLO %d ms", label, l.p99, sloP99Ms)); + } + + private void measureHeavyDatasetWriteLatency(int[] ids, int writeSamples) throws SQLException { + int samples = Math.max(1, writeSamples); + logInfo("T-12 write phase start: persistence-layer inserts samples={}", samples); + + List 
latencies = new ArrayList<>(samples); + ZonedDateTime base = ZonedDateTime.now(); + try (Connection conn = ds().getConnection()) { + for (int i = 0; i < samples; i++) { + int itemId = ids[i % ids.length]; + long t0 = System.nanoTime(); + TimescaleDBQuery.insert(conn, itemId, base.plusSeconds(i), + new TimescaleDBMapper.Row((double) i, null, null)); + latencies.add(nanosToMillis(System.nanoTime() - t0)); + } + } + + Latencies l = Latencies.of(latencies); + l.print("T-12 writes"); + logInfo("T-12 writes SLO check: p95={} ms <= {} ms, p99={} ms <= {} ms", l.p95, SLO_WRITE_P95_MS, l.p99, + SLO_WRITE_P99_MS); + assertTrue(l.p95 <= SLO_WRITE_P95_MS, + String.format("T-12 writes p95 %d ms exceeds SLO %d ms", l.p95, SLO_WRITE_P95_MS)); + assertTrue(l.p99 <= SLO_WRITE_P99_MS, + String.format("T-12 writes p99 %d ms exceeds SLO %d ms", l.p99, SLO_WRITE_P99_MS)); + } + + private long runHeavyDatasetDownsample(int[] ids, int retainRawDays) throws SQLException { + logInfo("T-12 downsample phase start: items={} mode=AVG interval=1h retainRawDays={}", ids.length, + retainRawDays); + + MetadataRegistry mr = mock(MetadataRegistry.class); + List metaList = new ArrayList<>(ids.length); + Map idMap = new HashMap<>(ids.length); + + for (int i = 0; i < ids.length; i++) { + String name = "perf_t12_item_" + i; + Metadata m = new Metadata(new MetadataKey("timescaledb", name), "AVG", + Map.of("downsampleInterval", "1h", "retainRawDays", String.valueOf(retainRawDays))); + metaList.add(m); + idMap.put(name, ids[i]); + when(mr.get(new MetadataKey("timescaledb", name))).thenReturn(m); + } + when(mr.getAll()).thenAnswer(inv -> metaList); + + TimescaleDBDownsampleJob job = new TimescaleDBDownsampleJob(ds(), new TimescaleDBMetadataService(mr)); + + long t0 = System.currentTimeMillis(); + job.run(); + long elapsed = System.currentTimeMillis() - t0; + + ZonedDateTime cutoff = ZonedDateTime.now().minusDays(retainRawDays); + long rawOldAfter = countRowsByIdsBefore(ids, false, cutoff); + long 
rawRecentAfter = countRowsByIdsAfter(ids, false, cutoff); + long downsampledAfter = countRowsByIds(ids, true); + logInfo("T-12 downsample phase done: elapsed={} ms rawOldAfter={} rawRecentAfter={} downsampledAfter={}", + elapsed, rawOldAfter, rawRecentAfter, downsampledAfter); + + assertEquals(0L, rawOldAfter, "T-12 downsample: old raw rows must be removed for configured items"); + assertTrue(downsampledAfter > 0L, "T-12 downsample: aggregated rows must exist after job"); + return elapsed; + } + + private static long nanosToMillis(long nanos) { + return nanos / 1_000_000; + } + + private static void seedRowsServerSideBatch(Connection conn, String phase, boolean downsampled, int[] itemIds, + Instant start, long rowsPerItem, long stepSeconds, int segmentRows, AtomicLong insertedRows, + long totalRowsAllItems, long expectedSqlBatches, long startNs, AtomicLong startedExecutions, + AtomicLong completedExecutions) throws SQLException { + final String sql = """ + INSERT INTO items (time, item_id, value, unit, downsampled) + SELECT (?::timestamptz + (gs * ? * interval '1 second')) AS time, + i.item_id AS item_id, + ((gs + i.item_id) % 10000)::double precision AS value, + NULL AS unit, + ? AS downsampled + FROM unnest(?::integer[]) AS i(item_id) + CROSS JOIN generate_series(?, ?) 
AS gs + """; + + long insertedPerItem = 0; + Integer[] boxedIds = Arrays.stream(itemIds).boxed().toArray(Integer[]::new); + while (insertedPerItem < rowsPerItem) { + long from = insertedPerItem; + long to = Math.min(rowsPerItem - 1, insertedPerItem + segmentRows - 1L); + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + ps.setTimestamp(1, java.sql.Timestamp.from(start)); + ps.setLong(2, stepSeconds); + ps.setBoolean(3, downsampled); + java.sql.Array itemArray = conn.createArrayOf("integer", boxedIds); + try { + ps.setArray(4, itemArray); + ps.setLong(5, from); + ps.setLong(6, to); + long sqlBatchId = startedExecutions.incrementAndGet(); + long batchRowsPlanned = (to - from + 1L) * itemIds.length; + logInfo("T-12 {} batch start: sqlBatch={}/{} batchItems={} itemRowRange={}-{} batchRowsPlanned={} downsampled={}", + phase, sqlBatchId, expectedSqlBatches, itemIds.length, from, to, batchRowsPlanned, + downsampled); + int insertedNow = ps.executeUpdate(); + long totalInserted = insertedRows.addAndGet(insertedNow); + long completed = completedExecutions.incrementAndGet(); + long elapsedMs = Math.max(1L, nanosToMillis(System.nanoTime() - startNs)); + long rate = (totalInserted * 1000L) / elapsedMs; + double percent = (totalInserted * 100.0) / totalRowsAllItems; + long remaining = Math.max(0L, totalRowsAllItems - totalInserted); + logInfo("T-12 {} batch done: sqlBatch={}/{} insertedBatchRows={} createdTotal={}/{} remaining={} progress={}% rate={} rows/s completedSqlBatches={} batchItems={} itemRowRange={}-{} elapsed={} ms downsampled={}", + phase, sqlBatchId, expectedSqlBatches, insertedNow, totalInserted, totalRowsAllItems, + remaining, String.format("%.2f", percent), rate, completed, itemIds.length, from, to, + elapsedMs, downsampled); + } finally { + itemArray.free(); + } + } + insertedPerItem = to + 1; + } + } + + private static long deleteRowsByIds(int[] ids) throws SQLException { + final String sql = "DELETE FROM items WHERE item_id = ANY(?::integer[])"; + 
Integer[] boxedIds = Arrays.stream(ids).boxed().toArray(Integer[]::new); + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + java.sql.Array itemArray = conn.createArrayOf("integer", boxedIds); + try { + ps.setArray(1, itemArray); + return ps.executeUpdate(); + } finally { + itemArray.free(); + } + } + } + + private static long countRowsByIds(int[] ids, boolean downsampled) throws SQLException { + final String sql = "SELECT COUNT(*) FROM items WHERE item_id = ANY(?::integer[]) AND downsampled = ?"; + Integer[] boxedIds = Arrays.stream(ids).boxed().toArray(Integer[]::new); + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + java.sql.Array itemArray = conn.createArrayOf("integer", boxedIds); + try { + ps.setArray(1, itemArray); + ps.setBoolean(2, downsampled); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } finally { + itemArray.free(); + } + } + } + + private static long countRowsByIdsBefore(int[] ids, boolean downsampled, ZonedDateTime cutoff) throws SQLException { + final String sql = "SELECT COUNT(*) FROM items WHERE item_id = ANY(?::integer[]) AND downsampled = ? AND time < ?"; + Integer[] boxedIds = Arrays.stream(ids).boxed().toArray(Integer[]::new); + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + java.sql.Array itemArray = conn.createArrayOf("integer", boxedIds); + try { + ps.setArray(1, itemArray); + ps.setBoolean(2, downsampled); + ps.setTimestamp(3, java.sql.Timestamp.from(cutoff.toInstant())); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } finally { + itemArray.free(); + } + } + } + + private static long countRowsByIdsAfter(int[] ids, boolean downsampled, ZonedDateTime cutoff) throws SQLException { + final String sql = "SELECT COUNT(*) FROM items WHERE item_id = ANY(?::integer[]) AND downsampled = ? 
AND time >= ?"; + Integer[] boxedIds = Arrays.stream(ids).boxed().toArray(Integer[]::new); + try (Connection conn = ds().getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { + java.sql.Array itemArray = conn.createArrayOf("integer", boxedIds); + try { + ps.setArray(1, itemArray); + ps.setBoolean(2, downsampled); + ps.setTimestamp(3, java.sql.Timestamp.from(cutoff.toInstant())); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + return rs.getLong(1); + } + } finally { + itemArray.free(); + } + } + } + + private static void logInfo(String messagePattern, Object... args) { + String msg = Objects.requireNonNullElse(MessageFormatter.arrayFormat(messagePattern, args).getMessage(), + messagePattern); + LOGGER.info(msg); + } + + private static List partitionIds(int[] ids, int batchSize) { + List batches = new ArrayList<>(); + for (int i = 0; i < ids.length; i += batchSize) { + int end = Math.min(ids.length, i + batchSize); + batches.add(Arrays.copyOfRange(ids, i, end)); + } + return batches; + } + + private static long theoreticalDownsampledRowsFor18MonthProfile(long days) { + // Approximation based on documented downsample intervals in PERFORMANCE_TESTS.md. + long retainedRowsPerDay = (300L * 96L) + (1_200L * 24L) + (1_500L); + return days * retainedRowsPerDay; + } + + private static long envLong(String key, long defaultValue) { + String raw = System.getenv(key); + if (raw == null || raw.isBlank()) { + return defaultValue; + } + try { + return Long.parseLong(raw.trim()); + } catch (NumberFormatException e) { + return defaultValue; + } + } + + private static boolean envFlag(String key, boolean defaultValue) { + String raw = System.getenv(key); + if (raw == null || raw.isBlank()) { + return defaultValue; + } + return Boolean.parseBoolean(raw.trim()); + } + + private static void applyBoundedStartupTimeouts(HikariConfig cfg) { + // Keep startup bounded in case host/port are unreachable. 
+ cfg.setInitializationFailTimeout(10_000); + cfg.addDataSourceProperty("connectTimeout", "10"); + } + + private static boolean isNonBlank(@Nullable String value) { + return value != null && !value.isBlank(); + } + + private static @Nullable String firstNonBlankEnv(String preferred, String fallback) { + String preferredValue = System.getenv(preferred); + if (isNonBlank(preferredValue)) { + return preferredValue; + } + String fallbackValue = System.getenv(fallback); + return isNonBlank(fallbackValue) ? fallbackValue : null; + } + + private static void sleepMs(long ms) { + try { + Thread.sleep(ms); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + // ------------------------------------------------------------------ + // Latency statistics helper + // ------------------------------------------------------------------ + + record Latencies(long p50, long p95, long p99, long max, long min, double avg, int count) { + + static Latencies of(List values) { + if (values.isEmpty()) { + return new Latencies(0, 0, 0, 0, 0, 0, 0); + } + Collections.sort(values); + int n = values.size(); + return new Latencies(values.get((int) (n * 0.50)), values.get((int) (n * 0.95)), + values.get((int) (n * 0.99)), values.get(n - 1), values.get(0), + values.stream().mapToLong(Long::longValue).average().orElse(0), n); + } + + void print(String label) { + logInfo("{} n={} min={}ms avg={}ms p50={}ms p95={}ms p99={}ms max={}ms", label, count, min, avg, p50, + p95, p99, max); + } + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceServiceTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceServiceTest.java new file mode 100644 index 0000000000000..ccaf21f746728 --- /dev/null +++ 
b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBPersistenceServiceTest.java @@ -0,0 +1,468 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.time.ZonedDateTime; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ScheduledFuture; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.openhab.core.items.ItemRegistry; +import org.openhab.core.items.MetadataRegistry; +import org.openhab.core.library.items.NumberItem; +import org.openhab.core.library.types.DecimalType; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.types.UnDefType; + +import com.zaxxer.hikari.HikariDataSource; + +/** + * Unit tests for {@link TimescaleDBPersistenceService} using a mocked DataSource. + * + *

+ * The service is constructed and activated directly (bypassing OSGi) to allow + * mock injection. + * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBPersistenceServiceTest { + + private HikariDataSource dataSource; + private Connection connection; + private Statement statement; + private PreparedStatement preparedStatement; + private ResultSet resultSet; + private ItemRegistry itemRegistry; + private MetadataRegistry metadataRegistry; + + /** + * Subclass that exposes the DataSource injection point for testing. + */ + private static class TestableService extends TimescaleDBPersistenceService { + + private final HikariDataSource injectedDs; + + TestableService(ItemRegistry ir, MetadataRegistry mr, HikariDataSource ds) { + super(ir, new TimescaleDBMetadataService(mr)); + this.injectedDs = ds; + } + + @Override + public void activate(Map config) { + // Skip the real activate (which would try to connect) and wire the mock directly + // via reflection to set the dataSource field. 
+ try { + var dsField = TimescaleDBPersistenceService.class.getDeclaredField("dataSource"); + dsField.setAccessible(true); + dsField.set(this, injectedDs); + } catch (ReflectiveOperationException e) { + throw new IllegalStateException(e); + } + } + } + + private TestableService service; + + @BeforeEach + void setUp() throws Exception { + dataSource = mock(HikariDataSource.class); + connection = mock(Connection.class); + statement = mock(Statement.class); + preparedStatement = mock(PreparedStatement.class); + resultSet = mock(ResultSet.class); + itemRegistry = mock(ItemRegistry.class); + metadataRegistry = mock(MetadataRegistry.class); + + when(dataSource.getConnection()).thenReturn(connection); + when(connection.createStatement()).thenReturn(statement); + when(connection.prepareStatement(anyString())).thenReturn(preparedStatement); + when(preparedStatement.executeQuery()).thenReturn(resultSet); + when(preparedStatement.executeUpdate()).thenReturn(1); + when(resultSet.next()).thenReturn(false); + when(metadataRegistry.getAll()).thenReturn(Collections.emptyList()); + + service = new TestableService(itemRegistry, metadataRegistry, dataSource); + service.activate(Map.of()); + } + + @AfterEach + void tearDown() { + service.deactivate(); + } + + // ------------------------------------------------------------------ + // Service metadata + // ------------------------------------------------------------------ + + @Test + void getIdReturnstimescaledb() { + assertEquals("timescaledb", service.getId()); + } + + @Test + void getLabelReturnshumanreadablelabel() { + assertNotNull(service.getLabel(null)); + assertFalse(service.getLabel(null).isBlank()); + } + + @Test + void getSuggestedStrategiesReturnsEmptyList() { + assertTrue(service.getSuggestedStrategies().isEmpty()); + } + + // ------------------------------------------------------------------ + // store + // ------------------------------------------------------------------ + + @Test + void storeNormalstateSendsinsert() 
throws Exception { + // item_id cache is empty → getOrCreateItemId will run SELECT then INSERT + stubItemIdLookup(7); + + var item = new NumberItem("Sensor1"); + service.store(item, ZonedDateTime.now(), new DecimalType(42.0), null); + + // At minimum one connection obtained for the store operation + verify(dataSource, atLeastOnce()).getConnection(); + } + + @Test + void storeUndefstateDoesnottouchdatabase() throws Exception { + var item = new NumberItem("Sensor1"); + service.store(item, ZonedDateTime.now(), UnDefType.UNDEF, null); + + verify(dataSource, never()).getConnection(); + } + + @Test + void storeWithaliasUsesaliasname() throws Exception { + stubItemIdLookup(3); + var item = new NumberItem("RealName"); + + // Capture which PreparedStatements get setString(1, "AliasName") + PreparedStatement selectPs = mock(PreparedStatement.class); + ResultSet selectRs = mock(ResultSet.class); + when(selectRs.next()).thenReturn(false); + when(selectPs.executeQuery()).thenReturn(selectRs); + when(connection.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(selectPs); + + service.store(item, ZonedDateTime.now(), new DecimalType(1.0), "AliasName"); + + // The item_id lookup SELECT must be called with the alias + verify(selectPs, atLeastOnce()).setString(eq(1), eq("AliasName")); + } + + // ------------------------------------------------------------------ + // query + // ------------------------------------------------------------------ + + @Test + void queryItemnotincacheAndNotInDbReturnsemptylist() throws Exception { + var filter = new FilterCriteria(); + filter.setItemName("UnknownItem"); + // Default mock: resultSet.next() = false → item not in DB either + + var result = service.query(filter); + + assertFalse(result.iterator().hasNext()); + // Fix 1: on cache miss the service now performs a DB lookup before giving up + verify(dataSource, atLeastOnce()).getConnection(); + } + + @Test + void queryCacheMissItemFoundInDbPopulatescacheAndExecutesquery() throws 
Exception { + // Arrange: item_id lookup returns 42, actual query returns no rows + PreparedStatement findPs = mock(PreparedStatement.class); + ResultSet findRs = mock(ResultSet.class); + when(findRs.next()).thenReturn(true); + when(findRs.getInt(1)).thenReturn(42); + when(findPs.executeQuery()).thenReturn(findRs); + when(connection.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(findPs); + when(itemRegistry.getItem("Sensor1")).thenReturn(new NumberItem("Sensor1")); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor1"); + + var result = service.query(filter); + + // Still returns empty (no rows in the items table mock) + assertFalse(result.iterator().hasNext()); + // Fix 1: two connections — one for DB lookup, one for actual query + verify(dataSource, atLeast(2)).getConnection(); + // Fix 1: item is now cached → a second query must NOT trigger another DB lookup + reset(findPs); + service.query(filter); + verify(findPs, never()).executeQuery(); + } + + @Test + void queryNoitemnameReturnsemptylist() { + var filter = new FilterCriteria(); + // no item name set + + var result = service.query(filter); + assertFalse(result.iterator().hasNext()); + } + + @Test + void queryKnownitemDelegatestoqueryclass() throws Exception { + // Pre-populate the cache by storing first + stubItemIdLookup(5); + when(itemRegistry.getItem("Sensor1")).thenReturn(new NumberItem("Sensor1")); + + var item = new NumberItem("Sensor1"); + service.store(item, ZonedDateTime.now(), new DecimalType(1.0), null); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor1"); + + service.query(filter); + + // Connection should now be used for the query as well + verify(dataSource, atLeast(2)).getConnection(); + } + + // ------------------------------------------------------------------ + // remove + // ------------------------------------------------------------------ + + @Test + void removeItemnotincacheAndNotInDbReturnsfalse() throws Exception { + var filter = 
new FilterCriteria(); + filter.setItemName("UnknownItem"); + // Default mock: resultSet.next() = false → item not in DB either + + assertFalse(service.remove(filter)); + // Fix 2: on cache miss the service now performs a DB lookup before giving up + verify(dataSource, atLeastOnce()).getConnection(); + } + + @Test + void removeCacheMissItemFoundInDbPopulatescacheAndExecutesdelete() throws Exception { + // Arrange: item_id lookup returns 7, DELETE returns 3 rows deleted + PreparedStatement findPs = mock(PreparedStatement.class); + ResultSet findRs = mock(ResultSet.class); + when(findRs.next()).thenReturn(true); + when(findRs.getInt(1)).thenReturn(7); + when(findPs.executeQuery()).thenReturn(findRs); + when(connection.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(findPs); + when(preparedStatement.executeUpdate()).thenReturn(3); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor1"); + + assertTrue(service.remove(filter)); + // Fix 2: two connections — one for DB lookup, one for DELETE + verify(dataSource, atLeast(2)).getConnection(); + // Fix 2: item is now cached → a second remove must NOT trigger another DB lookup + reset(findPs); + service.remove(filter); + verify(findPs, never()).executeQuery(); + } + + @Test + void removeKnownitemReturnstrue() throws Exception { + stubItemIdLookup(9); + + // Store to populate cache + service.store(new NumberItem("Sensor1"), ZonedDateTime.now(), new DecimalType(1.0), null); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor1"); + when(preparedStatement.executeUpdate()).thenReturn(3); + + assertTrue(service.remove(filter)); + } + + @Test + void removeNoitemnameReturnsfalse() { + var filter = new FilterCriteria(); + assertFalse(service.remove(filter)); + } + + // ------------------------------------------------------------------ + // activate() / deactivate() — real code paths + // ------------------------------------------------------------------ + + @Test + void 
activateMissingurlDatasourceremainsnull() throws Exception { + var realService = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mock(MetadataRegistry.class))); + realService.activate(Map.of()); // no 'url' key + + var dsField = TimescaleDBPersistenceService.class.getDeclaredField("dataSource"); + dsField.setAccessible(true); + assertTrue(null == dsField.get(realService), "dataSource must be null when url is missing"); + realService.deactivate(); + } + + @Test + void activateInvalidurlDatasourceremainsnull() throws Exception { + var realService = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mock(MetadataRegistry.class))); + // Unreachable host; short timeout so the test does not block long + realService.activate( + Map.of("url", "jdbc:postgresql://localhost:19999/invalid", "password", "x", "connectTimeout", "500")); + + var dsField = TimescaleDBPersistenceService.class.getDeclaredField("dataSource"); + dsField.setAccessible(true); + assertTrue(null == dsField.get(realService), "dataSource must be null when connection fails"); + realService.deactivate(); + } + + @Test + @SuppressWarnings("unchecked") + void deactivateWithscheduledjobCancelsjob() throws Exception { + ScheduledFuture mockJob = mock(ScheduledFuture.class); + var jobField = TimescaleDBPersistenceService.class.getDeclaredField("downsampleJob"); + jobField.setAccessible(true); + jobField.set(service, mockJob); + + service.deactivate(); + + verify(mockJob).cancel(false); + } + + @Test + void deactivateClosesdatasource() throws Exception { + service.deactivate(); + verify(dataSource).close(); + } + + @Test + void deactivateSetsDownsampleJobInstanceToNullSoRunDownsampleNowReturnsFalse() throws Exception { + // Fix 3: inject a job instance, then deactivate → runDownsampleNow() must return false + var jobField = TimescaleDBPersistenceService.class.getDeclaredField("downsampleJobInstance"); + 
jobField.setAccessible(true); + jobField.set(service, mock(TimescaleDBDownsampleJob.class)); + + assertTrue(service.runDownsampleNow(), "sanity: job present before deactivate"); + + service.deactivate(); + + assertFalse(service.runDownsampleNow(), + "runDownsampleNow() must return false after deactivate() — console command " + + "must not trigger a job backed by a closed connection pool"); + } + + // ------------------------------------------------------------------ + // Private helpers tested via reflection + // ------------------------------------------------------------------ + + @Test + void secondsUntilMidnightIspositiveandatmost24h() { + long seconds = TimescaleDBPersistenceService.secondsUntilMidnight(); + assertTrue(seconds > 0, "Seconds until midnight must be positive"); + assertTrue(seconds <= 86400, "Seconds until midnight must not exceed 24 h"); + } + + @Test + void parseIntConfigValidvalueReturnsparsed() { + assertEquals(42, TimescaleDBPersistenceService.parseIntConfig(Map.of("k", "42"), "k", 0)); + } + + @Test + void parseIntConfigMissingkeyReturnsdefault() { + assertEquals(7, TimescaleDBPersistenceService.parseIntConfig(Map.of(), "k", 7)); + } + + @Test + void parseIntConfigInvalidvalueReturnsdefault() { + assertEquals(3, TimescaleDBPersistenceService.parseIntConfig(Map.of("k", "notanumber"), "k", 3)); + } + + // ------------------------------------------------------------------ + // runDownsampleNow / ConsoleCommandExtension + // ------------------------------------------------------------------ + + @Test + void runDownsampleNowReturnsFalseWhenNotActivated() { + // A fresh service with no activate() call has no job instance + TimescaleDBPersistenceService fresh = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mock(MetadataRegistry.class))); + assertFalse(fresh.runDownsampleNow(), "runDownsampleNow() must return false before activate()"); + } + + @Test + void 
consoleCommandDownsamplePrintsFinishedWhenServiceActive() throws Exception { + // Inject a downsampleJobInstance that does nothing (already activated mock service) + var jobField = TimescaleDBPersistenceService.class.getDeclaredField("downsampleJobInstance"); + jobField.setAccessible(true); + TimescaleDBDownsampleJob mockJob = mock(TimescaleDBDownsampleJob.class); + jobField.set(service, mockJob); + + var console = mock(org.openhab.core.io.console.Console.class); + var cmd = new TimescaleDBConsoleCommandExtension(service); + cmd.execute(new String[] { "downsample" }, console); + + verify(mockJob).run(); + verify(console).println(contains("finished")); + } + + @Test + void consoleCommandDownsamplePrintsNotActiveWhenServiceInactive() { + TimescaleDBPersistenceService fresh = new TimescaleDBPersistenceService(mock(ItemRegistry.class), + new TimescaleDBMetadataService(mock(MetadataRegistry.class))); + var console = mock(org.openhab.core.io.console.Console.class); + new TimescaleDBConsoleCommandExtension(fresh).execute(new String[] { "downsample" }, console); + verify(console).println(contains("not active")); + } + + @Test + void consoleCommandUnknownArgPrintsUsage() { + var console = mock(org.openhab.core.io.console.Console.class); + new TimescaleDBConsoleCommandExtension(service).execute(new String[] { "unknown" }, console); + // printUsage writes to console — at minimum something must be printed + verify(console, atLeastOnce()).printUsage(anyString()); + } + + // ------------------------------------------------------------------ + // Helpers + // ------------------------------------------------------------------ + + /** + * Stubs the item_id lookup: SELECT returns nothing, INSERT returns the given id. 
+ */ + private void stubItemIdLookup(int itemId) throws Exception { + ResultSet selectRs = mock(ResultSet.class); + ResultSet insertRs = mock(ResultSet.class); + PreparedStatement selectPs = mock(PreparedStatement.class); + PreparedStatement insertPs = mock(PreparedStatement.class); + PreparedStatement insertItemPs = mock(PreparedStatement.class); + + when(selectRs.next()).thenReturn(false); + when(insertRs.next()).thenReturn(true); + when(insertRs.getInt(1)).thenReturn(itemId); + when(selectPs.executeQuery()).thenReturn(selectRs); + when(insertPs.executeQuery()).thenReturn(insertRs); + when(insertItemPs.executeUpdate()).thenReturn(1); + + when(connection.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(selectPs); + when(connection.prepareStatement(contains("INSERT INTO item_meta"))).thenReturn(insertPs); + when(connection.prepareStatement(contains("INSERT INTO items"))).thenReturn(insertItemPs); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQueryTest.java b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQueryTest.java new file mode 100644 index 0000000000000..80b0c062d9d19 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBQueryTest.java @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Timestamp; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.List; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.openhab.core.library.items.NumberItem; +import org.openhab.core.library.items.SwitchItem; +import org.openhab.core.library.types.DecimalType; +import org.openhab.core.library.types.OnOffType; +import org.openhab.core.persistence.FilterCriteria; +import org.openhab.core.persistence.FilterCriteria.Ordering; +import org.openhab.core.persistence.HistoricItem; + +/** + * Unit tests for {@link TimescaleDBQuery} using mocked JDBC connections. 
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +@SuppressWarnings("null") +class TimescaleDBQueryTest { + + private Connection connection; + private PreparedStatement preparedStatement; + private ResultSet resultSet; + + @BeforeEach + void setUp() throws Exception { + connection = mock(Connection.class); + preparedStatement = mock(PreparedStatement.class); + resultSet = mock(ResultSet.class); + + when(connection.prepareStatement(anyString())).thenReturn(preparedStatement); + when(preparedStatement.executeQuery()).thenReturn(resultSet); + when(resultSet.next()).thenReturn(false); // empty result by default + } + + // ------------------------------------------------------------------ + // insert + // ------------------------------------------------------------------ + + @Test + void insertSetsallfiveparameters() throws Exception { + var row = new TimescaleDBMapper.Row(23.4, null, "°C"); + ZonedDateTime now = ZonedDateTime.now(); + + TimescaleDBQuery.insert(connection, 5, now, row); + + verify(preparedStatement).setTimestamp(eq(1), any(Timestamp.class)); + verify(preparedStatement).setInt(2, 5); + verify(preparedStatement).setDouble(3, 23.4); + verify(preparedStatement).setString(4, null); // string + verify(preparedStatement).setString(5, "°C"); // unit + verify(preparedStatement).executeUpdate(); + } + + @Test + void insertSqlcontainsonconflictdoNothing() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + + TimescaleDBQuery.insert(connection, 1, ZonedDateTime.now(), new TimescaleDBMapper.Row(1.0, null, null)); + + String sql = capturedSql.get(0); + assertTrue(sql.contains("ON CONFLICT DO NOTHING"), + "INSERT must silently ignore duplicate writes via ON CONFLICT DO NOTHING"); + assertFalse(sql.contains("ON CONFLICT ("), + 
"ON CONFLICT must NOT specify columns — TimescaleDB hypertables do not support " + + "column-inference conflict targets and throw instead of silently dropping"); + } + + @Test + void insertNumericnullusessetnull() throws Exception { + var row = new TimescaleDBMapper.Row(null, "hello", null); + ZonedDateTime now = ZonedDateTime.now(); + + TimescaleDBQuery.insert(connection, 3, now, row); + + verify(preparedStatement).setNull(eq(3), anyInt()); + } + + // ------------------------------------------------------------------ + // getOrCreateItemId — existing item (SELECT path) + // ------------------------------------------------------------------ + + @Test + void getOrCreateItemIdExistingitemReturnsfromselect() throws Exception { + when(resultSet.next()).thenReturn(true); + when(resultSet.getInt(1)).thenReturn(42); + + int id = TimescaleDBQuery.getOrCreateItemId(connection, "MySensor", "My Sensor Label"); + + assertEquals(42, id); + // Only one PreparedStatement needed (the SELECT) + verify(connection, times(1)).prepareStatement(contains("SELECT id FROM item_meta")); + } + + @Test + void getOrCreateItemIdNewitemInsertsandreturns() throws Exception { + // First call (SELECT) returns no rows; second (INSERT) returns the new id + ResultSet selectRs = mock(ResultSet.class); + ResultSet insertRs = mock(ResultSet.class); + PreparedStatement selectPs = mock(PreparedStatement.class); + PreparedStatement insertPs = mock(PreparedStatement.class); + + when(selectRs.next()).thenReturn(false); + when(insertRs.next()).thenReturn(true); + when(insertRs.getInt(1)).thenReturn(99); + when(selectPs.executeQuery()).thenReturn(selectRs); + when(insertPs.executeQuery()).thenReturn(insertRs); + + when(connection.prepareStatement(contains("SELECT id FROM item_meta"))).thenReturn(selectPs); + when(connection.prepareStatement(contains("INSERT INTO item_meta"))).thenReturn(insertPs); + + int id = TimescaleDBQuery.getOrCreateItemId(connection, "NewSensor", null); + + assertEquals(99, id); + } + + 
// ------------------------------------------------------------------
+    // query — result mapping
+    // ------------------------------------------------------------------
+
+    @Test
+    void queryEmptyresultReturnsemptylist() throws Exception {
+        when(resultSet.next()).thenReturn(false);
+        var filter = new FilterCriteria();
+        filter.setItemName("Sensor");
+
+        List<HistoricItem> result = TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter);
+
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    void querySinglerowReturnsoneitem() throws Exception {
+        Instant ts = Instant.parse("2024-06-01T12:00:00Z");
+        when(resultSet.next()).thenReturn(true, false);
+        when(resultSet.getTimestamp(1)).thenReturn(Timestamp.from(ts));
+        when(resultSet.getObject(2)).thenReturn(42.0); // value
+        when(resultSet.getString(3)).thenReturn(null); // string
+        when(resultSet.getString(4)).thenReturn(null); // unit
+
+        var filter = new FilterCriteria();
+        filter.setItemName("Sensor");
+
+        List<HistoricItem> result = TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter);
+
+        assertEquals(1, result.size());
+        assertEquals(new DecimalType(42.0), result.get(0).getState());
+        assertEquals(ts, result.get(0).getTimestamp().toInstant());
+    }
+
+    @Test
+    void queryWithunitReturnsquantitytype() throws Exception {
+        when(resultSet.next()).thenReturn(true, false);
+        when(resultSet.getTimestamp(1)).thenReturn(Timestamp.from(Instant.now()));
+        when(resultSet.getObject(2)).thenReturn(22.5);
+        when(resultSet.getString(3)).thenReturn(null);
+        when(resultSet.getString(4)).thenReturn("°C");
+
+        var filter = new FilterCriteria();
+        filter.setItemName("TempSensor");
+
+        List<HistoricItem> result = TimescaleDBQuery.query(connection, new NumberItem("TempSensor"), 1, filter);
+
+        assertEquals(1, result.size());
+        assertEquals("22.5 °C", result.get(0).getState().toString());
+    }
+
+    @Test
+    void queryWithswitchitemReturnsonofftype() throws Exception {
+        when(resultSet.next()).thenReturn(true, false);
+        
when(resultSet.getTimestamp(1)).thenReturn(Timestamp.from(Instant.now())); + when(resultSet.getObject(2)).thenReturn(1.0); + when(resultSet.getString(3)).thenReturn(null); + when(resultSet.getString(4)).thenReturn(null); + + var filter = new FilterCriteria(); + filter.setItemName("Switch1"); + + List result = TimescaleDBQuery.query(connection, new SwitchItem("Switch1"), 2, filter); + + assertEquals(OnOffType.ON, result.get(0).getState()); + } + + // ------------------------------------------------------------------ + // query — SQL construction + // ------------------------------------------------------------------ + + @Test + void queryWithdaterangeAddsbothdateparams() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + filter.setBeginDate(ZonedDateTime.now().minusDays(7)); + filter.setEndDate(ZonedDateTime.now()); + + TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter); + + String sql = capturedSql.get(0); + assertTrue(sql.contains("time >= ?"), "Should have begin date filter"); + assertTrue(sql.contains("time <= ?"), "Should have end date filter"); + } + + @Test + void queryAscendingorderAppendsasc() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + filter.setOrdering(Ordering.ASCENDING); + + TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter); + + assertTrue(capturedSql.get(0).contains("ORDER BY time ASC")); + } + + @Test + void queryDescendingorderAppendsdesc() throws Exception { + var capturedSql = new java.util.ArrayList(); + 
when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + filter.setOrdering(Ordering.DESCENDING); + + TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter); + + assertTrue(capturedSql.get(0).contains("ORDER BY time DESC")); + } + + @Test + void queryWithpaginationAddslimitandoffset() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + filter.setPageSize(50); + filter.setPageNumber(2); + + TimescaleDBQuery.query(connection, new NumberItem("Sensor"), 1, filter); + + String sql = capturedSql.get(0); + assertTrue(sql.contains("LIMIT ?"), "Should have LIMIT clause"); + assertTrue(sql.contains("OFFSET ?"), "Should have OFFSET clause"); + } + + // ------------------------------------------------------------------ + // findItemId — SELECT-only cache-miss fallback + // ------------------------------------------------------------------ + + @Test + void findItemIdKnownitemReturnsid() throws Exception { + when(resultSet.next()).thenReturn(true); + when(resultSet.getInt(1)).thenReturn(77); + + var result = TimescaleDBQuery.findItemId(connection, "KnownSensor"); + + assertTrue(result.isPresent()); + assertEquals(77, result.get()); + // Must use the SELECT query — no INSERT + verify(connection).prepareStatement(contains("SELECT id FROM item_meta")); + verify(connection, never()).prepareStatement(contains("INSERT")); + } + + @Test + void findItemIdUnknownitemReturnsempty() throws Exception { + when(resultSet.next()).thenReturn(false); + + var result = TimescaleDBQuery.findItemId(connection, "UnknownSensor"); + + assertFalse(result.isPresent()); + 
verify(connection).prepareStatement(contains("SELECT id FROM item_meta")); + verify(connection, never()).prepareStatement(contains("INSERT")); + } + + @Test + void findItemIdSetsNameParameter() throws Exception { + when(resultSet.next()).thenReturn(false); + + TimescaleDBQuery.findItemId(connection, "MySensor"); + + verify(preparedStatement).setString(1, "MySensor"); + } + + // ------------------------------------------------------------------ + // remove + // ------------------------------------------------------------------ + + @Test + void removeNodatefilterOnlyfiltersonitemid() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + when(preparedStatement.executeUpdate()).thenReturn(5); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + + int deleted = TimescaleDBQuery.remove(connection, 3, filter); + + assertEquals(5, deleted); + assertFalse(capturedSql.get(0).contains("time >="), "Should not contain date filter"); + } + + @Test + void removeWithdaterangeAddsbothdateparams() throws Exception { + var capturedSql = new java.util.ArrayList(); + when(connection.prepareStatement(anyString())).thenAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return preparedStatement; + }); + when(preparedStatement.executeUpdate()).thenReturn(3); + + var filter = new FilterCriteria(); + filter.setItemName("Sensor"); + filter.setBeginDate(ZonedDateTime.now().minusDays(30)); + filter.setEndDate(ZonedDateTime.now()); + + TimescaleDBQuery.remove(connection, 1, filter); + + String sql = capturedSql.get(0); + assertTrue(sql.contains("time >= ?")); + assertTrue(sql.contains("time <= ?")); + } +} diff --git a/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchemaTest.java 
b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchemaTest.java new file mode 100644 index 0000000000000..9c68f073ed6a0 --- /dev/null +++ b/bundles/org.openhab.persistence.timescaledb/src/test/java/org/openhab/persistence/timescaledb/internal/TimescaleDBSchemaTest.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2010-2026 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.persistence.timescaledb.internal; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.*; +import static org.mockito.Mockito.*; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.eclipse.jdt.annotation.DefaultLocation; +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Unit tests for {@link TimescaleDBSchema} using mocked JDBC connections. 
+ * + * @author René Ulbricht - Initial contribution + */ +@NonNullByDefault({ DefaultLocation.RETURN_TYPE, DefaultLocation.PARAMETER }) +class TimescaleDBSchemaTest { + + private Connection connection; + private Statement statement; + private PreparedStatement hypertablePs; + private ResultSet extensionResultSet; + + @BeforeEach + void setUp() throws SQLException { + connection = mock(Connection.class); + statement = mock(Statement.class); + hypertablePs = mock(PreparedStatement.class); + extensionResultSet = mock(ResultSet.class); + + when(connection.createStatement()).thenReturn(statement); + when(connection.prepareStatement(anyString())).thenReturn(hypertablePs); + when(statement.executeQuery(contains("pg_extension"))).thenReturn(extensionResultSet); + when(extensionResultSet.next()).thenReturn(true); // extension is present by default + } + + @Test + void initializeExecutesallrequiredddl() throws SQLException { + TimescaleDBSchema.initialize(connection, "7 days", 0, 0); + + // Must check TimescaleDB extension + verify(statement).executeQuery(contains("pg_extension")); + + // Must create item_meta + verify(statement).execute(contains("CREATE TABLE IF NOT EXISTS item_meta")); + + // Must create items table + verify(statement).execute(contains("CREATE TABLE IF NOT EXISTS items")); + + // Must create hypertable via PreparedStatement (not raw Statement — avoids SQL injection on chunkInterval) + verify(connection).prepareStatement(contains("create_hypertable")); + verify(hypertablePs).execute(); + + // Must create index + verify(statement).execute(contains("CREATE INDEX IF NOT EXISTS items_item_id_time_idx")); + } + + @Test + void initializeHypertablecontainsconfiguredchunkinterval() throws SQLException { + TimescaleDBSchema.initialize(connection, "14 days", 0, 0); + + // The chunk interval must be passed as a PreparedStatement parameter, not interpolated into SQL + verify(hypertablePs).setString(1, "14 days"); + verify(hypertablePs).execute(); + } + + @Test + void 
initializeHypertableUsespreparedstatementNotRawSql() throws SQLException { + // The SQL_CREATE_HYPERTABLE constant must use ?::INTERVAL — verify no string-formatted interval leaks + // into a raw Statement.execute() call + var capturedRawSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedRawSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "3 days", 0, 0); + + // No raw Statement should contain "create_hypertable" or the interval value + assertFalse(capturedRawSql.stream().anyMatch(s -> s.contains("create_hypertable")), + "create_hypertable must not be executed via raw Statement (SQL injection risk)"); + assertFalse(capturedRawSql.stream().anyMatch(s -> s.contains("3 days")), + "chunkInterval value must not appear in any raw Statement SQL"); + } + + @Test + void initializeWithcompressionSendscompressionddl() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "7 days", 30, 0); + + boolean hasCompress = capturedSql.stream().anyMatch(s -> s.contains("timescaledb.compress")); + boolean hasPolicy = capturedSql.stream().anyMatch(s -> s.contains("add_compression_policy")); + assertTrue(hasCompress, "Should enable compression on table"); + assertTrue(hasPolicy, "Should add compression policy"); + + // Policy should reference the configured number of days + assertTrue(capturedSql.stream().anyMatch(s -> s.contains("add_compression_policy") && s.contains("30")), + "Compression policy should reference 30 days"); + } + + @Test + void initializeWithoutcompressionNocompressionddl() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, 
"7 days", 0, 0); + + boolean hasCompress = capturedSql.stream().anyMatch(s -> s.contains("add_compression_policy")); + assertFalse(hasCompress, "Should not add compression policy when compressionAfterDays=0"); + } + + @Test + void initializeWithretentionpolicySendsretentionddl() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "7 days", 0, 365); + + assertTrue(capturedSql.stream().anyMatch(s -> s.contains("add_retention_policy")), + "Should add retention policy"); + assertTrue(capturedSql.stream().anyMatch(s -> s.contains("add_retention_policy") && s.contains("365")), + "Retention policy should reference 365 days"); + } + + @Test + void initializeWithoutretentionpolicyNoretentionddl() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "7 days", 0, 0); + + boolean hasRetention = capturedSql.stream().anyMatch(s -> s.contains("add_retention_policy")); + assertFalse(hasRetention, "Should not add retention policy when retentionDays=0"); + } + + @Test + void initializeMissingtimescaledbextensionThrowssqlexception() throws SQLException { + when(extensionResultSet.next()).thenReturn(false); // extension NOT present + + SQLException ex = assertThrows(SQLException.class, + () -> TimescaleDBSchema.initialize(connection, "7 days", 0, 0)); + + String msg = ex.getMessage(); + assertNotNull(msg, "Exception must have a message"); + assertTrue(msg.contains("TimescaleDB extension"), "Error message should mention TimescaleDB extension"); + } + + @Test + void createTableDdlContainsdownsampledInUniqueconstraint() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + 
capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "7 days", 0, 0); + + boolean hasConstraint = capturedSql.stream().anyMatch( + s -> s.contains("UNIQUE") && s.contains("time") && s.contains("item_id") && s.contains("downsampled")); + assertTrue(hasConstraint, + "CREATE TABLE must include UNIQUE(time, item_id, downsampled) — without the downsampled " + + "column a downsampled row at the exact bucket-boundary timestamp would conflict " + + "with the raw row at the same time"); + } + + @Test + void migrationDdlDropsOldconstraintAndAddsNewone() throws SQLException { + var capturedSql = new java.util.ArrayList(); + doAnswer(inv -> { + capturedSql.add(inv.getArgument(0)); + return false; + }).when(statement).execute(anyString()); + + TimescaleDBSchema.initialize(connection, "7 days", 0, 0); + + // The migration DO-block is the only DDL statement that contains DROP CONSTRAINT + java.util.Optional migrationOpt = capturedSql.stream().filter(s -> s.contains("DROP CONSTRAINT")) + .findFirst(); + assertTrue(migrationOpt.isPresent(), "Migration DO-block not found in executed DDL statements"); + String migrationSql = migrationOpt.get(); + + assertTrue(migrationSql.contains("items_time_item_id_ukey"), + "Migration must drop the legacy items_time_item_id_ukey constraint"); + assertTrue(migrationSql.contains("items_time_item_id_downsampled_ukey"), + "Migration must add the new items_time_item_id_downsampled_ukey constraint"); + assertTrue(migrationSql.indexOf("DROP") < migrationSql.indexOf("ADD"), + "Migration must DROP the old constraint before ADD-ing the new one"); + } +} diff --git a/bundles/pom.xml b/bundles/pom.xml index 7b68f6116a6e9..0aa434dafc601 100644 --- a/bundles/pom.xml +++ b/bundles/pom.xml @@ -503,6 +503,7 @@ org.openhab.persistence.mapdb org.openhab.persistence.mongodb org.openhab.persistence.rrd4j + org.openhab.persistence.timescaledb org.openhab.voice.googlestt 
org.openhab.voice.googletts