Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,54 @@ All notable changes to LogTide will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.5.0]

### Added

- **Optional Redis Dependency**: Redis is now optional for simpler deployments (#90)
- PostgreSQL-based job queues using `graphile-worker` when Redis is unavailable
- PostgreSQL `LISTEN/NOTIFY` for real-time log streaming (live tail)
- In-memory rate limiting fallback when Redis is not configured
- Queue abstraction layer with adapter pattern (BullMQ for Redis, graphile-worker for PostgreSQL)
- New `docker-compose.simple.yml` for Redis-free deployments
- Automatic backend selection based on `REDIS_URL` environment variable
- Graceful degradation: caching disabled, rate limiting in-memory, jobs via PostgreSQL

- **Queue System Architecture**: Unified queue interface supporting multiple backends
- `IQueueAdapter` and `IWorkerAdapter` interfaces for queue operations
- `QueueSystemManager` singleton with queue/worker instance caching
- Proper resource cleanup on shutdown (closes all cached queue/worker instances)
- Type-safe job processors with `IJob<T>` generic interface

### Changed

- **Configuration**: `REDIS_URL` is now optional
- If not set, backend automatically uses PostgreSQL alternatives
- Existing deployments with Redis continue to work unchanged
- Health check endpoint reports Redis as `not_configured` when `REDIS_URL` is not set (a configured but unreachable Redis is still reported as `down`)

- **Cache System**: Graceful handling of missing Redis
- All cache operations return `null` when Redis unavailable
- No errors thrown, application continues without caching
- SigmaHQ GitHub client works without Redis (skips caching)

### Fixed

- **WebSocket Memory Leak**: Fixed potential memory leak in live tail WebSocket handler
- Added proper socket cleanup in error handler
- `safeSend` helper prevents sending to closed sockets
- Race condition fix with `isSocketOpen` tracking

- **SQL Injection Prevention**: Fixed potential SQL injection in notification publisher
- Removed manual quote escaping, using Kysely parameterized queries

### Documentation

- Updated deployment docs for Redis-optional configuration
- Added `docker-compose.simple.yml` example for minimal deployments

---

## [0.4.0]

### Added
Expand Down
152 changes: 152 additions & 0 deletions docker/docker-compose.simple.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
# LogTide Docker Compose - Simplified (No Redis)
#
# This configuration runs LogTide with PostgreSQL only.
# Redis is optional and not required for basic functionality.
#
# Benefits:
# - Simpler deployment (no Redis container to run)
# - Lower resource usage
# - Easier backup (single database)
#
# Trade-offs:
# - No distributed rate limiting (single instance only)
# - No Redis caching (relies on PostgreSQL)
# - Job queue uses PostgreSQL (graphile-worker)
#
# Usage: docker compose -f docker-compose.simple.yml up -d
#
# Required environment (shell or .env file): DB_NAME, DB_USER, DB_PASSWORD,
# API_KEY_SECRET. Compose substitutes an empty string for unset variables;
# NOTE(review): consider the ${VAR:?error} form if startup should fail fast.

services:
  postgres:
    # TimescaleDB on PostgreSQL 16; "latest-pg16" is a floating tag within pg16.
    image: timescale/timescaledb:latest-pg16
    container_name: logtide-postgres
    environment:
      POSTGRES_DB: ${DB_NAME}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    ports:
      # NOTE(review): publishes PostgreSQL to the host; drop this mapping if
      # only the other containers need database access.
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    command:
      - "postgres"
      # Connection and memory sizing
      - "-c"
      - "max_connections=100"
      - "-c"
      - "shared_buffers=256MB"
      - "-c"
      - "effective_cache_size=768MB"
      - "-c"
      - "work_mem=16MB"
      - "-c"
      - "maintenance_work_mem=128MB"
      # Parallel query settings for faster aggregations
      - "-c"
      - "max_parallel_workers_per_gather=4"
      - "-c"
      - "max_parallel_workers=8"
      - "-c"
      - "parallel_tuple_cost=0.01"
      - "-c"
      - "parallel_setup_cost=100"
      - "-c"
      - "min_parallel_table_scan_size=8MB"
      # Write-ahead log tuning for ingestion
      - "-c"
      - "wal_buffers=16MB"
      - "-c"
      - "checkpoint_completion_target=0.9"
      # Logging for slow queries (>100ms)
      - "-c"
      - "log_min_duration_statement=100"
    healthcheck:
      # ${DB_USER} is substituted by compose when the file is parsed,
      # not by the shell inside the container.
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped
    networks:
      - logtide-network

  backend:
    image: ${LOGTIDE_BACKEND_IMAGE:-logtide/backend:latest}
    container_name: logtide-backend
    ports:
      - "8080:8080"
    environment:
      NODE_ENV: production
      DATABASE_URL: postgresql://${DB_USER}:${DB_PASSWORD}@postgres:5432/${DB_NAME}
      DATABASE_HOST: postgres
      DB_USER: ${DB_USER}
      # REDIS_URL intentionally not set - uses PostgreSQL queue
      API_KEY_SECRET: ${API_KEY_SECRET}
      PORT: 8080
      HOST: 0.0.0.0
      # SMTP settings are optional; empty defaults keep compose quiet when unset.
      SMTP_HOST: ${SMTP_HOST:-}
      SMTP_PORT: ${SMTP_PORT:-587}
      SMTP_USER: ${SMTP_USER:-}
      SMTP_PASS: ${SMTP_PASS:-}
      SMTP_FROM: ${SMTP_FROM:-noreply@logtide.local}
      INTERNAL_LOGGING_ENABLED: ${INTERNAL_LOGGING_ENABLED:-false}
      INTERNAL_API_KEY: ${INTERNAL_API_KEY:-}
      SERVICE_NAME: logtide-backend
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      # Node-based probe so the image does not need curl/wget installed.
      test: ["CMD", "node", "-e", "require('http').get('http://localhost:8080/health', (r) => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 40s
    restart: unless-stopped
    networks:
      - logtide-network

  worker:
    # Same image as the backend, started with the "worker" command.
    image: ${LOGTIDE_BACKEND_IMAGE:-logtide/backend:latest}
    container_name: logtide-worker
    command: ["worker"]
    healthcheck:
      # The worker exposes no HTTP endpoint, so any healthcheck inherited
      # from the image is disabled.
      disable: true
    environment:
      NODE_ENV: production
      DATABASE_URL: postgresql://${DB_USER}:${DB_PASSWORD}@postgres:5432/${DB_NAME}
      DATABASE_HOST: postgres
      DB_USER: ${DB_USER}
      # REDIS_URL intentionally not set - uses PostgreSQL queue
      API_KEY_SECRET: ${API_KEY_SECRET}
      SMTP_HOST: ${SMTP_HOST:-}
      SMTP_PORT: ${SMTP_PORT:-587}
      SMTP_USER: ${SMTP_USER:-}
      SMTP_PASS: ${SMTP_PASS:-}
      SMTP_FROM: ${SMTP_FROM:-noreply@logtide.local}
      INTERNAL_LOGGING_ENABLED: ${INTERNAL_LOGGING_ENABLED:-false}
      INTERNAL_API_KEY: ${INTERNAL_API_KEY:-}
      SERVICE_NAME: logtide-worker
    depends_on:
      # Waits for a *healthy* backend — presumably so schema migrations run
      # before the worker touches the database; TODO confirm.
      backend:
        condition: service_healthy
    restart: unless-stopped
    networks:
      - logtide-network

  frontend:
    image: ${LOGTIDE_FRONTEND_IMAGE:-logtide/frontend:latest}
    container_name: logtide-frontend
    ports:
      - "3000:3000"
    environment:
      NODE_ENV: production
      PUBLIC_API_URL: ${PUBLIC_API_URL:-http://localhost:8080}
    depends_on:
      # Short form: waits only for the backend container to start, not for
      # it to be healthy.
      - backend
    restart: unless-stopped
    networks:
      - logtide-network

volumes:
  postgres_data:
    driver: local

networks:
  logtide-network:
1 change: 1 addition & 0 deletions packages/backend/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
"dotenv": "^17.2.3",
"fastify": "^4.29.1",
"fastify-plugin": "^4.5.1",
"graphile-worker": "^0.16.6",
"ioredis": "^5.8.2",
"js-yaml": "^4.1.1",
"ldapts": "^7.2.1",
Expand Down
8 changes: 6 additions & 2 deletions packages/backend/src/config/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ const configSchema = z.object({
// Database
DATABASE_URL: z.string().url(),

// Redis
REDIS_URL: z.string().url().default('redis://localhost:6379'),
// Redis (optional - if not set, uses PostgreSQL-based job queue)
REDIS_URL: z.string().url().optional(),

// API
API_KEY_SECRET: z.string().min(32),
Expand Down Expand Up @@ -93,3 +93,7 @@ export function isTest(): boolean {
/**
 * Whether outgoing e-mail can be sent: SMTP host, user, and password
 * must all be present in the validated configuration.
 */
export function isSmtpConfigured(): boolean {
  const { SMTP_HOST, SMTP_USER, SMTP_PASS } = config;
  return Boolean(SMTP_HOST && SMTP_USER && SMTP_PASS);
}

/**
 * Whether a Redis connection string was supplied via REDIS_URL.
 * When this is false, the backend falls back to its PostgreSQL-based
 * alternatives (job queue, live tail, in-memory rate limiting).
 */
export function isRedisConfigured(): boolean {
  return Boolean(config.REDIS_URL);
}
44 changes: 32 additions & 12 deletions packages/backend/src/modules/admin/service.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { db, getPoolStats } from '../../database/index.js';
import { sql } from 'kysely';
import { connection as redis } from '../../queue/connection.js';
import { connection as redis, isRedisAvailable } from '../../queue/connection.js';
import { CacheManager, type CacheStats, isCacheEnabled } from '../../utils/cache.js';

// System-wide statistics
Expand Down Expand Up @@ -133,7 +133,7 @@ export interface HealthStats {
connections: number;
};
redis: {
status: 'healthy' | 'degraded' | 'down';
status: 'healthy' | 'degraded' | 'down' | 'not_configured';
latency: number;
};
// Connection pool statistics (application-level)
Expand Down Expand Up @@ -608,6 +608,21 @@ export class AdminService {
* Get Redis statistics
*/
async getRedisStats(): Promise<RedisStats> {
// Return empty stats if Redis is not configured
if (!isRedisAvailable() || !redis) {
return {
memory: {
used: 'N/A (Redis not configured)',
peak: 'N/A',
},
queues: {
alertNotifications: { waiting: 0, active: 0, completed: 0, failed: 0 },
sigmaDetection: { waiting: 0, active: 0, completed: 0, failed: 0 },
},
connections: 0,
};
}

try {
// Get Redis memory info
const info = await redis.info('memory');
Expand Down Expand Up @@ -696,18 +711,20 @@ export class AdminService {
);
const dbConnections = connResult.rows[0]?.count || 0;

// Redis health
// Redis health (only if configured)
const redisStart = Date.now();
let redisStatus: 'healthy' | 'degraded' | 'down' = 'healthy';
let redisStatus: 'healthy' | 'degraded' | 'down' | 'not_configured' = 'not_configured';
let redisLatency = 0;

try {
await redis.ping();
redisLatency = Date.now() - redisStart;
if (redisLatency > 100) redisStatus = 'degraded';
} catch {
redisStatus = 'down';
redisLatency = -1;
if (isRedisAvailable() && redis) {
try {
await redis.ping();
redisLatency = Date.now() - redisStart;
redisStatus = redisLatency > 100 ? 'degraded' : 'healthy';
} catch {
redisStatus = 'down';
redisLatency = -1;
}
}

const dbStatus: 'healthy' | 'degraded' | 'down' =
Expand All @@ -716,8 +733,11 @@ export class AdminService {
// Check pool health: degraded if waiting requests > 0
const poolHealthy = poolStats.waitingCount === 0;

// Redis is not required - only affects overall status if configured and down
const redisHealthy = redisStatus === 'healthy' || redisStatus === 'not_configured';

const overall: 'healthy' | 'degraded' | 'down' =
dbStatus === 'healthy' && redisStatus === 'healthy' && poolHealthy
dbStatus === 'healthy' && redisHealthy && poolHealthy
? 'healthy'
: dbStatus === 'down' || redisStatus === 'down'
? 'down'
Expand Down
31 changes: 8 additions & 23 deletions packages/backend/src/modules/ingestion/service.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import { db } from '../../database/index.js';
import type { LogInput } from '@logtide/shared';
import { createQueue, publisher } from '../../queue/connection.js';
import { createQueue } from '../../queue/connection.js';
import type { LogEntry } from '../sigma/detection-engine.js';
import { CacheManager } from '../../utils/cache.js';
import { notificationPublisher } from '../streaming/index.js';

export class IngestionService {
/**
Expand Down Expand Up @@ -47,28 +48,12 @@ export class IngestionService {
console.error('[Ingestion] Failed to invalidate cache:', err);
});

// Publish to Redis for live tail
try {
// Map back to API format for frontend
const apiLogs = dbLogs.map(log => ({
id: undefined, // ID is generated by DB, might be missing here if we don't return it
time: log.time,
projectId: log.project_id,
service: log.service,
level: log.level,
message: log.message,
metadata: log.metadata,
traceId: log.trace_id,
spanId: log.span_id,
}));

await publisher.publish('logs:new', JSON.stringify({
projectId,
logs: apiLogs
}));
} catch (error) {
console.error('[Ingestion] Failed to publish logs to Redis:', error);
}
// Publish notification for live tail (uses PostgreSQL LISTEN/NOTIFY)
// Extract log IDs for the notification payload
const logIds = insertedLogs.map((log) => log.id);
notificationPublisher.publishLogIngestion(projectId, logIds).catch((err) => {
console.error('[Ingestion] Failed to publish notification:', err);
});

return logs.length;
}
Expand Down
Loading
Loading