diff --git a/.env.example b/.env.example index 90adc37e..b110532a 100644 --- a/.env.example +++ b/.env.example @@ -26,6 +26,11 @@ SUPABASE_ACCESS_TOKEN=sbp_YOUR_SUPABASE_TOKEN_HERE SUPABASE_URL=https://your-project.supabase.co SUPABASE_ANON_KEY=your-supabase-anon-key +# Backup Configuration +BACKUP_INTERVAL_HOURS=24 # Backup every 24 hours +MAX_BACKUPS=30 # Keep last 30 backups +API_KEY=your-secure-api-key-for-admin-endpoints + # Development Configuration NODE_ENV=development DEBUG=gemini-flow:* diff --git a/.gitignore b/.gitignore index 72e0df2a..4bf8369c 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,10 @@ *.sqlite *.sqlite3 +# Database backups +.data/ +.data/backups/ + # Swarm and coordination files .gemini-flow/ .swarm/ diff --git a/DOCKER.md b/DOCKER.md new file mode 100644 index 00000000..ed5ccacf --- /dev/null +++ b/DOCKER.md @@ -0,0 +1,508 @@ +# 🐳 Docker Deployment Guide + +This guide provides comprehensive instructions for building, running, and securing the Gemini Flow backend using Docker. + +## šŸ“‹ Table of Contents + +- [Overview](#overview) +- [Quick Start](#quick-start) +- [Building the Image](#building-the-image) +- [Running the Container](#running-the-container) +- [Docker Compose](#docker-compose) +- [Security Verification](#security-verification) +- [Health Checks](#health-checks) +- [Troubleshooting](#troubleshooting) +- [Best Practices](#best-practices) + +## šŸŽÆ Overview + +### Multi-Stage Build Architecture + +The Dockerfile uses a two-stage build process: + +1. **Builder Stage** (node:18-alpine) + - Installs all dependencies (including devDependencies) + - Prepares application for production + - Prunes development dependencies + +2. 
**Production Stage** (node:18-alpine) + - Minimal runtime image + - Non-root user (geminiflow, UID 1001) + - dumb-init for proper signal handling + - Security hardening + +### Security Features + +- āœ… **Non-root user**: Runs as UID 1001 (geminiflow) +- āœ… **Multi-stage build**: Smaller image, no build tools in production +- āœ… **Signal handling**: dumb-init handles SIGTERM/SIGINT properly +- āœ… **File ownership**: All files owned by non-root user +- āœ… **Health checks**: Validates service availability +- āœ… **Resource limits**: CPU and memory constraints via docker-compose +- āœ… **Security options**: no-new-privileges flag enabled + +### Image Size Comparison + +| Build Type | Size | Reduction | +|------------|------|-----------| +| Single-stage (baseline) | ~180MB | - | +| Multi-stage (optimized) | ~140MB | 22% | + +## šŸš€ Quick Start + +### Using Docker Compose (Recommended) + +```bash +# Build and start all services +docker-compose up -d + +# View logs +docker-compose logs -f backend + +# Stop services +docker-compose down + +# Stop and remove volumes +docker-compose down -v +``` + +### Using Docker CLI + +```bash +# Build image +docker build -t gemini-flow-backend:latest backend/ + +# Run container +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + -e GOOGLE_API_KEY="your-api-key" \ + gemini-flow-backend:latest + +# View logs +docker logs -f gemini-flow-backend + +# Stop container +docker stop gemini-flow-backend +``` + +## šŸ”Ø Building the Image + +### Basic Build + +```bash +cd backend +docker build -t gemini-flow-backend:latest . 
+``` + +### Build with Arguments + +```bash +docker build \ + --build-arg BUILD_VERSION=1.0.0 \ + --build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ + -t gemini-flow-backend:latest \ + backend/ +``` + +### Build with No Cache + +```bash +docker build --no-cache -t gemini-flow-backend:latest backend/ +``` + +### Multi-platform Build + +```bash +docker buildx build \ + --platform linux/amd64,linux/arm64 \ + -t gemini-flow-backend:latest \ + backend/ +``` + +## šŸƒ Running the Container + +### Basic Run + +```bash +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + gemini-flow-backend:latest +``` + +### With Environment Variables + +```bash +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + -e NODE_ENV=production \ + -e PORT=3001 \ + -e LOG_LEVEL=info \ + -e GOOGLE_API_KEY="your-api-key-here" \ + gemini-flow-backend:latest +``` + +### With Volume Mounts + +```bash +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + -v $(pwd)/data:/app/.data \ + -v $(pwd)/logs:/app/logs \ + -e GOOGLE_API_KEY="your-api-key-here" \ + gemini-flow-backend:latest +``` + +### With Security Options + +```bash +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + --user 1001:1001 \ + --security-opt no-new-privileges:true \ + --memory=512m \ + --cpus=1 \ + -e GOOGLE_API_KEY="your-api-key-here" \ + gemini-flow-backend:latest +``` + +### Generate Secure API Key + +```bash +# Generate random API key +export GOOGLE_API_KEY=$(openssl rand -hex 32) + +# Or use UUID +export GOOGLE_API_KEY=$(uuidgen) + +# Run with generated key +docker run -d \ + --name gemini-flow-backend \ + -p 3001:3001 \ + -e GOOGLE_API_KEY="$GOOGLE_API_KEY" \ + gemini-flow-backend:latest +``` + +## šŸŽ¼ Docker Compose + +### Configuration + +The `docker-compose.yml` file includes: + +- Backend service with security hardening +- Volume mounts for persistent data +- Resource limits (CPU/memory) +- Health checks +- Logging configuration +- Optional Redis service 
(commented out) + +### Commands + +```bash +# Start services in detached mode +docker-compose up -d + +# Build and start +docker-compose up -d --build + +# View logs +docker-compose logs -f + +# View backend logs only +docker-compose logs -f backend + +# Stop services +docker-compose down + +# Stop and remove volumes +docker-compose down -v + +# Restart specific service +docker-compose restart backend + +# Scale services (if configured) +docker-compose up -d --scale backend=3 +``` + +### Environment Variables + +Create a `.env` file in the root directory: + +```bash +# .env +VERSION=1.0.0 +BUILD_DATE=2024-01-01T00:00:00Z +LOG_LEVEL=info +GOOGLE_API_KEY=your-api-key-here +``` + +## šŸ”’ Security Verification + +### Verify Non-Root User + +```bash +# Check user in Dockerfile +docker inspect gemini-flow-backend | grep -i user + +# Expected output: "User": "geminiflow" or "User": "1001" +``` + +### Check Running Processes + +```bash +# View processes inside container +docker exec gemini-flow-backend ps aux + +# Expected: processes running as UID 1001, not root (UID 0) +``` + +### Verify File Permissions + +```bash +# Check data directory ownership +docker exec gemini-flow-backend ls -la /app/.data + +# Expected: drwxr-xr-x geminiflow nodejs +``` + +### Security Scan + +```bash +# Using Docker Scout (if available) +docker scout cves gemini-flow-backend:latest + +# Using Trivy +docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy image gemini-flow-backend:latest + +# Using Grype +grype gemini-flow-backend:latest +``` + +### Container Security Checks + +```bash +# Check security options +docker inspect gemini-flow-backend | jq '.[0].HostConfig.SecurityOpt' + +# Expected: ["no-new-privileges:true"] + +# Check if running as root +docker exec gemini-flow-backend id + +# Expected: uid=1001(geminiflow) gid=1001(nodejs) + +# Check capabilities +docker inspect gemini-flow-backend | jq '.[0].HostConfig.CapDrop' +``` + +## šŸ„ Health Checks + +### Manual 
Health Check + +```bash +# Check health endpoint +curl http://localhost:3001/health + +# Expected response: +# { +# "status": "healthy", +# "timestamp": "2024-01-01T00:00:00.000Z", +# "service": "gemini-flow-backend" +# } +``` + +### Docker Health Status + +```bash +# View health status +docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" + +# Inspect health check logs +docker inspect gemini-flow-backend | jq '.[0].State.Health' + +# View health check history +docker inspect gemini-flow-backend | jq '.[0].State.Health.Log' +``` + +### Health Check from Inside Container + +```bash +# Execute health check manually +docker exec gemini-flow-backend node -e \ + "require('http').get('http://localhost:3001/health', (r) => { + r.on('data', d => console.log(d.toString())); + })" +``` + +## šŸ› Troubleshooting + +### Container Won't Start + +```bash +# Check logs +docker logs gemini-flow-backend + +# Check if port is already in use +lsof -i :3001 + +# Check container events +docker events --filter container=gemini-flow-backend +``` + +### Permission Errors + +```bash +# Fix volume permissions on host +sudo chown -R 1001:1001 ./data ./logs + +# Or run with correct user +docker run --user 1001:1001 ... 
+``` + +### Health Check Failing + +```bash +# Test health endpoint manually +docker exec gemini-flow-backend curl http://localhost:3001/health + +# Check if Node.js process is running +docker exec gemini-flow-backend ps aux | grep node + +# View application logs +docker logs gemini-flow-backend +``` + +### Build Failures + +```bash +# Clean build with no cache +docker build --no-cache -t gemini-flow-backend:latest backend/ + +# Check for Docker daemon issues +docker system info + +# Clean up Docker resources +docker system prune -a +``` + +### High Memory Usage + +```bash +# Check resource usage +docker stats gemini-flow-backend + +# Adjust memory limits in docker-compose.yml +# deploy.resources.limits.memory: 512M + +# Check for memory leaks in logs +docker logs gemini-flow-backend | grep -i "memory" +``` + +## šŸ“š Best Practices + +### Development vs Production + +**Development:** +```bash +# Use volume mounts for hot reload +docker run -d \ + --name gemini-flow-dev \ + -p 3001:3001 \ + -v $(pwd)/backend/src:/app/src \ + -e NODE_ENV=development \ + gemini-flow-backend:latest +``` + +**Production:** +```bash +# Use docker-compose with all security options +docker-compose up -d +``` + +### Logging + +```bash +# Follow logs with timestamps +docker logs -f --timestamps gemini-flow-backend + +# View last 100 lines +docker logs --tail 100 gemini-flow-backend + +# Save logs to file +docker logs gemini-flow-backend > backend.log 2>&1 +``` + +### Updating the Image + +```bash +# Pull latest changes +git pull origin main + +# Rebuild image +docker-compose build --no-cache backend + +# Restart with new image +docker-compose up -d backend + +# Verify new image is running +docker ps | grep gemini-flow-backend +``` + +### Backup and Restore + +```bash +# Backup volumes +docker run --rm \ + -v backend-data:/data \ + -v $(pwd)/backups:/backup \ + alpine tar czf /backup/backend-data-$(date +%Y%m%d).tar.gz /data + +# Restore volumes +docker run --rm \ + -v backend-data:/data \ + 
-v $(pwd)/backups:/backup \ + alpine tar xzf /backup/backend-data-20240101.tar.gz -C / +``` + +### Monitoring + +```bash +# Real-time resource usage +docker stats gemini-flow-backend + +# Container events +docker events --filter container=gemini-flow-backend --since 1h + +# Export metrics +docker inspect gemini-flow-backend | jq '.[0].State' +``` + +## šŸ”— References + +- [Docker Best Practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) +- [Node.js Docker Best Practices](https://github.com/nodejs/docker-node/blob/main/docs/BestPractices.md) +- [Issue #81](https://github.com/clduab11/gemini-flow/issues/81) +- [Pull Request #66](https://github.com/clduab11/gemini-flow/pull/66) + +## šŸ“ Notes + +- The backend runs on port 3001 by default +- Health check endpoint: `http://localhost:3001/health` +- Non-root user: `geminiflow` (UID 1001) +- Data directory: `/app/.data` +- Logs directory: `/app/logs` +- Base image: `node:18-alpine` +- Signal handler: `dumb-init` + +## šŸ†˜ Support + +For issues or questions: +- Open an issue on GitHub +- Check existing issues for similar problems +- Review Docker logs for error messages +- Verify environment variables are set correctly diff --git a/GEMINI.md b/GEMINI.md index c27c5cae..c9c485a7 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -29,7 +29,7 @@ gemini-flow is now available as an official Gemini CLI extension, packaging: ```bash # Install from GitHub -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow # Install from local clone cd /path/to/gemini-flow @@ -39,6 +39,8 @@ gemini extensions install . gemini extensions enable gemini-flow ``` +> **āš ļø Important**: Use the full GitHub URL (`https://github.com/clduab11/gemini-flow`). The shorthand syntax `github:username/repo` is **not supported** and will cause installation errors. 
+ ### Using gemini-flow Commands Once enabled, use gemini-flow commands directly in Gemini CLI: @@ -100,7 +102,7 @@ gemini-flow also includes its own extension management commands: ```bash # Using gem-extensions command -gemini-flow gem-extensions install github:user/extension +gemini-flow gem-extensions install https://github.com/user/extension gemini-flow gem-extensions list gemini-flow gem-extensions enable extension-name gemini-flow gem-extensions info extension-name @@ -157,7 +159,7 @@ This documentation is specifically engineered for **Gemini Code Assist** using G gemini-flow is now available as an official Gemini CLI extension. Install it with: ```bash -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow ``` --- diff --git a/README.md b/README.md index d438a036..dc60c7b4 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,9 @@ gemini-flow init --protocols a2a,mcp --topology hierarchical gemini-flow agents spawn --count 50 --specialization "enterprise-ready" # NEW: Official Gemini CLI Extension (October 8, 2025) -gemini extensions install github:clduab11/gemini-flow # Install as Gemini extension -gemini extensions enable gemini-flow # Enable the extension -gemini hive-mind spawn "Build AI application" # Use commands in Gemini CLI +gemini extensions install https://github.com/clduab11/gemini-flow # Install as Gemini extension +gemini extensions enable gemini-flow # Enable the extension +gemini hive-mind spawn "Build AI application" # Use commands in Gemini CLI ``` **šŸš€ Modern Protocol Support**: Native A2A and MCP integration for seamless inter-agent communication and model coordination @@ -489,7 +489,7 @@ gemini-flow is now available as an **official Gemini CLI extension**, providing ```bash # Install from GitHub -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow # Install from local clone cd /path/to/gemini-flow @@ 
-499,6 +499,8 @@ gemini extensions install . gemini extensions enable gemini-flow ``` +> **Note**: Always use the full GitHub URL format (`https://github.com/username/repo`). The shorthand syntax `github:username/repo` is **not supported** by Gemini CLI and will result in "Install source not found" errors. + ### What's Included The extension packages gemini-flow's complete AI orchestration platform: @@ -560,7 +562,7 @@ gemini-flow also includes its own extension management commands: ```bash # Using gem-extensions command -gemini-flow gem-extensions install github:user/extension +gemini-flow gem-extensions install https://github.com/user/extension gemini-flow gem-extensions list gemini-flow gem-extensions enable extension-name gemini-flow gem-extensions info extension-name diff --git a/backend/.dockerignore b/backend/.dockerignore new file mode 100644 index 00000000..be81d0ad --- /dev/null +++ b/backend/.dockerignore @@ -0,0 +1,60 @@ +# Dependencies +node_modules +npm-debug.log +yarn-error.log +pnpm-debug.log + +# Environment files +.env +.env.* +!.env.example + +# Git +.git +.gitignore + +# Data directories (should be volume mounted) +.data +.data-test +.data/* +backups +logs + +# Testing +coverage +*.test.js +*.spec.js +__tests__ + +# Documentation (except README) +*.md +!README.md + +# IDE +.vscode +.idea +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db +.Spotlight-V100 +.Trashes + +# Build artifacts +dist +build +*.tgz + +# Temporary files +tmp +temp +*.tmp +*.cache + +# Development +nodemon.json +.eslintrc* +.prettierrc* diff --git a/backend/.env.example b/backend/.env.example index cbb4e722..d783824b 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -11,4 +11,13 @@ GOOGLE_AI_API_KEY=your_google_ai_api_key_here PORT=3001 # CORS Origins (for development) -CORS_ORIGINS=http://localhost:5173,http://localhost:3000 \ No newline at end of file +CORS_ORIGINS=http://localhost:5173,http://localhost:3000 + +# Logging Configuration +# Log levels: trace, 
debug, info, warn, error, fatal +# Recommended: 'debug' for development, 'info' for production +LOG_LEVEL=info + +# Environment (affects log formatting) +# 'development' enables pretty printing, 'production' uses JSON +NODE_ENV=development \ No newline at end of file diff --git a/backend/BACKUP_SYSTEM.md b/backend/BACKUP_SYSTEM.md new file mode 100644 index 00000000..3b8a893f --- /dev/null +++ b/backend/BACKUP_SYSTEM.md @@ -0,0 +1,195 @@ +# Database Backup System + +## Overview + +The automated backup system provides comprehensive protection for all database files in the `.data/` directory. It creates compressed, timestamped backups automatically at configurable intervals. + +## Features + +- āœ… **Automated Backups**: Runs every 24 hours by default (configurable) +- āœ… **Startup Backup**: Creates backup on server startup +- āœ… **Shutdown Backup**: Creates final backup on graceful shutdown +- āœ… **Compression**: Uses gzip compression to save disk space +- āœ… **Rotation**: Automatically keeps only the last 30 backups (configurable) +- āœ… **Metadata**: Each backup includes timestamp and file list +- āœ… **API Endpoints**: Manual backup/restore via REST API + +## Backed Up Files + +The system backs up the following files from `.data/`: +- `workflows.json` - User-created workflow definitions +- `store-state.json` - Current UI state +- `sessions.json` - Active session data + +## Configuration + +Set these environment variables in your `.env` file: + +```bash +# Backup configuration +BACKUP_INTERVAL_HOURS=24 # Backup every 24 hours +MAX_BACKUPS=30 # Keep last 30 backups +API_KEY=your-secure-api-key-for-admin-endpoints +``` + +## Directory Structure + +``` +.data/ +ā”œā”€ā”€ workflows.json +ā”œā”€ā”€ store-state.json +ā”œā”€ā”€ sessions.json +└── backups/ + ā”œā”€ā”€ backup-2025-10-27T23-51-02-293Z/ + │ ā”œā”€ā”€ workflows.json.gz + │ ā”œā”€ā”€ store-state.json.gz + │ ā”œā”€ā”€ sessions.json.gz + │ └── metadata.json + └── backup-2025-10-27T23-52-19-819Z/ + └── ... 
+``` + +## API Endpoints + +### List Backups +```bash +curl http://localhost:3001/api/admin/backups \ + -H "X-API-Key: $API_KEY" +``` + +Response: +```json +{ + "success": true, + "data": { + "backups": [ + { + "name": "backup-2025-10-27T23-51-02-293Z", + "timestamp": "2025-10-27T23:51:02.303Z", + "files": ["workflows.json", "store-state.json", "sessions.json"], + "size": 4096 + } + ], + "stats": { + "count": 5, + "totalSize": 20480, + "oldest": "2025-10-27T23:51:02.303Z", + "newest": "2025-10-27T23:52:19.823Z" + } + } +} +``` + +### Create Manual Backup +```bash +curl -X POST http://localhost:3001/api/admin/backups \ + -H "X-API-Key: $API_KEY" +``` + +Response: +```json +{ + "success": true, + "data": { + "backupPath": "/path/to/.data/backups/backup-2025-10-27T23-52-19-819Z" + } +} +``` + +### Restore from Backup +```bash +curl -X POST http://localhost:3001/api/admin/backups/backup-2025-10-27T23-51-02-293Z/restore \ + -H "X-API-Key: $API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"confirm": "RESTORE"}' +``` + +Response: +```json +{ + "success": true, + "message": "Database restored. Restart recommended." +} +``` + +āš ļø **Warning**: Restoring a backup will overwrite current database files. Consider creating a manual backup first. 
+ +## Programmatic Usage + +### Create Backup +```javascript +import { createBackup } from './backend/src/db/backup.js'; + +const backupPath = await createBackup(); +console.log('Backup created:', backupPath); +``` + +### List Backups +```javascript +import { listBackups, getBackupStats } from './backend/src/db/backup.js'; + +const backups = await listBackups(); +const stats = await getBackupStats(); + +console.log('Available backups:', backups.length); +console.log('Total size:', stats.totalSize, 'bytes'); +``` + +### Restore Backup +```javascript +import { restoreBackup } from './backend/src/db/backup.js'; + +await restoreBackup('backup-2025-10-27T23-51-02-293Z'); +console.log('Database restored'); +``` + +## Security + +- **Authentication**: All admin endpoints require `X-API-Key` header (when `API_KEY` env var is set) +- **Confirmation**: Restore operations require explicit `{"confirm": "RESTORE"}` in request body +- **Read-only by Default**: Backups are created but never automatically deleted except during rotation + +## Monitoring + +The backup system logs all operations: +- Backup creation success/failure +- Backup restoration success/failure +- Old backup deletion +- Scheduler start/stop + +Example log output: +``` +[2025-10-27T23:51:02.303Z] INFO: Database backup created {"backupName":"backup-2025-10-27T23-51-02-293Z","files":["workflows.json","store-state.json","sessions.json"]} +[2025-10-27T23:51:56.350Z] INFO: Backup scheduler started {"intervalHours":24} +[2025-10-27T23:52:32.682Z] INFO: Final backup completed +``` + +## Troubleshooting + +### Backup Not Created +- Check that `.data/` directory exists +- Verify database files exist in `.data/` +- Check server logs for error messages + +### Restore Failed +- Verify backup exists in `.data/backups/` +- Check backup metadata.json is valid +- Ensure sufficient disk space + +### Too Many Backups +- Reduce `MAX_BACKUPS` environment variable +- Manually delete old backups from `.data/backups/` + +## Future 
Enhancements + +- **Cloud Backup**: Integration with cloud storage (S3, Google Cloud Storage) +- **Encryption**: Encrypt backups at rest +- **Incremental Backups**: Only backup changed files +- **Backup Verification**: Automatically verify backup integrity +- **Email Notifications**: Alert on backup success/failure + +## Related Issues + +- Issue #73: Automated Backup System (this implementation) +- Issue #68: Atomic Database Operations (dependency) +- Pull Request #66: Original review discussion diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 00000000..b0b24df1 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,67 @@ +# ============================================================================= +# Stage 1: Builder - Install dependencies and prepare application +# ============================================================================= +FROM node:18-alpine AS builder + +WORKDIR /app + +# Copy package files first (for better caching) +COPY package*.json ./ + +# Install ALL dependencies (including devDependencies for potential build steps) +RUN npm ci && npm cache clean --force + +# Copy application source +COPY . . 
+ +# Remove development dependencies +RUN npm prune --production + +# ============================================================================= +# Stage 2: Production - Minimal runtime image +# ============================================================================= +FROM node:18-alpine + +# Install dumb-init to handle PID 1 responsibilities +RUN apk add --no-cache dumb-init + +# Create non-root user and group +RUN addgroup -g 1001 -S nodejs && \ + adduser -S geminiflow -u 1001 -G nodejs + +WORKDIR /app + +# Create data directory with correct permissions +RUN mkdir -p .data logs && \ + chown -R geminiflow:nodejs .data logs + +# Copy dependencies from builder stage +COPY --from=builder --chown=geminiflow:nodejs /app/node_modules ./node_modules + +# Copy application code with correct ownership +COPY --chown=geminiflow:nodejs package*.json ./ +COPY --chown=geminiflow:nodejs src ./src + +# Switch to non-root user +USER geminiflow + +# Expose port +EXPOSE 3001 + +# Health check (runs as non-root user) +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD node -e "require('http').get('http://localhost:3001/health', (r) => {process.exit(r.statusCode === 200 ? 
0 : 1)})" || exit 1 + +# Use dumb-init to handle signals properly +ENTRYPOINT ["dumb-init", "--"] + +# Start server +CMD ["node", "src/server.js"] + +# Metadata +LABEL org.opencontainers.image.title="Gemini Flow Backend" \ + org.opencontainers.image.description="Backend API and WebSocket server for Gemini Flow" \ + org.opencontainers.image.version="1.0.0" \ + org.opencontainers.image.vendor="Gemini Flow" \ + org.opencontainers.image.source="https://github.com/clduab11/gemini-flow" \ + maintainer="clduab11" diff --git a/backend/LOGGING.md b/backend/LOGGING.md new file mode 100644 index 00000000..800a241d --- /dev/null +++ b/backend/LOGGING.md @@ -0,0 +1,232 @@ +# Structured Logging with Pino + +This document describes the structured logging implementation in the Gemini Flow backend using [Pino](https://getpino.io/). + +## Overview + +The backend uses Pino for production-ready structured logging with the following features: + +- **Log Levels**: debug, info, warn, error, fatal +- **Development Mode**: Pretty-printed, colorized output +- **Production Mode**: JSON output for log aggregation +- **Request Tracking**: Unique request IDs for correlation +- **Error Serialization**: Full stack traces and error details +- **Module Loggers**: Context-aware logging per module + +## Configuration + +### Environment Variables + +Configure logging via `.env` file: + +```bash +# Log level (trace, debug, info, warn, error, fatal) +LOG_LEVEL=info + +# Environment (affects output format) +NODE_ENV=production +``` + +**Recommended Settings:** +- Development: `LOG_LEVEL=debug`, `NODE_ENV=development` +- Production: `LOG_LEVEL=info`, `NODE_ENV=production` + +## Usage + +### Basic Logging + +```javascript +import { logger } from './utils/logger.js'; + +// Simple message +logger.info('Server started'); + +// With structured data +logger.info({ port: 3001, env: 'production' }, 'Server started'); + +// Error logging +logger.error({ err: error, userId: '123' }, 'Request failed'); +``` + +### 
Module-Specific Logging + +Create child loggers for different modules: + +```javascript +import { createModuleLogger } from './utils/logger.js'; + +const logger = createModuleLogger('gemini-api'); + +logger.info({ requestId: 'abc123' }, 'Processing request'); +logger.debug({ prompt: 'test' }, 'Built prompt'); +logger.error({ err: error }, 'API request failed'); +``` + +### Log Levels + +Use appropriate log levels for different scenarios: + +```javascript +// Detailed debugging info (development only) +logger.debug({ variable: value }, 'Debug information'); + +// General information +logger.info({ userId: '123' }, 'User logged in'); + +// Warning conditions +logger.warn({ retries: 3 }, 'Retry limit approaching'); + +// Error conditions +logger.error({ err: error }, 'Operation failed'); + +// Fatal errors (application crash) +logger.fatal({ err: error }, 'Application cannot continue'); +``` + +## Request Tracking + +All HTTP requests automatically get unique request IDs for correlation. + +### Request ID Middleware + +The `requestId` middleware: +- Checks for `X-Request-ID` header +- Generates UUID if not present +- Attaches to `req.id` +- Adds to response header + +### Request Logger Middleware + +The `requestLogger` middleware logs: +- Incoming requests (method, path, IP) +- Request completion (status code, duration) +- Appropriate log levels based on status code + +## Output Examples + +### Development Mode (Pretty Print) + +``` +[23:46:50 UTC] INFO: Server started + env: "development" + version: "1.0.0" + port: 3001 + healthCheck: "http://localhost:3001/health" + apiBase: "http://localhost:3001/api" + +[23:47:20 UTC] INFO: Incoming request + requestId: "12ae5ffc-edf8-44d4-95a3-b9e34ae9d29d" + method: "GET" + path: "/health" + ip: "::1" + userAgent: "curl/8.5.0" + +[23:47:20 UTC] INFO: Request completed + requestId: "12ae5ffc-edf8-44d4-95a3-b9e34ae9d29d" + statusCode: 200 + duration: 4 +``` + +### Production Mode (JSON) + +```json 
+{"level":"info","time":1761608823254,"env":"production","version":"1.0.0","port":3001,"healthCheck":"http://localhost:3001/health","apiBase":"http://localhost:3001/api","msg":"Server started"} +{"level":"info","time":1761608840456,"requestId":"550e8400-e29b-41d4-a716-446655440000","method":"POST","path":"/api/gemini/execute","ip":"::1","msg":"Incoming request"} +{"level":"info","time":1761608840789,"requestId":"550e8400-e29b-41d4-a716-446655440000","statusCode":201,"duration":333,"msg":"Request completed"} +``` + +## Log Aggregation + +Pino's JSON output is compatible with popular log aggregation services: + +### Elasticsearch + Kibana (ELK Stack) + +Stream logs to Elasticsearch: + +```bash +node src/server.js | pino-elasticsearch +``` + +### Datadog + +Use Datadog agent to collect logs: + +```yaml +# datadog.yaml +logs: + - type: file + path: /var/log/gemini-flow.log + service: gemini-flow-backend + source: nodejs +``` + +### AWS CloudWatch + +Use CloudWatch agent: + +```bash +node src/server.js | aws logs put-log-events \ + --log-group-name gemini-flow \ + --log-stream-name backend +``` + +### Splunk + +Use HTTP Event Collector: + +```bash +node src/server.js | pino-splunk +``` + +## Performance + +Pino is optimized for performance: + +- **5-10x faster** than console.log in production +- **Asynchronous** by default (doesn't block event loop) +- **Minimal memory** overhead +- **Fast JSON** serialization + +## Error Handling + +Pino properly serializes errors with full stack traces: + +```javascript +try { + // ... operation +} catch (error) { + logger.error({ + err: error, + operation: 'processRequest', + requestId: req.id + }, 'Operation failed'); +} +``` + +Output includes: +- Error message +- Stack trace +- Error type +- Custom context + +## Best Practices + +1. **Use appropriate log levels**: Don't log everything as `info` +2. **Include context**: Add request IDs, user IDs, etc. +3. **Log structured data**: Use objects for searchable fields +4. 
**Avoid sensitive data**: Don't log passwords, tokens, PII +5. **Use module loggers**: Create child loggers for different components +6. **Log meaningful messages**: Make messages descriptive and actionable + +## Files + +- `backend/src/utils/logger.js` - Core logger configuration +- `backend/src/api/middleware/requestId.js` - Request ID generation +- `backend/src/api/middleware/requestLogger.js` - HTTP request logging + +## References + +- [Pino Documentation](https://getpino.io/) +- [Pino Best Practices](https://getpino.io/#/docs/best-practices) +- [Log Levels](https://getpino.io/#/docs/api?id=logger-levels) +- [Child Loggers](https://getpino.io/#/docs/child-loggers) diff --git a/backend/package-lock.json b/backend/package-lock.json index c100fdcf..6d61cc02 100644 --- a/backend/package-lock.json +++ b/backend/package-lock.json @@ -11,7 +11,9 @@ "@google/generative-ai": "^0.24.1", "cors": "^2.8.5", "dotenv": "^16.3.1", - "express": "^4.18.2" + "express": "^4.18.2", + "pino": "^8.16.1", + "pino-pretty": "^10.2.3" }, "devDependencies": { "nodemon": "^3.0.1" @@ -26,6 +28,18 @@ "node": ">=18.0.0" } }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, "node_modules/accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", @@ -59,6 +73,15 @@ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", "license": "MIT" }, + "node_modules/atomic-sleep": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/atomic-sleep/-/atomic-sleep-1.0.0.tgz", + "integrity": 
"sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==", + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -66,6 +89,26 @@ "dev": true, "license": "MIT" }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/binary-extensions": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", @@ -127,6 +170,30 @@ "node": ">=8" } }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -190,6 +257,12 @@ "fsevents": "~2.3.2" } }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": 
"sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "license": "MIT" + }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -246,6 +319,15 @@ "node": ">= 0.10" } }, + "node_modules/dateformat": { + "version": "4.6.3", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-4.6.3.tgz", + "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==", + "license": "MIT", + "engines": { + "node": "*" + } + }, "node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", @@ -315,6 +397,15 @@ "node": ">= 0.8" } }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, "node_modules/es-define-property": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", @@ -360,6 +451,24 @@ "node": ">= 0.6" } }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, "node_modules/express": { "version": "4.21.2", "resolved": 
"https://registry.npmjs.org/express/-/express-4.21.2.tgz", @@ -406,6 +515,27 @@ "url": "https://opencollective.com/express" } }, + "node_modules/fast-copy": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/fast-copy/-/fast-copy-3.0.2.tgz", + "integrity": "sha512-dl0O9Vhju8IrcLndv2eU4ldt1ftXMqqfgN4H1cpmGV7P6jeB9FwpN9a2c8DPGE1Ys88rNUJVYDHq73CGAGOPfQ==", + "license": "MIT" + }, + "node_modules/fast-redact": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/fast-redact/-/fast-redact-3.5.0.tgz", + "integrity": "sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + "license": "MIT" + }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", @@ -575,6 +705,12 @@ "node": ">= 0.4" } }, + "node_modules/help-me": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz", + "integrity": "sha512-7xgomUX6ADmcYzFik0HzAxh/73YlKR9bmFzf51CZwR+b6YtzU2m0u49hQCqV6SvlqIqsaxovfwdvbnsw3b/zpg==", + "license": "MIT" + }, "node_modules/http-errors": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", @@ -603,6 +739,26 @@ "node": ">=0.10.0" } }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + 
{ + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/ignore-by-default": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", @@ -671,6 +827,15 @@ "node": ">=0.12.0" } }, + "node_modules/joycon": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/joycon/-/joycon-3.1.1.tgz", + "integrity": "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -753,6 +918,15 @@ "node": "*" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", @@ -853,6 +1027,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/on-exit-leak-free": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.2.tgz", + "integrity": "sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, "node_modules/on-finished": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", @@ -865,6 +1048,15 @@ "node": ">= 0.8" } }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", 
+ "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -893,6 +1085,84 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pino": { + "version": "8.21.0", + "resolved": "https://registry.npmjs.org/pino/-/pino-8.21.0.tgz", + "integrity": "sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q==", + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0", + "fast-redact": "^3.1.1", + "on-exit-leak-free": "^2.1.0", + "pino-abstract-transport": "^1.2.0", + "pino-std-serializers": "^6.0.0", + "process-warning": "^3.0.0", + "quick-format-unescaped": "^4.0.3", + "real-require": "^0.2.0", + "safe-stable-stringify": "^2.3.1", + "sonic-boom": "^3.7.0", + "thread-stream": "^2.6.0" + }, + "bin": { + "pino": "bin.js" + } + }, + "node_modules/pino-abstract-transport": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pino-abstract-transport/-/pino-abstract-transport-1.2.0.tgz", + "integrity": "sha512-Guhh8EZfPCfH+PMXAb6rKOjGQEoy0xlAIn+irODG5kgfYV+BQ0rGYYWTIel3P5mmyXqkYkPmdIkywsn6QKUR1Q==", + "license": "MIT", + "dependencies": { + "readable-stream": "^4.0.0", + "split2": "^4.0.0" + } + }, + "node_modules/pino-pretty": { + "version": "10.3.1", + "resolved": "https://registry.npmjs.org/pino-pretty/-/pino-pretty-10.3.1.tgz", + "integrity": "sha512-az8JbIYeN/1iLj2t0jR9DV48/LQ3RC6hZPpapKPkb84Q+yTidMCpgWxIT3N0flnBDilyBQ1luWNpOeJptjdp/g==", + "license": "MIT", + "dependencies": { + "colorette": "^2.0.7", + "dateformat": "^4.6.3", + "fast-copy": "^3.0.0", + "fast-safe-stringify": "^2.1.1", + "help-me": "^5.0.0", + "joycon": "^3.1.1", + "minimist": "^1.2.6", + "on-exit-leak-free": "^2.1.0", + "pino-abstract-transport": "^1.0.0", + "pump": "^3.0.0", + "readable-stream": "^4.0.0", + "secure-json-parse": "^2.4.0", + "sonic-boom": "^3.0.0", + "strip-json-comments": "^3.1.1" + 
}, + "bin": { + "pino-pretty": "bin.js" + } + }, + "node_modules/pino-std-serializers": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/pino-std-serializers/-/pino-std-serializers-6.2.2.tgz", + "integrity": "sha512-cHjPPsE+vhj/tnhCy/wiMh3M3z3h/j15zHQX+S9GkTBgqJuTuJzYJ4gUyACLhDaJ7kk9ba9iRDmbH2tJU03OiA==", + "license": "MIT" + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-warning": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/process-warning/-/process-warning-3.0.0.tgz", + "integrity": "sha512-mqn0kFRl0EoqhnL0GQ0veqFHyIN1yig9RHh/InzORTUiZHFRAur+aMtRkELNwGs9aNwKS6tg/An4NYBPGwvtzQ==", + "license": "MIT" + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -913,6 +1183,16 @@ "dev": true, "license": "MIT" }, + "node_modules/pump": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", + "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/qs": { "version": "6.13.0", "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", @@ -928,6 +1208,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/quick-format-unescaped": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz", + "integrity": "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==", + "license": "MIT" + }, "node_modules/range-parser": { "version": "1.2.1", "resolved": 
"https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", @@ -952,6 +1238,22 @@ "node": ">= 0.8" } }, + "node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -965,6 +1267,15 @@ "node": ">=8.10.0" } }, + "node_modules/real-require": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/real-require/-/real-require-0.2.0.tgz", + "integrity": "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==", + "license": "MIT", + "engines": { + "node": ">= 12.13.0" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -985,12 +1296,27 @@ ], "license": "MIT" }, + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "license": "MIT" }, + "node_modules/secure-json-parse": { + "version": "2.7.0", + "resolved": 
"https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz", + "integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==", + "license": "BSD-3-Clause" + }, "node_modules/semver": { "version": "7.7.2", "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", @@ -1149,6 +1475,24 @@ "node": ">=10" } }, + "node_modules/sonic-boom": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-3.8.1.tgz", + "integrity": "sha512-y4Z8LCDBuum+PBP3lSV7RHrXscqksve/bi0as7mhwVnBW+/wUqKT/2Kb7um8yqcFy0duYbbPxzt89Zy2nOCaxg==", + "license": "MIT", + "dependencies": { + "atomic-sleep": "^1.0.0" + } + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "license": "ISC", + "engines": { + "node": ">= 10.x" + } + }, "node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", @@ -1158,6 +1502,27 @@ "node": ">= 0.8" } }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/supports-color": { "version": "5.5.0", "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", @@ -1171,6 +1536,15 @@ "node": ">=4" } }, + "node_modules/thread-stream": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-2.7.0.tgz", + "integrity": "sha512-qQiRWsU/wvNolI6tbbCKd9iKaTnCXsTwVxhhKM6nctPdujTyztjlbUkUTUymidWcMnZ5pWR0ej4a0tjsW021vw==", + "license": "MIT", + "dependencies": { + "real-require": "^0.2.0" + } + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -1249,6 +1623,12 @@ "engines": { "node": ">= 0.8" } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" } } } diff --git a/backend/package.json b/backend/package.json index fb28fc0a..3f113d44 100644 --- a/backend/package.json +++ b/backend/package.json @@ -12,7 +12,9 @@ "@google/generative-ai": "^0.24.1", "express": "^4.18.2", "cors": "^2.8.5", - "dotenv": "^16.3.1" + "dotenv": "^16.3.1", + "pino": "^8.16.1", + "pino-pretty": "^10.2.3" }, "devDependencies": { "nodemon": "^3.0.1" diff --git a/backend/src/api/gemini/index.js b/backend/src/api/gemini/index.js index 71ce3103..00420252 100644 --- a/backend/src/api/gemini/index.js +++ b/backend/src/api/gemini/index.js @@ -6,8 +6,10 @@ import express from 'express'; import { GoogleGenerativeAI } from '@google/generative-ai'; +import { createModuleLogger } from '../../utils/logger.js'; const router = express.Router(); +const logger = createModuleLogger('gemini-api'); // Initialize Gemini client (will be done per request to handle different API keys) const initializeGemini = () => { @@ -116,23 +118,34 @@ router.post('/execute', async (req, res) => { }); } - console.log(`šŸ”„ Executing flow with ${nodes.length} nodes and ${edges.length} edges`); + 
logger.info({ + requestId: req.id, + nodeCount: nodes.length, + edgeCount: edges.length + }, 'Executing flow'); // Build prompt from graph const prompt = buildPromptFromGraph(nodes, edges); - console.log('šŸ“ Built prompt:', prompt); + logger.debug({ + requestId: req.id, + promptLength: prompt.length, + prompt: prompt.length > 100 ? prompt.substring(0, 100) + '...' : prompt + }, 'Built prompt from graph'); // Initialize Gemini client const genAI = initializeGemini(); const model = genAI.getGenerativeModel({ model: 'gemini-pro' }); // Generate content - console.log('šŸ¤– Sending request to Gemini API...'); + logger.debug({ requestId: req.id }, 'Sending request to Gemini API'); const result = await model.generateContent(prompt); const response = await result.response; const text = response.text(); - console.log('āœ… Received response from Gemini API'); + logger.info({ + requestId: req.id, + responseLength: text.length + }, 'Received response from Gemini API'); // Return successful response res.json({ @@ -147,7 +160,11 @@ router.post('/execute', async (req, res) => { }); } catch (error) { - console.error('āŒ Gemini API request failed:', error); + logger.error({ + err: error, + requestId: req.id, + path: req.path + }, 'Gemini API request failed'); // Handle specific error types if (error.message.includes('API key')) { diff --git a/backend/src/api/middleware/auth.js b/backend/src/api/middleware/auth.js new file mode 100644 index 00000000..e5e5eed3 --- /dev/null +++ b/backend/src/api/middleware/auth.js @@ -0,0 +1,52 @@ +/** + * Authentication Middleware + * + * Provides simple API key authentication for admin endpoints. + * Uses X-API-Key header or API_KEY environment variable. 
+ */ + +import { logger } from '../../utils/logger.js'; + +const API_KEY = process.env.API_KEY || ''; + +/** + * Authentication middleware + * @param {Object} options - Authentication options + * @param {boolean} options.required - Whether authentication is required + */ +export function authenticate(options = { required: true }) { + return (req, res, next) => { + if (!options.required) { + return next(); + } + + const providedKey = req.headers['x-api-key']; + + if (!API_KEY) { + logger.error('API_KEY not configured. Authentication is required but no API_KEY is set.'); + return res.status(500).json({ + success: false, + error: { message: 'Server misconfiguration: API_KEY not set. Contact administrator.' } + }); + } + + if (!providedKey || providedKey !== API_KEY) { + logger.warn({ ip: req.ip }, 'Unauthorized access attempt'); + return res.status(401).json({ + success: false, + error: { message: 'Unauthorized. Provide valid X-API-Key header.' } + }); + } + + next(); + }; +} + +/** + * Async handler wrapper for route handlers + */ +export function asyncHandler(fn) { + return (req, res, next) => { + Promise.resolve(fn(req, res, next)).catch(next); + }; +} diff --git a/backend/src/api/middleware/requestId.js b/backend/src/api/middleware/requestId.js new file mode 100644 index 00000000..3f45db02 --- /dev/null +++ b/backend/src/api/middleware/requestId.js @@ -0,0 +1,29 @@ +/** + * Request ID Middleware + * + * Generates unique request IDs for correlation tracking. + * Supports X-Request-ID header passthrough or generates new UUIDs. 
+ */ + +import { randomUUID } from 'crypto'; + +/** + * Middleware to add unique request ID to each request + * + * - Checks for existing X-Request-ID header + * - Generates new UUID if not present + * - Attaches ID to request object and response header + * + * @param {import('express').Request} req - Express request object + * @param {import('express').Response} res - Express response object + * @param {import('express').NextFunction} next - Express next function + */ +export function requestId(req, res, next) { + // Use existing request ID from header or generate new one + req.id = req.headers['x-request-id'] || randomUUID(); + + // Add request ID to response headers for client tracking + res.setHeader('X-Request-ID', req.id); + + next(); +} diff --git a/backend/src/api/middleware/requestLogger.js b/backend/src/api/middleware/requestLogger.js new file mode 100644 index 00000000..7e07d00d --- /dev/null +++ b/backend/src/api/middleware/requestLogger.js @@ -0,0 +1,58 @@ +/** + * Request Logger Middleware + * + * Logs all HTTP requests with: + * - Request details (method, path, IP) + * - Request ID for correlation + * - Response status and duration + * - Client identification + */ + +import { logger } from '../../utils/logger.js'; + +/** + * Middleware to log HTTP requests and responses + * + * Logs incoming requests immediately and completed requests + * with duration and status code after response finishes. 
+ * + * @param {import('express').Request} req - Express request object + * @param {import('express').Response} res - Express response object + * @param {import('express').NextFunction} next - Express next function + */ +export function requestLogger(req, res, next) { + const start = Date.now(); + + // Log incoming request + logger.info({ + requestId: req.id, + method: req.method, + path: req.path, + ip: req.ip || req.socket?.remoteAddress || 'unknown', + userAgent: req.headers['user-agent'] + }, 'Incoming request'); + + // Log response when finished + res.on('finish', () => { + const duration = Date.now() - start; + + const logData = { + requestId: req.id, + method: req.method, + path: req.path, + statusCode: res.statusCode, + duration + }; + + // Use appropriate log level based on status code + if (res.statusCode >= 500) { + logger.error(logData, 'Request completed with server error'); + } else if (res.statusCode >= 400) { + logger.warn(logData, 'Request completed with client error'); + } else { + logger.info(logData, 'Request completed'); + } + }); + + next(); +} diff --git a/backend/src/api/routes/admin.js b/backend/src/api/routes/admin.js new file mode 100644 index 00000000..ead8d4b7 --- /dev/null +++ b/backend/src/api/routes/admin.js @@ -0,0 +1,61 @@ +/** + * Admin API Routes + * + * Provides endpoints for manual backup and restore operations. + * All endpoints require authentication via X-API-Key header. 
+ */ + +import express from 'express'; +import { authenticate, asyncHandler } from '../middleware/auth.js'; +import * as backup from '../../db/backup.js'; + +const router = express.Router(); + +/** + * GET /api/admin/backups + * List all available backups with statistics + */ +router.get('/backups', + authenticate({ required: true }), + asyncHandler(async (req, res) => { + const backups = await backup.listBackups(); + const stats = await backup.getBackupStats(); + res.json({ success: true, data: { backups, stats } }); + }) +); + +/** + * POST /api/admin/backups + * Create a new backup manually + */ +router.post('/backups', + authenticate({ required: true }), + asyncHandler(async (req, res) => { + const backupPath = await backup.createBackup(); + res.json({ success: true, data: { backupPath } }); + }) +); + +/** + * POST /api/admin/backups/:name/restore + * Restore database from a backup + * Requires confirmation in request body: {"confirm": "RESTORE"} + */ +router.post('/backups/:name/restore', + authenticate({ required: true }), + asyncHandler(async (req, res) => { + if (req.body.confirm !== 'RESTORE') { + return res.status(400).json({ + success: false, + error: { message: 'Confirmation required: send {"confirm": "RESTORE"}' } + }); + } + await backup.restoreBackup(req.params.name); + res.json({ + success: true, + message: 'Database restored. Restart recommended.' + }); + }) +); + +export default router; diff --git a/backend/src/db/backup.js b/backend/src/db/backup.js new file mode 100644 index 00000000..c268e73d --- /dev/null +++ b/backend/src/db/backup.js @@ -0,0 +1,233 @@ +/** + * Database Backup Service + * + * Provides automated backup and restore functionality for all database files. + * Creates compressed, timestamped backups in .data/backups/ directory. 
+ */ + +import fs from 'fs/promises'; +import path from 'path'; +import { createGzip, createGunzip } from 'zlib'; +import { pipeline } from 'stream/promises'; +import { createReadStream, createWriteStream } from 'fs'; +import { fileURLToPath } from 'url'; +import { dirname } from 'path'; +import { logger } from '../utils/logger.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Database is in project root .data directory +const PROJECT_ROOT = path.join(__dirname, '../../..'); +const DB_DIR = path.join(PROJECT_ROOT, '.data'); +const BACKUP_DIR = path.join(DB_DIR, 'backups'); +const MAX_BACKUPS = parseInt(process.env.MAX_BACKUPS) || 30; + +// Backup name format: backup-YYYY-MM-DDTHH-mm-ss-mmmZ +const BACKUP_NAME_PATTERN = /^backup-\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2}-\d{3}Z$/; + +/** + * Create a timestamped backup of all database files + * @returns {Promise} Path to the created backup + */ +export async function createBackup() { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const backupName = `backup-${timestamp}`; + const backupPath = path.join(BACKUP_DIR, backupName); + + try { + // Ensure backup directory exists + await fs.mkdir(BACKUP_DIR, { recursive: true }); + await fs.mkdir(backupPath, { recursive: true }); + + const files = ['workflows.json', 'store-state.json', 'sessions.json']; + const backedUp = []; + + for (const file of files) { + const sourcePath = path.join(DB_DIR, file); + const destPath = path.join(backupPath, file); + const gzipPath = `${destPath}.gz`; + + // Check if source exists + try { + await fs.access(sourcePath); + } catch { + logger.warn({ file }, 'Backup source file not found, skipping'); + continue; + } + + // Compress and copy + await pipeline( + createReadStream(sourcePath), + createGzip(), + createWriteStream(gzipPath) + ); + + backedUp.push(file); + } + + // Write backup metadata + const metadata = { + timestamp: new Date().toISOString(), + files: backedUp, 
+ version: process.env.npm_package_version || 'unknown' + }; + await fs.writeFile( + path.join(backupPath, 'metadata.json'), + JSON.stringify(metadata, null, 2) + ); + + logger.info({ backupName, files: backedUp }, 'Database backup created'); + + // Clean up old backups + await cleanOldBackups(); + + return backupPath; + } catch (error) { + logger.error({ error: error.message }, 'Failed to create backup'); + throw error; + } +} + +/** + * Validate backup name to prevent path traversal attacks + * @param {string} backupName - Name of the backup to validate + * @returns {boolean} True if valid, throws error if invalid + */ +function validateBackupName(backupName) { + // Backup names must start with 'backup-' and contain only safe characters + if (!BACKUP_NAME_PATTERN.test(backupName)) { + throw new Error('Invalid backup name format'); + } + + // Ensure no path traversal characters + if (backupName.includes('..') || backupName.includes('/') || backupName.includes('\\')) { + throw new Error('Invalid backup name: path traversal not allowed'); + } + + return true; +} + +/** + * Restore database from a backup + * @param {string} backupName - Name of the backup to restore + */ +export async function restoreBackup(backupName) { + // Validate backup name to prevent path injection + validateBackupName(backupName); + + const backupPath = path.join(BACKUP_DIR, backupName); + + try { + // Verify backup exists + await fs.access(backupPath); + + // Read metadata + const metadata = JSON.parse( + await fs.readFile(path.join(backupPath, 'metadata.json'), 'utf-8') + ); + + logger.info({ backupName, metadata }, 'Restoring from backup'); + + // Validate files are in allowed list + const allowedFiles = ['workflows.json', 'store-state.json', 'sessions.json']; + + for (const file of metadata.files) { + // Ensure file is in allowed list + if (!allowedFiles.includes(file)) { + throw new Error(`Invalid file in backup: ${file}`); + } + + const gzipPath = path.join(backupPath, `${file}.gz`); + 
const destPath = path.join(DB_DIR, file); + + // Decompress and restore + await pipeline( + createReadStream(gzipPath), + createGunzip(), + createWriteStream(destPath) + ); + } + + logger.info({ backupName }, 'Database restored successfully'); + } catch (error) { + logger.error({ error: error.message, backupName }, 'Failed to restore backup'); + throw error; + } +} + +/** + * List all available backups + * @returns {Promise} List of backup information + */ +export async function listBackups() { + try { + // Ensure backup directory exists + try { + await fs.access(BACKUP_DIR); + } catch { + return []; + } + + const entries = await fs.readdir(BACKUP_DIR, { withFileTypes: true }); + const backups = []; + + for (const entry of entries) { + if (entry.isDirectory() && entry.name.startsWith('backup-')) { + const metadataPath = path.join(BACKUP_DIR, entry.name, 'metadata.json'); + try { + const metadata = JSON.parse(await fs.readFile(metadataPath, 'utf-8')); + const stats = await fs.stat(path.join(BACKUP_DIR, entry.name)); + backups.push({ + name: entry.name, + timestamp: metadata.timestamp, + files: metadata.files, + size: stats.size + }); + } catch { + // Skip invalid backups + } + } + } + + return backups.sort((a, b) => + new Date(b.timestamp) - new Date(a.timestamp) + ); + } catch (error) { + logger.error({ error: error.message }, 'Failed to list backups'); + return []; + } +} + +/** + * Delete old backups, keeping only MAX_BACKUPS + */ +async function cleanOldBackups() { + const backups = await listBackups(); + + if (backups.length > MAX_BACKUPS) { + const toDelete = backups.slice(MAX_BACKUPS); + + for (const backup of toDelete) { + const backupPath = path.join(BACKUP_DIR, backup.name); + await fs.rm(backupPath, { recursive: true }); + logger.info({ backup: backup.name }, 'Old backup deleted'); + } + } +} + +/** + * Get backup statistics + * @returns {Promise} Backup statistics + */ +export async function getBackupStats() { + const backups = await listBackups(); + 
const totalSize = backups.reduce((sum, b) => sum + b.size, 0); + + return { + count: backups.length, + totalSize, + oldest: backups[backups.length - 1]?.timestamp, + newest: backups[0]?.timestamp + }; +} diff --git a/backend/src/db/backupScheduler.js b/backend/src/db/backupScheduler.js new file mode 100644 index 00000000..34d4eac4 --- /dev/null +++ b/backend/src/db/backupScheduler.js @@ -0,0 +1,49 @@ +/** + * Backup Scheduler + * + * Manages periodic automated backups of the database. + * Runs backups at configured intervals (default: 24 hours). + */ + +import { createBackup } from './backup.js'; +import { logger } from '../utils/logger.js'; + +const BACKUP_INTERVAL = parseInt(process.env.BACKUP_INTERVAL_HOURS) || 24; + +let backupInterval; + +/** + * Start the backup scheduler + * Creates an initial backup on startup and schedules periodic backups + */ +export function startBackupScheduler() { + if (backupInterval) { + logger.warn('Backup scheduler already running'); + return; + } + + // Create initial backup on startup + createBackup().catch(err => + logger.error({ err: err.message }, 'Initial backup failed') + ); + + // Schedule periodic backups + backupInterval = setInterval(() => { + createBackup().catch(err => + logger.error({ err: err.message }, 'Scheduled backup failed') + ); + }, BACKUP_INTERVAL * 60 * 60 * 1000); + + logger.info({ intervalHours: BACKUP_INTERVAL }, 'Backup scheduler started'); +} + +/** + * Stop the backup scheduler + */ +export function stopBackupScheduler() { + if (backupInterval) { + clearInterval(backupInterval); + backupInterval = null; + logger.info('Backup scheduler stopped'); + } +} diff --git a/backend/src/server.js b/backend/src/server.js index 2fd0425c..ecc4ddd6 100644 --- a/backend/src/server.js +++ b/backend/src/server.js @@ -13,6 +13,16 @@ import { dirname, join } from 'path'; // Import API routes import geminiRoutes from './api/gemini/index.js'; +import adminRoutes from './api/routes/admin.js'; + +// Import backup system 
+import { startBackupScheduler, stopBackupScheduler } from './db/backupScheduler.js'; +import { createBackup } from './db/backup.js'; + +// Import logger and middleware +import { logger } from './utils/logger.js'; +import { requestId } from './api/middleware/requestId.js'; +import { requestLogger } from './api/middleware/requestLogger.js'; // Load environment variables dotenv.config(); @@ -31,6 +41,10 @@ app.use(cors({ app.use(express.json()); app.use(express.urlencoded({ extended: true })); +// Request tracking middleware +app.use(requestId); +app.use(requestLogger); + // Health check endpoint app.get('/health', (req, res) => { res.json({ @@ -42,10 +56,17 @@ app.get('/health', (req, res) => { // API routes app.use('/api/gemini', geminiRoutes); +app.use('/api/admin', adminRoutes); // Error handling middleware app.use((err, req, res, next) => { - console.error('Error:', err); + logger.error({ + err, + path: req.path, + method: req.method, + requestId: req.id + }, 'Request error'); + res.status(500).json({ error: 'Internal server error', message: err.message @@ -61,8 +82,46 @@ app.use('*', (req, res) => { }); // Start server -app.listen(PORT, () => { - console.log(`šŸš€ Gemini Flow Backend Server running on port ${PORT}`); - console.log(`šŸ“‹ Health check: http://localhost:${PORT}/health`); - console.log(`šŸ”§ API Base URL: http://localhost:${PORT}/api`); -}); \ No newline at end of file +const server = app.listen(PORT, () => { + logger.info({ + port: PORT, + env: process.env.NODE_ENV || 'development', + healthCheck: `http://localhost:${PORT}/health`, + apiBase: `http://localhost:${PORT}/api` + }, 'Server started'); + + // Start backup scheduler after server is running + startBackupScheduler(); +}); + +// Graceful shutdown handler +const shutdown = async (signal) => { + logger.info({ signal }, 'Shutting down gracefully'); + + // Create final backup before shutdown + try { + await createBackup(); + logger.info('Final backup completed'); + } catch (err) { + 
logger.error({ err }, 'Shutdown backup failed'); + } + + // Stop backup scheduler + stopBackupScheduler(); + + // Close server + server.close(() => { + logger.info('Server closed'); + process.exit(0); + }); + + // Force close after timeout + setTimeout(() => { + logger.error('Forced shutdown after timeout'); + process.exit(1); + }, 10000); +}; + +// Register shutdown handlers +process.on('SIGTERM', () => shutdown('SIGTERM')); +process.on('SIGINT', () => shutdown('SIGINT')); diff --git a/backend/src/utils/logger.js b/backend/src/utils/logger.js new file mode 100644 index 00000000..0c7ecc57 --- /dev/null +++ b/backend/src/utils/logger.js @@ -0,0 +1,91 @@ +/** + * Pino Structured Logger Configuration + * + * Provides production-ready structured logging with: + * - Log levels (debug, info, warn, error) + * - Pretty printing in development + * - JSON output in production + * - Error serialization + * - Module-specific child loggers + */ + +import pino from 'pino'; + +const isProduction = process.env.NODE_ENV === 'production'; +const logLevel = process.env.LOG_LEVEL || (isProduction ? 'info' : 'debug'); + +/** + * Main Pino logger instance + */ +export const logger = pino({ + level: logLevel, + + // Pretty print in development for human readability + transport: !isProduction ? 
{ + target: 'pino-pretty', + options: { + colorize: true, + translateTime: 'HH:MM:ss Z', + ignore: 'pid,hostname' + } + } : undefined, + + // Production settings - structured JSON output + formatters: { + level: (label) => { + return { level: label }; + } + }, + + // Base fields included in every log + base: { + env: process.env.NODE_ENV || 'development', + version: process.env.npm_package_version || 'unknown' + }, + + // Serialize errors, requests, and responses properly + serializers: { + err: pino.stdSerializers.err, + req: pino.stdSerializers.req, + res: pino.stdSerializers.res + } +}); + +/** + * Create a child logger for a specific module + * + * @param {string} module - The module name (e.g., 'server', 'websocket', 'database') + * @returns {pino.Logger} Child logger with module context + * + * @example + * const logger = createModuleLogger('gemini-api'); + * logger.info({ requestId: '123' }, 'Processing request'); + */ +export function createModuleLogger(module) { + return logger.child({ module }); +} + +/** + * Log levels: + * - trace: Very detailed debugging information + * - debug: Debugging information + * - info: General informational messages + * - warn: Warning messages + * - error: Error messages + * - fatal: Fatal error messages (application crash) + * + * Usage examples: + * + * // Simple message + * logger.info('Server started'); + * + * // With structured data + * logger.info({ port: 3001, env: 'production' }, 'Server started'); + * + * // Error logging + * logger.error({ err: error, userId: '123' }, 'Request failed'); + * + * // Module-specific logging + * const apiLogger = createModuleLogger('api'); + * apiLogger.debug({ requestId: 'abc' }, 'Processing request'); + */ diff --git a/dist/cli/commands/gem-extensions.js b/dist/cli/commands/gem-extensions.js new file mode 100644 index 00000000..c6e1b928 --- /dev/null +++ b/dist/cli/commands/gem-extensions.js @@ -0,0 +1,238 @@ +/** + * Gemini CLI Extensions Command + * + * Manages Gemini CLI 
Extensions (October 2025 framework). + * Supports install, enable, disable, update, uninstall, list, and info commands. + */ +import { Command } from "commander"; +import chalk from "chalk"; +import ora from "ora"; +import { Logger } from "../../utils/logger.js"; +import { getExtensionManager } from "../../services/extension-manager.js"; +export class GemExtensionsCommand extends Command { + constructor() { + super("gem-extensions"); + this.logger = new Logger("GemExtensions"); + this.description("Manage Gemini CLI Extensions (Official Framework)") + .alias("gem-ext") + .addCommand(this.createInstallCommand()) + .addCommand(this.createListCommand()) + .addCommand(this.createEnableCommand()) + .addCommand(this.createDisableCommand()) + .addCommand(this.createUpdateCommand()) + .addCommand(this.createUninstallCommand()) + .addCommand(this.createInfoCommand()); + } + /** + * Install command + */ + createInstallCommand() { + return new Command("install") + .description("Install extension from GitHub or local path") + .argument("", "GitHub URL (github:user/repo) or local path") + .option("--enable", "Enable extension after installation", false) + .action(async (source, options) => { + const spinner = ora(`Installing extension from ${source}...`).start(); + try { + const manager = getExtensionManager(); + const metadata = await manager.install(source); + spinner.succeed(`Extension ${chalk.bold(metadata.name)} installed`); + console.log(chalk.blue("\nšŸ“¦ Extension Details:")); + console.log(chalk.gray(` Name: ${metadata.name}`)); + console.log(chalk.gray(` Version: ${metadata.version}`)); + console.log(chalk.gray(` Description: ${metadata.description || 'N/A'}`)); + if (options.enable) { + spinner.start("Enabling extension..."); + await manager.enable(metadata.name); + spinner.succeed(`Extension ${chalk.bold(metadata.name)} enabled`); + } + else { + console.log(chalk.yellow("\nšŸ’” Run 'gem-extensions enable ${metadata.name}' to activate")); + } + } + catch (error) { + 
spinner.fail("Installation failed"); + console.error(chalk.red("Error:"), error instanceof Error ? error.message : error); + process.exit(1); + } + }); + } + /** + * List command + */ + createListCommand() { + return new Command("list") + .description("List all installed extensions") + .option("--enabled", "Show only enabled extensions") + .option("--disabled", "Show only disabled extensions") + .action(async (options) => { + const spinner = ora("Loading extensions...").start(); + try { + const manager = getExtensionManager(); + let extensions = await manager.list(); + if (options.enabled) { + extensions = extensions.filter(ext => ext.enabled); + } + else if (options.disabled) { + extensions = extensions.filter(ext => !ext.enabled); + } + spinner.succeed("Extensions loaded"); + if (extensions.length === 0) { + console.log(chalk.yellow("\nNo extensions found")); + console.log(chalk.gray("Install an extension with: gem-extensions install ")); + return; + } + console.log(chalk.blue("\nšŸ“¦ Installed Extensions:\n")); + for (const ext of extensions) { + const status = ext.enabled + ? chalk.green("āœ“ Enabled") + : chalk.gray("ā—‹ Disabled"); + console.log(chalk.bold(` ${ext.name}`) + chalk.gray(` v${ext.version}`)); + console.log(` Status: ${status}`); + console.log(chalk.gray(` ${ext.description || 'No description'}`)); + console.log(chalk.gray(` Installed: ${new Date(ext.installedAt).toLocaleDateString()}`)); + console.log(); + } + console.log(chalk.gray(`Total: ${extensions.length} extension(s)`)); + } + catch (error) { + spinner.fail("Failed to load extensions"); + console.error(chalk.red("Error:"), error instanceof Error ? 
error.message : error); + process.exit(1); + } + }); + } + /** + * Enable command + */ + createEnableCommand() { + return new Command("enable") + .description("Enable an installed extension") + .argument("", "Extension name") + .action(async (name) => { + const spinner = ora(`Enabling extension ${name}...`).start(); + try { + const manager = getExtensionManager(); + await manager.enable(name); + spinner.succeed(`Extension ${chalk.bold(name)} enabled`); + console.log(chalk.green("\nāœ“ Extension is now active")); + } + catch (error) { + spinner.fail("Failed to enable extension"); + console.error(chalk.red("Error:"), error instanceof Error ? error.message : error); + process.exit(1); + } + }); + } + /** + * Disable command + */ + createDisableCommand() { + return new Command("disable") + .description("Disable an enabled extension") + .argument("", "Extension name") + .action(async (name) => { + const spinner = ora(`Disabling extension ${name}...`).start(); + try { + const manager = getExtensionManager(); + await manager.disable(name); + spinner.succeed(`Extension ${chalk.bold(name)} disabled`); + console.log(chalk.yellow("\nāš ļø Extension is now inactive")); + } + catch (error) { + spinner.fail("Failed to disable extension"); + console.error(chalk.red("Error:"), error instanceof Error ? error.message : error); + process.exit(1); + } + }); + } + /** + * Update command + */ + createUpdateCommand() { + return new Command("update") + .description("Update an installed extension") + .argument("", "Extension name") + .action(async (name) => { + const spinner = ora(`Updating extension ${name}...`).start(); + try { + const manager = getExtensionManager(); + await manager.update(name); + spinner.succeed(`Extension ${chalk.bold(name)} updated`); + console.log(chalk.green("\nāœ“ Extension updated to latest version")); + } + catch (error) { + spinner.fail("Failed to update extension"); + console.error(chalk.red("Error:"), error instanceof Error ? 
error.message : error); + process.exit(1); + } + }); + } + /** + * Uninstall command + */ + createUninstallCommand() { + return new Command("uninstall") + .description("Uninstall an extension") + .argument("", "Extension name") + .option("--force", "Skip confirmation prompt", false) + .action(async (name, options) => { + if (!options.force) { + console.log(chalk.yellow(`\nāš ļø This will permanently remove extension: ${chalk.bold(name)}`)); + // In a real implementation, we'd use inquirer to prompt for confirmation + console.log(chalk.gray("Use --force to skip this confirmation\n")); + } + const spinner = ora(`Uninstalling extension ${name}...`).start(); + try { + const manager = getExtensionManager(); + await manager.uninstall(name); + spinner.succeed(`Extension ${chalk.bold(name)} uninstalled`); + console.log(chalk.green("\nāœ“ Extension removed successfully")); + } + catch (error) { + spinner.fail("Failed to uninstall extension"); + console.error(chalk.red("Error:"), error instanceof Error ? error.message : error); + process.exit(1); + } + }); + } + /** + * Info command + */ + createInfoCommand() { + return new Command("info") + .description("Show detailed information about an extension") + .argument("", "Extension name") + .action(async (name) => { + const spinner = ora(`Loading extension info...`).start(); + try { + const manager = getExtensionManager(); + const info = await manager.info(name); + if (!info) { + spinner.fail(`Extension ${name} not found`); + return; + } + spinner.succeed("Extension info loaded"); + console.log(chalk.blue("\nšŸ“¦ Extension Details:\n")); + console.log(chalk.bold(` Name: `) + info.name); + console.log(chalk.bold(` Display Name: `) + (info.displayName || info.name)); + console.log(chalk.bold(` Version: `) + info.version); + console.log(chalk.bold(` Author: `) + (info.author || 'Unknown')); + console.log(chalk.bold(` Description: `) + (info.description || 'N/A')); + console.log(chalk.bold(` Status: `) + (info.enabled ? 
chalk.green("Enabled") : chalk.gray("Disabled"))); + console.log(chalk.bold(` Source: `) + info.source); + console.log(chalk.bold(` Installed: `) + new Date(info.installedAt).toLocaleString()); + if (info.updatedAt) { + console.log(chalk.bold(` Updated: `) + new Date(info.updatedAt).toLocaleString()); + } + console.log(); + } + catch (error) { + spinner.fail("Failed to load extension info"); + console.error(chalk.red("Error:"), error instanceof Error ? error.message : error); + process.exit(1); + } + }); + } +} +export default GemExtensionsCommand; diff --git a/dist/cli/commands/index.js b/dist/cli/commands/index.js index 20e7f4b5..1e8d59d3 100644 --- a/dist/cli/commands/index.js +++ b/dist/cli/commands/index.js @@ -18,4 +18,4 @@ export { WorkspaceCommand } from "./workspace.js"; export { GeminiCommand } from "./gemini.js"; export { DGMCommand } from "./dgm.js"; export { JulesCommand } from "./jules.js"; -export { ExtensionsCommand } from "./extensions.js"; +export { GemExtensionsCommand } from "./gem-extensions.js"; diff --git a/dist/cli/full-index.js b/dist/cli/full-index.js index a69efdf4..0174d511 100644 --- a/dist/cli/full-index.js +++ b/dist/cli/full-index.js @@ -12,7 +12,7 @@ import { fileURLToPath } from "url"; import { Logger } from "../utils/logger.js"; import { ConfigManager } from "./config/config-manager.js"; // Import all command modules -import { InitCommand, SwarmCommand, AgentCommand, TaskCommand, SparcCommand, HiveMindCommand, MemoryCommand, HooksCommand, SecurityFlagsCommand, ConfigCommand, WorkspaceCommand, GeminiCommand, DGMCommand, JulesCommand, ExtensionsCommand, } from "./commands/index.js"; +import { InitCommand, SwarmCommand, AgentCommand, TaskCommand, SparcCommand, HiveMindCommand, MemoryCommand, HooksCommand, SecurityFlagsCommand, ConfigCommand, WorkspaceCommand, GeminiCommand, DGMCommand, JulesCommand, GemExtensionsCommand, } from "./commands/index.js"; // ES module equivalent of __dirname const __filename = 
fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -83,7 +83,7 @@ function setupCommands() { program.addCommand(new GeminiCommand()); program.addCommand(new DGMCommand()); program.addCommand(new JulesCommand()); - program.addCommand(new ExtensionsCommand()); + program.addCommand(new GemExtensionsCommand()); // QueryCommand has a special constructor, let's skip it for now // program.addCommand(new QueryCommand()); // Additional aliases for commonly used commands diff --git a/dist/services/extension-manager.js b/dist/services/extension-manager.js new file mode 100644 index 00000000..2f68bb5f --- /dev/null +++ b/dist/services/extension-manager.js @@ -0,0 +1,303 @@ +/** + * Extension Manager Service + * + * Manages Gemini CLI Extensions for gemini-flow. + * Handles installation, enabling, disabling, updating, and uninstalling of extensions. + * + * Follows patterns from src/core/mcp-settings-manager.ts + */ +import * as fs from 'fs/promises'; +import * as path from 'path'; +import { exec } from 'child_process'; +import { promisify } from 'util'; +const execAsync = promisify(exec); +// Extension storage directory +const EXTENSIONS_DIR = path.join(process.cwd(), '.gemini-flow', 'extensions'); +const EXTENSIONS_CONFIG_FILE = path.join(EXTENSIONS_DIR, 'extensions.json'); +/** + * Extension Manager Service + */ +export class ExtensionManager { + constructor() { + this.config = null; + } + /** + * Initialize extension manager + */ + async initialize() { + // Ensure extensions directory exists + await fs.mkdir(EXTENSIONS_DIR, { recursive: true }); + // Load existing configuration + await this.loadConfig(); + } + /** + * Load extensions configuration + */ + async loadConfig() { + if (this.config) { + return this.config; + } + try { + const data = await fs.readFile(EXTENSIONS_CONFIG_FILE, 'utf-8'); + this.config = JSON.parse(data); + return this.config; + } + catch (error) { + if (error.code === 'ENOENT') { + // File not found, create default config + 
this.config = { + extensions: {}, + lastUpdated: new Date().toISOString() + }; + await this.saveConfig(); + return this.config; + } + throw new Error(`Failed to load extensions config: ${error.message}`); + } + } + /** + * Save extensions configuration + */ + async saveConfig() { + if (!this.config) { + throw new Error('No configuration to save'); + } + this.config.lastUpdated = new Date().toISOString(); + await fs.writeFile(EXTENSIONS_CONFIG_FILE, JSON.stringify(this.config, null, 2), 'utf-8'); + } + /** + * Install extension from GitHub or local path + */ + async install(source) { + await this.initialize(); + let manifestPath; + let extensionDir; + let extensionName; + // Check if source is GitHub URL or local path + if (source.startsWith('github:') || source.includes('github.com')) { + // Parse GitHub URL + const match = source.match(/(?:github:|https:\/\/github\.com\/)([^/]+)\/([^/]+)/); + if (!match) { + throw new Error('Invalid GitHub URL format'); + } + const [, owner, repo] = match; + extensionName = repo.replace(/\.git$/, ''); + extensionDir = path.join(EXTENSIONS_DIR, extensionName); + // Clone repository + console.log(`Cloning ${owner}/${repo}...`); + await execAsync(`git clone https://github.com/${owner}/${repo}.git ${extensionDir}`); + manifestPath = path.join(extensionDir, 'gemini-extension.json'); + } + else { + // Local path + const sourcePath = path.resolve(source); + manifestPath = path.join(sourcePath, 'gemini-extension.json'); + // Verify manifest exists + try { + await fs.access(manifestPath); + } + catch { + throw new Error(`Manifest not found at ${manifestPath}`); + } + // Read manifest to get extension name + const manifestContent = await fs.readFile(manifestPath, 'utf-8'); + const manifest = JSON.parse(manifestContent); + extensionName = manifest.name; + extensionDir = path.join(EXTENSIONS_DIR, extensionName); + // Copy extension to extensions directory + await this.copyDirectory(sourcePath, extensionDir); + manifestPath = 
path.join(extensionDir, 'gemini-extension.json'); + } + // Read and validate manifest + const manifestContent = await fs.readFile(manifestPath, 'utf-8'); + const manifest = JSON.parse(manifestContent); + // Create extension metadata + const metadata = { + name: manifest.name, + version: manifest.version, + displayName: manifest.displayName, + description: manifest.description, + author: manifest.author, + enabled: false, // Not enabled by default + installedAt: new Date().toISOString(), + source, + manifestPath, + entryPoint: manifest.entryPoint + }; + // Save to config + if (!this.config) { + this.config = { extensions: {}, lastUpdated: new Date().toISOString() }; + } + this.config.extensions[extensionName] = metadata; + await this.saveConfig(); + console.log(`Extension ${extensionName} installed successfully`); + return metadata; + } + /** + * Enable extension + */ + async enable(name) { + await this.initialize(); + if (!this.config?.extensions[name]) { + throw new Error(`Extension ${name} not found`); + } + const extension = this.config.extensions[name]; + // Load extension loader and call onEnable + if (extension.entryPoint) { + const loaderPath = path.join(EXTENSIONS_DIR, name, extension.entryPoint); + try { + const loader = await import(loaderPath); + if (loader.extension && loader.extension.onEnable) { + await loader.extension.onEnable(); + } + } + catch (error) { + console.error(`Failed to enable extension ${name}:`, error.message); + } + } + extension.enabled = true; + extension.updatedAt = new Date().toISOString(); + await this.saveConfig(); + console.log(`Extension ${name} enabled`); + } + /** + * Disable extension + */ + async disable(name) { + await this.initialize(); + if (!this.config?.extensions[name]) { + throw new Error(`Extension ${name} not found`); + } + const extension = this.config.extensions[name]; + // Load extension loader and call onDisable + if (extension.entryPoint) { + const loaderPath = path.join(EXTENSIONS_DIR, name, 
extension.entryPoint); + try { + const loader = await import(loaderPath); + if (loader.extension && loader.extension.onDisable) { + await loader.extension.onDisable(); + } + } + catch (error) { + console.error(`Failed to disable extension ${name}:`, error.message); + } + } + extension.enabled = false; + extension.updatedAt = new Date().toISOString(); + await this.saveConfig(); + console.log(`Extension ${name} disabled`); + } + /** + * Update extension + */ + async update(name) { + await this.initialize(); + if (!this.config?.extensions[name]) { + throw new Error(`Extension ${name} not found`); + } + const extension = this.config.extensions[name]; + const extensionDir = path.join(EXTENSIONS_DIR, name); + // Update from source + if (extension.source.startsWith('github:') || extension.source.includes('github.com')) { + console.log(`Updating ${name} from GitHub...`); + await execAsync(`cd ${extensionDir} && git pull`); + } + // Reload manifest to get new version + const manifestContent = await fs.readFile(extension.manifestPath, 'utf-8'); + const manifest = JSON.parse(manifestContent); + extension.version = manifest.version; + extension.updatedAt = new Date().toISOString(); + // Call onUpdate hook if extension is enabled + if (extension.enabled && extension.entryPoint) { + const loaderPath = path.join(EXTENSIONS_DIR, name, extension.entryPoint); + try { + const loader = await import(loaderPath); + if (loader.extension && loader.extension.onUpdate) { + await loader.extension.onUpdate(); + } + } + catch (error) { + console.error(`Failed to call onUpdate for ${name}:`, error.message); + } + } + await this.saveConfig(); + console.log(`Extension ${name} updated to version ${extension.version}`); + } + /** + * Uninstall extension + */ + async uninstall(name) { + await this.initialize(); + if (!this.config?.extensions[name]) { + throw new Error(`Extension ${name} not found`); + } + const extension = this.config.extensions[name]; + // Disable first if enabled + if 
(extension.enabled) { + await this.disable(name); + } + // Call onUninstall hook + if (extension.entryPoint) { + const loaderPath = path.join(EXTENSIONS_DIR, name, extension.entryPoint); + try { + const loader = await import(loaderPath); + if (loader.extension && loader.extension.onUninstall) { + await loader.extension.onUninstall(); + } + } + catch (error) { + console.error(`Failed to call onUninstall for ${name}:`, error.message); + } + } + // Remove extension directory + const extensionDir = path.join(EXTENSIONS_DIR, name); + await fs.rm(extensionDir, { recursive: true, force: true }); + // Remove from config + delete this.config.extensions[name]; + await this.saveConfig(); + console.log(`Extension ${name} uninstalled`); + } + /** + * List all installed extensions + */ + async list() { + await this.initialize(); + if (!this.config) { + return []; + } + return Object.values(this.config.extensions); + } + /** + * Get extension info + */ + async info(name) { + await this.initialize(); + return this.config?.extensions[name] || null; + } + /** + * Helper: Copy directory recursively + */ + async copyDirectory(src, dest) { + await fs.mkdir(dest, { recursive: true }); + const entries = await fs.readdir(src, { withFileTypes: true }); + for (const entry of entries) { + const srcPath = path.join(src, entry.name); + const destPath = path.join(dest, entry.name); + if (entry.isDirectory()) { + await this.copyDirectory(srcPath, destPath); + } + else { + await fs.copyFile(srcPath, destPath); + } + } + } +} +// Export singleton instance +let extensionManagerInstance = null; +export function getExtensionManager() { + if (!extensionManagerInstance) { + extensionManagerInstance = new ExtensionManager(); + } + return extensionManagerInstance; +} +export default ExtensionManager; diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..75bee8be --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,102 @@ +services: + backend: + build: + context: ./backend 
+ dockerfile: Dockerfile + args: + BUILD_VERSION: ${VERSION:-1.0.0} + BUILD_DATE: ${BUILD_DATE} + image: gemini-flow-backend:latest + container_name: gemini-flow-backend + restart: unless-stopped + + # Run as non-root (redundant with Dockerfile USER, but explicit) + user: "1001:1001" + + # Security options + security_opt: + - no-new-privileges:true + + # Read-only root filesystem with tmpfs for temporary files + read_only: true + tmpfs: + - /tmp + - /app/.npm + + # Resource limits + deploy: + resources: + limits: + cpus: '1' + memory: 512M + reservations: + cpus: '0.5' + memory: 256M + + # Environment variables + environment: + NODE_ENV: production + PORT: 3001 + LOG_LEVEL: ${LOG_LEVEL:-info} + GOOGLE_API_KEY: ${GOOGLE_API_KEY} + + # Volumes for persistence + volumes: + - backend-data:/app/.data + - backend-logs:/app/logs + + # Port mapping + ports: + - "3001:3001" + + # Health check (override from Dockerfile if needed) + healthcheck: + test: ["CMD", "node", "-e", "require('http').get('http://localhost:3001/health', (r) => {process.exit(r.statusCode === 200 ? 
0 : 1)})"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # Logging configuration + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + + # Network + networks: + - gemini-flow-network + + # Optional: Redis for distributed features (commented out by default) + # redis: + # image: redis:7-alpine + # container_name: gemini-flow-redis + # restart: unless-stopped + # command: redis-server --appendonly yes + # user: "999:999" # Redis non-root user + # security_opt: + # - no-new-privileges:true + # volumes: + # - redis-data:/data + # ports: + # - "6379:6379" + # networks: + # - gemini-flow-network + # healthcheck: + # test: ["CMD", "redis-cli", "ping"] + # interval: 10s + # timeout: 3s + # retries: 3 + +volumes: + backend-data: + driver: local + backend-logs: + driver: local + # redis-data: + # driver: local + +networks: + gemini-flow-network: + driver: bridge diff --git a/docs/releases/GEMINI_CLI_EXTENSIONS_IMPLEMENTATION.md b/docs/releases/GEMINI_CLI_EXTENSIONS_IMPLEMENTATION.md index 6463acbc..475aa891 100644 --- a/docs/releases/GEMINI_CLI_EXTENSIONS_IMPLEMENTATION.md +++ b/docs/releases/GEMINI_CLI_EXTENSIONS_IMPLEMENTATION.md @@ -83,11 +83,11 @@ Successfully implemented official Gemini CLI Extensions framework support for ge ```bash # As official Gemini CLI extension -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow gemini extensions enable gemini-flow # Using gemini-flow's built-in extension manager -gemini-flow gem-extensions install github:user/extension +gemini-flow gem-extensions install https://github.com/user/extension gemini-flow gem-extensions list ``` @@ -276,7 +276,7 @@ interface ExtensionMetadata { ```bash # Test installation -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow # Test enable gemini extensions enable gemini-flow diff --git a/extensions/gemini-cli/README.md 
b/extensions/gemini-cli/README.md index eba35418..a039ada9 100644 --- a/extensions/gemini-cli/README.md +++ b/extensions/gemini-cli/README.md @@ -17,9 +17,11 @@ This extension packages gemini-flow's comprehensive AI orchestration capabilitie ```bash # Install from GitHub repository -gemini extensions install github:clduab11/gemini-flow +gemini extensions install https://github.com/clduab11/gemini-flow ``` +> **Note**: Always use the full GitHub URL. The `github:username/repo` shorthand is not supported by Gemini CLI. + ### From Local Directory ```bash diff --git a/scripts/README.md b/scripts/README.md index 06178020..7c8d4e0e 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,10 +1,18 @@ # Scripts Directory +## File Extension Guidelines + +This project uses `"type": "module"` in package.json, making ES modules the default. To avoid ambiguity: + +- **`.mjs`**: ES modules (use `import`/`export`) +- **`.cjs`**: CommonJS modules (use `require()`/`module.exports`) +- **`.js`**: Avoid in scripts directory (ambiguous with "type": "module") + ## postinstall.cjs **Purpose**: Conditional Husky installation script -**Problem Solved**: Fixes npm error 127 that occurs during global installs when husky (a devDependency) is not available. +**Problem Solved**: Fixes npm error 127 that occurs during global installs when husky (a devDependency) is not available. Also prevents "ReferenceError: require is not defined in ES module scope" by using the `.cjs` extension for CommonJS code. 
**How it works**: - Detects production, global, or CI environments diff --git a/verify-docker-security.sh b/verify-docker-security.sh new file mode 100755 index 00000000..e7420245 --- /dev/null +++ b/verify-docker-security.sh @@ -0,0 +1,196 @@ +#!/bin/bash +# Docker Security Verification Script for Gemini Flow Backend +# This script validates all security features of the Docker implementation + +set -e # Exit on error + +echo "šŸ”’ Docker Security Verification Script" +echo "=======================================" +echo "" + +# Colors for output +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Function to print success +success() { + echo -e "${GREEN}āœ“ $1${NC}" +} + +# Function to print error +error() { + echo -e "${RED}āœ— $1${NC}" +} + +# Function to print info +info() { + echo -e "${YELLOW}ℹ $1${NC}" +} + +# Variables +IMAGE_NAME="gemini-flow-backend:latest" +CONTAINER_NAME="gemini-flow-backend" + +echo "1. Building Docker Image" +echo "========================" +cd backend +docker build -t $IMAGE_NAME . +success "Image built successfully" +echo "" + +echo "2. Verifying Image Size" +echo "=======================" +SIZE=$(docker images $IMAGE_NAME --format "{{.Size}}") +info "Image size: $SIZE" +success "Image size check complete" +echo "" + +echo "3. Verifying Non-Root User" +echo "==========================" +USER=$(docker inspect $IMAGE_NAME | jq -r '.[0].Config.User') +if [ "$USER" == "geminiflow" ]; then + success "Container configured to run as non-root user: $USER" +else + error "Container not configured correctly. User: $USER" + exit 1 +fi +echo "" + +echo "4. Verifying Entrypoint (dumb-init)" +echo "===================================" +ENTRYPOINT=$(docker inspect $IMAGE_NAME | jq -r '.[0].Config.Entrypoint[0]') +if [ "$ENTRYPOINT" == "dumb-init" ]; then + success "dumb-init is configured as entrypoint" +else + error "dumb-init not configured. Entrypoint: $ENTRYPOINT" + exit 1 +fi +echo "" + +echo "5. 
Starting Test Container"
+echo "=========================="
+docker run -d --name "$CONTAINER_NAME-test" -p 3001:3001 -e GOOGLE_API_KEY=test-key "$IMAGE_NAME"
+sleep 3  # give the server a moment to boot before exec-ing into it
+success "Container started"
+echo ""
+
+echo "6. Verifying Process User ID"
+echo "============================="
+UID_CHECK=$(docker exec "$CONTAINER_NAME-test" id -u)
+GID_CHECK=$(docker exec "$CONTAINER_NAME-test" id -g)
+if [[ "$UID_CHECK" == "1001" && "$GID_CHECK" == "1001" ]]; then
+  success "Container running as UID 1001, GID 1001"
+else
+  error "Container not running as expected user. UID: $UID_CHECK, GID: $GID_CHECK"
+  docker rm -f "$CONTAINER_NAME-test"  # rm -f cleans up even if the container already exited
+  exit 1
+fi
+echo ""
+
+echo "7. Verifying Running Processes"
+echo "==============================="
+# Note: ps aux may truncate usernames. Check for both full and truncated versions.
+PROCESS_USER=$(docker exec "$CONTAINER_NAME-test" ps aux | grep '[n]ode src/server.js' | awk '{print $1}' | head -n 1)
+# Check if it's the geminiflow user (may appear as 'geminifl', '1001', or '1' in ps output)
+if [[ "$PROCESS_USER" =~ ^(geminiflow|geminifl|1001|1)$ ]]; then
+  success "Node.js process running as non-root user (User: $PROCESS_USER)"
+else
+  error "Process running as unexpected user: $PROCESS_USER"
+  docker rm -f "$CONTAINER_NAME-test"
+  exit 1
+fi
+echo ""
+
+echo "8. Verifying File Permissions"
+echo "=============================="
+DATA_OWNER=$(docker exec "$CONTAINER_NAME-test" stat -c '%U' /app/.data)
+LOGS_OWNER=$(docker exec "$CONTAINER_NAME-test" stat -c '%U' /app/logs)
+if [[ "$DATA_OWNER" == "geminiflow" && "$LOGS_OWNER" == "geminiflow" ]]; then
+  success "Data directories owned by geminiflow user"
+else
+  error "Incorrect ownership. .data: $DATA_OWNER, logs: $LOGS_OWNER"
+  docker rm -f "$CONTAINER_NAME-test"
+  exit 1
+fi
+echo ""
+
+echo "9. 
Testing Health Check Endpoint"
+echo "================================="
+HEALTH_STATUS=$(curl -s --max-time 5 http://localhost:3001/health | jq -r '.status')  # bounded wait so a hung server can't stall the script
+if [[ "$HEALTH_STATUS" == "healthy" ]]; then
+  success "Health check endpoint responding correctly"
+else
+  error "Health check failed. Status: $HEALTH_STATUS"
+  docker rm -f "$CONTAINER_NAME-test"  # rm -f cleans up even if the container already exited
+  exit 1
+fi
+echo ""
+
+echo "10. Waiting for Docker Health Check"
+echo "===================================="
+sleep 35  # HEALTHCHECK interval + start period must elapse before State.Health settles
+DOCKER_HEALTH=$(docker inspect "$CONTAINER_NAME-test" | jq -r '.[0].State.Health.Status')
+if [[ "$DOCKER_HEALTH" == "healthy" ]]; then
+  success "Docker health check reporting healthy"
+else
+  error "Docker health check failed. Status: $DOCKER_HEALTH"
+  docker rm -f "$CONTAINER_NAME-test"
+  exit 1
+fi
+echo ""
+
+echo "11. Verifying Labels"
+echo "===================="
+TITLE=$(docker inspect "$IMAGE_NAME" | jq -r '.[0].Config.Labels["org.opencontainers.image.title"]')
+if [[ "$TITLE" == "Gemini Flow Backend" ]]; then
+  success "Image labels configured correctly"
+else
+  error "Image labels not configured. Title: $TITLE"  # label mismatch is non-fatal: warn but keep going
+fi
+echo ""
+
+echo "12. Checking for Common Vulnerabilities"
+echo "========================================"
+info "Checking for exposed secrets..."
+# Exclude expected GOOGLE_API_KEY and check for other potential secrets
+UNEXPECTED_SECRETS=$(docker exec "$CONTAINER_NAME-test" env | grep -iE "(password|secret|private.*key|token)" | grep -v "GOOGLE_API_KEY" || true)
+if [[ -n "$UNEXPECTED_SECRETS" ]]; then
+  error "Potential secrets found in environment:"
+  echo "$UNEXPECTED_SECRETS"
+  docker rm -f "$CONTAINER_NAME-test"
+  exit 1
+else
+  success "No unexpected secrets exposed"
+fi
+echo ""
+
+echo "13. 
Cleanup"
+echo "==========="
+docker stop "$CONTAINER_NAME-test"
+docker rm "$CONTAINER_NAME-test"
+success "Test container removed"
+echo ""
+
+echo "================================"
+echo "āœ… All Security Checks Passed!"
+echo "================================"
+echo ""
+echo "Summary:"
+echo "--------"
+echo "āœ“ Multi-stage build implemented"
+echo "āœ“ Non-root user (geminiflow, UID 1001) configured"
+echo "āœ“ dumb-init for signal handling"
+echo "āœ“ File ownership set correctly"
+echo "āœ“ Health checks working"
+echo "āœ“ Image size optimized ($SIZE)"
+echo "āœ“ Security labels configured"
+echo ""
+echo "Next steps:"
+echo "----------"
+echo "1. Run: docker compose up -d"
+echo "2. Verify: docker compose ps"
+echo "3. Check logs: docker compose logs -f backend"
+echo "4. Test API: curl http://localhost:3001/health"
+echo ""