Best for: Small to medium deployments, development, testing
# Production docker-compose
docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d
Requirements:
- Docker 20.10+
- Docker Compose 2.0+
- 4GB RAM minimum
- 2 CPU cores minimum
Best for: Large deployments, multi-region, high availability
# Apply Kubernetes manifests
kubectl apply -f k8s/namespace.yaml
kubectl apply -f k8s/configmap.yaml
kubectl apply -f k8s/secrets.yaml
kubectl apply -f k8s/deployments/
kubectl apply -f k8s/services/
kubectl apply -f k8s/ingress.yaml
Requirements:
- Kubernetes 1.24+
- 8GB RAM per node
- 4 CPU cores per node
- Persistent storage (for database)
Option A: ECS (Elastic Container Service)
# Build and push images
docker build -t ruvscan-mcp:latest -f docker/Dockerfile.python .
docker tag ruvscan-mcp:latest ${AWS_ACCOUNT}.dkr.ecr.${REGION}.amazonaws.com/ruvscan-mcp:latest
docker push ${AWS_ACCOUNT}.dkr.ecr.${REGION}.amazonaws.com/ruvscan-mcp:latest
# Deploy with ECS
aws ecs create-service \
--cluster ruvscan-cluster \
--service-name ruvscan-mcp \
--task-definition ruvscan-mcp:1
Option B: EKS (Elastic Kubernetes Service)
# Create cluster
eksctl create cluster --name ruvscan --region us-west-2
# Deploy
kubectl apply -f k8s/
Option A: Cloud Run
# Build and deploy
gcloud builds submit --tag gcr.io/${PROJECT_ID}/ruvscan-mcp
gcloud run deploy ruvscan-mcp \
--image gcr.io/${PROJECT_ID}/ruvscan-mcp \
--platform managed
Option B: GKE (Google Kubernetes Engine)
# Create cluster
gcloud container clusters create ruvscan \
--num-nodes=3 \
--machine-type=n1-standard-2
# Deploy
kubectl apply -f k8s/
Option A: Azure Container Instances
# Deploy container group
az container create \
--resource-group ruvscan-rg \
--name ruvscan-mcp \
--image ruvscan/mcp-server:latest
Option B: AKS (Azure Kubernetes Service)
# Create cluster
az aks create \
--resource-group ruvscan-rg \
--name ruvscan-cluster \
--node-count 3
# Deploy
kubectl apply -f k8s/
# GitHub
GITHUB_TOKEN=ghp_xxxxx
# OpenAI (for embeddings)
OPENAI_API_KEY=sk-xxxxx
# Database
DATABASE_TYPE=sqlite # or supabase
SQLITE_PATH=/data/ruvscan.db
# Server
RUVSCAN_HOST=0.0.0.0
RUVSCAN_PORT=8000
# Rust Engine
RUST_ENGINE_HOST=rust-engine
RUST_ENGINE_PORT=50051
# Go Scanner
RUVSCAN_SOURCE_TYPE=org
RUVSCAN_SOURCE_NAME=ruvnet
# Supabase (if using cloud storage)
SUPABASE_URL=https://xxx.supabase.co
SUPABASE_KEY=xxxxx
# Anthropic (alternative LLM)
ANTHROPIC_API_KEY=sk-ant-xxxxx
# Monitoring
LOG_LEVEL=INFO
SENTRY_DSN=https://xxx@sentry.io/xxx
# Performance
MAX_WORKERS=4
REQUEST_TIMEOUT=30
# Auto-initialized on first run
# Data stored in: /data/ruvscan.db
- Create Supabase project
- Run migrations:
-- Run SQL from src/mcp/storage/migrations/
- Set environment variables:
SUPABASE_URL=https://xxx.supabase.co
SUPABASE_KEY=your_anon_key
# docker-compose.postgres.yml
docker compose -f docker-compose.yml -f docker-compose.postgres.yml up -d
server {
listen 443 ssl http2;
server_name api.ruvscan.io;
ssl_certificate /etc/letsencrypt/live/api.ruvscan.io/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/api.ruvscan.io/privkey.pem;
location / {
proxy_pass http://localhost:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
labels:
- "traefik.enable=true"
- "traefik.http.routers.ruvscan.rule=Host(`api.ruvscan.io`)"
- "traefik.http.routers.ruvscan.tls=true"
- "traefik.http.routers.ruvscan.tls.certresolver=letsencrypt"
Python MCP Server:
# Docker Compose
docker compose up --scale mcp-server=3
# Kubernetes
kubectl scale deployment ruvscan-mcp --replicas=3
Rust Engine:
# Stateless - scale freely
kubectl scale deployment ruvscan-rust --replicas=5
Go Scanners:
# Run as jobs or cron
kubectl create job scanner-job --from=cronjob/ruvscan-scanner
Nginx:
upstream ruvscan_backend {
least_conn;
server 127.0.0.1:8001;
server 127.0.0.1:8002;
server 127.0.0.1:8003;
}
server {
location / {
proxy_pass http://ruvscan_backend;
}
}
Kubernetes:
apiVersion: v1
kind: Service
metadata:
name: ruvscan-mcp
spec:
type: LoadBalancer
selector:
app: ruvscan-mcp
ports:
- port: 80
targetPort: 8000
# prometheus.yml
scrape_configs:
- job_name: 'ruvscan'
static_configs:
- targets: ['localhost:8000']
Import dashboard from monitoring/grafana/ruvscan-dashboard.json
Loki:
# promtail-config.yml
clients:
- url: http://loki:3100/loki/api/v1/push
ELK Stack:
# filebeat.yml
filebeat.inputs:
- type: container
paths:
- '/var/lib/docker/containers/*/*.log'
# SQLite
sqlite3 /data/ruvscan.db ".backup /backups/ruvscan-$(date +%Y%m%d).db"
# Automated with cron
0 2 * * * sqlite3 /data/ruvscan.db ".backup /backups/ruvscan-$(date +%Y%m%d).db"
# Backup environment and configs
tar -czf config-backup-$(date +%Y%m%d).tar.gz \
.env \
config/ \
docker-compose*.yml
- Change default passwords
- Enable SSL/TLS
- Configure firewall rules
- Set up API rate limiting
- Enable authentication (API keys)
- Scan images for vulnerabilities
- Set up secret management (Vault/AWS Secrets Manager)
- Enable audit logging
- Configure CORS properly
- Keep dependencies updated
livenessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 5
periodSeconds: 5
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
# Check logs
docker compose logs -f mcp-server
# Check resource usage
docker stats
# Verify environment
docker compose config
# Check Python memory
docker exec -it ruvscan-mcp ps aux
# Adjust workers
# In docker-compose.yml:
environment:
MAX_WORKERS: 2
# Check database size
ls -lh /data/ruvscan.db
# Analyze slow queries
sqlite3 /data/ruvscan.db "EXPLAIN QUERY PLAN SELECT * FROM repos;"
# Add indexes if needed
environment:
WORKERS: 4
WORKER_CLASS: uvicorn.workers.UvicornWorker
WORKER_CONNECTIONS: 1000
[profile.release]
opt-level = 3
lto = true
codegen-units = 1
# SQLite optimizations
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA cache_size = 10000;
# Docker Compose
docker compose down
docker compose pull
docker compose up -d
# Kubernetes
kubectl rollout undo deployment/ruvscan-mcp
kubectl rollout status deployment/ruvscan-mcp
- Use spot instances for non-critical workloads
- Enable auto-scaling to match demand
- Use cloud storage (S3/GCS) for backups
- Set up budget alerts
- Review logs retention policies
resources:
limits:
cpu: "1"
memory: "512Mi"
requests:
cpu: "250m"
memory: "256Mi"
For deployment issues:
- GitHub Issues: https://github.com/ruvnet/ruvscan/issues
- Documentation: /docs
- Community: Discord