diff --git a/.github/workflows/release-helm.yml b/.github/workflows/release-helm.yml
new file mode 100644
index 0000000000..6719385e91
--- /dev/null
+++ b/.github/workflows/release-helm.yml
@@ -0,0 +1,127 @@
+name: 🧭 Helm Chart Release
+
+on:
+ push:
+ tags:
+ - 'helm-v*'
+ workflow_dispatch:
+ inputs:
+ chart_version:
+ description: 'Chart version to release'
+ required: true
+ type: string
+
+env:
+ REGISTRY: ghcr.io
+ CHART_NAME: trigger
+
+jobs:
+ lint-and-test:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Helm
+ uses: azure/setup-helm@v4
+ with:
+ version: "3.18.3"
+
+ - name: Lint Helm Chart
+ run: |
+ helm lint ./hosting/k8s/helm/
+
+ - name: Render templates
+ run: |
+ helm template test-release ./hosting/k8s/helm/ \
+ --values ./hosting/k8s/helm/values.yaml \
+ --output-dir ./helm-output
+
+ - name: Validate manifests
+ uses: docker://ghcr.io/yannh/kubeconform:v0.7.0
+ with:
+ entrypoint: '/kubeconform'
+ args: "-summary -output json ./helm-output"
+
+ release:
+ needs: lint-and-test
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write # for gh-release
+ packages: write
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Helm
+ uses: azure/setup-helm@v4
+ with:
+ version: "3.18.3"
+
+ - name: Log in to Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract version from tag or input
+ id: version
+ run: |
+ if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
+ VERSION="${{ github.event.inputs.chart_version }}"
+ else
+ VERSION="${{ github.ref_name }}"
+ VERSION="${VERSION#helm-v}"
+ fi
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
+ echo "Releasing version: $VERSION"
+
+ - name: Check Chart.yaml version matches release version
+ run: |
+ VERSION="${{ steps.version.outputs.version }}"
+ CHART_VERSION=$(grep '^version:' ./hosting/k8s/helm/Chart.yaml | awk '{print $2}')
+ echo "Chart.yaml version: $CHART_VERSION"
+ echo "Release version: $VERSION"
+ if [ "$CHART_VERSION" != "$VERSION" ]; then
+ echo "❌ Chart.yaml version does not match release version!"
+ exit 1
+ fi
+ echo "✅ Chart.yaml version matches release version."
+
+ - name: Package Helm Chart
+ run: |
+ helm package ./hosting/k8s/helm/ --destination /tmp/
+
+ - name: Push Helm Chart to GHCR
+ run: |
+ VERSION="${{ steps.version.outputs.version }}"
+ CHART_PACKAGE="/tmp/${{ env.CHART_NAME }}-${VERSION}.tgz"
+
+ # Push to GHCR OCI registry
+ helm push "$CHART_PACKAGE" "oci://${{ env.REGISTRY }}/${{ github.repository_owner }}/charts"
+
+ - name: Create GitHub Release
+ id: release
+ uses: softprops/action-gh-release@v1
+ if: github.event_name == 'push'
+ with:
+ tag_name: ${{ github.ref_name }}
+ name: "Helm Chart ${{ steps.version.outputs.version }}"
+ body: |
+ ### Installation
+ ```bash
+ helm upgrade --install trigger \
+ oci://${{ env.REGISTRY }}/${{ github.repository_owner }}/charts/${{ env.CHART_NAME }} \
+ --version ${{ steps.version.outputs.version }}
+ ```
+
+ ### Changes
+ See commit history for detailed changes in this release.
+ files: |
+ /tmp/${{ env.CHART_NAME }}-${{ steps.version.outputs.version }}.tgz
+ token: ${{ secrets.GITHUB_TOKEN }}
+ draft: true
+ prerelease: true
diff --git a/docs/docs.json b/docs/docs.json
index 88b0d12a1d..9b480e4e60 100644
--- a/docs/docs.json
+++ b/docs/docs.json
@@ -178,6 +178,7 @@
"pages": [
"self-hosting/overview",
"self-hosting/docker",
+ "self-hosting/kubernetes",
{
"group": "Environment variables",
"pages": ["self-hosting/env/webapp", "self-hosting/env/supervisor"]
diff --git a/docs/self-hosting/docker.mdx b/docs/self-hosting/docker.mdx
index 7acc9cefd7..56051a192e 100644
--- a/docs/self-hosting/docker.mdx
+++ b/docs/self-hosting/docker.mdx
@@ -42,8 +42,8 @@ To run the webapp and worker components, you will need:
This machine will host the webapp, postgres, redis, and related services.
-- 2+ vCPU
-- 4+ GB RAM
+- 3+ vCPU
+- 6+ GB RAM
### Worker
@@ -345,6 +345,10 @@ TRIGGER_IMAGE_TAG=v4.0.0-v4-beta.21
This section highlights some of the CLI commands and options that are useful when self-hosting. Please check the [CLI reference](/cli-introduction) for more in-depth documentation.
+
+<Note>
+While v4 is in beta, always use `@v4-beta` instead of `@latest`. For example: `npx trigger.dev@v4-beta dev`
+</Note>
### Login
To avoid being redirected to [Trigger.dev Cloud](https://cloud.trigger.dev) when using the CLI, you need to specify the URL of your self-hosted instance with the `--api-url` or `-a` flag. For example:
diff --git a/docs/self-hosting/kubernetes.mdx b/docs/self-hosting/kubernetes.mdx
new file mode 100644
index 0000000000..a828b9c857
--- /dev/null
+++ b/docs/self-hosting/kubernetes.mdx
@@ -0,0 +1,432 @@
+---
+title: "Kubernetes"
+description: "You can self-host Trigger.dev in Kubernetes using our official Helm chart."
+---
+
+The following instructions will help you deploy Trigger.dev to Kubernetes using our official Helm chart. Make sure to read the self-hosting [overview](/self-hosting/overview) first.
+
+As self-hosted deployments tend to have unique requirements and configurations, we don't provide specific advice for securing your deployment, scaling up, or improving reliability.
+
+Should the burden ever get too much, we'd be happy to see you on [Trigger.dev cloud](https://trigger.dev/pricing) where we deal with these concerns for you.
+
+**Warning:** This guide alone is unlikely to result in a production-ready deployment. Security, scaling, and reliability concerns are not fully addressed here.
+
+## Requirements
+
+### Prerequisites
+- Kubernetes cluster 1.19+
+- Helm 3.8+
+- Kubectl with cluster access
+
+### Resources
+
+The following are minimum requirements for running the entire stack on Kubernetes:
+
+**Cluster resources:**
+- 6+ vCPU total
+- 12+ GB RAM total
+- Persistent volume support
+
+**Individual components:**
+- **Webapp**: 1 vCPU, 2 GB RAM
+- **Supervisor**: 1 vCPU, 1 GB RAM
+- **PostgreSQL**: 1 vCPU, 2 GB RAM
+- **Redis**: 0.5 vCPU, 1 GB RAM
+- **ClickHouse**: 1 vCPU, 2 GB RAM
+- **Object Storage**: 0.5 vCPU, 1 GB RAM
+- **Workers**: Depending on concurrency and machine preset
+
+These requirements scale based on your task concurrency and can be adjusted via the `resources` section in your `values.yaml`. For example:
+
+```yaml
+webapp:
+ resources:
+ requests:
+ cpu: 500m
+ memory: 1Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+## Installation
+
+### Quick start
+
+1. Install with default values (for testing only):
+
+```bash
+helm upgrade -n trigger --install trigger \
+ oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version ~4.0.0-beta \
+ --create-namespace
+```
+
+2. Access the webapp:
+
+```bash
+kubectl port-forward svc/trigger-webapp 3040:3030 -n trigger
+```
+
+3. Open the dashboard: `http://localhost:3040`
+
+4. Login with the magic link:
+
+```bash
+# Check the webapp logs
+kubectl logs -n trigger deployment/trigger-webapp | grep -A1 "magic link"
+```
+
+<Note>
+While v4 is in beta, always use `@v4-beta` instead of `@latest`. For example: `npx trigger.dev@v4-beta dev`
+</Note>
+
+## Configuration
+
+Most values map directly to the environment variables documented in the [webapp](/self-hosting/env/webapp) and [supervisor](/self-hosting/env/supervisor) environment variable overview.
+
+**Naming convention:**
+- Environment variables use `UPPER_SNAKE_CASE`
+- Helm values use `camelCase`
+
+**Example mapping:**
+```bash
+# Environment variable
+APP_ORIGIN=https://trigger.example.com
+
+# Becomes Helm value
+config:
+ appOrigin: "https://trigger.example.com"
+```
+
+### Default values
+
+The following commands will display the default values:
+
+```bash
+# Specific version
+helm show values oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version 4.0.0-beta.5
+
+# Latest v4 beta
+helm show values oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version ~4.0.0-beta
+```
+
+### Custom values
+
+The default values are insecure and are only suitable for testing. You will need to configure your own secrets as a bare minimum.
+
+Create a `values-custom.yaml` file to override the defaults. For example:
+
+```yaml
+# Generate new secrets with `openssl rand -hex 16`
+# WARNING: You should probably use an existingSecret instead
+secrets:
+ enabled: true
+ sessionSecret: "your-32-char-hex-secret-1"
+ magicLinkSecret: "your-32-char-hex-secret-2"
+ # ...
+
+# Recommended: existingSecret, must contain at least the following keys:
+# - SESSION_SECRET
+# - MAGIC_LINK_SECRET
+# - ENCRYPTION_KEY
+# - MANAGED_WORKER_SECRET
+# - OBJECT_STORE_ACCESS_KEY_ID
+# - OBJECT_STORE_SECRET_ACCESS_KEY
+secrets:
+ enabled: false
+ existingSecret: "your-existing-secret"
+
+# Application URLs
+config:
+ appOrigin: "https://trigger.example.com"
+ loginOrigin: "https://trigger.example.com"
+ apiOrigin: "https://trigger.example.com"
+
+# Resource limits
+webapp:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+
+supervisor:
+ resources:
+ requests:
+ cpu: 200m
+ memory: 512Mi
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+```
+
+Deploy with your custom values:
+
+```bash
+helm upgrade -n trigger --install trigger \
+ oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version ~4.0.0-beta \
+ --create-namespace \
+ -f values-custom.yaml
+```
+
+### Extra env
+
+You can set extra environment variables on all services. For example:
+
+```yaml
+webapp:
+ extraEnv:
+ - name: EXTRA_ENV_VAR
+ value: "extra-value"
+```
+
+### Extra annotations
+
+You can set extra annotations on all services. For example:
+
+```yaml
+webapp:
+ podAnnotations:
+ "my-annotation": "my-value"
+```
+
+### External services
+
+You can disable the built-in services and use external services instead. For example:
+
+```yaml
+postgres:
+ enabled: false
+ external: true
+ externalConnection:
+ host: "my-postgres.example.com"
+ port: 5432
+ database: "my-database"
+ username: "my-username"
+ password: "my-password"
+```
+
+## Worker token
+
+When using the default bootstrap configuration, worker creation and authentication is handled automatically. The webapp generates a worker token and makes it available to the supervisor via a shared volume.
+
+### Bootstrap (default)
+
+```yaml
+webapp:
+ bootstrap:
+ enabled: true
+ workerGroupName: "bootstrap"
+```
+
+### Manual
+
+If you need to set up workers separately or use a custom token:
+
+1. Get the worker token from the webapp logs:
+
+```bash
+kubectl logs deployment/trigger-webapp -n trigger | grep -A15 "Worker Token"
+```
+
+2. Create a secret with the token:
+
+```bash
+kubectl create secret generic worker-token \
+ --from-literal=token=tr_wgt_your_token_here \
+ -n trigger
+```
+
+3. Configure the supervisor to use the secret:
+
+```yaml
+supervisor:
+ bootstrap:
+ enabled: false
+ workerToken:
+ secret:
+ name: "worker-token"
+ key: "token"
+```
+
+## Registry setup
+
+See the [Docker registry setup](/self-hosting/docker#registry-setup) for conceptual information. The configuration is specified in your `values.yaml`:
+
+```yaml
+# Use external registry (recommended)
+registry:
+ external: true
+ # Part of deployment image ref, for example: your-registry.example.com/your-company/proj_123:20250625.1.prod
+ repositoryNamespace: "your-company"
+ externalConnection:
+ host: "your-registry.example.com"
+ port: 5000
+ auth:
+ enabled: true
+ username: "your-username"
+ password: "your-password"
+```
+
+<Warning>
+The internal registry (`registry.external: false`) is experimental and requires proper TLS setup and additional cluster configuration. Use an external registry for production.
+</Warning>
+
+## Object storage
+
+See the [Docker object storage setup](/self-hosting/docker#object-storage) for conceptual information. The defaults will use built-in MinIO, but you can use an external S3-compatible storage. The configuration is specified in your `values.yaml`:
+
+```yaml
+# Use external S3-compatible storage
+minio:
+ enabled: false
+ external: true
+ externalConnection:
+ url: "https://s3.amazonaws.com"
+ # or: "https://your-minio.com:9000"
+
+# Configure credentials
+secrets:
+ objectStore:
+ accessKeyId: "admin"
+ secretAccessKey: "very-safe-password"
+```
+
+## Authentication
+
+Authentication options are identical to the [Docker-based installation](/self-hosting/docker#authentication). The configuration is specified in your `values.yaml`:
+
+**GitHub OAuth:**
+```yaml
+webapp:
+ extraEnv:
+ - name: AUTH_GITHUB_CLIENT_ID
+ value: "your-github-client-id"
+ - name: AUTH_GITHUB_CLIENT_SECRET
+ value: "your-github-client-secret"
+```
+
+**Email authentication (Resend):**
+```yaml
+webapp:
+ extraEnv:
+ - name: EMAIL_TRANSPORT
+ value: "resend"
+ - name: FROM_EMAIL
+ value: "noreply@yourdomain.com"
+ - name: REPLY_TO_EMAIL
+ value: "support@yourdomain.com"
+ - name: RESEND_API_KEY
+ value: "your-resend-api-key"
+```
+
+**Restricting access:**
+```yaml
+webapp:
+ extraEnv:
+ - name: WHITELISTED_EMAILS
+ value: "user1@company\\.com|user2@company\\.com"
+```
+
+## Version locking
+
+You can lock versions in two ways:
+
+**Helm chart version (recommended):**
+```bash
+# Pin to a specific version for production
+helm upgrade -n trigger --install trigger \
+ oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version 4.0.0-beta.5
+
+# The app version will be different from the chart version
+# This is the version of the Trigger.dev webapp and supervisor
+# ..and should always match your Trigger.dev CLI version
+helm show chart \
+ oci://ghcr.io/triggerdotdev/charts/trigger \
+ --version 4.0.0-beta.5 | grep appVersion
+```
+
+**Specific image tags:**
+```yaml
+webapp:
+ image:
+ tag: "v4.0.0-v4-beta.21"
+
+supervisor:
+ image:
+ tag: "v4.0.0-v4-beta.21"
+```
+
+The chart version's `appVersion` field determines the default image tags. Newer image tags may be incompatible with older chart versions and vice versa.
+
+## Troubleshooting
+
+**Check logs:**
+```bash
+# Webapp logs
+kubectl logs deployment/trigger-webapp -n trigger -f
+
+# Supervisor logs
+kubectl logs deployment/trigger-supervisor -n trigger -f
+
+# All pods
+kubectl logs -l app.kubernetes.io/instance=trigger -n trigger -f
+```
+
+**Check pod status:**
+```bash
+kubectl get pods -n trigger
+kubectl describe pod -n trigger
+```
+
+**Start from scratch:**
+```bash
+# Delete the release
+helm uninstall trigger -n trigger
+
+# Delete persistent volumes (optional)
+# WARNING: This will delete all your data!
+kubectl delete pvc -l app.kubernetes.io/instance=trigger -n trigger
+
+# Delete the namespace (optional)
+kubectl delete namespace trigger
+```
+
+**Common issues:**
+- **Magic links not working**: Check webapp logs for email delivery errors
+- **Deploy fails**: Verify registry access and authentication
+- **Pods stuck pending**: Describe the pod and check the events
+- **Worker token issues**: Check webapp and supervisor logs for errors
+
+See the [Docker troubleshooting](/self-hosting/docker#troubleshooting) section for more information.
+
+## CLI usage
+
+See the [Docker CLI usage](/self-hosting/docker#cli-usage) section, the commands are identical regardless of deployment method.
+
+<Note>
+While v4 is in beta, always use `@v4-beta` instead of `@latest`. For example: `npx trigger.dev@v4-beta dev`
+</Note>
+
+## CI / GitHub Actions
+
+When running the CLI in a CI environment, your login profiles won't be available. Instead, you can use the `TRIGGER_API_URL` and `TRIGGER_ACCESS_TOKEN` environment
+variables to point at your self-hosted instance and authenticate.
+
+For more detailed instructions, see the [GitHub Actions guide](/github-actions).
+
+## Telemetry
+
+By default, the Trigger.dev webapp sends telemetry data to our servers. This data is used to improve the product and is not shared with third parties. To disable telemetry, set in your `values.yaml`:
+
+```yaml
+telemetry:
+ enabled: false
+```
diff --git a/hosting/docker/.env.example b/hosting/docker/.env.example
index ce2b1717e0..1f50e5e703 100644
--- a/hosting/docker/.env.example
+++ b/hosting/docker/.env.example
@@ -58,6 +58,13 @@ DEV_OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8030/otel
# - 8GB machine: NODE_MAX_OLD_SPACE_SIZE=6400
# NODE_MAX_OLD_SPACE_SIZE=8192
+# ClickHouse
+# - Do NOT use these defaults in production
+CLICKHOUSE_USER=default
+CLICKHOUSE_PASSWORD=password
+CLICKHOUSE_URL=http://default:password@clickhouse:8123?secure=false
+RUN_REPLICATION_CLICKHOUSE_URL=http://default:password@clickhouse:8123
+
# Docker Registry
# - When testing locally, the default values should be fine
# - When deploying to production, you will have to change these, especially the password and URL
diff --git a/hosting/docker/clickhouse/override.xml b/hosting/docker/clickhouse/override.xml
index 7642e22b09..41897c2984 100644
--- a/hosting/docker/clickhouse/override.xml
+++ b/hosting/docker/clickhouse/override.xml
@@ -1,21 +1,20 @@
-
warning
-
+
+ 524288000
+ 1
+
+
+ 8192
+ 1
+ 0
+ 0
+
+
\ No newline at end of file
diff --git a/hosting/docker/webapp/docker-compose.yml b/hosting/docker/webapp/docker-compose.yml
index 86309e8bad..dc0960db0c 100644
--- a/hosting/docker/webapp/docker-compose.yml
+++ b/hosting/docker/webapp/docker-compose.yml
@@ -17,6 +17,7 @@ services:
depends_on:
- postgres
- redis
+ - clickhouse
networks:
- webapp
- supervisor
@@ -58,6 +59,13 @@ services:
TRIGGER_BOOTSTRAP_ENABLED: 1
TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME: bootstrap
TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH: /home/node/shared/worker_token
+ # ClickHouse configuration
+ CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://default:password@clickhouse:8123?secure=false}
+ CLICKHOUSE_LOG_LEVEL: ${CLICKHOUSE_LOG_LEVEL:-info}
+ # Run replication
+ RUN_REPLICATION_ENABLED: ${RUN_REPLICATION_ENABLED:-1}
+ RUN_REPLICATION_CLICKHOUSE_URL: ${RUN_REPLICATION_CLICKHOUSE_URL:-http://default:password@clickhouse:8123}
+ RUN_REPLICATION_LOG_LEVEL: ${RUN_REPLICATION_LOG_LEVEL:-info}
# Limits
# TASK_PAYLOAD_OFFLOAD_THRESHOLD: 524288 # 512KB
# TASK_PAYLOAD_MAXIMUM_SIZE: 3145728 # 3MB
@@ -65,6 +73,8 @@ services:
# TASK_RUN_METADATA_MAXIMUM_SIZE: 262144 # 256KB
# DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT: 100
# DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT: 100
+ # Internal OTEL configuration
+ INTERNAL_OTEL_TRACE_LOGGING_ENABLED: ${INTERNAL_OTEL_TRACE_LOGGING_ENABLED:-0}
postgres:
image: postgres:${POSTGRES_IMAGE_TAG:-14}
@@ -131,10 +141,11 @@ services:
restart: ${RESTART_POLICY:-unless-stopped}
logging: *logging-config
ports:
+ - ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9123:8123
- ${CLICKHOUSE_PUBLISH_IP:-127.0.0.1}:9090:9000
environment:
- CLICKHOUSE_ADMIN_USER: default
- CLICKHOUSE_ADMIN_PASSWORD: password
+ CLICKHOUSE_ADMIN_USER: ${CLICKHOUSE_USER:-default}
+ CLICKHOUSE_ADMIN_PASSWORD: ${CLICKHOUSE_PASSWORD:-password}
volumes:
- clickhouse:/bitnami/clickhouse
- ../clickhouse/override.xml:/bitnami/clickhouse/etc/config.d/override.xml:ro
diff --git a/hosting/k8s/helm/.gitignore b/hosting/k8s/helm/.gitignore
new file mode 100644
index 0000000000..eb389199cc
--- /dev/null
+++ b/hosting/k8s/helm/.gitignore
@@ -0,0 +1,3 @@
+values-*.yaml
+!values-production-example.yaml
+*.tgz
\ No newline at end of file
diff --git a/hosting/k8s/helm/.helmignore b/hosting/k8s/helm/.helmignore
new file mode 100644
index 0000000000..3eb07f68d9
--- /dev/null
+++ b/hosting/k8s/helm/.helmignore
@@ -0,0 +1,17 @@
+# Exclude version control
+.git/
+.gitignore
+.helmignore
+
+# Exclude OS files
+.DS_Store
+
+# Exclude CI/CD and scripts
+.github/
+scripts/
+
+# Exclude package files
+*.tgz
+
+# Exclude additional values files
+values-*.yaml
\ No newline at end of file
diff --git a/hosting/k8s/helm/Chart.yaml b/hosting/k8s/helm/Chart.yaml
new file mode 100644
index 0000000000..d599d43add
--- /dev/null
+++ b/hosting/k8s/helm/Chart.yaml
@@ -0,0 +1,18 @@
+apiVersion: v2
+name: trigger
+description: The official Trigger.dev Helm chart
+type: application
+version: 4.0.0-beta.5
+appVersion: v4.0.0-v4-beta.21
+home: https://trigger.dev
+sources:
+ - https://github.com/triggerdotdev/trigger.dev
+keywords:
+ - trigger
+ - workflow
+ - background-jobs
+ - job-scheduler
+ - task-queue
+ - automation
+annotations:
+ category: Development
diff --git a/hosting/k8s/helm/README.md b/hosting/k8s/helm/README.md
new file mode 100644
index 0000000000..5d9a8ff5e8
--- /dev/null
+++ b/hosting/k8s/helm/README.md
@@ -0,0 +1,555 @@
+# Trigger.dev v4 Helm Chart
+
+This Helm chart deploys Trigger.dev v4 self-hosting stack to Kubernetes.
+
+## Quick Start
+
+### Installation
+
+```bash
+# Deploy with default values (testing/development only)
+helm install trigger .
+
+# Deploy to specific namespace
+helm install trigger . -n trigger --create-namespace
+
+# Deploy with custom values for production
+helm install trigger . -f values-production.yaml -n trigger --create-namespace
+```
+
+### Upgrading
+
+```bash
+# Upgrade existing release
+helm upgrade trigger .
+
+# Upgrade with new values
+helm upgrade trigger . -f values-production.yaml
+```
+
+### Access the dashboard
+
+```bash
+kubectl port-forward svc/trigger-webapp 3040:3030 --address 0.0.0.0
+```
+
+Dashboard: http://localhost:3040/
+
+### Deploying your tasks
+
+```bash
+# The --push arg is required when testing locally
+npx trigger.dev@v4-beta deploy --push
+```
+
+## ⚠️ Security Requirements
+
+### Secrets Configuration
+
+**IMPORTANT**: The default secrets are for **TESTING ONLY** and must be changed for production.
+
+#### Required Secrets
+
+All secrets must be exactly **32 hexadecimal characters** (16 bytes):
+
+- `sessionSecret` - User authentication sessions
+- `magicLinkSecret` - Passwordless login tokens
+- `encryptionKey` - Sensitive data encryption
+- `managedWorkerSecret` - Worker authentication
+
+#### Generate Production Secrets
+
+```bash
+for i in {1..4}; do openssl rand -hex 16; done
+```
+
+#### Configure Production Secrets
+
+```yaml
+# values-production.yaml
+secrets:
+ sessionSecret: "your-generated-secret-1"
+ magicLinkSecret: "your-generated-secret-2"
+ encryptionKey: "your-generated-secret-3"
+ managedWorkerSecret: "your-generated-secret-4"
+ objectStore:
+ accessKeyId: "your-s3-access-key"
+ secretAccessKey: "your-s3-secret-key"
+```
+
+## Architecture
+
+This chart deploys the following components:
+
+### Core Services
+- **Webapp** - Main Trigger.dev application (port 3030)
+- **PostgreSQL** - Primary database with logical replication
+- **Redis** - Cache and job queue
+- **Electric** - Real-time sync service (ElectricSQL)
+
+### Worker Services
+- **Supervisor** - Kubernetes worker orchestrator for executing runs
+
+### Supporting Services
+- **ClickHouse** - Analytics database
+- **MinIO** - S3-compatible object storage
+- **Registry** - Private Docker registry for deployed code (EXPERIMENTAL - disabled by default)
+
+## Configuration
+
+### Basic Configuration
+
+```yaml
+# Application URLs
+config:
+ appOrigin: "https://trigger.example.com"
+ loginOrigin: "https://trigger.example.com"
+ apiOrigin: "https://trigger.example.com"
+
+# Bootstrap mode (auto-creates worker group)
+config:
+ bootstrap:
+ enabled: true # Enable for combined setups
+ workerGroupName: "bootstrap"
+```
+
+### External Services
+
+Use external managed services instead of bundled components:
+
+```yaml
+# External PostgreSQL
+postgres:
+ enabled: false
+ external: true
+ externalConnection:
+ host: "your-postgres.rds.amazonaws.com"
+ port: 5432
+ database: "trigger"
+ username: "trigger_user"
+ password: "your-password"
+
+# External Redis
+redis:
+ enabled: false
+ external: true
+ externalConnection:
+ host: "your-redis.cache.amazonaws.com"
+ port: 6379
+ password: "your-password"
+
+# External Docker Registry (e.g., Kind local registry)
+registry:
+ enabled: true
+ external: true
+ externalConnection:
+ host: "localhost"
+ port: 5001
+ username: ""
+ password: ""
+```
+
+### Ingress Configuration
+
+```yaml
+ingress:
+ enabled: true
+ className: "nginx"
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-prod"
+ hosts:
+ - host: trigger.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: trigger-tls
+ hosts:
+ - trigger.example.com
+```
+
+### Resource Configuration
+
+```yaml
+resources:
+ webapp:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+
+postgres:
+ primary:
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+```
+
+## Deployment Modes
+
+### Testing/Development
+- Use default values
+- Single replica
+- Lower resource limits
+- Bootstrap mode enabled
+
+### Production
+- Custom secrets (required)
+- Multiple replicas with anti-affinity
+- Production resource limits
+- External services recommended
+- Ingress with TLS
+- Persistent storage
+
+## Persistence
+
+All services support persistent storage and allow you to control the storage class globally or per service:
+
+```yaml
+global:
+ storageClass: "fast-ssd" # Default for all services
+
+postgres:
+ primary:
+ persistence:
+ enabled: true
+ size: 10Gi
+ storageClass: "postgres-nvme" # Optional: override for PostgreSQL
+
+redis:
+ master:
+ persistence:
+ enabled: true
+ size: 5Gi
+ storageClass: "redis-ssd" # Optional: override for Redis
+
+clickhouse:
+ persistence:
+ enabled: true
+ size: 10Gi
+ storageClass: "analytics-hdd" # Optional: override for ClickHouse
+
+minio:
+ persistence:
+ enabled: true
+ size: 10Gi
+ storageClass: "objectstore-ssd" # Optional: override for MinIO
+
+registry:
+ persistence:
+ enabled: true
+ size: 10Gi
+ storageClass: "registry-ssd" # Optional: override for Registry
+
+# Shared persistent volume for worker token file
+persistence:
+ shared:
+ enabled: true
+ size: 5Mi
+```
+
+- If a per-service `storageClass` is set, it overrides the global value for that service only.
+- If neither is set, the cluster's default StorageClass is used.
+
+## Monitoring
+
+### Health Checks
+
+Health checks are configured for all services:
+- HTTP endpoints for web services
+- Database connection tests
+- Readiness and liveness probes
+
+### Prometheus Integration
+
+ServiceMonitors are available for webapp and supervisor services:
+
+```yaml
+webapp:
+ serviceMonitor:
+ enabled: true
+ interval: "30s"
+ path: "/metrics"
+ labels:
+ release: prometheus-stack
+
+supervisor:
+ serviceMonitor:
+ enabled: true
+ interval: "30s"
+ path: "/metrics"
+ labels:
+ release: prometheus-stack
+```
+
+## Operations
+
+### Force Pod Restart
+
+When you need to force all pods to restart (e.g., to pick up updated secrets or config):
+
+```bash
+# Force restart using timestamp annotation (Helm-native approach)
+helm upgrade <release-name> . --set-string podAnnotations.restartedAt="$(date +%s)"
+
+# Example
+helm upgrade trigger . --set-string podAnnotations.restartedAt="$(date +%s)"
+```
+
+This approach:
+- ✅ Uses Helm's built-in annotation mechanism
+- ✅ Safe - doesn't recreate immutable resources like PVCs
+- ✅ Targeted - only restarts pods that need updates
+- ✅ Trackable - increments Helm revision number
+
+### Configuration Updates
+
+After changing secrets or ConfigMaps in your values file:
+
+```bash
+# 1. Upgrade with new values
+helm upgrade trigger . -f values-production.yaml
+
+# 2. Force pod restart to pick up changes
+helm upgrade trigger . -f values-production.yaml \
+ --set-string podAnnotations.restartedAt="$(date +%s)"
+```
+
+## Troubleshooting
+
+### Check Pod Status
+```bash
+kubectl get pods -l app.kubernetes.io/name=trigger.dev
+```
+
+### View Logs
+```bash
+# Webapp logs
+kubectl logs -l app.kubernetes.io/component=webapp
+
+# Database logs
+kubectl logs -l app.kubernetes.io/component=postgres
+```
+
+### Run Tests
+```bash
+helm test trigger.dev
+```
+
+## Testing
+
+### Validate Deployment
+
+```bash
+# Check Helm template syntax
+helm template trigger.dev . --dry-run > /dev/null && echo "Template validation successful"
+
+# Test webapp health endpoint (requires port forwarding)
+curl -s -o /dev/null -w "%{http_code}" http://localhost:3040/healthcheck || echo "Connection failed"
+
+# Port forward to access webapp locally
+kubectl port-forward svc/trigger.dev-webapp 3040:3030 --address 0.0.0.0
+```
+
+### Common Issues
+
+1. **Secrets errors**: Ensure all secrets are exactly 32 hex characters
+2. **Database connection**: Check PostgreSQL is ready before webapp starts
+3. **Resource limits**: Increase limits for ClickHouse in constrained environments
+4. **Config not applying**: Use the pod restart technique above to force config reload
+5. **Image pull errors**: When testing locally, deploy with `npx trigger.dev@v4-beta deploy --push`
+
+## Examples
+
+See `values-production-example.yaml` for a complete production configuration example.
+
+## Version Management
+
+### Understanding Versions
+
+The Helm chart uses three types of versions:
+
+1. **Chart Version** (`Chart.yaml:version`) - Helm chart packaging version
+2. **App Version** (`Chart.yaml:appVersion`) - Trigger.dev application version
+3. **Component Versions** (`values.yaml`) - Individual service versions (Electric, ClickHouse, etc.)
+
+### Release Process
+
+#### For Chart Maintainers
+
+1. **Update Chart Version** for chart changes:
+ ```bash
+ # Edit Chart.yaml
+ version: 4.1.0 # Increment for chart changes (semver)
+ ```
+
+2. **Update App Version** when Trigger.dev releases new version:
+ ```bash
+ # Edit Chart.yaml
+ appVersion: "v4.1.0" # Match Trigger.dev release (v-prefixed image tag)
+ ```
+
+3. **Release via GitHub**:
+ ```bash
+ # Tag and push
+ git tag helm-v4.1.0
+ git push origin helm-v4.1.0
+
+ # GitHub Actions will automatically build and publish to GHCR
+ ```
+
+#### For Users
+
+```bash
+# Install specific chart version
+helm upgrade --install trigger \
+ oci://ghcr.io/triggerdotdev/charts/trigger.dev \
+ --version 4.1.0
+
+# Install latest chart version
+helm upgrade --install trigger \
+ oci://ghcr.io/triggerdotdev/charts/trigger.dev
+
+# Override app version (advanced)
+helm upgrade --install trigger . \
+ --set webapp.image.tag=v4.0.1
+```
+
+## Production Readiness Checklist
+
+### 🔒 Security (REQUIRED)
+
+- [ ] **Generate unique secrets** (never use defaults):
+ ```bash
+ # Generate 4 secrets
+ for i in {1..4}; do openssl rand -hex 16; done
+ ```
+
+- [ ] **Configure security contexts**:
+ ```yaml
+ webapp:
+ podSecurityContext:
+ fsGroup: 1000
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ capabilities:
+ drop: [ALL]
+ ```
+
+- [ ] **Enable network policies** (if supported by cluster)
+- [ ] **Configure proper RBAC** for supervisor
+- [ ] **Use TLS ingress** with cert-manager
+
+### 📊 Resource Management (REQUIRED)
+
+- [ ] **Set resource limits and requests** - for example:
+ ```yaml
+ webapp:
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+
+ postgres:
+ primary:
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+
+ redis:
+ master:
+ resources:
+ limits:
+ cpu: 500m
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+
+ clickhouse:
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+
+ supervisor:
+ resources:
+ limits:
+ cpu: 500m
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+ ```
+
+- [ ] **Configure persistent storage for all services** - for example:
+ ```yaml
+ global:
+ storageClass: "fast-nvme" # Default for all services
+
+ postgres:
+ persistence:
+ primary:
+ size: 500Gi
+
+ redis:
+ persistence:
+ master:
+ size: 20Gi
+
+ clickhouse:
+ persistence:
+ size: 100Gi
+
+ minio:
+ persistence:
+ size: 200Gi
+
+ registry:
+ persistence:
+ size: 100Gi
+ ```
+
+### 🏗️ High Availability (RECOMMENDED)
+
+- [ ] **Multiple replicas** with pod anti-affinity
+- [ ] **Pod disruption budgets**
+- [ ] **External managed services** (RDS, ElastiCache, etc.)
+- [ ] **Multi-AZ storage classes**
+- [ ] **Backup strategies** for databases
+
+### 📊 Monitoring (RECOMMENDED)
+
+- [ ] **Enable ServiceMonitors** for Prometheus
+- [ ] **Configure alerting** for critical services
+- [ ] **Set up log aggregation**
+- [ ] **Monitor resource usage** and adjust limits
+
+### 🚀 Performance (OPTIONAL)
+
+- [ ] **Horizontal Pod Autoscaler** for webapp
+- [ ] **Vertical Pod Autoscaler** for data services
+- [ ] **Node affinity** for data services
+- [ ] **Separate storage classes** for different workloads
+
+## Support
+
+- Documentation: https://trigger.dev/docs/self-hosting
+- GitHub Issues: https://github.com/triggerdotdev/trigger.dev/issues
+- Discord: https://discord.gg/untWVke9aH
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/NOTES.txt b/hosting/k8s/helm/templates/NOTES.txt
new file mode 100644
index 0000000000..abac129260
--- /dev/null
+++ b/hosting/k8s/helm/templates/NOTES.txt
@@ -0,0 +1,82 @@
+Thank you for installing {{ .Chart.Name }}.
+
+Your release is named {{ .Release.Name }}.
+
+π SECURITY WARNING:
+{{- if or (eq .Values.secrets.sessionSecret "2818143646516f6fffd707b36f334bbb") (eq .Values.secrets.magicLinkSecret "44da78b7bbb0dfe709cf38931d25dcdd") (eq .Values.secrets.encryptionKey "f686147ab967943ebbe9ed3b496e465a") (eq .Values.secrets.managedWorkerSecret "447c29678f9eaf289e9c4b70d3dd8a7f") }}
+ You are using DEFAULT SECRETS which are NOT SECURE for production!
+
+ For production deployments, generate new secrets:
+ 1. Run: openssl rand -hex 16 (repeat for each secret)
+ 2. Override in your values.yaml:
+ secrets:
+ sessionSecret: "your-new-32-char-hex-secret"
+ magicLinkSecret: "your-new-32-char-hex-secret"
+ encryptionKey: "your-new-32-char-hex-secret"
+ managedWorkerSecret: "your-new-32-char-hex-secret"
+{{- else }}
+ Custom secrets detected - good for production deployment!
+{{- end }}
+
+To get started:
+
+1. Wait for all pods to be ready:
+ kubectl get pods --namespace {{ .Release.Namespace }} -w
+
+2. Access the webapp:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.webapp.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "trigger-v4.fullname" . }}-webapp)
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.webapp.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "trigger-v4.fullname" . }}-webapp'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "trigger-v4.fullname" . }}-webapp --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.webapp.service.port }}
+{{- else if contains "ClusterIP" .Values.webapp.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "{{ include "trigger-v4.selectorLabels" . }},app.kubernetes.io/component=webapp" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8030:$CONTAINER_PORT
+
+ The application will be available at http://localhost:8030
+{{- end }}
+
+For more information about the deployment, run:
+ kubectl --namespace {{ .Release.Namespace }} get all -l "{{ include "trigger-v4.selectorLabels" . }}"
+
+{{- if .Values.webapp.bootstrap.enabled }}
+
+Bootstrap Mode is enabled:
+- Worker group "{{ .Values.webapp.bootstrap.workerGroupName }}" will be automatically created
+- Worker token will be available at {{ .Values.webapp.bootstrap.workerTokenPath }}
+{{- end }}
+
+Configuration:
+{{- if .Values.postgres.external }}
+- Using external PostgreSQL at {{ .Values.postgres.externalConnection.host }}:{{ .Values.postgres.externalConnection.port }}
+{{- else }}
+- Using internal PostgreSQL
+{{- end }}
+{{- if .Values.redis.external }}
+- Using external Redis at {{ .Values.redis.externalConnection.host }}:{{ .Values.redis.externalConnection.port }}
+{{- else }}
+- Using internal Redis
+{{- end }}
+{{- if .Values.electric.enabled }}
+- Electric sync service enabled
+{{- end }}
+{{- if .Values.clickhouse.enabled }}
+- ClickHouse analytics database enabled
+{{- end }}
+{{- if .Values.minio.enabled }}
+- MinIO object storage enabled
+{{- end }}
+{{- if .Values.registry.enabled }}
+- Docker registry enabled
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/_helpers.tpl b/hosting/k8s/helm/templates/_helpers.tpl
new file mode 100644
index 0000000000..96dfc767d3
--- /dev/null
+++ b/hosting/k8s/helm/templates/_helpers.tpl
@@ -0,0 +1,254 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "trigger-v4.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "trigger-v4.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "trigger-v4.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "trigger-v4.labels" -}}
+helm.sh/chart: {{ include "trigger-v4.chart" . }}
+{{ include "trigger-v4.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "trigger-v4.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "trigger-v4.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Component labels
+*/}}
+{{- define "trigger-v4.componentLabels" -}}
+{{ include "trigger-v4.labels" . }}
+app.kubernetes.io/component: {{ .component }}
+{{- end }}
+
+{{/*
+Component selector labels
+*/}}
+{{- define "trigger-v4.componentSelectorLabels" -}}
+{{ include "trigger-v4.selectorLabels" . }}
+app.kubernetes.io/component: {{ .component }}
+{{- end }}
+
+
+{{/*
+Get the full image name for webapp
+*/}}
+{{- define "trigger-v4.image" -}}
+{{- $registry := .Values.global.imageRegistry | default .Values.webapp.image.registry -}}
+{{- $repository := .Values.webapp.image.repository -}}
+{{- $tag := .Values.webapp.image.tag | default .Chart.AppVersion -}}
+{{- if $registry }}
+{{- printf "%s/%s:%s" $registry $repository $tag }}
+{{- else }}
+{{- printf "%s:%s" $repository $tag }}
+{{- end }}
+{{- end }}
+
+{{/*
+Get the full image name for supervisor
+*/}}
+{{- define "trigger-v4.supervisor.image" -}}
+{{- $registry := .Values.global.imageRegistry | default .Values.supervisor.image.registry -}}
+{{- $repository := .Values.supervisor.image.repository -}}
+{{- $tag := .Values.supervisor.image.tag | default .Chart.AppVersion -}}
+{{- if $registry }}
+{{- printf "%s/%s:%s" $registry $repository $tag }}
+{{- else }}
+{{- printf "%s:%s" $repository $tag }}
+{{- end }}
+{{- end }}
+
+{{/*
+PostgreSQL connection string
+*/}}
+{{- define "trigger-v4.postgres.connectionString" -}}
+{{- if .Values.postgres.external -}}
+postgresql://{{ .Values.postgres.externalConnection.username | urlquery }}:{{ .Values.postgres.externalConnection.password | urlquery }}@{{ .Values.postgres.externalConnection.host }}:{{ .Values.postgres.externalConnection.port }}/{{ .Values.postgres.externalConnection.database }}?schema={{ .Values.postgres.externalConnection.schema | default "public" }}&sslmode={{ .Values.postgres.externalConnection.sslMode | default "prefer" }}
+{{- else -}}
+postgresql://{{ .Values.postgres.auth.username | urlquery }}:{{ .Values.postgres.auth.password | urlquery }}@{{ include "trigger-v4.fullname" . }}-postgres:{{ .Values.postgres.primary.service.ports.postgres }}/{{ .Values.postgres.auth.database }}?schema={{ .Values.postgres.connection.schema | default "public" }}&sslmode={{ .Values.postgres.connection.sslMode | default "prefer" }}
+{{- end -}}
+{{- end }}
+
+{{/*
+Redis connection details
+*/}}
+{{- define "trigger-v4.redis.host" -}}
+{{- if .Values.redis.external -}}
+{{ .Values.redis.externalConnection.host }}
+{{- else -}}
+{{ include "trigger-v4.fullname" . }}-redis-master
+{{- end -}}
+{{- end }}
+
+{{- define "trigger-v4.redis.port" -}}
+{{- if .Values.redis.external -}}
+{{ .Values.redis.externalConnection.port }}
+{{- else -}}
+{{ .Values.redis.master.service.ports.redis }}
+{{- end -}}
+{{- end }}
+
+{{/*
+Electric service URL
+*/}}
+{{- define "trigger-v4.electric.url" -}}
+{{- if .Values.electric.enabled -}}
+http://{{ include "trigger-v4.fullname" . }}-electric:{{ .Values.electric.service.port }}
+{{- else -}}
+{{ .Values.config.electricOrigin }}
+{{- end -}}
+{{- end }}
+
+{{/*
+MinIO connection details
+*/}}
+{{- define "trigger-v4.minio.url" -}}
+{{- if .Values.minio.enabled -}}
+http://{{ include "trigger-v4.fullname" . }}-minio:{{ .Values.minio.service.ports.api }}
+{{- else if .Values.minio.external -}}
+{{ .Values.minio.externalConnection.url }}
+{{- else -}}
+""
+{{- end -}}
+{{- end }}
+
+{{/*
+Get the secrets name - either existing secret or generated name
+*/}}
+{{- define "trigger-v4.secretsName" -}}
+{{- if .Values.secrets.existingSecret -}}
+{{ .Values.secrets.existingSecret }}
+{{- else -}}
+{{ include "trigger-v4.fullname" . }}-secrets
+{{- end -}}
+{{- end }}
+
+{{/*
+Registry connection details
+*/}}
+{{- define "trigger-v4.registry.host" -}}
+{{- if .Values.registry.external -}}
+{{ .Values.registry.externalConnection.host }}:{{ .Values.registry.externalConnection.port }}
+{{- else if .Values.registry.enabled -}}
+{{ include "trigger-v4.fullname" . }}-registry:{{ .Values.registry.service.port }}
+{{- else -}}
+localhost:5000
+{{- end -}}
+{{- end }}
+
+{{/*
+PostgreSQL host (for wait-for-it script)
+*/}}
+{{- define "trigger-v4.postgres.host" -}}
+{{- if .Values.postgres.external -}}
+{{ .Values.postgres.externalConnection.host }}:{{ .Values.postgres.externalConnection.port }}
+{{- else -}}
+{{ include "trigger-v4.fullname" . }}-postgres:{{ .Values.postgres.primary.service.ports.postgres }}
+{{- end -}}
+{{- end }}
+
+{{/*
+Supervisor connection details
+*/}}
+{{- define "trigger-v4.supervisor.url" -}}
+{{- if .Values.supervisor.enabled -}}
+http://{{ include "trigger-v4.fullname" . }}-supervisor:{{ .Values.supervisor.service.ports.workload }}
+{{- else -}}
+""
+{{- end -}}
+{{- end }}
+
+{{/*
+Create the name of the supervisor service account to use
+*/}}
+{{- define "trigger-v4.supervisorServiceAccountName" -}}
+{{- if .Values.supervisor.serviceAccount.create }}
+{{- default (printf "%s-supervisor" (include "trigger-v4.fullname" .)) .Values.supervisor.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.supervisor.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the supervisor cluster role to use
+*/}}
+{{- define "trigger-v4.supervisorClusterRoleName" -}}
+{{- default (printf "%s-supervisor-%s" (include "trigger-v4.fullname" .) .Release.Namespace) .Values.supervisor.rbac.clusterRole.name }}
+{{- end }}
+
+{{/*
+Generate docker config for image pull secret
+*/}}
+{{- define "trigger-v4.imagePullSecret" }}
+{{- if and .Values.registry.enabled .Values.registry.auth.enabled }}
+{{- $registryHost := include "trigger-v4.registry.host" . }}
+{{- $username := .Values.registry.auth.username }}
+{{- $password := .Values.registry.auth.password }}
+{{- $auth := printf "%s:%s" $username $password | b64enc }}
+{{- $config := dict "auths" (dict $registryHost (dict "username" $username "password" $password "auth" $auth)) }}
+{{- $config | toJson }}
+{{- else if and .Values.registry.external .Values.registry.externalConnection.auth.enabled }}
+{{- $registryHost := .Values.registry.externalConnection.host }}
+{{- $username := .Values.registry.externalConnection.auth.username }}
+{{- $password := .Values.registry.externalConnection.auth.password }}
+{{- $auth := printf "%s:%s" $username $password | b64enc }}
+{{- $config := dict "auths" (dict $registryHost (dict "username" $username "password" $password "auth" $auth)) }}
+{{- $config | toJson }}
+{{- end }}
+{{- end }}
+
+{{/*
+Merge ingress annotations to avoid duplicates
+*/}}
+{{- define "trigger-v4.ingress.annotations" -}}
+{{- $annotations := dict -}}
+{{- if .Values.ingress.annotations -}}
+{{- $annotations = .Values.ingress.annotations -}}
+{{- end -}}
+{{- if .Values.ingress.certManager.enabled -}}
+{{- $_ := set $annotations "cert-manager.io/cluster-issuer" .Values.ingress.certManager.clusterIssuer -}}
+{{- end -}}
+{{- if .Values.ingress.externalDns.enabled -}}
+{{- $_ := set $annotations "external-dns.alpha.kubernetes.io/hostname" .Values.ingress.externalDns.hostname -}}
+{{- $_ := set $annotations "external-dns.alpha.kubernetes.io/ttl" (.Values.ingress.externalDns.ttl | toString) -}}
+{{- end -}}
+{{- toYaml $annotations -}}
+{{- end }}
+
diff --git a/hosting/k8s/helm/templates/clickhouse.yaml b/hosting/k8s/helm/templates/clickhouse.yaml
new file mode 100644
index 0000000000..bd9097e54e
--- /dev/null
+++ b/hosting/k8s/helm/templates/clickhouse.yaml
@@ -0,0 +1,165 @@
+{{- if and .Values.clickhouse.enabled (not .Values.clickhouse.external) }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-clickhouse
+ labels:
+ {{- $component := "clickhouse" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ serviceName: {{ include "trigger-v4.fullname" . }}-clickhouse-headless
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.clickhouse.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.clickhouse.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: clickhouse
+ {{- with .Values.clickhouse.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.clickhouse.image.registry }}/{{ .Values.clickhouse.image.repository }}:{{ .Values.clickhouse.image.tag }}"
+ imagePullPolicy: {{ .Values.clickhouse.image.pullPolicy }}
+ ports:
+ - name: native
+ containerPort: {{ .Values.clickhouse.service.ports.native }}
+ protocol: TCP
+ - name: http
+ containerPort: {{ .Values.clickhouse.service.ports.http }}
+ protocol: TCP
+ env:
+ - name: CLICKHOUSE_ADMIN_USER
+ value: {{ .Values.clickhouse.auth.adminUser | quote }}
+ - name: CLICKHOUSE_ADMIN_PASSWORD
+ value: {{ .Values.clickhouse.auth.adminPassword | quote }}
+ {{- with .Values.clickhouse.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ exec:
+ command:
+ - clickhouse-client
+ - --host
+ - localhost
+ - --port
+ - {{ .Values.clickhouse.service.ports.native | quote }}
+ - --user
+ - {{ .Values.clickhouse.auth.adminUser | quote }}
+ - --password
+ - {{ .Values.clickhouse.auth.adminPassword | quote }}
+ - --query
+ - "SELECT 1"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 15
+ failureThreshold: 5
+ readinessProbe:
+ exec:
+ command:
+ - clickhouse-client
+ - --host
+ - localhost
+ - --port
+ - {{ .Values.clickhouse.service.ports.native | quote }}
+ - --user
+ - {{ .Values.clickhouse.auth.adminUser | quote }}
+ - --password
+ - {{ .Values.clickhouse.auth.adminPassword | quote }}
+ - --query
+ - "SELECT 1"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 15
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.clickhouse.resources | nindent 12 }}
+ volumeMounts:
+ - name: clickhouse-data
+ mountPath: /bitnami/clickhouse
+ - name: clickhouse-config
+ mountPath: /bitnami/clickhouse/etc/config.d/override.xml
+ subPath: override.xml
+ readOnly: true
+ volumes:
+ - name: clickhouse-config
+ configMap:
+ name: {{ include "trigger-v4.fullname" . }}-clickhouse-config
+ {{- if not .Values.clickhouse.persistence.enabled }}
+ - name: clickhouse-data
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.clickhouse.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: clickhouse-data
+ labels:
+ {{- $component := "clickhouse" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 10 }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.clickhouse.persistence.size }}
+ {{- $storageClass := .Values.clickhouse.persistence.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-clickhouse-headless
+ labels:
+ {{- $component := "clickhouse" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: native
+ port: {{ .Values.clickhouse.service.ports.native }}
+ targetPort: native
+ protocol: TCP
+ - name: http
+ port: {{ .Values.clickhouse.service.ports.http }}
+ targetPort: http
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-clickhouse
+ labels:
+ {{- $component := "clickhouse" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.clickhouse.service.type }}
+ ports:
+ - name: native
+ port: {{ .Values.clickhouse.service.ports.native }}
+ targetPort: native
+ protocol: TCP
+ - name: http
+ port: {{ .Values.clickhouse.service.ports.http }}
+ targetPort: http
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/configmap.yaml b/hosting/k8s/helm/templates/configmap.yaml
new file mode 100644
index 0000000000..b3bd8230c8
--- /dev/null
+++ b/hosting/k8s/helm/templates/configmap.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.clickhouse.enabled (not .Values.clickhouse.external) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-clickhouse-config
+ labels:
+ {{- include "trigger-v4.labels" . | nindent 4 }}
+data:
+ override.xml: |
+{{ .Values.clickhouse.config.override | indent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/electric.yaml b/hosting/k8s/helm/templates/electric.yaml
new file mode 100644
index 0000000000..35320916b9
--- /dev/null
+++ b/hosting/k8s/helm/templates/electric.yaml
@@ -0,0 +1,84 @@
+{{- if .Values.electric.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-electric
+ labels:
+ {{- $component := "electric" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.electric.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.electric.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: electric
+ {{- with .Values.electric.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.electric.image.registry }}/{{ .Values.electric.image.repository }}:{{ .Values.electric.image.tag }}"
+ imagePullPolicy: {{ .Values.electric.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.electric.service.targetPort }}
+ protocol: TCP
+ env:
+ - name: DATABASE_URL
+ value: {{ include "trigger-v4.postgres.connectionString" . | quote }}
+ - name: ELECTRIC_INSECURE
+ value: {{ .Values.electric.config.insecure | quote }}
+ - name: ELECTRIC_USAGE_REPORTING
+ value: {{ .Values.electric.config.usageReporting | quote }}
+ {{- with .Values.electric.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /v1/health
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /v1/health
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.electric.resources | nindent 12 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-electric
+ labels:
+ {{- $component := "electric" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.electric.service.type }}
+ ports:
+ - port: {{ .Values.electric.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/extra-manifests.yaml b/hosting/k8s/helm/templates/extra-manifests.yaml
new file mode 100644
index 0000000000..c25dc7baf1
--- /dev/null
+++ b/hosting/k8s/helm/templates/extra-manifests.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraManifests }}
+---
+{{ toYaml . }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/ingress.yaml b/hosting/k8s/helm/templates/ingress.yaml
new file mode 100644
index 0000000000..ba9640b9fe
--- /dev/null
+++ b/hosting/k8s/helm/templates/ingress.yaml
@@ -0,0 +1,51 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "trigger-v4.fullname" . -}}
+{{- $svcPort := .Values.webapp.service.port -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "trigger-v4.labels" . | nindent 4 }}
+ annotations:
+ {{- include "trigger-v4.ingress.annotations" . | nindent 4 }}
+spec:
+ {{- if .Values.ingress.className }}
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- if .paths }}
+ {{- range .paths }}
+ - path: {{ .path }}
+ pathType: {{ .pathType | default "Prefix" }}
+ backend:
+ service:
+ name: {{ $fullName }}-webapp
+ port:
+ number: {{ $svcPort }}
+ {{- end }}
+ {{- else }}
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{ $fullName }}-webapp
+ port:
+ number: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/minio.yaml b/hosting/k8s/helm/templates/minio.yaml
new file mode 100644
index 0000000000..3fcfb319b8
--- /dev/null
+++ b/hosting/k8s/helm/templates/minio.yaml
@@ -0,0 +1,143 @@
+{{- if .Values.minio.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-minio
+ labels:
+ {{- $component := "minio" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ serviceName: {{ include "trigger-v4.fullname" . }}-minio-headless
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.minio.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.minio.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: minio
+ {{- with .Values.minio.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.minio.image.registry }}/{{ .Values.minio.image.repository }}:{{ .Values.minio.image.tag }}"
+ imagePullPolicy: {{ .Values.minio.image.pullPolicy }}
+ args:
+ - server
+ - --console-address
+ - ":9001"
+ - /data
+ ports:
+ - name: api
+ containerPort: {{ .Values.minio.service.ports.api }}
+ protocol: TCP
+ - name: console
+ containerPort: {{ .Values.minio.service.ports.console }}
+ protocol: TCP
+ env:
+ - name: MINIO_ROOT_USER
+ value: {{ .Values.minio.auth.rootUser | quote }}
+ - name: MINIO_ROOT_PASSWORD
+ value: {{ .Values.minio.auth.rootPassword | quote }}
+ {{- with .Values.minio.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /minio/health/live
+ port: api
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 10
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /minio/health/live
+ port: api
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ timeoutSeconds: 10
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.minio.resources | nindent 12 }}
+ volumeMounts:
+ - name: minio-data
+ mountPath: /data
+ {{- if not .Values.minio.persistence.enabled }}
+ volumes:
+ - name: minio-data
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.minio.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: minio-data
+ labels:
+ {{- $component := "minio" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 10 }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.minio.persistence.size }}
+ {{- $storageClass := .Values.minio.persistence.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-minio-headless
+ labels:
+ {{- $component := "minio" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: api
+ port: {{ .Values.minio.service.ports.api }}
+ targetPort: api
+ protocol: TCP
+ - name: console
+ port: {{ .Values.minio.service.ports.console }}
+ targetPort: console
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-minio
+ labels:
+ {{- $component := "minio" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.minio.service.type }}
+ ports:
+ - name: api
+ port: {{ .Values.minio.service.ports.api }}
+ targetPort: api
+ protocol: TCP
+ - name: console
+ port: {{ .Values.minio.service.ports.console }}
+ targetPort: console
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/postgresql.yaml b/hosting/k8s/helm/templates/postgresql.yaml
new file mode 100644
index 0000000000..5252a0466e
--- /dev/null
+++ b/hosting/k8s/helm/templates/postgresql.yaml
@@ -0,0 +1,138 @@
+{{- if and .Values.postgres.enabled (not .Values.postgres.external) }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-postgres
+ labels:
+ {{- $component := "postgres" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ serviceName: {{ include "trigger-v4.fullname" . }}-postgres-headless
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.postgres.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.postgres.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: postgres
+ {{- with .Values.postgres.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.postgres.image.registry }}/{{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}"
+ imagePullPolicy: {{ .Values.postgres.image.pullPolicy }}
+ ports:
+ - name: postgres
+ containerPort: {{ .Values.postgres.primary.service.ports.postgres }}
+ protocol: TCP
+ env:
+ - name: POSTGRES_USER
+ value: {{ .Values.postgres.auth.username | quote }}
+ - name: POSTGRES_PASSWORD
+ value: {{ .Values.postgres.auth.password | quote }}
+ - name: POSTGRES_DB
+ value: {{ .Values.postgres.auth.database | quote }}
+ {{- with .Values.postgres.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ args:
+ - "-c"
+ - "wal_level=logical"
+ {{- with .Values.postgres.extraArgs }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ exec:
+ command:
+ - pg_isready
+ - -U
+ - {{ .Values.postgres.auth.username }}
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ readinessProbe:
+ exec:
+ command:
+ - pg_isready
+ - -U
+ - {{ .Values.postgres.auth.username }}
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.postgres.primary.resources | nindent 12 }}
+ volumeMounts:
+ - name: postgres-data
+ mountPath: /var/lib/postgresql/data
+ {{- if .Values.postgres.primary.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: postgres-data
+ labels:
+ {{- $component := "postgres" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 10 }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.postgres.primary.persistence.size }}
+ {{- $storageClass := .Values.postgres.primary.persistence.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+ {{- else }}
+ volumes:
+ - name: postgres-data
+ emptyDir: {}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-postgres-headless
+ labels:
+ {{- $component := "postgres" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: postgres
+ port: {{ .Values.postgres.primary.service.ports.postgres }}
+ targetPort: postgres
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-postgres
+ labels:
+ {{- $component := "postgres" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - name: postgres
+ port: {{ .Values.postgres.primary.service.ports.postgres }}
+ targetPort: postgres
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/redis.yaml b/hosting/k8s/helm/templates/redis.yaml
new file mode 100644
index 0000000000..f037bbc869
--- /dev/null
+++ b/hosting/k8s/helm/templates/redis.yaml
@@ -0,0 +1,125 @@
+{{- if and .Values.redis.enabled (not .Values.redis.external) }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-redis-master
+ labels:
+ {{- $component := "redis" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ serviceName: {{ include "trigger-v4.fullname" . }}-redis-headless
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.redis.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.redis.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: redis
+ {{- with .Values.redis.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.redis.image.registry }}/{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}"
+ imagePullPolicy: {{ .Values.redis.image.pullPolicy }}
+ ports:
+ - name: redis
+ containerPort: {{ .Values.redis.master.service.ports.redis }}
+ protocol: TCP
+ {{- with .Values.redis.extraEnv }}
+ env:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ livenessProbe:
+ exec:
+ command:
+ - redis-cli
+ - ping
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ readinessProbe:
+ exec:
+ command:
+ - redis-cli
+ - ping
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.redis.master.resources | nindent 12 }}
+ volumeMounts:
+ - name: redis-data
+ mountPath: /data
+ {{- if not .Values.redis.master.persistence.enabled }}
+ volumes:
+ - name: redis-data
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.redis.master.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels:
+ {{- $component := "redis" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 10 }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.redis.master.persistence.size }}
+ {{- $storageClass := .Values.redis.master.persistence.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-redis-headless
+ labels:
+ {{- $component := "redis" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: redis
+ port: {{ .Values.redis.master.service.ports.redis }}
+ targetPort: redis
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-redis-master
+ labels:
+ {{- $component := "redis" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - name: redis
+ port: {{ .Values.redis.master.service.ports.redis }}
+ targetPort: redis
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/registry.yaml b/hosting/k8s/helm/templates/registry.yaml
new file mode 100644
index 0000000000..528b361a6b
--- /dev/null
+++ b/hosting/k8s/helm/templates/registry.yaml
@@ -0,0 +1,143 @@
+{{- if and .Values.registry.enabled (not .Values.registry.external) }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-registry
+ labels:
+ {{- $component := "registry" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ serviceName: {{ include "trigger-v4.fullname" . }}-registry-headless
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.registry.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.registry.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: registry
+ {{- with .Values.registry.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.registry.image.registry }}/{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}"
+ imagePullPolicy: {{ .Values.registry.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.registry.service.targetPort }}
+ protocol: TCP
+ {{- if or .Values.registry.auth.enabled .Values.registry.extraEnv }}
+ env:
+ {{- if .Values.registry.auth.enabled }}
+ - name: REGISTRY_AUTH
+ value: "htpasswd"
+ - name: REGISTRY_AUTH_HTPASSWD_REALM
+ value: "Registry Realm"
+ - name: REGISTRY_AUTH_HTPASSWD_PATH
+ value: "/auth/htpasswd"
+ {{- end }}
+ {{- with .Values.registry.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.registry.resources | nindent 12 }}
+ volumeMounts:
+ - name: registry-data
+ mountPath: /var/lib/registry
+ {{- if .Values.registry.auth.enabled }}
+ - name: registry-auth
+ mountPath: /auth
+ readOnly: true
+ {{- end }}
+ volumes:
+ {{- if .Values.registry.auth.enabled }}
+ - name: registry-auth
+ secret:
+ secretName: {{ include "trigger-v4.fullname" . }}-registry-auth
+ {{- end }}
+ {{- if not .Values.registry.persistence.enabled }}
+ - name: registry-data
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.registry.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: registry-data
+ labels:
+ {{- $component := "registry" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 10 }}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.registry.persistence.size }}
+ {{- $storageClass := .Values.registry.persistence.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-registry-headless
+ labels:
+ {{- $component := "registry" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: http
+ port: {{ .Values.registry.service.port }}
+ targetPort: http
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-registry
+ labels:
+ {{- $component := "registry" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.registry.service.type }}
+ ports:
+ - name: http
+ port: {{ .Values.registry.service.port }}
+ targetPort: http
+ protocol: TCP
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/secrets.yaml b/hosting/k8s/helm/templates/secrets.yaml
new file mode 100644
index 0000000000..e93702cf91
--- /dev/null
+++ b/hosting/k8s/helm/templates/secrets.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.secrets.enabled (not .Values.secrets.existingSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-secrets
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+type: Opaque
+data:  # "required" fails the render with a clear message when a value is unset
+  SESSION_SECRET: {{ required "secrets.sessionSecret is required" .Values.secrets.sessionSecret | b64enc | quote }}
+  MAGIC_LINK_SECRET: {{ required "secrets.magicLinkSecret is required" .Values.secrets.magicLinkSecret | b64enc | quote }}
+  ENCRYPTION_KEY: {{ required "secrets.encryptionKey is required" .Values.secrets.encryptionKey | b64enc | quote }}
+  MANAGED_WORKER_SECRET: {{ required "secrets.managedWorkerSecret is required" .Values.secrets.managedWorkerSecret | b64enc | quote }}
+  OBJECT_STORE_ACCESS_KEY_ID: {{ required "secrets.objectStore.accessKeyId is required" .Values.secrets.objectStore.accessKeyId | b64enc | quote }}
+  OBJECT_STORE_SECRET_ACCESS_KEY: {{ required "secrets.objectStore.secretAccessKey is required" .Values.secrets.objectStore.secretAccessKey | b64enc | quote }}
+{{- end }}
+---
+{{- if and .Values.registry.enabled .Values.registry.auth.enabled }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-registry-auth
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+type: Opaque  # htpasswd file mounted into the registry container at /auth
+data:
+  htpasswd: {{ htpasswd .Values.registry.auth.username .Values.registry.auth.password | trim | b64enc | quote }}  # Sprig htpasswd emits "user:bcrypt-hash"
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-registry-secret
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+type: kubernetes.io/dockerconfigjson  # imagePullSecret for pods pulling from the in-cluster registry
+data:
+  .dockerconfigjson: {{ include "trigger-v4.imagePullSecret" . | b64enc }}
+{{- else if and .Values.registry.external .Values.registry.externalConnection.auth.enabled }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-registry-secret
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+type: kubernetes.io/dockerconfigjson  # same pull secret, built from external registry credentials
+data:
+  .dockerconfigjson: {{ include "trigger-v4.imagePullSecret" . | b64enc }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/servicemonitor.yaml b/hosting/k8s/helm/templates/servicemonitor.yaml
new file mode 100644
index 0000000000..15ca65f1cf
--- /dev/null
+++ b/hosting/k8s/helm/templates/servicemonitor.yaml
@@ -0,0 +1,55 @@
+{{- if .Values.webapp.serviceMonitor.enabled }}
+---
+# Webapp ServiceMonitor
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-webapp
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+    app.kubernetes.io/component: webapp
+    {{- with .Values.webapp.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "trigger-v4.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: webapp
+  endpoints:
+    - port: http
+      path: {{ .Values.webapp.serviceMonitor.path }}
+      interval: {{ .Values.webapp.serviceMonitor.interval }}
+      {{- with .Values.webapp.serviceMonitor.basicAuth }}
+      basicAuth:
+        {{- toYaml . | nindent 8 }}{{- /* 8 > key indent 6 so creds nest under basicAuth */}}
+      {{- end }}
+{{- end }}
+
+{{- if .Values.supervisor.serviceMonitor.enabled }}
+---
+# Supervisor ServiceMonitor
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-supervisor
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+    app.kubernetes.io/component: supervisor
+    {{- with .Values.supervisor.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "trigger-v4.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: supervisor
+  endpoints:
+    - port: metrics
+      path: {{ .Values.supervisor.serviceMonitor.path }}
+      interval: {{ .Values.supervisor.serviceMonitor.interval }}
+      {{- with .Values.supervisor.serviceMonitor.basicAuth }}
+      basicAuth:
+        {{- toYaml . | nindent 8 }}{{- /* was nindent 6: rendered creds as siblings of basicAuth */}}
+      {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/supervisor.yaml b/hosting/k8s/helm/templates/supervisor.yaml
new file mode 100644
index 0000000000..0aba0e206f
--- /dev/null
+++ b/hosting/k8s/helm/templates/supervisor.yaml
@@ -0,0 +1,263 @@
+{{- if .Values.supervisor.enabled }}
+{{- if .Values.supervisor.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "trigger-v4.supervisorServiceAccountName" . }}
+ labels:
+ {{- $component := "supervisor" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+ {{- with .Values.supervisor.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+---
+{{- if .Values.supervisor.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "trigger-v4.supervisorClusterRoleName" . }}
+ labels:
+ {{- $component := "supervisor" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["create", "delete", "deletecollection", "get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "trigger-v4.supervisorClusterRoleName" . }}-binding
+ labels:
+ {{- $component := "supervisor" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "trigger-v4.supervisorServiceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ include "trigger-v4.supervisorClusterRoleName" . }}
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-supervisor
+ labels:
+ {{- $component := "supervisor" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: 1
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 0
+ maxSurge: 1
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ {{- with .Values.supervisor.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ serviceAccountName: {{ include "trigger-v4.supervisorServiceAccountName" . }}
+ securityContext:
+ fsGroup: 1000
+ {{- with .Values.supervisor.podSecurityContext }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-shared
+ image: busybox:1.35
+ command: ['sh', '-c', 'mkdir -p /home/node/shared']
+ securityContext:
+ runAsUser: 1000
+ volumeMounts:
+ - name: shared
+ mountPath: /home/node/shared
+ containers:
+ - name: supervisor
+ image: {{ include "trigger-v4.supervisor.image" . }}
+ imagePullPolicy: {{ .Values.supervisor.image.pullPolicy }}
+ ports:
+ - name: workload
+ containerPort: {{ .Values.supervisor.service.ports.workload }}
+ protocol: TCP
+ - name: metrics
+ containerPort: {{ .Values.supervisor.service.ports.metrics }}
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: workload
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: workload
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ resources:
+ {{- toYaml .Values.supervisor.resources | nindent 12 }}
+ env:
+ # Core configuration
+ - name: TRIGGER_API_URL
+ value: "http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}"
+ - name: TRIGGER_WORKER_TOKEN
+ {{- if .Values.supervisor.bootstrap.enabled }}
+ value: "file://{{ .Values.supervisor.bootstrap.workerTokenPath }}"
+ {{- else if .Values.supervisor.bootstrap.workerToken.secret.name }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.supervisor.bootstrap.workerToken.secret.name }}
+ key: {{ .Values.supervisor.bootstrap.workerToken.secret.key }}
+ {{- else }}
+ value: {{ .Values.supervisor.bootstrap.workerToken.value | quote }}
+ {{- end }}
+ {{- if .Values.secrets.enabled }}
+ - name: MANAGED_WORKER_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: MANAGED_WORKER_SECRET
+ {{- end }}
+ # Worker instance configuration
+ - name: TRIGGER_WORKER_INSTANCE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Kubernetes configuration
+ - name: KUBERNETES_NAMESPACE
+ value: {{ default .Release.Namespace .Values.supervisor.config.kubernetes.namespace | quote }}
+ - name: KUBERNETES_FORCE_ENABLED
+ value: {{ .Values.supervisor.config.kubernetes.forceEnabled | quote }}
+ - name: KUBERNETES_WORKER_NODETYPE_LABEL
+ value: {{ .Values.supervisor.config.kubernetes.workerNodetypeLabel | quote }}
+ {{- if or (and .Values.registry.enabled .Values.registry.auth.enabled) (and .Values.registry.external .Values.registry.externalConnection.auth.enabled) }}
+ - name: KUBERNETES_IMAGE_PULL_SECRETS
+ value: "{{ include "trigger-v4.fullname" . }}-registry-secret"
+ {{- end }}
+ - name: KUBERNETES_EPHEMERAL_STORAGE_SIZE_LIMIT
+ value: {{ default "10Gi" .Values.supervisor.config.kubernetes.ephemeralStorageSizeLimit | quote }}
+ - name: KUBERNETES_EPHEMERAL_STORAGE_SIZE_REQUEST
+ value: {{ default "2Gi" .Values.supervisor.config.kubernetes.ephemeralStorageSizeRequest | quote }}
+ # Pod cleaner configuration
+ - name: POD_CLEANER_ENABLED
+ value: {{ .Values.supervisor.config.podCleaner.enabled | quote }}
+ - name: POD_CLEANER_BATCH_SIZE
+ value: {{ .Values.supervisor.config.podCleaner.batchSize | quote }}
+ - name: POD_CLEANER_INTERVAL_MS
+ value: {{ .Values.supervisor.config.podCleaner.intervalMs | quote }}
+ # Failed pod handler
+ - name: FAILED_POD_HANDLER_ENABLED
+ value: {{ .Values.supervisor.config.failedPodHandler.enabled | quote }}
+ - name: FAILED_POD_HANDLER_RECONNECT_INTERVAL_MS
+ value: {{ .Values.supervisor.config.failedPodHandler.reconnectIntervalMs | quote }}
+ # Workload API configuration
+ - name: TRIGGER_WORKLOAD_API_PROTOCOL
+ value: {{ .Values.supervisor.config.workloadApi.protocol | quote }}
+ - name: TRIGGER_WORKLOAD_API_DOMAIN
+ value: "{{ include "trigger-v4.fullname" . }}-supervisor.{{ .Release.Namespace }}.svc.cluster.local"
+ - name: TRIGGER_WORKLOAD_API_PORT_EXTERNAL
+ value: {{ .Values.supervisor.config.workloadApi.portExternal | quote }}
+ - name: TRIGGER_WORKLOAD_API_PORT_INTERNAL
+ value: {{ .Values.supervisor.config.workloadApi.portInternal | quote }}
+ - name: TRIGGER_WORKLOAD_API_HOST_INTERNAL
+ value: {{ .Values.supervisor.config.workloadApi.hostInternal | quote }}
+ - name: TRIGGER_WORKLOAD_API_ENABLED
+ value: {{ .Values.supervisor.config.workloadApi.enabled | quote }}
+ # Dequeue configuration
+ - name: TRIGGER_DEQUEUE_ENABLED
+ value: {{ .Values.supervisor.config.dequeue.enabled | quote }}
+ - name: TRIGGER_DEQUEUE_INTERVAL_MS
+ value: {{ .Values.supervisor.config.dequeue.intervalMs | quote }}
+ - name: TRIGGER_DEQUEUE_MAX_RUN_COUNT
+ value: {{ .Values.supervisor.config.dequeue.maxRunCount | quote }}
+ - name: TRIGGER_DEQUEUE_IDLE_INTERVAL_MS
+ value: {{ .Values.supervisor.config.dequeue.idleIntervalMs | quote }}
+ # Heartbeat configuration
+ - name: RUNNER_HEARTBEAT_INTERVAL_SECONDS
+ value: {{ .Values.supervisor.config.runner.heartbeatIntervalSeconds | quote }}
+ - name: RUNNER_SNAPSHOT_POLL_INTERVAL_SECONDS
+ value: {{ .Values.supervisor.config.runner.snapshotPollIntervalSeconds | quote }}
+ # Metrics configuration
+ - name: METRICS_ENABLED
+ value: {{ .Values.supervisor.config.metrics.enabled | quote }}
+ - name: METRICS_COLLECT_DEFAULTS
+ value: {{ .Values.supervisor.config.metrics.collectDefaults | quote }}
+ - name: METRICS_HOST
+ value: {{ .Values.supervisor.config.metrics.host | quote }}
+ - name: METRICS_PORT
+ value: {{ .Values.supervisor.config.metrics.port | quote }}
+ # Debug
+ - name: DEBUG
+ value: {{ .Values.supervisor.config.debug | quote }}
+ # OTEL
+ - name: OTEL_EXPORTER_OTLP_ENDPOINT
+ value: "http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}/otel"
+ {{- with .Values.supervisor.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: shared
+ mountPath: /home/node/shared
+ {{- with .Values.supervisor.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ volumes:
+ - name: shared
+ {{- if .Values.persistence.shared.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "trigger-v4.fullname" . }}-shared
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- with .Values.supervisor.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.supervisor.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.supervisor.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-supervisor
+ labels:
+ {{- $component := "supervisor" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.supervisor.service.type }}
+ ports:
+ - port: {{ .Values.supervisor.service.ports.workload }}
+ targetPort: workload
+ protocol: TCP
+ name: workload
+ - port: {{ .Values.supervisor.service.ports.metrics }}
+ targetPort: metrics
+ protocol: TCP
+ name: metrics
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-clickhouse.yaml b/hosting/k8s/helm/templates/tests/test-clickhouse.yaml
new file mode 100644
index 0000000000..814255b719
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-clickhouse.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.clickhouse.enabled (not .Values.clickhouse.external) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-clickhouse"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-clickhouse
+      image: curlimages/curl:8.14.1
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing ClickHouse HTTP interface..."
+          curl -f --user "{{ .Values.clickhouse.auth.adminUser }}:{{ .Values.clickhouse.auth.adminPassword }}" "http://{{ include "trigger-v4.fullname" . }}-clickhouse:{{ .Values.clickhouse.service.ports.http }}/ping"
+          echo "ClickHouse test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-electric.yaml b/hosting/k8s/helm/templates/tests/test-electric.yaml
new file mode 100644
index 0000000000..0e6c657e96
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-electric.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.electric.enabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-electric"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-electric
+      image: curlimages/curl:8.14.1
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing Electric health endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-electric:{{ .Values.electric.service.port }}/api/status
+          echo "Electric test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-minio.yaml b/hosting/k8s/helm/templates/tests/test-minio.yaml
new file mode 100644
index 0000000000..605d35690e
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-minio.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.minio.enabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-minio"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-minio
+      image: curlimages/curl:8.14.1
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing MinIO health endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-minio:{{ .Values.minio.service.ports.api }}/minio/health/live
+          echo "MinIO test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-postgresql.yaml b/hosting/k8s/helm/templates/tests/test-postgresql.yaml
new file mode 100644
index 0000000000..2e6028bea6
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-postgresql.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.postgres.enabled (not .Values.postgres.external) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-postgres"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-postgres
+      image: postgres:{{ .Values.postgres.image.tag }}
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing PostgreSQL connection..."
+          pg_isready -h {{ include "trigger-v4.fullname" . }}-postgres -p {{ .Values.postgres.primary.service.ports.postgres }} -U {{ .Values.postgres.auth.username }}
+          echo "PostgreSQL test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-redis.yaml b/hosting/k8s/helm/templates/tests/test-redis.yaml
new file mode 100644
index 0000000000..4ba2c46c34
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-redis.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.redis.enabled (not .Values.redis.external) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-redis"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-redis
+      image: redis:{{ .Values.redis.image.tag }}
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing Redis connection..."
+          redis-cli -h {{ include "trigger-v4.fullname" . }}-redis-master -p {{ .Values.redis.master.service.ports.redis }} ping
+          echo "Redis test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-supervisor.yaml b/hosting/k8s/helm/templates/tests/test-supervisor.yaml
new file mode 100644
index 0000000000..71ab36d904
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-supervisor.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.supervisor.enabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-supervisor"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-supervisor
+      image: curlimages/curl:8.14.1
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing Supervisor metrics endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-supervisor:{{ .Values.supervisor.service.ports.metrics }}/metrics
+          echo "Supervisor test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-webapp.yaml b/hosting/k8s/helm/templates/tests/test-webapp.yaml
new file mode 100644
index 0000000000..7c7f9cce9e
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-webapp.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-webapp"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    {"helm.sh/hook": test, "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"}
+spec:
+  restartPolicy: Never  # hook pods must not restart; rerunning `helm test` recreates them
+  containers:
+    - name: test-webapp
+      image: curlimages/curl:8.14.1
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing webapp health endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}/healthcheck
+          echo "Webapp test completed successfully"
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/webapp.yaml b/hosting/k8s/helm/templates/webapp.yaml
new file mode 100644
index 0000000000..dbcfadea46
--- /dev/null
+++ b/hosting/k8s/helm/templates/webapp.yaml
@@ -0,0 +1,284 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-webapp
+ labels:
+ {{- $component := "webapp" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ replicas: {{ .Values.webapp.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.webapp.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+ spec:
+ {{- with .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ fsGroup: 1000
+ {{- with .Values.webapp.podSecurityContext }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-shared
+ image: busybox:1.35
+ command: ['sh', '-c', 'mkdir -p /home/node/shared']
+ securityContext:
+ runAsUser: 1000
+ volumeMounts:
+ - name: shared
+ mountPath: /home/node/shared
+ containers:
+ - name: webapp
+ securityContext:
+ {{- toYaml .Values.webapp.securityContext | nindent 12 }}
+ image: {{ include "trigger-v4.image" . }}
+ imagePullPolicy: {{ .Values.webapp.image.pullPolicy }}
+ command:
+ - ./scripts/entrypoint.sh
+ ports:
+ - name: http
+ containerPort: {{ .Values.webapp.service.targetPort }}
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /healthcheck
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /healthcheck
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 5
+ resources:
+ {{- toYaml .Values.webapp.resources | nindent 12 }}
+ env:
+ - name: APP_ORIGIN
+ value: {{ .Values.config.appOrigin | quote }}
+ - name: LOGIN_ORIGIN
+ value: {{ .Values.config.loginOrigin | quote }}
+ - name: API_ORIGIN
+ value: {{ .Values.config.apiOrigin | quote }}
+ - name: ELECTRIC_ORIGIN
+ value: {{ include "trigger-v4.electric.url" . | quote }}
+ - name: DATABASE_URL
+ value: {{ include "trigger-v4.postgres.connectionString" . | quote }}
+ - name: DIRECT_URL
+ value: {{ include "trigger-v4.postgres.connectionString" . | quote }}
+ - name: DATABASE_HOST
+ value: {{ include "trigger-v4.postgres.host" . | quote }}
+ - name: REDIS_HOST
+ value: {{ include "trigger-v4.redis.host" . | quote }}
+ - name: REDIS_PORT
+ value: {{ include "trigger-v4.redis.port" . | quote }}
+ - name: REDIS_TLS_DISABLED
+ value: "true"
+ - name: APP_LOG_LEVEL
+ value: {{ .Values.webapp.logLevel | quote }}
+ - name: DEV_OTEL_EXPORTER_OTLP_ENDPOINT
+ value: "{{ .Values.config.appOrigin }}/otel"
+ - name: DEPLOY_REGISTRY_HOST
+ value: {{ include "trigger-v4.registry.host" . | quote }}
+ - name: DEPLOY_REGISTRY_NAMESPACE
+ value: {{ .Values.registry.repositoryNamespace | quote }}
+ - name: OBJECT_STORE_BASE_URL
+ value: {{ include "trigger-v4.minio.url" . | quote }}
+ - name: GRACEFUL_SHUTDOWN_TIMEOUT
+ value: {{ .Values.webapp.gracefulShutdownTimeout | quote }}
+ {{- if .Values.webapp.bootstrap.enabled }}
+ - name: TRIGGER_BOOTSTRAP_ENABLED
+ value: "1"
+ - name: TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME
+ value: {{ .Values.webapp.bootstrap.workerGroupName | quote }}
+ - name: TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH
+ value: {{ .Values.webapp.bootstrap.workerTokenPath | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.taskPayloadOffloadThreshold }}
+ - name: TASK_PAYLOAD_OFFLOAD_THRESHOLD
+ value: {{ .Values.webapp.limits.taskPayloadOffloadThreshold | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.taskPayloadMaximumSize }}
+ - name: TASK_PAYLOAD_MAXIMUM_SIZE
+ value: {{ .Values.webapp.limits.taskPayloadMaximumSize | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.batchTaskPayloadMaximumSize }}
+ - name: BATCH_TASK_PAYLOAD_MAXIMUM_SIZE
+ value: {{ .Values.webapp.limits.batchTaskPayloadMaximumSize | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.taskRunMetadataMaximumSize }}
+ - name: TASK_RUN_METADATA_MAXIMUM_SIZE
+ value: {{ .Values.webapp.limits.taskRunMetadataMaximumSize | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.defaultEnvExecutionConcurrencyLimit }}
+ - name: DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT
+ value: {{ .Values.webapp.limits.defaultEnvExecutionConcurrencyLimit | quote }}
+ {{- end }}
+ {{- if .Values.webapp.limits.defaultOrgExecutionConcurrencyLimit }}
+ - name: DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT
+ value: {{ .Values.webapp.limits.defaultOrgExecutionConcurrencyLimit | quote }}
+ {{- end }}
+ {{- if .Values.secrets.enabled }}
+ - name: SESSION_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: SESSION_SECRET
+ - name: MAGIC_LINK_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: MAGIC_LINK_SECRET
+ - name: ENCRYPTION_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: ENCRYPTION_KEY
+ - name: MANAGED_WORKER_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: MANAGED_WORKER_SECRET
+ - name: OBJECT_STORE_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: OBJECT_STORE_ACCESS_KEY_ID
+ - name: OBJECT_STORE_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "trigger-v4.secretsName" . }}
+ key: OBJECT_STORE_SECRET_ACCESS_KEY
+ {{- end }}
+ {{- if .Values.webapp.observability }}
+ {{- if .Values.webapp.observability.tracing.exporterUrl }}
+ - name: INTERNAL_OTEL_TRACE_EXPORTER_URL
+ value: {{ .Values.webapp.observability.tracing.exporterUrl | quote }}
+ {{- end }}
+ {{- if .Values.webapp.observability.tracing.exporterAuthHeaders }}
+ - name: INTERNAL_OTEL_TRACE_EXPORTER_AUTH_HEADERS
+ value: {{ .Values.webapp.observability.tracing.exporterAuthHeaders | quote }}
+ {{- end }}
+ - name: INTERNAL_OTEL_TRACE_LOGGING_ENABLED
+ value: {{ .Values.webapp.observability.tracing.loggingEnabled | quote }}
+ - name: INTERNAL_OTEL_TRACE_SAMPLING_RATE
+ value: {{ .Values.webapp.observability.tracing.samplingRate | quote }}
+ - name: INTERNAL_OTEL_TRACE_INSTRUMENT_PRISMA_ENABLED
+ value: {{ .Values.webapp.observability.tracing.instrumentPrismaEnabled | quote }}
+ - name: INTERNAL_OTEL_TRACE_DISABLED
+ value: {{ .Values.webapp.observability.tracing.disabled | quote }}
+ {{- if .Values.webapp.observability.logging.exporterUrl }}
+ - name: INTERNAL_OTEL_LOG_EXPORTER_URL
+ value: {{ .Values.webapp.observability.logging.exporterUrl | quote }}
+ {{- end }}
+ {{- if .Values.webapp.observability.metrics.exporterUrl }}
+ - name: INTERNAL_OTEL_METRIC_EXPORTER_URL
+ value: {{ .Values.webapp.observability.metrics.exporterUrl | quote }}
+ {{- end }}
+ {{- if .Values.webapp.observability.metrics.exporterAuthHeaders }}
+ - name: INTERNAL_OTEL_METRIC_EXPORTER_AUTH_HEADERS
+ value: {{ .Values.webapp.observability.metrics.exporterAuthHeaders | quote }}
+ {{- end }}
+ - name: INTERNAL_OTEL_METRIC_EXPORTER_ENABLED
+ value: {{ .Values.webapp.observability.metrics.exporterEnabled | quote }}
+ - name: INTERNAL_OTEL_METRIC_EXPORTER_INTERVAL_MS
+ value: {{ .Values.webapp.observability.metrics.exporterIntervalMs | quote }}
+ {{- end }}
+ {{- if .Values.webapp.clickhouse.enabled }}
+ - name: CLICKHOUSE_URL
+ value: {{ if .Values.clickhouse.external }}{{ .Values.clickhouse.externalConnection.httpUrl | quote }}{{ else }}"http://{{ .Values.clickhouse.auth.adminUser }}:{{ .Values.clickhouse.auth.adminPassword }}@{{ include "trigger-v4.fullname" . }}-clickhouse:{{ .Values.clickhouse.service.ports.http }}"{{ end }}
+ - name: CLICKHOUSE_LOG_LEVEL
+ value: {{ .Values.webapp.clickhouse.logLevel | quote }}
+ {{- end }}
+ {{- if .Values.webapp.runReplication.enabled }}
+ - name: RUN_REPLICATION_ENABLED
+ value: "1"
+ - name: RUN_REPLICATION_CLICKHOUSE_URL
+ value: {{ if .Values.clickhouse.external }}{{ .Values.clickhouse.externalConnection.httpUrl | quote }}{{ else }}"http://{{ .Values.clickhouse.auth.adminUser }}:{{ .Values.clickhouse.auth.adminPassword }}@{{ include "trigger-v4.fullname" . }}-clickhouse:{{ .Values.clickhouse.service.ports.http }}"{{ end }}
+ - name: RUN_REPLICATION_LOG_LEVEL
+ value: {{ .Values.webapp.runReplication.logLevel | quote }}
+ {{- end }}
+ {{- if not .Values.telemetry.enabled }}
+ - name: TRIGGER_TELEMETRY_DISABLED
+ value: "1"
+ {{- end }}
+ {{- with .Values.webapp.extraEnv }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: shared
+ mountPath: /home/node/shared
+ volumes:
+ - name: shared
+ {{- if .Values.persistence.shared.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "trigger-v4.fullname" . }}-shared
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- with .Values.webapp.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webapp.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.webapp.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-webapp
+ labels:
+ {{- $component := "webapp" }}
+ {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+ type: {{ .Values.webapp.service.type }}
+ ports:
+ - port: {{ .Values.webapp.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+{{- if .Values.persistence.shared.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "trigger-v4.fullname" . }}-shared
+ {{- if .Values.persistence.shared.retain }}
+ annotations:
+ "helm.sh/resource-policy": keep
+ {{- end }}
+ labels:
+ {{- include "trigger-v4.labels" . | nindent 4 }}
+spec:
+ accessModes:
+ - {{ .Values.persistence.shared.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.shared.size }}
+ {{- $storageClass := .Values.persistence.shared.storageClass | default .Values.global.storageClass }}
+ {{- if $storageClass }}
+ storageClassName: {{ $storageClass | quote }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/values-production-example.yaml b/hosting/k8s/helm/values-production-example.yaml
new file mode 100644
index 0000000000..fb7cee13d1
--- /dev/null
+++ b/hosting/k8s/helm/values-production-example.yaml
@@ -0,0 +1,144 @@
+# Production values example for Trigger.dev v4 Helm chart
+# Copy this file and customize for your production deployment
+
+# REQUIRED: Generate your own secrets using: openssl rand -hex 16
+secrets:
+ sessionSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_001"
+ magicLinkSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_002"
+ encryptionKey: "YOUR_32_CHAR_HEX_SECRET_HERE_003"
+ managedWorkerSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_004"
+ # Object store credentials (customize for your setup)
+ objectStore:
+ accessKeyId: "your-access-key"
+ secretAccessKey: "your-secret-key"
+
+# Production configuration
+config:
+ appOrigin: "https://trigger.example.com"
+ loginOrigin: "https://trigger.example.com"
+ apiOrigin: "https://trigger.example.com"
+
+# Production ingress
+ingress:
+ enabled: true
+ className: "nginx"
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-prod"
+ nginx.ingress.kubernetes.io/ssl-redirect: "true"
+ hosts:
+ - host: trigger.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: trigger-tls
+ hosts:
+ - trigger.example.com
+
+# Production webapp configuration
+webapp:
+ bootstrap:
+ enabled: false # Usually disabled in production
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+
+# Production PostgreSQL (or use external)
+postgres:
+ primary:
+ persistence:
+ enabled: true
+ size: 100Gi
+ storageClass: "fast-ssd"
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+
+# Production Redis (or use external)
+redis:
+ master:
+ persistence:
+ enabled: true
+ size: 20Gi
+ storageClass: "fast-ssd"
+ resources:
+ limits:
+ cpu: 500m
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+
+# Production ClickHouse
+clickhouse:
+ persistence:
+ enabled: true
+ size: 100Gi
+ storageClass: "fast-ssd"
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+
+# Production MinIO (or use external S3)
+minio:
+ auth:
+ rootUser: "admin"
+ rootPassword: "your-strong-minio-password"
+ persistence:
+ enabled: true
+ size: 500Gi
+ storageClass: "standard"
+
+# Production Registry
+registry:
+  repositoryNamespace: "mycompany" # Docker repository namespace for deployed images; it becomes part of each image reference
+ auth:
+ username: "registry-user"
+ password: "your-strong-registry-password"
+ persistence:
+ enabled: true
+ size: 100Gi
+ storageClass: "standard"
+
+# Production Supervisor (Kubernetes worker orchestrator)
+supervisor:
+ resources:
+ limits:
+ cpu: 500m
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+# Example: Use external PostgreSQL instead
+# postgres:
+# enabled: false
+# external: true
+# externalConnection:
+# host: "your-postgres-host.rds.amazonaws.com"
+# port: 5432
+# database: "trigger"
+# username: "trigger_user"
+# password: "your-db-password"
+# schema: "public"
+# sslMode: "require" # Use 'require' or 'verify-full' for production
+
+# Example: Use external Redis instead
+# redis:
+# enabled: false
+# external: true
+# externalConnection:
+# host: "your-redis-cluster.cache.amazonaws.com"
+# port: 6379
+# password: "your-redis-password"
diff --git a/hosting/k8s/helm/values.yaml b/hosting/k8s/helm/values.yaml
new file mode 100644
index 0000000000..61fd2c5f45
--- /dev/null
+++ b/hosting/k8s/helm/values.yaml
@@ -0,0 +1,611 @@
+global:
+ imageRegistry: ""
+ imagePullSecrets: []
+ storageClass: ""
+
+nameOverride: ""
+fullnameOverride: ""
+
+# Shared application configuration (used by multiple services)
+config:
+ appOrigin: "http://localhost:3040"
+ loginOrigin: "http://localhost:3040"
+ apiOrigin: "http://localhost:3040"
+ electricOrigin: "http://electric:3000"
+
+# Secrets configuration
+# IMPORTANT: The default values below are for TESTING ONLY and should NOT be used in production
+# For production deployments:
+# 1. Generate new secrets using: openssl rand -hex 16
+# 2. Override these values in your values.yaml or use external secret management
+# 3. Each secret must be exactly 32 hex characters (16 bytes)
+secrets:
+ # Enable/disable creation of secrets
+ # Set to false to use external secret management (Vault, Infisical, External Secrets, etc.)
+ # When disabled, use extraEnv and podAnnotations for secret injection
+ enabled: true
+
+ # Name of existing secret to use instead of creating one
+ # If empty, a secret will be created with the values below
+ # The secret must contain the following keys:
+ # - SESSION_SECRET
+ # - MAGIC_LINK_SECRET
+ # - ENCRYPTION_KEY
+ # - MANAGED_WORKER_SECRET
+ # - OBJECT_STORE_ACCESS_KEY_ID
+ # - OBJECT_STORE_SECRET_ACCESS_KEY
+ existingSecret: ""
+
+ # Session secret for user authentication (32 hex chars)
+ sessionSecret: "2818143646516f6fffd707b36f334bbb"
+ # Magic link secret for passwordless login (32 hex chars)
+ magicLinkSecret: "44da78b7bbb0dfe709cf38931d25dcdd"
+ # Encryption key for sensitive data (32 hex chars)
+ encryptionKey: "f686147ab967943ebbe9ed3b496e465a"
+ # Worker secret for managed worker authentication (32 hex chars)
+ managedWorkerSecret: "447c29678f9eaf289e9c4b70d3dd8a7f"
+ # Object store credentials (change for production)
+ objectStore:
+ accessKeyId: "admin"
+ secretAccessKey: "very-safe-password"
+
+# Webapp configuration
+webapp:
+ image:
+ registry: ghcr.io
+ repository: triggerdotdev/trigger.dev
+ tag: "" # Defaults to Chart.appVersion when empty
+ pullPolicy: IfNotPresent
+
+ replicaCount: 1
+
+ service:
+ type: ClusterIP
+ port: 3030
+ targetPort: 3000
+
+ podAnnotations: {}
+
+ # podSecurityContext:
+ # fsGroup: 1000
+
+ # securityContext:
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+
+ logLevel: "info"
+ gracefulShutdownTimeout: 1000
+
+ # Bootstrap configuration
+ bootstrap:
+ enabled: true
+ workerGroupName: "bootstrap"
+ workerTokenPath: "/home/node/shared/worker_token"
+
+ # Limits
+ limits:
+ taskPayloadOffloadThreshold: 524288 # 512KB
+ taskPayloadMaximumSize: 3145728 # 3MB
+ batchTaskPayloadMaximumSize: 1000000 # 1MB
+ taskRunMetadataMaximumSize: 262144 # 256KB
+ defaultEnvExecutionConcurrencyLimit: 100
+ defaultOrgExecutionConcurrencyLimit: 300
+
+ # Resources
+ resources:
+ {}
+ # Example resource configuration:
+ # limits:
+ # cpu: 1000m
+ # memory: 2Gi
+ # requests:
+ # cpu: 500m
+ # memory: 1Gi
+
+ # Extra environment variables for webapp
+ extraEnv:
+ []
+ # - name: CUSTOM_VAR
+ # value: "custom-value"
+ # - name: SECRET_VAR
+ # valueFrom:
+ # secretKeyRef:
+ # name: my-secret
+ # key: secret-key
+
+ # ServiceMonitor for Prometheus monitoring
+ serviceMonitor:
+ enabled: false
+ interval: "30s"
+ path: "/metrics"
+ labels: {}
+ basicAuth: {}
+
+ # ClickHouse integration (experimental)
+ # Usage patterns:
+ # 1. Internal ClickHouse: Set clickhouse.enabled=true, clickhouse.external=false, webapp.clickhouse.enabled=true
+ # 2. External ClickHouse: Set clickhouse.enabled=true, clickhouse.external=true, configure externalConnection URLs, webapp.clickhouse.enabled=true
+  # 3. No ClickHouse: Set webapp.clickhouse.enabled=false (this chart enables it by default)
+ clickhouse:
+ enabled: true
+ logLevel: "info" # one of: log, error, warn, info, debug
+ runReplication:
+ enabled: true
+ logLevel: "info" # one of: log, error, warn, info, debug
+
+ # Observability configuration (OTel)
+ observability:
+ tracing:
+ exporterUrl: ""
+ exporterAuthHeaders: ""
+ loggingEnabled: "0"
+ samplingRate: "20"
+ instrumentPrismaEnabled: "0"
+ disabled: "0"
+ logging:
+ exporterUrl: ""
+    # Log exports reuse tracing.exporterAuthHeaders; there is no separate auth-header setting for logs
+ metrics:
+ exporterUrl: ""
+ exporterAuthHeaders: ""
+ exporterEnabled: "0"
+ exporterIntervalMs: 30000
+
+# Supervisor configuration
+supervisor:
+ enabled: true
+ image:
+ registry: ghcr.io
+ repository: triggerdotdev/supervisor
+ tag: "" # Defaults to Chart.appVersion when empty
+ pullPolicy: IfNotPresent
+
+ podAnnotations: {}
+
+ # podSecurityContext:
+ # fsGroup: 1000
+
+ # securityContext:
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+ service:
+ type: ClusterIP
+ ports:
+ workload: 3000
+ metrics: 9088
+ resources: {}
+ config:
+ kubernetes:
+ forceEnabled: true
+ namespace: "" # Default: uses release namespace
+ workerNodetypeLabel: "" # When set, runs will only be scheduled on nodes with "nodetype=