From 04d1743d403061d75972474d3cd39c53a9767c3c Mon Sep 17 00:00:00 2001 From: Debashish Sahu Date: Mon, 4 May 2026 11:16:46 -0400 Subject: [PATCH 1/4] feat(deploy): add Helm chart, GoReleaser, and GHCR release workflow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #137. This change adds three deliverables: 1. A maintained Helm chart at deploy/helm/comqtt supporting both single-node (Deployment) and clustered (StatefulSet + Raft + Gossip) modes. The chart includes: - values.schema.json with conditional rules (cluster mode requires odd replicaCount and persistence.enabled=true) - A runtime entrypoint shim that renders the broker config from a ConfigMap template, computing seed members from replicaCount + the headless Service FQDN, and enabling --raft-bootstrap only when both pod-0 AND the Raft data dir is empty (idempotent on restart) - PodDisruptionBudget pinned to Raft quorum (ceil((n+1)/2)) - Soft pod anti-affinity by default; hard via cluster.hardAntiAffinity - Per-replica PVCs via volumeClaimTemplates - liveness, readiness, and startup probes (all tunable) - Optional dashboard Ingress with documented MQTT-TCP caveats - Optional ServiceMonitor for kube-prometheus-stack - helm test pod that performs a mosquitto pub/sub round trip - chart README with full values reference, upgrade notes, and limitations (no operator, no automatic Raft member eviction) 2. .goreleaser.yaml building cmd/single + cmd/cluster across linux, darwin, windows × amd64, arm64. Builds multi-arch (amd64 + arm64) Docker images via goreleaser dockers + docker_manifests, publishing to ghcr.io/wind-c/comqtt with semver, minor, and latest tags. 3. 
.github/workflows/{release,chart-lint-test,chart-release}.yaml: - release.yaml triggers on tag push, runs GoReleaser, publishes binaries to GitHub Releases and images to GHCR - chart-lint-test runs ct lint + helm template + a kind boot test across single and cluster CI value files - chart-release runs helm/chart-releaser-action on push to main Implementation note: cmd/cluster/main.go replaces all CLI flag values with the loaded --conf file when one is supplied, so passing --raft-bootstrap as a CLI flag alongside --conf is silently ignored. The chart works around this by templating the config file itself at runtime via the entrypoint shim. Bitnami sub-charts (Redis/MySQL/Postgres) are intentionally NOT bundled because the public bitnami/* Docker images now require authentication. The chart documents bring-your-own and ships an example Valkey manifest at deploy/helm/comqtt/ci/valkey.yaml as the recommended OSS RESP store. Verification gauntlet (all passed locally on kind v0.31.0): - helm lint deploy/helm/comqtt - helm template (single + cluster) - kubectl --dry-run=client apply - helm install single + helm test (MQTT pub/sub round trip) - helm install cluster (3-node Raft, leader election + member join) - Cross-node MQTT: subscribe on cluster-comqtt-0, publish to cluster-comqtt-2, message delivered via shared Valkey state - Bootstrap idempotency: kubectl delete pod cluster-comqtt-0 -> "genesis pod but raft dir is non-empty; bootstrap suppressed" --- .github/workflows/chart-lint-test.yaml | 118 ++++++++ .github/workflows/chart-release.yaml | 40 +++ .github/workflows/release.yaml | 56 ++++ .gitignore | 1 + .goreleaser.yaml | 158 ++++++++++ Dockerfile.goreleaser | 12 + README.md | 26 ++ deploy/helm/comqtt/.helmignore | 18 ++ deploy/helm/comqtt/Chart.yaml | 34 +++ deploy/helm/comqtt/README.md | 197 +++++++++++++ deploy/helm/comqtt/ci/cluster-values.yaml | 23 ++ deploy/helm/comqtt/ci/single-values.yaml | 13 + deploy/helm/comqtt/ci/valkey.yaml | 39 +++ 
deploy/helm/comqtt/templates/NOTES.txt | 39 +++ deploy/helm/comqtt/templates/_helpers.tpl | 101 +++++++ deploy/helm/comqtt/templates/configmap.yaml | 114 ++++++++ deploy/helm/comqtt/templates/deployment.yaml | 138 +++++++++ deploy/helm/comqtt/templates/ingress.yaml | 40 +++ deploy/helm/comqtt/templates/pdb.yaml | 13 + deploy/helm/comqtt/templates/secret.yaml | 13 + .../comqtt/templates/service-headless.yaml | 46 +++ deploy/helm/comqtt/templates/service.yaml | 36 +++ .../helm/comqtt/templates/serviceaccount.yaml | 13 + .../helm/comqtt/templates/servicemonitor.yaml | 23 ++ deploy/helm/comqtt/templates/statefulset.yaml | 179 ++++++++++++ .../templates/tests/test-connection.yaml | 41 +++ deploy/helm/comqtt/values.schema.json | 176 +++++++++++ deploy/helm/comqtt/values.yaml | 275 ++++++++++++++++++ 28 files changed, 1982 insertions(+) create mode 100644 .github/workflows/chart-lint-test.yaml create mode 100644 .github/workflows/chart-release.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 .goreleaser.yaml create mode 100644 Dockerfile.goreleaser create mode 100644 deploy/helm/comqtt/.helmignore create mode 100644 deploy/helm/comqtt/Chart.yaml create mode 100644 deploy/helm/comqtt/README.md create mode 100644 deploy/helm/comqtt/ci/cluster-values.yaml create mode 100644 deploy/helm/comqtt/ci/single-values.yaml create mode 100644 deploy/helm/comqtt/ci/valkey.yaml create mode 100644 deploy/helm/comqtt/templates/NOTES.txt create mode 100644 deploy/helm/comqtt/templates/_helpers.tpl create mode 100644 deploy/helm/comqtt/templates/configmap.yaml create mode 100644 deploy/helm/comqtt/templates/deployment.yaml create mode 100644 deploy/helm/comqtt/templates/ingress.yaml create mode 100644 deploy/helm/comqtt/templates/pdb.yaml create mode 100644 deploy/helm/comqtt/templates/secret.yaml create mode 100644 deploy/helm/comqtt/templates/service-headless.yaml create mode 100644 deploy/helm/comqtt/templates/service.yaml create mode 100644 
deploy/helm/comqtt/templates/serviceaccount.yaml create mode 100644 deploy/helm/comqtt/templates/servicemonitor.yaml create mode 100644 deploy/helm/comqtt/templates/statefulset.yaml create mode 100644 deploy/helm/comqtt/templates/tests/test-connection.yaml create mode 100644 deploy/helm/comqtt/values.schema.json create mode 100644 deploy/helm/comqtt/values.yaml diff --git a/.github/workflows/chart-lint-test.yaml b/.github/workflows/chart-lint-test.yaml new file mode 100644 index 0000000..712e524 --- /dev/null +++ b/.github/workflows/chart-lint-test.yaml @@ -0,0 +1,118 @@ +name: chart-lint-test + +on: + pull_request: + paths: + - "deploy/helm/**" + - ".github/workflows/chart-lint-test.yaml" + push: + branches: + - main + paths: + - "deploy/helm/**" + - ".github/workflows/chart-lint-test.yaml" + +jobs: + lint-test: + name: Lint and template + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.15.0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.6.1 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --target-branch=main --chart-dirs deploy/helm) + if [ -n "$changed" ]; then + echo "changed=true" >> "$GITHUB_OUTPUT" + fi + + - name: Run chart-testing (lint) + run: ct lint --target-branch=main --chart-dirs deploy/helm --validate-maintainers=false + + - name: Helm template (single) + run: | + helm template ci deploy/helm/comqtt \ + -f deploy/helm/comqtt/ci/single-values.yaml \ + > /tmp/single.yaml + echo "Generated $(wc -l < /tmp/single.yaml) lines for single mode" + + - name: Helm template (cluster) + run: | + helm template ci deploy/helm/comqtt \ + -f deploy/helm/comqtt/ci/cluster-values.yaml \ + > /tmp/cluster.yaml + echo "Generated $(wc -l < /tmp/cluster.yaml) lines for cluster mode" 
+ + install-test: + name: Install on kind + runs-on: ubuntu-latest + needs: lint-test + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.15.0 + + - name: Create kind cluster + uses: helm/kind-action@v1.10.0 + with: + cluster_name: comqtt-ci + wait: 120s + + - name: Build comqtt image + run: | + docker build -t ghcr.io/wind-c/comqtt:ci . + kind load docker-image ghcr.io/wind-c/comqtt:ci --name comqtt-ci + + - name: Install single-mode chart + run: | + helm install single deploy/helm/comqtt \ + -f deploy/helm/comqtt/ci/single-values.yaml \ + --set image.tag=ci \ + --wait --timeout 5m + + - name: Run helm tests (single) + run: helm test single --logs + + - name: Deploy Valkey for cluster mode + run: kubectl apply -f deploy/helm/comqtt/ci/valkey.yaml + + - name: Install cluster-mode chart + run: | + helm install cluster deploy/helm/comqtt \ + -f deploy/helm/comqtt/ci/cluster-values.yaml \ + --set image.tag=ci \ + --wait --timeout 8m + + - name: Show cluster pod status + if: always() + run: | + kubectl get pods -o wide + kubectl describe statefulset cluster-comqtt || true + + - name: Wait for Raft formation + run: | + for i in 0 1 2; do + echo "==== cluster-comqtt-${i} logs ====" + kubectl logs cluster-comqtt-${i} --tail=200 || true + done diff --git a/.github/workflows/chart-release.yaml b/.github/workflows/chart-release.yaml new file mode 100644 index 0000000..e1776a8 --- /dev/null +++ b/.github/workflows/chart-release.yaml @@ -0,0 +1,40 @@ +name: chart-release + +on: + push: + branches: + - main + paths: + - "deploy/helm/**" + +permissions: + contents: write + pages: write + id-token: write + +jobs: + release: + name: Release Helm chart + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: 
Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.15.0 + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + with: + charts_dir: deploy/helm + env: + CR_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..2d6840a --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,57 @@ +name: release + +on: + push: + tags: + - "v*" + workflow_dispatch: + inputs: + tag: + description: "Tag to release (e.g. v2.7.0). Required for manual runs." + required: true + type: string + +permissions: + contents: write + packages: write + +jobs: + goreleaser: + name: GoReleaser + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ inputs.tag }} + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: "~> v2" + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index cb5373c..b27ae77 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ bolt.db raft.db .idea .vscode +.claude/ diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..90af021 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,158 @@ +version: 2 + +project_name: comqtt + +before: + hooks: + - go mod tidy + +builds: + - id: comqtt + main: ./cmd/single + binary: comqtt + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + ldflags: + - -s -w -X
main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} + ignore: + - goos: windows + goarch: arm64 + + - id: comqtt-cluster + main: ./cmd/cluster + binary: comqtt-cluster + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + ldflags: + - -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} + +archives: + - id: default + ids: + - comqtt + - comqtt-cluster + name_template: >- + {{ .ProjectName }}_{{ .Version }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + format_overrides: + - goos: windows + formats: [zip] + files: + - LICENSE.md + - README.md + - cmd/config/*.yml + +checksum: + name_template: "checksums.txt" + +snapshot: + version_template: "{{ incpatch .Version }}-snapshot-{{ .ShortCommit }}" + +changelog: + sort: asc + use: github + filters: + exclude: + - "^docs:" + - "^test:" + - "^chore:" + - "^ci:" + - "Merge pull request" + - "Merge branch" + groups: + - title: Features + regexp: "^.*feat[(\\w)]*:+.*$" + order: 0 + - title: Bug fixes + regexp: "^.*fix[(\\w)]*:+.*$" + order: 1 + - title: Performance + regexp: "^.*perf[(\\w)]*:+.*$" + order: 2 + - title: Other + order: 999 + +dockers: + - id: comqtt-amd64 + use: buildx + goos: linux + goarch: amd64 + ids: + - comqtt + - comqtt-cluster + image_templates: + - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" + - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" + dockerfile: Dockerfile.goreleaser + extra_files: + - cmd/config + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.description=A lightweight high-performance MQTT broker (v3.0/v3.1.1/v5.0) supporting clustering" + - "--label=org.opencontainers.image.url=https://github.com/wind-c/comqtt" + - "--label=org.opencontainers.image.source=https://github.com/wind-c/comqtt" + - 
"--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.licenses=MIT" + + - id: comqtt-arm64 + use: buildx + goos: linux + goarch: arm64 + ids: + - comqtt + - comqtt-cluster + image_templates: + - "ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" + - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" + dockerfile: Dockerfile.goreleaser + extra_files: + - cmd/config + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.description=A lightweight high-performance MQTT broker (v3.0/v3.1.1/v5.0) supporting clustering" + - "--label=org.opencontainers.image.url=https://github.com/wind-c/comqtt" + - "--label=org.opencontainers.image.source=https://github.com/wind-c/comqtt" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.licenses=MIT" + +docker_manifests: + - name_template: "ghcr.io/wind-c/comqtt:{{ .Version }}" + image_templates: + - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" + - "ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" + - name_template: "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}" + image_templates: + - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" + - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" + - name_template: "ghcr.io/wind-c/comqtt:latest" + image_templates: + - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" + - "ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" + +release: + github: + owner: wind-c + name: comqtt + prerelease: auto + draft: false + name_template: "v{{ .Version }}" diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser new file mode 100644 index 0000000..60e6ec2 --- /dev/null +++ b/Dockerfile.goreleaser @@ -0,0 +1,12 @@ +FROM alpine:3.20 + +RUN apk add --no-cache ca-certificates tzdata + +WORKDIR / 
+COPY comqtt /comqtt +COPY comqtt-cluster /comqtt-cluster +COPY cmd/config /etc/comqtt/examples + +EXPOSE 1883 1882 8080 7946 8946 17946 + +ENTRYPOINT ["/comqtt"] diff --git a/README.md b/README.md index d56bb0f..91fc75c 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,32 @@ docker build -t comqtt:latest . docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 comqtt:latest ``` +Tagged release images are published to GHCR by the `release` workflow: + +```sh +docker pull ghcr.io/wind-c/comqtt:<version> +``` + +### Using Kubernetes (Helm) +A maintained Helm chart lives in [deploy/helm/comqtt](deploy/helm/comqtt) and supports both standalone and clustered deployments. See [the chart README](deploy/helm/comqtt/README.md) for full documentation. + +Single node: + +```sh +helm install single deploy/helm/comqtt +helm test single --logs +``` + +Three-node Raft cluster (bring your own RESP-compatible store; the chart ships an example Valkey manifest): + +```sh +kubectl apply -f deploy/helm/comqtt/ci/valkey.yaml +helm install cluster deploy/helm/comqtt \ + -f deploy/helm/comqtt/ci/cluster-values.yaml +``` + +Released charts are also available from the GitHub Pages-hosted Helm repository published by the `chart-release` workflow. + ## Developing with Comqtt ### Importing as a package Importing Comqtt as a package requires just a few lines of code to get started.
diff --git a/deploy/helm/comqtt/.helmignore b/deploy/helm/comqtt/.helmignore new file mode 100644 index 0000000..d6d7333 --- /dev/null +++ b/deploy/helm/comqtt/.helmignore @@ -0,0 +1,18 @@ +.DS_Store +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +*.swp +*.bak +*.tmp +*.orig +*~ +.project +.idea/ +*.tmproj +.vscode/ +ci/ diff --git a/deploy/helm/comqtt/Chart.yaml b/deploy/helm/comqtt/Chart.yaml new file mode 100644 index 0000000..c9acf0a --- /dev/null +++ b/deploy/helm/comqtt/Chart.yaml @@ -0,0 +1,34 @@ +apiVersion: v2 +name: comqtt +description: | + comqtt — a lightweight, high-performance MQTT broker (v3.0/v3.1.1/v5.0) + supporting standalone and clustered (Raft + Gossip) deployments. +type: application +version: 0.1.0 +appVersion: "2.6.0" +kubeVersion: ">=1.23.0-0" +home: https://github.com/wind-c/comqtt +sources: + - https://github.com/wind-c/comqtt +keywords: + - mqtt + - broker + - iot + - messaging + - pub-sub + - cluster + - raft +maintainers: + - name: comqtt community + url: https://github.com/wind-c/comqtt +icon: https://raw.githubusercontent.com/wind-c/comqtt/main/README.md +annotations: + artifacthub.io/changes: | + - kind: added + description: Initial Helm chart with single-node and clustered deployment modes (resolves #137). + artifacthub.io/license: MIT +## Bitnami sub-charts intentionally omitted: as of 2025 the public bitnami/* +## Docker images are behind authentication. Bring your own Redis/Valkey, +## MySQL, or PostgreSQL — see deploy/helm/comqtt/README.md "Required +## external services" for example manifests. Cluster mode requires a +## RESP-compatible store (Redis or Valkey). 
diff --git a/deploy/helm/comqtt/README.md b/deploy/helm/comqtt/README.md new file mode 100644 index 0000000..e4d7cc8 --- /dev/null +++ b/deploy/helm/comqtt/README.md @@ -0,0 +1,197 @@ +# comqtt + +A Helm chart for [comqtt](https://github.com/wind-c/comqtt), a lightweight, +high-performance MQTT broker (v3.0/v3.1.1/v5.0) supporting standalone and +clustered deployments via Raft + Gossip. + +Resolves [issue #137](https://github.com/wind-c/comqtt/issues/137). + +## TL;DR + +```bash +helm repo add comqtt https://wind-c.github.io/comqtt +helm install my-broker comqtt/comqtt +``` + +## Single-node install + +```bash +helm install single deploy/helm/comqtt +helm test single --logs +``` + +## Clustered install + +A 3-replica Raft cluster needs a RESP-compatible store for shared session +state. The chart **does not bundle** Redis or Valkey because the public +Bitnami images are gated as of 2025; bring your own. The simplest option is +the [Valkey](https://valkey.io) OSS fork of Redis: + +```bash +kubectl apply -f deploy/helm/comqtt/ci/valkey.yaml + +helm install cluster deploy/helm/comqtt \ + --set mode=cluster \ + --set replicaCount=3 \ + --set config.redis.options.addr=valkey:6379 +``` + +`replicaCount` must be **odd** (1, 3, 5, 7) for Raft quorum — the schema enforces this. 
+ +### Required external services + +| comqtt feature | Required when | Address value | Example | +|---|---|---|---| +| RESP store (Redis or Valkey) | `mode=cluster` or `config.storage-way=3` | `config.redis.options.addr` | [ci/valkey.yaml](ci/valkey.yaml) | +| MySQL | `config.auth.datasource=2` | via `config.auth.conf-path` | upstream auth-mysql.yml | +| PostgreSQL | `config.auth.datasource=3` | via `config.auth.conf-path` | upstream auth-postgresql.yml | +| HTTP auth | `config.auth.datasource=4` | via `config.auth.conf-path` | upstream auth-http.yml | + + +## Connecting + +Inside the cluster: + +```text +MQTT tcp://<release>-comqtt.<namespace>.svc.cluster.local:1883 +WS ws://<release>-comqtt.<namespace>.svc.cluster.local:1882/mqtt +HTTP http://<release>-comqtt.<namespace>.svc.cluster.local:8080 +``` + +Smoke test using `eclipse-mosquitto`: + +```bash +kubectl run mqtt-pub --rm -i --restart=Never \ + --image=eclipse-mosquitto:2 -- \ + mosquitto_pub -h <release>-comqtt -t demo -m "hello" +``` + +## How cluster mode works + +The chart deploys a StatefulSet plus a headless Service so each pod gets a +stable DNS name like `release-comqtt-0.release-comqtt-headless.namespace.svc.cluster.local`. + +An entrypoint script (mounted from a ConfigMap) renders the broker config +at boot, performing two runtime steps: + +1. **Seed members** are computed from `replicaCount` and the headless Service + FQDN — horizontal scaling does not require a chart upgrade. +2. **Raft bootstrap** is set to `true` only when both conditions hold: + - the pod hostname ends in `-0` (the genesis pod), and + - the Raft data directory is empty. + + This is why a `helm rollout restart` is safe: existing pods see a populated + data dir, the bootstrap flag stays off, and Raft re-joins. Re-bootstrapping + a populated cluster is destructive and the chart prevents it. + +The pod's IP (from the downward-API `POD_IP` env var) is substituted into the +config's `bind-addr` by the entrypoint at boot — it cannot be passed as a CLI +flag because `--conf` overrides all flags. + +A PodDisruptionBudget enforces `minAvailable = ⌈(replicas+1)/2⌉` so voluntary +disruptions cannot drop quorum.
Soft pod anti-affinity is the default; flip +`cluster.hardAntiAffinity=true` to require distinct hostnames. + +Each replica gets its own PVC via `volumeClaimTemplates` — Raft log +durability is non-negotiable in cluster mode and `persistence.enabled=false` +is rejected by the schema. + +## Values reference + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `mode` | string | `single` | `single` or `cluster`. | +| `replicaCount` | int | `3` | Cluster only. Must be odd. | +| `image.repository` | string | `ghcr.io/wind-c/comqtt` | | +| `image.tag` | string | `""` | Falls back to `Chart.appVersion`. `latest` is rejected. | +| `image.pullPolicy` | string | `IfNotPresent` | | +| `image.pullSecrets` | list | `[]` | | +| `serviceAccount.create` | bool | `true` | | +| `podSecurityContext` | object | non-root, fsGroup 1000 | | +| `securityContext` | object | drop ALL caps, RO root fs | | +| `resources.requests` | object | `100m / 128Mi` | | +| `resources.limits` | object | `{}` | unset by default | +| `livenessProbe` | object | TCP 1883, period 20s | | +| `readinessProbe` | object | TCP 1883, period 10s | | +| `startupProbe` | object | TCP 1883, period 5s, 30 fails | | +| `service.mqtt.{type,port,nodePort}` | — | `ClusterIP / 1883` | | +| `service.ws.{type,port,nodePort}` | — | `ClusterIP / 1882` | | +| `service.dashboard.{type,port,nodePort}` | — | `ClusterIP / 8080` | | +| `ingress.enabled` | bool | `false` | Dashboard only. | +| `tls.enabled` | bool | `false` | | +| `tls.existingSecret` | string | `""` | Pre-created Secret with tls.crt/tls.key/ca.crt. | +| `tls.certManager.enabled` | bool | `false` | | +| `config.*` | object | mirrors `cmd/config/single.yml` | Rendered into ConfigMap. | +| `cluster.gossipPort` | int | `7946` | | +| `cluster.raftPort` | int | `8946` | | +| `cluster.grpcPort` | int | `17946` | | +| `cluster.grpcEnable` | bool | `true` | | +| `cluster.discoveryWay` | int | `0` | 0 serf, 1 memberlist. 
| +| `cluster.raftImpl` | int | `0` | 0 hashicorp, 1 etcd. | +| `cluster.hardAntiAffinity` | bool | `false` | | +| `cluster.podDisruptionBudget.enabled` | bool | `true` | | +| `persistence.enabled` | bool | `true` | Required in cluster mode. | +| `persistence.size` | string | `5Gi` | | +| `persistence.storageClass` | string | `""` | | +| `secret.create` | bool | `false` | | +| `secret.existingSecret` | string | `""` | | +| `secret.data` | map | `{}` | Keys become file names under `/etc/comqtt/secrets`. | +| `serviceMonitor.enabled` | bool | `false` | kube-prometheus-stack. | +| `tests.enabled` | bool | `true` | `helm test` mosquitto pod. | + +For the full list of `config.*` keys (storage, auth, mqtt, redis, log) see +[cmd/config/single.yml](https://github.com/wind-c/comqtt/blob/main/cmd/config/single.yml) +and [cmd/config/node1.yml](https://github.com/wind-c/comqtt/blob/main/cmd/config/node1.yml) +in the upstream repo. The chart's `config:` block mirrors those files +verbatim. + +## Exposing MQTT externally + +A standard HTTP Ingress **cannot** proxy raw MQTT TCP — it is a Layer 7 +HTTP-only resource. Use one of: + +- `service.mqtt.type: LoadBalancer` — straightforward on cloud providers. +- An Ingress controller with TCP passthrough (e.g. NGINX `--tcp-services-configmap`). +- A NodePort plus an external load balancer or DNS round-robin. + +For TLS termination at the broker, supply `tls.existingSecret` and reference +it from `config.mqtt.tls.{ca-cert,server-cert,server-key}`. + +## Upgrades + +- **Switching `mode=single` ↔ `mode=cluster` is not supported in-place.** + The on-disk data layouts differ; the chart will refuse to roll a Deployment + into a StatefulSet (and Helm itself errors on incompatible kinds). Migrate + by exporting state, uninstalling, and reinstalling. + +- **Scaling cluster replicas:** scale up by `helm upgrade --set replicaCount=N` + (must remain odd). 
The new pods receive a populated data dir on first boot + only after they join the existing cluster — the bootstrap shim handles this + automatically. + +- **Scaling cluster replicas down** is risky: pods removed without explicit + Raft eviction become permanent failures from the cluster's perspective. See + the limitations section. + +## Limitations + +- No operator. The chart does not perform Raft member eviction when a pod is + permanently removed — the operator is expected to scale via `helm upgrade` + and accept that decommissioned members remain in the Raft membership list + until manually evicted. +- No automatic recovery from split-brain. If two halves of the cluster + diverge, manually pick a survivor, scale the StatefulSet to 1, delete the + PVCs of the losers, then scale back up. +- No bundled RESP store. The chart expects an externally-deployed Redis or + Valkey (see [ci/valkey.yaml](ci/valkey.yaml) for a starting point) — the + chart does not manage failover or HA for it. +- The dashboard Ingress proxies `/`. Mounting it under a sub-path is not + currently supported. + +## Contributing + +Bug reports and PRs welcome. Please run before submitting: + +```bash +helm lint deploy/helm/comqtt +helm template ci deploy/helm/comqtt -f deploy/helm/comqtt/ci/single-values.yaml +helm template ci deploy/helm/comqtt -f deploy/helm/comqtt/ci/cluster-values.yaml +``` diff --git a/deploy/helm/comqtt/ci/cluster-values.yaml b/deploy/helm/comqtt/ci/cluster-values.yaml new file mode 100644 index 0000000..09b72e7 --- /dev/null +++ b/deploy/helm/comqtt/ci/cluster-values.yaml @@ -0,0 +1,23 @@ +mode: cluster +replicaCount: 3 +image: + repository: ghcr.io/wind-c/comqtt + tag: "" + pullPolicy: IfNotPresent +persistence: + enabled: true + size: 1Gi +# CI assumes a RESP-compatible store named `valkey` is reachable at +# valkey:6379. See ci/valkey.yaml — chart-lint-test workflow applies it +# before installing the cluster. 
+config: + storage-way: 3 + log: + output: 0 + redis: + options: + addr: "valkey:6379" +cluster: + hardAntiAffinity: false +tests: + enabled: false diff --git a/deploy/helm/comqtt/ci/single-values.yaml b/deploy/helm/comqtt/ci/single-values.yaml new file mode 100644 index 0000000..7328a10 --- /dev/null +++ b/deploy/helm/comqtt/ci/single-values.yaml @@ -0,0 +1,13 @@ +mode: single +image: + repository: ghcr.io/wind-c/comqtt + tag: "" + pullPolicy: IfNotPresent +persistence: + enabled: false +config: + storage-way: 0 + log: + output: 0 +tests: + enabled: true diff --git a/deploy/helm/comqtt/ci/valkey.yaml b/deploy/helm/comqtt/ci/valkey.yaml new file mode 100644 index 0000000..ff67724 --- /dev/null +++ b/deploy/helm/comqtt/ci/valkey.yaml @@ -0,0 +1,39 @@ +# Vanilla Valkey deployment used by the chart's CI pipeline. Cluster-mode +# comqtt requires a RESP-compatible store; Valkey is the actively-maintained +# OSS fork of Redis and is protocol-compatible. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: valkey +spec: + replicas: 1 + selector: + matchLabels: + app: valkey + template: + metadata: + labels: + app: valkey + spec: + containers: + - name: valkey + image: valkey/valkey:8-alpine + ports: + - containerPort: 6379 + name: resp + resources: + requests: + cpu: 50m + memory: 64Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: valkey +spec: + selector: + app: valkey + ports: + - name: resp + port: 6379 + targetPort: resp diff --git a/deploy/helm/comqtt/templates/NOTES.txt b/deploy/helm/comqtt/templates/NOTES.txt new file mode 100644 index 0000000..10feb49 --- /dev/null +++ b/deploy/helm/comqtt/templates/NOTES.txt @@ -0,0 +1,39 @@ +comqtt {{ .Chart.AppVersion }} has been deployed in {{ .Values.mode }} mode. + +Release: {{ .Release.Name }} +Namespace: {{ .Release.Namespace }} + +Listeners (in-cluster): + MQTT tcp://{{ include "comqtt.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.mqtt.port }} + WS ws://{{ include "comqtt.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ws.port }}/mqtt + HTTP http://{{ include "comqtt.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.dashboard.port }} + +{{- if eq .Values.mode "cluster" }} + +Cluster mode: {{ .Values.replicaCount }} replicas. Pod-0 bootstraps Raft on +first boot when its Raft data dir is empty; subsequent restarts skip the +bootstrap flag automatically. Members: +{{- range $i, $_ := until (.Values.replicaCount | int) }} + - {{ include "comqtt.headlessName" $ }}-{{ $i }}.{{ include "comqtt.headlessName" $ }}.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.cluster.gossipPort }} +{{- end }} + +PodDisruptionBudget minAvailable = {{ include "comqtt.raftQuorum" . }} (Raft quorum). +{{- end }} + +Quick smoke test: + + kubectl run mqtt-pub --rm -i --restart=Never \ + --image={{ .Values.tests.image.repository }}:{{ .Values.tests.image.tag }} -- \ + mosquitto_pub -h {{ include "comqtt.fullname" . }} -p {{ .Values.service.mqtt.port }} \ + -t test/topic -m "hello from helm" + +To run the chart's bundled connectivity test: + + helm test {{ .Release.Name }} --logs + +{{- if .Values.ingress.enabled }} + +Dashboard Ingress is enabled. Note: raw MQTT cannot be served by a Layer 7 +HTTP Ingress; for external MQTT use a LoadBalancer Service or an Ingress +controller with TCP passthrough. +{{- end }} diff --git a/deploy/helm/comqtt/templates/_helpers.tpl b/deploy/helm/comqtt/templates/_helpers.tpl new file mode 100644 index 0000000..4b436ca --- /dev/null +++ b/deploy/helm/comqtt/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "comqtt.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Fully qualified app name. 
+*/}} +{{- define "comqtt.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Headless Service name (cluster mode). +*/}} +{{- define "comqtt.headlessName" -}} +{{- printf "%s-headless" (include "comqtt.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Chart label string. +*/}} +{{- define "comqtt.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Standard labels. +*/}} +{{- define "comqtt.labels" -}} +helm.sh/chart: {{ include "comqtt.chart" . }} +{{ include "comqtt.selectorLabels" . }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: broker +app.kubernetes.io/part-of: comqtt +comqtt.io/mode: {{ .Values.mode | quote }} +{{- end -}} + +{{/* +Selector labels. +*/}} +{{- define "comqtt.selectorLabels" -}} +app.kubernetes.io/name: {{ include "comqtt.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +ServiceAccount name. +*/}} +{{- define "comqtt.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{- default (include "comqtt.fullname" .) .Values.serviceAccount.name -}} +{{- else -}} +{{- default "default" .Values.serviceAccount.name -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve the image tag, falling back to .Chart.AppVersion. Refuse `latest`. 
+*/}} +{{- define "comqtt.image" -}} +{{- $tag := .Values.image.tag | default .Chart.AppVersion -}} +{{- if eq $tag "latest" -}} +{{- fail "image.tag=latest is not permitted; pin a real version (set image.tag to a specific release or rely on .Chart.AppVersion)." -}} +{{- end -}} +{{- printf "%s:%s" .Values.image.repository $tag -}} +{{- end -}} + +{{/* +Resolve which broker binary to invoke. +*/}} +{{- define "comqtt.binary" -}} +{{- if eq .Values.mode "cluster" -}}/comqtt-cluster{{- else -}}/comqtt{{- end -}} +{{- end -}} + +{{/* +Cluster Raft quorum minimum. +*/}} +{{- define "comqtt.raftQuorum" -}} +{{- div (add .Values.replicaCount 1) 2 -}} +{{- end -}} + +{{/* +Render the rendered config (single-mode — used as-is) or the template config +(cluster mode — placeholder substitution happens at runtime). +*/}} +{{- define "comqtt.configYaml" -}} +{{- toYaml .Values.config | nindent 0 -}} +{{- end -}} diff --git a/deploy/helm/comqtt/templates/configmap.yaml b/deploy/helm/comqtt/templates/configmap.yaml new file mode 100644 index 0000000..247c98e --- /dev/null +++ b/deploy/helm/comqtt/templates/configmap.yaml @@ -0,0 +1,114 @@ +{{- /* +For single mode the ConfigMap holds a fully-rendered config.yml that is +mounted at /etc/comqtt/config.yml. + +For cluster mode it holds a *template* config.tpl.yml plus an entrypoint +script that performs runtime substitution of node identity, bind IP, +seed members, and the Raft bootstrap flag. This is necessary because +cmd/cluster/main.go replaces all CLI flags with the contents of --conf +when a config file is provided, so we must inject runtime values into +the file itself rather than passing them as flags alongside --conf. +*/ -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "comqtt.fullname" . }}-config + labels: + {{- include "comqtt.labels" . | nindent 4 }} +data: +{{- if eq .Values.mode "single" }} + config.yml: | +{{- tpl (toYaml .Values.config) . 
| nindent 4 }} +{{- else }} + config.tpl.yml: | +{{- $cfg := deepCopy .Values.config -}} +{{- /* Inject cluster-mode keys into the templated config. */ -}} +{{- $_ := set $cfg "storage-way" 3 -}} +{{- $cluster := dict + "discovery-way" .Values.cluster.discoveryWay + "node-name" "__NODE_NAME__" + "bind-addr" "__BIND_ADDR__" + "bind-port" (.Values.cluster.gossipPort | int) + "advertise-addr" "" + "advertise-port" 0 + "members" "__MEMBERS_PLACEHOLDER__" + "queue-depth" (.Values.cluster.queueDepth | int) + "raft-impl" .Values.cluster.raftImpl + "raft-port" (.Values.cluster.raftPort | int) + "raft-dir" "/data/raft" + "raft-bootstrap" "__RAFT_BOOTSTRAP__" + "grpc-enable" .Values.cluster.grpcEnable + "grpc-port" (.Values.cluster.grpcPort | int) + "inbound-pool-size" (.Values.cluster.inboundPoolSize | int) + "outbound-pool-size" (.Values.cluster.outboundPoolSize | int) + "inout-pool-nonblocking" .Values.cluster.inoutPoolNonblocking +-}} +{{- $_ := set $cfg "cluster" $cluster -}} +{{- tpl (toYaml $cfg) . | nindent 4 }} + entrypoint.sh: | + #!/bin/sh + set -eu + + : "${POD_IP:?POD_IP must be set via downward API}" + : "${HOSTNAME:?HOSTNAME must be set}" + NAMESPACE="${POD_NAMESPACE:?POD_NAMESPACE must be set}" + HEADLESS="${HEADLESS_SERVICE:?HEADLESS_SERVICE must be set}" + POD_PREFIX="${POD_PREFIX:?POD_PREFIX must be set (StatefulSet name)}" + REPLICAS="${REPLICA_COUNT:?REPLICA_COUNT must be set}" + GOSSIP_PORT="${GOSSIP_PORT:-7946}" + RAFT_DIR="${RAFT_DIR:-/data/raft}" + + mkdir -p "$RAFT_DIR" /data/logs /tmp/comqtt + + # Build YAML flow-list of seed members: ["pod-0.svc:7946","pod-1.svc:7946",...] + members='[' + i=0 + while [ "$i" -lt "$REPLICAS" ]; do + if [ "$i" -gt 0 ]; then members="${members},"; fi + members="${members}\"${POD_PREFIX}-${i}.${HEADLESS}.${NAMESPACE}.svc.cluster.local:${GOSSIP_PORT}\"" + i=$((i+1)) + done + members="${members}]" + + # Idempotent Raft bootstrap: true only when we are pod-0 AND raft dir is empty. 
+ bootstrap="false" + case "$HOSTNAME" in + *-0) + if [ -z "$(ls -A "$RAFT_DIR" 2>/dev/null || true)" ]; then + bootstrap="true" + echo "[entrypoint] genesis pod with empty raft dir; --raft-bootstrap=true" + else + echo "[entrypoint] genesis pod but raft dir is non-empty; bootstrap suppressed" + fi + ;; + *) ;; + esac + + # Render template config. Bash-free sed; escape members for sed by trusting + # that hostnames and ports do not contain '|'. + sed \ + -e "s|__NODE_NAME__|${HOSTNAME}|g" \ + -e "s|__BIND_ADDR__|${POD_IP}|g" \ + -e "s|__MEMBERS_PLACEHOLDER__|${members}|g" \ + -e "s|__RAFT_BOOTSTRAP__|${bootstrap}|g" \ + /etc/comqtt/config.tpl.yml > /tmp/comqtt/config.yml + + # The members value in YAML must not be quoted as a string. + # sed has produced a quoted string; rewrite the line to use the bare list. + awk -v list="${members}" ' + /^[[:space:]]+members: / { + match($0, /^[[:space:]]+/); + prefix = substr($0, 1, RLENGTH); + print prefix "members: " list; + next + } + { print } + ' /tmp/comqtt/config.yml > /tmp/comqtt/config.rendered.yml + mv /tmp/comqtt/config.rendered.yml /tmp/comqtt/config.yml + + echo "[entrypoint] rendered config:" + cat /tmp/comqtt/config.yml + echo "[entrypoint] exec /comqtt-cluster" + + exec /comqtt-cluster --conf=/tmp/comqtt/config.yml +{{- end }} diff --git a/deploy/helm/comqtt/templates/deployment.yaml b/deploy/helm/comqtt/templates/deployment.yaml new file mode 100644 index 0000000..1aa74f4 --- /dev/null +++ b/deploy/helm/comqtt/templates/deployment.yaml @@ -0,0 +1,138 @@ +{{- if eq .Values.mode "single" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "comqtt.fullname" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "comqtt.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "comqtt.serviceAccountName" . }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: comqtt + image: {{ include "comqtt.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/comqtt"] + args: + - --conf=/etc/comqtt/config.yml + ports: + - name: mqtt + containerPort: 1883 + protocol: TCP + - name: mqtt-ws + containerPort: 1882 + protocol: TCP + - name: dashboard + containerPort: 8080 + protocol: TCP + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + {{- omit .Values.livenessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + {{- omit .Values.readinessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: + {{- omit .Values.startupProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/comqtt + readOnly: true + - name: data + mountPath: /data + - name: tmp + mountPath: /tmp + {{- if or .Values.secret.create .Values.secret.existingSecret }} + - name: secrets + mountPath: /etc/comqtt/secrets + readOnly: true + {{- end }} + volumes: + - name: config + configMap: + name: {{ include "comqtt.fullname" . 
}}-config + - name: tmp + emptyDir: {} + {{- if .Values.persistence.enabled }} + - name: data + persistentVolumeClaim: + claimName: {{ include "comqtt.fullname" . }}-data + {{- else }} + - name: data + emptyDir: {} + {{- end }} + {{- if or .Values.secret.create .Values.secret.existingSecret }} + - name: secrets + secret: + secretName: {{ default (include "comqtt.fullname" .) .Values.secret.existingSecret }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- if .Values.persistence.enabled }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "comqtt.fullname" . }}-data + labels: + {{- include "comqtt.labels" . | nindent 4 }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 4 }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.storageClass }} + storageClassName: {{ . }} + {{- end }} +{{- end }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/ingress.yaml b/deploy/helm/comqtt/templates/ingress.yaml new file mode 100644 index 0000000..69b5d62 --- /dev/null +++ b/deploy/helm/comqtt/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if .Values.ingress.enabled -}} +{{- /* +NOTE: A standard HTTP Ingress can ONLY proxy the dashboard (port 8080). +Raw MQTT TCP cannot ride on a Layer 7 HTTP Ingress; expose it via a +LoadBalancer Service or an Ingress controller with TCP passthrough. +*/ -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "comqtt.fullname" . }}-dashboard + labels: + {{- include "comqtt.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- with .Values.ingress.className }} + ingressClassName: {{ . }} + {{- end }} + {{- with .Values.ingress.tls }} + tls: + {{- toYaml . | nindent 4 }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "comqtt.fullname" $ }} + port: + number: {{ $.Values.service.dashboard.port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/pdb.yaml b/deploy/helm/comqtt/templates/pdb.yaml new file mode 100644 index 0000000..5fa01cc --- /dev/null +++ b/deploy/helm/comqtt/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if and (eq .Values.mode "cluster") .Values.cluster.podDisruptionBudget.enabled -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "comqtt.fullname" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} +spec: + minAvailable: {{ include "comqtt.raftQuorum" . }} + selector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/secret.yaml b/deploy/helm/comqtt/templates/secret.yaml new file mode 100644 index 0000000..27b1170 --- /dev/null +++ b/deploy/helm/comqtt/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.secret.create (not .Values.secret.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "comqtt.fullname" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} +type: Opaque +{{- with .Values.secret.data }} +stringData: + {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/service-headless.yaml b/deploy/helm/comqtt/templates/service-headless.yaml new file mode 100644 index 0000000..0498698 --- /dev/null +++ b/deploy/helm/comqtt/templates/service-headless.yaml @@ -0,0 +1,46 @@ +{{- if eq .Values.mode "cluster" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "comqtt.headlessName" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mqtt + port: {{ .Values.service.mqtt.port }} + targetPort: mqtt + protocol: TCP + - name: mqtt-ws + port: {{ .Values.service.ws.port }} + targetPort: mqtt-ws + protocol: TCP + - name: dashboard + port: {{ .Values.service.dashboard.port }} + targetPort: dashboard + protocol: TCP + - name: gossip-tcp + port: {{ .Values.cluster.gossipPort }} + targetPort: gossip-tcp + protocol: TCP + - name: gossip-udp + port: {{ .Values.cluster.gossipPort }} + targetPort: gossip-udp + protocol: UDP + - name: raft + port: {{ .Values.cluster.raftPort }} + targetPort: raft + protocol: TCP + {{- if .Values.cluster.grpcEnable }} + - name: grpc + port: {{ .Values.cluster.grpcPort }} + targetPort: grpc + protocol: TCP + {{- end }} + selector: + {{- include "comqtt.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/service.yaml b/deploy/helm/comqtt/templates/service.yaml new file mode 100644 index 0000000..6eb6c2b --- /dev/null +++ b/deploy/helm/comqtt/templates/service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "comqtt.fullname" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} + {{- with .Values.service.mqtt.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.mqtt.type }} + ports: + - name: mqtt + port: {{ .Values.service.mqtt.port }} + targetPort: mqtt + protocol: TCP + {{- if and (eq .Values.service.mqtt.type "NodePort") .Values.service.mqtt.nodePort }} + nodePort: {{ .Values.service.mqtt.nodePort }} + {{- end }} + - name: mqtt-ws + port: {{ .Values.service.ws.port }} + targetPort: mqtt-ws + protocol: TCP + {{- if and (eq .Values.service.ws.type "NodePort") .Values.service.ws.nodePort }} + nodePort: {{ .Values.service.ws.nodePort }} + {{- end }} + - name: dashboard + port: {{ .Values.service.dashboard.port }} + targetPort: dashboard + protocol: TCP + {{- if and (eq .Values.service.dashboard.type "NodePort") .Values.service.dashboard.nodePort }} + nodePort: {{ .Values.service.dashboard.nodePort }} + {{- end }} + selector: + {{- include "comqtt.selectorLabels" . | nindent 4 }} diff --git a/deploy/helm/comqtt/templates/serviceaccount.yaml b/deploy/helm/comqtt/templates/serviceaccount.yaml new file mode 100644 index 0000000..3d7a131 --- /dev/null +++ b/deploy/helm/comqtt/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "comqtt.serviceAccountName" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: false +{{- end }} diff --git a/deploy/helm/comqtt/templates/servicemonitor.yaml b/deploy/helm/comqtt/templates/servicemonitor.yaml new file mode 100644 index 0000000..62fd39f --- /dev/null +++ b/deploy/helm/comqtt/templates/servicemonitor.yaml @@ -0,0 +1,23 @@ +{{- if .Values.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "comqtt.fullname" . }} + {{- with .Values.serviceMonitor.namespace }} + namespace: {{ . 
}} + {{- end }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 6 }} + endpoints: + - port: dashboard + path: /metrics + interval: {{ .Values.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/statefulset.yaml b/deploy/helm/comqtt/templates/statefulset.yaml new file mode 100644 index 0000000..809db56 --- /dev/null +++ b/deploy/helm/comqtt/templates/statefulset.yaml @@ -0,0 +1,179 @@ +{{- if eq .Values.mode "cluster" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "comqtt.fullname" . }} + labels: + {{- include "comqtt.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "comqtt.headlessName" . }} + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "comqtt.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "comqtt.serviceAccountName" . }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . 
}} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + affinity: + {{- if .Values.affinity }} + {{- toYaml .Values.affinity | nindent 8 }} + {{- else }} + podAntiAffinity: + {{- if .Values.cluster.hardAntiAffinity }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 18 }} + {{- else }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + {{- include "comqtt.selectorLabels" . | nindent 20 }} + {{- end }} + {{- end }} + containers: + - name: comqtt + image: {{ include "comqtt.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/sh"] + args: + - /etc/comqtt/entrypoint.sh + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HEADLESS_SERVICE + value: {{ include "comqtt.headlessName" . | quote }} + - name: POD_PREFIX + value: {{ include "comqtt.fullname" . 
| quote }} + - name: REPLICA_COUNT + value: {{ .Values.replicaCount | quote }} + - name: GOSSIP_PORT + value: {{ .Values.cluster.gossipPort | quote }} + - name: RAFT_DIR + value: /data/raft + ports: + - name: mqtt + containerPort: 1883 + protocol: TCP + - name: mqtt-ws + containerPort: 1882 + protocol: TCP + - name: dashboard + containerPort: 8080 + protocol: TCP + - name: gossip-tcp + containerPort: {{ .Values.cluster.gossipPort }} + protocol: TCP + - name: gossip-udp + containerPort: {{ .Values.cluster.gossipPort }} + protocol: UDP + - name: raft + containerPort: {{ .Values.cluster.raftPort }} + protocol: TCP + {{- if .Values.cluster.grpcEnable }} + - name: grpc + containerPort: {{ .Values.cluster.grpcPort }} + protocol: TCP + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + {{- omit .Values.livenessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + {{- omit .Values.readinessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: + {{- omit .Values.startupProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/comqtt + readOnly: true + - name: data + mountPath: /data + - name: tmp + mountPath: /tmp + {{- if or .Values.secret.create .Values.secret.existingSecret }} + - name: secrets + mountPath: /etc/comqtt/secrets + readOnly: true + {{- end }} + volumes: + - name: config + configMap: + name: {{ include "comqtt.fullname" . }}-config + defaultMode: 0555 + - name: tmp + emptyDir: {} + {{- if or .Values.secret.create .Values.secret.existingSecret }} + - name: secrets + secret: + secretName: {{ default (include "comqtt.fullname" .) .Values.secret.existingSecret }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- include "comqtt.labels" . | nindent 10 }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 10 }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.storageClass }} + storageClassName: {{ . }} + {{- end }} +{{- end }} diff --git a/deploy/helm/comqtt/templates/tests/test-connection.yaml b/deploy/helm/comqtt/templates/tests/test-connection.yaml new file mode 100644 index 0000000..358f8cc --- /dev/null +++ b/deploy/helm/comqtt/templates/tests/test-connection.yaml @@ -0,0 +1,41 @@ +{{- if .Values.tests.enabled -}} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "comqtt.fullname" . }}-test-connection + labels: + {{- include "comqtt.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + restartPolicy: Never + containers: + - name: mosquitto + image: "{{ .Values.tests.image.repository }}:{{ .Values.tests.image.tag }}" + command: + - /bin/sh + - -c + - | + set -eu + HOST="{{ include "comqtt.fullname" . }}" + PORT="{{ .Values.service.mqtt.port }}" + TOPIC="helm/test/{{ .Release.Name }}" + MSG="connectivity-check-$(date +%s)" + + echo "[test] subscribing on $HOST:$PORT $TOPIC" + mosquitto_sub -h "$HOST" -p "$PORT" -t "$TOPIC" -C 1 -W 15 > /tmp/recv & + SUB=$! 
+ sleep 2 + echo "[test] publishing" + mosquitto_pub -h "$HOST" -p "$PORT" -t "$TOPIC" -m "$MSG" + wait "$SUB" + + got="$(cat /tmp/recv)" + echo "[test] received: $got" + if [ "$got" != "$MSG" ]; then + echo "[test] mismatch — expected '$MSG'" + exit 1 + fi + echo "[test] OK" +{{- end }} diff --git a/deploy/helm/comqtt/values.schema.json b/deploy/helm/comqtt/values.schema.json new file mode 100644 index 0000000..dcef006 --- /dev/null +++ b/deploy/helm/comqtt/values.schema.json @@ -0,0 +1,176 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "comqtt Helm chart values", + "type": "object", + "required": ["mode", "image"], + "properties": { + "mode": { + "type": "string", + "enum": ["single", "cluster"], + "description": "Deployment mode." + }, + "replicaCount": { + "type": "integer", + "minimum": 1, + "description": "Replica count. In cluster mode must be odd (Raft quorum)." + }, + "image": { + "type": "object", + "required": ["repository"], + "properties": { + "repository": { "type": "string", "minLength": 1 }, + "tag": { "type": "string" }, + "pullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"] + }, + "pullSecrets": { "type": "array", "items": { "type": "object" } } + } + }, + "nameOverride": { "type": "string" }, + "fullnameOverride": { "type": "string" }, + "serviceAccount": { + "type": "object", + "properties": { + "create": { "type": "boolean" }, + "annotations": { "type": "object" }, + "name": { "type": "string" } + } + }, + "podAnnotations": { "type": "object" }, + "podLabels": { "type": "object" }, + "podSecurityContext": { "type": "object" }, + "securityContext": { "type": "object" }, + "resources": { "type": "object" }, + "nodeSelector": { "type": "object" }, + "tolerations": { "type": "array" }, + "affinity": { "type": "object" }, + "priorityClassName": { "type": "string" }, + "terminationGracePeriodSeconds": { "type": "integer", "minimum": 0 }, + "livenessProbe": { "type": "object" }, + 
"readinessProbe": { "type": "object" }, + "startupProbe": { "type": "object" }, + "service": { + "type": "object", + "properties": { + "mqtt": { "$ref": "#/$defs/portService" }, + "ws": { "$ref": "#/$defs/portService" }, + "dashboard": { "$ref": "#/$defs/portService" } + } + }, + "ingress": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "className": { "type": "string" }, + "annotations": { "type": "object" }, + "hosts": { "type": "array" }, + "tls": { "type": "array" } + } + }, + "tls": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "existingSecret": { "type": "string" }, + "certManager": { "type": "object" } + } + }, + "config": { "type": "object" }, + "cluster": { + "type": "object", + "properties": { + "discoveryWay": { "type": "integer", "enum": [0, 1] }, + "gossipPort": { "type": "integer", "minimum": 1, "maximum": 65535 }, + "raftPort": { "type": "integer", "minimum": 1, "maximum": 65535 }, + "grpcPort": { "type": "integer", "minimum": 1, "maximum": 65535 }, + "grpcEnable": { "type": "boolean" }, + "raftImpl": { "type": "integer", "enum": [0, 1] }, + "queueDepth": { "type": "integer", "minimum": 0 }, + "inboundPoolSize": { "type": "integer", "minimum": 0 }, + "outboundPoolSize": { "type": "integer", "minimum": 0 }, + "inoutPoolNonblocking": { "type": "boolean" }, + "hardAntiAffinity": { "type": "boolean" }, + "podDisruptionBudget": { + "type": "object", + "properties": { "enabled": { "type": "boolean" } } + } + } + }, + "persistence": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "storageClass": { "type": "string" }, + "accessModes": { + "type": "array", + "items": { + "type": "string", + "enum": ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany", "ReadWriteOncePod"] + } + }, + "size": { "type": "string" }, + "annotations": { "type": "object" } + } + }, + "secret": { + "type": "object", + "properties": { + "create": { "type": "boolean" }, + "existingSecret": { "type": 
"string" }, + "data": { "type": "object" } + } + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "namespace": { "type": "string" }, + "interval": { "type": "string" }, + "scrapeTimeout": { "type": "string" }, + "labels": { "type": "object" } + } + }, + "tests": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "image": { "type": "object" } + } + } + }, + "allOf": [ + { + "if": { "properties": { "mode": { "const": "cluster" } } }, + "then": { + "properties": { + "replicaCount": { + "type": "integer", + "minimum": 1, + "not": { "multipleOf": 2 }, + "description": "Cluster mode requires an odd replica count for Raft quorum." + }, + "persistence": { + "type": "object", + "properties": { "enabled": { "const": true } }, + "required": ["enabled"] + } + } + } + } + ], + "$defs": { + "portService": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"] + }, + "port": { "type": "integer", "minimum": 1, "maximum": 65535 }, + "nodePort": { "type": ["integer", "string"] }, + "annotations": { "type": "object" } + } + } + } +} diff --git a/deploy/helm/comqtt/values.yaml b/deploy/helm/comqtt/values.yaml new file mode 100644 index 0000000..033b9ac --- /dev/null +++ b/deploy/helm/comqtt/values.yaml @@ -0,0 +1,275 @@ +# -- Deployment mode. `single` deploys cmd/single via Deployment; `cluster` +# deploys cmd/cluster via StatefulSet with Raft + Gossip. +mode: single + +# -- Replica count. Honored only in cluster mode. Must be odd for Raft quorum +# (1, 3, 5, 7). The schema enforces this when mode=cluster. +replicaCount: 3 + +image: + # -- Container image repository. + repository: ghcr.io/wind-c/comqtt + # -- Image tag. Tracks Chart.appVersion when empty. Never use `latest`. + tag: "" + pullPolicy: IfNotPresent + # -- Image pull secrets. List of `{name: secret-name}`. + pullSecrets: [] + +# -- Override the chart name. 
+nameOverride: "" +# -- Override the full release name. +fullnameOverride: "" + +serviceAccount: + # -- Create a dedicated ServiceAccount. + create: true + # -- Annotations for the ServiceAccount. + annotations: {} + # -- ServiceAccount name. Auto-generated when empty. + name: "" + +# -- Pod-level annotations. +podAnnotations: {} + +# -- Pod-level labels. +podLabels: {} + +# -- Pod security context. +podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + +# -- Container security context. +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# -- Resource requests. `limits` is unset by default to avoid OOM-killing +# bursty MQTT workloads; configure them explicitly when needed. +resources: + requests: + cpu: 100m + memory: 128Mi + limits: {} + +# -- nodeSelector for pod assignment. +nodeSelector: {} + +# -- Tolerations. +tolerations: [] + +# -- Affinity rules. When set in cluster mode this replaces the chart's default +# pod anti-affinity (see `cluster.hardAntiAffinity`) rather than adding to it. +affinity: {} + +# -- PriorityClass name for the pods. +priorityClassName: "" + +# -- Termination grace period seconds. +terminationGracePeriodSeconds: 30 + +# Liveness/readiness/startup probes default to TCP checks against the MQTT port.
+livenessProbe: + enabled: true + tcpSocket: + port: mqtt + initialDelaySeconds: 15 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + +readinessProbe: + enabled: true + tcpSocket: + port: mqtt + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + successThreshold: 1 + +startupProbe: + enabled: true + tcpSocket: + port: mqtt + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 30 + successThreshold: 1 + +service: + mqtt: + type: ClusterIP + port: 1883 + nodePort: "" + annotations: {} + ws: + type: ClusterIP + port: 1882 + nodePort: "" + annotations: {} + dashboard: + type: ClusterIP + port: 8080 + nodePort: "" + annotations: {} + +ingress: + # -- Enable Ingress for the dashboard. NOTE: a standard HTTP Ingress cannot + # proxy raw MQTT TCP. To expose MQTT externally, use a LoadBalancer + # Service or an Ingress controller with TCP/passthrough support. + enabled: false + className: "" + annotations: {} + hosts: + - host: comqtt.local + paths: + - path: / + pathType: Prefix + tls: [] + +tls: + # -- Enable MQTT TLS. When enabled either provide `existingSecret` or wire + # up cert-manager via `certManager.issuerRef`. + enabled: false + # -- Pre-existing Secret containing tls.crt, tls.key, optionally ca.crt. + existingSecret: "" + certManager: + enabled: false + issuerRef: + name: "" + kind: ClusterIssuer + +# -- Configuration mirroring cmd/config/single.yml and cmd/config/node1.yml. +# These keys render verbatim into a ConfigMap mounted at /etc/comqtt. +config: + # 0 memory, 1 bolt, 2 badger, 3 redis. Cluster mode requires 3 (redis). 
+ storage-way: 1 + storage-path: /data/comqtt.db + bridge-way: 0 + bridge-path: /etc/comqtt/bridge-kafka.yml + pprof-enable: false + + auth: + # 0 anonymous, 1 username/password, 2 clientid + way: 0 + # 0 free, 1 redis, 2 mysql, 3 postgresql, 4 http + datasource: 0 + conf-path: "" + blacklist-path: "" + + mqtt: + tcp: ":1883" + ws: ":1882" + http: ":8080" + tls: + ca-cert: "" + server-cert: "" + server-key: "" + options: + client-write-buffer-size: 1024 + client-read-buffer-size: 1024 + sys-topic-resend-interval: 1 + inline-client: true + capabilities: + compatibilities: + obscure-not-authorized: false + passive-client-disconnect: false + always-return-response: false + restore-sys-info-restart: false + maximum-message-expiry-interval: 86400 + maximum-session-expiry-interval: 4294967295 + maximum-client-writes-pending: 65535 + maximum-packet-size: 0 + receive-maximum: 1024 + topic-alias-maximum: 65535 + maximum-qos: 2 + retain-available: 1 + wildcard-sub-available: 1 + sub-id-available: 1 + shared-sub-available: 1 + minimum-protocol-version: 3 + + redis: + options: + addr: "" + username: "" + password: "" + db: 0 + prefix: comqtt + + log: + enable: true + format: 1 + output: 0 + filename: /data/logs/comqtt.log + maxsize: 100 + max-age: 30 + max-backups: 10 + compress: true + level: 0 + +# Cluster-only configuration. Ignored when mode=single. +cluster: + # 0 serf, 1 memberlist + discoveryWay: 0 + gossipPort: 7946 + raftPort: 8946 + grpcPort: 17946 + grpcEnable: true + # 0 hashicorp/raft, 1 etcd/raft + raftImpl: 0 + queueDepth: 10240 + inboundPoolSize: 40960 + outboundPoolSize: 40960 + inoutPoolNonblocking: false + # Hard-anti-affinity escalates the soft default to required scheduling. + hardAntiAffinity: false + podDisruptionBudget: + enabled: true + +# Per-replica persistence. Required (and validated) in cluster mode for +# Raft log durability. Optional in single mode for bolt/badger. 
+persistence: + enabled: true + storageClass: "" + accessModes: + - ReadWriteOnce + size: 5Gi + annotations: {} + +# Secret rendering. Provide credential references rather than embedding +# plaintext in values.yaml. The Secret is mounted at /etc/comqtt/secrets. +secret: + # -- Create the Secret. Set false when supplying an externally-managed Secret. + create: false + # -- Use an existing Secret (overrides `data`). + existingSecret: "" + # -- Key/value pairs rendered into the Secret. Keys become file names. + data: {} + # redis-password: "..." + # mysql-password: "..." + # postgresql-password: "..." + +serviceMonitor: + # -- Create a ServiceMonitor for kube-prometheus-stack. + enabled: false + namespace: "" + interval: 30s + scrapeTimeout: 10s + labels: {} + +# -- Test pod settings (used by `helm test`). +tests: + enabled: true + image: + repository: eclipse-mosquitto + tag: "2" From 002a3112d9906bb97d800092d847b44d35164b24 Mon Sep 17 00:00:00 2001 From: Debashish Sahu Date: Mon, 4 May 2026 19:49:01 -0400 Subject: [PATCH 2/4] fix(helm): keep test pod after success so helm test --logs works The test-connection pod had hook-delete-policy "before-hook-creation, hook-succeeded", which deletes the pod immediately on success. The CI step `helm test single --logs` then errors out because the pod is gone before its logs can be fetched. Drop hook-succeeded; the pod still gets cleaned up before the next `helm test` run via before-hook-creation. --- deploy/helm/comqtt/templates/tests/test-connection.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/comqtt/templates/tests/test-connection.yaml b/deploy/helm/comqtt/templates/tests/test-connection.yaml index 358f8cc..b3125f0 100644 --- a/deploy/helm/comqtt/templates/tests/test-connection.yaml +++ b/deploy/helm/comqtt/templates/tests/test-connection.yaml @@ -7,7 +7,7 @@ metadata: {{- include "comqtt.labels" . 
| nindent 4 }} annotations: "helm.sh/hook": test - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-delete-policy": before-hook-creation spec: restartPolicy: Never containers: From 12362cc0aadd64a1f2ead6452a72d9eeb0354b1c Mon Sep 17 00:00:00 2001 From: Debashish Sahu Date: Mon, 4 May 2026 20:12:38 -0400 Subject: [PATCH 3/4] fix(release): create tag from workflow_dispatch input The workflow accepted a "tag" input but never used it. Without an existing v* tag, GoReleaser fell back to whatever the most recent tag was (here: chart-releaser's "comqtt-0.1.0") and failed to parse it as semver. On workflow_dispatch, create and push the requested tag before invoking GoReleaser. The push: tags: v* path is unaffected. --- .github/workflows/release.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2d6840a..a28c8f8 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -46,6 +46,16 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Create and push tag (manual dispatch only) + if: github.event_name == 'workflow_dispatch' + env: + TAG: ${{ inputs.tag }} + run: | + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git tag -a "$TAG" -m "Release $TAG" + git push origin "$TAG" + - name: Run GoReleaser uses: goreleaser/goreleaser-action@v6 with: From 6673e9cbb1b5e872a0ea34f1167a0861cd806f99 Mon Sep 17 00:00:00 2001 From: Debashish Sahu Date: Mon, 4 May 2026 20:18:35 -0400 Subject: [PATCH 4/4] fix(release): allow per-platform binary asymmetry; parameterize GHCR owner GoReleaser failed at the archive step because cmd/single builds for 5 platforms (incl. windows) while cmd/cluster builds for 4 (no windows). The single shared archive then had different binary counts per platform and GoReleaser refused to package it. 
Set archives.allow_different_binary_count: true so the asymmetry is intentional (windows users get just comqtt; linux/darwin get both binaries). Also parameterize the GHCR image owner via $IMAGE_OWNER (lowercased github.repository_owner) so the same config publishes to ghcr.io/wind-c/comqtt on upstream and ghcr.io/debsahu/comqtt on the fork. Drop the hardcoded release.github.owner so GoReleaser uses the running repo automatically. --- .github/workflows/release.yaml | 9 +++++++++ .goreleaser.yaml | 32 ++++++++++++++++---------------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a28c8f8..c16e834 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -56,6 +56,14 @@ jobs: git tag -a "$TAG" -m "Release $TAG" git push origin "$TAG" + # GHCR image names must be lowercase. github.repository_owner is + # already lowercase for users/orgs, but we lowercase defensively. + - name: Compute lowercase image owner + id: owner + run: echo "value=${OWNER,,}" >> "$GITHUB_OUTPUT" + env: + OWNER: ${{ github.repository_owner }} + - name: Run GoReleaser uses: goreleaser/goreleaser-action@v6 with: @@ -64,3 +72,4 @@ jobs: args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IMAGE_OWNER: ${{ steps.owner.outputs.value }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 90af021..84400d8 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -44,6 +44,9 @@ archives: ids: - comqtt - comqtt-cluster + # Windows skips cmd/cluster, so its archive has 1 binary while + # linux/darwin archives have 2. Allow that intentional asymmetry. 
+ allow_different_binary_count: true name_template: >- {{ .ProjectName }}_{{ .Version }}_ {{- title .Os }}_ @@ -97,8 +100,8 @@ dockers: - comqtt - comqtt-cluster image_templates: - - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" - - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" dockerfile: Dockerfile.goreleaser extra_files: - cmd/config @@ -120,8 +123,8 @@ dockers: - comqtt - comqtt-cluster image_templates: - - "ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" - - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-arm64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" dockerfile: Dockerfile.goreleaser extra_files: - cmd/config @@ -136,23 +139,20 @@ dockers: - "--label=org.opencontainers.image.licenses=MIT" docker_manifests: - - name_template: "ghcr.io/wind-c/comqtt:{{ .Version }}" + - name_template: "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}" image_templates: - - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" - - "ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" - - name_template: "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-arm64" + - name_template: "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:v{{ .Major }}.{{ .Minor }}" image_templates: - - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" - - "ghcr.io/wind-c/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" - - name_template: "ghcr.io/wind-c/comqtt:latest" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:v{{ .Major }}.{{ .Minor }}-amd64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:v{{ .Major }}.{{ .Minor }}-arm64" + - name_template: "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:latest" image_templates: - - "ghcr.io/wind-c/comqtt:{{ .Version }}-amd64" - - 
"ghcr.io/wind-c/comqtt:{{ .Version }}-arm64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.IMAGE_OWNER }}/comqtt:{{ .Version }}-arm64" release: - github: - owner: wind-c - name: comqtt prerelease: auto draft: false name_template: "v{{ .Version }}"