diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..1e51ec78aa --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +# Vendored upstream YAML for kubernetes-sigs/agent-sandbox (operator + CRDs). +# Refreshed via deploy/helm/sandbox-operator/vendor.sh — do not hand-edit. +# Marking as generated so GitHub collapses the diff in PRs and excludes it +# from language stats; bumps are still reviewable by reading vendor.sh's +# version arg. The sandbox-env chart's templates are first-party and stay +# reviewable. +deploy/helm/sandbox-operator/crds/** linguist-generated=true +deploy/helm/sandbox-operator/templates/agent-sandbox-manifest.yaml linguist-generated=true diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml new file mode 100644 index 0000000000..57d4dcee40 --- /dev/null +++ b/.github/workflows/helm-test.yml @@ -0,0 +1,139 @@ +name: Helm chart checks + +# PR-scoped lint + render + vendor-drift checks for the Helm charts. The +# release workflows (release-sandbox-charts.yaml, publish-chart.yml) are +# publish-only — without this gate, a helpers typo or a hand-edit to +# vendored CRDs ships straight to consumers. Runs on every PR that touches +# deploy/helm/** so reviewers see render failures inline. + +on: + pull_request: + paths: + - 'deploy/helm/**' + - '.github/workflows/helm-test.yml' + workflow_dispatch: + +jobs: + sandbox-operator: + name: sandbox-operator chart + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.16.4 + + - name: Lint + # The chart's namespace validation (templates/validations.yaml) + # requires `agent-sandbox-system`. Pass it here so render-time + # checks during lint don't see the wrong namespace. 
+ run: helm lint deploy/helm/sandbox-operator --namespace agent-sandbox-system + + - name: Render + run: | + helm template sandbox-operator deploy/helm/sandbox-operator \ + --namespace agent-sandbox-system \ + > /dev/null + + # Catches hand-edits to crds/ or templates/agent-sandbox-manifest.yaml + # that bypass vendor.sh. The .gitattributes linguist-generated marker + # collapses those files in PR review, so without this gate a manual + # tweak would slip through unnoticed. + - name: Vendor drift check + run: | + set -euo pipefail + bash deploy/helm/sandbox-operator/vendor.sh + if ! git diff --exit-code -- \ + deploy/helm/sandbox-operator/crds \ + deploy/helm/sandbox-operator/templates/agent-sandbox-manifest.yaml; then + echo "::error::Vendored files differ from vendor.sh output. Re-run deploy/helm/sandbox-operator/vendor.sh and commit the result." + exit 1 + fi + + sandbox-env: + name: sandbox-env chart + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.16.4 + + - name: Lint + # envName is required, so lint must pass it. Use a representative + # value rather than `Release.Name` because helm lint doesn't drive + # render with a release name. 
+ run: | + helm lint deploy/helm/sandbox-env \ + --namespace agent-sandbox-system \ + --set envName=ci + + - name: Render (default values) + run: | + helm template sandbox-env deploy/helm/sandbox-env \ + --namespace agent-sandbox-system \ + --set envName=ci \ + > /dev/null + + - name: Render (preview gateway enabled) + run: | + helm template sandbox-env deploy/helm/sandbox-env \ + --namespace agent-sandbox-system \ + --set envName=ci \ + --set previewGateway.enabled=true \ + --set previewGateway.domain=preview.example.com \ + --set previewGateway.clusterIssuer=letsencrypt-prod \ + --api-versions gateway.networking.k8s.io/v1 \ + --api-versions cert-manager.io/v1 \ + > /dev/null + + - name: Render (warm pool enabled) + run: | + helm template sandbox-env deploy/helm/sandbox-env \ + --namespace agent-sandbox-system \ + --set envName=ci \ + --set warmPool.enabled=true \ + --set warmPool.size=2 \ + > /dev/null + + - name: Render (envName missing must fail) + run: | + set +e + helm template sandbox-env deploy/helm/sandbox-env \ + --namespace agent-sandbox-system \ + > /tmp/render.out 2>&1 + rc=$? + set -e + if [ "${rc}" -eq 0 ]; then + echo "::error::sandbox-env rendered without envName — required-value check is missing." + cat /tmp/render.out + exit 1 + fi + if ! grep -q "envName is required" /tmp/render.out; then + echo "::error::sandbox-env failed for the wrong reason — expected envName-required error." + cat /tmp/render.out + exit 1 + fi + + studio: + name: studio chart + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.16.4 + + - name: Lint + run: helm lint deploy/helm/studio + + # Subchart .tgz files are vendored under deploy/helm/studio/charts/ + # (see .gitignore comment) — no `helm dependency build` needed. 
+ - name: Render + run: helm template deco-studio deploy/helm/studio > /dev/null diff --git a/.github/workflows/publish-chart.yml b/.github/workflows/publish-chart.yml index 4d3014c096..94f4bf5c2f 100644 --- a/.github/workflows/publish-chart.yml +++ b/.github/workflows/publish-chart.yml @@ -5,7 +5,7 @@ on: branches: - main paths: - - 'deploy/helm/Chart.yaml' + - 'deploy/helm/studio/Chart.yaml' jobs: publish: @@ -22,13 +22,13 @@ jobs: - name: Get chart version id: chart - run: echo "version=$(grep '^version:' deploy/helm/Chart.yaml | awk '{print $2}')" >> $GITHUB_OUTPUT + run: echo "version=$(grep '^version:' deploy/helm/studio/Chart.yaml | awk '{print $2}')" >> $GITHUB_OUTPUT - name: Login to GHCR run: helm registry login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }} - name: Package chart - run: helm package deploy/helm/ --dependency-update + run: helm package deploy/helm/studio/ --dependency-update - name: Push chart to GHCR run: helm push chart-deco-studio-${{ steps.chart.outputs.version }}.tgz oci://ghcr.io/decocms diff --git a/.github/workflows/release-sandbox-charts.yaml b/.github/workflows/release-sandbox-charts.yaml new file mode 100644 index 0000000000..dacd323f48 --- /dev/null +++ b/.github/workflows/release-sandbox-charts.yaml @@ -0,0 +1,165 @@ +name: Release sandbox Helm charts + +on: + push: + branches: [main] + paths: + - "deploy/helm/sandbox-operator/**" + - "deploy/helm/sandbox-env/**" + workflow_dispatch: + +env: + REGISTRY: ghcr.io + # OCI charts in ghcr.io live at //:. Helm + # treats the parent path as the "repo" (here ghcr.io/decocms/studio/charts) + # and the chart name+version as ref. Argo CD's helm source supports this + # natively via repoURL=// + chart=. + OCI_REPO: oci://ghcr.io/${{ github.repository }}/charts + +jobs: + release: + name: Package & push ${{ matrix.chart }} + runs-on: ubuntu-latest + # Run sandbox-operator and sandbox-env in parallel — independent OCI + # tags, no shared mutable state. 
+ strategy: + fail-fast: false + matrix: + chart: [sandbox-operator, sandbox-env] + # `[release]:` commits come from the auto-bump bot and don't actually + # change chart contents — skip them so we don't republish the same + # version. + if: ${{ !startsWith(github.event.head_commit.message, '[release]:') }} + permissions: + contents: read + packages: write + # OIDC token for keyless cosign signing (Sigstore Fulcio). + id-token: write + # Required to attach SLSA provenance attestations. + attestations: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.16.4 + + - name: Read chart version + id: version + env: + CHART_PATH: deploy/helm/${{ matrix.chart }} + run: | + set -euo pipefail + VERSION=$(awk '/^version:/{print $2; exit}' "${CHART_PATH}/Chart.yaml") + NAME=$(awk '/^name:/{print $2; exit}' "${CHART_PATH}/Chart.yaml") + echo "version=${VERSION}" >> "${GITHUB_OUTPUT}" + echo "name=${NAME}" >> "${GITHUB_OUTPUT}" + echo "path=${CHART_PATH}" >> "${GITHUB_OUTPUT}" + echo "::notice::Chart ${NAME} version ${VERSION}" + + - name: Log in to GitHub Container Registry + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | \ + helm registry login "${REGISTRY}" \ + --username "${{ github.actor }}" \ + --password-stdin + + # Skip republishing if the version tag already exists. Without this + # guard, every push to main that touches the chart silently mutates + # the chart under a tag consumers may already be pinning. + - name: Check if chart version already exists + id: tag-check + run: | + set -euo pipefail + # `helm show chart` against an OCI ref hits the registry; success + # means the tag is published. 
+ if helm show chart "${OCI_REPO}/${{ steps.version.outputs.name }}" \ + --version "${{ steps.version.outputs.version }}" \ + >/dev/null 2>&1; then + echo "exists=true" >> "${GITHUB_OUTPUT}" + echo "::notice::Chart ${{ steps.version.outputs.name }}-${{ steps.version.outputs.version }} already published — skipping. Bump ${{ steps.version.outputs.path }}/Chart.yaml version to publish." + else + echo "exists=false" >> "${GITHUB_OUTPUT}" + fi + + - name: Package chart + if: steps.tag-check.outputs.exists != 'true' + id: package + run: | + set -euo pipefail + helm package "${{ steps.version.outputs.path }}" --destination dist + PACKAGE="dist/${{ steps.version.outputs.name }}-${{ steps.version.outputs.version }}.tgz" + echo "package=${PACKAGE}" >> "${GITHUB_OUTPUT}" + + - name: Push chart to OCI registry + if: steps.tag-check.outputs.exists != 'true' + id: push + run: | + set -euo pipefail + PUSH_OUT=$(helm push "${{ steps.package.outputs.package }}" "${OCI_REPO}" 2>&1) + echo "${PUSH_OUT}" + DIGEST=$(echo "${PUSH_OUT}" | awk '/Digest:/{print $2}') + if [ -z "${DIGEST}" ]; then + echo "::error::Could not extract digest from helm push output" + exit 1 + fi + echo "digest=${DIGEST}" >> "${GITHUB_OUTPUT}" + + # Keyless cosign signing for parity with the image release workflow. + # Verifies provenance via Sigstore's public transparency log without + # long-lived keys. 
Verify downstream with: + # cosign verify ghcr.io/decocms/studio/charts/: \ + # --certificate-identity-regexp 'https://github.com/decocms/studio/.*' \ + # --certificate-oidc-issuer https://token.actions.githubusercontent.com + - name: Install cosign + if: steps.tag-check.outputs.exists != 'true' + uses: sigstore/cosign-installer@v3 + + - name: Sign chart with cosign + if: steps.tag-check.outputs.exists != 'true' + env: + DIGEST: ${{ steps.push.outputs.digest }} + NAME: ${{ steps.version.outputs.name }} + VERSION: ${{ steps.version.outputs.version }} + run: | + set -euo pipefail + REF="${{ env.REGISTRY }}/${{ github.repository }}/charts/${NAME}@${DIGEST}" + cosign sign --yes "${REF}" + + - name: Generate SLSA build provenance + if: steps.tag-check.outputs.exists != 'true' + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.REGISTRY }}/${{ github.repository }}/charts/${{ steps.version.outputs.name }} + subject-digest: ${{ steps.push.outputs.digest }} + push-to-registry: true + + - name: Release summary + if: steps.tag-check.outputs.exists != 'true' + run: | + { + echo "## Helm chart published" + echo "" + echo "**${{ steps.version.outputs.name }} ${{ steps.version.outputs.version }}**" + echo "" + echo "### Install" + echo '```bash' + if [ "${{ matrix.chart }}" = "sandbox-operator" ]; then + echo "helm install sandbox-operator ${OCI_REPO}/${{ steps.version.outputs.name }} \\" + echo " --version ${{ steps.version.outputs.version }} \\" + echo " --namespace agent-sandbox-system --create-namespace" + else + echo "helm install sandbox-env- ${OCI_REPO}/${{ steps.version.outputs.name }} \\" + echo " --version ${{ steps.version.outputs.version }} \\" + echo " --namespace agent-sandbox-system \\" + echo " --set envName= \\" + echo " --set mesh.namespace= \\" + echo " --set mesh.serviceAccountName= \\" + echo " --set mesh.serviceName= \\" + echo " --set mesh.servicePort=80" + fi + echo '```' + } >> "${GITHUB_STEP_SUMMARY}" diff --git 
a/.github/workflows/release-studio-sandbox.yaml b/.github/workflows/release-studio-sandbox.yaml new file mode 100644 index 0000000000..9591c6a53d --- /dev/null +++ b/.github/workflows/release-studio-sandbox.yaml @@ -0,0 +1,144 @@ +name: Release Studio Sandbox Image + +on: + push: + branches: [main] + paths: + - "packages/sandbox/**" + workflow_dispatch: + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}/studio-sandbox + +jobs: + build-push: + name: Build & Push studio-sandbox image + runs-on: ubuntu-latest + # `[release]:` commits come from the auto-bump bot and don't actually + # change the sandbox source — skip them to avoid republishing the same + # version on every release tag bump. + if: ${{ !startsWith(github.event.head_commit.message, '[release]:') }} + permissions: + contents: read + packages: write + # OIDC token for keyless cosign signing (Sigstore Fulcio). + id-token: write + # Required to attach SLSA provenance / SBOM attestations. + attestations: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: "1.3.5" + + - name: Install dependencies + run: bun install + + # The Dockerfile copies daemon/dist/daemon.js into the image, so the + # bundle has to exist before `docker build` runs. + - name: Build daemon bundle + run: bun run --cwd=packages/sandbox build + + - name: Read sandbox version + id: version + run: | + VERSION=$(bun -e "console.log(require('./packages/sandbox/package.json').version)") + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Skip republishing if the version tag already exists in GHCR. 
Without + # this guard, every push to main that touches packages/sandbox/** + # (including release-bump commits that get filtered above, but also + # docs-only changes inside the package) silently mutates the image + # under a tag consumers may already be pinning. + - name: Check if version tag already exists + id: tag-check + run: | + set -euo pipefail + TOKEN=$(echo -n "${{ secrets.GITHUB_TOKEN }}" | base64) + STATUS=$(curl -sS -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer ${TOKEN}" \ + "https://${{ env.REGISTRY }}/v2/${{ env.IMAGE_NAME }}/manifests/${{ steps.version.outputs.version }}") + if [ "${STATUS}" = "200" ]; then + echo "exists=true" >> $GITHUB_OUTPUT + echo "::notice::Image ${{ env.IMAGE_NAME }}:${{ steps.version.outputs.version }} already exists — skipping push. Bump packages/sandbox/package.json to publish." + else + echo "exists=false" >> $GITHUB_OUTPUT + fi + + - name: Set up QEMU + if: steps.tag-check.outputs.exists != 'true' + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + if: steps.tag-check.outputs.exists != 'true' + uses: docker/setup-buildx-action@v3 + + # `:latest` is intentionally only emitted on manual workflow_dispatch + # runs. Branch pushes get the version tag + sha tag, never `:latest`, + # so chart consumers who pin against `:latest` at least know it was a + # deliberate human action that moved it. 
+ - name: Extract metadata (tags, labels) + id: meta + if: steps.tag-check.outputs.exists != 'true' + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=${{ steps.version.outputs.version }} + type=raw,value=latest,enable=${{ github.event_name == 'workflow_dispatch' }} + type=sha,format=short + + - name: Build and push Docker image + id: build + if: steps.tag-check.outputs.exists != 'true' + uses: docker/build-push-action@v5 + with: + context: ./packages/sandbox + file: ./packages/sandbox/image/Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64,linux/arm64 + + # Keyless cosign signing — verifies provenance via Sigstore's public + # transparency log without requiring long-lived signing keys. Verify + # downstream with: + # cosign verify ghcr.io/decocms/studio/studio-sandbox: \ + # --certificate-identity-regexp 'https://github.com/decocms/studio/.*' \ + # --certificate-oidc-issuer https://token.actions.githubusercontent.com + - name: Install cosign + if: steps.tag-check.outputs.exists != 'true' + uses: sigstore/cosign-installer@v3 + + - name: Sign image with cosign + if: steps.tag-check.outputs.exists != 'true' + env: + DIGEST: ${{ steps.build.outputs.digest }} + TAGS: ${{ steps.meta.outputs.tags }} + run: | + set -euo pipefail + for tag in $(echo "${TAGS}" | tr ',' '\n'); do + cosign sign --yes "${tag}@${DIGEST}" + done + + - name: Generate SLSA build provenance + if: steps.tag-check.outputs.exists != 'true' + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + subject-digest: ${{ steps.build.outputs.digest }} + push-to-registry: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 78825be2bf..7ac732cbd4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -159,7 
+159,7 @@ jobs: - name: Build sandbox image run: | docker build \ - -t mesh-sandbox:ci \ + -t studio-sandbox:ci \ -f packages/sandbox/image/Dockerfile \ packages/sandbox @@ -171,7 +171,7 @@ jobs: -e APP_ROOT=/app \ -e PROXY_PORT=9000 \ -e DAEMON_NO_AUTOSTART=1 \ - mesh-sandbox:ci + studio-sandbox:ci for i in $(seq 1 30); do if curl -fsS http://localhost:19999/health | grep -q '"bootId":"ci-smoke"'; then echo "ok" diff --git a/.gitignore b/.gitignore index 72f3086936..9bf21d4f44 100644 --- a/.gitignore +++ b/.gitignore @@ -82,3 +82,17 @@ apps/mesh/playwright-report/ # Local dev data directory .deco + +# `helm package deploy/helm/sandbox-operator` (and …/sandbox-env) produces a +# .tgz that is published to ghcr.io as an OCI artifact by +# .github/workflows/release-sandbox-charts.yaml. The unpacked trees under +# deploy/helm/sandbox-{operator,env}/ are the source of truth; the packaged +# .tgz is redundant in-repo and would just drift. +deploy/helm/sandbox-operator-*.tgz +deploy/helm/sandbox-operator/charts/ +deploy/helm/sandbox-env-*.tgz +deploy/helm/sandbox-env/charts/ +# `helm dependency update` for the studio chart caches remote subcharts +# (nats, opentelemetry-collector) as .tgz under charts/. They stay tracked +# because the chart pins specific versions and offline installs depend on +# them being present. diff --git a/apps/mesh/src/cli/build-child-env.ts b/apps/mesh/src/cli/build-child-env.ts index af1deaffb7..eb997bbb05 100644 --- a/apps/mesh/src/cli/build-child-env.ts +++ b/apps/mesh/src/cli/build-child-env.ts @@ -78,6 +78,10 @@ export function buildChildEnv( // Sandbox runner: read from env by resolveRunnerKindFromEnv() in workers STUDIO_SANDBOX_RUNNER: process.env.STUDIO_SANDBOX_RUNNER, + // Per-env SandboxTemplate name override (sandbox-env Helm chart suffixes + // it with envName). Workers must inherit so claim creation hits the + // right template. 
+ STUDIO_SANDBOX_TEMPLATE_NAME: process.env.STUDIO_SANDBOX_TEMPLATE_NAME, FREESTYLE_API_KEY: process.env.FREESTYLE_API_KEY, // Browserless diff --git a/apps/mesh/src/cli/sandbox-image.ts b/apps/mesh/src/cli/sandbox-image.ts index 59850b32ac..c3ba0de439 100644 --- a/apps/mesh/src/cli/sandbox-image.ts +++ b/apps/mesh/src/cli/sandbox-image.ts @@ -7,13 +7,13 @@ import { addLogEntry } from "./cli-store"; * awaits the same singleton, so any failure surfaces there with context. * * Skipped in production (image is expected to be registry-hosted) and when - * `MESH_SANDBOX_IMAGE` points elsewhere (user opted into a registry image). + * `STUDIO_SANDBOX_IMAGE` points elsewhere (user opted into a registry image). */ export async function kickoffSandboxImageBuild(opts: { noTui: boolean; }): Promise { if (process.env.NODE_ENV === "production") return; - if (process.env.MESH_SANDBOX_IMAGE) return; + if (process.env.STUDIO_SANDBOX_IMAGE) return; const { tryResolveRunnerKindFromEnv, ensureSandboxImage } = await import( "@decocms/sandbox/runner" diff --git a/apps/mesh/src/sandbox/lifecycle.ts b/apps/mesh/src/sandbox/lifecycle.ts index 24068b7dc1..df984e57e7 100644 --- a/apps/mesh/src/sandbox/lifecycle.ts +++ b/apps/mesh/src/sandbox/lifecycle.ts @@ -56,6 +56,17 @@ function readPreviewUrlPattern(): string | undefined { return raw && raw.trim() !== "" ? raw : undefined; } +// Per-env SandboxTemplate name. The sandbox-env Helm chart suffixes the +// template name with envName so multiple envs share `agent-sandbox-system` +// without collisions; mesh in this env must point its claims at the +// matching suffixed name. Empty/unset → AgentSandboxRunner's built-in +// default ("studio-sandbox") so single-env installs that didn't suffix +// keep working. +function readSandboxTemplateName(): string | undefined { + const raw = process.env.STUDIO_SANDBOX_TEMPLATE_NAME; + return raw && raw.trim() !== "" ? 
raw : undefined; +} + async function instantiate( kind: RunnerKind, db: Kysely, @@ -86,6 +97,7 @@ async function instantiate( return new AgentSandboxRunner({ stateStore, previewUrlPattern, + sandboxTemplateName: readSandboxTemplateName(), meter, }); } diff --git a/deploy/helm/sandbox-env/.helmignore b/deploy/helm/sandbox-env/.helmignore new file mode 100644 index 0000000000..ac5f866cec --- /dev/null +++ b/deploy/helm/sandbox-env/.helmignore @@ -0,0 +1,18 @@ +# Helm conventional ignores +.DS_Store +.git/ +.gitignore +.bzr/ +.hg/ +.svn/ +*.tmproj +.vscode/ +.idea/ +*.swp +*.bak +*.tmp +*.orig +*~ + +# Examples folder is for documentation in-tree only +examples/ diff --git a/deploy/helm/sandbox-env/Chart.yaml b/deploy/helm/sandbox-env/Chart.yaml new file mode 100644 index 0000000000..36f2bb1f46 --- /dev/null +++ b/deploy/helm/sandbox-env/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: sandbox-env +description: | + Studio-side resources that consume the agent-sandbox operator: the + shared SandboxTemplate, mesh runner RBAC, sandbox-pod NetworkPolicy, + optional SandboxWarmPool, and optional preview Gateway / HTTPRoute / + Certificate. Install one release per environment (dev / staging / + prod / ...). Resource names are suffixed with `envName` so multiple + releases coexist in the shared `agent-sandbox-system` namespace. + Requires the sandbox-operator chart to already be installed. +type: application +version: 0.1.0 +# appVersion tracks the studio-sandbox image version (image.tag default). +appVersion: "0.1.0" +kubeVersion: ">=1.30.0-0" diff --git a/deploy/helm/sandbox-env/README.md b/deploy/helm/sandbox-env/README.md new file mode 100644 index 0000000000..d7d030f1b3 --- /dev/null +++ b/deploy/helm/sandbox-env/README.md @@ -0,0 +1,208 @@ +# sandbox-env Helm chart + +Studio-side resources that consume the agent-sandbox operator. Install one +release per environment (dev / staging / prod / ...) 
— every resource name +is suffixed with `envName` so multiple releases coexist in the shared +`agent-sandbox-system` namespace without collisions. + +Renders: + +- `SandboxTemplate` `studio-sandbox-` +- `Role` + `RoleBinding` `studio-sandbox-runner-` (for the mesh + ServiceAccount of THIS env's studio install) +- `NetworkPolicy` `studio-sandbox-` (per-env podSelector) +- `SandboxWarmPool` `studio-sandbox-` (optional) +- `Gateway` + `HTTPRoute` + `Certificate` + `agent-sandbox-preview-` (optional) + +Requires the [`sandbox-operator`](../sandbox-operator/) chart to already be +installed (it ships the CRDs + controller). + +## Prerequisites + +- `sandbox-operator` chart installed in `agent-sandbox-system`. +- Kubernetes 1.30+ (for `spec.hostUsers: false` user namespace remap). +- The studio release for THIS environment must point its mesh runner at + the env-suffixed SandboxTemplate by setting + `STUDIO_SANDBOX_TEMPLATE_NAME=studio-sandbox-` in the studio + chart's `configMap.meshConfig`. Without that override the runner falls + back to `studio-sandbox` (no suffix) and claim creation fails with + `sandboxtemplate not found`. + +## Preview gateway auth model + +If you flip `previewGateway.enabled=true`, read this first. + +The Host header is the *only* authorization on `*.preview.` (no +listener-level auth, matching how Vercel preview URLs work). That means +sandbox handles travel in plaintext through every CDN / LB / proxy in the +request path and will appear in their access logs. Treat handles as +URL-grade secrets — do not share in tickets, screenshots, etc. + +For tighter isolation, terminate auth at the Gateway with an +`AuthorizationPolicy` (Istio) or extauth (Envoy) in front of this listener. +This chart does not do that for you. + +**Multi-env note:** two envs can both enable `previewGateway` only if they +use different `previewGateway.domain` values. 
The resource names are +envName-suffixed but the listener hostname (`*.`) must be unique +per Gateway — two Gateways binding the same wildcard hostname conflict at +the controller level. + +## Shared build cache + +Setting `cache.enabled=true` mounts a single PVC at `cache.mountPath` (default +`/mnt/cache`) in every sandbox pod and redirects each package manager's cache +directory there via env vars (`npm_config_cache`, `BUN_INSTALL_CACHE_DIR`, +`PNPM_STORE_PATH`, `YARN_CACHE_FOLDER`, `DENO_DIR`, `XDG_CACHE_HOME`). The +first sandbox that downloads a package version populates the cache; every +subsequent sandbox running the same dep skips the registry download entirely. + +**EKS prerequisite — EFS StorageClass (required for ReadWriteMany):** + +The cluster default StorageClass on EKS is EBS (gp2/gp3), which does not +support `ReadWriteMany`. Setting `cache.enabled=true` without overriding +`cache.storageClass` will cause the PVC to stay `Pending`. The chart validates +this at template time and fails with a clear error. + +1. [Install the AWS EFS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html) +2. Create an EFS filesystem (one per cluster is enough) +3. Create a `StorageClass` pointing at it, using `throughputMode: elastic` for + 30+ concurrent builds: + +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: efs-sc +provisioner: efs.csi.aws.com +parameters: + provisioningMode: efs-ap + fileSystemId: + directoryPerms: "700" +mountOptions: + - tls +``` + +4. Set `cache.storageClass: efs-sc` in your values. + +**Kind (local dev):** use `examples/values-kind.yaml`, which overrides to +`accessMode: ReadWriteOnce` + `storageClass: standard` — both supported by +`local-path-provisioner` on a single-node cluster. + +**PVC lifecycle:** the PVC carries `helm.sh/resource-policy: keep` so it +survives `helm upgrade` and `helm uninstall`. 
Delete it manually to evict the +cache: + +```bash +kubectl delete pvc studio-sandbox--cache -n agent-sandbox-system +``` + +## Install + +Published as an OCI artifact at +`oci://ghcr.io/decocms/studio/charts/sandbox-env` by +`.github/workflows/release-sandbox-charts.yaml`. + +```bash +helm install sandbox-env-staging \ + oci://ghcr.io/decocms/studio/charts/sandbox-env \ + --version 0.1.0 \ + --namespace agent-sandbox-system \ + --set envName=staging \ + --set mesh.namespace=deco-studio-staging \ + --set mesh.serviceAccountName=deco-studio-staging \ + --set mesh.serviceName=deco-studio-staging \ + --set mesh.servicePort=80 +``` + +Then point the studio (chart-deco-studio) release for the same env at +this runner: + +```yaml +# in your studio values.yaml (for the staging install) +configMap: + meshConfig: + STUDIO_SANDBOX_RUNNER: "agent-sandbox" + STUDIO_SANDBOX_TEMPLATE_NAME: "studio-sandbox-staging" + STUDIO_SANDBOX_PREVIEW_URL_PATTERN: "https://{handle}.preview.staging.example.com" +``` + +### ArgoCD Application (one per env) + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: sandbox-env-staging + namespace: argocd +spec: + project: default + source: + repoURL: ghcr.io/decocms/studio/charts + chart: sandbox-env + targetRevision: 0.1.0 + helm: + values: | + envName: staging + mesh: + namespace: deco-studio-staging + serviceAccountName: deco-studio-staging + serviceName: deco-studio-staging + servicePort: 80 + destination: + server: https://kubernetes.default.svc + namespace: agent-sandbox-system + syncPolicy: + syncOptions: + - ServerSideApply=true +``` + +Repeat the `Application` per env, varying `metadata.name` and `envName`. 
+ +## Layout + +``` +sandbox-env/ +├── Chart.yaml +├── values.yaml # tunables + envName + mesh.* cross-refs +├── examples/ +│ └── values-kind.yaml # local dev overrides +└── templates/ + ├── _helpers.tpl + ├── validations.yaml # envName + Gateway API + cert-manager preflight + ├── sandbox-template.yaml # SandboxTemplate (per-env) + ├── sandbox-warm-pool.yaml # SandboxWarmPool (optional) + ├── sandbox-network-policy.yaml # NetworkPolicy on sandbox pods (per-env) + ├── sandbox-rbac.yaml # Role + cross-ns RoleBinding to mesh SA + ├── sandbox-cache-pvc.yaml # shared build cache PVC (optional) + ├── sandbox-preview-cert.yaml # cert-manager Certificate (optional) + └── sandbox-preview-gateway.yaml # Gateway + HTTPRoute (optional) +``` + +## Values + +See `values.yaml` for the full set. The most-tuned ones: + +| Key | Default | Notes | +| --- | --- | --- | +| `envName` | _(required)_ | DNS-label suffix on every resource name | +| `image.repository` | `ghcr.io/decocms/studio/studio-sandbox` | studio-sandbox image | +| `image.tag` | chart `appVersion` | bump in lockstep with packages/sandbox/package.json | +| `resources.*` | 0.5/2 CPU, 1/4Gi RAM | per sandbox pod | +| `nodeSelector` / `tolerations` / `affinity` | `{}` | for sandbox isolation NodePool | +| `hostUsers` | `false` | userns remap; flip to `true` if kernel/containerd doesn't support userns | +| `readOnlyRootFilesystem` | `true` | RO rootfs + emptyDirs on /app, /tmp, /home | +| `networkPolicy.enabled` | `true` | locks down ingress/egress | +| `warmPool.enabled` / `warmPool.size` | `false` / `0` | only after measuring cold-start pain | +| `previewGateway.enabled` | `false` | wildcard `*.preview.` Gateway + cert | +| `mesh.namespace` | `deco-studio` | studio release namespace (this env's) | +| `mesh.serviceAccountName` | `deco-studio` | mesh ServiceAccount that gets the RoleBinding | +| `mesh.serviceName` | `deco-studio` | mesh Service the preview HTTPRoute targets | +| `mesh.servicePort` | `80` | match studio's 
`service.port` | +| `mesh.podSelectorLabels` | `chart-deco-studio` / `deco-studio` | for the NetworkPolicy ingress rule | +| `cache.enabled` | `false` | mount shared build/dep cache PVC in every sandbox | +| `cache.storageClass` | _(required when RWX)_ | must name an RWX StorageClass (EFS on EKS); never leave empty with `accessMode=ReadWriteMany` | +| `cache.accessMode` | `ReadWriteMany` | `ReadWriteOnce` is fine on single-node kind | +| `cache.size` | `50Gi` | +| `cache.mountPath` | `/mnt/cache` | mount point inside every sandbox pod | diff --git a/deploy/helm/sandbox-env/examples/values-kind.yaml b/deploy/helm/sandbox-env/examples/values-kind.yaml new file mode 100644 index 0000000000..8d1315f97a --- /dev/null +++ b/deploy/helm/sandbox-env/examples/values-kind.yaml @@ -0,0 +1,75 @@ +# Local kind cluster overrides for the sandbox-env chart. Pair with the +# sandbox-operator chart (which installs the operator + CRDs). + +# ── env identity ─────────────────────────────────────────────────────── +# kind clusters typically only have one studio install, so a fixed env +# name is fine. +envName: "kind" + +# ── sandbox image (built locally + `kind load`ed) ────────────────────── +image: + repository: studio-sandbox + tag: local + pullPolicy: Never + +# ── modest sandbox limits for laptop kind ────────────────────────────── +# Bump back up when stress-testing. +resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1" + memory: "3Gi" + +# ── networkPolicy ────────────────────────────────────────────────────── +# kindnet enforces NetworkPolicy as of kind v0.27 (kindnetd v1.x). Leave +# the chart's policy on so studio → sandbox traffic on port 9000 is +# explicitly allow-listed; otherwise the operator-managed default (only +# `app: sandbox-router` ingress, locked off via networkPolicyManagement: +# Unmanaged in the SandboxTemplate) would re-block everything. Older +# kindnet builds (pre-0.27) ignore the policy and the rule is inert — +# safe either way. 
+networkPolicy: + enabled: true + +# ── pod hardening (relaxed for kind) ─────────────────────────────────── +# Plain host-users mode. userns remap requires K8s 1.30+ on a kernel that +# supports it; kind nodes can vary. Keep simple for local dev. +hostUsers: true +readOnlyRootFilesystem: false + +# ── no preview Gateway in kind ───────────────────────────────────────── +# Mesh's HTTP edge handles preview routing in-process via +# apps/mesh/src/sandbox/preview-proxy.ts: it reads the Host header, +# extracts the sandbox handle, and reverse-proxies to the in-cluster +# daemon Service. +previewGateway: + enabled: false + +# ── warm pool off in kind ────────────────────────────────────────────── +warmPool: + enabled: false + +# ── shared build cache ───────────────────────────────────────────────── +# local-path-provisioner only supports ReadWriteOnce, but that is fine on a +# single-node kind cluster — all pods land on the same node so concurrent +# mounts work. 10 Gi is enough for local dev; bump if you need more. +cache: + enabled: true + storageClass: standard + accessMode: ReadWriteOnce + size: 10Gi + +# ── mesh cross-references (point at the kind studio install) ─────────── +# studio kind install conventionally lands in `deco-studio` namespace +# with release name `deco-studio`. Override here if you `helm install` +# studio under a different name/namespace. +mesh: + namespace: "deco-studio" + serviceAccountName: "deco-studio" + serviceName: "deco-studio" + servicePort: 80 + podSelectorLabels: + app.kubernetes.io/name: "chart-deco-studio" + app.kubernetes.io/instance: "deco-studio" diff --git a/deploy/helm/sandbox-env/templates/_helpers.tpl b/deploy/helm/sandbox-env/templates/_helpers.tpl new file mode 100644 index 0000000000..a461ba0a76 --- /dev/null +++ b/deploy/helm/sandbox-env/templates/_helpers.tpl @@ -0,0 +1,153 @@ +{{/* +Chart name (overridable via nameOverride). 
+*/}} +{{- define "sandbox-env.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Chart-name-and-version label. +*/}} +{{- define "sandbox-env.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +envName, validated. Required so multiple releases (dev / staging / prod) +can coexist in the shared `agent-sandbox-system` namespace without name +collisions; every other helper here suffixes with this value. Constrained +to RFC 1035 DNS labels (a-z0-9-, must start with a letter) so that the +suffixed resource names remain valid in every K8s context — Service / +Role / NetworkPolicy / Gateway names all share that constraint. +*/}} +{{- define "sandbox-env.envName" -}} +{{- $env := required "envName is required (e.g. envName=staging). Used as suffix on every resource name so multiple releases share agent-sandbox-system without collisions." .Values.envName -}} +{{- if not (regexMatch "^[a-z]([a-z0-9-]{0,30}[a-z0-9])?$" $env) -}} +{{- fail (printf "envName=%q must be a DNS label: lowercase alphanumeric or '-', start with a letter, end alphanumeric, 1-32 chars" $env) -}} +{{- end -}} +{{- $env -}} +{{- end }} + +{{/* +Sandbox-pod template + warm-pool name. Both share the same name because +the SandboxWarmPool references the SandboxTemplate by name, and dashboards +keying off `app.kubernetes.io/name` get a single coherent label. +*/}} +{{- define "sandbox-env.sandboxName" -}} +{{- printf "studio-sandbox-%s" (include "sandbox-env.envName" .) -}} +{{- end }} + +{{/* +Mesh runner Role / RoleBinding name. Stays under 63 chars even with a +32-char envName. +*/}} +{{- define "sandbox-env.runnerRoleName" -}} +{{- printf "studio-sandbox-runner-%s" (include "sandbox-env.envName" .) -}} +{{- end }} + +{{/* +Preview Gateway / HTTPRoute / Certificate name. 
+*/}} +{{- define "sandbox-env.previewName" -}} +{{- printf "agent-sandbox-preview-%s" (include "sandbox-env.envName" .) -}} +{{- end }} + +{{/* +Default cert-manager Secret name for the preview wildcard cert. Mirrors +the Gateway/HTTPRoute name so the cert ↔ listener pairing is obvious. +*/}} +{{- define "sandbox-env.previewTlsSecretName" -}} +{{- default (printf "agent-sandbox-preview-%s-tls" (include "sandbox-env.envName" .)) .Values.previewGateway.tlsSecretName -}} +{{- end }} + +{{/* +Selector labels for sandbox pods. The runner stamps the same name label +onto every pod it creates via SandboxClaim.additionalPodMetadata, so the +NetworkPolicy podSelector can target it. Per-env, so two envs' netpols +don't accidentally apply to each other's pods. +*/}} +{{- define "sandbox-env.sandboxSelectorLabels" -}} +app.kubernetes.io/name: {{ include "sandbox-env.sandboxName" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels for sandbox-* resources. component=sandbox lets dashboards +split runtime sandbox pods from operator pods and traffic-edge resources. +*/}} +{{- define "sandbox-env.sandboxLabels" -}} +helm.sh/chart: {{ include "sandbox-env.chart" . }} +{{ include "sandbox-env.sandboxSelectorLabels" . }} +app.kubernetes.io/component: sandbox +studio.decocms.com/env: {{ include "sandbox-env.envName" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Common labels for the sandbox-preview Gateway/HTTPRoute/Certificate. Same +shape as sandboxLabels but with name=studio-sandbox-preview- and +component=sandbox-preview so dashboards can split traffic-edge resources +from runtime sandbox pods. +*/}} +{{- define "sandbox-env.sandboxPreviewLabels" -}} +helm.sh/chart: {{ include "sandbox-env.chart" . }} +app.kubernetes.io/name: {{ include "sandbox-env.previewName" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/component: sandbox-preview +studio.decocms.com/env: {{ include "sandbox-env.envName" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Common labels for non-sandbox resources owned by this chart (RBAC, etc.). +*/}} +{{- define "sandbox-env.labels" -}} +helm.sh/chart: {{ include "sandbox-env.chart" . }} +app.kubernetes.io/name: {{ include "sandbox-env.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +studio.decocms.com/env: {{ include "sandbox-env.envName" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Validate shared build cache configuration. When cache.enabled=true and +accessMode is ReadWriteMany, storageClass must be explicitly set — the +cluster default StorageClass on EKS is EBS (gp2/gp3), which does not +support ReadWriteMany. Without this guard the PVC provisions against the +wrong driver and stays Pending with an opaque provisioner error. +*/}} +{{- define "sandbox-env.validateCache" -}} +{{- if .Values.cache.enabled }} +{{- if and (eq .Values.cache.accessMode "ReadWriteMany") (not .Values.cache.storageClass) }} +{{- fail "sandbox-env: cache.enabled=true with accessMode=ReadWriteMany requires cache.storageClass to be set explicitly. The cluster default StorageClass on EKS is EBS (gp2/gp3), which does not support ReadWriteMany — the PVC will stay Pending forever. On EKS: install the AWS EFS CSI driver, create an EFS-backed StorageClass (e.g. efs-sc with throughputMode: elastic), and set cache.storageClass=efs-sc. On single-node kind: override to cache.accessMode=ReadWriteOnce and cache.storageClass=standard (see examples/values-kind.yaml)." 
-}} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Validate that Gateway API + cert-manager CRDs are present when the sandbox +preview gateway is enabled. Without this check, `helm install` would push +Gateway/HTTPRoute/Certificate to an API server that doesn't know those +kinds — the failure mode is an opaque "no matches for kind" rejection, +sometimes after partial-apply. Failing at template time keeps the release +atomic and gives a pointer to the right install command. +*/}} +{{- define "sandbox-env.validatePreviewGateway" -}} +{{- if .Values.previewGateway.enabled }} +{{- if not (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1") }} +{{- fail "sandbox-env: previewGateway.enabled=true requires the Gateway API CRDs (gateway.networking.k8s.io/v1). Install: kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.1.0/standard-install.yaml — and a Gateway controller (Istio, Envoy Gateway, Cilium, ...) implementing the chosen gatewayClassName." -}} +{{- end }} +{{- if not (.Capabilities.APIVersions.Has "cert-manager.io/v1") }} +{{- fail "sandbox-env: previewGateway.enabled=true requires cert-manager (cert-manager.io/v1). Install: helm install cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true" -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-cache-gc.yaml b/deploy/helm/sandbox-env/templates/sandbox-cache-gc.yaml new file mode 100644 index 0000000000..3b41efcbc6 --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-cache-gc.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.cache.enabled .Values.cache.gc.enabled }} +# Cache garbage-collection CronJob. +# +# Deletes node_modules cache slots and git mirrors that haven't been used in +# cache.gc.ttlDays days. "Used" is defined by: +# - node_modules: mtime of the sentinel file (.deco_cache_ok), which the +# daemon touches on every cache hit so it reflects last-used, not +# last-created time. 
+# - git mirrors: mtime of HEAD, which git updates on every fetch and which +# the daemon touches after each TTL refresh. +# +# The CronJob runs inside agent-sandbox-system, mounts the same PVC as the +# sandbox pods, and removes stale entries with `find … -mtime +N -exec rm`. +# concurrencyPolicy: Forbid ensures at most one GC run at a time. +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "sandbox-env.sandboxName" . }}-cache-gc + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.sandboxLabels" . | nindent 4 }} +spec: + schedule: {{ .Values.cache.gc.schedule | quote }} + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + template: + metadata: + labels: + {{- include "sandbox-env.sandboxLabels" . | nindent 12 }} + app.kubernetes.io/component: cache-gc + spec: + restartPolicy: OnFailure + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: gc + image: busybox:1.36 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/sh + - -c + - | + set -e + TTL={{ .Values.cache.gc.ttlDays }} + CACHE={{ .Values.cache.mountPath }} + + echo "=== cache-gc start (ttl=${TTL}d) ===" + + # node_modules: evict slots whose sentinel mtime > TTL days. + # Sentinel is touched on every cache hit so mtime = last-used. + NM_DIR="${CACHE}/node_modules" + if [ -d "$NM_DIR" ]; then + find "$NM_DIR" -maxdepth 2 -name ".deco_cache_ok" \ + -mtime "+${TTL}" | while IFS= read -r sentinel; do + slot="$(dirname "$sentinel")" + echo "evict node_modules: $slot" + rm -rf "$slot" "${slot}.lock" 2>/dev/null || true + done + fi + + # git mirrors: evict bare clones whose HEAD mtime > TTL days. + # HEAD is touched after every fetch (TTL refresh) so mtime = last-used. 
+ GIT_DIR="${CACHE}/git" + if [ -d "$GIT_DIR" ]; then + find "$GIT_DIR" -maxdepth 4 -name "HEAD" \ + -mtime "+${TTL}" | while IFS= read -r head; do + mirror="$(dirname "$head")" + echo "evict git mirror: $mirror" + rm -rf "$mirror" "${mirror}.lock" 2>/dev/null || true + done + fi + + echo "=== cache-gc done ===" + volumeMounts: + - name: build-cache + mountPath: {{ .Values.cache.mountPath }} + resources: + requests: + cpu: 10m + memory: 32Mi + limits: + cpu: 200m + memory: 128Mi + volumes: + - name: build-cache + persistentVolumeClaim: + claimName: {{ include "sandbox-env.sandboxName" . }}-cache +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-cache-pvc.yaml b/deploy/helm/sandbox-env/templates/sandbox-cache-pvc.yaml new file mode 100644 index 0000000000..04a5f7846a --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-cache-pvc.yaml @@ -0,0 +1,33 @@ +{{- if .Values.cache.enabled -}} +# Shared build and dependency cache PVC. Mounted at {{ .Values.cache.mountPath }} +# in every sandbox pod. Package managers are redirected here via env vars in +# the SandboxTemplate so tarballs downloaded by one sandbox are reused by all. +# +# accessMode: ReadWriteMany is required for concurrent pod mounts. +# On EKS, pair with an EFS-backed StorageClass (aws-efs-csi-driver). +# On single-node kind, ReadWriteOnce is sufficient — override in values-kind.yaml. +# +# helm.sh/resource-policy: keep prevents Helm from deleting this PVC on +# `helm upgrade` or `helm uninstall`. The cache is expensive to repopulate +# (full cold bun/npm/pnpm pull for every sandboxed project); losing it on a +# routine chart operation would be painful. Delete the PVC manually when you +# explicitly want to evict the cache. +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "sandbox-env.sandboxName" . }}-cache + namespace: agent-sandbox-system + annotations: + helm.sh/resource-policy: keep + labels: + {{- include "sandbox-env.sandboxLabels" . 
| nindent 4 }} +spec: + accessModes: + - {{ .Values.cache.accessMode }} + {{- if .Values.cache.storageClass }} + storageClassName: {{ .Values.cache.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.cache.size }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-network-policy.yaml b/deploy/helm/sandbox-env/templates/sandbox-network-policy.yaml new file mode 100644 index 0000000000..d081b593dd --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-network-policy.yaml @@ -0,0 +1,130 @@ +{{- if .Values.networkPolicy.enabled }} +# NetworkPolicy for mesh sandbox pods in THIS environment. +# +# Scope: selects pods in agent-sandbox-system labeled +# app.kubernetes.io/name=studio-sandbox-. Applies both ingress and +# egress rules, so egress is deny-by-default (policyType Egress with only +# allowed rules). The label match is per-env so a release in env A doesn't +# accidentally allow ingress from env B's mesh into env A's pods. +# +# Threat model: workload is arbitrary user code. Egress must not reach IMDS +# (169.254.169.254 / fd00:ec2::254), in-cluster RFC1918 services, or +# link-local addresses. Combine with EKS IMDSv2 hop-limit=1 at the node +# level — this policy alone is not sufficient on clouds where IMDS is +# reachable via hop. +# +# OTel side note: the sandbox image does not currently emit OTLP traffic. +# If a future daemon revision pushes telemetry out-of-pod, add an explicit +# egress rule for the in-cluster collector (or use +# `networkPolicy.extraEgress`); the default-deny stance here will +# otherwise silently drop it. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "sandbox-env.sandboxName" . }} + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.sandboxLabels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "sandbox-env.sandboxName" . 
}} + policyTypes: + - Ingress + - Egress + ingress: + # Daemon port (9000) — mesh server pods call this to exec tools, stream + # logs, etc. Also the *preview* request path: when previewGateway is + # enabled, mesh reverse-proxies `*.preview.` traffic to the + # daemon here so the daemon's CSP/HMR rewrites apply (port 3000 would + # bypass them). Selectors point at the studio release identified via + # `mesh.namespace` + `mesh.podSelectorLabels`. + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Values.mesh.namespace }} + podSelector: + matchLabels: + {{- toYaml .Values.mesh.podSelectorLabels | nindent 14 }} + ports: + - protocol: TCP + port: 9000 + {{- with .Values.networkPolicy.previewGatewayNamespace }} + # DEPRECATED — direct ingress on dev port 3000 from a configured + # gateway namespace. Only needed for setups that route preview traffic + # *around* mesh (no daemon CSP/HMR rewrites). The standard Istio + # Gateway API path lands on port 9000 via mesh and doesn't need this. + # Slated for removal in a future chart version. + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ . }} + ports: + - protocol: TCP + port: 3000 + {{- end }} + egress: + # CoreDNS — UDP + TCP 53. + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + # Public internet on 443 (HTTPS only) with IMDS, RFC1918, and link-local + # blocked. Covers bun install, git clone over HTTPS, external APIs + # without opening the sandbox to in-cluster services. + # + # Plaintext port 80 is *not* allowed by default — bun, npm, and git over + # HTTPS don't need it, and allowing it widens the MITM-on-egress surface. + # Installs that need a package mirror over HTTP can grant it via + # `networkPolicy.extraEgress` (see values.yaml). + # + # 100.64.0.0/10 is shared address space (RFC 6598). 
Some carriers and + # K8s networking flavors put pod IPs there (Kops/Kubenet on AWS; some + # GKE configurations); EKS with the AWS VPC CNI uses VPC ranges + # (10.0.0.0/8) and is already covered above. Keeping the 100.64.0.0/10 + # exclusion is harmless on EKS and necessary on the others. + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 169.254.0.0/16 # link-local + IPv4 IMDSv2 + - 10.0.0.0/8 # RFC1918 + - 172.16.0.0/12 # RFC1918 + - 192.168.0.0/16 # RFC1918 + - 100.64.0.0/10 # shared address space (RFC 6598) + ports: + - protocol: TCP + port: 443 + # IPv6 egress on dual-stack clusters (EKS dual-stack, GKE, etc.). + # Without this rule, an IPv6-enabled pod can reach in-cluster + # ULA-addressed Services and the IPv6 IMDS endpoint + # (fd00:ec2::254 on AWS) bypassing the IPv4 exclusions above. + # Single-stack IPv4 clusters ignore the rule, so it is safe to leave + # in by default. + - to: + - ipBlock: + cidr: ::/0 + except: + - fe80::/10 # link-local + - fc00::/7 # ULA (in-cluster IPv6 Service ranges, ULA Pod CIDRs) + - fd00:ec2::/96 # AWS IPv6 IMDS prefix + ports: + - protocol: TCP + port: 443 + {{- with .Values.networkPolicy.extraEgress }} + # Operator-supplied egress rules. Use to grant the sandbox controlled + # access to in-cluster services it shouldn't reach by default — for + # example, an in-cluster OTel collector at + # `-opentelemetry-collector..svc:4318` if a future + # daemon revision emits OTLP. Each entry is a NetworkPolicyEgressRule. + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-preview-cert.yaml b/deploy/helm/sandbox-env/templates/sandbox-preview-cert.yaml new file mode 100644 index 0000000000..635af22afc --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-preview-cert.yaml @@ -0,0 +1,30 @@ +{{- if .Values.previewGateway.enabled }} +{{- $domain := required "previewGateway.domain is required when previewGateway.enabled=true" .Values.previewGateway.domain }} +{{- $issuer := required "previewGateway.clusterIssuer is required when previewGateway.enabled=true" .Values.previewGateway.clusterIssuer }} +{{- $gwNamespace := .Values.previewGateway.namespace }} +{{- $tlsSecretName := include "sandbox-env.previewTlsSecretName" . }} +# Wildcard cert for the sandbox preview Gateway. cert-manager places the +# Secret in the gateway namespace so the Gateway listener can mount it +# without a cross-namespace reference. +# +# DNS-01 is the only solver that can validate a wildcard SAN, so the +# referenced ClusterIssuer must be DNS-01 (e.g. Cloudflare, Route53). The +# chart does not template the ClusterIssuer itself — the API tokens +# required to provision DNS records are per-cluster infra, not chart +# config. See README.md for a Cloudflare DNS-01 ClusterIssuer template. +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "sandbox-env.previewName" . }} + namespace: {{ $gwNamespace }} + labels: + {{- include "sandbox-env.sandboxPreviewLabels" . 
| nindent 4 }} +spec: + secretName: {{ $tlsSecretName }} + issuerRef: + kind: ClusterIssuer + name: {{ $issuer | quote }} + commonName: {{ printf "*.%s" $domain | quote }} + dnsNames: + - {{ printf "*.%s" $domain | quote }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-preview-gateway.yaml b/deploy/helm/sandbox-env/templates/sandbox-preview-gateway.yaml new file mode 100644 index 0000000000..84dfa1be89 --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-preview-gateway.yaml @@ -0,0 +1,91 @@ +{{- if .Values.previewGateway.enabled }} +{{- $domain := required "previewGateway.domain is required when previewGateway.enabled=true" .Values.previewGateway.domain }} +{{- $issuer := required "previewGateway.clusterIssuer is required when previewGateway.enabled=true" .Values.previewGateway.clusterIssuer }} +{{- $gwNamespace := .Values.previewGateway.namespace }} +{{- $tlsSecretName := include "sandbox-env.previewTlsSecretName" . }} +{{- $hostname := printf "*.%s" $domain }} +{{- $meshNamespace := required "mesh.namespace is required" .Values.mesh.namespace }} +{{- $meshServiceName := required "mesh.serviceName is required" .Values.mesh.serviceName }} +{{- $previewName := include "sandbox-env.previewName" . }} +# Wildcard preview-URL ingress. A single Gateway + HTTPRoute terminate +# `*.preview.` and forward to the mesh Service; mesh inspects the +# Host header and reverse-proxies to the matching sandbox's daemon at port +# 9000 (daemon owns the public surface; routing browsers straight at dev +# port 3000 would bypass the daemon's CSP/HMR rewrites and break iframe +# embedding + SSE). +# +# Mesh stays in the request path for the first ship; the longer-term plan +# is per-claim HTTPRoutes that bypass mesh entirely. Switching to that +# requires per-Service routing + RBAC for mesh to mint HTTPRoutes, which +# is deferred. +# +# AUTH MODEL — read before exposing this. 
The Host header carries the +# sandbox handle, which is the *only* authorization on the preview path +# (no listener-level auth, matching how Vercel preview URLs work). That +# means handles travel in plaintext through every CDN / LB / proxy in the +# request path and will appear in their access logs. Treat them as +# URL-grade secrets — do not share in tickets, screenshots, etc. For +# tighter isolation, terminate auth at the Gateway with an +# AuthorizationPolicy (Istio) / extauth (Envoy) in front of this +# listener; the chart does not do that for you. +# +# MULTI-ENV NOTE — two envs can both enable previewGateway only if they +# use different `previewGateway.domain` values. Two Gateways binding the +# *same* wildcard hostname will conflict at the controller level; the +# resource names are envName-suffixed but the listener hostname must be +# unique per Gateway. +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: {{ $previewName }} + namespace: {{ $gwNamespace }} + labels: + {{- include "sandbox-env.sandboxPreviewLabels" . | nindent 4 }} + annotations: + # cert-manager picks up the cert from the listener's TLS secret ref; + # this annotation tells it which ClusterIssuer to use when minting + # the wildcard. Required because Gateway listeners don't have a + # built-in `issuerRef` field. + cert-manager.io/cluster-issuer: {{ $issuer | quote }} +spec: + gatewayClassName: {{ .Values.previewGateway.gatewayClassName | quote }} + listeners: + - name: https + protocol: HTTPS + port: 443 + hostname: {{ $hostname | quote }} + tls: + mode: Terminate + certificateRefs: + - kind: Secret + name: {{ $tlsSecretName }} + namespace: {{ $gwNamespace }} + allowedRoutes: + namespaces: + # HTTPRoute lives in the mesh namespace, so we have to explicitly + # allow cross-namespace attachment from there. Without an explicit + # selector the route would silently drop. 
+ from: Selector + selector: + matchLabels: + kubernetes.io/metadata.name: {{ $meshNamespace }} +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: {{ $previewName }} + namespace: {{ $meshNamespace }} + labels: + {{- include "sandbox-env.sandboxPreviewLabels" . | nindent 4 }} +spec: + parentRefs: + - kind: Gateway + name: {{ $previewName }} + namespace: {{ $gwNamespace }} + hostnames: + - {{ $hostname | quote }} + rules: + - backendRefs: + - name: {{ $meshServiceName }} + port: {{ .Values.mesh.servicePort }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-rbac.yaml b/deploy/helm/sandbox-env/templates/sandbox-rbac.yaml new file mode 100644 index 0000000000..7a30eb1350 --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-rbac.yaml @@ -0,0 +1,79 @@ +# RBAC for the mesh ServiceAccount to drive the agent-sandbox operator from +# inside the cluster. The runner (packages/sandbox/server/runner/agent-sandbox/) needs: +# - sandboxclaims CRUD/patch (per-tenant claim lifecycle, idle TTL refresh) +# - sandboxes get/list/watch (waitForSandboxReady streams `?watch=true`) +# - pods/portforward create (kubectl-style tunnel to the daemon container) +# Pods get is included for portforward error paths; the runner itself doesn't +# read pod specs directly. +# +# Why pods/portforward is here even with the preview Gateway: +# The runner has *two* traffic paths and the Gateway only covers one. +# 1. Browser → preview URL: Gateway terminates `*.preview.` +# and forwards to the mesh Service, which then reverse-proxies to +# the sandbox Service (`..svc.cluster.local:9000`). +# No portforward involved. +# 2. Mesh runner → sandbox daemon (control plane): the runner calls +# the daemon's HTTP API directly (probeDaemonHealth, daemonBash, +# waitForDaemonReady) on every code-execution. This path uses +# portforward unconditionally — see runner.ts:openForwarder. 
It +# works the same whether mesh runs in-cluster or on a developer's +# laptop, and routes through the apiserver so we don't have to +# open daemon ingress on 9000 to mesh's pod selector. +# In production we *could* switch path 2 to in-cluster Service DNS and +# drop portforward from this Role; the daemon already enforces its own +# bearer-token check. That's tracked as a future hardening pass and is +# not blocked by this chart. +# +# Privilege-escalation note: pods/portforward in this Role can target the +# operator pod itself (which also lives in agent-sandbox-system). The +# operator's exposed ports today (8080 metrics, 8081 healthz) are +# read-only, so the worst case is read of metrics text. If a future +# operator revision adds a write port, scope this rule to sandbox pods +# only (resourceNames or relabel via mutating webhook). +# +# Scope: Role in agent-sandbox-system (the operator's namespace). The mesh +# ServiceAccount lives in `mesh.namespace` (per values) and the RoleBinding +# crosses namespaces by referencing it explicitly. Keeps blast radius of a +# mesh compromise limited to the sandbox namespace. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "sandbox-env.runnerRoleName" . }} + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.labels" . | nindent 4 }} +rules: + - apiGroups: ["extensions.agents.x-k8s.io"] + resources: ["sandboxclaims"] + verbs: ["get", "create", "delete", "patch"] + - apiGroups: ["agents.x-k8s.io"] + resources: ["sandboxes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/portforward"] + # `get` is required for the WebSocket-based port-forward path used by + # @kubernetes/client-node v1.x (and Bun's native WS, and any newer + # client-go-equivalent). 
The legacy SPDY path only needed `create`, but + # all modern clients use the WS upgrade — which the API server enforces + # as a `GET` against the subresource. Without `get`, the upgrade returns + # 403 and the runner sees `[object ErrorEvent]`. + verbs: ["get", "create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "sandbox-env.runnerRoleName" . }} + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ required "mesh.serviceAccountName is required" .Values.mesh.serviceAccountName }} + namespace: {{ required "mesh.namespace is required" .Values.mesh.namespace }} +roleRef: + kind: Role + name: {{ include "sandbox-env.runnerRoleName" . }} + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/helm/sandbox-env/templates/sandbox-template.yaml b/deploy/helm/sandbox-env/templates/sandbox-template.yaml new file mode 100644 index 0000000000..c61753fd9c --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-template.yaml @@ -0,0 +1,154 @@ +# Shared SandboxTemplate consumed by every SandboxClaim the mesh runner +# creates for THIS environment. Resource ceilings come from values.resources. +# +# Hardcoded to the operator's own namespace (agent-sandbox-system) — the CRDs +# ship with that as the install target, and the operator's RBAC watches it by +# default. The template *name* is suffixed with envName so multiple +# environments' SandboxTemplates coexist; mesh in this env must reference +# the suffixed name via STUDIO_SANDBOX_TEMPLATE_NAME. +apiVersion: extensions.agents.x-k8s.io/v1alpha1 +kind: SandboxTemplate +metadata: + name: {{ include "sandbox-env.sandboxName" . }} + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.sandboxLabels" . | nindent 4 }} +spec: + # Claims inject DAEMON_TOKEN per-provision via SandboxClaim.spec.env. 
The + # template carries no shared secret; leakage of the template compromises + # nothing on its own. + envVarsInjectionPolicy: Allowed + # The CRD defaults to Managed, which makes the operator install its own + # NetworkPolicy that only allows ingress from pods labeled + # `app: sandbox-router`. That's intended for Istio-style sidecar routing + # and silently blocks the mesh → daemon path the preview-proxy depends on. + # We surface the netpol via templates/sandbox-network-policy.yaml instead + # (gated by `networkPolicy.enabled`), so flag the operator's policy off. + networkPolicyManagement: Unmanaged + podTemplate: + metadata: + labels: + # Per-env name so each env's NetworkPolicy podSelector matches only + # its own pods. Mesh runner stamps the same value via + # SandboxClaim.additionalPodMetadata (driven by + # STUDIO_SANDBOX_TEMPLATE_NAME pointing at the env-suffixed + # template) — keep these in lockstep. + app.kubernetes.io/name: {{ include "sandbox-env.sandboxName" . }} + # Do NOT set `studio.decocms.com/role` here. The operator (v0.4.2+) + # rejects claims whose additionalPodMetadata defines a label key + # already present in the template — even when the values differ — + # with "metadata override conflict". The runner sets role=claimed + # via additionalPodMetadata, so the template must leave that key + # undefined. Warm-pool pods end up without the role label; + # dashboards filter by absence-of-handle instead. + spec: + automountServiceAccountToken: false + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.hostUsers }} + # User namespace remap: UID 1000 inside the pod maps to a high + # subordinate UID on the node, so a container escape lands as a + # nobody-user, not as a real node UID. 
Requires K8s 1.30+ and a + # containerd/kernel that support userns (EKS default AMIs are fine). + hostUsers: false + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: sandbox + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + workingDir: /app + env: + - name: DAEMON_PORT + value: "9000" + - name: WORKDIR + value: "/app" + # DAEMON_TOKEN is injected per-claim via SandboxClaim.spec.env. + {{- if .Values.readOnlyRootFilesystem }} + # With RO rootfs + emptyDir on /app, the mount root is owned + # root:1000 (fsGroup). Git 2.35+'s "dubious ownership" check + # would refuse to operate. Disable the check inside the + # sandbox — single-tenant pod, no untrusted same-pod user. + - name: GIT_CONFIG_COUNT + value: "1" + - name: GIT_CONFIG_KEY_0 + value: "safe.directory" + - name: GIT_CONFIG_VALUE_0 + value: "*" + {{- end }} + {{- if .Values.cache.enabled }} + # Single mount-path handed to the daemon. The daemon derives all + # sub-directories (bun, npm, pnpm, yarn, deno, xdg, git, + # node_modules, next) and injects the corresponding env vars into + # every subprocess it spawns — keeping runtime behaviour in code, + # not in the template. SANDBOX_CACHE_KEY is injected per-claim by + # the runner (not from the template). 
+ - name: CACHE_DIR + value: {{ .Values.cache.mountPath }} + {{- end }} + ports: + - name: daemon + containerPort: 9000 + protocol: TCP + - name: dev + containerPort: 3000 + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + readOnlyRootFilesystem: {{ .Values.readOnlyRootFilesystem }} + {{- if or .Values.readOnlyRootFilesystem .Values.cache.enabled }} + volumeMounts: + {{- if .Values.readOnlyRootFilesystem }} + - name: workdir + mountPath: /app + - name: tmp + mountPath: /tmp + - name: home + mountPath: /home/sandbox + {{- end }} + {{- if .Values.cache.enabled }} + - name: build-cache + mountPath: {{ .Values.cache.mountPath }} + {{- end }} + {{- end }} + {{- if or .Values.readOnlyRootFilesystem .Values.cache.enabled }} + volumes: + {{- if .Values.readOnlyRootFilesystem }} + # Sized to match the per-container ephemeral-storage limit shape; + # individual mounts get a slice. Adjust if a workload needs more. + - name: workdir + emptyDir: + sizeLimit: 5Gi + - name: tmp + emptyDir: + sizeLimit: 1Gi + - name: home + emptyDir: + sizeLimit: 2Gi + {{- end }} + {{- if .Values.cache.enabled }} + - name: build-cache + persistentVolumeClaim: + claimName: {{ include "sandbox-env.sandboxName" . }}-cache + {{- end }} + {{- end }} diff --git a/deploy/helm/sandbox-env/templates/sandbox-warm-pool.yaml b/deploy/helm/sandbox-env/templates/sandbox-warm-pool.yaml new file mode 100644 index 0000000000..9d2b4fa86e --- /dev/null +++ b/deploy/helm/sandbox-env/templates/sandbox-warm-pool.yaml @@ -0,0 +1,19 @@ +{{- if .Values.warmPool.enabled }} +# Pre-warms N sandbox pods against the shared SandboxTemplate so new claims +# bind instantly rather than waiting on image pull + kubelet start. Disabled +# by default — enable only after measuring cold-start pain; every pool +# replica costs the full sandbox resource request. 
+# +# Schema (v1alpha1): see sandbox-operator/crds/agent-sandbox-crds.yaml. +apiVersion: extensions.agents.x-k8s.io/v1alpha1 +kind: SandboxWarmPool +metadata: + name: {{ include "sandbox-env.sandboxName" . }} + namespace: agent-sandbox-system + labels: + {{- include "sandbox-env.sandboxLabels" . | nindent 4 }} +spec: + replicas: {{ .Values.warmPool.size }} + sandboxTemplateRef: + name: {{ include "sandbox-env.sandboxName" . }} +{{- end }} diff --git a/deploy/helm/sandbox-env/templates/validations.yaml b/deploy/helm/sandbox-env/templates/validations.yaml new file mode 100644 index 0000000000..c15ae91ff0 --- /dev/null +++ b/deploy/helm/sandbox-env/templates/validations.yaml @@ -0,0 +1,17 @@ +{{- /* +Chart-level validations. Renders no resources. + +Note: this chart pins all sandbox-side resources to `agent-sandbox-system` +explicitly (rather than `.Release.Namespace`) because the operator's RBAC +watches that namespace by default — splitting them across two namespaces +breaks reconciliation. So unlike sandbox-operator, this chart does NOT +fail if installed under a different release namespace; the resources +still land in agent-sandbox-system regardless. The release namespace +only really matters for Helm's own bookkeeping. +*/ -}} +{{- include "sandbox-env.validatePreviewGateway" . -}} +{{- include "sandbox-env.validateCache" . -}} +{{- /* Force envName validation to run at template time even when no +resource references it directly (pure-validation render). Discard the +return value into a local so it doesn't leak into the rendered YAML. */ -}} +{{- $_ := include "sandbox-env.envName" . -}} diff --git a/deploy/helm/sandbox-env/values.yaml b/deploy/helm/sandbox-env/values.yaml new file mode 100644 index 0000000000..021eb3701b --- /dev/null +++ b/deploy/helm/sandbox-env/values.yaml @@ -0,0 +1,259 @@ +# Default values for the sandbox-env chart. +# +# This chart is independent of chart-deco-studio and of the sandbox-operator +# chart. 
It installs the Studio-side resources that consume the operator's
+# CRDs (SandboxTemplate, RBAC, NetworkPolicy, optional WarmPool, optional
+# preview Gateway/Certificate). Install one release per environment that
+# needs to use the operator — every resource name is suffixed with
+# `envName` so multiple releases coexist in `agent-sandbox-system`.
+#
+# Cross-chart wiring lives under `mesh.*`: tell this chart where the studio
+# release for THIS environment runs so the RBAC RoleBinding, NetworkPolicy
+# ingress selector, and preview HTTPRoute backendRef can reach it. There is
+# no runtime coupling back the other way — studio consumes nothing from
+# this chart at install time; it simply talks to the operator's CRDs and
+# the daemon Service the operator creates per SandboxClaim.
+#
+# Mesh side: the studio install for THIS environment must point its runner
+# at the env-suffixed SandboxTemplate by setting
+#   STUDIO_SANDBOX_TEMPLATE_NAME=studio-sandbox-<envName>
+# in the studio chart's `configMap.meshConfig`. Without that override mesh
+# falls back to "studio-sandbox" (no suffix) and claim creation will fail
+# with "sandboxtemplate not found".
+
+# ── env identity ───────────────────────────────────────────────────────
+# Required. Used as suffix on every resource name this chart creates so
+# multiple releases (dev / staging / prod / ...) coexist in the shared
+# `agent-sandbox-system` namespace without collisions. Examples:
+#   envName=staging → SandboxTemplate/studio-sandbox-staging,
+#                     Role/studio-sandbox-runner-staging,
+#                     NetworkPolicy/studio-sandbox-staging,
+#                     Gateway/agent-sandbox-preview-staging, ...
+# Use a short DNS label (a-z0-9-, 1-32 chars). Conventionally matches the
+# studio (chart-deco-studio) release name for the same environment.
+envName: "" + +# ── sandbox pod image (used by SandboxTemplate) ──────────────────────── +image: + repository: ghcr.io/decocms/studio/studio-sandbox + # Pinned to a specific version so chart upgrades pull a matching image + # instead of silently moving with `:latest`. Bump in lockstep with + # packages/sandbox/package.json — release-studio-sandbox.yaml tags + # images using that version. NEVER set this to "latest" in prod. + tag: "0.1.0" + # Override to Never on local kind clusters that load via `kind load`. + pullPolicy: IfNotPresent + +# ── sandbox pod resources (per SandboxClaim) ─────────────────────────── +# Prod ceilings. Adjust per measured workload. +resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: "2" + memory: 4Gi + ephemeral-storage: 10Gi + +# ── sandbox NetworkPolicy ────────────────────────────────────────────── +networkPolicy: + enabled: true + # DEPRECATED: kept only for installs whose external gateway terminates + # *directly* on the sandbox's port 3000 (dev server). The default + # preview path goes through mesh and lands on port 9000 (daemon) + # instead, so all standard installs leave this empty. See + # `previewGateway` below for the supported Istio Gateway API path. + # Will be removed in a future chart version — open an issue if you + # still need it. + previewGatewayNamespace: "" + # Operator-supplied egress rules (NetworkPolicyEgressRule[]) merged + # into the sandbox NetworkPolicy. The base policy denies in-cluster + # egress; use this to grant *specific* exceptions (e.g. an in-cluster + # OTel collector if the daemon ever emits OTLP). Empty = no extra + # egress. 
+ # + # extraEgress: + # - to: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: monitoring + # podSelector: + # matchLabels: + # app.kubernetes.io/name: opentelemetry-collector + # ports: + # - protocol: TCP + # port: 4318 + extraEgress: [] + +# ── sandbox-pod scheduling ───────────────────────────────────────────── +# Pin sandbox pods to a dedicated NodePool so a container escape lands on +# a node that has no mesh / postgres / NATS / OTel pods on it. Pair with a +# Karpenter NodePool that taints + labels matching nodes; see README.md +# for the snippet. Empty defaults run sandbox pods on whatever the +# scheduler picks. +nodeSelector: {} +# nodeSelector: +# workload: sandbox +tolerations: [] +# tolerations: +# - key: workload +# operator: Equal +# value: sandbox +# effect: NoSchedule +# Affinity rules merged into the sandbox PodSpec. Use podAffinity to +# co-locate sandbox pods on the same node (cheaper warm-pool packing, +# shared image cache); use nodeAffinity for soft node preferences that +# nodeSelector can't express. Empty default = scheduler picks. +affinity: {} + +# ── sandbox-pod hardening ────────────────────────────────────────────── +# User namespace remap (`spec.hostUsers: false`): UID 1000 inside the pod +# maps to a high, unprivileged subordinate UID on the node, so a +# container escape doesn't land as a real node UID. Requires K8s 1.30+ +# with a containerd/kernel that support userns (EKS default AMIs from +# late 2024 onward are fine; kind nodes vary). +# +# Defaults to the secure path because this chart runs untrusted user +# code. Override to true (back to host users) ONLY on clusters whose +# kernel/containerd doesn't support userns — symptoms of unsupported +# runtime are pod scheduling failures with messages like "user +# namespaces are not enabled". +hostUsers: false + +# Read-only root filesystem. 
When true, /app + /tmp + /home/sandbox are
+# remounted as emptyDirs and `safe.directory '*'` is set so git works
+# against the chowned mount.
+#
+# Defaults to true (secure path). Override to false ONLY if a workload
+# writes outside the three covered emptyDirs and you've measured the
+# resulting failure mode. Validate end-to-end (clone + bun/npm install +
+# dev server start) on staging before merging an override.
+readOnlyRootFilesystem: true
+
+# ── warm pool ──────────────────────────────────────────────────────────
+warmPool:
+  # Enable only after measuring cold-start pain; every warm pod costs
+  # the full resources.requests above.
+  enabled: false
+  size: 0
+
+# ── preview URL gateway (optional) ─────────────────────────────────────
+# Wildcard preview-URL ingress (Approach B in the K8s sandbox plan).
+# Renders an Istio Gateway + HTTPRoute that send all *.preview.<domain>
+# traffic to the mesh Service; mesh recognises the Host header and
+# reverse-proxies to the matching sandbox's daemon at port 9000 (daemon,
+# not dev port 3000 — the daemon's reverse proxy injects the HMR
+# bootstrap + strips CSP that the iframe needs).
+#
+# Manual prerequisites (not templated):
+#   1. DNS: Cloudflare (or other) wildcard `*.preview.<domain>` → cluster
+#      external LB hostname.
+#   2. cert-manager ClusterIssuer for the wildcard cert. DNS-01 is
+#      required (HTTP-01 doesn't work for wildcards). Set `clusterIssuer`
+#      to that issuer's name.
+previewGateway:
+  enabled: false
+  # gatewayClassName for the Gateway. EKS clusters running Istio
+  # ambient/sidecar default to "istio". Confirm with
+  # `kubectl get gatewayclasses` before flipping enabled=true.
+  gatewayClassName: "istio"
+  # Namespace where the Gateway + HTTPRoute land. Mesh's existing gateway
+  # typically lives in `istio-system`; some setups use a dedicated
+  # `gateway` ns. The cert Secret is created in the same ns.
+  namespace: "istio-system"
+  # Wildcard domain for previews — e.g. 
"preview.decocms.com" yields + # `*.preview.decocms.com`. Required when enabled=true. + domain: "" + # cert-manager ClusterIssuer that issues the wildcard cert. Required + # when enabled=true. The chart does NOT template the ClusterIssuer + # itself — that is per-cluster infrastructure (a Cloudflare DNS-01 + # issuer, for example, needs your API token in a Secret). + clusterIssuer: "" + # PEM-format secret name created by cert-manager. Defaults to + # `agent-sandbox-preview--tls`. Override only if the cert + # lives under a name dictated by external tooling. + tlsSecretName: "" + +# ── shared build / dependency cache ─────────────────────────────────── +# Mounts a single PVC at `mountPath` inside every sandbox pod and redirects +# each package manager's cache directory there via env vars. On a cold start +# the first `bun install` (or npm/pnpm/yarn/deno) populates the cache; every +# subsequent sandbox that shares a dep at the same version skips the registry +# download entirely. +# +# Storage requirements — ReadWriteMany: +# Multi-pod concurrent mounts require ReadWriteMany. On EKS the cluster +# default StorageClass is EBS (gp2/gp3), which does NOT support RWX — +# cache.storageClass MUST be set explicitly or the PVC will stay Pending. +# +# EKS (recommended): +# 1. Install the AWS EFS CSI driver (aws-efs-csi-driver): +# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html +# 2. Create an EFS filesystem and a StorageClass that points at it. +# Use throughputMode: elastic for 30+ concurrent sandbox builds: +# kind: StorageClass +# provisioner: efs.csi.aws.com +# parameters: +# provisioningMode: efs-ap +# fileSystemId: +# directoryPerms: "700" +# mountOptions: +# - tls +# 3. Set cache.storageClass to that StorageClass name (e.g. efs-sc). +# +# Single-node kind (local dev): +# Use examples/values-kind.yaml — it overrides to ReadWriteOnce + +# storageClass=standard, which local-path-provisioner supports. 
+# Lifecycle note: +# The PVC carries `helm.sh/resource-policy: keep` so it survives +# `helm upgrade` and `helm uninstall`. Delete it manually to evict the cache. +cache: + enabled: false + # REQUIRED when accessMode=ReadWriteMany. Must name an RWX-capable + # StorageClass (EFS on EKS, NFS elsewhere). Leaving this empty with RWX + # falls back to the cluster default (EBS on EKS), which does not support + # ReadWriteMany and will cause the PVC to stay Pending. The chart validates + # this at template time. + storageClass: "" + accessMode: ReadWriteMany + size: 50Gi + mountPath: /mnt/cache + # Garbage collection: nightly CronJob that evicts stale node_modules slots + # and git mirrors. Uses sentinel/HEAD mtime (last-used time) so active caches + # are never evicted. Requires cache.enabled=true. + # + # Cost tip: pair with EFS Intelligent Tiering (set a 30-day lifecycle policy + # on the EFS filesystem) so stale entries that survive until GC are billed + # at the Infrequent Access rate (~92% cheaper) rather than Standard. + gc: + enabled: true + # Cron schedule (UTC). Default: 03:00 daily — low-traffic window. + schedule: "0 3 * * *" + # Evict cache entries not used in this many days. + ttlDays: 30 + +# ── mesh cross-references ────────────────────────────────────────────── +# Tells this chart where the chart-deco-studio release for THIS +# environment runs so the RBAC RoleBinding, NetworkPolicy ingress +# selector, and preview-Gateway HTTPRoute backendRef all point at it. +# These values must match the namespace + release-name + service-port of +# the studio install for the same env. +mesh: + # Namespace where the studio release runs. The mesh ServiceAccount and + # mesh Service are looked up here. + namespace: "deco-studio" + # Name of the mesh ServiceAccount that gets the RoleBinding allowing + # SandboxClaim CRUD + portforward against agent-sandbox-system. + # Conventionally the studio release name (chart.fullname). 
+ serviceAccountName: "deco-studio" + # Name of the mesh Service the preview HTTPRoute forwards traffic to. + # Conventionally the studio release name. + serviceName: "deco-studio" + # Port on the mesh Service the preview HTTPRoute targets. Match + # studio's `service.port` (default 80). + servicePort: 80 + # Pod selector labels that identify mesh pods for the NetworkPolicy + # ingress rule. Should match the studio chart's selectorLabels. + podSelectorLabels: + app.kubernetes.io/name: "chart-deco-studio" + app.kubernetes.io/instance: "deco-studio" diff --git a/deploy/helm/sandbox-operator/.helmignore b/deploy/helm/sandbox-operator/.helmignore new file mode 100644 index 0000000000..62a747d10a --- /dev/null +++ b/deploy/helm/sandbox-operator/.helmignore @@ -0,0 +1,21 @@ +# Helm conventional ignores +.DS_Store +.git/ +.gitignore +.bzr/ +.hg/ +.svn/ +*.tmproj +.vscode/ +.idea/ +*.swp +*.bak +*.tmp +*.orig +*~ + +# Re-vendor script — not part of the published chart +vendor.sh + +# Examples folder is for documentation in-tree only +examples/ diff --git a/deploy/helm/sandbox-operator/Chart.yaml b/deploy/helm/sandbox-operator/Chart.yaml new file mode 100644 index 0000000000..de2741f207 --- /dev/null +++ b/deploy/helm/sandbox-operator/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: sandbox-operator +description: | + Pure upstream kubernetes-sigs/agent-sandbox operator + CRDs (vendored — + upstream does not publish a Helm chart as of v0.4.2). Installs the + Namespace, ServiceAccount, Deployment, Service, and ClusterRoles / + ClusterRoleBindings the operator needs. No Studio-specific resources + live in this chart — see deploy/helm/sandbox-env for those. +type: application +# Chart version is independent of upstream agent-sandbox. Bump on any change. +version: 0.1.0 +# Tracks the upstream agent-sandbox release pinned by vendor.sh. 
+appVersion: "0.4.2" +kubeVersion: ">=1.30.0-0" diff --git a/deploy/helm/sandbox-operator/README.md b/deploy/helm/sandbox-operator/README.md new file mode 100644 index 0000000000..0f44feae9f --- /dev/null +++ b/deploy/helm/sandbox-operator/README.md @@ -0,0 +1,119 @@ +# sandbox-operator Helm chart + +Pure packaging of the upstream +[`kubernetes-sigs/agent-sandbox`](https://github.com/kubernetes-sigs/agent-sandbox) +operator + CRDs (vendored — upstream does not publish a Helm chart as of +v0.4.2). Installs: + +- `Namespace` `agent-sandbox-system` (with PodSecurity admission labels) +- `ServiceAccount`, `Service`, `Deployment` for the controller +- `ClusterRole` + `ClusterRoleBinding` for the base + extensions reconcilers +- All `CustomResourceDefinition`s the operator owns + +This chart **deliberately exposes no tunables**. Studio-side resources +(`SandboxTemplate`, RBAC for the mesh runner, `NetworkPolicy`, +`SandboxWarmPool`, preview `Gateway`/`HTTPRoute`/`Certificate`) live in the +companion [`sandbox-env`](../sandbox-env/) chart and are installed once per +environment alongside this one. + +Pinned upstream version: **v0.4.2** (see `Chart.yaml` `appVersion`). + +## Prerequisites + +- **Kubernetes 1.30+** (enforced by `Chart.yaml` `kubeVersion`). +- The chart **must be installed into the `agent-sandbox-system` namespace**. + The vendored upstream operator manifest hardcodes that namespace; `helm + template` will fail otherwise. See the validation in `_helpers.tpl`. + +## Install + +Published as an OCI artifact at +`oci://ghcr.io/decocms/studio/charts/sandbox-operator` by +`.github/workflows/release-sandbox-charts.yaml`. + +```bash +helm install sandbox-operator \ + oci://ghcr.io/decocms/studio/charts/sandbox-operator \ + --version 0.1.0 \ + --namespace agent-sandbox-system --create-namespace +``` + +Then install one `sandbox-env` release per environment that needs to use +this operator. See [`../sandbox-env/README.md`](../sandbox-env/README.md). 
+ +### ArgoCD Application + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: sandbox-operator + namespace: argocd +spec: + project: default + source: + repoURL: ghcr.io/decocms/studio/charts + chart: sandbox-operator + targetRevision: 0.1.0 + destination: + server: https://kubernetes.default.svc + namespace: agent-sandbox-system + syncPolicy: + syncOptions: + - CreateNamespace=true + - ServerSideApply=true +``` + +## Layout + +``` +sandbox-operator/ +├── Chart.yaml +├── values.yaml # intentionally empty +├── vendor.sh # re-fetches upstream YAML +├── crds/ +│ └── agent-sandbox-crds.yaml # vendored CRDs +└── templates/ + ├── _helpers.tpl + ├── validations.yaml # namespace preflight + └── agent-sandbox-manifest.yaml # vendored upstream operator +``` + +`vendor.sh` splits upstream multi-doc YAML on `kind: CustomResourceDefinition` +boundaries and routes each doc into `crds/` or `templates/`. + +## CRD upgrade caveat + +Helm install-applies files under `crds/` on first install but **never +upgrades them** (intentional Helm design choice). After bumping `appVersion` +via `./vendor.sh`, run: + +```bash +kubectl apply -f deploy/helm/sandbox-operator/crds/agent-sandbox-crds.yaml +# then helm upgrade as normal +``` + +Uninstall + reinstall also works but drops existing `SandboxClaim`s. + +## Bumping upstream version + +```bash +./vendor.sh v0.4.3 # re-fetches + re-splits, requires sha256 in KNOWN_CHECKSUMS +# edit Chart.yaml: appVersion -> "0.4.3" +# bump version: 0.1.0 -> 0.2.0 +``` + +Push to `main` — `release-sandbox-charts.yaml` packages and pushes the new +OCI tag to `ghcr.io`. Argo CD picks it up by the `targetRevision` in the +`Application` manifest. + +Check upstream release notes for CRD schema changes — if `sandboxtemplates` +or `sandboxwarmpools` shape changes, the matching templates in `sandbox-env` +may need corresponding edits. + +## Why not an upstream Helm chart? + +Upstream hasn't published one as of v0.4.2. 
Filing a request with prior art +pointing at this chart is worthwhile — if upstream ships an official chart, +the vendored copy goes away and this chart switches to a `dependencies:` +entry pointing at upstream's repo. diff --git a/deploy/helm/sandbox-operator/crds/agent-sandbox-crds.yaml b/deploy/helm/sandbox-operator/crds/agent-sandbox-crds.yaml new file mode 100644 index 0000000000..07daf0a113 --- /dev/null +++ b/deploy/helm/sandbox-operator/crds/agent-sandbox-crds.yaml @@ -0,0 +1,8286 @@ +# Vendored from kubernetes-sigs/agent-sandbox v0.4.2 via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: CustomResourceDefinition docs only. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxes.agents.x-k8s.io +spec: + group: agents.x-k8s.io + names: + kind: Sandbox + listKind: SandboxList + plural: sandboxes + shortNames: + - sandbox + singular: sandbox + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + podTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + 
items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: 
+ type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + 
preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + 
required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: 
+ - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + 
default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name 
+ x-kubernetes-list-type: map + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + 
x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + 
required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: 
object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object 
+ type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: 
object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + 
hostUsers: + type: boolean + hostname: + type: string + hostnameOverride: + type: string + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + 
type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - 
port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + 
values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + 
priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + items: + properties: + name: + type: string + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxChangePolicy: + type: string + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string 
+ user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - 
topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + 
type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + 
type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + properties: + certificateChainPath: + type: string + credentialBundlePath: + type: string + keyPath: + type: string + keyType: + type: string + maxExpirationSeconds: + format: int32 + type: integer + signerName: + type: string + userAnnotations: + additionalProperties: + type: string + type: object + required: + - keyType + - signerName + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: 
array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + 
type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + workloadRef: + properties: + name: + type: string + podGroup: + type: string + podGroupReplicaKey: + type: string + required: + - name + - podGroup + type: object + required: + - containers + type: object + required: + - spec + type: object + replicas: + default: 1 + format: int32 + maximum: 1 + minimum: 0 + type: integer + shutdownPolicy: + default: Retain + enum: + - Delete + - Retain + type: string + shutdownTime: + format: date-time + type: string + volumeClaimTemplates: + items: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + name: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + 
requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: array + required: + - podTemplate + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + podIPs: + items: + type: string + type: array + replicas: + format: int32 + minimum: 0 + type: integer + selector: + type: string + service: + type: string + serviceFQDN: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: 
.status.replicas + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxclaims.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxClaim + listKind: SandboxClaimList + plural: sandboxclaims + shortNames: + - sandboxclaim + singular: sandboxclaim + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + additionalPodMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + env: + items: + properties: + containerName: + type: string + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + lifecycle: + properties: + shutdownPolicy: + default: Retain + enum: + - Delete + - DeleteForeground + - Retain + type: string + shutdownTime: + format: date-time + type: string + type: object + sandboxTemplateRef: + properties: + name: + type: string + required: + - name + type: object + warmpool: + default: default + type: string + required: + - sandboxTemplateRef + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + 
- reason + - status + - type + type: object + type: array + sandbox: + properties: + name: + type: string + podIPs: + items: + type: string + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxtemplates.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxTemplate + listKind: SandboxTemplateList + plural: sandboxtemplates + shortNames: + - sandboxtemplate + singular: sandboxtemplate + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + envVarsInjectionPolicy: + default: Disallowed + enum: + - Allowed + - Overrides + - Disallowed + type: string + networkPolicy: + properties: + egress: + items: + properties: + ports: + items: + properties: + endPort: + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + protocol: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + to: + items: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - cidr + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + properties: + matchExpressions: + items: + properties: + key: + type: 
string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + ingress: + items: + properties: + from: + items: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - cidr + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + ports: + items: + properties: + endPort: + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + protocol: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + type: object + networkPolicyManagement: + default: Managed + enum: + - Managed + - Unmanaged + type: string + podTemplate: + properties: + metadata: + properties: + 
annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + 
preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + 
type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + 
required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + 
x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + 
- type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: 
boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + 
devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: 
object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + 
x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + 
items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + 
windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + 
recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + hostname: + type: string + hostnameOverride: + type: string + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - 
type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: 
object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - 
name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: 
string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + 
x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + items: + properties: + name: + type: string + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + runtimeClassName: + type: string 
+ schedulerName: + type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxChangePolicy: + type: string + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + 
matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: 
boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: 
object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: 
+ - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + 
x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + properties: + certificateChainPath: + type: string + credentialBundlePath: + type: string + keyPath: + type: string + keyType: + type: string + maxExpirationSeconds: + format: int32 + type: integer + signerName: + type: string + userAnnotations: + additionalProperties: + type: string + type: object + required: + - keyType + - signerName + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + 
default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + workloadRef: + properties: + name: + type: string + podGroup: + type: string + podGroupReplicaKey: + type: string + required: + - name + - podGroup + type: object + required: + - containers + type: object + required: + - spec + type: object + required: + - podTemplate + type: object + status: + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxwarmpools.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxWarmPool + listKind: SandboxWarmPoolList + plural: sandboxwarmpools + 
shortNames: + - swp + singular: sandboxwarmpool + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.readyReplicas + name: Ready + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + replicas: + format: int32 + minimum: 0 + type: integer + sandboxTemplateRef: + properties: + name: + type: string + required: + - name + type: object + updateStrategy: + properties: + type: + default: OnReplenish + enum: + - Recreate + - OnReplenish + type: string + type: object + required: + - replicas + - sandboxTemplateRef + type: object + status: + properties: + readyReplicas: + format: int32 + type: integer + replicas: + format: int32 + type: integer + selector: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/deploy/helm/sandbox-operator/templates/_helpers.tpl b/deploy/helm/sandbox-operator/templates/_helpers.tpl new file mode 100644 index 0000000000..59c5cd1983 --- /dev/null +++ b/deploy/helm/sandbox-operator/templates/_helpers.tpl @@ -0,0 +1,42 @@ +{{/* +Chart name (overridable via nameOverride). +*/}} +{{- define "sandbox-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Chart-name-and-version label. +*/}} +{{- define "sandbox-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels for resources owned by this chart. +*/}} +{{- define "sandbox-operator.labels" -}} +helm.sh/chart: {{ include "sandbox-operator.chart" . }} +app.kubernetes.io/name: {{ include "sandbox-operator.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Validate that the chart is being installed into agent-sandbox-system. The +vendored upstream operator manifest (templates/agent-sandbox-manifest.yaml) +ships its own Namespace object and hardcodes that name across its +ServiceAccount, Service, Deployment, and ClusterRoleBinding. The companion +sandbox-env chart also pins agent-sandbox-system because the operator's RBAC +watches it by default. Installing under any other namespace splits resources +across two namespaces and breaks reconciliation in non-obvious ways — fail +at template time instead. +*/}} +{{- define "sandbox-operator.validateNamespace" -}} +{{- if ne .Release.Namespace "agent-sandbox-system" -}} +{{- fail (printf "sandbox-operator: this chart must be installed into the 'agent-sandbox-system' namespace (got %q). The vendored upstream operator manifest hardcodes that namespace; installing elsewhere splits resources across namespaces. Re-run with --namespace agent-sandbox-system --create-namespace." .Release.Namespace) -}} +{{- end -}} +{{- end }} diff --git a/deploy/helm/sandbox-operator/templates/agent-sandbox-manifest.yaml b/deploy/helm/sandbox-operator/templates/agent-sandbox-manifest.yaml new file mode 100644 index 0000000000..b1cd28cbfb --- /dev/null +++ b/deploy/helm/sandbox-operator/templates/agent-sandbox-manifest.yaml @@ -0,0 +1,262 @@ +# Vendored from kubernetes-sigs/agent-sandbox v0.4.2 via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: controller Deployments, RBAC, Namespace, Service, ServiceAccount. +# +# LOCAL EDIT — preserve when re-running vendor.sh: +# PodSecurity admission labels added to the Namespace below. 
`baseline` +# is enforced (operator controller pod runs without an explicit +# securityContext; `restricted` would block it until that's patched). +# `restricted` is set as warn/audit so violations from sandbox pods or +# the controller surface in audit logs without rejecting admission. +# When the operator's pod spec hardens to `restricted`, flip enforce. +kind: Namespace +apiVersion: v1 +metadata: + name: agent-sandbox-system + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: latest + pod-security.kubernetes.io/audit: restricted + pod-security.kubernetes.io/audit-version: latest + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: agent-sandbox-controller +subjects: +- kind: ServiceAccount + name: agent-sandbox-controller + namespace: agent-sandbox-system +roleRef: + kind: ClusterRole + name: agent-sandbox-controller + apiGroup: rbac.authorization.k8s.io + +--- + +kind: Service +apiVersion: v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller +spec: + selector: + app: agent-sandbox-controller + ports: + - name: metrics + port: 8080 + targetPort: metrics + protocol: TCP + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller +spec: + replicas: 1 + selector: + matchLabels: + app: agent-sandbox-controller + template: + metadata: + labels: + app: agent-sandbox-controller + spec: + serviceAccountName: agent-sandbox-controller + containers: + - name: agent-sandbox-controller + image: 
registry.k8s.io/agent-sandbox/agent-sandbox-controller:v0.4.2 + args: + - --leader-elect=true + - --extensions + ports: + - name: metrics + containerPort: 8080 + protocol: TCP + - name: healthz + containerPort: 8081 + protocol: TCP +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agent-sandbox-controller +rules: +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes/finalizers + - sandboxes/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - events.k8s.io + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agent-sandbox-controller-extensions +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions.agents.x-k8s.io + resources: + - sandboxclaims + - sandboxtemplates + - sandboxwarmpools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions.agents.x-k8s.io + resources: + - sandboxclaims/finalizers + - sandboxclaims/status + - sandboxtemplates/finalizers + - 
sandboxtemplates/status + - sandboxwarmpools/finalizers + - sandboxwarmpools/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: agent-sandbox-controller-extensions +subjects: +- kind: ServiceAccount + name: agent-sandbox-controller + namespace: agent-sandbox-system +roleRef: + kind: ClusterRole + name: agent-sandbox-controller-extensions + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/helm/sandbox-operator/templates/validations.yaml b/deploy/helm/sandbox-operator/templates/validations.yaml new file mode 100644 index 0000000000..b70184a31a --- /dev/null +++ b/deploy/helm/sandbox-operator/templates/validations.yaml @@ -0,0 +1,4 @@ +{{- /* +Chart-level validations. Renders no resources. +*/ -}} +{{- include "sandbox-operator.validateNamespace" . -}} diff --git a/deploy/helm/sandbox-operator/values.yaml b/deploy/helm/sandbox-operator/values.yaml new file mode 100644 index 0000000000..9ec4fd1006 --- /dev/null +++ b/deploy/helm/sandbox-operator/values.yaml @@ -0,0 +1,12 @@ +# Default values for the sandbox-operator chart. +# +# This chart deliberately exposes NO tunables. It is a thin packaging of +# the vendored upstream kubernetes-sigs/agent-sandbox operator + CRDs. +# All Studio-specific configuration (sandbox image, resource ceilings, +# NetworkPolicy egress, preview Gateway, warm pool, ...) lives in the +# companion sandbox-env chart, which is installed once per environment +# alongside this one. +# +# If you need to override the controller image, replicas, or args, edit +# templates/agent-sandbox-manifest.yaml — but prefer re-running +# vendor.sh against a newer upstream release first. 
diff --git a/deploy/helm/sandbox-operator/vendor.sh b/deploy/helm/sandbox-operator/vendor.sh new file mode 100755 index 0000000000..ff3bede500 --- /dev/null +++ b/deploy/helm/sandbox-operator/vendor.sh @@ -0,0 +1,301 @@ +#!/usr/bin/env bash +# Re-vendor kubernetes-sigs/agent-sandbox release assets into this subchart. +# +# Upstream ships raw multi-doc YAML (manifest.yaml + extensions.yaml), not a +# Helm chart. We split by kind: CustomResourceDefinition docs land in crds/, +# everything else in templates/ so Helm treats CRDs with its install-only +# lifecycle (see README.md for the upgrade caveat). +# +# Integrity: every supported upstream version is paired with a sha256 in +# KNOWN_CHECKSUMS below. The script refuses to write outputs unless every +# downloaded asset matches its pinned digest — this is the only line of +# defense against a swapped GitHub release asset (compromised maintainer +# account, credential theft, etc.). To bump: +# 1. Run: ./vendor.sh vX.Y.Z — it will fail with "no pinned checksum" +# 2. Compute sha256: shasum -a 256 manifest.yaml extensions.yaml +# 3. Verify the values out-of-band (release notes, signatures if any). +# 4. Add the entry to KNOWN_CHECKSUMS, commit, re-run. +# +# Usage: ./vendor.sh [vX.Y.Z] (default v0.4.2 — must match appVersion) +set -euo pipefail + +UPSTREAM_VERSION="${1:-v0.4.2}" +REPO="kubernetes-sigs/agent-sandbox" + +# Pinned sha256 digests for `${VERSION}:${ASSET}`. Keep entries sorted by +# version. Verify externally before adding a new row — anything past this +# table is implicitly trusted. 
+declare -A KNOWN_CHECKSUMS=( + ["v0.4.2:manifest.yaml"]="93cb43a90b9093c84a7529a7dbeca409fcd944746df00b52e8a2781c237c6e18" + ["v0.4.2:extensions.yaml"]="6ddcd6ce2d78714a5815d4c4304df858a075e0ed8fee971966b31af548c011bb" +) + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CRDS_FILE="${SCRIPT_DIR}/crds/agent-sandbox-crds.yaml" +TMPL_FILE="${SCRIPT_DIR}/templates/agent-sandbox-manifest.yaml" + +WORK="$(mktemp -d)" +trap 'rm -rf "${WORK}"' EXIT + +log() { printf "\033[1;34m[vendor]\033[0m %s\n" "$*"; } +err() { printf "\033[1;31m[vendor]\033[0m %s\n" "$*" >&2; } + +# Refuse to overwrite locally-modified outputs without warning. The vendor +# script regenerates files in-place; an in-progress local edit would be +# silently obliterated. +if ! git -C "${SCRIPT_DIR}" diff --quiet -- crds templates 2>/dev/null \ + || ! git -C "${SCRIPT_DIR}" diff --cached --quiet -- crds templates 2>/dev/null; then + err "uncommitted changes under crds/ or templates/ — commit or stash before re-vendoring" + exit 1 +fi + +verify_checksum() { + local file="$1" expected="$2" actual + actual="$(shasum -a 256 "${file}" | awk '{print $1}')" + if [ "${actual}" != "${expected}" ]; then + err "checksum mismatch for $(basename "${file}")" + err " expected: ${expected}" + err " actual: ${actual}" + err " the upstream release asset has changed since this checksum was pinned;" + err " verify the new digest out-of-band before updating KNOWN_CHECKSUMS" + exit 1 + fi +} + +require_checksum() { + local key="$1" + if [ -z "${KNOWN_CHECKSUMS[$key]:-}" ]; then + err "no pinned checksum for ${key}" + err " to bump: download the asset manually, compute shasum -a 256, verify" + err " against upstream release notes, then add a row to KNOWN_CHECKSUMS" + exit 1 + fi + printf "%s" "${KNOWN_CHECKSUMS[$key]}" +} + +log "fetching ${REPO}@${UPSTREAM_VERSION}" +MANIFEST_SHA="$(require_checksum "${UPSTREAM_VERSION}:manifest.yaml")" +EXTENSIONS_SHA="$(require_checksum "${UPSTREAM_VERSION}:extensions.yaml")" + 
+curl -fsSLo "${WORK}/manifest.yaml" \ + "https://github.com/${REPO}/releases/download/${UPSTREAM_VERSION}/manifest.yaml" +curl -fsSLo "${WORK}/extensions.yaml" \ + "https://github.com/${REPO}/releases/download/${UPSTREAM_VERSION}/extensions.yaml" + +verify_checksum "${WORK}/manifest.yaml" "${MANIFEST_SHA}" +verify_checksum "${WORK}/extensions.yaml" "${EXTENSIONS_SHA}" +log "checksums verified" + +# Merge the two upstream files' controller Deployments into one. +# +# Upstream ships manifest.yaml + extensions.yaml as two install paths: +# manifest.yaml alone for base mode (Sandbox reconciler only), or both +# files applied in order, where extensions.yaml's Deployment overwrites +# manifest.yaml's same-named Deployment to add `--extensions` and pull in +# the SandboxClaim / SandboxTemplate / SandboxWarmPool reconcilers. +# Concatenating them into one chart breaks that override: helm/kubectl +# applies one of the duplicates and the other silently disappears, so +# only one controller mode actually runs. The leader-election lock is +# hardcoded in the binary (no flag to override the lock name), so running +# them as two distinct Deployments doesn't work either — only one would +# ever be the leader. Running a single binary with `--extensions=true` +# enables ALL reconcilers in one process, which is what we want. +# +# Two transformations, both fail-loud if the input shape changes: +# 1. Drop the `kind: Deployment` doc from extensions.yaml — keep its +# ClusterRole / ClusterRoleBinding (those are the extensions RBAC). +# 2. Insert `- --extensions` after `- --leader-elect=true` in manifest.yaml's +# Deployment args. +# +# Done after checksum verification on purpose: the checksum proves what +# upstream shipped; this transformation is an intentional downstream patch. 
+log "dropping duplicate Deployment from extensions.yaml" +awk ' + function flush( i, is_dep) { + if (n == 0) return + is_dep = 0 + for (i = 1; i <= n; i++) { + if (buf[i] ~ /^kind:[[:space:]]*Deployment[[:space:]]*$/) { is_dep = 1; break } + } + if (!is_dep) { + for (i = 1; i <= n; i++) print buf[i] + print "---" + } + n = 0 + } + /^---[[:space:]]*$/ { flush(); next } + { buf[++n] = $0 } + END { flush() } +' "${WORK}/extensions.yaml" > "${WORK}/extensions.patched.yaml" +sed -i.bak -e '$d' "${WORK}/extensions.patched.yaml" && rm "${WORK}/extensions.patched.yaml.bak" +mv "${WORK}/extensions.patched.yaml" "${WORK}/extensions.yaml" +if grep -q '^kind:[[:space:]]*Deployment' "${WORK}/extensions.yaml"; then + err "post-patch: a Deployment doc still exists in extensions.yaml" + err " inspect ${WORK}/extensions.yaml and update the awk patch in this script" + exit 1 +fi + +log "adding --extensions to manifest.yaml Deployment args" +awk ' + function flush( i, is_dep, indent) { + if (n == 0) return + is_dep = 0 + for (i = 1; i <= n; i++) { + if (buf[i] ~ /^kind:[[:space:]]*Deployment[[:space:]]*$/) { is_dep = 1; break } + } + for (i = 1; i <= n; i++) { + print buf[i] + if (is_dep && buf[i] ~ /^[[:space:]]*-[[:space:]]*"?--leader-elect=true"?[[:space:]]*$/) { + match(buf[i], /^[[:space:]]*/) + indent = substr(buf[i], RSTART, RLENGTH) + print indent "- --extensions" + } + } + print "---" + n = 0 + } + /^---[[:space:]]*$/ { flush(); next } + { buf[++n] = $0 } + END { flush() } +' "${WORK}/manifest.yaml" > "${WORK}/manifest.patched.yaml" +sed -i.bak -e '$d' "${WORK}/manifest.patched.yaml" && rm "${WORK}/manifest.patched.yaml.bak" +mv "${WORK}/manifest.patched.yaml" "${WORK}/manifest.yaml" +if ! 
grep -q '^[[:space:]]*-[[:space:]]*--extensions[[:space:]]*$' "${WORK}/manifest.yaml"; then + err "post-patch: --extensions arg was not added to manifest.yaml Deployment" + err " inspect ${WORK}/manifest.yaml and update the awk patch in this script" + exit 1 +fi + +# Inject PodSecurity admission labels into the agent-sandbox-system Namespace. +# +# Upstream ships a bare Namespace; we enforce `baseline` and warn/audit on +# `restricted` so violations from sandbox pods or the controller surface in +# audit logs without rejecting admission. When the operator's pod spec +# hardens to restricted, flip enforce. See README + the LOCAL EDIT comment +# in HEADER_TMPL below. +log "injecting PodSecurity admission labels into agent-sandbox-system Namespace" +awk ' + function flush( i, is_target, has_kind, name_line, indent) { + if (n == 0) return + is_target = 0 + has_kind = 0 + name_line = 0 + for (i = 1; i <= n; i++) { + if (buf[i] ~ /^kind:[[:space:]]*Namespace[[:space:]]*$/) has_kind = 1 + if (has_kind && buf[i] ~ /^[[:space:]]+name:[[:space:]]*agent-sandbox-system[[:space:]]*$/) { + is_target = 1 + name_line = i + break + } + } + if (!is_target) { + for (i = 1; i <= n; i++) print buf[i] + print "---" + n = 0 + return + } + for (i = 1; i <= n; i++) { + print buf[i] + if (i == name_line) { + match(buf[i], /^[[:space:]]*/) + indent = substr(buf[i], RSTART, RLENGTH) + print indent "labels:" + print indent " pod-security.kubernetes.io/enforce: baseline" + print indent " pod-security.kubernetes.io/enforce-version: latest" + print indent " pod-security.kubernetes.io/warn: restricted" + print indent " pod-security.kubernetes.io/warn-version: latest" + print indent " pod-security.kubernetes.io/audit: restricted" + print indent " pod-security.kubernetes.io/audit-version: latest" + } + } + print "---" + n = 0 + } + /^---[[:space:]]*$/ { flush(); next } + { buf[++n] = $0 } + END { flush() } +' "${WORK}/manifest.yaml" > "${WORK}/manifest.patched.yaml" +sed -i.bak -e '$d' 
"${WORK}/manifest.patched.yaml" && rm "${WORK}/manifest.patched.yaml.bak" +mv "${WORK}/manifest.patched.yaml" "${WORK}/manifest.yaml" +if ! grep -q '^[[:space:]]*pod-security.kubernetes.io/enforce:[[:space:]]*baseline[[:space:]]*$' "${WORK}/manifest.yaml"; then + err "post-patch: PodSecurity labels were not injected into the Namespace doc" + err " upstream may have moved the Namespace into extensions.yaml or changed its name;" + err " inspect ${WORK}/manifest.yaml and update the awk patch in this script" + exit 1 +fi +# If upstream ever adds its own labels: block, our awk would produce a +# duplicate `labels:` key and helm lint would fail. That's the canary — +# fix the awk to merge into the existing labels block when it happens. + +# Split each multi-doc YAML by `---` boundaries, classify each doc by kind. +# awk is portable (no yq dependency) and good enough for manifests that only +# need a kind: line scanned. +split_docs() { + local src="$1" crds_out="$2" other_out="$3" + awk -v crds="${crds_out}" -v other="${other_out}" ' + function flush( isCrd, i, out) { + if (n == 0) return + isCrd = 0 + for (i = 1; i <= n; i++) { + if (buf[i] ~ /^kind:[[:space:]]*CustomResourceDefinition[[:space:]]*$/) { + isCrd = 1 + break + } + } + out = isCrd ? crds : other + for (i = 1; i <= n; i++) print buf[i] >> out + print "---" >> out + n = 0 + } + /^---[[:space:]]*$/ { flush(); next } + { buf[++n] = $0 } + END { flush() } + ' "${src}" +} + +log "splitting CRDs from non-CRDs" +: > "${WORK}/crds.yaml" +: > "${WORK}/other.yaml" +split_docs "${WORK}/manifest.yaml" "${WORK}/crds.yaml" "${WORK}/other.yaml" +split_docs "${WORK}/extensions.yaml" "${WORK}/crds.yaml" "${WORK}/other.yaml" + +# Strip trailing empty doc separator so `helm template` doesn't warn. 
+sed -i.bak -e '$d' "${WORK}/crds.yaml" && rm "${WORK}/crds.yaml.bak" +sed -i.bak -e '$d' "${WORK}/other.yaml" && rm "${WORK}/other.yaml.bak" + +mkdir -p "$(dirname "${CRDS_FILE}")" "$(dirname "${TMPL_FILE}")" + +HEADER_CRDS="# Vendored from ${REPO} ${UPSTREAM_VERSION} via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: CustomResourceDefinition docs only. +" + +HEADER_TMPL="# Vendored from ${REPO} ${UPSTREAM_VERSION} via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: controller Deployments, RBAC, Namespace, Service, ServiceAccount. +# +# LOCAL EDIT — preserve when re-running vendor.sh: +# PodSecurity admission labels added to the Namespace below. \`baseline\` +# is enforced (operator controller pod runs without an explicit +# securityContext; \`restricted\` would block it until that's patched). +# \`restricted\` is set as warn/audit so violations from sandbox pods or +# the controller surface in audit logs without rejecting admission. +# When the operator's pod spec hardens to \`restricted\`, flip enforce. +" + +printf "%s" "${HEADER_CRDS}" > "${CRDS_FILE}" +cat "${WORK}/crds.yaml" >> "${CRDS_FILE}" + +printf "%s" "${HEADER_TMPL}" > "${TMPL_FILE}" +cat "${WORK}/other.yaml" >> "${TMPL_FILE}" + +log "wrote $(wc -l <"${CRDS_FILE}") lines -> ${CRDS_FILE}" +log "wrote $(wc -l <"${TMPL_FILE}") lines -> ${TMPL_FILE}" +log "done. 
Remember to:" +log " - update appVersion in Chart.yaml if the version changed" +log " - bump KNOWN_CHECKSUMS in this script when bumping ${UPSTREAM_VERSION}" +log " - bump version in Chart.yaml so .github/workflows/release-sandbox-charts.yaml" +log " publishes a new OCI artifact" +log " - DO NOT track sandbox-operator-*.tgz in git (it's gitignored — the" +log " unpacked tree is the source of truth; the published ghcr.io OCI" +log " artifact is the consumer-facing build)" diff --git a/deploy/helm/Chart.lock b/deploy/helm/studio/Chart.lock similarity index 100% rename from deploy/helm/Chart.lock rename to deploy/helm/studio/Chart.lock diff --git a/deploy/helm/Chart.yaml b/deploy/helm/studio/Chart.yaml similarity index 100% rename from deploy/helm/Chart.yaml rename to deploy/helm/studio/Chart.yaml diff --git a/deploy/helm/README.md b/deploy/helm/studio/README.md similarity index 100% rename from deploy/helm/README.md rename to deploy/helm/studio/README.md diff --git a/deploy/helm/charts/nats-2.12.5.tgz b/deploy/helm/studio/charts/nats-2.12.5.tgz similarity index 100% rename from deploy/helm/charts/nats-2.12.5.tgz rename to deploy/helm/studio/charts/nats-2.12.5.tgz diff --git a/deploy/helm/charts/opentelemetry-collector-0.147.2.tgz b/deploy/helm/studio/charts/opentelemetry-collector-0.147.2.tgz similarity index 100% rename from deploy/helm/charts/opentelemetry-collector-0.147.2.tgz rename to deploy/helm/studio/charts/opentelemetry-collector-0.147.2.tgz diff --git a/deploy/helm/examples/secrets-example.yaml b/deploy/helm/studio/examples/secrets-example.yaml similarity index 100% rename from deploy/helm/examples/secrets-example.yaml rename to deploy/helm/studio/examples/secrets-example.yaml diff --git a/deploy/helm/img/deco-studio-infra-arch.jpg b/deploy/helm/studio/img/deco-studio-infra-arch.jpg similarity index 100% rename from deploy/helm/img/deco-studio-infra-arch.jpg rename to deploy/helm/studio/img/deco-studio-infra-arch.jpg diff --git 
a/deploy/helm/templates/NOTES.txt b/deploy/helm/studio/templates/NOTES.txt similarity index 100% rename from deploy/helm/templates/NOTES.txt rename to deploy/helm/studio/templates/NOTES.txt diff --git a/deploy/helm/templates/_helpers.tpl b/deploy/helm/studio/templates/_helpers.tpl similarity index 100% rename from deploy/helm/templates/_helpers.tpl rename to deploy/helm/studio/templates/_helpers.tpl diff --git a/deploy/helm/templates/configmap-ca-cert.yaml b/deploy/helm/studio/templates/configmap-ca-cert.yaml similarity index 100% rename from deploy/helm/templates/configmap-ca-cert.yaml rename to deploy/helm/studio/templates/configmap-ca-cert.yaml diff --git a/deploy/helm/templates/configmap-s3-sync.yaml b/deploy/helm/studio/templates/configmap-s3-sync.yaml similarity index 100% rename from deploy/helm/templates/configmap-s3-sync.yaml rename to deploy/helm/studio/templates/configmap-s3-sync.yaml diff --git a/deploy/helm/templates/configmap.yaml b/deploy/helm/studio/templates/configmap.yaml similarity index 100% rename from deploy/helm/templates/configmap.yaml rename to deploy/helm/studio/templates/configmap.yaml diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/studio/templates/deployment.yaml similarity index 100% rename from deploy/helm/templates/deployment.yaml rename to deploy/helm/studio/templates/deployment.yaml diff --git a/deploy/helm/templates/hpa.yaml b/deploy/helm/studio/templates/hpa.yaml similarity index 100% rename from deploy/helm/templates/hpa.yaml rename to deploy/helm/studio/templates/hpa.yaml diff --git a/deploy/helm/templates/pvc.yaml b/deploy/helm/studio/templates/pvc.yaml similarity index 100% rename from deploy/helm/templates/pvc.yaml rename to deploy/helm/studio/templates/pvc.yaml diff --git a/deploy/helm/templates/secret.yaml b/deploy/helm/studio/templates/secret.yaml similarity index 100% rename from deploy/helm/templates/secret.yaml rename to deploy/helm/studio/templates/secret.yaml diff --git 
a/deploy/helm/templates/service.yaml b/deploy/helm/studio/templates/service.yaml similarity index 100% rename from deploy/helm/templates/service.yaml rename to deploy/helm/studio/templates/service.yaml diff --git a/deploy/helm/templates/serviceaccount.yaml b/deploy/helm/studio/templates/serviceaccount.yaml similarity index 100% rename from deploy/helm/templates/serviceaccount.yaml rename to deploy/helm/studio/templates/serviceaccount.yaml diff --git a/deploy/helm/templates/validations.yaml b/deploy/helm/studio/templates/validations.yaml similarity index 100% rename from deploy/helm/templates/validations.yaml rename to deploy/helm/studio/templates/validations.yaml diff --git a/deploy/helm/values.yaml b/deploy/helm/studio/values.yaml similarity index 100% rename from deploy/helm/values.yaml rename to deploy/helm/studio/values.yaml diff --git a/packages/sandbox/README.md b/packages/sandbox/README.md index d6cc78d7ee..85b97b71c9 100644 --- a/packages/sandbox/README.md +++ b/packages/sandbox/README.md @@ -69,8 +69,8 @@ for this, you can remove them — they're no longer needed. `agent-sandbox`. Leave unset in dev to let auto-detect pick docker. - `FREESTYLE_API_KEY` — required for the Freestyle runner. Presence also auto-selects it when `STUDIO_SANDBOX_RUNNER` is unset. -- `MESH_SANDBOX_IMAGE` — override the Docker runner image - (default `mesh-sandbox:local`, built from `image/Dockerfile`). +- `STUDIO_SANDBOX_IMAGE` — override the Docker runner image + (default `studio-sandbox:local`, built from `image/Dockerfile`). - `SANDBOX_INGRESS_PORT` (default `7070`) — local Docker ingress bind port. - `SANDBOX_ROOT_URL` — production template for the pod URL. 
Either a bare base (`https://sandboxes.example.com` → handle becomes leading subdomain) diff --git a/packages/sandbox/daemon/config.test.ts b/packages/sandbox/daemon/config.test.ts index 30f7dc83a3..39ad8b0286 100644 --- a/packages/sandbox/daemon/config.test.ts +++ b/packages/sandbox/daemon/config.test.ts @@ -18,6 +18,11 @@ describe("loadConfig", () => { expect(cfg.appRoot).toBe("/app"); expect(cfg.proxyPort).toBe(9000); expect(cfg.pathPrefix).toBe(""); + expect(cfg.cacheDir).toBeNull(); + expect(cfg.gitCacheDir).toBeNull(); + expect(cfg.sandboxCacheKey).toBeNull(); + expect(cfg.nodeModulesCacheDir).toBeNull(); + expect(cfg.nextCacheDir).toBeNull(); }); it("derives pathPrefix from runtime=bun", () => { @@ -90,4 +95,27 @@ describe("loadConfig", () => { expect(cfg.devPort).toBe(4321); expect(cfg.pathPrefix).toBe("export PATH=/opt/bun/bin:$PATH && "); }); + + it("derives cache sub-dirs from CACHE_DIR", () => { + const cfg = loadConfig({ + ...base, + CACHE_DIR: "/mnt/cache", + SANDBOX_CACHE_KEY: "abc123", + }); + expect(cfg.cacheDir).toBe("/mnt/cache"); + expect(cfg.gitCacheDir).toBe("/mnt/cache/git"); + expect(cfg.nodeModulesCacheDir).toBe("/mnt/cache/node_modules"); + expect(cfg.nextCacheDir).toBe("/mnt/cache/next"); + expect(cfg.sandboxCacheKey).toBe("abc123"); + }); + + it("individual overrides take precedence over CACHE_DIR", () => { + const cfg = loadConfig({ + ...base, + CACHE_DIR: "/mnt/cache", + GIT_CACHE_DIR: "/custom/git", + }); + expect(cfg.gitCacheDir).toBe("/custom/git"); + expect(cfg.nodeModulesCacheDir).toBe("/mnt/cache/node_modules"); + }); }); diff --git a/packages/sandbox/daemon/config.ts b/packages/sandbox/daemon/config.ts index 6935c57107..ea4cec81fd 100644 --- a/packages/sandbox/daemon/config.ts +++ b/packages/sandbox/daemon/config.ts @@ -61,6 +61,16 @@ export function loadConfig(env: Record): Config { } const appRoot = env.APP_ROOT ?? "/app"; + const cacheDir = env.CACHE_DIR ?? 
null; + // Individual overrides take precedence; fall back to sub-dirs of cacheDir. + const gitCacheDir = + env.GIT_CACHE_DIR ?? (cacheDir ? `${cacheDir}/git` : null); + const sandboxCacheKey = env.SANDBOX_CACHE_KEY ?? null; + const nodeModulesCacheDir = + env.NODE_MODULES_CACHE_DIR ?? + (cacheDir ? `${cacheDir}/node_modules` : null); + const nextCacheDir = + env.NEXT_CACHE_DIR ?? (cacheDir ? `${cacheDir}/next` : null); const pathPrefix = runtime === "bun" @@ -83,5 +93,10 @@ export function loadConfig(env: Record): Config { appRoot, proxyPort, pathPrefix, + cacheDir, + gitCacheDir, + sandboxCacheKey, + nodeModulesCacheDir, + nextCacheDir, }); } diff --git a/packages/sandbox/daemon/constants.ts b/packages/sandbox/daemon/constants.ts index 328b911811..e702279eb9 100644 --- a/packages/sandbox/daemon/constants.ts +++ b/packages/sandbox/daemon/constants.ts @@ -19,10 +19,24 @@ export const PACKAGE_MANAGER_DAEMON_CONFIG: Record< string, { install: string; runPrefix: string } > = { - npm: { install: "npm install", runPrefix: "npm run" }, - pnpm: { install: "pnpm install", runPrefix: "pnpm run" }, - yarn: { install: "yarn install", runPrefix: "yarn run" }, - bun: { install: "bun install", runPrefix: "bun run" }, + // --prefer-offline: use the shared EFS npm store; skip registry round-trips + // for already-cached packages. --no-fund/--no-audit skip network calls that + // add latency without value in a sandbox. npm ci is intentionally avoided + // here: it removes node_modules before reinstalling, which would wipe the + // shared EFS symlink target before we can write the cache sentinel. + npm: { + install: "npm install --prefer-offline --no-fund --no-audit", + runPrefix: "npm run", + }, + // --frozen-lockfile: skip version resolution and refuse to mutate the + // lockfile. Faster on warm cache; fails fast if lockfile is stale (correct + // behaviour — stale lockfile = wrong cache key anyway). 
+ pnpm: { install: "pnpm install --frozen-lockfile", runPrefix: "pnpm run" }, + yarn: { install: "yarn install --frozen-lockfile", runPrefix: "yarn run" }, + bun: { install: "bun install --frozen-lockfile", runPrefix: "bun run" }, + // deno install (deno 2.x) populates DENO_DIR on the shared EFS volume so + // subsequent sandboxes skip JSR/CDN fetches entirely. No node_modules are + // created; linkNodeModules skips deno automatically. deno: { install: "deno install", runPrefix: "deno task" }, }; diff --git a/packages/sandbox/daemon/entry.ts b/packages/sandbox/daemon/entry.ts index 866b8367e2..4bd7237fff 100644 --- a/packages/sandbox/daemon/entry.ts +++ b/packages/sandbox/daemon/entry.ts @@ -31,6 +31,25 @@ if (!process.env.DAEMON_BOOT_ID) { } const config = loadConfig(process.env); + +// Inject package-manager cache dirs and corepack behaviour into the process +// environment so every subprocess (install, dev server, user scripts) inherits +// them. Done here rather than in the Kubernetes template so the daemon is the +// single source of truth for sandbox runtime behaviour. Uses ??= so an +// explicit container env var still wins (useful in tests / local overrides). +if (config.cacheDir) { + process.env.npm_config_cache ??= `${config.cacheDir}/npm`; + process.env.PNPM_STORE_PATH ??= `${config.cacheDir}/pnpm`; + process.env.YARN_CACHE_FOLDER ??= `${config.cacheDir}/yarn`; + process.env.YARN_GLOBAL_FOLDER ??= `${config.cacheDir}/yarn-global`; + process.env.BUN_INSTALL_CACHE_DIR ??= `${config.cacheDir}/bun`; + process.env.DENO_DIR ??= `${config.cacheDir}/deno`; + process.env.XDG_CACHE_HOME ??= `${config.cacheDir}/xdg`; +} +// Always suppress corepack's interactive download prompt — the daemon owns +// install and the dev server must never block on stdin. 
+process.env.COREPACK_ENABLE_DOWNLOAD_PROMPT ??= "0"; + const dropPrivileges = process.env.DAEMON_DROP_PRIVILEGES === "1"; const broadcaster = new Broadcaster(REPLAY_BYTES); diff --git a/packages/sandbox/daemon/probe.ts b/packages/sandbox/daemon/probe.ts index aa4d19fb5b..3ebbfeefa8 100644 --- a/packages/sandbox/daemon/probe.ts +++ b/packages/sandbox/daemon/probe.ts @@ -1,5 +1,11 @@ import { FAST_PROBE_LIMIT, FAST_PROBE_MS, SLOW_PROBE_MS } from "./constants"; +// Frameworks like Next.js (webpack) compile on first request. HEAD / can block +// for 10-30 seconds while compilation runs. Using a short timeout causes the +// probe to give up, wait another FAST_PROBE_MS, and retry — adding wasted time +// before ready=true. 60 s is enough for any realistic cold webpack compile. +const COMPILE_PROBE_TIMEOUT_MS = 60_000; + export interface ProbeState { ready: boolean; htmlSupport: boolean; @@ -59,7 +65,7 @@ export function startUpstreamProbe(deps: ProbeDeps): ProbeState { try { const res = await fetch(url, { method: "HEAD", - signal: AbortSignal.timeout(5000), + signal: AbortSignal.timeout(COMPILE_PROBE_TIMEOUT_MS), }); const ct = (res.headers.get("content-type") ?? 
"").toLowerCase(); return { diff --git a/packages/sandbox/daemon/setup/cache.ts b/packages/sandbox/daemon/setup/cache.ts new file mode 100644 index 0000000000..e9fca52677 --- /dev/null +++ b/packages/sandbox/daemon/setup/cache.ts @@ -0,0 +1,216 @@ +import { createHash } from "node:crypto"; +import { + existsSync, + mkdirSync, + readFileSync, + readlinkSync, + symlinkSync, + unlinkSync, + utimesSync, +} from "node:fs"; +import { dirname } from "node:path"; +import { PACKAGE_MANAGER_DAEMON_CONFIG } from "../constants"; +import type { Config } from "../types"; +import { spawnShell } from "./spawn-shell"; + +export interface CacheDeps { + config: Config; + dropPrivileges?: boolean; + onChunk: (source: "setup", data: string) => void; +} + +const LOCKFILES = [ + "bun.lockb", + "bun.lock", + "package-lock.json", + "pnpm-lock.yaml", + "yarn.lock", +]; + +function findLockfile(appRoot: string): string | null { + for (const name of LOCKFILES) { + const p = `${appRoot}/${name}`; + if (existsSync(p)) return p; + } + return null; +} + +/** + * Symlinks /app/.next/cache → a per-(userId,projectRef) directory on the + * shared PVC so webpack compilation state persists across sandbox restarts. + * + * Safe without locking: sandbox_runner_state's advisory lock guarantees at + * most one live pod per (userId, projectRef) at any time, so there is never + * more than one writer for a given sandboxCacheKey. 
+ */ +const NEXT_CONFIGS = [ + "next.config.js", + "next.config.ts", + "next.config.mjs", + "next.config.cjs", +]; + +function isNextJsProject(appRoot: string): boolean { + return NEXT_CONFIGS.some((f) => existsSync(`${appRoot}/${f}`)); +} + +export function linkNextCache(deps: CacheDeps): void { + const { config, onChunk } = deps; + if (!config.nextCacheDir || !config.sandboxCacheKey) return; + if (!isNextJsProject(config.appRoot)) return; + + const cacheDir = `${config.nextCacheDir}/${config.sandboxCacheKey}`; + const dotNext = `${config.appRoot}/.next`; + const target = `${dotNext}/cache`; + + try { + mkdirSync(cacheDir, { recursive: true }); + mkdirSync(dotNext, { recursive: true }); + symlinkSync(cacheDir, target); + onChunk( + "setup", + `$ (next.js cache → ${config.sandboxCacheKey.slice(0, 8)}…)\r\n`, + ); + } catch (e) { + const err = e as NodeJS.ErrnoException; + if (err.code !== "EEXIST") { + onChunk( + "setup", + `Warning: failed to link .next/cache: ${err.message}\r\n`, + ); + } + } +} + +/** + * Shares node_modules across sandboxes that have the same lockfile by + * symlinking /app/node_modules to a PVC directory keyed on the lockfile hash. + * + * Uses the same flock double-checked pattern as git mirrors: only one pod + * installs per lockfile hash; others block then reuse the result. + * + * Returns true when node_modules are ready and install can be skipped. + * Returns false when caching is disabled, no lockfile was found, or any + * setup step failed — callers fall through to the normal spawnInstall path. 
+ */ +export async function linkNodeModules(deps: CacheDeps): Promise { + const { config, dropPrivileges, onChunk } = deps; + if (!config.nodeModulesCacheDir || !config.packageManager) return false; + + const pmConfig = PACKAGE_MANAGER_DAEMON_CONFIG[config.packageManager]; + if (!pmConfig?.install) return false; + + const lockfile = findLockfile(config.appRoot); + if (!lockfile) return false; + + let content: Buffer; + try { + content = readFileSync(lockfile); + } catch { + return false; + } + + const lockHash = createHash("sha256") + .update(content) + .digest("hex") + .slice(0, 24); + + // Structure: //node_modules/ + // + // The extra nesting level is required for Node.js module resolution. + // When a postinstall script runs from the real (symlink-resolved) path + // //node_modules/pkg/dist/file.js, Node's + // NODE_MODULES_PATHS algorithm skips the innermost "node_modules" segment + // and correctly looks at /node_modules/ for sibling packages. + // Without this extra level the path IS the node_modules directory and Node + // skips it, causing "Cannot find module" errors in postinstall scripts. + const moduleSlot = `${config.nodeModulesCacheDir}/${lockHash}`; + const cacheDir = `${moduleSlot}/node_modules`; + const flockFile = `${moduleSlot}.lock`; + const sentinel = `${moduleSlot}/.deco_cache_ok`; + const nmPath = `${config.appRoot}/node_modules`; + + try { + mkdirSync(cacheDir, { recursive: true }); + mkdirSync(dirname(flockFile), { recursive: true }); + } catch { + return false; + } + + // Symlink /app/node_modules → cache dir so subsequent installs write + // directly into the shared cache. Check for EEXIST and verify it points + // to our cache dir; a real directory (resume) means skip caching entirely. 
+ let symlinkOk = false; + try { + symlinkSync(cacheDir, nmPath); + symlinkOk = true; + } catch (e) { + const err = e as NodeJS.ErrnoException; + if (err.code === "EEXIST") { + try { + symlinkOk = readlinkSync(nmPath) === cacheDir; + } catch { + symlinkOk = false; + } + } + } + + if (!symlinkOk) { + return false; + } + + // Fast path: another pod already installed and marked the sentinel. + if (existsSync(sentinel)) { + // Touch sentinel so the GC CronJob measures last-used time, not + // creation time, when deciding which cache slots to evict. + try { + const now = new Date(); + utimesSync(sentinel, now, now); + } catch { + // Non-fatal: worst case GC evicts a slot sooner than intended. + } + onChunk( + "setup", + `$ (node_modules cache hit: ${lockHash.slice(0, 8)}…)\r\n`, + ); + return true; + } + + onChunk( + "setup", + `$ (warming node_modules cache: ${lockHash.slice(0, 8)}…)\r\n`, + ); + + const corepack = + "export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 && corepack enable && "; + const install = `${config.pathPrefix}cd ${config.appRoot} && ${corepack}${pmConfig.install}`; + + // Double-checked lock: block until the current holder finishes, then + // re-check the sentinel before running install. + const cmd = [ + `flock -x ${JSON.stringify(flockFile)}`, + `-c 'if [ ! -f ${JSON.stringify(sentinel)} ]; then`, + ` ${install} &&`, + ` touch ${JSON.stringify(sentinel)};`, + `fi'`, + ].join(" "); + + const code = await spawnShell(cmd, { dropPrivileges, onChunk }); + if (code !== 0) { + onChunk( + "setup", + `\r\nWarning: node_modules cache install failed (exit ${code}), falling back to direct install\r\n`, + ); + // Remove the symlink so the fallback install writes to real node_modules + // on ephemeral storage instead of the (possibly full) PVC slot. + try { + unlinkSync(nmPath); + } catch { + // Non-fatal: fallback install will likely also fail but the error will + // be surfaced to the user. 
+ } + return false; + } + + return true; +} diff --git a/packages/sandbox/daemon/setup/clone.ts b/packages/sandbox/daemon/setup/clone.ts index 80a39bd01d..639f17cb9e 100644 --- a/packages/sandbox/daemon/setup/clone.ts +++ b/packages/sandbox/daemon/setup/clone.ts @@ -1,6 +1,10 @@ -import { spawn } from "node:child_process"; -import { DECO_UID, DECO_GID } from "../constants"; +import { createHash } from "node:crypto"; +import { mkdirSync, statSync } from "node:fs"; +import { dirname } from "node:path"; import type { Config } from "../types"; +import { spawnShell } from "./spawn-shell"; + +const MIRROR_TTL_MS = 60 * 60 * 1000; // 1 hour export interface CloneDeps { config: Config; @@ -8,32 +12,143 @@ export interface CloneDeps { onChunk: (source: "setup", data: string) => void; } +/** + * Derives a stable, credential-free filesystem path for a repo's mirror. + * e.g. https://token@github.com/org/repo.git → /github.com/org/repo + */ +function mirrorPath(gitCacheDir: string, cloneUrl: string): string { + try { + const url = new URL(cloneUrl); + url.username = ""; + url.password = ""; + const canonical = (url.hostname + url.pathname).replace(/\.git$/, ""); + return `${gitCacheDir}/${canonical.replace(/[^a-zA-Z0-9/_-]/g, "_")}`; + } catch { + const hash = createHash("sha256") + .update(cloneUrl) + .digest("hex") + .slice(0, 16); + return `${gitCacheDir}/${hash}`; + } +} + /** Resolves to exit code (0 on success). Emits chunks via `onChunk`. 
*/ export function spawnClone(deps: CloneDeps): Promise { - const { config } = deps; + const { config, dropPrivileges, onChunk } = deps; + + if (config.gitCacheDir) { + return spawnCloneWithReference({ config, dropPrivileges, onChunk }); + } + const cmd = `git clone --depth 1 ${config.cloneUrl} ${config.appRoot}`; - const label = `$ git clone --depth 1 ${config.repoName} ${config.appRoot}`; - deps.onChunk("setup", `${label}\r\n`); - - return new Promise((resolve) => { - const opts: Parameters[2] = { - stdio: ["ignore", "pipe", "pipe"], - }; - if (deps.dropPrivileges) { - (opts as { uid: number; gid: number }).uid = DECO_UID; - (opts as { uid: number; gid: number }).gid = DECO_GID; - } - const child = spawn("script", ["-q", "-c", cmd, "/dev/null"], opts); - child.stdout?.on("data", (c: Buffer) => - deps.onChunk("setup", c.toString("utf-8")), + onChunk( + "setup", + `$ git clone --depth 1 ${config.repoName} ${config.appRoot}\r\n`, + ); + return spawnShell(cmd, { dropPrivileges, onChunk }); +} + +async function spawnCloneWithReference(deps: CloneDeps): Promise { + const { config, dropPrivileges, onChunk } = deps; + const mirror = mirrorPath(config.gitCacheDir!, config.cloneUrl!); + const headFile = `${mirror}/HEAD`; + const lockFile = `${mirror}.lock`; + + try { + mkdirSync(dirname(mirror), { recursive: true }); + } catch { + // PVC full or unavailable — skip the mirror and fall back to direct clone. + onChunk( + "setup", + `\r\nWarning: git cache dir unavailable, falling back to direct clone\r\n`, ); - child.stderr?.on("data", (c: Buffer) => - deps.onChunk("setup", c.toString("utf-8")), + onChunk( + "setup", + `$ git clone --depth 1 ${config.repoName} ${config.appRoot}\r\n`, ); - child.on("error", (err) => { - deps.onChunk("setup", `\r\nSpawn failed: ${err.message}\r\n`); - resolve(-1); - }); - child.on("close", (code) => resolve(code ?? 
-1)); - }); + return spawnShell( + `git clone --depth 1 ${config.cloneUrl} ${config.appRoot}`, + { dropPrivileges, onChunk }, + ); + } + + // Check mirror state before acquiring flock (cheap, best-effort). + let headMtimeMs: number | null = null; + try { + headMtimeMs = statSync(headFile).mtimeMs; + } catch { + // Mirror not yet created — will be initialised inside the flock below. + } + + const mirrorMissing = headMtimeMs === null; + const mirrorStale = + headMtimeMs !== null && Date.now() - headMtimeMs > MIRROR_TTL_MS; + + if (mirrorMissing) { + // Cold path: shallow bare clone. --depth 1 is fine here because we never + // pass this mirror to `git clone --reference` (which rejects shallow repos + // with "reference repository is shallow"). Instead we wire it as an + // alternate manually — git looks up objects from the pack without a + // shallow check. Double-checked inside the flock so concurrent pods don't + // race on creation. + onChunk("setup", `$ (warming git mirror for ${config.repoName})\r\n`); + const createCmd = [ + `flock -x ${JSON.stringify(lockFile)}`, + `-c 'if [ ! -f ${JSON.stringify(headFile)} ]; then`, + ` git clone --bare --depth 1 --quiet ${config.cloneUrl} ${JSON.stringify(mirror)};`, + `fi'`, + ].join(" "); + const code = await spawnShell(createCmd, { dropPrivileges, onChunk }); + if (code !== 0) { + onChunk( + "setup", + `\r\nWarning: git mirror setup failed (exit ${code}), falling back to direct clone\r\n`, + ); + onChunk( + "setup", + `$ git clone --depth 1 ${config.repoName} ${config.appRoot}\r\n`, + ); + return spawnShell( + `git clone --depth 1 ${config.cloneUrl} ${config.appRoot}`, + { dropPrivileges, onChunk }, + ); + } + } else if (mirrorStale) { + // TTL refresh. --depth 1 is consistent with creation (mirror stays shallow). + // Failure is non-fatal — fall through with the stale mirror. + // Touch HEAD after fetch so GC TTL reflects last-used time. 
+    onChunk("setup", `$ (refreshing git mirror for ${config.repoName})\r\n`);
+    const fetchCmd = [
+      `flock -x ${JSON.stringify(lockFile)}`,
+      `-c 'git -C ${JSON.stringify(mirror)} fetch --depth 1 --prune --quiet 2>/dev/null;`,
+      `touch ${JSON.stringify(headFile)} 2>/dev/null || true'`,
+    ].join(" ");
+    await spawnShell(fetchCmd, { dropPrivileges, onChunk });
+  }
+
+  // Manually wire the mirror as an alternates source instead of using
+  // `git clone --reference` (which rejects shallow repos). The alternates
+  // file tells git where to look for objects before contacting the remote:
+  // objects already in the mirror are served from EFS; only new/changed
+  // objects are fetched from GitHub; misses fall through to origin. checkout
+  // -B lands HEAD on the branch (branch -M would fail on a detached HEAD).
+  const mirrorObjects = `${mirror}/objects`;
+  const appRoot = JSON.stringify(config.appRoot);
+  const alternatesDir = `${config.appRoot}/.git/objects/info`;
+  const fetchRef = JSON.stringify(config.branch ?? "HEAD");
+
+  onChunk("setup", `$ git init ${config.repoName} ${config.appRoot}\r\n`);
+
+  const cloneCmd = [
+    `git init -q ${appRoot}`,
+    `&& mkdir -p ${JSON.stringify(alternatesDir)}`,
+    `&& echo ${JSON.stringify(mirrorObjects)} > ${JSON.stringify(`${alternatesDir}/alternates`)}`,
+    `&& git -C ${appRoot} remote add origin ${JSON.stringify(config.cloneUrl!)}`,
+    `&& git -C ${appRoot} fetch --depth 1 --quiet origin ${fetchRef}`,
+    `&& git -C ${appRoot} checkout ${config.branch ? `-B ${JSON.stringify(config.branch)} ` : ""}FETCH_HEAD`,
+    ...(config.branch
+      ? 
[`&& git -C ${appRoot} checkout -B ${JSON.stringify(config.branch)}`]
+      : []),
+    // `checkout -B <branch>` instead of `branch -M <branch>`: the
+    // `checkout FETCH_HEAD` above leaves HEAD detached, and `git branch -M`
+    // with a single argument dies with "cannot rename the current branch
+    // while not on any branch" in that state, failing the whole && chain.
+    // `checkout -B` creates the branch at HEAD and attaches to it directly.
+  ].join(" ");
+  return spawnShell(cloneCmd, { dropPrivileges, onChunk });
 }
diff --git a/packages/sandbox/daemon/setup/install.ts b/packages/sandbox/daemon/setup/install.ts
index 6d616cb73e..3894ef2738 100644
--- a/packages/sandbox/daemon/setup/install.ts
+++ b/packages/sandbox/daemon/setup/install.ts
@@ -1,10 +1,6 @@
-import { spawn } from "node:child_process";
-import {
-  DECO_UID,
-  DECO_GID,
-  PACKAGE_MANAGER_DAEMON_CONFIG,
-} from "../constants";
+import { PACKAGE_MANAGER_DAEMON_CONFIG } from "../constants";
 import type { Config } from "../types";
+import { spawnShell } from "./spawn-shell";
 
 export interface InstallDeps {
   config: Config;
@@ -13,33 +9,13 @@ export interface InstallDeps {
 }
 
 export function spawnInstall(deps: InstallDeps): Promise | null {
-  const { config } = deps;
+  const { config, dropPrivileges, onChunk } = deps;
   if (!config.packageManager) return null;
   const pmConfig = PACKAGE_MANAGER_DAEMON_CONFIG[config.packageManager];
-  if (!pmConfig) return null;
+  if (!pmConfig?.install) return null;
   const corepack =
     "export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 && corepack enable && ";
   const cmd = `${config.pathPrefix}cd ${config.appRoot} && ${corepack}${pmConfig.install}`;
-  deps.onChunk("setup", `\r\n$ ${pmConfig.install}\r\n`);
-  return new Promise((resolve) => {
-    const opts: Parameters[2] = {
-      stdio: ["ignore", "pipe", "pipe"],
-    };
-    if (deps.dropPrivileges) {
-      (opts as { uid: number; gid: number }).uid = DECO_UID;
-      (opts as { uid: number; gid: number }).gid = DECO_GID;
-    }
-    const child = spawn("script", ["-q", "-c", cmd, "/dev/null"], opts);
-    child.stdout?.on("data", (c: Buffer) =>
-      deps.onChunk("setup", c.toString("utf-8")),
-    );
-    child.stderr?.on("data", (c: Buffer) =>
-      deps.onChunk("setup", c.toString("utf-8")),
-    );
-    child.on("error", (err) => {
-      deps.onChunk("setup", `\r\nSpawn failed: ${err.message}\r\n`);
-      resolve(-1);
-    });
-    child.on("close", (code) =>
resolve(code ?? -1)); - }); + onChunk("setup", `\r\n$ ${pmConfig.install}\r\n`); + return spawnShell(cmd, { dropPrivileges, onChunk }); } diff --git a/packages/sandbox/daemon/setup/orchestrator.test.ts b/packages/sandbox/daemon/setup/orchestrator.test.ts index 3ef38b0286..7e02f10eb3 100644 --- a/packages/sandbox/daemon/setup/orchestrator.test.ts +++ b/packages/sandbox/daemon/setup/orchestrator.test.ts @@ -18,6 +18,11 @@ const cfg: Config = { daemonBootId: "b", proxyPort: 9000, pathPrefix: "", + cacheDir: null, + gitCacheDir: null, + sandboxCacheKey: null, + nodeModulesCacheDir: null, + nextCacheDir: null, }; describe("SetupOrchestrator", () => { diff --git a/packages/sandbox/daemon/setup/orchestrator.ts b/packages/sandbox/daemon/setup/orchestrator.ts index 4bd89909ce..f77cffc043 100644 --- a/packages/sandbox/daemon/setup/orchestrator.ts +++ b/packages/sandbox/daemon/setup/orchestrator.ts @@ -8,6 +8,7 @@ import { spawnClone } from "./clone"; import { configureGitIdentity } from "./identity"; import { resolveBranch } from "./branch"; import { spawnInstall } from "./install"; +import { linkNextCache, linkNodeModules } from "./cache"; export interface SetupState { running: boolean; @@ -78,21 +79,30 @@ export class SetupOrchestrator { } } - const installPromise = spawnInstall({ + linkNextCache({ config, onChunk, dropPrivileges }); + + const nmCached = await linkNodeModules({ config, onChunk, dropPrivileges, }); - if (installPromise) { - const code = await installPromise; - if (code !== 0) { - broadcaster.broadcastChunk( - "setup", - `\r\nInstall failed with exit code ${code}\r\n`, - ); - this.state.running = false; - this.state.done = true; - return true; + if (!nmCached) { + const installPromise = spawnInstall({ + config, + onChunk, + dropPrivileges, + }); + if (installPromise) { + const code = await installPromise; + if (code !== 0) { + broadcaster.broadcastChunk( + "setup", + `\r\nInstall failed with exit code ${code}\r\n`, + ); + this.state.running = false; + 
this.state.done = true; + return true; + } } } diff --git a/packages/sandbox/daemon/setup/spawn-shell.ts b/packages/sandbox/daemon/setup/spawn-shell.ts new file mode 100644 index 0000000000..23988e3ea0 --- /dev/null +++ b/packages/sandbox/daemon/setup/spawn-shell.ts @@ -0,0 +1,82 @@ +import { spawn } from "node:child_process"; +import { DECO_UID, DECO_GID } from "../constants"; + +export interface SpawnShellOpts { + dropPrivileges?: boolean; + onChunk: (source: "setup", data: string) => void; +} + +const EXIT_SENTINEL = "__DAEMON_EXIT__:"; +const EXIT_RE = /\r?\n?__DAEMON_EXIT__:(\d+)\r?\n?/; + +/** + * Runs `cmd` inside `script -q -c … /dev/null` to get PTY output (so tools + * like git print coloured progress) while still returning the real exit code. + * + * `script` on this Debian image always exits 0 regardless of the wrapped + * command's status. We work around this by appending `; echo __DAEMON_EXIT__:$?` + * so the real code travels through the output stream, then strip the sentinel + * before broadcasting chunks to the caller. + */ +export function spawnShell(cmd: string, opts: SpawnShellOpts): Promise { + const { dropPrivileges, onChunk } = opts; + + const spawnOpts: Parameters[2] = { + stdio: ["ignore", "pipe", "pipe"], + }; + if (dropPrivileges) { + (spawnOpts as { uid: number; gid: number }).uid = DECO_UID; + (spawnOpts as { uid: number; gid: number }).gid = DECO_GID; + } + + // Append exit-code sentinel so the real status survives script's exit(0). + const wrapped = `${cmd}; echo ${EXIT_SENTINEL}$?`; + + return new Promise((resolve) => { + let capturedCode: number | null = null; + let tail = ""; + + function flush(text: string, final = false) { + // Buffer incomplete last lines across chunks so the sentinel is never + // split across two data events. 
+      const combined = tail + text;
+      const match = EXIT_RE.exec(combined);
+      // A match sitting flush against the end of the buffer without its
+      // trailing newline may be a partially-delivered sentinel — e.g.
+      // "__DAEMON_EXIT__:1" with "27\r\n" still in flight would otherwise
+      // capture exit code 1 instead of 127 and leak "27" into the output.
+      // Accept the match only once it is terminated by a newline, is
+      // followed by more data, or no more data can arrive (final flush);
+      // otherwise let the partial-line buffering below hold it in `tail`.
+      const complete =
+        match !== null &&
+        (final ||
+          match.index + match[0].length < combined.length ||
+          match[0].endsWith("\n"));
+      if (match && complete) {
+        capturedCode = parseInt(match[1], 10);
+        const clean =
+          combined.slice(0, match.index) +
+          combined.slice(match.index + match[0].length);
+        if (clean) onChunk("setup", clean);
+        tail = "";
+      } else if (final) {
+        if (combined) onChunk("setup", combined);
+        tail = "";
+      } else {
+        // Keep the last partial line buffered in case the sentinel straddles chunks.
+        const lastNl = combined.lastIndexOf("\n");
+        if (lastNl >= 0) {
+          onChunk("setup", combined.slice(0, lastNl + 1));
+          tail = combined.slice(lastNl + 1);
+        } else {
+          tail = combined;
+        }
+      }
+    }
+
+    const child = spawn(
+      "script",
+      ["-q", "-c", wrapped, "/dev/null"],
+      spawnOpts,
+    );
+    child.stdout?.on("data", (c: Buffer) => flush(c.toString("utf-8")));
+    child.stderr?.on("data", (c: Buffer) => flush(c.toString("utf-8")));
+    child.on("error", (err) => {
+      onChunk("setup", `\r\nSpawn failed: ${err.message}\r\n`);
+      resolve(-1);
+    });
+    child.on("close", () => {
+      flush("", true);
+      resolve(capturedCode ?? -1);
+    });
+  });
+}
diff --git a/packages/sandbox/daemon/types.ts b/packages/sandbox/daemon/types.ts
index f3891597a5..5b2097e8ce 100644
--- a/packages/sandbox/daemon/types.ts
+++ b/packages/sandbox/daemon/types.ts
@@ -16,6 +16,24 @@ export interface Config {
   readonly proxyPort: number;
   /** Derived from `runtime`; e.g. "export PATH=/opt/bun/bin:$PATH && " when bun. */
   readonly pathPrefix: string;
+  /**
+   * Root of the shared cache volume (e.g. /mnt/cache). The daemon derives
+   * all sub-directories from this single value and injects the corresponding
+   * package-manager env vars into every subprocess it spawns. Null = no cache.
+   */
+  readonly cacheDir: string | null;
+  /** Root dir for git reference mirrors. Derived from cacheDir unless overridden. */
+  readonly gitCacheDir: string | null;
+  /**
+   * Stable hash of (userId, projectRef) injected by the runner.
Used to key + * the per-user-branch .next/cache directory on the shared PVC. Null when + * cache is disabled or the runner didn't provide the key. + */ + readonly sandboxCacheKey: string | null; + /** Base dir for shared node_modules on the PVC. Derived from cacheDir unless overridden. */ + readonly nodeModulesCacheDir: string | null; + /** Base dir for per-sandbox Next.js webpack caches on the PVC. Derived from cacheDir unless overridden. */ + readonly nextCacheDir: string | null; } export interface BroadcastSource { diff --git a/packages/sandbox/image/Dockerfile b/packages/sandbox/image/Dockerfile index 384edaca6f..a13e6184e1 100644 --- a/packages/sandbox/image/Dockerfile +++ b/packages/sandbox/image/Dockerfile @@ -1,4 +1,4 @@ -# Build: docker build -t mesh-sandbox:local -f packages/sandbox/image/Dockerfile packages/sandbox +# Build: docker build -t studio-sandbox:local -f packages/sandbox/image/Dockerfile packages/sandbox FROM oven/bun:1.3.13-debian ARG NODE_MAJOR=22 diff --git a/packages/sandbox/package.json b/packages/sandbox/package.json index ae0376ce98..dd027f7bda 100644 --- a/packages/sandbox/package.json +++ b/packages/sandbox/package.json @@ -1,6 +1,6 @@ { "name": "@decocms/sandbox", - "version": "0.0.1", + "version": "0.1.0", "type": "module", "description": "Sandbox runner for isolated per-user containerised tool execution", "scripts": { diff --git a/packages/sandbox/server/runner/agent-sandbox/index.ts b/packages/sandbox/server/runner/agent-sandbox/index.ts index 711ee05b3e..2e4b570517 100644 --- a/packages/sandbox/server/runner/agent-sandbox/index.ts +++ b/packages/sandbox/server/runner/agent-sandbox/index.ts @@ -1,5 +1,5 @@ -// Re-exported so external tooling (e.g. deploy/k8s-sandbox/local/smoke.ts) -// can build a KubeConfig without declaring @kubernetes/client-node itself. +// Re-exported so external tooling can build a KubeConfig without +// declaring @kubernetes/client-node itself. 
export { KubeConfig } from "@kubernetes/client-node"; export { K8S_CONSTANTS, SandboxError, SandboxTimeoutError } from "./constants"; export { diff --git a/packages/sandbox/server/runner/agent-sandbox/runner.ts b/packages/sandbox/server/runner/agent-sandbox/runner.ts index 6ed67a069f..5ec88d82c1 100644 --- a/packages/sandbox/server/runner/agent-sandbox/runner.ts +++ b/packages/sandbox/server/runner/agent-sandbox/runner.ts @@ -684,6 +684,7 @@ export class AgentSandboxRunner implements SandboxRunner { private buildEnvMap( opts: EnsureOptions, boot: { token: string; daemonBootId: string; workdir: string }, + id?: SandboxId, ): Record { const callerEnv: Record = {}; const dropped: string[] = []; @@ -722,6 +723,18 @@ export class AgentSandboxRunner implements SandboxRunner { ...(opts.workload?.packageManager ? { PACKAGE_MANAGER: opts.workload.packageManager } : {}), + // Key .next/cache on the credential-stripped repo URL so all users and + // all branches of the same project share the webpack compilation cache. + // Webpack is content-addressed — unchanged modules are cache hits + // regardless of who or which branch triggered the compile. The first pod + // to start (any user, any branch) warms it for everyone else. + ...(opts.repo + ? { + SANDBOX_CACHE_KEY: repoNextCacheKey(opts.repo.cloneUrl), + } + : id + ? 
{ SANDBOX_CACHE_KEY: hashSandboxId(id, 32) } + : {}), }; } @@ -729,8 +742,9 @@ export class AgentSandboxRunner implements SandboxRunner { handle: string, opts: EnsureOptions, boot: { token: string; daemonBootId: string; workdir: string }, + id?: SandboxId, ): SandboxClaim { - const envMap = this.buildEnvMap(opts, boot); + const envMap = this.buildEnvMap(opts, boot, id); return { apiVersion: `${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}`, kind: "SandboxClaim", @@ -744,6 +758,7 @@ export class AgentSandboxRunner implements SandboxRunner { "app.kubernetes.io/name": "studio-sandbox", "app.kubernetes.io/managed-by": "studio", ...buildTenantLabels(opts.tenant), + ...buildRepoLabels(opts.repo), }, }, spec: { @@ -757,6 +772,7 @@ export class AgentSandboxRunner implements SandboxRunner { labels: buildTenantLabels(opts.tenant, { [LABEL_KEYS.role]: "claimed", [LABEL_KEYS.sandboxHandle]: handle, + ...buildRepoLabels(opts.repo), }), }, // `valueFrom.secretKeyRef` isn't supported on SandboxClaim env; RBAC @@ -785,11 +801,16 @@ export class AgentSandboxRunner implements SandboxRunner { const daemonBootId = randomUUID(); const workdir = DEFAULT_WORKDIR; - const claim = this.buildClaim(handle, opts, { - token, - daemonBootId, - workdir, - }); + const claim = this.buildClaim( + handle, + opts, + { + token, + daemonBootId, + workdir, + }, + id, + ); await createSandboxClaim(this.kubeConfig, this.namespace, claim); const { podName } = await waitForSandboxReady( this.kubeConfig, @@ -1297,6 +1318,8 @@ const LABEL_KEYS = { sandboxHandle: "studio.decocms.com/sandbox-handle", orgId: "studio.decocms.com/org-id", userId: "studio.decocms.com/user-id", + repo: "studio.decocms.com/repo", + branch: "studio.decocms.com/branch", } as const; // K8s label values: ≤63 chars, must match `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`. @@ -1311,6 +1334,21 @@ function sanitizeLabelValue(value: string): string { return LABEL_VALUE_RE.test(truncated) ? 
truncated : ""; } +/** + * Convert an arbitrary string to a valid K8s label value. Replaces any + * character outside [A-Za-z0-9-_.] with ".", strips leading/trailing + * non-alphanumeric chars, and truncates to 63 chars. Returns "" if the + * result is still invalid (e.g. empty string). + */ +function toLabelValue(value: string): string { + const replaced = value.replace(/[^A-Za-z0-9\-_.]/g, "."); + const trimmed = replaced + .replace(/^[^A-Za-z0-9]+/, "") + .replace(/[^A-Za-z0-9]+$/, "") + .slice(0, MAX_LABEL_VALUE_LEN); + return LABEL_VALUE_RE.test(trimmed) ? trimmed : ""; +} + /** * Tenant labels for `adopt()` recovery + cost attribution. Used on both the * claim (so `kubectl get sandboxclaim` shows ownership and adopt() can read @@ -1332,6 +1370,34 @@ function buildTenantLabels( return labels; } +/** Repo/branch labels for cost attribution and cache GC selectors. */ +function buildRepoLabels(repo: EnsureOptions["repo"]): Record { + if (!repo) return {}; + const labels: Record = {}; + const repoVal = toLabelValue(deriveRepoLabel(repo.cloneUrl)); + if (repoVal) labels[LABEL_KEYS.repo] = repoVal; + const branchVal = repo.branch ? toLabelValue(repo.branch) : ""; + if (branchVal) labels[LABEL_KEYS.branch] = branchVal; + return labels; +} + +/** + * Stable, credential-free cache key for the shared .next/cache directory. + * Strips username/password from the clone URL so the key is identical for all + * users and all branches of the same repository. + */ +function repoNextCacheKey(cloneUrl: string): string { + try { + const url = new URL(cloneUrl); + url.username = ""; + url.password = ""; + const canonical = `${url.hostname}${url.pathname}`.replace(/\.git$/, ""); + return createHash("sha256").update(canonical).digest("hex").slice(0, 32); + } catch { + return createHash("sha256").update(cloneUrl).digest("hex").slice(0, 32); + } +} + /** Read tenant back from a claim's metadata.labels (adopt path). 
*/ function readClaimTenant(claim: SandboxResource): RunnerTenant | null { const labels = claim.metadata?.labels; diff --git a/packages/sandbox/server/runner/docker/local-ingress.ts b/packages/sandbox/server/runner/docker/local-ingress.ts index b4b78f5703..70f33b8843 100644 --- a/packages/sandbox/server/runner/docker/local-ingress.ts +++ b/packages/sandbox/server/runner/docker/local-ingress.ts @@ -156,7 +156,7 @@ export function startLocalSandboxIngress( // one per retry and trip MaxListenersExceededWarning after ~10 EADDRINUSE. server.on("listening", () => { console.log( - `[mesh-sandbox-ingress] forwarding *.localhost → ${host}:${port}`, + `[studio-sandbox-ingress] forwarding *.localhost → ${host}:${port}`, ); }); const tryListen = (): void => { @@ -167,7 +167,7 @@ export function startLocalSandboxIngress( if (!warnedInUse) { warnedInUse = true; console.warn( - `[mesh-sandbox-ingress] ${host}:${port} in use — waiting for previous process to release (up to ${MAX_RETRIES / 2}s)...`, + `[studio-sandbox-ingress] ${host}:${port} in use — waiting for previous process to release (up to ${MAX_RETRIES / 2}s)...`, ); } attempt++; @@ -182,12 +182,12 @@ export function startLocalSandboxIngress( port + " -sTCP:LISTEN -n -P`"; console.warn( - `[mesh-sandbox-ingress] ${host}:${port} still in use after ${MAX_RETRIES / 2}s; giving up${hint}.`, + `[studio-sandbox-ingress] ${host}:${port} still in use after ${MAX_RETRIES / 2}s; giving up${hint}.`, ); return; } console.warn( - `[mesh-sandbox-ingress] ${host}:${port} listen error: ${err.message}`, + `[studio-sandbox-ingress] ${host}:${port} listen error: ${err.message}`, ); }); tryListen(); diff --git a/packages/sandbox/server/runner/docker/runner.test.ts b/packages/sandbox/server/runner/docker/runner.test.ts index d71d30c153..dcd8250030 100644 --- a/packages/sandbox/server/runner/docker/runner.test.ts +++ b/packages/sandbox/server/runner/docker/runner.test.ts @@ -240,12 +240,12 @@ describe("DockerSandboxRunner.ensure() — fresh 
provision", () => { // Labels: root + id-scoped. const labelRoot = runArgs.findIndex( - (a, i) => a === "--label" && runArgs[i + 1] === "mesh-sandbox=1", + (a, i) => a === "--label" && runArgs[i + 1] === "studio-sandbox=1", ); expect(labelRoot).toBeGreaterThanOrEqual(0); const labelId = runArgs.findIndex( (a, i) => - a === "--label" && runArgs[i + 1]?.startsWith("mesh-sandbox.id="), + a === "--label" && runArgs[i + 1]?.startsWith("studio-sandbox.id="), ); expect(labelId).toBeGreaterThanOrEqual(0); diff --git a/packages/sandbox/server/runner/docker/runner.ts b/packages/sandbox/server/runner/docker/runner.ts index 234a03d4d4..c86a57b3d4 100644 --- a/packages/sandbox/server/runner/docker/runner.ts +++ b/packages/sandbox/server/runner/docker/runner.ts @@ -43,8 +43,8 @@ import type { } from "../types"; const RUNNER_KIND = "docker" as const; -const LABEL_ROOT = "mesh-sandbox"; -const LABEL_ID = "mesh-sandbox.id"; +const LABEL_ROOT = "studio-sandbox"; +const LABEL_ID = "studio-sandbox.id"; const DEFAULT_DEV_PORT = 3000; const PORT_READBACK_ATTEMPTS = 15; const PORT_READBACK_INTERVAL_MS = 200; @@ -120,7 +120,7 @@ export class DockerSandboxRunner implements SandboxRunner { constructor(opts: DockerRunnerOptions = {}) { this.defaultImage = - opts.image ?? process.env.MESH_SANDBOX_IMAGE ?? DEFAULT_IMAGE; + opts.image ?? process.env.STUDIO_SANDBOX_IMAGE ?? DEFAULT_IMAGE; this.exec_ = opts.exec ?? dockerExec; this.labelPrefix = opts.labelPrefix ?? LABEL_ROOT; this.stateStore = opts.stateStore ?? 
null; diff --git a/packages/sandbox/server/runner/docker/sweep.ts b/packages/sandbox/server/runner/docker/sweep.ts index 16c9393b32..d7e644127c 100644 --- a/packages/sandbox/server/runner/docker/sweep.ts +++ b/packages/sandbox/server/runner/docker/sweep.ts @@ -41,7 +41,7 @@ export async function sweepDockerOrphansOnBoot( } /** - * Caveat: filters only by `mesh-sandbox=1`, so multiple mesh pods sharing + * Caveat: filters only by `studio-sandbox=1`, so multiple studio pods sharing * one docker host would nuke each other's containers on SIGTERM. Fine for * single-pod-per-host (the only sane docker deployment shape today). */ diff --git a/packages/sandbox/shared.ts b/packages/sandbox/shared.ts index 6be1757529..af602062b8 100644 --- a/packages/sandbox/shared.ts +++ b/packages/sandbox/shared.ts @@ -3,7 +3,7 @@ export const PLUGIN_DESCRIPTION = "Isolated per-user sandboxes for MCP tool execution"; export const DAEMON_PORT = 9000; -export const DEFAULT_IMAGE = "mesh-sandbox:local"; +export const DEFAULT_IMAGE = "studio-sandbox:local"; /** Shell-quote a value for safe inclusion in a `bash -lc` script. */ export function shellQuote(value: string): string { diff --git a/scripts/dev.ts b/scripts/dev.ts index 8b35064bb9..cb80d7b239 100644 --- a/scripts/dev.ts +++ b/scripts/dev.ts @@ -24,8 +24,8 @@ const child = Bun.spawn( stdio: ["inherit", "inherit", "inherit"], env: { ...process.env, - MESH_SANDBOX_DEV_DAEMON_DIR: - process.env.MESH_SANDBOX_DEV_DAEMON_DIR ?? sandboxDaemonDir, + STUDIO_SANDBOX_DEV_DAEMON_DIR: + process.env.STUDIO_SANDBOX_DEV_DAEMON_DIR ?? sandboxDaemonDir, }, }, );