diff --git a/.cz.yaml b/.cz.yaml index 5412482c..03f4f71f 100644 --- a/.cz.yaml +++ b/.cz.yaml @@ -1,12 +1,11 @@ --- commitizen: annotated_tag: true - bump_message: "bump(release): v$current_version \u2192 v$new_version" + bump_message: "bump(release): v$current_version β†’ v$new_version" name: cz_conventional_commits tag_format: $major.$minor.$patch$prerelease update_changelog_on_bump: true - version: 1.1.1 + version: 2.0.0 version_files: - - build.gradle:version - charts/locust-k8s-operator/Chart.yaml:appVersion - charts/locust-k8s-operator/Chart.yaml:version diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..f6479469 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,34 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file + +# Test files +test/ +*_test.go + +# Documentation +docs/ +*.md +LICENSE + +# Git +.git/ +.gitignore + +# IDE +.idea/ +.vscode/ + +# CI +.github/ +.golangci.yml + +# Build artifacts +bin/ +cover.out +coverage.out + +# Planning +.planning/ + +# Python +venv/ +__pycache__/ diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..5838c0fe --- /dev/null +++ b/.editorconfig @@ -0,0 +1,29 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true + +[*.go] +indent_style = tab +indent_size = 4 + +[*.{yaml,yml}] +indent_style = space +indent_size = 2 + +[*.{md,markdown}] +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab + +[Dockerfile] +indent_style = space +indent_size = 4 + +[*.sh] +indent_style = space +indent_size = 2 diff --git a/.github/ct.yaml b/.github/ct.yaml index 21bd23d2..f71bc176 100644 --- a/.github/ct.yaml +++ b/.github/ct.yaml @@ -1,3 +1,3 @@ debug: true validate-maintainers: false -helm-extra-args: --timeout 600 +helm-extra-args: --timeout 300s diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 3f593d3e..bf19dd36 100644 --- a/.github/workflows/ci.yaml +++ 
b/.github/workflows/ci.yaml @@ -7,126 +7,153 @@ on: branches: - master - main - pull_request_target: - types: [ opened, synchronize, reopened, ready_for_review ] + pull_request: + types: [opened, synchronize, reopened, ready_for_review] # Sets default read-only permissions for the workflow. permissions: read-all jobs: - # Job to build and validate the project. - build: - name: πŸ—οΈ Build & Validate + # ============================================ + # Go Operator Build & Test + # ============================================ + build-go: + name: πŸ—οΈ Build Go Operator runs-on: ubuntu-latest + timeout-minutes: 30 steps: - # Step 1: Checks out the repository code. - name: πŸ“‚ Checkout repo uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} repository: ${{ github.event.pull_request.head.repo.full_name }} - # Step 2: Sets up JDK 21. - - name: β˜• Set up JDK 21 - uses: actions/setup-java@v4 + - name: πŸ”§ Setup Go + uses: actions/setup-go@v5 with: - java-version: 21 - distribution: temurin + go-version-file: go.mod - # Step 3: Validates the Gradle wrapper to ensure its integrity. - - name: βœ… Validate Gradle wrapper - uses: gradle/wrapper-validation-action@v3.4.2 + - name: πŸ“₯ Download dependencies + run: go mod download - # Step 4: Builds the project and generates a JaCoCo test report. - - name: πŸ› οΈ Build - uses: gradle/gradle-build-action@v3.5.0 + - name: πŸ” Run linter + uses: golangci/golangci-lint-action@v8 with: - arguments: build jacocoTestReport -i + version: v2.1.0 + + - name: πŸ› οΈ Build + run: make build + + - name: βœ… Run tests + run: make test - # Step 5: Reports code coverage to Codecov. - name: πŸ“Š Report coverage + if: always() uses: codecov/codecov-action@v5 with: - name: branch-${{ github.ref }} - verbose: true + files: cover.out + flags: go-unit-tests + name: go-coverage fail_ci_if_error: false - # Step 6: Runs the Codacy coverage reporter. 
- - name: πŸ“ˆ Run codacy coverage reporter - uses: codacy/codacy-coverage-reporter-action@v1 + - name: πŸ“¦ Upload Go test artifacts on failure + if: failure() + uses: actions/upload-artifact@v4 with: - project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} - - # Step 7: Uploads build artifacts. - - name: πŸ“¦ Upload build artifacts - uses: actions/upload-artifact@v4.4.0 - with: - name: Build Artifacts + name: go-test-artifacts path: | - **/build/reports + cover.out + retention-days: 7 - # Job to lint and test the Helm chart. + # ============================================ + # Helm Chart Lint & Test + # ============================================ lint-test-helm: name: 🌊 Lint & Test chart runs-on: ubuntu-latest - needs: - - build + timeout-minutes: 30 steps: - # Step 1: Checks out the repository code. - name: πŸ“‚ Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.sha }} repository: ${{ github.event.pull_request.head.repo.full_name }} - # Step 2: Sets up Helm. - name: 🌊 Set up Helm uses: azure/setup-helm@v4 with: - version: v3.10.0 + version: v3.14.0 - # Step 3: Sets up Python. - name: 🐍 Set up Python uses: actions/setup-python@v5 with: python-version: '3.9' check-latest: true - # Step 4: Sets up chart-testing. - name: πŸ“Š Set up chart-testing uses: helm/chart-testing-action@v2.7.0 - # Step 5: Lists the Helm charts that have changed. - name: πŸ“‹ Run chart-testing (list-changed) id: list-changed run: | changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" + echo "changed=true" >> $GITHUB_OUTPUT fi - # Step 6: Lints the Helm charts. - name: πŸ”Ž Run chart-testing (lint) run: ct lint --target-branch ${{ github.event.repository.default_branch }} --config .github/ct.yaml - # Step 7: Creates a KinD cluster if any charts have changed. 
- name: β›΅ Create kind cluster uses: helm/kind-action@v1.12.0 if: steps.list-changed.outputs.changed == 'true' - # Step 8: Installs the Helm charts. + - name: πŸ”§ Setup Go + if: steps.list-changed.outputs.changed == 'true' + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: πŸ› οΈ Install ko + if: steps.list-changed.outputs.changed == 'true' + uses: ko-build/setup-ko@v0.7 + + - name: 🐳 Build operator image with ko + if: steps.list-changed.outputs.changed == 'true' + env: + KO_DOCKER_REPO: lotest/locust-k8s-operator + run: | + CHART_VERSION=$(grep '^appVersion:' charts/locust-k8s-operator/Chart.yaml | awk '{print $2}' | tr -d '"') + ko build ./cmd --local --bare --tags=${CHART_VERSION} + + - name: πŸ“¦ Load image into kind cluster + if: steps.list-changed.outputs.changed == 'true' + run: | + CHART_VERSION=$(grep '^appVersion:' charts/locust-k8s-operator/Chart.yaml | awk '{print $2}' | tr -d '"') + kind load docker-image lotest/locust-k8s-operator:${CHART_VERSION} --name chart-testing + - name: πŸš€ Run chart-testing (install) - run: ct install --target-branch ${{ github.event.repository.default_branch }} --config .github/ct.yaml + run: ct install --target-branch ${{ github.event.repository.default_branch }} --config .github/ct.yaml --helm-extra-set-args "--set=image.pullPolicy=Never" + + - name: πŸ“¦ Collect kind cluster logs on failure + if: failure() && steps.list-changed.outputs.changed == 'true' + run: kind export logs kind-logs --name chart-testing + + - name: πŸ“¦ Upload Helm test artifacts on failure + if: failure() && steps.list-changed.outputs.changed == 'true' + uses: actions/upload-artifact@v4 + with: + name: helm-test-artifacts + path: | + kind-logs/ + retention-days: 7 - # Job to test the documentation. docs-test: name: πŸ“š Test documentation runs-on: ubuntu-latest - needs: - - lint-test-helm + timeout-minutes: 15 steps: - # Step 1: Checks out the repository code. 
- name: πŸ“‚ Checkout uses: actions/checkout@v4 with: @@ -134,22 +161,29 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} repository: ${{ github.event.pull_request.head.repo.full_name }} - # Step 2: Sets up Python. - name: 🐍 Setup python uses: actions/setup-python@v5 with: python-version: 3.x - # Step 3: Installs dependencies. - name: πŸ“¦ Install dependencies run: | pip install \ mkdocs-material \ - mkdocs-git-revision-date-localized-plugin + mkdocs-git-revision-date-localized-plugin \ + mkdocs-minify-plugin - # Step 4: Builds the documentation. - name: πŸ“„ Build documentation env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | python -m mkdocs build + + - name: πŸ“¦ Upload docs build artifacts on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: docs-build-artifacts + path: | + site/ + retention-days: 7 diff --git a/.github/workflows/docs-preview.yml b/.github/workflows/docs-preview.yml index 76286680..79e1aa09 100644 --- a/.github/workflows/docs-preview.yml +++ b/.github/workflows/docs-preview.yml @@ -43,6 +43,7 @@ jobs: run: | pip install mkdocs-material pip install mkdocs-git-revision-date-localized-plugin + pip install mkdocs-minify-plugin - name: πŸ”§ Configure MkDocs for preview run: | diff --git a/.github/workflows/go-test-e2e.yml b/.github/workflows/go-test-e2e.yml new file mode 100644 index 00000000..6ceab37a --- /dev/null +++ b/.github/workflows/go-test-e2e.yml @@ -0,0 +1,61 @@ +name: πŸš€ Go E2E Tests + +on: + push: + branches: [main, master] + paths: + - 'cmd/**' + - 'api/**' + - 'internal/**' + - 'test/**' + - 'go.mod' + - 'go.sum' + - 'Makefile' + pull_request: + paths: + - 'cmd/**' + - 'api/**' + - 'internal/**' + - 'test/**' + - 'go.mod' + - 'go.sum' + - 'Makefile' + +permissions: read-all + +jobs: + test-e2e: + name: 🎯 Run E2E Tests + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: πŸ“₯ Clone the code + uses: actions/checkout@v4 + + - name: πŸ”§ Setup Go + uses: actions/setup-go@v5 + with: + 
go-version-file: go.mod + + - name: ☸️ Create Kind cluster + uses: helm/kind-action@v1.12.0 + with: + cluster_name: locust-k8s-operator-test-e2e + wait: 120s + + - name: πŸ” Verify go mod tidy + run: | + go mod tidy + git diff --exit-code -- go.mod go.sum + + - name: 🎯 Run E2E tests + run: make test-e2e + + - name: πŸ“¦ Upload E2E test artifacts on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: e2e-test-artifacts + path: | + cover.out + retention-days: 7 diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml deleted file mode 100644 index dd731205..00000000 --- a/.github/workflows/integration-test.yml +++ /dev/null @@ -1,80 +0,0 @@ -# This is a GitHub Actions workflow for running integration tests. -name: πŸ§ͺ Integration Tests - -# This workflow is triggered on pull requests to the main branch, pushes to the main branch, and can also be triggered manually. -on: - pull_request: - branches: - - master - - main - push: - branches: - - master - - main - workflow_dispatch: - -jobs: - # Defines a single job named 'integration-test'. - integration-test: - # The job will run on the latest version of Ubuntu. - runs-on: ubuntu-latest - # The job will time out after 30 minutes. - timeout-minutes: 30 - - steps: - # Step 1: Checks out the repository code. - - name: πŸ“‚ Checkout code - uses: actions/checkout@v4 - - # Step 2: Sets up JDK 21. - - name: β˜• Set up JDK 21 - uses: actions/setup-java@v4 - with: - java-version: '21' - distribution: 'temurin' - - # caching gradle packages to speed up the build - # Step 3: Caches Gradle packages to speed up subsequent builds. - - name: ⚑ Cache Gradle packages - uses: actions/cache@v4 - with: - path: | - ~/.gradle/caches - ~/.gradle/wrapper - key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} - restore-keys: | - ${{ runner.os }}-gradle- - - # Step 4: Sets up Docker Buildx for building Docker images. 
- - name: 🐳 Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - # Step 5: Sets up Helm, a package manager for Kubernetes. - - name: 🌊 Set up Helm - uses: azure/setup-helm@v4 - with: - version: '3.12.0' - - # Step 6: Sets up a KinD (Kubernetes in Docker) cluster for running tests. - - name: β›΅ Set up Kind - uses: helm/kind-action@v1.12.0 - with: - cluster_name: locust-integration-test - config: .github/kind-config.yaml - - # Step 7: Runs the integration tests using Gradle. - - name: πŸš€ Run Integration Tests - run: ./gradlew integrationTest - env: - # Sets the KUBECONFIG environment variable to point to the KinD cluster's configuration. - KUBECONFIG: /home/runner/.kube/config - - # Step 8: Uploads the test results as an artifact. This step runs even if previous steps fail. - - name: πŸ“„ Upload test results - uses: actions/upload-artifact@v4 - if: always() - with: - name: integration-test-results - path: | - build/reports/ - build/test-results/ diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 333d41f0..89ea01ad 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -8,69 +8,67 @@ on: - "*" jobs: - # Job to publish the Docker image. - Publish-image: + # ============================================ + # Go Operator Docker Image + # ============================================ + publish-image: name: 🐳 Publish image runs-on: ubuntu-latest - + timeout-minutes: 30 + permissions: + contents: read env: - DOCKER_IMAGE: lotest/${{ github.event.repository.name }} + DOCKER_IMAGE: lotest/locust-k8s-operator steps: - # Step 1: Checks out the repository code. - name: πŸ“‚ Checkout repo uses: actions/checkout@v4 - # Step 2: Sets up JDK 21. - - name: β˜• Set up JDK 21 - uses: actions/setup-java@v4 + - name: πŸ”§ Setup Go + uses: actions/setup-go@v5 with: - java-version: 21 - distribution: temurin - - # Step 3: Sets up Gradle. 
- - name: πŸ› οΈ Setup Gradle - uses: gradle/gradle-build-action@v3.5.0 + go-version-file: go.mod - # Step 4: Builds the Docker image using Jib. - - name: πŸ“¦ Build docker image - run: ./gradlew jibDockerBuild -i --image $DOCKER_IMAGE + - name: πŸ”§ Setup ko + uses: ko-build/setup-ko@v0.7 - # Step 5: Logs in to Docker Hub. - name: πŸ” Login to Docker Hub uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - # Step 6: Pushes the image to the Docker repository. - - name: 🚒 Push image to repository - run: docker push --all-tags $DOCKER_IMAGE + - name: πŸ“¦ Build and push with ko + env: + KO_DOCKER_REPO: ${{ env.DOCKER_IMAGE }} + run: | + ko build ./cmd \ + --platform=linux/amd64,linux/arm64 \ + --bare \ + --tags=${{ github.ref_name }},latest,${{ github.sha }} - # Job to publish the Helm chart. helm-chart-release: name: 🌊 Publish Helm chart runs-on: ubuntu-latest - permissions: write-all + timeout-minutes: 15 + needs: [publish-image] + permissions: + contents: write steps: - # Step 1: Checks out the repository code. - name: πŸ“‚ Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - # Step 2: Configures Git with the user name and email. - name: ✍️ Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - # Step 3: Installs Helm. - name: 🌊 Install Helm uses: azure/setup-helm@v4 with: - version: v3.10.0 + version: v3.14.0 - # Step 4: Packages the Helm chart with the release version. - name: πŸ“¦ Custom packaging run: | VERSION=${{github.ref_name}} @@ -78,49 +76,43 @@ jobs: mkdir -p .cr-release-packages helm package charts/locust-k8s-operator --app-version=${VERSION} --version=${VERSION} --destination=.cr-release-packages - # Step 5: Runs the chart-releaser to publish the chart. 
- name: πŸŽ‰ Run chart-releaser - # switch back to helm/chart-releaser-action when https://github.com/helm/chart-releaser-action/pull/109 is merged - # Waiting for a new release to include changes coming from https://github.com/helm/chart-releaser-action/commit/38cfeacdbbd62d13ec773fcdee1435ff5846c554 - uses: askcloudarchitech/chart-releaser-action@skip-packaging-option + uses: helm/chart-releaser-action@v1 with: skip_packaging: true env: CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - # Job to publish the documentation. docs-release: name: πŸ“š Publish documentation runs-on: ubuntu-latest - permissions: write-all + timeout-minutes: 15 + permissions: + contents: write steps: - # Step 1: Checks out the repository code. - name: πŸ“‚ Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - # Step 2: Sets up Python. - name: 🐍 Setup python uses: actions/setup-python@v5 with: python-version: 3.x - # Step 3: Installs documentation dependencies. - name: πŸ“¦ Install dependencies run: | pip install \ mkdocs-material \ - mkdocs-git-revision-date-localized-plugin + mkdocs-git-revision-date-localized-plugin \ + mkdocs-minify-plugin - # Step 4: Builds the documentation. - name: πŸš€ Build documentation env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | python -m mkdocs build - # Step 5: Deploys the documentation. 
- name: πŸš€ Deploy documentation uses: peaceiris/actions-gh-pages@v4 with: diff --git a/.gitignore b/.gitignore index d90faa8b..c99a020b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,50 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/* +Dockerfile.cross + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out + +# Go workspace file +go.work + +# Kubernetes Generated files - skip generated files, except for vendored files +!vendor/**/zz_generated.* + +# Editor and IDE +.idea +.vscode +*.swp +*.swo +*~ + +# Test coverage +cover.out +coverage.out + +# Python virtual environment +venv/ + +# OS Thumbs.db .DS_Store -.gradle -build/ -target/ -out/ -.micronaut/ -.idea -*.iml -*.ipr -*.iws -.project -.settings -.classpath -.factorypath # mkdocs .docs-venv .cache + +# Agentic flows +specs/ +.windsurf/ +.planning/ +.specify/ +CLAUDE.md +GEMINI.md diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..7d5a33ba --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,67 @@ +version: "2" +run: + allow-parallel-runners: true +linters: + default: none + enable: + - copyloopvar + - dupl + - errcheck + - errorlint + - exhaustive + - ginkgolinter + - goconst + - gocyclo + - gosec + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - unconvert + - unparam + - unused + settings: + errorlint: + errorf: true + errorf-multi: true + asserts: true + comparison: true + exhaustive: + check: + - switch + default-signifies-exhaustive: false + revive: + rules: + - name: comment-spacings + - name: import-shadowing + exclusions: + generated: lax + rules: + - linters: + - lll + path: api/* + - linters: + - dupl + - lll + path: internal/* + - linters: + - goconst + path: _test\.go + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d71ecea0..afdba819 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,13 +22,3 @@ repos: hooks: - id: commitizen stages: [commit-msg] - - - repo: https://github.com/jguttman94/pre-commit-gradle - rev: v0.3.0 - hooks: - - id: gradle-check - args: [ --wrapper, --output ] - exclude: .*\.md # Markdown - - id: gradle-spotless - args: [ --wrapper, --output ] - exclude: .*\.md # Markdown diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 87cf7dd7..25c549e6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,102 +30,106 @@ Please note that we have a code of conduct and thus you are kindly asked to foll Setup: once per project 1. Clone this repository. -2. Install [pre-commit](https://pre-commit.com/) and run the below commands to add and register needed git hooks - 1. Run `pre-commit install --install-hooks` - 2. Run `pre-commit install --hook-type commit-msg` +2. Install prerequisites: + - [Go 1.24+](https://go.dev/dl/) + - [Docker](https://docs.docker.com/get-docker/) + - [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) (Kubernetes in Docker) + - [Helm 3](https://helm.sh/docs/intro/install/) +3. Install development tools: + ```bash + make controller-gen envtest kustomize + ``` +4. Install [pre-commit](https://pre-commit.com/) and run: + 1. `pre-commit install --install-hooks` + 2. `pre-commit install --hook-type commit-msg`
-Developing +Common development commands - This project follows the [Conventional Commits](https://www.conventionalcommits.org/) standard to automate [Semantic Versioning](https://semver.org/) and [Keep A Changelog](https://keepachangelog.com/) with [Commitizen](https://github.com/commitizen-tools/commitizen). +```bash +# Build the operator binary +make build + +# Run all tests (unit + integration via envtest) +make test + +# Run linter +make lint + +# Generate CRDs, RBAC, and webhook manifests +make manifests + +# Run all CI checks locally +make ci +``` +
-### Local Testing with Minikube and Helm +### Local Testing with Kind -For local development and testing, you can use Minikube to create a local Kubernetes cluster. This allows you to test the operator and your changes in an environment that closely resembles a production setup. +For local development and testing, [Kind](https://kind.sigs.k8s.io/) (Kubernetes in Docker) is the recommended approach. #### Prerequisites -- [Minikube](https://minikube.sigs.k8s.io/docs/start/) -- [Helm](https://helm.sh/docs/intro/install/) +- [Go 1.24+](https://go.dev/dl/) +- [Docker](https://docs.docker.com/get-docker/) +- [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [Helm 3](https://helm.sh/docs/intro/install/) #### Steps -1. **Start Minikube** - - Start a local Kubernetes cluster using Minikube: +1. **Create a Kind Cluster** ```bash - minikube start + kind create cluster --name locust-dev ``` 2. **Build and Load the Docker Image** - If you've made changes to the operator's source code, you'll need to build a new Docker image and load it into your Minikube cluster. This project uses the Jib Gradle plugin to build images directly, so you don't need a `Dockerfile`. + Build a Docker image and load it into Kind: - First, build the image to your local Docker daemon: ```bash - ./gradlew jibDockerBuild - ``` + # Build the Docker image + make docker-build IMG=locust-k8s-operator:dev - Next, load the image into Minikube's internal registry: - ```bash - minikube image load locust-k8s-operator:latest + # Load the image into Kind + kind load docker-image locust-k8s-operator:dev --name locust-dev ``` -3. **Package the Helm Chart** - - Package the Helm chart to create a distributable `.tgz` file. +3. **Install the Operator with Helm** ```bash - helm package ./charts/locust-k8s-operator - ``` + # Package the Helm chart + helm package charts/locust-k8s-operator -4. **Install the Operator with Helm** - - Install the Helm chart on your Minikube cluster. 
The command below overrides the default image settings to use the one you just built and loaded. - - You can use a `values.yaml` file to override other settings. - - ```yaml - # values.yaml (optional) - # Example: Set resource requests and limits for the operator pod - config: - loadGenerationPods: - resource: - cpuRequest: 250m - memRequest: 128Mi - ephemeralRequest: 300M - cpuLimit: 1000m - memLimit: 1024Mi - ephemeralLimit: 50M - - # To leave a resource unbound, Leave the limit empty - # This is useful when you don't want to set a specific limit. - # example: - # config: - # loadGenerationPods: - # resource: - # cpuLimit: "" - # memLimit: "" - # ephemeralLimit: "" + # Install with local image + helm install locust-operator locust-k8s-operator-*.tgz \ + --set image.repository=locust-k8s-operator \ + --set image.tag=dev \ + --set image.pullPolicy=IfNotPresent ``` - Install the chart using the following command. The `-f values.yaml` flag is optional. +4. **Verify the Deployment** ```bash - helm install locust-operator locust-k8s-operator-*.tgz -f values.yaml \ - --set image.repository=locust-k8s-operator \ - --set image.tag=latest \ - --set image.pullPolicy=IfNotPresent + kubectl get pods -A | grep locust ``` - This will deploy the operator to your Minikube cluster using the settings defined in your `values.yaml` file. +5. 
**Cleanup** + + ```bash + # Uninstall the operator + helm uninstall locust-operator + + # Delete the Kind cluster + kind delete cluster --name locust-dev + ``` ### Writing documentation diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..f183ebbd --- /dev/null +++ b/Dockerfile @@ -0,0 +1,33 @@ +# Build the manager binary +FROM golang:1.24.0 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/ cmd/ +COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager ./cmd/ + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 00000000..ee879bb0 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,90 @@ +# Migration from Java to Go + +## Overview + +The Locust Kubernetes Operator has been completely rewritten from Java to Go. This represents a full architectural transition, not a simple port. 
While the core functionality remains the same, the implementation is fundamentally different. + +## What Changed + +### Language and Framework +- **Before**: Java with Micronaut framework and Java Operator SDK +- **After**: Go with Operator SDK / controller-runtime (Kubernetes standard operator framework) + +### Project Structure +- **Before**: Java source in `src/`, Gradle build files, Maven dependencies +- **After**: Go source at repository root (`cmd/`, `api/`, `internal/`), Go modules + +### Performance Characteristics +- **Memory footprint**: ~256MB (Java) β†’ ~64MB (Go) +- **Startup time**: ~60s (Java) β†’ <1s (Go) +- **Binary size**: ~100MB (Java + JVM) β†’ ~30MB (Go static binary) + +## What Stayed the Same + +### API Compatibility +The `LocustTest` Custom Resource Definition (CRD) maintains full backward compatibility: +- Both v1 and v2 API versions are supported +- Existing manifests continue to work without modification +- Helm chart values remain compatible (with new optional features added) + +### Behavior +The operator provides the same functionality: +- Creates master and worker Pods with Locust +- Manages Services for master UI and headless communication +- Handles ConfigMap-based Locust script injection +- Supports Secret and environment variable configuration + +### Deployment +The Helm chart remains at `charts/locust-k8s-operator/` with the same installation process. + +## Finding the Old Java Code + +The Java operator source code has been preserved in the `archive/java-operator-v1` branch for reference. + +To access it: + +```bash +git fetch origin archive/java-operator-v1 +git checkout archive/java-operator-v1 +``` + +This branch contains the complete Java codebase as it existed before the Go rewrite. It is maintained for historical reference only and will not receive further updates. 
+ +## Key Differences for Developers + +### Testing +- **Before**: JUnit 5, Mockito, Testcontainers +- **After**: Go's testing package, envtest for controller tests, Kind for E2E tests + +### Build System +- **Before**: Gradle with multi-stage Dockerfile +- **After**: Make with standard Go build commands, multi-arch builds via BuildKit + +### Dependencies +- **Before**: Maven Central packages +- **After**: Go modules from Go package ecosystem + +### CI/CD +The CI/CD pipelines have been updated to use Go tooling: +- `go build`, `go test`, `go vet`, `golangci-lint` instead of Gradle tasks +- Multi-platform Docker builds (amd64 + arm64) +- Helm chart testing remains unchanged + +## Migration for Users + +Most users will not need to make any changes. The Go operator is a drop-in replacement for the Java operator: + +1. Update the operator deployment via Helm (same chart, new appVersion) +2. Existing `LocustTest` resources continue to function +3. Review new features in v2.0 (OpenTelemetry, enhanced volumes, separate resources per role) + +For detailed migration guidance, see the [Migration Guide](https://abdelrhmanhamouda.github.io/locust-k8s-operator/migration/) in the documentation. + +## Rationale + +The rewrite to Go was motivated by: + +1. **Performance**: Lower memory usage and faster startup align with Kubernetes ecosystem expectations +2. **Ecosystem alignment**: controller-runtime is the de facto standard for Kubernetes operators +3. **Maintainability**: Simpler deployment (static binary), broader contributor pool familiar with Go +4. **Cloud-native fit**: Go is the lingua franca of cloud-native tooling diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..72159582 --- /dev/null +++ b/Makefile @@ -0,0 +1,382 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. 
+# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.0.1 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# io/locust-k8s-operator-bundle:$VERSION and io/locust-k8s-operator-catalog:$VERSION. +IMAGE_TAG_BASE ?= io/locust-k8s-operator + +# BUNDLE_IMG defines the image:tag used for the bundle. 
+# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.42.0 +# Image URL to use all building/pushing image targets +IMG ?= controller:latest + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. 
The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: tidy +tidy: ## Run go mod tidy + go mod tidy + +.PHONY: test +test: manifests generate fmt vet setup-envtest generate-test-crds ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +.PHONY: generate-test-crds +generate-test-crds: kustomize manifests ## Generate v1-only CRD for integration tests (no conversion webhook needed). 
+ cp config/crd/bases/locust.io_locusttests.yaml config/crd/test/base.yaml + $(KUSTOMIZE) build config/crd/test > config/crd/test/locust.io_locusttests.yaml + rm config/crd/test/base.yaml + +# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. +# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. +# CertManager is installed by default; skip with: +# - CERT_MANAGER_INSTALL_SKIP=true +KIND_CLUSTER ?= locust-k8s-operator-test-e2e + +.PHONY: setup-test-e2e +setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist + @command -v $(KIND) >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + +.PHONY: test-e2e +test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. 
+ KIND_CLUSTER=$(KIND_CLUSTER) go test ./test/e2e/ -v -ginkgo.v + $(MAKE) cleanup-test-e2e + +.PHONY: cleanup-test-e2e +cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests + @$(KIND) delete cluster --name $(KIND_CLUSTER) + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +.PHONY: lint-config +lint-config: golangci-lint ## Verify golangci-lint linter configuration + $(GOLANGCI_LINT) config verify + +##@ CI + +.PHONY: ci +ci: lint test ## Run all CI checks locally + +.PHONY: ci-coverage +ci-coverage: test ## Generate coverage report for CI + @echo "Coverage report: cover.out" + @go tool cover -func=cover.out | tail -1 + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager ./cmd + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. 
if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name locust-k8s-operator-builder + $(CONTAINER_TOOL) buildx use locust-k8s-operator-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm locust-k8s-operator-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 
+ cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KIND ?= kind +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.6.0 +CONTROLLER_TOOLS_VERSION ?= v0.18.0 +#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) +ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') +#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) +ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') +GOLANGCI_LINT_VERSION ?= v2.1.0 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: setup-envtest +setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. 
+ @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." + @$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path || { \ + echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ + exit 1; \ + } + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. +ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. 
+ $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + $(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = $(LOCALBIN)/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.55.0/$${OS}-$${ARCH}-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. 
+ $(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) diff --git a/PROJECT b/PROJECT new file mode 100644 index 00000000..adc1f726 --- /dev/null +++ b/PROJECT @@ -0,0 +1,32 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: io +layout: +- go.kubebuilder.io/v4 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: locust-k8s-operator +repo: github.com/AbdelrhmanHamouda/locust-k8s-operator +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: io + group: locust + kind: LocustTest + path: github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: io + group: locust + kind: LocustTest + path: github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2 + version: v2 +version: "3" diff --git a/README.md b/README.md index d46efc33..6aee3941 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@
- Locust Kubernetes Operator Logo
@@ -20,6 +20,28 @@ Docs: [github.io/locust-k8s-operator/](https://abdelrhmanhamouda.github.io/locus ----------------------------- +## v2.0 - Complete Go Rewrite + +The operator has been completely rewritten in Go, bringing significant improvements: + +| Improvement | Before (Java) | After (Go) | +|---------------|-------------------|---------------------| +| **Memory** | ~256MB | ~64MB | +| **Startup** | ~60s | <1s | +| **Framework** | Java Operator SDK | Operator SDK / controller-runtime | + +### New Features in v2.0 + +- **Native OpenTelemetry** - Export traces and metrics directly with `--otel` flag +- **Secret & ConfigMap Injection** - Securely inject credentials as env vars or file mounts +- **Volume Mounting** - Mount PVCs, ConfigMaps, Secrets with target filtering (master/worker/both) +- **Separate Resource Specs** - Independent resource configuration for master and worker pods +- **Enhanced Status** - Phase tracking, conditions, and worker connection status + +**[Migration Guide](https://abdelrhmanhamouda.github.io/locust-k8s-operator/migration/)** for existing v1 users + +----------------------------- + [//]: # (Badges) [![CI Pipeline][pipeline-status]][pipeline-status-url] [![Codacy Badge][code-coverage]][code-coverage-url] @@ -29,26 +51,69 @@ Docs: [github.io/locust-k8s-operator/](https://abdelrhmanhamouda.github.io/locus ![](docs/assets/images/run-anywhere.png) -## At a glance +## At a Glance -The _Operator_ is designed to unlock seamless & effortless distributed performance testing in the **_cloud_** and enable **_continues -integration for CI / CD_**. By design, the entire system is cloud native and focuses on automation and CI practices. One strong feature -about the system is its ability to **horizontally scale** to meet any required performance demands. +The Operator is designed to unlock seamless and effortless distributed performance testing in the cloud and enable continuous integration for CI/CD. 
By design, the entire system is cloud native and focuses on automation and CI practices. One strong feature about the system is its ability to horizontally scale to meet any required performance demands. ## Documentation -All the documentation for this project and how to use it is available through [github.io/locust-k8s-operator/](https://abdelrhmanhamouda.github.io/locust-k8s-operator/). +All documentation for this project is available at [github.io/locust-k8s-operator/](https://abdelrhmanhamouda.github.io/locust-k8s-operator/). + +## Quick Start + +### Prerequisites + +- **Go 1.24+** for local development +- **Docker** for building container images +- **kubectl** configured for your cluster +- **Helm 3.x** for chart installation +- **Kind** (optional, for local E2E testing) + +### Installation + +Install the operator using Helm: + +```bash +helm repo add locust-k8s-operator https://abdelrhmanhamouda.github.io/locust-k8s-operator +helm install locust-operator locust-k8s-operator/locust-k8s-operator +``` + +Or from the repository: + +```bash +helm install locust-operator charts/locust-k8s-operator/ +``` + +### Development + +```bash +# Install CRDs +make install + +# Run operator locally (against configured cluster) +make run + +# Run tests +make test + +# Run E2E tests (requires Kind) +make test-e2e + +# Build and push operator image +make docker-build docker-push IMG=/locust-operator:tag +``` + +See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed development setup. -## Project status +## Project Status -The project is **_actively_** maintained and is under continues development and improvement. If you have any request or want to chat, kindly -open a ticket. If you wish to contribute code and / or ideas, kindly check the contribution section. +The project is actively maintained and under continuous development and improvement. If you have any request or want to chat, kindly open a ticket. 
If you wish to contribute code and/or ideas, kindly check the contribution section. ## Contribute -There's plenty to do, come say hi in [the issues](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues)! πŸ‘‹ +There's plenty to do, come say hi in [the issues](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues)! -Also check out the [CONTRIBUTING.MD](CONTRIBUTING.md) πŸ€“ +Also check out [CONTRIBUTING.md](CONTRIBUTING.md). ## License diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..c186382b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,80 @@ +# Security Policy + +## Supported Versions + +We release security updates for the following versions: + +| Version | Supported | +| ------- | ------------------ | +| 2.x | :white_check_mark: | +| 1.x | :x: | + +Version 1.x (Java operator) is no longer maintained. Please upgrade to version 2.x (Go operator) to receive security updates. + +## Reporting a Vulnerability + +We take the security of the Locust Kubernetes Operator seriously. If you discover a security vulnerability, please report it responsibly. + +### Preferred Method: GitHub Security Advisories + +The preferred way to report security vulnerabilities is through [GitHub Security Advisories](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/security/advisories/new). + +This allows us to: +- Discuss the vulnerability privately +- Work on a fix before public disclosure +- Coordinate the release and announcement + +### Alternative: Private Email + +If you prefer email or cannot use GitHub Security Advisories, please contact the project maintainers directly. You can find maintainer contact information in the project's GitHub repository. + +### What to Include + +When reporting a vulnerability, please include: + +1. **Description**: A clear description of the vulnerability +2. **Impact**: What an attacker could achieve by exploiting it +3. **Reproduction**: Step-by-step instructions to reproduce the issue +4. 
**Affected versions**: Which versions are vulnerable +5. **Suggested fix** (optional): If you have ideas for remediation + +### Response Timeline + +- **Initial response**: Within 48 hours of report +- **Status update**: Within 7 days with assessment and next steps +- **Fix timeline**: Depends on severity; critical issues prioritized immediately + +## Scope + +Security reports should relate to: + +- The operator's Go code (controller logic, webhook validation, resource management) +- The Helm chart (RBAC, security contexts, defaults) +- CI/CD pipeline security (supply chain, artifact integrity) +- Dependencies (Go modules, container base images) + +Out of scope: +- Issues in Locust itself (report to [locustio/locust](https://github.com/locustio/locust)) +- Issues in Kubernetes core (report to [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)) +- General usage questions (use GitHub Discussions or Issues) + +## Security Best Practices + +When deploying the operator, we recommend: + +1. **Least Privilege RBAC**: The Helm chart provides minimal required permissions by default +2. **Read-only Root Filesystem**: Enabled by default in the operator Pod +3. **Network Policies**: Consider adding NetworkPolicy resources to restrict operator traffic +4. **Image Verification**: Use image digests or verify signatures (cosign) for supply chain security +5. **Keep Updated**: Regularly update to the latest patch version for security fixes + +## Disclosure Policy + +Once a security fix is released: + +1. We will publish a GitHub Security Advisory with details +2. The advisory will be linked in release notes +3. Credit will be given to the reporter (unless anonymity is requested) +4. CVE assignment will be requested for critical or high-severity issues + +We follow a coordinated disclosure approach, allowing time for users to update before full public disclosure. 
diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go new file mode 100644 index 00000000..b1882667 --- /dev/null +++ b/api/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the locust v1 API group. +// +kubebuilder:object:generate=true +// +groupName=locust.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "locust.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/locusttest_conversion.go b/api/v1/locusttest_conversion.go new file mode 100644 index 00000000..70d9c08d --- /dev/null +++ b/api/v1/locusttest_conversion.go @@ -0,0 +1,278 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + v2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" +) + +// ConvertTo converts this v1 LocustTest to the Hub version (v2). +func (src *LocustTest) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v2.LocustTest) + + // Metadata + dst.ObjectMeta = src.ObjectMeta + + // Image configuration + dst.Spec.Image = src.Spec.Image + if src.Spec.ImagePullPolicy != "" { + dst.Spec.ImagePullPolicy = corev1.PullPolicy(src.Spec.ImagePullPolicy) + } + dst.Spec.ImagePullSecrets = convertImagePullSecretsToV2(src.Spec.ImagePullSecrets) + + // Master configuration (grouped) + dst.Spec.Master = v2.MasterSpec{ + Command: src.Spec.MasterCommandSeed, + Autostart: ptr.To(true), + Autoquit: &v2.AutoquitConfig{Enabled: true, Timeout: 60}, + } + if src.Spec.Labels != nil && src.Spec.Labels.Master != nil { + dst.Spec.Master.Labels = src.Spec.Labels.Master + } + if src.Spec.Annotations != nil && src.Spec.Annotations.Master != nil { + dst.Spec.Master.Annotations = src.Spec.Annotations.Master + } + + // Worker configuration (grouped) + dst.Spec.Worker = v2.WorkerSpec{ + Command: src.Spec.WorkerCommandSeed, + Replicas: src.Spec.WorkerReplicas, + } + if src.Spec.Labels != nil && src.Spec.Labels.Worker != nil { + dst.Spec.Worker.Labels = src.Spec.Labels.Worker + } + if src.Spec.Annotations != nil && src.Spec.Annotations.Worker != nil { + dst.Spec.Worker.Annotations = src.Spec.Annotations.Worker + } + + // Test files configuration + if src.Spec.ConfigMap 
!= "" || src.Spec.LibConfigMap != "" { + dst.Spec.TestFiles = &v2.TestFilesConfig{ + ConfigMapRef: src.Spec.ConfigMap, + LibConfigMapRef: src.Spec.LibConfigMap, + } + } + + // Scheduling configuration + if src.Spec.Affinity != nil || len(src.Spec.Tolerations) > 0 { + dst.Spec.Scheduling = &v2.SchedulingConfig{} + if src.Spec.Affinity != nil { + dst.Spec.Scheduling.Affinity = convertAffinityToV2(src.Spec.Affinity) + } + if len(src.Spec.Tolerations) > 0 { + dst.Spec.Scheduling.Tolerations = convertTolerationsToV2(src.Spec.Tolerations) + } + } + + return nil +} + +// ConvertFrom converts the Hub version (v2) to this v1 LocustTest. +// Note: This is a lossy conversion - v2-only fields are not preserved. +func (dst *LocustTest) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v2.LocustTest) + + // Metadata + dst.ObjectMeta = src.ObjectMeta + + // Image configuration + dst.Spec.Image = src.Spec.Image + dst.Spec.ImagePullPolicy = string(src.Spec.ImagePullPolicy) + dst.Spec.ImagePullSecrets = convertImagePullSecretsToV1(src.Spec.ImagePullSecrets) + + // Master configuration β†’ flat fields + dst.Spec.MasterCommandSeed = src.Spec.Master.Command + + // Worker configuration β†’ flat fields + dst.Spec.WorkerCommandSeed = src.Spec.Worker.Command + dst.Spec.WorkerReplicas = src.Spec.Worker.Replicas + + // Labels from grouped structure + if len(src.Spec.Master.Labels) > 0 || len(src.Spec.Worker.Labels) > 0 { + dst.Spec.Labels = &PodLabels{} + if len(src.Spec.Master.Labels) > 0 { + dst.Spec.Labels.Master = src.Spec.Master.Labels + } + if len(src.Spec.Worker.Labels) > 0 { + dst.Spec.Labels.Worker = src.Spec.Worker.Labels + } + } + + // Annotations from grouped structure + if len(src.Spec.Master.Annotations) > 0 || len(src.Spec.Worker.Annotations) > 0 { + dst.Spec.Annotations = &PodAnnotations{} + if len(src.Spec.Master.Annotations) > 0 { + dst.Spec.Annotations.Master = src.Spec.Master.Annotations + } + if len(src.Spec.Worker.Annotations) > 0 { + 
dst.Spec.Annotations.Worker = src.Spec.Worker.Annotations + } + } + + // Test files configuration β†’ flat fields + if src.Spec.TestFiles != nil { + dst.Spec.ConfigMap = src.Spec.TestFiles.ConfigMapRef + dst.Spec.LibConfigMap = src.Spec.TestFiles.LibConfigMapRef + } + + // Scheduling β†’ flat fields + if src.Spec.Scheduling != nil { + if src.Spec.Scheduling.Affinity != nil { + dst.Spec.Affinity = convertAffinityToV1(src.Spec.Scheduling.Affinity) + } + if len(src.Spec.Scheduling.Tolerations) > 0 { + dst.Spec.Tolerations = convertTolerationsToV1(src.Spec.Scheduling.Tolerations) + } + // Note: nodeSelector is lost (v2-only field) + } + + // The following v2-only fields are NOT preserved in v1: + // - master.resources, master.extraArgs + // - worker.resources, worker.extraArgs + // - testFiles.srcMountPath, testFiles.libMountPath + // - scheduling.nodeSelector + // - env (configMapRefs, secretRefs, variables, secretMounts) + // - volumes, volumeMounts + // - observability (OpenTelemetry config) + // - status (v1 has no status subresource fields) + + return nil +} + +// ============================================================================= +// Helper Functions +// ============================================================================= + +func convertImagePullSecretsToV2(secrets []string) []corev1.LocalObjectReference { + if len(secrets) == 0 { + return nil + } + result := make([]corev1.LocalObjectReference, len(secrets)) + for i, s := range secrets { + result[i] = corev1.LocalObjectReference{Name: s} + } + return result +} + +func convertImagePullSecretsToV1(secrets []corev1.LocalObjectReference) []string { + if len(secrets) == 0 { + return nil + } + result := make([]string, len(secrets)) + for i, s := range secrets { + result[i] = s.Name + } + return result +} + +func convertAffinityToV2(src *LocustTestAffinity) *corev1.Affinity { + if src == nil || src.NodeAffinity == nil { + return nil + } + + nodeReqs := 
src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + if len(nodeReqs) == 0 { + return nil + } + + terms := make([]corev1.NodeSelectorRequirement, 0, len(nodeReqs)) + for key, value := range nodeReqs { + terms = append(terms, corev1.NodeSelectorRequirement{ + Key: key, + Operator: corev1.NodeSelectorOpIn, + Values: []string{value}, + }) + } + + return &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + {MatchExpressions: terms}, + }, + }, + }, + } +} + +func convertAffinityToV1(src *corev1.Affinity) *LocustTestAffinity { + if src == nil || src.NodeAffinity == nil { + return nil + } + + required := src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + if required == nil || len(required.NodeSelectorTerms) == 0 { + return nil + } + + // Extract first term's match expressions into v1 format + // This is a lossy conversion - complex affinity rules may lose data + nodeReqs := make(map[string]string) + for _, term := range required.NodeSelectorTerms { + for _, expr := range term.MatchExpressions { + if expr.Operator == corev1.NodeSelectorOpIn && len(expr.Values) > 0 { + nodeReqs[expr.Key] = expr.Values[0] + } + } + } + + if len(nodeReqs) == 0 { + return nil + } + + return &LocustTestAffinity{ + NodeAffinity: &LocustTestNodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: nodeReqs, + }, + } +} + +func convertTolerationsToV2(src []LocustTestToleration) []corev1.Toleration { + if len(src) == 0 { + return nil + } + result := make([]corev1.Toleration, len(src)) + for i, t := range src { + result[i] = corev1.Toleration{ + Key: t.Key, + Operator: corev1.TolerationOperator(t.Operator), + Value: t.Value, + Effect: corev1.TaintEffect(t.Effect), + } + } + return result +} + +func convertTolerationsToV1(src []corev1.Toleration) []LocustTestToleration { + if len(src) == 0 { + return nil + } + result := 
make([]LocustTestToleration, len(src)) + for i, t := range src { + result[i] = LocustTestToleration{ + Key: t.Key, + Operator: string(t.Operator), + Value: t.Value, + Effect: string(t.Effect), + } + } + return result +} diff --git a/api/v1/locusttest_conversion_test.go b/api/v1/locusttest_conversion_test.go new file mode 100644 index 00000000..cb701956 --- /dev/null +++ b/api/v1/locusttest_conversion_test.go @@ -0,0 +1,542 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" +) + +func TestConvertTo_FullSpec(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-locust", + Namespace: "default", + }, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust -f /lotest/src/locustfile.py", + WorkerCommandSeed: "locust -f /lotest/src/locustfile.py", + WorkerReplicas: 10, + Image: "locustio/locust:2.43.1", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"my-registry-secret"}, + ConfigMap: "locust-tests", + LibConfigMap: "locust-lib", + Labels: &PodLabels{ + Master: map[string]string{"app": "locust-master"}, + Worker: map[string]string{"app": "locust-worker"}, + }, + Annotations: &PodAnnotations{ + Master: map[string]string{"prometheus.io/scrape": "true"}, + }, + 
Tolerations: []LocustTestToleration{ + {Key: "dedicated", Operator: "Equal", Value: "locust", Effect: "NoSchedule"}, + }, + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + // Verify metadata + assert.Equal(t, "test-locust", dst.Name) + assert.Equal(t, "default", dst.Namespace) + + // Verify image config + assert.Equal(t, "locustio/locust:2.43.1", dst.Spec.Image) + assert.Equal(t, corev1.PullIfNotPresent, dst.Spec.ImagePullPolicy) + require.Len(t, dst.Spec.ImagePullSecrets, 1) + assert.Equal(t, "my-registry-secret", dst.Spec.ImagePullSecrets[0].Name) + + // Verify master config + assert.Equal(t, "locust -f /lotest/src/locustfile.py", dst.Spec.Master.Command) + assert.True(t, *dst.Spec.Master.Autostart) + require.NotNil(t, dst.Spec.Master.Autoquit) + assert.True(t, dst.Spec.Master.Autoquit.Enabled) + assert.Equal(t, int32(60), dst.Spec.Master.Autoquit.Timeout) + assert.Equal(t, "locust-master", dst.Spec.Master.Labels["app"]) + assert.Equal(t, "true", dst.Spec.Master.Annotations["prometheus.io/scrape"]) + + // Verify worker config + assert.Equal(t, "locust -f /lotest/src/locustfile.py", dst.Spec.Worker.Command) + assert.Equal(t, int32(10), dst.Spec.Worker.Replicas) + assert.Equal(t, "locust-worker", dst.Spec.Worker.Labels["app"]) + + // Verify test files config + require.NotNil(t, dst.Spec.TestFiles) + assert.Equal(t, "locust-tests", dst.Spec.TestFiles.ConfigMapRef) + assert.Equal(t, "locust-lib", dst.Spec.TestFiles.LibConfigMapRef) + + // Verify scheduling config + require.NotNil(t, dst.Spec.Scheduling) + require.Len(t, dst.Spec.Scheduling.Tolerations, 1) + assert.Equal(t, "dedicated", dst.Spec.Scheduling.Tolerations[0].Key) +} + +func TestConvertFrom_FullSpec(t *testing.T) { + src := &v2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-locust", + Namespace: "default", + }, + Spec: v2.LocustTestSpec{ + Image: "locustio/locust:2.43.1", + ImagePullPolicy: corev1.PullAlways, + ImagePullSecrets: 
[]corev1.LocalObjectReference{ + {Name: "my-secret"}, + }, + Master: v2.MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Labels: map[string]string{"tier": "master"}, + }, + Worker: v2.WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 5, + Labels: map[string]string{"tier": "worker"}, + }, + TestFiles: &v2.TestFilesConfig{ + ConfigMapRef: "tests-cm", + LibConfigMapRef: "lib-cm", + }, + }, + } + + dst := &LocustTest{} + err := dst.ConvertFrom(src) + require.NoError(t, err) + + // Verify metadata + assert.Equal(t, "test-locust", dst.Name) + + // Verify flat fields + assert.Equal(t, "locust -f /lotest/src/locustfile.py", dst.Spec.MasterCommandSeed) + assert.Equal(t, "locust -f /lotest/src/locustfile.py", dst.Spec.WorkerCommandSeed) + assert.Equal(t, int32(5), dst.Spec.WorkerReplicas) + assert.Equal(t, "locustio/locust:2.43.1", dst.Spec.Image) + assert.Equal(t, "Always", dst.Spec.ImagePullPolicy) + + // Verify labels + require.NotNil(t, dst.Spec.Labels) + assert.Equal(t, "master", dst.Spec.Labels.Master["tier"]) + assert.Equal(t, "worker", dst.Spec.Labels.Worker["tier"]) + + // Verify test files + assert.Equal(t, "tests-cm", dst.Spec.ConfigMap) + assert.Equal(t, "lib-cm", dst.Spec.LibConfigMap) +} + +func TestRoundTrip_V1ToV2ToV1(t *testing.T) { + original := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "roundtrip-test", + Namespace: "test-ns", + }, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust -f /lotest/src/test.py", + WorkerCommandSeed: "locust -f /lotest/src/test.py", + WorkerReplicas: 3, + Image: "locustio/locust:latest", + ConfigMap: "my-tests", + }, + } + + // Convert v1 -> v2 + hub := &v2.LocustTest{} + err := original.ConvertTo(hub) + require.NoError(t, err) + + // Convert v2 -> v1 + result := &LocustTest{} + err = result.ConvertFrom(hub) + require.NoError(t, err) + + // Verify round-trip preserved v1 fields + assert.Equal(t, original.Name, result.Name) + assert.Equal(t, original.Namespace, result.Namespace) 
+ assert.Equal(t, original.Spec.MasterCommandSeed, result.Spec.MasterCommandSeed) + assert.Equal(t, original.Spec.WorkerCommandSeed, result.Spec.WorkerCommandSeed) + assert.Equal(t, original.Spec.WorkerReplicas, result.Spec.WorkerReplicas) + assert.Equal(t, original.Spec.Image, result.Spec.Image) + assert.Equal(t, original.Spec.ConfigMap, result.Spec.ConfigMap) +} + +func TestConvertTo_MinimalSpec(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minimal-test", + }, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + // Verify required fields + assert.Equal(t, "locust", dst.Spec.Master.Command) + assert.Equal(t, "locust", dst.Spec.Worker.Command) + assert.Equal(t, int32(1), dst.Spec.Worker.Replicas) + + // Verify optional fields are nil/empty + assert.Nil(t, dst.Spec.TestFiles) + assert.Nil(t, dst.Spec.Scheduling) + assert.Nil(t, dst.Spec.Env) + assert.Nil(t, dst.Spec.Observability) +} + +func TestAffinityConversion(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "affinity-test"}, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + Affinity: &LocustTestAffinity{ + NodeAffinity: &LocustTestNodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: map[string]string{ + "node-type": "high-cpu", + }, + }, + }, + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + require.NotNil(t, dst.Spec.Scheduling) + require.NotNil(t, dst.Spec.Scheduling.Affinity) + require.NotNil(t, dst.Spec.Scheduling.Affinity.NodeAffinity) + + // Convert back + result := &LocustTest{} + err = result.ConvertFrom(dst) + require.NoError(t, err) + + require.NotNil(t, result.Spec.Affinity) + require.NotNil(t, 
result.Spec.Affinity.NodeAffinity) + assert.Equal(t, "high-cpu", result.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution["node-type"]) +} + +func TestTolerationsConversion(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "tolerations-test"}, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + Tolerations: []LocustTestToleration{ + {Key: "dedicated", Operator: "Equal", Value: "locust", Effect: "NoSchedule"}, + {Key: "gpu", Operator: "Exists", Effect: "NoExecute"}, + }, + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + require.NotNil(t, dst.Spec.Scheduling) + require.Len(t, dst.Spec.Scheduling.Tolerations, 2) + assert.Equal(t, "dedicated", dst.Spec.Scheduling.Tolerations[0].Key) + assert.Equal(t, corev1.TolerationOpEqual, dst.Spec.Scheduling.Tolerations[0].Operator) + assert.Equal(t, "locust", dst.Spec.Scheduling.Tolerations[0].Value) + assert.Equal(t, corev1.TaintEffectNoSchedule, dst.Spec.Scheduling.Tolerations[0].Effect) + + // Convert back + result := &LocustTest{} + err = result.ConvertFrom(dst) + require.NoError(t, err) + + require.Len(t, result.Spec.Tolerations, 2) + assert.Equal(t, "dedicated", result.Spec.Tolerations[0].Key) + assert.Equal(t, "Equal", result.Spec.Tolerations[0].Operator) + assert.Equal(t, "locust", result.Spec.Tolerations[0].Value) + assert.Equal(t, "NoSchedule", result.Spec.Tolerations[0].Effect) +} + +func TestImagePullSecretsConversion(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "secrets-test"}, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + ImagePullSecrets: []string{"secret1", "secret2", "secret3"}, + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + require.Len(t, 
dst.Spec.ImagePullSecrets, 3) + assert.Equal(t, "secret1", dst.Spec.ImagePullSecrets[0].Name) + assert.Equal(t, "secret2", dst.Spec.ImagePullSecrets[1].Name) + assert.Equal(t, "secret3", dst.Spec.ImagePullSecrets[2].Name) + + // Convert back + result := &LocustTest{} + err = result.ConvertFrom(dst) + require.NoError(t, err) + + require.Len(t, result.Spec.ImagePullSecrets, 3) + assert.Equal(t, "secret1", result.Spec.ImagePullSecrets[0]) + assert.Equal(t, "secret2", result.Spec.ImagePullSecrets[1]) + assert.Equal(t, "secret3", result.Spec.ImagePullSecrets[2]) +} + +func TestConvertTo_EmptyOptionalFields(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "empty-optional"}, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + Labels: nil, + Annotations: nil, + Affinity: nil, + Tolerations: nil, + ImagePullSecrets: nil, + }, + } + + dst := &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + assert.Empty(t, dst.Spec.Master.Labels) + assert.Empty(t, dst.Spec.Master.Annotations) + assert.Empty(t, dst.Spec.Worker.Labels) + assert.Empty(t, dst.Spec.Worker.Annotations) + assert.Nil(t, dst.Spec.Scheduling) + assert.Nil(t, dst.Spec.ImagePullSecrets) +} + +func TestConvertFrom_EmptyOptionalFields(t *testing.T) { + src := &v2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "empty-optional"}, + Spec: v2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: v2.MasterSpec{ + Command: "locust", + }, + Worker: v2.WorkerSpec{ + Command: "locust", + Replicas: 1, + }, + }, + } + + dst := &LocustTest{} + err := dst.ConvertFrom(src) + require.NoError(t, err) + + assert.Nil(t, dst.Spec.Labels) + assert.Nil(t, dst.Spec.Annotations) + assert.Nil(t, dst.Spec.Affinity) + assert.Empty(t, dst.Spec.Tolerations) + assert.Empty(t, dst.Spec.ImagePullSecrets) + assert.Empty(t, dst.Spec.ConfigMap) + assert.Empty(t, dst.Spec.LibConfigMap) +} + +func 
TestConvertFrom_V2OnlyFieldsLost(t *testing.T) { + src := &v2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "v2-only-fields"}, + Spec: v2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: v2.MasterSpec{ + Command: "locust", + ExtraArgs: []string{"--headless"}, + }, + Worker: v2.WorkerSpec{ + Command: "locust", + Replicas: 3, + ExtraArgs: []string{"--processes", "4"}, + }, + TestFiles: &v2.TestFilesConfig{ + ConfigMapRef: "tests", + SrcMountPath: "/custom/path", + LibMountPath: "/custom/lib", + }, + Scheduling: &v2.SchedulingConfig{ + NodeSelector: map[string]string{"zone": "us-west"}, + }, + Env: &v2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "DEBUG", Value: "true"}, + }, + }, + Observability: &v2.ObservabilityConfig{ + OpenTelemetry: &v2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel:4317", + }, + }, + }, + } + + dst := &LocustTest{} + err := dst.ConvertFrom(src) + require.NoError(t, err) + + // Verify base fields are preserved + assert.Equal(t, "locust", dst.Spec.MasterCommandSeed) + assert.Equal(t, "locust", dst.Spec.WorkerCommandSeed) + assert.Equal(t, int32(3), dst.Spec.WorkerReplicas) + assert.Equal(t, "tests", dst.Spec.ConfigMap) + + // v2-only fields are lost - we can't verify they're nil in v1 + // because v1 doesn't have these fields at all + // This test mainly ensures conversion doesn't error on v2-only fields +} + +func TestConvertTo_LabelsAndAnnotations(t *testing.T) { + src := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "labels-annotations-test"}, + Spec: LocustTestSpec{ + MasterCommandSeed: "locust", + WorkerCommandSeed: "locust", + WorkerReplicas: 1, + Image: "locustio/locust:latest", + Labels: &PodLabels{ + Master: map[string]string{"env": "test", "component": "master"}, + Worker: map[string]string{"env": "test", "component": "worker"}, + }, + Annotations: &PodAnnotations{ + Master: map[string]string{"note": "master-annotation"}, + Worker: map[string]string{"note": "worker-annotation"}, + }, + }, + } + + dst 
:= &v2.LocustTest{} + err := src.ConvertTo(dst) + require.NoError(t, err) + + assert.Equal(t, "test", dst.Spec.Master.Labels["env"]) + assert.Equal(t, "master", dst.Spec.Master.Labels["component"]) + assert.Equal(t, "master-annotation", dst.Spec.Master.Annotations["note"]) + + assert.Equal(t, "test", dst.Spec.Worker.Labels["env"]) + assert.Equal(t, "worker", dst.Spec.Worker.Labels["component"]) + assert.Equal(t, "worker-annotation", dst.Spec.Worker.Annotations["note"]) +} + +func TestConvertFrom_LabelsAndAnnotations(t *testing.T) { + src := &v2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "labels-annotations-test"}, + Spec: v2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: v2.MasterSpec{ + Command: "locust", + Labels: map[string]string{"role": "master"}, + Annotations: map[string]string{"desc": "master-desc"}, + }, + Worker: v2.WorkerSpec{ + Command: "locust", + Replicas: 1, + Labels: map[string]string{"role": "worker"}, + Annotations: map[string]string{"desc": "worker-desc"}, + }, + }, + } + + dst := &LocustTest{} + err := dst.ConvertFrom(src) + require.NoError(t, err) + + require.NotNil(t, dst.Spec.Labels) + assert.Equal(t, "master", dst.Spec.Labels.Master["role"]) + assert.Equal(t, "worker", dst.Spec.Labels.Worker["role"]) + + require.NotNil(t, dst.Spec.Annotations) + assert.Equal(t, "master-desc", dst.Spec.Annotations.Master["desc"]) + assert.Equal(t, "worker-desc", dst.Spec.Annotations.Worker["desc"]) +} + +func TestNilAffinityConversion(t *testing.T) { + // Test nil affinity in v1 + result := convertAffinityToV2(nil) + assert.Nil(t, result) + + // Test nil affinity in v2 + result2 := convertAffinityToV1(nil) + assert.Nil(t, result2) + + // Test affinity with nil NodeAffinity + result3 := convertAffinityToV2(&LocustTestAffinity{NodeAffinity: nil}) + assert.Nil(t, result3) + + // Test affinity with empty requirements + result4 := convertAffinityToV2(&LocustTestAffinity{ + NodeAffinity: &LocustTestNodeAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: map[string]string{}, + }, + }) + assert.Nil(t, result4) +} + +func TestEmptyTolerationsConversion(t *testing.T) { + // Test empty tolerations in v1 + result := convertTolerationsToV2(nil) + assert.Nil(t, result) + + result2 := convertTolerationsToV2([]LocustTestToleration{}) + assert.Nil(t, result2) + + // Test empty tolerations in v2 + result3 := convertTolerationsToV1(nil) + assert.Nil(t, result3) + + result4 := convertTolerationsToV1([]corev1.Toleration{}) + assert.Nil(t, result4) +} + +func TestEmptyImagePullSecretsConversion(t *testing.T) { + // Test empty secrets in v1 + result := convertImagePullSecretsToV2(nil) + assert.Nil(t, result) + + result2 := convertImagePullSecretsToV2([]string{}) + assert.Nil(t, result2) + + // Test empty secrets in v2 + result3 := convertImagePullSecretsToV1(nil) + assert.Nil(t, result3) + + result4 := convertImagePullSecretsToV1([]corev1.LocalObjectReference{}) + assert.Nil(t, result4) +} diff --git a/api/v1/locusttest_types.go b/api/v1/locusttest_types.go new file mode 100644 index 00000000..df0d4dd4 --- /dev/null +++ b/api/v1/locusttest_types.go @@ -0,0 +1,173 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PodLabels defines labels for master and worker pods. +type PodLabels struct { + // Master defines labels attached to the master pod. 
+ // +optional + Master map[string]string `json:"master,omitempty"` + + // Worker defines labels attached to worker pods. + // +optional + Worker map[string]string `json:"worker,omitempty"` +} + +// PodAnnotations defines annotations for master and worker pods. +type PodAnnotations struct { + // Master defines annotations attached to the master pod. + // +optional + Master map[string]string `json:"master,omitempty"` + + // Worker defines annotations attached to worker pods. + // +optional + Worker map[string]string `json:"worker,omitempty"` +} + +// LocustTestAffinity defines affinity rules for pod scheduling. +type LocustTestAffinity struct { + // NodeAffinity defines node affinity rules. + // +optional + NodeAffinity *LocustTestNodeAffinity `json:"nodeAffinity,omitempty"` +} + +// LocustTestNodeAffinity defines node affinity configuration. +type LocustTestNodeAffinity struct { + // RequiredDuringSchedulingIgnoredDuringExecution defines required node affinity rules. + // The map keys are label keys and values are label values that nodes must have. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution map[string]string `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// LocustTestToleration defines a toleration for pod scheduling. +type LocustTestToleration struct { + // Key is the taint key that the toleration applies to. + // +kubebuilder:validation:Required + Key string `json:"key"` + + // Operator represents the relationship between the key and value. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=Exists;Equal + Operator string `json:"operator"` + + // Value is the taint value the toleration matches to. + // +optional + Value string `json:"value,omitempty"` + + // Effect indicates the taint effect to match. 
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute + Effect string `json:"effect"` +} + +// LocustTestSpec defines the desired state of LocustTest. +type LocustTestSpec struct { + // MasterCommandSeed is the command seed for the master pod. + // This forms the base of the locust master command. + // +kubebuilder:validation:Required + MasterCommandSeed string `json:"masterCommandSeed"` + + // WorkerCommandSeed is the command seed for worker pods. + // This forms the base of the locust worker command. + // +kubebuilder:validation:Required + WorkerCommandSeed string `json:"workerCommandSeed"` + + // WorkerReplicas is the number of worker pods to spawn. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=500 + // +kubebuilder:default=1 + WorkerReplicas int32 `json:"workerReplicas"` + + // Image is the Locust container image to use. + // +kubebuilder:validation:Required + Image string `json:"image"` + + // ImagePullPolicy defines when to pull the image. + // +kubebuilder:validation:Enum=Always;IfNotPresent;Never + // +optional + ImagePullPolicy string `json:"imagePullPolicy,omitempty"` + + // ImagePullSecrets is a list of secret names for pulling images from private registries. + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // ConfigMap is the name of the ConfigMap containing the test file(s). + // +optional + ConfigMap string `json:"configMap,omitempty"` + + // LibConfigMap is the name of the ConfigMap containing lib directory files. + // +optional + LibConfigMap string `json:"libConfigMap,omitempty"` + + // Labels defines labels to attach to deployed pods. + // +optional + Labels *PodLabels `json:"labels,omitempty"` + + // Annotations defines annotations to attach to deployed pods. + // +optional + Annotations *PodAnnotations `json:"annotations,omitempty"` + + // Affinity defines affinity rules for pod scheduling. 
+ // +optional + Affinity *LocustTestAffinity `json:"affinity,omitempty"` + + // Tolerations defines tolerations for pod scheduling. + // +optional + Tolerations []LocustTestToleration `json:"tolerations,omitempty"` +} + +// LocustTestStatus defines the observed state of LocustTest. +type LocustTestStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=lotest +// +kubebuilder:deprecatedversion:warning="locust.io/v1 LocustTest is deprecated, migrate to locust.io/v2" +// +kubebuilder:printcolumn:name="master_cmd",type=string,JSONPath=`.spec.masterCommandSeed`,description="Master pod command seed" +// +kubebuilder:printcolumn:name="worker_replica_count",type=integer,JSONPath=`.spec.workerReplicas`,description="Number of requested worker pods" +// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`,description="Locust image" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// LocustTest is the Schema for the locusttests API (v1 - DEPRECATED). +type LocustTest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LocustTestSpec `json:"spec,omitempty"` + Status LocustTestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LocustTestList contains a list of LocustTest. +type LocustTestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LocustTest `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LocustTest{}, &LocustTestList{}) +} diff --git a/api/v1/locusttest_types_test.go b/api/v1/locusttest_types_test.go new file mode 100644 index 00000000..0899eb89 --- /dev/null +++ b/api/v1/locusttest_types_test.go @@ -0,0 +1,163 @@ +/* +Copyright 2026. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLocustTestSpec_JSONRoundTrip(t *testing.T) { + spec := LocustTestSpec{ + MasterCommandSeed: "--locustfile /lotest/src/test.py --host https://example.com", + WorkerCommandSeed: "--locustfile /lotest/src/test.py", + WorkerReplicas: 3, + Image: "locustio/locust:latest", + ImagePullPolicy: "Always", + ConfigMap: "test-config", + Labels: &PodLabels{ + Master: map[string]string{"app": "locust-master"}, + Worker: map[string]string{"app": "locust-worker"}, + }, + } + + // Marshal to JSON + data, err := json.Marshal(spec) + require.NoError(t, err) + + // Unmarshal back + var decoded LocustTestSpec + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // Verify roundtrip + assert.Equal(t, spec.MasterCommandSeed, decoded.MasterCommandSeed) + assert.Equal(t, spec.WorkerReplicas, decoded.WorkerReplicas) + assert.Equal(t, spec.Labels.Master["app"], decoded.Labels.Master["app"]) +} + +func TestLocustTestSpec_JSONFieldNames(t *testing.T) { + spec := LocustTestSpec{ + MasterCommandSeed: "test", + WorkerCommandSeed: "test", + WorkerReplicas: 1, + Image: "test", + } + + data, err := json.Marshal(spec) + require.NoError(t, err) + + // Verify camelCase JSON field names + jsonStr := string(data) + assert.Contains(t, jsonStr, `"masterCommandSeed"`) + assert.Contains(t, jsonStr, 
`"workerCommandSeed"`) + assert.Contains(t, jsonStr, `"workerReplicas"`) +} + +func TestLocustTestSpec_AllFields(t *testing.T) { + spec := LocustTestSpec{ + MasterCommandSeed: "--locustfile /lotest/src/test.py --host https://example.com", + WorkerCommandSeed: "--locustfile /lotest/src/test.py", + WorkerReplicas: 5, + Image: "locustio/locust:2.15.1", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"my-registry-secret"}, + ConfigMap: "locust-test-config", + LibConfigMap: "locust-lib-config", + Labels: &PodLabels{ + Master: map[string]string{"role": "master", "team": "platform"}, + Worker: map[string]string{"role": "worker", "team": "platform"}, + }, + Annotations: &PodAnnotations{ + Master: map[string]string{"prometheus.io/scrape": "true"}, + Worker: map[string]string{"prometheus.io/scrape": "true"}, + }, + Affinity: &LocustTestAffinity{ + NodeAffinity: &LocustTestNodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: map[string]string{ + "node-type": "compute", + }, + }, + }, + Tolerations: []LocustTestToleration{ + { + Key: "dedicated", + Operator: "Equal", + Value: "locust", + Effect: "NoSchedule", + }, + }, + } + + // Marshal to JSON + data, err := json.Marshal(spec) + require.NoError(t, err) + + // Unmarshal back + var decoded LocustTestSpec + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // Verify all fields + assert.Equal(t, spec.MasterCommandSeed, decoded.MasterCommandSeed) + assert.Equal(t, spec.WorkerCommandSeed, decoded.WorkerCommandSeed) + assert.Equal(t, spec.WorkerReplicas, decoded.WorkerReplicas) + assert.Equal(t, spec.Image, decoded.Image) + assert.Equal(t, spec.ImagePullPolicy, decoded.ImagePullPolicy) + assert.Equal(t, spec.ImagePullSecrets, decoded.ImagePullSecrets) + assert.Equal(t, spec.ConfigMap, decoded.ConfigMap) + assert.Equal(t, spec.LibConfigMap, decoded.LibConfigMap) + assert.Equal(t, spec.Labels.Master, decoded.Labels.Master) + assert.Equal(t, spec.Labels.Worker, decoded.Labels.Worker) + 
assert.Equal(t, spec.Annotations.Master, decoded.Annotations.Master) + assert.Equal(t, spec.Annotations.Worker, decoded.Annotations.Worker) + assert.Equal(t, spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + decoded.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) + assert.Len(t, decoded.Tolerations, 1) + assert.Equal(t, spec.Tolerations[0].Key, decoded.Tolerations[0].Key) + assert.Equal(t, spec.Tolerations[0].Operator, decoded.Tolerations[0].Operator) + assert.Equal(t, spec.Tolerations[0].Value, decoded.Tolerations[0].Value) + assert.Equal(t, spec.Tolerations[0].Effect, decoded.Tolerations[0].Effect) +} + +func TestLocustTestSpec_OmitEmptyFields(t *testing.T) { + // Only required fields + spec := LocustTestSpec{ + MasterCommandSeed: "test", + WorkerCommandSeed: "test", + WorkerReplicas: 1, + Image: "test", + } + + data, err := json.Marshal(spec) + require.NoError(t, err) + + jsonStr := string(data) + + // Optional fields should not be present + assert.NotContains(t, jsonStr, `"imagePullPolicy"`) + assert.NotContains(t, jsonStr, `"imagePullSecrets"`) + assert.NotContains(t, jsonStr, `"configMap"`) + assert.NotContains(t, jsonStr, `"libConfigMap"`) + assert.NotContains(t, jsonStr, `"labels"`) + assert.NotContains(t, jsonStr, `"annotations"`) + assert.NotContains(t, jsonStr, `"affinity"`) + assert.NotContains(t, jsonStr, `"tolerations"`) +} diff --git a/api/v1/locusttest_webhook.go b/api/v1/locusttest_webhook.go new file mode 100644 index 00000000..7bb259a5 --- /dev/null +++ b/api/v1/locusttest_webhook.go @@ -0,0 +1,28 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + ctrl "sigs.k8s.io/controller-runtime" +) + +// SetupWebhookWithManager registers the webhook for LocustTest with the manager. +func (r *LocustTest) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..88f51fd7 --- /dev/null +++ b/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,254 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): this block is autogenerated deepcopy code (controller-gen
// object:generate). Do not edit by hand; regenerate with `make generate`.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTest) DeepCopyInto(out *LocustTest) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	// Status has no pointer/reference fields here, so a value copy suffices.
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTest.
func (in *LocustTest) DeepCopy() *LocustTest {
	if in == nil {
		return nil
	}
	out := new(LocustTest)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LocustTest) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestAffinity) DeepCopyInto(out *LocustTestAffinity) {
	*out = *in
	if in.NodeAffinity != nil {
		in, out := &in.NodeAffinity, &out.NodeAffinity
		*out = new(LocustTestNodeAffinity)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestAffinity.
func (in *LocustTestAffinity) DeepCopy() *LocustTestAffinity {
	if in == nil {
		return nil
	}
	out := new(LocustTestAffinity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestList) DeepCopyInto(out *LocustTestList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]LocustTest, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestList.
func (in *LocustTestList) DeepCopy() *LocustTestList {
	if in == nil {
		return nil
	}
	out := new(LocustTestList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LocustTestList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestNodeAffinity) DeepCopyInto(out *LocustTestNodeAffinity) {
	*out = *in
	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestNodeAffinity.
func (in *LocustTestNodeAffinity) DeepCopy() *LocustTestNodeAffinity {
	if in == nil {
		return nil
	}
	out := new(LocustTestNodeAffinity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestSpec) DeepCopyInto(out *LocustTestSpec) {
	*out = *in
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = new(PodLabels)
		(*in).DeepCopyInto(*out)
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = new(PodAnnotations)
		(*in).DeepCopyInto(*out)
	}
	if in.Affinity != nil {
		in, out := &in.Affinity, &out.Affinity
		*out = new(LocustTestAffinity)
		(*in).DeepCopyInto(*out)
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]LocustTestToleration, len(*in))
		// LocustTestToleration holds no pointers (see its DeepCopyInto below),
		// so a shallow element copy is a correct deep copy.
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestSpec.
func (in *LocustTestSpec) DeepCopy() *LocustTestSpec {
	if in == nil {
		return nil
	}
	out := new(LocustTestSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestStatus) DeepCopyInto(out *LocustTestStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestStatus.
func (in *LocustTestStatus) DeepCopy() *LocustTestStatus {
	if in == nil {
		return nil
	}
	out := new(LocustTestStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustTestToleration) DeepCopyInto(out *LocustTestToleration) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestToleration.
func (in *LocustTestToleration) DeepCopy() *LocustTestToleration {
	if in == nil {
		return nil
	}
	out := new(LocustTestToleration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAnnotations) DeepCopyInto(out *PodAnnotations) {
	*out = *in
	if in.Master != nil {
		in, out := &in.Master, &out.Master
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Worker != nil {
		in, out := &in.Worker, &out.Worker
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAnnotations.
func (in *PodAnnotations) DeepCopy() *PodAnnotations {
	if in == nil {
		return nil
	}
	out := new(PodAnnotations)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLabels) DeepCopyInto(out *PodLabels) {
	*out = *in
	if in.Master != nil {
		in, out := &in.Master, &out.Master
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Worker != nil {
		in, out := &in.Worker, &out.Worker
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLabels.
func (in *PodLabels) DeepCopy() *PodLabels {
	if in == nil {
		return nil
	}
	out := new(PodLabels)
	in.DeepCopyInto(out)
	return out
}
diff --git a/api/v2/conditions.go b/api/v2/conditions.go
new file mode 100644
index 00000000..76c25724
--- /dev/null
+++ b/api/v2/conditions.go
@@ -0,0 +1,83 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

// Condition types for LocustTest.
// These are the `type` fields of metav1.Condition entries in
// LocustTestStatus.Conditions.
const (
	// ConditionTypeReady indicates all resources are created and ready.
	ConditionTypeReady = "Ready"

	// ConditionTypeWorkersConnected indicates workers have connected to master.
	ConditionTypeWorkersConnected = "WorkersConnected"

	// ConditionTypeTestCompleted indicates the test has finished.
	ConditionTypeTestCompleted = "TestCompleted"

	// ConditionTypeSpecDrifted indicates the CR spec was modified after creation but changes are ignored.
	ConditionTypeSpecDrifted = "SpecDrifted"

	// ConditionTypePodsHealthy indicates whether pods are healthy and running.
	ConditionTypePodsHealthy = "PodsHealthy"
)

// Condition reasons for Ready condition.
const (
	ReasonResourcesCreating = "ResourcesCreating"
	ReasonResourcesCreated  = "ResourcesCreated"
	ReasonResourcesFailed   = "ResourcesFailed"
)

// Condition reasons for WorkersConnected condition.
const (
	ReasonWaitingForWorkers   = "WaitingForWorkers"
	ReasonAllWorkersConnected = "AllWorkersConnected"
	ReasonWorkersMissing      = "WorkersMissing"
)

// Condition reasons for TestCompleted condition.
const (
	ReasonTestInProgress = "TestInProgress"
	ReasonTestSucceeded  = "TestSucceeded"
	ReasonTestFailed     = "TestFailed"
)

// Condition reasons for SpecDrifted condition.
const (
	ReasonSpecChangeIgnored = "SpecChangeIgnored"
)

// Condition reasons for PodsHealthy condition.
const (
	ReasonPodsStarting       = "PodsStarting"
	ReasonPodsHealthy        = "PodsHealthy"
	ReasonPodImagePullError  = "ImagePullError"
	ReasonPodConfigError     = "ConfigurationError"
	ReasonPodSchedulingError = "SchedulingError"
	ReasonPodCrashLoop       = "CrashLoopBackOff"
	ReasonPodInitError       = "InitializationError"
)

// Phase represents the current lifecycle phase of a LocustTest.
type Phase string

// Phase constants for LocustTest status.
const (
	PhasePending   Phase = "Pending"
	PhaseRunning   Phase = "Running"
	PhaseSucceeded Phase = "Succeeded"
	PhaseFailed    Phase = "Failed"
)
diff --git a/api/v2/groupversion_info.go b/api/v2/groupversion_info.go
new file mode 100644
index 00000000..baa2b878
--- /dev/null
+++ b/api/v2/groupversion_info.go
@@ -0,0 +1,36 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v2 contains API Schema definitions for the locust v2 API group.
// +kubebuilder:object:generate=true
// +groupName=locust.io
package v2

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "locust.io", Version: "v2"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
diff --git a/api/v2/locusttest_conversion.go b/api/v2/locusttest_conversion.go
new file mode 100644
index 00000000..6d5daf1d
--- /dev/null
+++ b/api/v2/locusttest_conversion.go
@@ -0,0 +1,21 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

// Hub marks v2 as the hub version for conversions.
// All spoke versions (v1, future versions) convert to/from v2.
func (*LocustTest) Hub() {}
diff --git a/api/v2/locusttest_types.go b/api/v2/locusttest_types.go
new file mode 100644
index 00000000..e8999eee
--- /dev/null
+++ b/api/v2/locusttest_types.go
@@ -0,0 +1,397 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ============================================
// MASTER CONFIGURATION
// ============================================

// MasterSpec defines master node configuration.
type MasterSpec struct {
	// Command is the base command for the master node.
	// The operator appends: --master --master-port=5557 --expect-workers=N
	// +kubebuilder:validation:Required
	Command string `json:"command"`

	// Resources defines resource requests and limits for the master pod.
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`

	// Labels for the master pod.
	// +optional
	Labels map[string]string `json:"labels,omitempty"`

	// Annotations for the master pod.
	// +optional
	Annotations map[string]string `json:"annotations,omitempty"`

	// Autostart enables the --autostart flag to start the test automatically.
	// Pointer so "unset" is distinguishable from an explicit false.
	// +optional
	// +kubebuilder:default=true
	Autostart *bool `json:"autostart,omitempty"`

	// Autoquit configuration for automatic test termination.
	// +optional
	Autoquit *AutoquitConfig `json:"autoquit,omitempty"`

	// ExtraArgs are additional CLI arguments appended to the command.
	// +optional
	ExtraArgs []string `json:"extraArgs,omitempty"`
}

// AutoquitConfig defines autoquit behavior for the master.
type AutoquitConfig struct {
	// Enabled enables the --autoquit flag.
	// +kubebuilder:default=true
	Enabled bool `json:"enabled"`

	// Timeout in seconds after test completion before quitting.
	// +optional
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:default=60
	Timeout int32 `json:"timeout,omitempty"`
}

// ============================================
// WORKER CONFIGURATION
// ============================================

// WorkerSpec defines worker node configuration.
type WorkerSpec struct {
	// Command is the base command for worker nodes.
	// The operator appends: --worker --master-host= --master-port=5557
	// +kubebuilder:validation:Required
	Command string `json:"command"`

	// Replicas is the number of worker pods to create.
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=500
	Replicas int32 `json:"replicas"`

	// Resources defines resource requests and limits for worker pods.
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`

	// Labels for worker pods.
	// +optional
	Labels map[string]string `json:"labels,omitempty"`

	// Annotations for worker pods.
	// +optional
	Annotations map[string]string `json:"annotations,omitempty"`

	// ExtraArgs are additional CLI arguments appended to the command.
	// +optional
	ExtraArgs []string `json:"extraArgs,omitempty"`
}

// ============================================
// TEST FILES CONFIGURATION
// ============================================

// TestFilesConfig defines test file mounting configuration.
type TestFilesConfig struct {
	// ConfigMapRef is the name of the ConfigMap containing locustfile(s).
	// +optional
	ConfigMapRef string `json:"configMapRef,omitempty"`

	// LibConfigMapRef is the name of the ConfigMap containing library files.
	// +optional
	LibConfigMapRef string `json:"libConfigMapRef,omitempty"`

	// SrcMountPath is the mount path for test files.
	// +optional
	// +kubebuilder:default="/lotest/src"
	SrcMountPath string `json:"srcMountPath,omitempty"`

	// LibMountPath is the mount path for library files.
	// +optional
	// +kubebuilder:default="/opt/locust/lib"
	LibMountPath string `json:"libMountPath,omitempty"`
}

// ============================================
// SCHEDULING CONFIGURATION
// ============================================

// SchedulingConfig defines pod scheduling configuration.
type SchedulingConfig struct {
	// Affinity rules for pod scheduling.
	// +optional
	Affinity *corev1.Affinity `json:"affinity,omitempty"`

	// Tolerations for pod scheduling.
	// +optional
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

	// NodeSelector for pod scheduling.
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}

// ============================================
// ENVIRONMENT INJECTION (Issue #149)
// ============================================

// EnvConfig defines environment variable injection configuration.
type EnvConfig struct {
	// ConfigMapRefs injects all keys from ConfigMaps as environment variables.
	// +optional
	ConfigMapRefs []ConfigMapEnvSource `json:"configMapRefs,omitempty"`

	// SecretRefs injects all keys from Secrets as environment variables.
	// +optional
	SecretRefs []SecretEnvSource `json:"secretRefs,omitempty"`

	// Variables defines specific environment variables.
	// +optional
	Variables []corev1.EnvVar `json:"variables,omitempty"`

	// SecretMounts mounts secrets as files in the container.
	// +optional
	SecretMounts []SecretMount `json:"secretMounts,omitempty"`
}

// ConfigMapEnvSource defines a ConfigMap environment source.
type ConfigMapEnvSource struct {
	// Name of the ConfigMap.
	// +kubebuilder:validation:Required
	Name string `json:"name"`

	// Prefix to add to all keys when injecting as env vars.
	// +optional
	Prefix string `json:"prefix,omitempty"`
}

// SecretEnvSource defines a Secret environment source.
type SecretEnvSource struct {
	// Name of the Secret.
	// +kubebuilder:validation:Required
	Name string `json:"name"`

	// Prefix to add to all keys when injecting as env vars.
	// +optional
	Prefix string `json:"prefix,omitempty"`
}

// SecretMount defines a secret file mount.
type SecretMount struct {
	// Name of the secret to mount.
	// +kubebuilder:validation:Required
	Name string `json:"name"`

	// MountPath is the path where the secret should be mounted.
	// Validated by the admission webhook against operator-reserved paths.
	// +kubebuilder:validation:Required
	MountPath string `json:"mountPath"`

	// ReadOnly mounts the secret as read-only.
	// +optional
	// +kubebuilder:default=true
	ReadOnly bool `json:"readOnly,omitempty"`
}

// ============================================
// VOLUME MOUNTING (Issue #252)
// ============================================

// TargetedVolumeMount extends VolumeMount with target pod selection.
type TargetedVolumeMount struct {
	corev1.VolumeMount `json:",inline"`

	// Target specifies which pods receive this mount.
	// +optional
	// +kubebuilder:validation:Enum=master;worker;both
	// +kubebuilder:default=both
	Target string `json:"target,omitempty"`
}

// ============================================
// OBSERVABILITY (Issue #72)
// ============================================

// ObservabilityConfig defines observability settings.
type ObservabilityConfig struct {
	// OpenTelemetry configuration for native Locust OTel integration.
	// +optional
	OpenTelemetry *OpenTelemetryConfig `json:"openTelemetry,omitempty"`
}

// OpenTelemetryConfig defines OpenTelemetry integration settings.
type OpenTelemetryConfig struct {
	// Enabled enables OpenTelemetry integration.
	// When true, adds --otel flag to Locust command.
	// +kubebuilder:default=false
	Enabled bool `json:"enabled"`

	// Endpoint is the OTel collector endpoint (e.g., "otel-collector:4317").
	// Required when Enabled is true (enforced by the admission webhook).
	// +optional
	Endpoint string `json:"endpoint,omitempty"`

	// Protocol for OTel export.
	// +optional
	// +kubebuilder:validation:Enum=grpc;http/protobuf
	// +kubebuilder:default=grpc
	Protocol string `json:"protocol,omitempty"`

	// Insecure skips TLS verification for the collector connection.
	// +optional
	// +kubebuilder:default=false
	Insecure bool `json:"insecure,omitempty"`

	// ExtraEnvVars for additional OTel SDK configuration.
	// +optional
	ExtraEnvVars map[string]string `json:"extraEnvVars,omitempty"`
}

// ============================================
// STATUS
// ============================================

// LocustTestStatus defines the observed state of LocustTest.
type LocustTestStatus struct {
	// Phase is the current lifecycle phase of the test.
	// +kubebuilder:validation:Enum=Pending;Running;Succeeded;Failed
	// +optional
	Phase Phase `json:"phase,omitempty"`

	// ObservedGeneration is the most recent generation observed by the controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// ExpectedWorkers is the number of workers expected to connect.
	// +optional
	ExpectedWorkers int32 `json:"expectedWorkers,omitempty"`

	// ConnectedWorkers is the approximate number of connected workers,
	// derived from the worker Job's Active pod count (Job.Status.Active).
	// This is an approximation as Kubernetes Job.Status.Active may lag behind
	// actual Locust worker connections.
	// +optional
	ConnectedWorkers int32 `json:"connectedWorkers,omitempty"`

	// StartTime is when the test started.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty"`

	// CompletionTime is when the test completed.
	// +optional
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`

	// Conditions represent the latest available observations of the test's state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=type
	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}

// ============================================
// SPEC
// ============================================

// LocustTestSpec defines the desired state of LocustTest.
type LocustTestSpec struct {
	// Image is the container image for Locust pods.
	// +kubebuilder:validation:Required
	Image string `json:"image"`

	// ImagePullPolicy for the Locust container.
	// +optional
	// +kubebuilder:validation:Enum=Always;IfNotPresent;Never
	// +kubebuilder:default=IfNotPresent
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`

	// ImagePullSecrets for pulling from private registries.
	// +optional
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

	// Master configuration for the master node.
	// +kubebuilder:validation:Required
	Master MasterSpec `json:"master"`

	// Worker configuration for worker nodes.
	// +kubebuilder:validation:Required
	Worker WorkerSpec `json:"worker"`

	// TestFiles configuration for locustfile and library mounting.
	// +optional
	TestFiles *TestFilesConfig `json:"testFiles,omitempty"`

	// Scheduling configuration for pod placement.
	// +optional
	Scheduling *SchedulingConfig `json:"scheduling,omitempty"`

	// Env configuration for environment variable injection.
	// +optional
	Env *EnvConfig `json:"env,omitempty"`

	// Volumes to add to pods.
	// +optional
	Volumes []corev1.Volume `json:"volumes,omitempty"`

	// VolumeMounts for the locust container with target selection.
	// +optional
	VolumeMounts []TargetedVolumeMount `json:"volumeMounts,omitempty"`

	// Observability configuration for metrics and tracing.
	// +optional
	Observability *ObservabilityConfig `json:"observability,omitempty"`
}

// ============================================
// ROOT TYPES
// ============================================

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=lotest
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`,description="Current test phase"
// +kubebuilder:printcolumn:name="Workers",type=integer,JSONPath=`.spec.worker.replicas`,description="Requested worker count"
// +kubebuilder:printcolumn:name="Connected",type=integer,JSONPath=`.status.connectedWorkers`,description="Connected workers"
// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`,priority=1
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`

// LocustTest is the Schema for the locusttests API.
type LocustTest struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   LocustTestSpec   `json:"spec,omitempty"`
	Status LocustTestStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// LocustTestList contains a list of LocustTest.
type LocustTestList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []LocustTest `json:"items"`
}

// init registers the v2 types with the scheme builder.
func init() {
	SchemeBuilder.Register(&LocustTest{}, &LocustTestList{})
}
diff --git a/api/v2/locusttest_webhook.go b/api/v2/locusttest_webhook.go
new file mode 100644
index 00000000..629f79c0
--- /dev/null
+++ b/api/v2/locusttest_webhook.go
@@ -0,0 +1,293 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var locusttestlog = logf.Log.WithName("locusttest-resource") + +// Default reserved paths that cannot be used for secret mounts +const ( + DefaultSrcMountPath = "/lotest/src" + DefaultLibMountPath = "/opt/locust/lib" +) + +// Reserved volume name constants +const ( + reservedVolumeNamePrefix = "secret-" + libVolumeName = "locust-lib" +) + +// LocustTestCustomValidator handles validation for LocustTest resources. +type LocustTestCustomValidator struct{} + +// SetupWebhookWithManager sets up the webhook with the Manager. +func (r *LocustTest) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + WithValidator(&LocustTestCustomValidator{}). + Complete() +} + +// +kubebuilder:webhook:path=/validate-locust-io-v2-locusttest,mutating=false,failurePolicy=fail,sideEffects=None,groups=locust.io,resources=locusttests,verbs=create;update,versions=v2,name=vlocusttest-v2.kb.io,admissionReviewVersions=v1 + +var _ webhook.CustomValidator = &LocustTestCustomValidator{} + +// ValidateCreate implements webhook.CustomValidator. 
+func (v *LocustTestCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lt, ok := obj.(*LocustTest) + if !ok { + return nil, fmt.Errorf("expected LocustTest but got %T", obj) + } + locusttestlog.Info("validate create", "name", lt.Name) + return validateLocustTest(lt) +} + +// ValidateUpdate implements webhook.CustomValidator. +func (v *LocustTestCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + lt, ok := newObj.(*LocustTest) + if !ok { + return nil, fmt.Errorf("expected LocustTest but got %T", newObj) + } + locusttestlog.Info("validate update", "name", lt.Name) + return validateLocustTest(lt) +} + +// ValidateDelete implements webhook.CustomValidator. +func (v *LocustTestCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lt, ok := obj.(*LocustTest) + if !ok { + return nil, fmt.Errorf("expected LocustTest but got %T", obj) + } + locusttestlog.Info("validate delete", "name", lt.Name) + return nil, nil +} + +// validateSecretMounts checks that secret mount paths don't conflict with reserved paths. +func validateSecretMounts(lt *LocustTest) error { + if lt.Spec.Env == nil || len(lt.Spec.Env.SecretMounts) == 0 { + return nil + } + + reservedPaths := getReservedPaths(lt) + + for _, sm := range lt.Spec.Env.SecretMounts { + for _, reserved := range reservedPaths { + if PathConflicts(sm.MountPath, reserved) { + return fmt.Errorf( + "secretMount path %q conflicts with reserved path %q; "+ + "operator uses this path for test files", + sm.MountPath, reserved) + } + } + } + + return nil +} + +// getReservedPaths returns the paths that are reserved by the operator. +// It dynamically calculates based on testFiles configuration. 
+func getReservedPaths(lt *LocustTest) []string { + var paths []string + + srcPath := DefaultSrcMountPath + libPath := DefaultLibMountPath + + if lt.Spec.TestFiles != nil { + if lt.Spec.TestFiles.SrcMountPath != "" { + srcPath = lt.Spec.TestFiles.SrcMountPath + } + if lt.Spec.TestFiles.LibMountPath != "" { + libPath = lt.Spec.TestFiles.LibMountPath + } + } + + // Only add paths that are actually in use + if lt.Spec.TestFiles != nil && lt.Spec.TestFiles.ConfigMapRef != "" { + paths = append(paths, srcPath) + } + if lt.Spec.TestFiles != nil && lt.Spec.TestFiles.LibConfigMapRef != "" { + paths = append(paths, libPath) + } + + // If no testFiles are configured, still protect default paths + // since users might add them later + if len(paths) == 0 { + paths = []string{srcPath, libPath} + } + + return paths +} + +// PathConflicts checks if two paths would conflict. +// Conflict occurs if one path is a prefix of the other. +func PathConflicts(path1, path2 string) bool { + // Normalize paths by removing trailing slashes + p1 := strings.TrimSuffix(path1, "/") + p2 := strings.TrimSuffix(path2, "/") + + // Check if either is a prefix of the other + return p1 == p2 || + strings.HasPrefix(p1, p2+"/") || + strings.HasPrefix(p2, p1+"/") +} + +// validateLocustTest runs all validation checks. +func validateLocustTest(lt *LocustTest) (admission.Warnings, error) { + // Validate CR name length + if err := validateCRName(lt); err != nil { + return nil, err + } + + // Validate secret mounts + if err := validateSecretMounts(lt); err != nil { + return nil, err + } + + // Validate user volumes + if err := validateVolumes(lt); err != nil { + return nil, err + } + + // Validate OTel configuration + if err := validateOTelConfig(lt); err != nil { + return nil, err + } + + return nil, nil +} + +// validateCRName validates that the CR name won't cause generated resource names to exceed K8s limits. +// Kubernetes resource names (including Jobs) must be <= 63 characters (DNS label limit). 
+// The operator generates names like "{cr-name}-worker", so we need to ensure total length fits. +func validateCRName(lt *LocustTest) error { + const maxNameSuffixLen = 7 // "-worker" is the longest suffix + const maxK8sNameLen = 63 // Kubernetes DNS label limit + + if len(lt.Name)+maxNameSuffixLen > maxK8sNameLen { + return fmt.Errorf( + "name %q is too long: generated Job names would exceed 63 characters (max CR name length is %d)", + lt.Name, + maxK8sNameLen-maxNameSuffixLen, + ) + } + + return nil +} + +// validateOTelConfig validates OpenTelemetry configuration. +func validateOTelConfig(lt *LocustTest) error { + if lt.Spec.Observability == nil { + return nil + } + + otelCfg := lt.Spec.Observability.OpenTelemetry + if otelCfg == nil { + return nil + } + + // If OTel is enabled, endpoint is required + if otelCfg.Enabled && otelCfg.Endpoint == "" { + return fmt.Errorf("observability.openTelemetry.endpoint is required when OpenTelemetry is enabled") + } + + return nil +} + +// validateVolumes checks for volume name and mount path conflicts. +func validateVolumes(lt *LocustTest) error { + // Check volume names + for _, vol := range lt.Spec.Volumes { + if err := validateVolumeName(lt, vol.Name); err != nil { + return err + } + } + + // Check mount paths + reservedPaths := getReservedPaths(lt) + for _, mount := range lt.Spec.VolumeMounts { + if err := validateMountPath(mount.MountPath, reservedPaths); err != nil { + return err + } + } + + // Validate that all mounts reference defined volumes + if err := validateMountReferences(lt); err != nil { + return err + } + + return nil +} + +// validateVolumeName checks if a volume name conflicts with operator-managed names. 
+func validateVolumeName(lt *LocustTest, name string) error { + // Check for reserved prefix + if strings.HasPrefix(name, reservedVolumeNamePrefix) { + return fmt.Errorf("volume name %q uses reserved prefix %q", name, reservedVolumeNamePrefix) + } + + // Check for lib volume name + if name == libVolumeName { + return fmt.Errorf("volume name %q is reserved by the operator", name) + } + + // Check for CR-based names + masterName := lt.Name + "-master" + workerName := lt.Name + "-worker" + if name == masterName || name == workerName { + return fmt.Errorf("volume name %q conflicts with operator-generated name", name) + } + + return nil +} + +// validateMountPath checks if a mount path conflicts with reserved paths. +func validateMountPath(path string, reservedPaths []string) error { + for _, reserved := range reservedPaths { + if PathConflicts(path, reserved) { + return fmt.Errorf("volumeMount path %q conflicts with reserved path %q", path, reserved) + } + } + return nil +} + +// validateMountReferences ensures all mounts reference defined volumes. +func validateMountReferences(lt *LocustTest) error { + volumeNames := make(map[string]bool) + for _, vol := range lt.Spec.Volumes { + volumeNames[vol.Name] = true + } + + for _, mount := range lt.Spec.VolumeMounts { + if !volumeNames[mount.Name] { + return fmt.Errorf("volumeMount %q references undefined volume", mount.Name) + } + } + + return nil +} diff --git a/api/v2/locusttest_webhook_test.go b/api/v2/locusttest_webhook_test.go new file mode 100644 index 00000000..03d89231 --- /dev/null +++ b/api/v2/locusttest_webhook_test.go @@ -0,0 +1,912 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Unit tests for the v2 admission webhook helpers: PathConflicts,
// validateSecretMounts and getReservedPaths.

func TestPathConflicts_ExactMatch(t *testing.T) {
	assert.True(t, PathConflicts("/lotest/src", "/lotest/src"))
	assert.True(t, PathConflicts("/opt/locust/lib", "/opt/locust/lib"))
}

func TestPathConflicts_Subpath(t *testing.T) {
	// /foo conflicts with /foo/bar because /foo is a prefix
	assert.True(t, PathConflicts("/lotest/src", "/lotest/src/secrets"))
	assert.True(t, PathConflicts("/lotest/src/secrets", "/lotest/src"))

	// Deeper nesting
	assert.True(t, PathConflicts("/opt/locust/lib", "/opt/locust/lib/utils"))
	assert.True(t, PathConflicts("/opt/locust/lib/utils", "/opt/locust/lib"))
}

func TestPathConflicts_NoConflict(t *testing.T) {
	// Completely different paths
	assert.False(t, PathConflicts("/lotest/src", "/etc/certs"))
	assert.False(t, PathConflicts("/opt/locust/lib", "/var/secrets"))

	// Similar prefix but not a subpath
	assert.False(t, PathConflicts("/lotest/src", "/lotest/src2"))
	assert.False(t, PathConflicts("/lotest/src2", "/lotest/src"))
}

func TestPathConflicts_TrailingSlash(t *testing.T) {
	// Trailing slashes should be normalized
	assert.True(t, PathConflicts("/lotest/src/", "/lotest/src"))
	assert.True(t, PathConflicts("/lotest/src", "/lotest/src/"))
	assert.True(t, PathConflicts("/lotest/src/", "/lotest/src/"))
}

func TestValidateSecretMounts_NilEnv(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: nil,
		},
	}

	err := validateSecretMounts(lt)
	assert.NoError(t, err)
}

func TestValidateSecretMounts_EmptyMounts(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: &EnvConfig{
				SecretMounts: []SecretMount{},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.NoError(t, err)
}

func TestValidateSecretMounts_ValidPath(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					{Name: "tls-certs", MountPath: "/etc/locust/certs"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.NoError(t, err)
}

func TestValidateSecretMounts_ConflictDefault(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					{Name: "bad-secret", MountPath: "/lotest/src"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "conflicts with reserved path")
	assert.Contains(t, err.Error(), "/lotest/src")
}

func TestValidateSecretMounts_ConflictLib(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					{Name: "bad-secret", MountPath: "/opt/locust/lib"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "conflicts with reserved path")
	assert.Contains(t, err.Error(), "/opt/locust/lib")
}

func TestValidateSecretMounts_ConflictSubpath(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					{Name: "bad-secret", MountPath: "/lotest/src/secrets"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "conflicts with reserved path")
}

func TestValidateSecretMounts_CustomTestFilesPath(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			TestFiles: &TestFilesConfig{
				ConfigMapRef: "my-scripts",
				SrcMountPath: "/custom/src",
			},
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					{Name: "secret", MountPath: "/custom/src/secrets"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "/custom/src")
}

func TestValidateSecretMounts_CustomPathAllowsDefault(t *testing.T) {
	// When using custom paths, the default paths should be allowed
	lt := &LocustTest{
		Spec: LocustTestSpec{
			TestFiles: &TestFilesConfig{
				ConfigMapRef: "my-scripts",
				SrcMountPath: "/custom/src",
			},
			Env: &EnvConfig{
				SecretMounts: []SecretMount{
					// This would conflict with default but we're using custom
					{Name: "secret", MountPath: "/lotest/src"},
				},
			},
		},
	}

	err := validateSecretMounts(lt)
	// Should pass because we're using custom path, not default
	assert.NoError(t, err)
}

func TestGetReservedPaths_NoTestFiles(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{},
	}

	paths := getReservedPaths(lt)
	assert.Contains(t, paths, DefaultSrcMountPath)
	assert.Contains(t, paths, DefaultLibMountPath)
}

func TestGetReservedPaths_WithConfigMapRef(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			TestFiles: &TestFilesConfig{
				ConfigMapRef: "my-scripts",
			},
		},
	}

	paths := getReservedPaths(lt)
	assert.Contains(t, paths, DefaultSrcMountPath)
	assert.Len(t, paths, 1)
}

func TestGetReservedPaths_WithBothConfigMaps(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			TestFiles: &TestFilesConfig{
				ConfigMapRef:    "my-scripts",
				LibConfigMapRef: "my-lib",
			},
		},
	}

	paths := getReservedPaths(lt)
	assert.Contains(t, paths, DefaultSrcMountPath)
	assert.Contains(t, paths, DefaultLibMountPath)
	assert.Len(t, paths, 2)
}

func TestGetReservedPaths_CustomPaths(t *testing.T) {
	lt := &LocustTest{
		Spec: LocustTestSpec{
			TestFiles: &TestFilesConfig{
				ConfigMapRef:    "my-scripts",
				LibConfigMapRef: "my-lib",
				SrcMountPath:    "/custom/src",
LibMountPath: "/custom/lib", + }, + }, + } + + paths := getReservedPaths(lt) + assert.Contains(t, paths, "/custom/src") + assert.Contains(t, paths, "/custom/lib") + assert.NotContains(t, paths, DefaultSrcMountPath) + assert.NotContains(t, paths, DefaultLibMountPath) +} + +func TestValidateCreate(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Env: &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "valid-secret", MountPath: "/etc/certs"}, + }, + }, + }, + } + + warnings, err := validator.ValidateCreate(context.Background(), lt) + require.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateCreate_Invalid(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Env: &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "bad-secret", MountPath: "/lotest/src"}, + }, + }, + }, + } + + warnings, err := validator.ValidateCreate(context.Background(), lt) + require.Error(t, err) + assert.Contains(t, err.Error(), "conflicts with reserved path") + assert.Nil(t, warnings) +} + +func TestValidateUpdate(t *testing.T) { + validator := &LocustTestCustomValidator{} + oldLt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: 
WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + }, + } + + newLt := oldLt.DeepCopy() + newLt.Spec.Env = &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "valid-secret", MountPath: "/etc/certs"}, + }, + } + + warnings, err := validator.ValidateUpdate(context.Background(), oldLt, newLt) + require.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateDelete(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + } + + warnings, err := validator.ValidateDelete(context.Background(), lt) + require.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateCreate_WrongType(t *testing.T) { + validator := &LocustTestCustomValidator{} + + // Pass wrong type + warnings, err := validator.ValidateCreate(context.Background(), &LocustTestList{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "expected LocustTest") + assert.Nil(t, warnings) +} + +// ============================================ +// Volume Validation Tests +// ============================================ + +func TestValidateVolumes_Empty(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: nil, + VolumeMounts: nil, + }, + } + + err := validateVolumes(lt) + assert.NoError(t, err) +} + +func TestValidateVolumes_ValidConfig(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "test-results"}, + {Name: "shared-data"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "test-results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared-data", MountPath: "/shared"}, Target: "both"}, + }, + }, + } + + err := validateVolumes(lt) + assert.NoError(t, err) +} + +func 
TestValidateVolumeName_Valid(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "my-test", Namespace: "default"}, + } + + assert.NoError(t, validateVolumeName(lt, "test-results")) + assert.NoError(t, validateVolumeName(lt, "shared-data")) + assert.NoError(t, validateVolumeName(lt, "custom-volume")) +} + +func TestValidateVolumeName_SecretPrefix(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "my-test", Namespace: "default"}, + } + + err := validateVolumeName(lt, "secret-custom") + assert.Error(t, err) + assert.Contains(t, err.Error(), "uses reserved prefix") + assert.Contains(t, err.Error(), "secret-") +} + +func TestValidateVolumeName_LibVolume(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "my-test", Namespace: "default"}, + } + + err := validateVolumeName(lt, "locust-lib") + assert.Error(t, err) + assert.Contains(t, err.Error(), "is reserved by the operator") +} + +func TestValidateVolumeName_MasterConflict(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "my-test", Namespace: "default"}, + } + + err := validateVolumeName(lt, "my-test-master") + assert.Error(t, err) + assert.Contains(t, err.Error(), "conflicts with operator-generated name") +} + +func TestValidateVolumeName_WorkerConflict(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "my-test", Namespace: "default"}, + } + + err := validateVolumeName(lt, "my-test-worker") + assert.Error(t, err) + assert.Contains(t, err.Error(), "conflicts with operator-generated name") +} + +func TestValidateVolumes_PathConflict(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "bad-volume"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "bad-volume", MountPath: "/lotest/src"}, Target: "both"}, + }, + }, + } + + err := validateVolumes(lt) + 
assert.Error(t, err) + assert.Contains(t, err.Error(), "conflicts with reserved path") +} + +func TestValidateVolumes_UndefinedMount(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "defined-volume"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "undefined-volume", MountPath: "/data"}, Target: "both"}, + }, + }, + } + + err := validateVolumes(lt) + assert.Error(t, err) + assert.Contains(t, err.Error(), "references undefined volume") +} + +func TestValidateMountReferences_Valid(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "vol1"}, + {Name: "vol2"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "vol1", MountPath: "/data1"}}, + {VolumeMount: corev1.VolumeMount{Name: "vol2", MountPath: "/data2"}}, + }, + }, + } + + err := validateMountReferences(lt) + assert.NoError(t, err) +} + +func TestValidateMountReferences_Invalid(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "vol1"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "vol1", MountPath: "/data1"}}, + {VolumeMount: corev1.VolumeMount{Name: "missing", MountPath: "/data2"}}, + }, + }, + } + + err := validateMountReferences(lt) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing") +} + +func TestValidateLocustTest_CombinedValidation(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Env: &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "valid-secret", MountPath: "/etc/certs"}, + }, + }, + Volumes: []corev1.Volume{ + {Name: "test-results"}, + }, + 
VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "test-results", MountPath: "/results"}, Target: "master"}, + }, + }, + } + + warnings, err := validateLocustTest(lt) + assert.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateLocustTest_SecretMountFailsFirst(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Env: &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "bad-secret", MountPath: "/lotest/src"}, + }, + }, + Volumes: []corev1.Volume{ + {Name: "secret-bad"}, // Also invalid but secret mount fails first + }, + }, + } + + _, err := validateLocustTest(lt) + assert.Error(t, err) + assert.Contains(t, err.Error(), "secretMount path") +} + +func TestValidateCreate_WithVolumes(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Volumes: []corev1.Volume{ + {Name: "test-results"}, + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "test-results", MountPath: "/results"}, Target: "master"}, + }, + }, + } + + warnings, err := validator.ValidateCreate(context.Background(), lt) + require.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateCreate_WithInvalidVolumeName(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Volumes: []corev1.Volume{ + {Name: 
"secret-custom"}, // Invalid: uses reserved prefix + }, + VolumeMounts: []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "secret-custom", MountPath: "/custom"}}, + }, + }, + } + + _, err := validator.ValidateCreate(context.Background(), lt) + require.Error(t, err) + assert.Contains(t, err.Error(), "uses reserved prefix") +} + +// ============================================ +// OTel Validation Tests +// ============================================ + +func TestValidateOTelConfig_NoObservability(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: nil, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateOTelConfig_NoOpenTelemetry(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: &ObservabilityConfig{ + OpenTelemetry: nil, + }, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateOTelConfig_Disabled(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: false, + }, + }, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateOTelConfig_EnabledWithEndpoint(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + }, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateOTelConfig_EnabledNoEndpoint(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: 
&ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "", // Missing endpoint + }, + }, + }, + } + + err := validateOTelConfig(lt) + assert.Error(t, err) + assert.Contains(t, err.Error(), "endpoint is required when OpenTelemetry is enabled") +} + +func TestValidateOTelConfig_ValidProtocolGRPC(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + Protocol: "grpc", + }, + }, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateOTelConfig_ValidProtocolHTTP(t *testing.T) { + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4318", + Protocol: "http/protobuf", + }, + }, + }, + } + + err := validateOTelConfig(lt) + assert.NoError(t, err) +} + +func TestValidateCreate_WithOTelEnabled(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.32.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + }, + }, + } + + warnings, err := validator.ValidateCreate(context.Background(), lt) + require.NoError(t, err) + assert.Nil(t, warnings) +} + +func TestValidateCreate_WithOTelEnabledNoEndpoint(t *testing.T) { + validator := &LocustTestCustomValidator{} + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: 
"default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.32.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + Observability: &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + // No endpoint - should fail + }, + }, + }, + } + + _, err := validator.ValidateCreate(context.Background(), lt) + require.Error(t, err) + assert.Contains(t, err.Error(), "endpoint is required") +} + +// ============================================ +// Update Validation Tests +// ============================================ + +func TestValidateUpdate_Invalid(t *testing.T) { + validator := &LocustTestCustomValidator{} + + oldLt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + }, + } + + t.Run("InvalidSecretMountPath", func(t *testing.T) { + newLt := oldLt.DeepCopy() + newLt.Spec.Env = &EnvConfig{ + SecretMounts: []SecretMount{ + {Name: "bad-secret", MountPath: "/lotest/src"}, + }, + } + + _, err := validator.ValidateUpdate(context.Background(), oldLt, newLt) + require.Error(t, err) + assert.Contains(t, err.Error(), "conflicts with reserved path") + }) + + t.Run("InvalidVolumeName", func(t *testing.T) { + newLt := oldLt.DeepCopy() + newLt.Spec.Volumes = []corev1.Volume{ + {Name: "secret-my-volume"}, // Uses reserved prefix + } + newLt.Spec.VolumeMounts = []TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "secret-my-volume", MountPath: "/custom"}}, + } + + _, err := validator.ValidateUpdate(context.Background(), oldLt, newLt) + require.Error(t, err) + assert.Contains(t, err.Error(), "uses reserved prefix") + }) + + t.Run("OTelEnabledNoEndpoint", func(t *testing.T) 
{ + newLt := oldLt.DeepCopy() + newLt.Spec.Observability = &ObservabilityConfig{ + OpenTelemetry: &OpenTelemetryConfig{ + Enabled: true, + Endpoint: "", // Missing endpoint + }, + } + + _, err := validator.ValidateUpdate(context.Background(), oldLt, newLt) + require.Error(t, err) + assert.Contains(t, err.Error(), "endpoint is required") + }) +} + +// ============================================ +// Boundary Tests +// ============================================ + +func TestValidateCreate_LongCRName(t *testing.T) { + validator := &LocustTestCustomValidator{} + + t.Run("NameTooLong", func(t *testing.T) { + // Create a name that would exceed 63 chars when "-worker" is added + // 57 chars + "-worker" (7 chars) = 64 chars > 63 limit + longName := "a123456789-123456789-123456789-123456789-123456789-123456" + require.Equal(t, 57, len(longName), "Test name should be 57 chars") + + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: longName, Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + }, + } + + _, err := validator.ValidateCreate(context.Background(), lt) + require.Error(t, err) + assert.Contains(t, err.Error(), "too long") + assert.Contains(t, err.Error(), "63 characters") + }) + + t.Run("NameAtLimit", func(t *testing.T) { + // 56 chars + "-worker" (7 chars) = 63 chars (exactly at limit) + maxName := "a123456789-123456789-123456789-123456789-123456789-12345" + require.Equal(t, 56, len(maxName), "Test name should be 56 chars") + + lt := &LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: maxName, Namespace: "default"}, + Spec: LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 1, + }, + }, + } + + _, err 
:= validator.ValidateCreate(context.Background(), lt) + require.NoError(t, err) + }) +} diff --git a/api/v2/zz_generated.deepcopy.go b/api/v2/zz_generated.deepcopy.go new file mode 100644 index 00000000..82b392c6 --- /dev/null +++ b/api/v2/zz_generated.deepcopy.go @@ -0,0 +1,471 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v2 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoquitConfig) DeepCopyInto(out *AutoquitConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoquitConfig. +func (in *AutoquitConfig) DeepCopy() *AutoquitConfig { + if in == nil { + return nil + } + out := new(AutoquitConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. 
+func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvConfig) DeepCopyInto(out *EnvConfig) { + *out = *in + if in.ConfigMapRefs != nil { + in, out := &in.ConfigMapRefs, &out.ConfigMapRefs + *out = make([]ConfigMapEnvSource, len(*in)) + copy(*out, *in) + } + if in.SecretRefs != nil { + in, out := &in.SecretRefs, &out.SecretRefs + *out = make([]SecretEnvSource, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretMounts != nil { + in, out := &in.SecretMounts, &out.SecretMounts + *out = make([]SecretMount, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvConfig. +func (in *EnvConfig) DeepCopy() *EnvConfig { + if in == nil { + return nil + } + out := new(EnvConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocustTest) DeepCopyInto(out *LocustTest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTest. +func (in *LocustTest) DeepCopy() *LocustTest { + if in == nil { + return nil + } + out := new(LocustTest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LocustTest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocustTestCustomValidator) DeepCopyInto(out *LocustTestCustomValidator) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestCustomValidator. +func (in *LocustTestCustomValidator) DeepCopy() *LocustTestCustomValidator { + if in == nil { + return nil + } + out := new(LocustTestCustomValidator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocustTestList) DeepCopyInto(out *LocustTestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LocustTest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestList. +func (in *LocustTestList) DeepCopy() *LocustTestList { + if in == nil { + return nil + } + out := new(LocustTestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocustTestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocustTestSpec) DeepCopyInto(out *LocustTestSpec) { + *out = *in + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + in.Master.DeepCopyInto(&out.Master) + in.Worker.DeepCopyInto(&out.Worker) + if in.TestFiles != nil { + in, out := &in.TestFiles, &out.TestFiles + *out = new(TestFilesConfig) + **out = **in + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(SchedulingConfig) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = new(EnvConfig) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]TargetedVolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Observability != nil { + in, out := &in.Observability, &out.Observability + *out = new(ObservabilityConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestSpec. +func (in *LocustTestSpec) DeepCopy() *LocustTestSpec { + if in == nil { + return nil + } + out := new(LocustTestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocustTestStatus) DeepCopyInto(out *LocustTestStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustTestStatus. +func (in *LocustTestStatus) DeepCopy() *LocustTestStatus { + if in == nil { + return nil + } + out := new(LocustTestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterSpec) DeepCopyInto(out *MasterSpec) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Autostart != nil { + in, out := &in.Autostart, &out.Autostart + *out = new(bool) + **out = **in + } + if in.Autoquit != nil { + in, out := &in.Autoquit, &out.Autoquit + *out = new(AutoquitConfig) + **out = **in + } + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec. 
+func (in *MasterSpec) DeepCopy() *MasterSpec { + if in == nil { + return nil + } + out := new(MasterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfig) DeepCopyInto(out *ObservabilityConfig) { + *out = *in + if in.OpenTelemetry != nil { + in, out := &in.OpenTelemetry, &out.OpenTelemetry + *out = new(OpenTelemetryConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfig. +func (in *ObservabilityConfig) DeepCopy() *ObservabilityConfig { + if in == nil { + return nil + } + out := new(ObservabilityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenTelemetryConfig) DeepCopyInto(out *OpenTelemetryConfig) { + *out = *in + if in.ExtraEnvVars != nil { + in, out := &in.ExtraEnvVars, &out.ExtraEnvVars + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTelemetryConfig. +func (in *OpenTelemetryConfig) DeepCopy() *OpenTelemetryConfig { + if in == nil { + return nil + } + out := new(OpenTelemetryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulingConfig) DeepCopyInto(out *SchedulingConfig) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingConfig. +func (in *SchedulingConfig) DeepCopy() *SchedulingConfig { + if in == nil { + return nil + } + out := new(SchedulingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretMount) DeepCopyInto(out *SecretMount) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretMount. +func (in *SecretMount) DeepCopy() *SecretMount { + if in == nil { + return nil + } + out := new(SecretMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetedVolumeMount) DeepCopyInto(out *TargetedVolumeMount) { + *out = *in + in.VolumeMount.DeepCopyInto(&out.VolumeMount) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetedVolumeMount. +func (in *TargetedVolumeMount) DeepCopy() *TargetedVolumeMount { + if in == nil { + return nil + } + out := new(TargetedVolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestFilesConfig) DeepCopyInto(out *TestFilesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestFilesConfig. +func (in *TestFilesConfig) DeepCopy() *TestFilesConfig { + if in == nil { + return nil + } + out := new(TestFilesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerSpec) DeepCopyInto(out *WorkerSpec) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSpec. 
+func (in *WorkerSpec) DeepCopy() *WorkerSpec { + if in == nil { + return nil + } + out := new(WorkerSpec) + in.DeepCopyInto(out) + return out +} diff --git a/build.gradle b/build.gradle deleted file mode 100644 index 370cdbb0..00000000 --- a/build.gradle +++ /dev/null @@ -1,53 +0,0 @@ -plugins { - id 'com.google.cloud.tools.jib' version '3.3.2' - id 'io.micronaut.application' version "4.5.4" - id "com.diffplug.spotless" version "6.25.0" - id "jacoco" - id "idea" -} - -// Automatically managed by commitizen (.cz.yaml) -version = "1.1.1" -group = "com.locust" - -repositories { - mavenCentral() -} - - -application { - mainClass.set("com.locust.Application") -} - -java { - toolchain { - languageVersion = JavaLanguageVersion.of(21) - } - - sourceCompatibility = JavaVersion.toVersion("21") - targetCompatibility = JavaVersion.toVersion("21") - -} -graalvmNative.toolchainDetection = false -micronaut { - runtime("netty") - testRuntime("junit5") - processing { - incremental(true) - annotations("com.*") - } -} - -test { - // Disable parallel test execution to prevent resource conflicts between tests - // that use mock Kubernetes API servers. 
- maxParallelForks = 1 -} - -// don"t change apply ordering -apply from: "${rootDir}/gradle/dependencies.gradle" -apply from: "${rootDir}/gradle/testing.gradle" -apply from: "${rootDir}/gradle/spotless.gradle" -apply from: "${rootDir}/gradle/jib.gradle" -apply from: "${rootDir}/gradle/jacoco.gradle" -apply from: "${rootDir}/gradle/integration-test.gradle" diff --git a/charts/locust-k8s-operator/Chart.yaml b/charts/locust-k8s-operator/Chart.yaml index d069dacb..e3a3ac22 100644 --- a/charts/locust-k8s-operator/Chart.yaml +++ b/charts/locust-k8s-operator/Chart.yaml @@ -1,6 +1,7 @@ apiVersion: v2 name: locust-k8s-operator -description: Locust Kubernetes Operator +description: Locust Kubernetes Operator - Go v2 +icon: https://raw.githubusercontent.com/AbdelrhmanHamouda/locust-k8s-operator/master/docs/assets/images/favicon.png # A chart can be either an 'application' or a 'library' chart. # @@ -15,10 +16,35 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.1.1 +version: 2.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.1.1" +appVersion: "2.0.0" + +kubeVersion: ">= 1.25.0-0" +home: https://github.com/AbdelrhmanHamouda/locust-k8s-operator +sources: + - https://github.com/AbdelrhmanHamouda/locust-k8s-operator +maintainers: + - name: AbdelrhmanHamouda + url: https://github.com/AbdelrhmanHamouda + +annotations: + artifacthub.io/changes: | + - kind: changed + description: Rewritten for Go operator with clean-slate design + - kind: changed + description: Updated health probes to /healthz and /readyz on port 8081 + - kind: changed + description: Reduced default memory from 1Gi to 128Mi (Go binary) + - kind: added + description: Leader election support for HA deployments + - kind: added + description: Optional webhook configuration with cert-manager + - kind: added + description: Optional OTel Collector deployment + - kind: deprecated + description: Micronaut/JVM configuration removed (no longer applicable) diff --git a/charts/locust-k8s-operator/crds b/charts/locust-k8s-operator/crds deleted file mode 120000 index d67229f2..00000000 --- a/charts/locust-k8s-operator/crds +++ /dev/null @@ -1 +0,0 @@ -../../kube/crd \ No newline at end of file diff --git a/charts/locust-k8s-operator/templates/NOTES.txt b/charts/locust-k8s-operator/templates/NOTES.txt new file mode 100644 index 00000000..469701d9 --- /dev/null +++ b/charts/locust-k8s-operator/templates/NOTES.txt @@ -0,0 +1,35 @@ +πŸš€ {{ .Chart.Name }} deployed to namespace {{ .Release.Namespace }}. + +Get started: + + # Verify the operator is running + kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/name={{ include "locust-k8s-operator.name" . }} + + # View operator logs + kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/name={{ include "locust-k8s-operator.name" . 
}} -f + + # Create a LocustTest resource + kubectl apply -f your-locusttest.yaml + + # Watch status + kubectl get locusttest -n {{ .Release.Namespace }} -w + + # Access the Locust web UI (port 8089) + kubectl port-forward -n {{ .Release.Namespace }} job/<locusttest-name>-master 8089:8089 +{{- if .Values.webhook.enabled }} + +Webhook: ENABLED (requires cert-manager) + kubectl get certificate {{ include "locust-k8s-operator.fullname" . }}-webhook-cert -n {{ .Release.Namespace }} +{{- end }} +{{- if .Values.otelCollector.enabled }} + +OTel Collector: ENABLED + Endpoint: {{ include "locust-k8s-operator.fullname" . }}-otel-collector.{{ .Release.Namespace }}.svc.cluster.local + gRPC: 4317 | HTTP: 4318 +{{- end }} +{{- if .Values.metrics.enabled }} + +Metrics: port {{ .Values.metrics.port }} ({{ if .Values.metrics.secure }}HTTPS{{ else }}HTTP{{ end }}) +{{- end }} + +📖 Docs: {{ .Chart.Home | default "https://github.com/AbdelrhmanHamouda/locust-k8s-operator" }} diff --git a/charts/locust-k8s-operator/templates/_helpers.tpl b/charts/locust-k8s-operator/templates/_helpers.tpl index 9f43877b..ee35a033 100644 --- a/charts/locust-k8s-operator/templates/_helpers.tpl +++ b/charts/locust-k8s-operator/templates/_helpers.tpl @@ -1,5 +1,31 @@ +{{/* +============================================================================= +LOCUST K8S OPERATOR - HELM TEMPLATE HELPERS +============================================================================= +This file contains reusable template helpers for the Locust K8s Operator chart. + +Sections: + 1. Standard Naming Helpers - Chart name, fullname, labels + 2. Backward Compatibility Helpers - Map old value paths to new paths + 3. 
Environment Variables Helper - Generate env vars for the operator + +For users upgrading from v1.x charts: + The backward compatibility helpers allow you to continue using old value + paths (e.g., config.loadGenerationPods.resource.cpuRequest) while we + recommend migrating to the new paths (e.g., locustPods.resources.requests.cpu). +============================================================================= +*/}} + +{{/* +============================================================================= +SECTION 1: Standard Naming Helpers +============================================================================= +*/}} + {{/* Expand the name of the chart. +Used for: container names, label values +Override with: .Values.nameOverride */}} {{- define "locust-k8s-operator.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} @@ -7,27 +33,34 @@ Expand the name of the chart. {{/* Create a default fully qualified app name. +Used for: deployment name, service account name, RBAC resources We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "locust-k8s-operator.fullname" -}} -{{- $name := default .Chart.Name }} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} +{{- end }} {{/* Create chart name and version as used by the chart label. 
+Used for: helm.sh/chart label to track which chart version deployed the resources */}} {{- define "locust-k8s-operator.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* -Common labels +Common labels applied to all resources. +Includes: chart info, selector labels, version, and managed-by */}} {{- define "locust-k8s-operator.labels" -}} helm.sh/chart: {{ include "locust-k8s-operator.chart" . }} @@ -39,7 +72,9 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* -Selector labels +Selector labels used for pod selection. +These labels are used by Deployments to select their pods and by Services to route traffic. +Must remain consistent across upgrades to avoid orphaned pods. */}} {{- define "locust-k8s-operator.selectorLabels" -}} app.kubernetes.io/name: {{ include "locust-k8s-operator.name" . }} @@ -47,7 +82,9 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to use +Create the name of the service account to use. +If serviceAccount.create is true, uses fullname or custom name. +If serviceAccount.create is false, uses the specified name or "default". */}} {{- define "locust-k8s-operator.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} @@ -56,3 +93,618 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} + +{{/* +============================================================================= +SECTION 2: Backward Compatibility Helpers +============================================================================= +These helpers map old v1.x chart value paths to new v2.x paths. +This allows users upgrading from v1.x to continue using their existing +values.yaml files while migrating to the new structure. + +Priority order (checks leaf values, not just parent keys): + 1. New path value exists β†’ use it + 2. 
Old path value exists β†’ use it (backward compat) + 3. Neither β†’ use hardcoded default + +Example - both of these work: + # New (recommended) + locustPods: + resources: + requests: + cpu: 500m + + # Old (deprecated, still works) + config: + loadGenerationPods: + resource: + cpuRequest: 500m +============================================================================= +*/}} + +{{/* +Pod CPU Request - new path with fallback to old path +*/}} +{{- define "locust.podCpuRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.requests .Values.locustPods.resources.requests.cpu }} +{{- .Values.locustPods.resources.requests.cpu }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.cpuRequest }} +{{- .Values.config.loadGenerationPods.resource.cpuRequest }} +{{- else }} +{{- "250m" }} +{{- end }} +{{- end }} + +{{/* +Pod Memory Request - new path with fallback to old path +*/}} +{{- define "locust.podMemRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.requests .Values.locustPods.resources.requests.memory }} +{{- .Values.locustPods.resources.requests.memory }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.memRequest }} +{{- .Values.config.loadGenerationPods.resource.memRequest }} +{{- else }} +{{- "128Mi" }} +{{- end }} +{{- end }} + +{{/* +Pod Ephemeral Storage Request - new path with fallback to old path +*/}} +{{- define "locust.podEphemeralRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.requests .Values.locustPods.resources.requests.ephemeralStorage }} +{{- .Values.locustPods.resources.requests.ephemeralStorage }} +{{- else if and .Values.config .Values.config.loadGenerationPods 
.Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.ephemeralRequest }} +{{- .Values.config.loadGenerationPods.resource.ephemeralRequest }} +{{- else }} +{{- "30M" }} +{{- end }} +{{- end }} + +{{/* +Pod CPU Limit - new path with fallback to old path +*/}} +{{- define "locust.podCpuLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.limits .Values.locustPods.resources.limits.cpu }} +{{- .Values.locustPods.resources.limits.cpu }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.cpuLimit }} +{{- .Values.config.loadGenerationPods.resource.cpuLimit }} +{{- else }} +{{- "1000m" }} +{{- end }} +{{- end }} + +{{/* +Pod Memory Limit - new path with fallback to old path +*/}} +{{- define "locust.podMemLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.limits .Values.locustPods.resources.limits.memory }} +{{- .Values.locustPods.resources.limits.memory }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.memLimit }} +{{- .Values.config.loadGenerationPods.resource.memLimit }} +{{- else }} +{{- "1024Mi" }} +{{- end }} +{{- end }} + +{{/* +Pod Ephemeral Storage Limit - new path with fallback to old path +*/}} +{{- define "locust.podEphemeralLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.resources .Values.locustPods.resources.limits .Values.locustPods.resources.limits.ephemeralStorage }} +{{- .Values.locustPods.resources.limits.ephemeralStorage }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.resource .Values.config.loadGenerationPods.resource.ephemeralLimit }} +{{- .Values.config.loadGenerationPods.resource.ephemeralLimit }} +{{- else }} +{{- "50M" }} +{{- end }} +{{- end }} + +{{/* 
+Affinity Injection - new path with fallback to old path +*/}} +{{- define "locust.affinityInjection" -}} +{{- if and .Values.locustPods (hasKey .Values.locustPods "affinityInjection") }} +{{- .Values.locustPods.affinityInjection }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.affinity (hasKey .Values.config.loadGenerationPods.affinity "enableCrInjection") }} +{{- .Values.config.loadGenerationPods.affinity.enableCrInjection }} +{{- else }} +{{- true }} +{{- end }} +{{- end }} + +{{/* +Tolerations Injection - new path with fallback to old path +*/}} +{{- define "locust.tolerationsInjection" -}} +{{- if and .Values.locustPods (hasKey .Values.locustPods "tolerationsInjection") }} +{{- .Values.locustPods.tolerationsInjection }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.taintTolerations (hasKey .Values.config.loadGenerationPods.taintTolerations "enableCrInjection") }} +{{- .Values.config.loadGenerationPods.taintTolerations.enableCrInjection }} +{{- else }} +{{- true }} +{{- end }} +{{- end }} + +{{/* +TTL Seconds After Finished - new path with fallback to old path +*/}} +{{- define "locust.ttlSecondsAfterFinished" -}} +{{- if and .Values.locustPods .Values.locustPods.ttlSecondsAfterFinished }} +{{- .Values.locustPods.ttlSecondsAfterFinished }} +{{- else if and .Values.config .Values.config.loadGenerationJobs .Values.config.loadGenerationJobs.ttlSecondsAfterFinished }} +{{- .Values.config.loadGenerationJobs.ttlSecondsAfterFinished }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Image - new path with fallback to old path +*/}} +{{- define "locust.metricsExporterImage" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.image }} +{{- .Values.locustPods.metricsExporter.image }} +{{- else if and .Values.config .Values.config.loadGenerationPods 
.Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.image }} +{{- .Values.config.loadGenerationPods.metricsExporter.image }} +{{- else }} +{{- "containersol/locust_exporter:v0.5.0" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Port - new path with fallback to old path +*/}} +{{- define "locust.metricsExporterPort" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.port }} +{{- .Values.locustPods.metricsExporter.port }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.port }} +{{- .Values.config.loadGenerationPods.metricsExporter.port }} +{{- else }} +{{- 9646 }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Pull Policy - new path with fallback to old path +*/}} +{{- define "locust.metricsExporterPullPolicy" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.pullPolicy }} +{{- .Values.locustPods.metricsExporter.pullPolicy }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.pullPolicy }} +{{- .Values.config.loadGenerationPods.metricsExporter.pullPolicy }} +{{- else }} +{{- "IfNotPresent" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter CPU Request +*/}} +{{- define "locust.metricsExporterCpuRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources .Values.locustPods.metricsExporter.resources.requests .Values.locustPods.metricsExporter.resources.requests.cpu }} +{{- .Values.locustPods.metricsExporter.resources.requests.cpu }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource 
.Values.config.loadGenerationPods.metricsExporter.resource.cpuRequest }} +{{- .Values.config.loadGenerationPods.metricsExporter.resource.cpuRequest }} +{{- else }} +{{- "100m" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Memory Request +*/}} +{{- define "locust.metricsExporterMemRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources .Values.locustPods.metricsExporter.resources.requests .Values.locustPods.metricsExporter.resources.requests.memory }} +{{- .Values.locustPods.metricsExporter.resources.requests.memory }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource .Values.config.loadGenerationPods.metricsExporter.resource.memRequest }} +{{- .Values.config.loadGenerationPods.metricsExporter.resource.memRequest }} +{{- else }} +{{- "64Mi" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Ephemeral Request +*/}} +{{- define "locust.metricsExporterEphemeralRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources .Values.locustPods.metricsExporter.resources.requests .Values.locustPods.metricsExporter.resources.requests.ephemeralStorage }} +{{- .Values.locustPods.metricsExporter.resources.requests.ephemeralStorage }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource .Values.config.loadGenerationPods.metricsExporter.resource.ephemeralRequest }} +{{- .Values.config.loadGenerationPods.metricsExporter.resource.ephemeralRequest }} +{{- else }} +{{- "30M" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter CPU Limit +*/}} +{{- define "locust.metricsExporterCpuLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources 
.Values.locustPods.metricsExporter.resources.limits .Values.locustPods.metricsExporter.resources.limits.cpu }} +{{- .Values.locustPods.metricsExporter.resources.limits.cpu }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource .Values.config.loadGenerationPods.metricsExporter.resource.cpuLimit }} +{{- .Values.config.loadGenerationPods.metricsExporter.resource.cpuLimit }} +{{- else }} +{{- "250m" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Memory Limit +*/}} +{{- define "locust.metricsExporterMemLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources .Values.locustPods.metricsExporter.resources.limits .Values.locustPods.metricsExporter.resources.limits.memory }} +{{- .Values.locustPods.metricsExporter.resources.limits.memory }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource .Values.config.loadGenerationPods.metricsExporter.resource.memLimit }} +{{- .Values.config.loadGenerationPods.metricsExporter.resource.memLimit }} +{{- else }} +{{- "128Mi" }} +{{- end }} +{{- end }} + +{{/* +Metrics Exporter Ephemeral Limit +*/}} +{{- define "locust.metricsExporterEphemeralLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.metricsExporter .Values.locustPods.metricsExporter.resources .Values.locustPods.metricsExporter.resources.limits .Values.locustPods.metricsExporter.resources.limits.ephemeralStorage }} +{{- .Values.locustPods.metricsExporter.resources.limits.ephemeralStorage }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.metricsExporter .Values.config.loadGenerationPods.metricsExporter.resource .Values.config.loadGenerationPods.metricsExporter.resource.ephemeralLimit }} +{{- 
.Values.config.loadGenerationPods.metricsExporter.resource.ephemeralLimit }} +{{- else }} +{{- "50M" }} +{{- end }} +{{- end }} + +{{/* +Kafka Bootstrap Servers - new path with fallback to old path +*/}} +{{- define "locust.kafkaBootstrapServers" -}} +{{- if and .Values.kafka .Values.kafka.bootstrapServers }} +{{- .Values.kafka.bootstrapServers }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.kafka .Values.config.loadGenerationPods.kafka.bootstrapServers }} +{{- .Values.config.loadGenerationPods.kafka.bootstrapServers }} +{{- else }} +{{- "localhost:9092" }} +{{- end }} +{{- end }} + +{{/* +Kafka Security Enabled - new path with fallback to old path +*/}} +{{- define "locust.kafkaSecurityEnabled" -}} +{{- if and .Values.kafka .Values.kafka.security (hasKey .Values.kafka.security "enabled") }} +{{- .Values.kafka.security.enabled }} +{{- else if and .Values.config .Values.config.loadGenerationPods .Values.config.loadGenerationPods.kafka .Values.config.loadGenerationPods.kafka.acl (hasKey .Values.config.loadGenerationPods.kafka.acl "enabled") }} +{{- .Values.config.loadGenerationPods.kafka.acl.enabled }} +{{- else }} +{{- false }} +{{- end }} +{{- end }} + +{{/* +============================================================================= +SECTION 2.5: Role-Specific Resource Helpers +============================================================================= +These helpers return role-specific resources for master and worker pods. +If masterResources/workerResources are set, they override the unified resources. +If empty, the helper returns empty string, meaning "use unified resources". + +This implements a three-level precedence: + 1. CR-level resources (highest precedence, in Go code) + 2. Helm role-specific resources (these helpers) + 3. 
Helm unified resources (fallback, in Section 2 helpers above) +============================================================================= +*/}} + +{{/* +Master CPU Request - role-specific override +*/}} +{{- define "locust.masterCpuRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.masterResources .Values.locustPods.masterResources.requests }} +{{- .Values.locustPods.masterResources.requests.cpu | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Master Memory Request - role-specific override +*/}} +{{- define "locust.masterMemRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.masterResources .Values.locustPods.masterResources.requests }} +{{- .Values.locustPods.masterResources.requests.memory | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Master Ephemeral Storage Request - role-specific override +*/}} +{{- define "locust.masterEphemeralRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.masterResources .Values.locustPods.masterResources.requests }} +{{- .Values.locustPods.masterResources.requests.ephemeralStorage | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Master CPU Limit - role-specific override +*/}} +{{- define "locust.masterCpuLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.masterResources .Values.locustPods.masterResources.limits }} +{{- .Values.locustPods.masterResources.limits.cpu | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Master Memory Limit - role-specific override +*/}} +{{- define "locust.masterMemLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.masterResources .Values.locustPods.masterResources.limits }} +{{- .Values.locustPods.masterResources.limits.memory | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Master Ephemeral Storage Limit - role-specific override +*/}} +{{- define "locust.masterEphemeralLimit" -}} +{{- if and .Values.locustPods 
.Values.locustPods.masterResources .Values.locustPods.masterResources.limits }} +{{- .Values.locustPods.masterResources.limits.ephemeralStorage | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker CPU Request - role-specific override +*/}} +{{- define "locust.workerCpuRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.requests }} +{{- .Values.locustPods.workerResources.requests.cpu | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker Memory Request - role-specific override +*/}} +{{- define "locust.workerMemRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.requests }} +{{- .Values.locustPods.workerResources.requests.memory | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker Ephemeral Storage Request - role-specific override +*/}} +{{- define "locust.workerEphemeralRequest" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.requests }} +{{- .Values.locustPods.workerResources.requests.ephemeralStorage | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker CPU Limit - role-specific override +*/}} +{{- define "locust.workerCpuLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.limits }} +{{- .Values.locustPods.workerResources.limits.cpu | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker Memory Limit - role-specific override +*/}} +{{- define "locust.workerMemLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.limits }} +{{- .Values.locustPods.workerResources.limits.memory | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +Worker Ephemeral Storage Limit - role-specific override +*/}} +{{- define 
"locust.workerEphemeralLimit" -}} +{{- if and .Values.locustPods .Values.locustPods.workerResources .Values.locustPods.workerResources.limits }} +{{- .Values.locustPods.workerResources.limits.ephemeralStorage | default "" }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} + +{{/* +============================================================================= +SECTION 3: Environment Variables Helper +============================================================================= +Generates all environment variables for the operator container. +These env vars configure how the operator creates Locust test pods. + +The operator reads these at startup and uses them as defaults when +creating master/worker Jobs for LocustTest CRs. + +Categories: + - Resource limits: CPU, memory, ephemeral storage for Locust pods + - Feature flags: Affinity injection, tolerations injection + - Metrics exporter: Sidecar container configuration + - Kafka: Message queue configuration (deprecated) +============================================================================= +*/}} +{{- define "locust-k8s-operator.envVars" -}} +# Webhook configuration +# Controls whether the operator registers conversion and validation webhooks +- name: ENABLE_WEBHOOKS + value: {{ .Values.webhook.enabled | quote }} +# Resource limits for Locust test pods (master and workers) +# These define the default resources when not specified in the LocustTest CR +- name: POD_CPU_REQUEST + value: {{ include "locust.podCpuRequest" . | quote }} +- name: POD_MEM_REQUEST + value: {{ include "locust.podMemRequest" . | quote }} +- name: POD_EPHEMERAL_REQUEST + value: {{ include "locust.podEphemeralRequest" . | quote }} +- name: POD_CPU_LIMIT + value: {{ include "locust.podCpuLimit" . | quote }} +- name: POD_MEM_LIMIT + value: {{ include "locust.podMemLimit" . | quote }} +- name: POD_EPHEMERAL_LIMIT + value: {{ include "locust.podEphemeralLimit" . 
| quote }} +# Role-specific resources for master Locust containers +# Empty values mean "use unified resources" (backward compatible) +{{- $masterCpuReq := include "locust.masterCpuRequest" . }} +{{- if $masterCpuReq }} +- name: MASTER_POD_CPU_REQUEST + value: {{ $masterCpuReq | quote }} +{{- end }} +{{- $masterMemReq := include "locust.masterMemRequest" . }} +{{- if $masterMemReq }} +- name: MASTER_POD_MEM_REQUEST + value: {{ $masterMemReq | quote }} +{{- end }} +{{- $masterEphemeralReq := include "locust.masterEphemeralRequest" . }} +{{- if $masterEphemeralReq }} +- name: MASTER_POD_EPHEMERAL_REQUEST + value: {{ $masterEphemeralReq | quote }} +{{- end }} +{{- $masterCpuLim := include "locust.masterCpuLimit" . }} +{{- if $masterCpuLim }} +- name: MASTER_POD_CPU_LIMIT + value: {{ $masterCpuLim | quote }} +{{- end }} +{{- $masterMemLim := include "locust.masterMemLimit" . }} +{{- if $masterMemLim }} +- name: MASTER_POD_MEM_LIMIT + value: {{ $masterMemLim | quote }} +{{- end }} +{{- $masterEphemeralLim := include "locust.masterEphemeralLimit" . }} +{{- if $masterEphemeralLim }} +- name: MASTER_POD_EPHEMERAL_LIMIT + value: {{ $masterEphemeralLim | quote }} +{{- end }} +# Role-specific resources for worker Locust containers +# Empty values mean "use unified resources" (backward compatible) +{{- $workerCpuReq := include "locust.workerCpuRequest" . }} +{{- if $workerCpuReq }} +- name: WORKER_POD_CPU_REQUEST + value: {{ $workerCpuReq | quote }} +{{- end }} +{{- $workerMemReq := include "locust.workerMemRequest" . }} +{{- if $workerMemReq }} +- name: WORKER_POD_MEM_REQUEST + value: {{ $workerMemReq | quote }} +{{- end }} +{{- $workerEphemeralReq := include "locust.workerEphemeralRequest" . }} +{{- if $workerEphemeralReq }} +- name: WORKER_POD_EPHEMERAL_REQUEST + value: {{ $workerEphemeralReq | quote }} +{{- end }} +{{- $workerCpuLim := include "locust.workerCpuLimit" . 
}} +{{- if $workerCpuLim }} +- name: WORKER_POD_CPU_LIMIT + value: {{ $workerCpuLim | quote }} +{{- end }} +{{- $workerMemLim := include "locust.workerMemLimit" . }} +{{- if $workerMemLim }} +- name: WORKER_POD_MEM_LIMIT + value: {{ $workerMemLim | quote }} +{{- end }} +{{- $workerEphemeralLim := include "locust.workerEphemeralLimit" . }} +{{- if $workerEphemeralLim }} +- name: WORKER_POD_EPHEMERAL_LIMIT + value: {{ $workerEphemeralLim | quote }} +{{- end }} +# Feature flags for pod scheduling +# When enabled, the operator injects affinity/tolerations from the CR into pods +- name: ENABLE_AFFINITY_CR_INJECTION + value: {{ include "locust.affinityInjection" . | quote }} +- name: ENABLE_TAINT_TOLERATIONS_CR_INJECTION + value: {{ include "locust.tolerationsInjection" . | quote }} +# Metrics exporter sidecar configuration +# This Prometheus exporter runs alongside the Locust master to expose metrics +# Note: Not used when OpenTelemetry is enabled (OTel replaces the sidecar) +- name: METRICS_EXPORTER_IMAGE + value: {{ include "locust.metricsExporterImage" . | quote }} +- name: METRICS_EXPORTER_PORT + value: {{ include "locust.metricsExporterPort" . | quote }} +- name: METRICS_EXPORTER_IMAGE_PULL_POLICY + value: {{ include "locust.metricsExporterPullPolicy" . | quote }} +- name: METRICS_EXPORTER_CPU_REQUEST + value: {{ include "locust.metricsExporterCpuRequest" . | quote }} +- name: METRICS_EXPORTER_MEM_REQUEST + value: {{ include "locust.metricsExporterMemRequest" . | quote }} +- name: METRICS_EXPORTER_EPHEMERAL_REQUEST + value: {{ include "locust.metricsExporterEphemeralRequest" . | quote }} +- name: METRICS_EXPORTER_CPU_LIMIT + value: {{ include "locust.metricsExporterCpuLimit" . | quote }} +- name: METRICS_EXPORTER_MEM_LIMIT + value: {{ include "locust.metricsExporterMemLimit" . | quote }} +- name: METRICS_EXPORTER_EPHEMERAL_LIMIT + value: {{ include "locust.metricsExporterEphemeralLimit" . 
| quote }} +# Job TTL - automatically clean up completed Jobs after this many seconds +# If not set, Jobs remain until manually deleted or CR is deleted +{{- $ttl := include "locust.ttlSecondsAfterFinished" . }} +{{- if $ttl }} +- name: JOB_TTL_SECONDS_AFTER_FINISHED + value: {{ $ttl | quote }} +{{- end }} +# Kafka configuration (DEPRECATED - kept for backward compatibility) +# Consider using OpenTelemetry for metrics export instead +{{- if .Values.kafka.enabled }} +- name: KAFKA_BOOTSTRAP_SERVERS + value: {{ include "locust.kafkaBootstrapServers" . | quote }} +- name: KAFKA_SECURITY_ENABLED + value: {{ include "locust.kafkaSecurityEnabled" . | quote }} +{{- if or (and .Values.kafka .Values.kafka.security.enabled) (and .Values.config .Values.config.loadGenerationPods.kafka.acl.enabled) }} +- name: KAFKA_SECURITY_PROTOCOL_CONFIG + value: {{ .Values.kafka.security.protocol | default .Values.config.loadGenerationPods.kafka.acl.protocol | default "SASL_PLAINTEXT" | quote }} +- name: KAFKA_SASL_MECHANISM + value: {{ .Values.kafka.security.saslMechanism | default .Values.config.loadGenerationPods.kafka.sasl.mechanism | default "SCRAM-SHA-512" | quote }} +{{- if .Values.kafka.security.jaasConfig }} +- name: KAFKA_SASL_JAAS_CONFIG + value: {{ .Values.kafka.security.jaasConfig | quote }} +{{- else if .Values.config }} +- name: KAFKA_SASL_JAAS_CONFIG + value: {{ .Values.config.loadGenerationPods.kafka.sasl.jaas.config | quote }} +{{- end }} +{{- if .Values.kafka.credentials.secretName }} +- name: KAFKA_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.kafka.credentials.secretName }} + key: {{ .Values.kafka.credentials.usernameKey | default "username" }} +- name: KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.kafka.credentials.secretName }} + key: {{ .Values.kafka.credentials.passwordKey | default "password" }} +{{- else if .Values.config }} +- name: KAFKA_USERNAME + valueFrom: + secretKeyRef: + name: {{ 
.Values.config.loadGenerationPods.kafka.locustK8sKafkaUser.userName }} + key: {{ .Values.config.loadGenerationPods.kafka.acl.secret.userKey }} +- name: KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.config.loadGenerationPods.kafka.locustK8sKafkaUser.userName }} + key: {{ .Values.config.loadGenerationPods.kafka.acl.secret.passwordKey }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/locust-k8s-operator/templates/certificate.yaml b/charts/locust-k8s-operator/templates/certificate.yaml new file mode 100644 index 00000000..79af19b6 --- /dev/null +++ b/charts/locust-k8s-operator/templates/certificate.yaml @@ -0,0 +1,76 @@ +{{/* +============================================================================= +TLS CERTIFICATES FOR WEBHOOK (cert-manager) +============================================================================= +This template creates cert-manager resources for automatic TLS certificate +management for the admission webhooks. + +Resources created: + 1. Issuer - A self-signed certificate issuer (namespace-scoped) + 2. 
Certificate - The actual TLS certificate for the webhook Service + +The Certificate creates a Secret containing: + - tls.crt: The certificate + - tls.key: The private key + - ca.crt: The CA certificate (same as tls.crt for self-signed) + +cert-manager automatically: + - Creates the initial certificate + - Renews before expiration + - Updates the Secret + +Prerequisites: + - cert-manager must be installed in the cluster + - webhook.enabled and webhook.certManager.enabled must both be true + +Alternative (manual certificates): + If you don't use cert-manager, create the Secret manually: + kubectl create secret tls -webhook-certs \ + --cert=path/to/tls.crt \ + --key=path/to/tls.key +============================================================================= +*/}} + +{{- if and .Values.webhook.enabled .Values.webhook.certManager.enabled }} +--- +# ============================================================================= +# Issuer - Self-signed certificate issuer +# ============================================================================= +# Creates a namespace-scoped issuer that can sign certificates. +# For production, consider using a ClusterIssuer with Let's Encrypt or +# your organization's PKI. +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-selfsigned-issuer + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +spec: + selfSigned: {} +--- +# ============================================================================= +# Certificate - TLS certificate for webhook Service +# ============================================================================= +# This certificate is used by the webhook server to serve HTTPS. +# The API server validates this certificate when calling webhooks. +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "locust-k8s-operator.fullname" . 
}}-serving-cert + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +spec: + # DNS names that the certificate is valid for + # Must match the Service name used by the webhook + dnsNames: + - {{ include "locust-k8s-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc + - {{ include "locust-k8s-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc.cluster.local + issuerRef: + kind: Issuer + name: {{ include "locust-k8s-operator.fullname" . }}-selfsigned-issuer + # Secret where cert-manager stores the certificate + # This Secret is mounted into the operator pod + secretName: {{ include "locust-k8s-operator.fullname" . }}-webhook-certs +{{- end }} diff --git a/charts/locust-k8s-operator/templates/crd.yaml b/charts/locust-k8s-operator/templates/crd.yaml new file mode 100644 index 00000000..b9b3ba6b --- /dev/null +++ b/charts/locust-k8s-operator/templates/crd.yaml @@ -0,0 +1,3658 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 +{{- if and .Values.webhook.enabled .Values.webhook.certManager.enabled }} + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "locust-k8s-operator.fullname" . }}-serving-cert +{{- end }} + name: locusttests.locust.io +spec: + group: locust.io + names: + kind: LocustTest + listKind: LocustTestList + plural: locusttests + shortNames: + - lotest + singular: locusttest + scope: Namespaced +{{- if .Values.webhook.enabled }} + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: {{ include "locust-k8s-operator.fullname" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /convert + conversionReviewVersions: + - v1 +{{- end }} + versions: + - additionalPrinterColumns: + - description: Master pod command seed + jsonPath: .spec.masterCommandSeed + name: master_cmd + type: string + - description: Number of requested worker pods + jsonPath: .spec.workerReplicas + name: worker_replica_count + type: integer + - description: Locust image + jsonPath: .spec.image + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: locust.io/v1 LocustTest is deprecated, migrate to locust.io/v2 + name: v1 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API (v1 - DEPRECATED). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. + properties: + affinity: + description: Affinity defines affinity rules for pod scheduling. + properties: + nodeAffinity: + description: NodeAffinity defines node affinity rules. + properties: + requiredDuringSchedulingIgnoredDuringExecution: + additionalProperties: + type: string + description: |- + RequiredDuringSchedulingIgnoredDuringExecution defines required node affinity rules. 
+ The map keys are label keys and values are label values that nodes must have. + type: object + type: object + type: object + annotations: + description: Annotations defines annotations to attach to deployed + pods. + properties: + master: + additionalProperties: + type: string + description: Master defines annotations attached to the master + pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines annotations attached to worker pods. + type: object + type: object + configMap: + description: ConfigMap is the name of the ConfigMap containing the + test file(s). + type: string + image: + description: Image is the Locust container image to use. + type: string + imagePullPolicy: + description: ImagePullPolicy defines when to pull the image. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets is a list of secret names for pulling + images from private registries. + items: + type: string + type: array + labels: + description: Labels defines labels to attach to deployed pods. + properties: + master: + additionalProperties: + type: string + description: Master defines labels attached to the master pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines labels attached to worker pods. + type: object + type: object + libConfigMap: + description: LibConfigMap is the name of the ConfigMap containing + lib directory files. + type: string + masterCommandSeed: + description: |- + MasterCommandSeed is the command seed for the master pod. + This forms the base of the locust master command. + type: string + tolerations: + description: Tolerations defines tolerations for pod scheduling. + items: + description: LocustTestToleration defines a toleration for pod scheduling. + properties: + effect: + description: Effect indicates the taint effect to match. 
+ enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: Key is the taint key that the toleration applies + to. + type: string + operator: + description: Operator represents the relationship between the + key and value. + enum: + - Exists + - Equal + type: string + value: + description: Value is the taint value the toleration matches + to. + type: string + required: + - effect + - key + - operator + type: object + type: array + workerCommandSeed: + description: |- + WorkerCommandSeed is the command seed for worker pods. + This forms the base of the locust worker command. + type: string + workerReplicas: + default: 1 + description: WorkerReplicas is the number of worker pods to spawn. + format: int32 + maximum: 500 + minimum: 1 + type: integer + required: + - image + - masterCommandSeed + - workerCommandSeed + - workerReplicas + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current test phase + jsonPath: .status.phase + name: Phase + type: string + - description: Requested worker count + jsonPath: .spec.worker.replicas + name: Workers + type: integer + - description: Connected workers + jsonPath: .status.connectedWorkers + name: Connected + type: integer + - jsonPath: .spec.image + name: Image + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. + properties: + env: + description: Env configuration for environment variable injection. + properties: + configMapRefs: + description: ConfigMapRefs injects all keys from ConfigMaps as + environment variables. + items: + description: ConfigMapEnvSource defines a ConfigMap environment + source. + properties: + name: + description: Name of the ConfigMap. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. + type: string + required: + - name + type: object + type: array + secretMounts: + description: SecretMounts mounts secrets as files in the container. + items: + description: SecretMount defines a secret file mount. + properties: + mountPath: + description: MountPath is the path where the secret should + be mounted. + type: string + name: + description: Name of the secret to mount. + type: string + readOnly: + default: true + description: ReadOnly mounts the secret as read-only. + type: boolean + required: + - mountPath + - name + type: object + type: array + secretRefs: + description: SecretRefs injects all keys from Secrets as environment + variables. + items: + description: SecretEnvSource defines a Secret environment source. + properties: + name: + description: Name of the Secret. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. 
+ type: string + required: + - name + type: object + type: array + variables: + description: Variables defines specific environment variables. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + type: object + image: + description: Image is the container image for Locust pods. + type: string + imagePullPolicy: + default: IfNotPresent + description: ImagePullPolicy for the Locust container. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets for pulling from private registries. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + master: + description: Master configuration for the master node. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the master pod. + type: object + autoquit: + description: Autoquit configuration for automatic test termination. + properties: + enabled: + default: true + description: Enabled enables the --autoquit flag. + type: boolean + timeout: + default: 60 + description: Timeout in seconds after test completion before + quitting. + format: int32 + minimum: 0 + type: integer + required: + - enabled + type: object + autostart: + default: true + description: Autostart enables the --autostart flag to start the + test automatically. 
+ type: boolean + command: + description: |- + Command is the base command for the master node. + The operator appends: --master --master-port=5557 --expect-workers=N + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for the master pod. + type: object + resources: + description: Resources defines resource requests and limits for + the master pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + type: object + observability: + description: Observability configuration for metrics and tracing. + properties: + openTelemetry: + description: OpenTelemetry configuration for native Locust OTel + integration. + properties: + enabled: + default: false + description: |- + Enabled enables OpenTelemetry integration. + When true, adds --otel flag to Locust command. + type: boolean + endpoint: + description: |- + Endpoint is the OTel collector endpoint (e.g., "otel-collector:4317"). + Required when Enabled is true. + type: string + extraEnvVars: + additionalProperties: + type: string + description: ExtraEnvVars for additional OTel SDK configuration. + type: object + insecure: + default: false + description: Insecure skips TLS verification for the collector + connection. + type: boolean + protocol: + default: grpc + description: Protocol for OTel export. + enum: + - grpc + - http/protobuf + type: string + required: + - enabled + type: object + type: object + scheduling: + description: Scheduling configuration for pod placement. + properties: + affinity: + description: Affinity rules for pod scheduling. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector for pod scheduling. + type: object + tolerations: + description: Tolerations for pod scheduling. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + testFiles: + description: TestFiles configuration for locustfile and library mounting. + properties: + configMapRef: + description: ConfigMapRef is the name of the ConfigMap containing + locustfile(s). + type: string + libConfigMapRef: + description: LibConfigMapRef is the name of the ConfigMap containing + library files. + type: string + libMountPath: + default: /opt/locust/lib + description: LibMountPath is the mount path for library files. + type: string + srcMountPath: + default: /lotest/src + description: SrcMountPath is the mount path for test files. + type: string + type: object + volumeMounts: + description: VolumeMounts for the locust container with target selection. + items: + description: TargetedVolumeMount extends VolumeMount with target + pod selection. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + target: + default: both + description: Target specifies which pods receive this mount. 
+ enum: + - master + - worker + - both + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes to add to pods. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. 
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
+ properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. 
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". 
If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
+ type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. 
Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + worker: + description: Worker configuration for worker nodes. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for worker pods. + type: object + command: + description: |- + Command is the base command for worker nodes. + The operator appends: --worker --master-host= --master-port=5557 + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for worker pods. + type: object + replicas: + description: Replicas is the number of worker pods to create. + format: int32 + maximum: 500 + minimum: 1 + type: integer + resources: + description: Resources defines resource requests and limits for + worker pods. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + - replicas + type: object + required: + - image + - master + - worker + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. + properties: + completionTime: + description: CompletionTime is when the test completed. + format: date-time + type: string + conditions: + description: Conditions represent the latest available observations + of the test's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectedWorkers: + description: |- + ConnectedWorkers is the approximate number of connected workers, + derived from the worker Job's Active pod count (Job.Status.Active). + This is an approximation as Kubernetes Job.Status.Active may lag behind + actual Locust worker connections. + format: int32 + type: integer + expectedWorkers: + description: ExpectedWorkers is the number of workers expected to + connect. + format: int32 + type: integer + observedGeneration: + description: ObservedGeneration is the most recent generation observed + by the controller. + format: int64 + type: integer + phase: + description: Phase is the current lifecycle phase of the test. + enum: + - Pending + - Running + - Succeeded + - Failed + type: string + startTime: + description: StartTime is when the test started. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/locust-k8s-operator/templates/deployment.yaml b/charts/locust-k8s-operator/templates/deployment.yaml index 2a1c97e5..739debf5 100644 --- a/charts/locust-k8s-operator/templates/deployment.yaml +++ b/charts/locust-k8s-operator/templates/deployment.yaml @@ -1,8 +1,27 @@ +{{/* +============================================================================= +LOCUST K8S OPERATOR DEPLOYMENT +============================================================================= +This deployment runs the Go-based Locust K8s Operator controller. 
+ +The operator watches for LocustTest custom resources and creates/manages: + - Master Job (single pod running Locust in master mode) + - Worker Jobs (multiple pods running Locust in worker mode) + - Services for master communication + - ConfigMaps for test files + +Key features: + - Leader election for HA deployments (multiple replicas) + - Health probes on port 8081 (/healthz, /readyz) + - Optional metrics endpoint for Prometheus scraping + - Optional webhook for CR validation and conversion +============================================================================= +*/}} apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "locust-k8s-operator.fullname" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ .Release.Namespace }} labels: {{- include "locust-k8s-operator.labels" . | nindent 4 }} spec: @@ -26,120 +45,103 @@ spec: - name: {{ . }} {{- end }} {{- end }} + # Termination grace period for graceful shutdown + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + # Pod security context - enforces non-root execution + # Ko uses distroless/static:nonroot which runs as UID 65532 + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: - - name: {{ .Chart.Name }} + - name: {{ include "locust-k8s-operator.name" . 
}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + # Command-line arguments configure operator behavior + # Note: ko-built images have ENTRYPOINT set automatically to /ko-app/ + args: + # Health probe endpoint (required, fixed port) + - --health-probe-bind-address=:8081 + {{- if .Values.leaderElection.enabled }} + # Leader election ensures only one active controller in HA setups + - --leader-elect=true + {{- end }} + {{- if .Values.metrics.enabled }} + # Prometheus metrics endpoint + - --metrics-bind-address=:{{ .Values.metrics.port }} + {{- if .Values.metrics.secure }} + - --metrics-secure=true + {{- else }} + - --metrics-secure=false + {{- end }} + {{- end }} + {{- if .Values.webhook.enabled }} + # Webhook TLS certificate path (managed by cert-manager or manually) + - --webhook-cert-path=/tmp/k8s-webhook-server/serving-certs + {{- end }} ports: - - name: micronaut-port - containerPort: {{ .Values.appPort }} + # Health probe port - used by Kubernetes for liveness/readiness + - name: health + containerPort: 8081 + protocol: TCP + {{- if .Values.metrics.enabled }} + # Prometheus metrics port + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.webhook.enabled }} + # Webhook server port for admission control + - name: webhook + containerPort: {{ .Values.webhook.port }} + protocol: TCP + {{- end }} + # Container security context - principle of least privilege + securityContext: + {{- toYaml .Values.containerSecurityContext | nindent 12 }} + # Liveness probe - restart container if unhealthy livenessProbe: -{{ toYaml .Values.livenessProbe | indent 12 }} + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # Readiness probe - only receive traffic when ready readinessProbe: -{{ toYaml .Values.readinessProbe | indent 12 }} + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + 
periodSeconds: 10 + # Resource limits for the operator pod itself (not Locust test pods) resources: -{{- toYaml .Values.resources | nindent 12 }} + {{- toYaml .Values.resources | nindent 12 }} + # Environment variables configure Locust test pod defaults + # See _helpers.tpl for the full list and backward compatibility mappings env: - - name: APP_SERVER_PORT - value: {{ .Values.appPort | quote}} - # K8s config - - name: K8S_NAMESPACE - value: {{ .Release.Namespace | quote }} - # Kafka config - - name: KAFKA_BOOTSTRAP_SERVERS - value: {{ .Values.config.loadGenerationPods.kafka.bootstrapServers | quote }} - - name: KAFKA_SECURITY_ENABLED - value: {{ .Values.config.loadGenerationPods.kafka.acl.enabled | quote }} - {{- if .Values.config.loadGenerationPods.kafka.acl.enabled }} - - name: KAFKA_SECURITY_PROTOCOL_CONFIG - value: {{ .Values.config.loadGenerationPods.kafka.acl.protocol | quote }} - - name: KAFKA_SASL_MECHANISM - value: {{ .Values.config.loadGenerationPods.kafka.sasl.mechanism | quote }} - - name: KAFKA_SASL_JAAS_CONFIG - value: {{ .Values.config.loadGenerationPods.kafka.sasl.jaas.config | quote }} - - name: KAFKA_USERNAME - valueFrom: - secretKeyRef: - name: {{ .Values.config.loadGenerationPods.kafka.locustK8sKafkaUser.userName }} - key: {{ .Values.config.loadGenerationPods.kafka.acl.secret.userKey }} - - name: KAFKA_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.config.loadGenerationPods.kafka.locustK8sKafkaUser.userName }} - key: {{ .Values.config.loadGenerationPods.kafka.acl.secret.passwordKey }} - {{- end }} - - # Load generation job config - - name: JOB_TTL_SECONDS_AFTER_FINISHED - value: {{ .Values.config.loadGenerationJobs.ttlSecondsAfterFinished | quote }} - - # Load generation resource config - - name: POD_CPU_REQUEST - value: {{ .Values.config.loadGenerationPods.resource.cpuRequest | quote }} - - name: POD_MEM_REQUEST - value: {{ .Values.config.loadGenerationPods.resource.memRequest | quote }} - - name: POD_EPHEMERAL_REQUEST - value: {{ 
.Values.config.loadGenerationPods.resource.ephemeralRequest | quote }} - - name: POD_CPU_LIMIT - value: {{ .Values.config.loadGenerationPods.resource.cpuLimit | quote }} - - name: POD_MEM_LIMIT - value: {{ .Values.config.loadGenerationPods.resource.memLimit | quote }} - - name: POD_EPHEMERAL_LIMIT - value: {{ .Values.config.loadGenerationPods.resource.ephemeralLimit | quote }} - - name: ENABLE_AFFINITY_CR_INJECTION - value: {{ .Values.config.loadGenerationPods.affinity.enableCrInjection | quote }} - - name: ENABLE_TAINT_TOLERATIONS_CR_INJECTION - value: {{ .Values.config.loadGenerationPods.taintTolerations.enableCrInjection | quote }} - - # Metrics Exporter config - - name: METRICS_EXPORTER_IMAGE - value: {{ .Values.config.loadGenerationPods.metricsExporter.image | quote }} - - name: METRICS_EXPORTER_PORT - value: {{ .Values.config.loadGenerationPods.metricsExporter.port | quote }} - - name: METRICS_EXPORTER_IMAGE_PULL_POLICY - value: "{{ .Values.config.loadGenerationPods.metricsExporter.pullPolicy | default .Values.image.pullPolicy }}" - - name: METRICS_EXPORTER_CPU_REQUEST - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.cpuRequest | quote }} - - name: METRICS_EXPORTER_MEM_REQUEST - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.memRequest | quote }} - - name: METRICS_EXPORTER_EPHEMERAL_REQUEST - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.ephemeralRequest | quote }} - - name: METRICS_EXPORTER_CPU_LIMIT - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.cpuLimit | quote }} - - name: METRICS_EXPORTER_MEM_LIMIT - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.memLimit | quote }} - - name: METRICS_EXPORTER_EPHEMERAL_LIMIT - value: {{ .Values.config.loadGenerationPods.metricsExporter.resource.ephemeralLimit | quote }} - - # Metric config - - name: METRICS_ENABLE - value: {{ .Values.micronaut.metrics.enabled | quote }} - {{- if 
.Values.micronaut.metrics.enabled | quote }} - - name: METRICS_WEB_ENABLE - value: {{ .Values.micronaut.metrics.web.enabled | quote }} - - name: METRICS_JVM_ENABLE - value: {{ .Values.micronaut.metrics.jvm.enabled | quote }} - - name: METRICS_UPTIME_ENABLE - value: {{ .Values.micronaut.metrics.uptime.enabled | quote }} - - name: METRICS_PROCESSOR_ENABLE - value: {{ .Values.micronaut.metrics.processor.enabled | quote }} - - name: METRICS_FILES_ENABLE - value: {{ .Values.micronaut.metrics.files.enabled | quote }} - - name: METRICS_LOGBACK_ENABLE - value: {{ .Values.micronaut.metrics.logback.enabled | quote }} - - name: METRICS_EXECUTOR_ENABLE - value: {{ .Values.micronaut.metrics.executor.enabled | quote }} - - name: METRICS_PROMETHEUS_STEP - value: {{ .Values.micronaut.metrics.export.prometheus.step | quote }} - + {{- include "locust-k8s-operator.envVars" . | nindent 12 }} + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- if .Values.environmentVariables }} - {{- range $key, $value := .Values.environmentVariables }} - - name: {{ $key }} - value: {{ $value | quote }} - {{- end }} - {{- end }} - + {{- if .Values.webhook.enabled }} + # /tmp emptyDir allows controller-runtime webhook server to create temp files + # with readOnlyRootFilesystem: true. Webhook certs overlay at subdirectory. + volumeMounts: + - name: tmp + mountPath: /tmp + - name: webhook-certs + mountPath: /tmp/k8s-webhook-server/serving-certs + readOnly: true + {{- end }} + {{- if .Values.webhook.enabled }} + # Webhook certificate Secret (created by cert-manager or manually) + volumes: + - name: tmp + emptyDir: {} + - name: webhook-certs + secret: + secretName: {{ include "locust-k8s-operator.fullname" . }}-webhook-certs + optional: true + {{- end }} + # Node scheduling constraints {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/charts/locust-k8s-operator/templates/metrics-service.yaml b/charts/locust-k8s-operator/templates/metrics-service.yaml new file mode 100644 index 00000000..09a6cd22 --- /dev/null +++ b/charts/locust-k8s-operator/templates/metrics-service.yaml @@ -0,0 +1,17 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +spec: + selector: + {{- include "locust-k8s-operator.selectorLabels" . | nindent 4 }} + ports: + - name: metrics + port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP +{{- end }} diff --git a/charts/locust-k8s-operator/templates/otel-collector.yaml b/charts/locust-k8s-operator/templates/otel-collector.yaml new file mode 100644 index 00000000..686afebf --- /dev/null +++ b/charts/locust-k8s-operator/templates/otel-collector.yaml @@ -0,0 +1,162 @@ +{{/* +============================================================================= +OPENTELEMETRY COLLECTOR (Optional) +============================================================================= +This template deploys an OpenTelemetry Collector alongside the operator. + +The OTel Collector receives telemetry data (traces, metrics) from Locust +test pods and exports it to your observability backend (Jaeger, Prometheus, +Grafana, etc.). + +When to use this: + - You want centralized telemetry collection for Locust tests + - You're using the OpenTelemetry integration in LocustTest CRs + - You don't have an existing OTel Collector in your cluster + +When NOT to use this: + - You already have an OTel Collector (point Locust to that instead) + - You're using the Prometheus metrics exporter sidecar (legacy approach) + +Configuration: + The collector config is provided via otelCollector.config in values.yaml. + See https://opentelemetry.io/docs/collector/configuration/ for options. 
+ +Example LocustTest CR configuration to use this collector: + spec: + observability: + openTelemetry: + enabled: true + endpoint: "<release-name>-locust-k8s-operator-otel-collector:4317" + protocol: grpc +============================================================================= +*/}} + +{{- if .Values.otelCollector.enabled }} +--- +# ============================================================================= +# ConfigMap - OTel Collector configuration +# ============================================================================= +# Contains the collector pipeline configuration (receivers, processors, exporters). +# Customize this in values.yaml under otelCollector.config +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-otel-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +data: + otel-collector-config.yaml: | + {{- .Values.otelCollector.config | nindent 4 }} +--- +# ============================================================================= +# Deployment - OTel Collector pod +# ============================================================================= +# Runs the OpenTelemetry Collector container. +# Receives telemetry on OTLP ports (4317 gRPC, 4318 HTTP). +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-otel-collector + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} + app.kubernetes.io/component: otel-collector +spec: + replicas: {{ .Values.otelCollector.replicas | default 1 }} + selector: + matchLabels: + {{- include "locust-k8s-operator.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: otel-collector + template: + metadata: + annotations: + checksum/otel-config: {{ .Values.otelCollector.config | sha256sum }} + labels: + {{- include "locust-k8s-operator.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: otel-collector + spec: + # Security context - run as non-root + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: otel-collector + image: {{ .Values.otelCollector.image }} + args: + # Point to the config file mounted from ConfigMap + - --config=/conf/otel-collector-config.yaml + ports: + # OTLP gRPC receiver - preferred for high-throughput + - name: otlp-grpc + containerPort: 4317 + protocol: TCP + # OTLP HTTP receiver - alternative for HTTP-only environments + - name: otlp-http + containerPort: 4318 + protocol: TCP + # Health check endpoint + - name: health + containerPort: 13133 + protocol: TCP + # Container security - principle of least privilege + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + readOnlyRootFilesystem: true + # Liveness probe - restart if unhealthy + livenessProbe: + httpGet: + path: / + port: 13133 + initialDelaySeconds: 5 + periodSeconds: 10 + # Readiness probe - only receive traffic when ready + readinessProbe: + httpGet: + path: / + port: 13133 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + {{- toYaml .Values.otelCollector.resources | nindent 12 }} + volumeMounts: + - name: config + mountPath: /conf + readOnly: true + volumes: + - name: config + configMap: + name: {{ include "locust-k8s-operator.fullname" . }}-otel-config +--- +# ============================================================================= +# Service - Exposes OTel Collector to Locust pods +# ============================================================================= +# Locust test pods send telemetry to this Service. +# Use the Service DNS name as the OTEL_EXPORTER_OTLP_ENDPOINT in LocustTest CRs. +apiVersion: v1 +kind: Service +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-otel-collector + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . 
| nindent 4 }} + app.kubernetes.io/component: otel-collector +spec: + ports: + # gRPC port for OTLP + - name: otlp-grpc + port: 4317 + targetPort: otlp-grpc + protocol: TCP + # HTTP port for OTLP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + {{- include "locust-k8s-operator.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: otel-collector +{{- end }} diff --git a/charts/locust-k8s-operator/templates/pdb.yaml b/charts/locust-k8s-operator/templates/pdb.yaml new file mode 100644 index 00000000..69d92ca4 --- /dev/null +++ b/charts/locust-k8s-operator/templates/pdb.yaml @@ -0,0 +1,36 @@ +{{/* +============================================================================= +POD DISRUPTION BUDGET FOR LOCUST K8S OPERATOR +============================================================================= +Optional PDB for High Availability deployments. + +When enabled (with replicaCount >= 2 and leader election), the PDB ensures +at least one operator pod remains available during voluntary disruptions +(node drains, upgrades, etc.). + +This prevents webhook unavailability during operator upgrades when +failurePolicy: Fail is set on webhooks. + +Enable via: + podDisruptionBudget: + enabled: true + replicaCount: 2 # Must be >= 2 for PDB to be useful + leaderElection: + enabled: true # Required for multi-replica operation +============================================================================= +*/}} + +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "locust-k8s-operator.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable | default 1 }} + selector: + matchLabels: + {{- include "locust-k8s-operator.selectorLabels" . 
| nindent 6 }} +{{- end }} diff --git a/charts/locust-k8s-operator/templates/serviceaccount-and-roles.yaml b/charts/locust-k8s-operator/templates/serviceaccount-and-roles.yaml index faa53ed9..04b1292b 100644 --- a/charts/locust-k8s-operator/templates/serviceaccount-and-roles.yaml +++ b/charts/locust-k8s-operator/templates/serviceaccount-and-roles.yaml @@ -1,19 +1,32 @@ -# Check if serviceAccount creation is enabled. -# + If false, the entire block will be skipped. -{{- if .Values.serviceAccount.create -}} +{{/* +============================================================================= +RBAC CONFIGURATION FOR LOCUST K8S OPERATOR +============================================================================= +This template creates the ServiceAccount and RBAC rules needed by the operator. + +The operator needs permissions to: + 1. Watch and update LocustTest CRs (but not create/delete - users create CRs) + 2. Update status subresource (report test status back to CR) + 3. Create/delete Jobs using immutable pattern (master and worker pods) + 4. Create/delete Services (master service for worker communication) + 5. Read ConfigMaps (test files, library files) + 6. Read Secrets (for env injection, Kafka credentials) + 7. Create Events (for status reporting) + 8. Manage Leases (for leader election in HA mode, conditional) -# Initialize variables for reuse. -# + $serviceAccountName; stores the name of the service account. -# + $namespace; stores the namespace where resources will be deployed. -# + $k8sServicesVerbs and $k8sJobsVerbs; define the permissions for Kubernetes services and jobs, respectively. 
+ClusterRole vs Role: + - ClusterRole (k8s.clusterRole.enabled=true): Operator can manage tests in ALL namespaces + - Role (k8s.clusterRole.enabled=false): Operator limited to its own namespace +============================================================================= +*/}} +{{- if .Values.serviceAccount.create -}} {{- $serviceAccountName := include "locust-k8s-operator.serviceAccountName" . }} -{{- $namespace := .Release.Namespace | quote }} -{{- $k8sServicesVerbs := list "get" "list" "create" "update" "delete" }} -{{- $k8sJobsVerbs := list "get" "list" "create" "update" "delete" "patch" }} +{{- $namespace := .Release.Namespace }} -# Define the ServiceAccount resource. -# This account is used to execute the tasks within the Kubernetes cluster. +# ============================================================================= +# ServiceAccount - Identity for the operator pod +# ============================================================================= apiVersion: v1 kind: ServiceAccount metadata: @@ -21,8 +34,10 @@ metadata: namespace: {{ $namespace }} labels: {{- include "locust-k8s-operator.labels" . | nindent 4 }} - -# Add image pull secrets if specified in values. + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} {{- if .Values.image.pullSecrets }} imagePullSecrets: {{- range .Values.image.pullSecrets }} @@ -31,12 +46,10 @@ imagePullSecrets: {{- end }} --- -# Define RBAC (Role-Based Access Control) resources. -# This section creates either a ClusterRole or a Role based on the value of k8s.clusterRole.enabled. - -# Conditionally create either a ClusterRole (for cluster-wide permissions) -# + or a Role (for namespace-specific permissions). -# + The default is to create a namespace-specific Role. 
+# ============================================================================= +# ClusterRole/Role - Permissions for the operator +# ============================================================================= +# Use ClusterRole for multi-namespace operation, Role for single namespace {{- if .Values.k8s.clusterRole.enabled }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -46,28 +59,74 @@ kind: Role {{- end }} metadata: name: {{ $serviceAccountName }} - - # Specify the namespace only if creating a Role (namespace-specific permissions). + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} {{- if not .Values.k8s.clusterRole.enabled }} namespace: {{ $namespace }} {{- end }} - rules: - - apiGroups: [ "locust.io" ] - resources: [ "locusttests" ] - verbs: [ "*" ] # Full access to locusttests resources. - - apiGroups: [ "*" ] - resources: [ "services" ] - verbs: {{ toYaml $k8sServicesVerbs | nindent 6 }} # Permissions for managing services. - - apiGroups: [ "*" ] - resources: [ "jobs" ] - verbs: {{ toYaml $k8sJobsVerbs | nindent 6 }} # Permissions for managing jobs. 
+ # ----------------------------------------------------------------------- + # LocustTest Custom Resource permissions + # ----------------------------------------------------------------------- + # Main CR - operator watches these and reconciles state + - apiGroups: ["locust.io"] + resources: ["locusttests"] + verbs: ["get", "list", "watch", "update", "patch"] + # Status subresource - operator reports test status here + - apiGroups: ["locust.io"] + resources: ["locusttests/status"] + verbs: ["get", "update", "patch"] + # Finalizers subresource - operator adds/removes finalizer for deletion visibility + - apiGroups: ["locust.io"] + resources: ["locusttests/finalizers"] + verbs: ["update"] ---- + # ----------------------------------------------------------------------- + # Core Kubernetes resources + # ----------------------------------------------------------------------- + # ConfigMaps - read user-provided test files and library code + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + # Secrets - read credentials for env injection (never modified by operator) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Services - master service for worker communication (create/delete lifecycle) + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "delete"] + # Pods - monitor pod health for status reporting + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Events - report status changes and errors + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + + # ----------------------------------------------------------------------- + # Batch resources + # ----------------------------------------------------------------------- + # Jobs - immutable create/delete pattern (master and worker pods) + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "create", "delete"] -# Create a corresponding 
ClusterRoleBinding or RoleBinding -# + to bind the ServiceAccount to the appropriate ClusterRole or Role. + # ----------------------------------------------------------------------- + # Coordination resources + # ----------------------------------------------------------------------- + {{- if .Values.leaderElection.enabled }} + # Leases - leader election coordination (only when HA enabled) + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + {{- end }} +--- +# ============================================================================= +# ClusterRoleBinding/RoleBinding - Bind permissions to ServiceAccount +# ============================================================================= {{- if .Values.k8s.clusterRole.enabled }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -77,6 +136,8 @@ kind: RoleBinding {{- end }} metadata: name: {{ $serviceAccountName }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: {{ if .Values.k8s.clusterRole.enabled }}ClusterRole{{ else }}Role{{ end }} diff --git a/charts/locust-k8s-operator/templates/webhook.yaml b/charts/locust-k8s-operator/templates/webhook.yaml new file mode 100644 index 00000000..19fba27d --- /dev/null +++ b/charts/locust-k8s-operator/templates/webhook.yaml @@ -0,0 +1,84 @@ +{{/* +============================================================================= +WEBHOOK CONFIGURATION FOR LOCUST K8S OPERATOR +============================================================================= +This template creates admission webhooks for LocustTest CR validation. + +Webhooks provide: + 1. ValidatingWebhook - Validates LocustTest CRs before they're persisted + - Ensures required fields are present + - Validates resource configurations + - Prevents invalid test configurations + + 2. 
CRD conversion is handled by spec.conversion.webhook on the CRD itself + - NOT via MutatingWebhookConfiguration (this was incorrect) + - Controller-runtime serves the /convert endpoint automatically + - The webhook Service routes traffic for both validation and conversion + +Prerequisites: + - cert-manager installed (if webhook.certManager.enabled=true) + - Or manually created TLS certificates in the webhook-certs Secret + +The webhook Service routes traffic from the API server to the operator pod. +============================================================================= +*/}} + +{{- if .Values.webhook.enabled }} +--- +# ============================================================================= +# ValidatingWebhookConfiguration - Validates LocustTest CRs +# ============================================================================= +# Called by the API server before persisting CREATE/UPDATE operations. +# Rejects invalid configurations with helpful error messages. +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-validating + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} + {{- if .Values.webhook.certManager.enabled }} + # cert-manager will inject the CA bundle from the Certificate resource + annotations: + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "locust-k8s-operator.fullname" . }}-serving-cert + {{- end }} +webhooks: + - name: vlocusttest.kb.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "locust-k8s-operator.fullname" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /validate-locust-io-v2-locusttest + rules: + - apiGroups: ["locust.io"] + apiVersions: ["v1", "v2"] + operations: ["CREATE", "UPDATE"] + resources: ["locusttests"] + # No side effects - webhook only validates, doesn't modify external state + sideEffects: None + # Fail closed - reject requests if webhook is unavailable + failurePolicy: Fail +--- +# ============================================================================= +# Webhook Service - Routes API server requests to operator +# ============================================================================= +# The API server connects to this Service when calling webhooks. +# Traffic is routed to the operator pod's webhook port (default: 9443). +# This Service serves both validation webhook and conversion webhook. +# The /convert endpoint for CRD conversion is registered by controller-runtime. +apiVersion: v1 +kind: Service +metadata: + name: {{ include "locust-k8s-operator.fullname" . }}-webhook + namespace: {{ .Release.Namespace }} + labels: + {{- include "locust-k8s-operator.labels" . | nindent 4 }} +spec: + ports: + # API server always connects on 443, we forward to the webhook port + - port: 443 + targetPort: webhook + protocol: TCP + selector: + {{- include "locust-k8s-operator.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/charts/locust-k8s-operator/values.schema.json b/charts/locust-k8s-operator/values.schema.json new file mode 100644 index 00000000..0de5cc6d --- /dev/null +++ b/charts/locust-k8s-operator/values.schema.json @@ -0,0 +1,297 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/AbdelrhmanHamouda/locust-k8s-operator/charts/locust-k8s-operator/values.schema.json", + "title": "Locust K8s Operator Helm Values", + "description": "Schema for validating Helm values for the Locust K8s Operator chart", + "type": "object", + "properties": { + "image": { + "type": "object", + "required": ["repository"], + "properties": { + "repository": { + "type": "string", + "description": "Docker image repository" + }, + "tag": { + "type": "string", + "description": "Image tag (defaults to appVersion if empty)" + }, + "pullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "description": "Image pull policy" + }, + "pullSecrets": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Image pull secrets" + } + } + }, + "replicaCount": { + "type": "integer", + "minimum": 1, + "description": "Number of operator replicas" + }, + "nameOverride": { + "type": "string", + "description": "Override the chart name" + }, + "fullnameOverride": { + "type": "string", + "description": "Override the full release name" + }, + "extraEnv": { + "type": "array", + "items": { + "type": "object" + }, + "description": "Additional environment variables for operator" + }, + "terminationGracePeriodSeconds": { + "type": "integer", + "minimum": 0, + "description": "Termination grace period in seconds" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string", + "pattern": "^[0-9]+(m|[.][0-9]+)?$" + }, + "memory": { + "type": "string", + "pattern": "^[0-9]+(Mi|Gi|Ki|M|G|K)?$" + } + } + }, + "requests": { + 
"type": "object", + "properties": { + "cpu": { + "type": "string", + "pattern": "^[0-9]+(m|[.][0-9]+)?$" + }, + "memory": { + "type": "string", + "pattern": "^[0-9]+(Mi|Gi|Ki|M|G|K)?$" + } + } + } + }, + "description": "Resource requests and limits for operator pod" + }, + "leaderElection": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable leader election for HA" + } + } + }, + "podDisruptionBudget": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable PodDisruptionBudget" + }, + "maxUnavailable": { + "type": "integer", + "minimum": 1 + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable Prometheus metrics endpoint" + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "secure": { + "type": "boolean", + "description": "Use HTTPS for metrics endpoint" + } + } + }, + "webhook": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable admission webhook" + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "certManager": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Use cert-manager for TLS certificates" + } + } + } + } + }, + "locustPods": { + "type": "object", + "properties": { + "resources": { + "type": "object", + "description": "Default resources for Locust test pods" + }, + "masterResources": { + "type": "object", + "description": "Role-specific resources for master pods" + }, + "workerResources": { + "type": "object", + "description": "Role-specific resources for worker pods" + }, + "affinityInjection": { + "type": "boolean", + "description": "Enable affinity injection from CR" + }, + "tolerationsInjection": { + "type": "boolean", + "description": "Enable tolerations injection from CR" + }, + "ttlSecondsAfterFinished": { + "oneOf": [ + {"type": 
"string"}, + {"type": "integer"} + ], + "description": "TTL for completed Jobs" + }, + "metricsExporter": { + "type": "object", + "properties": { + "image": { + "type": "string" + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "pullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"] + } + } + } + } + }, + "otelCollector": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Deploy OTel Collector" + }, + "image": { + "type": "string", + "description": "OTel Collector image" + }, + "replicas": { + "type": "integer", + "minimum": 1 + }, + "resources": { + "type": "object", + "description": "Resources for OTel Collector" + }, + "config": { + "type": "string", + "description": "OTel Collector YAML configuration" + } + } + }, + "kafka": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Enable Kafka configuration" + }, + "bootstrapServers": { + "type": "string", + "description": "Kafka bootstrap servers" + }, + "security": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "protocol": { + "type": "string" + }, + "saslMechanism": { + "type": "string" + } + } + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean", + "description": "Create service account" + }, + "name": { + "type": "string", + "description": "Service account name" + }, + "annotations": { + "type": "object", + "description": "Service account annotations" + } + } + }, + "podSecurityContext": { + "type": "object", + "description": "Pod-level security context" + }, + "containerSecurityContext": { + "type": "object", + "description": "Container-level security context" + }, + "podAnnotations": { + "type": "object", + "description": "Annotations for operator pods" + }, + "nodeSelector": { + "type": "object", + "description": "Node selector for operator pods" + }, + "tolerations": { + "type": 
"array", + "description": "Tolerations for operator pods" + }, + "affinity": { + "type": "object", + "description": "Affinity rules for operator pods" + } + } +} diff --git a/charts/locust-k8s-operator/values.yaml b/charts/locust-k8s-operator/values.yaml index 68860599..15a4b592 100644 --- a/charts/locust-k8s-operator/values.yaml +++ b/charts/locust-k8s-operator/values.yaml @@ -1,143 +1,241 @@ -# Default values for locust-k8s-operator. - -# General -appPort: 8080 - -# Deployment -replicaCount: 1 +# ============================================================================= +# Locust K8s Operator - Go v2.0.0 +# ============================================================================= +# -- Operator image configuration image: repository: lotest/locust-k8s-operator + tag: "" # Defaults to appVersion pullPolicy: IfNotPresent - # List of names of secrets within the namespace to use imagePullSecrets. Applies to deployments and serviceAccounts pullSecrets: [] - # Overrides the image tag whose default is the chart appVersion. 
- tag: "" - -# Liveness/Readiness probes -livenessProbe: - httpGet: - scheme: HTTP - path: /health - port: micronaut-port - initialDelaySeconds: 10 - periodSeconds: 20 - timeoutSeconds: 10 - failureThreshold: 1 -readinessProbe: - httpGet: - scheme: HTTP - path: /health - port: micronaut-port - initialDelaySeconds: 30 - periodSeconds: 20 - timeoutSeconds: 10 - failureThreshold: 1 - -# K8s config -k8s: - customResourceDefinition: - deploy: true - clusterRole: - enabled: true +# -- Operator pod resources (Go operator with controller-runtime) +resources: + limits: + cpu: 500m + memory: 256Mi + requests: + cpu: 10m + memory: 64Mi + +# -- Replica count (HA default (leader election enabled); set to 1 for single-replica) +replicaCount: 2 + +# -- Override the name of the chart +nameOverride: "" + +# -- Override the full name of the release +fullnameOverride: "locust-operator" + +# -- Additional environment variables for the operator container +extraEnv: [] +# - name: LOG_LEVEL +# value: debug + +# -- Termination grace period for operator pods (seconds) +terminationGracePeriodSeconds: 10 + +# -- Leader election for HA deployments +leaderElection: + enabled: true + +# -- PodDisruptionBudget for HA deployments (requires replicaCount >= 2) +podDisruptionBudget: + enabled: true + maxUnavailable: 1 + +# -- Metrics endpoint (Prometheus) +metrics: + enabled: false # Disabled by default, enable if scraping + port: 8080 + secure: false + +# -- Webhook configuration (for validation/conversion) +webhook: + enabled: false # Requires cert-manager + port: 9443 + certManager: + enabled: true # Use cert-manager for TLS + +# ============================================================================= +# Locust Test Pod Configuration (what the operator creates) +# ============================================================================= + +locustPods: + # -- Default resources for Locust containers + resources: + requests: + cpu: 250m + memory: 128Mi + ephemeralStorage: 30M + limits: + 
cpu: 1000m + memory: 1024Mi + ephemeralStorage: 50M + + # -- Role-specific resources for master Locust containers (overrides unified resources above) + # Leave empty {} to use unified resources for master + masterResources: {} + # requests: + # cpu: 500m + # memory: 256Mi + # ephemeralStorage: 30M + # limits: + # cpu: 2000m + # memory: 2048Mi + # ephemeralStorage: 50M + + # -- Role-specific resources for worker Locust containers (overrides unified resources above) + # Leave empty {} to use unified resources for workers + workerResources: {} + # requests: + # cpu: 250m + # memory: 128Mi + # ephemeralStorage: 30M + # limits: + # cpu: 1000m + # memory: 1024Mi + # ephemeralStorage: 50M + + # -- Inject affinity/tolerations from CR spec + affinityInjection: true + tolerationsInjection: true + + # -- Job TTL after completion (empty = Kubernetes default) + ttlSecondsAfterFinished: "" + + # -- Metrics exporter sidecar (for v1 API / non-OTel mode) + metricsExporter: + image: containersol/locust_exporter:v0.5.0 + port: 9646 + pullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 64Mi + ephemeralStorage: 30M + limits: + cpu: 250m + memory: 128Mi + ephemeralStorage: 50M + +# ============================================================================= +# Optional: OTel Collector (for v2 API OTel mode) +# ============================================================================= + +otelCollector: + enabled: false + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/releases + image: otel/opentelemetry-collector-contrib:0.145.0 + replicas: 1 + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 200m + memory: 256Mi + # -- OTel Collector configuration (YAML) + config: | + extensions: + health_check: + endpoint: 0.0.0.0:13133 + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + processors: + batch: {} + exporters: + debug: + verbosity: detailed + service: + extensions: 
[health_check] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [debug] + +# ============================================================================= +# Standard Kubernetes options +# ============================================================================= serviceAccount: - # Specifies whether a service account should be created create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template name: "" + annotations: {} podAnnotations: {} -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi +# -- Pod security context (runs as non-root) +podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + seccompProfile: + type: RuntimeDefault + +# -- Container security context (principle of least privilege) +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + readOnlyRootFilesystem: true nodeSelector: {} - tolerations: [] - affinity: {} -config: - loadGenerationJobs: - # Either leave empty or use an empty string to avoid setting this option - ttlSecondsAfterFinished: "" - - loadGenerationPods: - resource: - cpuRequest: 250m - memRequest: 128Mi - ephemeralRequest: 30M - cpuLimit: 1000m - memLimit: 1024Mi - ephemeralLimit: 50M - affinity: - enableCrInjection: true - taintTolerations: - enableCrInjection: true - metricsExporter: - # Default Metrics Exporter config used by the operator - image: 
"containersol/locust_exporter:v0.5.0" - port: 9646 - pullPolicy: IfNotPresent - resource: - cpuRequest: 250m - memRequest: 128Mi - ephemeralRequest: 30M - cpuLimit: 250m - memLimit: 128Mi - ephemeralLimit: 50M - kafka: - bootstrapServers: localhost:9092 - locustK8sKafkaUser: - userName: locust-k8s-kafka-user-msk-credentials - deployUser: true - cluster: - name: cluster-name - patternType: literal - - acl: - enabled: false - protocol: SASL_PLAINTEXT - secret: - userKey: username - passwordKey: password - - sasl: - mechanism: SCRAM-SHA-512 - jaas: - config: org.apache.kafka.common.security.scram.ScramLoginModule required username="${KAFKA_USERNAME}" password="${KAFKA_PASSWORD}"; - -micronaut: - metrics: +# ============================================================================= +# Backward Compatibility (DEPRECATED - use new paths above) +# ============================================================================= +# These values are mapped to new paths via _helpers.tpl for existing users. +# They will be removed in a future release. 
+ +# config: +# loadGenerationJobs: +# ttlSecondsAfterFinished: "" +# loadGenerationPods: +# resource: +# cpuRequest: 250m +# memRequest: 128Mi +# cpuLimit: 1000m +# memLimit: 1024Mi +# affinity: +# enableCrInjection: true +# taintTolerations: +# enableCrInjection: true +# metricsExporter: +# image: "containersol/locust_exporter:v0.5.0" +# port: 9646 +# kafka: +# bootstrapServers: localhost:9092 +# acl: +# enabled: false + +# -- Kafka configuration (DEPRECATED - kept for backward compatibility) +kafka: + enabled: false + bootstrapServers: localhost:9092 + security: + enabled: false + protocol: SASL_PLAINTEXT + saslMechanism: SCRAM-SHA-512 + jaasConfig: "" + credentials: + secretName: "" + usernameKey: username + passwordKey: password + +# ============================================================================= +# K8s RBAC Configuration +# ============================================================================= + +k8s: + clusterRole: enabled: true - web: - enabled: true - jvm: - enabled: true - uptime: - enabled: true - processor: - enabled: true - files: - enabled: false - logback: - enabled: false - executor: - enabled: false - export: - prometheus: - step: 'PT30S' diff --git a/cmd/logging.go b/cmd/logging.go new file mode 100644 index 00000000..1dc11c4c --- /dev/null +++ b/cmd/logging.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "time" + + "go.uber.org/zap/zapcore" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// colorize wraps s with ANSI escape codes for the given color code. +func colorize(code int, s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", code, s) +} + +// greenTimeEncoder encodes a timestamp in ISO8601 format with green ANSI color, +// matching the Java operator's %green(%d{ISO8601}) Logback pattern. 
+func greenTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(colorize(32, t.Format("2006-01-02T15:04:05.000Z0700"))) +} + +// yellowNameEncoder encodes the logger name in yellow ANSI color, +// matching the Java operator's %yellow(%C{1}) Logback pattern. +func yellowNameEncoder(name string, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(colorize(33, name)) +} + +// coloredConsoleEncoder returns a zap.Opts that configures the console encoder +// with colored output matching the Java operator's Logback configuration. +func coloredConsoleEncoder() zap.Opts { + return zap.ConsoleEncoder(func(c *zapcore.EncoderConfig) { + c.EncodeLevel = zapcore.CapitalColorLevelEncoder + c.EncodeTime = greenTimeEncoder + c.EncodeName = yellowNameEncoder + }) +} diff --git a/cmd/logging_test.go b/cmd/logging_test.go new file mode 100644 index 00000000..aca85020 --- /dev/null +++ b/cmd/logging_test.go @@ -0,0 +1,89 @@ +package main + +import ( + "testing" + "time" + + "go.uber.org/zap/zapcore" +) + +// testArrayEncoder is a minimal PrimitiveArrayEncoder that captures appended strings. 
+type testArrayEncoder struct { + strings []string +} + +func (e *testArrayEncoder) AppendString(s string) { e.strings = append(e.strings, s) } +func (e *testArrayEncoder) AppendBool(bool) {} +func (e *testArrayEncoder) AppendByteString([]byte) {} +func (e *testArrayEncoder) AppendComplex128(complex128) {} +func (e *testArrayEncoder) AppendComplex64(complex64) {} +func (e *testArrayEncoder) AppendFloat64(float64) {} +func (e *testArrayEncoder) AppendFloat32(float32) {} +func (e *testArrayEncoder) AppendInt(int) {} +func (e *testArrayEncoder) AppendInt64(int64) {} +func (e *testArrayEncoder) AppendInt32(int32) {} +func (e *testArrayEncoder) AppendInt16(int16) {} +func (e *testArrayEncoder) AppendInt8(int8) {} +func (e *testArrayEncoder) AppendUint(uint) {} +func (e *testArrayEncoder) AppendUint64(uint64) {} +func (e *testArrayEncoder) AppendUint32(uint32) {} +func (e *testArrayEncoder) AppendUint16(uint16) {} +func (e *testArrayEncoder) AppendUint8(uint8) {} +func (e *testArrayEncoder) AppendUintptr(uintptr) {} +func (e *testArrayEncoder) AppendDuration(time.Duration) {} +func (e *testArrayEncoder) AppendTime(time.Time) {} +func (e *testArrayEncoder) AppendArray(zapcore.ArrayMarshaler) error { return nil } +func (e *testArrayEncoder) AppendObject(zapcore.ObjectMarshaler) error { return nil } +func (e *testArrayEncoder) AppendReflected(interface{}) error { return nil } + +func TestColorize(t *testing.T) { + tests := []struct { + name string + code int + text string + want string + }{ + {"green", 32, "hello", "\x1b[32mhello\x1b[0m"}, + {"yellow", 33, "world", "\x1b[33mworld\x1b[0m"}, + {"empty", 32, "", "\x1b[32m\x1b[0m"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := colorize(tt.code, tt.text) + if got != tt.want { + t.Errorf("colorize(%d, %q) = %q, want %q", tt.code, tt.text, got, tt.want) + } + }) + } +} + +func TestGreenTimeEncoder(t *testing.T) { + enc := &testArrayEncoder{} + ts := time.Date(2026, 2, 10, 14, 30, 0, 0, time.UTC) 
+ + greenTimeEncoder(ts, enc) + + if len(enc.strings) != 1 { + t.Fatalf("expected 1 appended string, got %d", len(enc.strings)) + } + + want := "\x1b[32m2026-02-10T14:30:00.000Z\x1b[0m" + if enc.strings[0] != want { + t.Errorf("greenTimeEncoder output = %q, want %q", enc.strings[0], want) + } +} + +func TestYellowNameEncoder(t *testing.T) { + enc := &testArrayEncoder{} + + yellowNameEncoder("setup", enc) + + if len(enc.strings) != 1 { + t.Fatalf("expected 1 appended string, got %d", len(enc.strings)) + } + + want := "\x1b[33msetup\x1b[0m" + if enc.strings[0] != want { + t.Errorf("yellowNameEncoder output = %q, want %q", enc.strings[0], want) + } +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 00000000..2be0e0fe --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,347 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "fmt" + "os" + "path/filepath" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + locustv1 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v1" + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/controller" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(locustv1.AddToScheme(scheme)) + utilruntime.Must(locustv2.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + // Parse command-line flags and setup logging + flags := parseFlags() + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&flags.zapOpts), coloredConsoleEncoder())) + + // Create TLS options + tlsOpts := configureTLS(flags.enableHTTP2) + + // Setup certificate watchers and servers + webhookServer, webhookCertWatcher := setupWebhookServer(flags, tlsOpts) + metricsServerOptions, metricsCertWatcher := setupMetricsServer(flags, tlsOpts) + + // Create manager + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: flags.probeAddr, + LeaderElection: flags.enableLeaderElection, + LeaderElectionID: "locust-k8s-operator.locust.io", + // LeaderElectionReleaseOnCancel defines if the leader should step down 
voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // Setup controllers and webhooks + if err := setupControllers(mgr); err != nil { + setupLog.Error(err, "failed to setup controllers") + os.Exit(1) + } + + // Add certificate watchers to manager + if metricsCertWatcher != nil { + setupLog.Info("Adding metrics certificate watcher to manager") + if err := mgr.Add(metricsCertWatcher); err != nil { + setupLog.Error(err, "unable to add metrics certificate watcher to manager") + os.Exit(1) + } + } + + if webhookCertWatcher != nil { + setupLog.Info("Adding webhook certificate watcher to manager") + if err := mgr.Add(webhookCertWatcher); err != nil { + setupLog.Error(err, "unable to add webhook certificate watcher to manager") + os.Exit(1) + } + } + + // Setup health checks + if err := setupHealthChecks(mgr); err != nil { + setupLog.Error(err, "failed to setup health checks") + os.Exit(1) + } + + // Start manager + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +// flagConfig holds all command-line flag values +type flagConfig struct { + metricsAddr string + metricsCertPath string + metricsCertName string + metricsCertKey string + webhookCertPath string + webhookCertName string + webhookCertKey 
string + probeAddr string + enableLeaderElection bool + secureMetrics bool + enableHTTP2 bool + zapOpts zap.Options +} + +// parseFlags parses command-line flags and returns configuration +func parseFlags() *flagConfig { + cfg := &flagConfig{} + + flag.StringVar(&cfg.metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&cfg.probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&cfg.enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&cfg.secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") + flag.StringVar(&cfg.webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") + flag.StringVar(&cfg.webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") + flag.StringVar(&cfg.webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") + flag.StringVar(&cfg.metricsCertPath, "metrics-cert-path", "", + "The directory that contains the metrics server certificate.") + flag.StringVar(&cfg.metricsCertName, "metrics-cert-name", "tls.crt", + "The name of the metrics server certificate file.") + flag.StringVar(&cfg.metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") + flag.BoolVar(&cfg.enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + + cfg.zapOpts = zap.Options{ + Development: false, + } + cfg.zapOpts.BindFlags(flag.CommandLine) + flag.Parse() + + return cfg +} + +// configureTLS creates TLS options based on HTTP/2 setting +func configureTLS(enableHTTP2 bool) 
[]func(*tls.Config) { + var tlsOpts []func(*tls.Config) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + if !enableHTTP2 { + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + tlsOpts = append(tlsOpts, disableHTTP2) + } + + return tlsOpts +} + +// setupWebhookServer creates webhook server with optional certificate watcher +func setupWebhookServer(flags *flagConfig, tlsOpts []func(*tls.Config)) (webhook.Server, *certwatcher.CertWatcher) { + webhookTLSOpts := tlsOpts + var webhookCertWatcher *certwatcher.CertWatcher + + if len(flags.webhookCertPath) > 0 { + certFile := filepath.Join(flags.webhookCertPath, flags.webhookCertName) + keyFile := filepath.Join(flags.webhookCertPath, flags.webhookCertKey) + + setupLog.Info("Waiting for webhook certificate files", + "cert", certFile, "key", keyFile) + + // Poll until both cert files exist (cert-manager may still be issuing) + for { + _, certErr := os.Stat(certFile) + _, keyErr := os.Stat(keyFile) + if certErr == nil && keyErr == nil { + break + } + setupLog.Info("Webhook certificate files not ready, retrying in 1s...") + time.Sleep(time.Second) + } + + var err error + webhookCertWatcher, err = certwatcher.New(certFile, keyFile) + if err != nil { + setupLog.Error(err, "Failed to initialize webhook certificate watcher") + os.Exit(1) + } + + webhookTLSOpts = append(webhookTLSOpts, func(config *tls.Config) { + config.GetCertificate = webhookCertWatcher.GetCertificate + }) + } + + return webhook.NewServer(webhook.Options{ + TLSOpts: webhookTLSOpts, + }), webhookCertWatcher +} + +// setupMetricsServer creates metrics server 
options with optional certificate watcher +func setupMetricsServer(flags *flagConfig, tlsOpts []func(*tls.Config)) (metricsserver.Options, *certwatcher.CertWatcher) { //nolint:lll + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: flags.metricsAddr, + SecureServing: flags.secureMetrics, + TLSOpts: tlsOpts, + } + + if flags.secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + var metricsCertWatcher *certwatcher.CertWatcher + + // If the certificate is not specified, controller-runtime will automatically + // generate self-signed certificates for the metrics server. While convenient for development and testing, + // this setup is not recommended for production. + // + // TODO(user): If you enable certManager, uncomment the following lines: + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates + // managed by cert-manager for the metrics server. + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. 
+ if len(flags.metricsCertPath) > 0 { + setupLog.Info("Initializing metrics certificate watcher using provided certificates", + "metrics-cert-path", flags.metricsCertPath, + "metrics-cert-name", flags.metricsCertName, + "metrics-cert-key", flags.metricsCertKey) + + var err error + metricsCertWatcher, err = certwatcher.New( + filepath.Join(flags.metricsCertPath, flags.metricsCertName), + filepath.Join(flags.metricsCertPath, flags.metricsCertKey), + ) + if err != nil { + setupLog.Error(err, "Failed to initialize metrics certificate watcher") + os.Exit(1) + } + + metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) { + config.GetCertificate = metricsCertWatcher.GetCertificate + }) + } + + return metricsServerOptions, metricsCertWatcher +} + +// setupControllers registers controllers and webhooks with the manager +func setupControllers(mgr ctrl.Manager) error { + // Load operator configuration + cfg, err := config.LoadConfig() + if err != nil { + return fmt.Errorf("failed to load operator configuration: %w", err) + } + setupLog.Info("Operator configuration loaded", + "ttlSecondsAfterFinished", cfg.TTLSecondsAfterFinished, + "metricsExporterImage", cfg.MetricsExporterImage, + "affinityInjection", cfg.EnableAffinityCRInjection, + "tolerationsInjection", cfg.EnableTolerationsCRInjection) + + // Setup LocustTest reconciler + if err := (&controller.LocustTestReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Config: cfg, + Recorder: mgr.GetEventRecorderFor("locusttest-controller"), + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to create controller LocustTest: %w", err) + } + + // Setup webhooks (conversion and validation) + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + // v1 conversion webhook + if err := (&locustv1.LocustTest{}).SetupWebhookWithManager(mgr); err != nil { + return fmt.Errorf("unable to create webhook LocustTest v1: %w", err) + } + // v2 validation webhook + if err := 
(&locustv2.LocustTest{}).SetupWebhookWithManager(mgr); err != nil { + return fmt.Errorf("unable to create webhook LocustTest v2: %w", err) + } + } + // +kubebuilder:scaffold:builder + + return nil +} + +// setupHealthChecks adds health and readiness checks to the manager +func setupHealthChecks(mgr ctrl.Manager) error { + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up health check: %w", err) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + return fmt.Errorf("unable to set up ready check: %w", err) + } + return nil +} diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 00000000..2e2fcba9 --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,33 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: locust-k8s-operator + app.kubernetes.io/part-of: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: serving-cert + namespace: system +spec: + dnsNames: + - webhook-service.system.svc + - webhook-service.system.svc.cluster.local + issuerRef: + kind: Issuer + name: locust-k8s-operator-selfsigned-issuer + secretName: webhook-server-cert diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..2cebb8f6 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- 
certificate.yaml diff --git a/config/crd/bases/locust.io_locusttests.yaml b/config/crd/bases/locust.io_locusttests.yaml new file mode 100644 index 00000000..c2ccc837 --- /dev/null +++ b/config/crd/bases/locust.io_locusttests.yaml @@ -0,0 +1,3643 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: locusttests.locust.io +spec: + group: locust.io + names: + kind: LocustTest + listKind: LocustTestList + plural: locusttests + shortNames: + - lotest + singular: locusttest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Master pod command seed + jsonPath: .spec.masterCommandSeed + name: master_cmd + type: string + - description: Number of requested worker pods + jsonPath: .spec.workerReplicas + name: worker_replica_count + type: integer + - description: Locust image + jsonPath: .spec.image + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: locust.io/v1 LocustTest is deprecated, migrate to locust.io/v2 + name: v1 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API (v1 - DEPRECATED). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. + properties: + affinity: + description: Affinity defines affinity rules for pod scheduling. + properties: + nodeAffinity: + description: NodeAffinity defines node affinity rules. + properties: + requiredDuringSchedulingIgnoredDuringExecution: + additionalProperties: + type: string + description: |- + RequiredDuringSchedulingIgnoredDuringExecution defines required node affinity rules. + The map keys are label keys and values are label values that nodes must have. + type: object + type: object + type: object + annotations: + description: Annotations defines annotations to attach to deployed + pods. + properties: + master: + additionalProperties: + type: string + description: Master defines annotations attached to the master + pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines annotations attached to worker pods. + type: object + type: object + configMap: + description: ConfigMap is the name of the ConfigMap containing the + test file(s). + type: string + image: + description: Image is the Locust container image to use. + type: string + imagePullPolicy: + description: ImagePullPolicy defines when to pull the image. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets is a list of secret names for pulling + images from private registries. + items: + type: string + type: array + labels: + description: Labels defines labels to attach to deployed pods. + properties: + master: + additionalProperties: + type: string + description: Master defines labels attached to the master pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines labels attached to worker pods. 
+ type: object + type: object + libConfigMap: + description: LibConfigMap is the name of the ConfigMap containing + lib directory files. + type: string + masterCommandSeed: + description: |- + MasterCommandSeed is the command seed for the master pod. + This forms the base of the locust master command. + type: string + tolerations: + description: Tolerations defines tolerations for pod scheduling. + items: + description: LocustTestToleration defines a toleration for pod scheduling. + properties: + effect: + description: Effect indicates the taint effect to match. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: Key is the taint key that the toleration applies + to. + type: string + operator: + description: Operator represents the relationship between the + key and value. + enum: + - Exists + - Equal + type: string + value: + description: Value is the taint value the toleration matches + to. + type: string + required: + - effect + - key + - operator + type: object + type: array + workerCommandSeed: + description: |- + WorkerCommandSeed is the command seed for worker pods. + This forms the base of the locust worker command. + type: string + workerReplicas: + default: 1 + description: WorkerReplicas is the number of worker pods to spawn. + format: int32 + maximum: 500 + minimum: 1 + type: integer + required: + - image + - masterCommandSeed + - workerCommandSeed + - workerReplicas + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. 
+ type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current test phase + jsonPath: .status.phase + name: Phase + type: string + - description: Requested worker count + jsonPath: .spec.worker.replicas + name: Workers + type: integer + - description: Connected workers + jsonPath: .status.connectedWorkers + name: Connected + type: integer + - jsonPath: .spec.image + name: Image + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. + properties: + env: + description: Env configuration for environment variable injection. + properties: + configMapRefs: + description: ConfigMapRefs injects all keys from ConfigMaps as + environment variables. + items: + description: ConfigMapEnvSource defines a ConfigMap environment + source. + properties: + name: + description: Name of the ConfigMap. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. 
+ type: string + required: + - name + type: object + type: array + secretMounts: + description: SecretMounts mounts secrets as files in the container. + items: + description: SecretMount defines a secret file mount. + properties: + mountPath: + description: MountPath is the path where the secret should + be mounted. + type: string + name: + description: Name of the secret to mount. + type: string + readOnly: + default: true + description: ReadOnly mounts the secret as read-only. + type: boolean + required: + - mountPath + - name + type: object + type: array + secretRefs: + description: SecretRefs injects all keys from Secrets as environment + variables. + items: + description: SecretEnvSource defines a Secret environment source. + properties: + name: + description: Name of the Secret. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. + type: string + required: + - name + type: object + type: array + variables: + description: Variables defines specific environment variables. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. 
+ properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + type: object + image: + description: Image is the container image for Locust pods. + type: string + imagePullPolicy: + default: IfNotPresent + description: ImagePullPolicy for the Locust container. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets for pulling from private registries. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + master: + description: Master configuration for the master node. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the master pod. + type: object + autoquit: + description: Autoquit configuration for automatic test termination. + properties: + enabled: + default: true + description: Enabled enables the --autoquit flag. + type: boolean + timeout: + default: 60 + description: Timeout in seconds after test completion before + quitting. + format: int32 + minimum: 0 + type: integer + required: + - enabled + type: object + autostart: + default: true + description: Autostart enables the --autostart flag to start the + test automatically. + type: boolean + command: + description: |- + Command is the base command for the master node. + The operator appends: --master --master-port=5557 --expect-workers=N + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for the master pod. + type: object + resources: + description: Resources defines resource requests and limits for + the master pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + type: object + observability: + description: Observability configuration for metrics and tracing. + properties: + openTelemetry: + description: OpenTelemetry configuration for native Locust OTel + integration. + properties: + enabled: + default: false + description: |- + Enabled enables OpenTelemetry integration. + When true, adds --otel flag to Locust command. 
+ type: boolean + endpoint: + description: |- + Endpoint is the OTel collector endpoint (e.g., "otel-collector:4317"). + Required when Enabled is true. + type: string + extraEnvVars: + additionalProperties: + type: string + description: ExtraEnvVars for additional OTel SDK configuration. + type: object + insecure: + default: false + description: Insecure skips TLS verification for the collector + connection. + type: boolean + protocol: + default: grpc + description: Protocol for OTel export. + enum: + - grpc + - http/protobuf + type: string + required: + - enabled + type: object + type: object + scheduling: + description: Scheduling configuration for pod placement. + properties: + affinity: + description: Affinity rules for pod scheduling. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector for pod scheduling. + type: object + tolerations: + description: Tolerations for pod scheduling. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + testFiles: + description: TestFiles configuration for locustfile and library mounting. + properties: + configMapRef: + description: ConfigMapRef is the name of the ConfigMap containing + locustfile(s). + type: string + libConfigMapRef: + description: LibConfigMapRef is the name of the ConfigMap containing + library files. + type: string + libMountPath: + default: /opt/locust/lib + description: LibMountPath is the mount path for library files. + type: string + srcMountPath: + default: /lotest/src + description: SrcMountPath is the mount path for test files. + type: string + type: object + volumeMounts: + description: VolumeMounts for the locust container with target selection. + items: + description: TargetedVolumeMount extends VolumeMount with target + pod selection. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + target: + default: both + description: Target specifies which pods receive this mount. + enum: + - master + - worker + - both + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes to add to pods. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. 
+ properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. 
Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. 
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to use for + this volume. 
+ type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. 
This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. 
To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. 
+ The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". 
If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
+ type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. 
Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + worker: + description: Worker configuration for worker nodes. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for worker pods. + type: object + command: + description: |- + Command is the base command for worker nodes. + The operator appends: --worker --master-host= --master-port=5557 + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for worker pods. + type: object + replicas: + description: Replicas is the number of worker pods to create. + format: int32 + maximum: 500 + minimum: 1 + type: integer + resources: + description: Resources defines resource requests and limits for + worker pods. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + - replicas + type: object + required: + - image + - master + - worker + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. + properties: + completionTime: + description: CompletionTime is when the test completed. + format: date-time + type: string + conditions: + description: Conditions represent the latest available observations + of the test's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectedWorkers: + description: |- + ConnectedWorkers is the approximate number of connected workers, + derived from the worker Job's Active pod count (Job.Status.Active). + This is an approximation as Kubernetes Job.Status.Active may lag behind + actual Locust worker connections. + format: int32 + type: integer + expectedWorkers: + description: ExpectedWorkers is the number of workers expected to + connect. + format: int32 + type: integer + observedGeneration: + description: ObservedGeneration is the most recent generation observed + by the controller. + format: int64 + type: integer + phase: + description: Phase is the current lifecycle phase of the test. + enum: + - Pending + - Running + - Succeeded + - Failed + type: string + startTime: + description: StartTime is when the test started. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 00000000..03acb1a0 --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,17 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/locust.io_locusttests.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
+# patches here are for enabling the conversion webhook for each CRD +- path: patches/webhook_in_locusttests.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/webhook_in_locusttests.yaml b/config/crd/patches/webhook_in_locusttests.yaml new file mode 100644 index 00000000..e5d8b232 --- /dev/null +++ b/config/crd/patches/webhook_in_locusttests.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: locusttests.locust.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/test/kustomization.yaml b/config/crd/test/kustomization.yaml new file mode 100644 index 00000000..d694e768 --- /dev/null +++ b/config/crd/test/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - base.yaml diff --git a/config/crd/test/locust.io_locusttests.yaml 
b/config/crd/test/locust.io_locusttests.yaml new file mode 100644 index 00000000..ca429aab --- /dev/null +++ b/config/crd/test/locust.io_locusttests.yaml @@ -0,0 +1,3642 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: locusttests.locust.io +spec: + group: locust.io + names: + kind: LocustTest + listKind: LocustTestList + plural: locusttests + shortNames: + - lotest + singular: locusttest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Master pod command seed + jsonPath: .spec.masterCommandSeed + name: master_cmd + type: string + - description: Number of requested worker pods + jsonPath: .spec.workerReplicas + name: worker_replica_count + type: integer + - description: Locust image + jsonPath: .spec.image + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: locust.io/v1 LocustTest is deprecated, migrate to locust.io/v2 + name: v1 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API (v1 - DEPRECATED). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. 
+ properties: + affinity: + description: Affinity defines affinity rules for pod scheduling. + properties: + nodeAffinity: + description: NodeAffinity defines node affinity rules. + properties: + requiredDuringSchedulingIgnoredDuringExecution: + additionalProperties: + type: string + description: |- + RequiredDuringSchedulingIgnoredDuringExecution defines required node affinity rules. + The map keys are label keys and values are label values that nodes must have. + type: object + type: object + type: object + annotations: + description: Annotations defines annotations to attach to deployed + pods. + properties: + master: + additionalProperties: + type: string + description: Master defines annotations attached to the master + pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines annotations attached to worker pods. + type: object + type: object + configMap: + description: ConfigMap is the name of the ConfigMap containing the + test file(s). + type: string + image: + description: Image is the Locust container image to use. + type: string + imagePullPolicy: + description: ImagePullPolicy defines when to pull the image. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets is a list of secret names for pulling + images from private registries. + items: + type: string + type: array + labels: + description: Labels defines labels to attach to deployed pods. + properties: + master: + additionalProperties: + type: string + description: Master defines labels attached to the master pod. + type: object + worker: + additionalProperties: + type: string + description: Worker defines labels attached to worker pods. + type: object + type: object + libConfigMap: + description: LibConfigMap is the name of the ConfigMap containing + lib directory files. + type: string + masterCommandSeed: + description: |- + MasterCommandSeed is the command seed for the master pod. 
+ This forms the base of the locust master command. + type: string + tolerations: + description: Tolerations defines tolerations for pod scheduling. + items: + description: LocustTestToleration defines a toleration for pod scheduling. + properties: + effect: + description: Effect indicates the taint effect to match. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: Key is the taint key that the toleration applies + to. + type: string + operator: + description: Operator represents the relationship between the + key and value. + enum: + - Exists + - Equal + type: string + value: + description: Value is the taint value the toleration matches + to. + type: string + required: + - effect + - key + - operator + type: object + type: array + workerCommandSeed: + description: |- + WorkerCommandSeed is the command seed for worker pods. + This forms the base of the locust worker command. + type: string + workerReplicas: + default: 1 + description: WorkerReplicas is the number of worker pods to spawn. + format: int32 + maximum: 500 + minimum: 1 + type: integer + required: + - image + - masterCommandSeed + - workerCommandSeed + - workerReplicas + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current test phase + jsonPath: .status.phase + name: Phase + type: string + - description: Requested worker count + jsonPath: .spec.worker.replicas + name: Workers + type: integer + - description: Connected workers + jsonPath: .status.connectedWorkers + name: Connected + type: integer + - jsonPath: .spec.image + name: Image + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: LocustTest is the Schema for the locusttests API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocustTestSpec defines the desired state of LocustTest. + properties: + env: + description: Env configuration for environment variable injection. + properties: + configMapRefs: + description: ConfigMapRefs injects all keys from ConfigMaps as + environment variables. + items: + description: ConfigMapEnvSource defines a ConfigMap environment + source. + properties: + name: + description: Name of the ConfigMap. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. + type: string + required: + - name + type: object + type: array + secretMounts: + description: SecretMounts mounts secrets as files in the container. + items: + description: SecretMount defines a secret file mount. + properties: + mountPath: + description: MountPath is the path where the secret should + be mounted. + type: string + name: + description: Name of the secret to mount. + type: string + readOnly: + default: true + description: ReadOnly mounts the secret as read-only. + type: boolean + required: + - mountPath + - name + type: object + type: array + secretRefs: + description: SecretRefs injects all keys from Secrets as environment + variables. + items: + description: SecretEnvSource defines a Secret environment source. 
+ properties: + name: + description: Name of the Secret. + type: string + prefix: + description: Prefix to add to all keys when injecting as + env vars. + type: string + required: + - name + type: object + type: array + variables: + description: Variables defines specific environment variables. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + type: object + image: + description: Image is the container image for Locust pods. + type: string + imagePullPolicy: + default: IfNotPresent + description: ImagePullPolicy for the Locust container. + enum: + - Always + - IfNotPresent + - Never + type: string + imagePullSecrets: + description: ImagePullSecrets for pulling from private registries. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + master: + description: Master configuration for the master node. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for the master pod. + type: object + autoquit: + description: Autoquit configuration for automatic test termination. + properties: + enabled: + default: true + description: Enabled enables the --autoquit flag. + type: boolean + timeout: + default: 60 + description: Timeout in seconds after test completion before + quitting. 
+ format: int32 + minimum: 0 + type: integer + required: + - enabled + type: object + autostart: + default: true + description: Autostart enables the --autostart flag to start the + test automatically. + type: boolean + command: + description: |- + Command is the base command for the master node. + The operator appends: --master --master-port=5557 --expect-workers=N + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for the master pod. + type: object + resources: + description: Resources defines resource requests and limits for + the master pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + type: object + observability: + description: Observability configuration for metrics and tracing. + properties: + openTelemetry: + description: OpenTelemetry configuration for native Locust OTel + integration. + properties: + enabled: + default: false + description: |- + Enabled enables OpenTelemetry integration. + When true, adds --otel flag to Locust command. + type: boolean + endpoint: + description: |- + Endpoint is the OTel collector endpoint (e.g., "otel-collector:4317"). + Required when Enabled is true. + type: string + extraEnvVars: + additionalProperties: + type: string + description: ExtraEnvVars for additional OTel SDK configuration. + type: object + insecure: + default: false + description: Insecure skips TLS verification for the collector + connection. + type: boolean + protocol: + default: grpc + description: Protocol for OTel export. + enum: + - grpc + - http/protobuf + type: string + required: + - enabled + type: object + type: object + scheduling: + description: Scheduling configuration for pod placement. + properties: + affinity: + description: Affinity rules for pod scheduling. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector for pod scheduling. + type: object + tolerations: + description: Tolerations for pod scheduling. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + testFiles: + description: TestFiles configuration for locustfile and library mounting. + properties: + configMapRef: + description: ConfigMapRef is the name of the ConfigMap containing + locustfile(s). + type: string + libConfigMapRef: + description: LibConfigMapRef is the name of the ConfigMap containing + library files. + type: string + libMountPath: + default: /opt/locust/lib + description: LibMountPath is the mount path for library files. + type: string + srcMountPath: + default: /lotest/src + description: SrcMountPath is the mount path for test files. + type: string + type: object + volumeMounts: + description: VolumeMounts for the locust container with target selection. + items: + description: TargetedVolumeMount extends VolumeMount with target + pod selection. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + target: + default: both + description: Target specifies which pods receive this mount. 
+ enum: + - master + - worker + - both + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes to add to pods. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. 
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
+ properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. 
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". 
If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
+ type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. 
Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + worker: + description: Worker configuration for worker nodes. + properties: + annotations: + additionalProperties: + type: string + description: Annotations for worker pods. + type: object + command: + description: |- + Command is the base command for worker nodes. + The operator appends: --worker --master-host= --master-port=5557 + type: string + extraArgs: + description: ExtraArgs are additional CLI arguments appended to + the command. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Labels for worker pods. + type: object + replicas: + description: Replicas is the number of worker pods to create. + format: int32 + maximum: 500 + minimum: 1 + type: integer + resources: + description: Resources defines resource requests and limits for + worker pods. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + required: + - command + - replicas + type: object + required: + - image + - master + - worker + type: object + status: + description: LocustTestStatus defines the observed state of LocustTest. + properties: + completionTime: + description: CompletionTime is when the test completed. + format: date-time + type: string + conditions: + description: Conditions represent the latest available observations + of the test's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectedWorkers: + description: |- + ConnectedWorkers is the approximate number of connected workers, + derived from the worker Job's Active pod count (Job.Status.Active). + This is an approximation as Kubernetes Job.Status.Active may lag behind + actual Locust worker connections. + format: int32 + type: integer + expectedWorkers: + description: ExpectedWorkers is the number of workers expected to + connect. + format: int32 + type: integer + observedGeneration: + description: ObservedGeneration is the most recent generation observed + by the controller. + format: int64 + type: integer + phase: + description: Phase is the current lifecycle phase of the test. + enum: + - Pending + - Running + - Succeeded + - Failed + type: string + startTime: + description: StartTime is when the test started. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/default/cert_metrics_manager_patch.yaml b/config/default/cert_metrics_manager_patch.yaml new file mode 100644 index 00000000..d9750155 --- /dev/null +++ b/config/default/cert_metrics_manager_patch.yaml @@ -0,0 +1,30 @@ +# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. 
+ +# Add the volumeMount for the metrics-server certs +- op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: + mountPath: /tmp/k8s-metrics-server/metrics-certs + name: metrics-certs + readOnly: true + +# Add the --metrics-cert-path argument for the metrics server +- op: add + path: /spec/template/spec/containers/0/args/- + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs + +# Add the metrics-server certs volume configuration +- op: add + path: /spec/template/spec/volumes/- + value: + name: metrics-certs + secret: + secretName: metrics-server-cert + optional: false + items: + - key: ca.crt + path: ca.crt + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 00000000..f2ac65e0 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,164 @@ +# Adds namespace to all resources. +namespace: locust-k8s-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: locust-k8s-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. 
+# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- path: manager_webhook_patch.yaml + target: + kind: Deployment + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +replacements: + - source: # Uncomment the following block if you have any webhook + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.name # Name of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.' + index: 0 + create: true + - source: + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.namespace # Namespace of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.' 
+ index: 1 + create: true + - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPath: .metadata.namespace # Namespace of the certificate CR + targets: + - select: + kind: CustomResourceDefinition + group: apiextensions.k8s.io + version: v1 + name: locusttests.locust.io + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - source: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPath: .metadata.name + targets: + - select: + kind: CustomResourceDefinition + group: apiextensions.k8s.io + version: v1 + name: locusttests.locust.io + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + # [WEBHOOK] Inject CA bundle for ValidatingWebhookConfiguration + - source: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPath: .metadata.namespace + targets: + - select: + kind: ValidatingWebhookConfiguration + name: validating-webhook-configuration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - source: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert + fieldPath: .metadata.name + targets: + - select: + kind: ValidatingWebhookConfiguration + name: validating-webhook-configuration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml new file mode 100644 index 00000000..2aaef653 --- /dev/null +++ b/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: 
/spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..17ef57d3 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 + - --webhook-cert-path=/tmp/k8s-webhook-server/serving-certs + ports: + - containerPort: 8443 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml new file mode 100644 index 00000000..abb01380 --- /dev/null +++ b/config/default/metrics_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 00000000..5c5f0b84 --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 00000000..a1272773 --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Namespace 
+metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. + # This ensures that deployments meet the highest security requirements for Kubernetes. 
+ # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + ports: [] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 256Mi + requests: + cpu: 10m + memory: 64Mi + volumeMounts: [] + volumes: [] + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml new file mode 100644 index 00000000..f25cb45e --- /dev/null +++ b/config/manifests/kustomization.yaml @@ -0,0 +1,28 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: +- bases/locust-k8s-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard + +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patches: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: controller-manager +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 
+# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. +# - op: remove + +# path: /spec/template/spec/containers/0/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. +# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/config/network-policy/allow-metrics-traffic.yaml b/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 00000000..de458bb0 --- /dev/null +++ b/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/config/network-policy/kustomization.yaml b/config/network-policy/kustomization.yaml new file mode 100644 index 00000000..ec0fb5e5 --- /dev/null +++ b/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..fdc5481b --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- monitor.yaml + +# [PROMETHEUS-WITH-CERTS] The 
following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS-WITH-CERTS] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. +#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 00000000..28706b42 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,27 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification, exposing the system to potential man-in-the-middle attacks. + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/monitor_tls_patch.yaml, + # which securely references the certificate from the 'metrics-server-cert' secret. 
+ insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: locust-k8s-operator diff --git a/config/prometheus/monitor_tls_patch.yaml b/config/prometheus/monitor_tls_patch.yaml new file mode 100644 index 00000000..5bf84ce0 --- /dev/null +++ b/config/prometheus/monitor_tls_patch.yaml @@ -0,0 +1,19 @@ +# Patch for Prometheus ServiceMonitor to enable secure TLS configuration +# using certificates managed by cert-manager +- op: replace + path: /spec/endpoints/0/tlsConfig + value: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 00000000..6d67c1e9 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,28 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. 
+# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the locust-k8s-operator itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- locusttest_admin_role.yaml +- locusttest_editor_role.yaml +- locusttest_viewer_role.yaml + diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..5ad65d3b --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..805d83c3 --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/locusttest_admin_role.yaml 
b/config/rbac/locusttest_admin_role.yaml new file mode 100644 index 00000000..9aa5495b --- /dev/null +++ b/config/rbac/locusttest_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project locust-k8s-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over locust.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: locusttest-admin-role +rules: +- apiGroups: + - locust.io + resources: + - locusttests + verbs: + - '*' +- apiGroups: + - locust.io + resources: + - locusttests/status + verbs: + - get diff --git a/config/rbac/locusttest_editor_role.yaml b/config/rbac/locusttest_editor_role.yaml new file mode 100644 index 00000000..a86b3304 --- /dev/null +++ b/config/rbac/locusttest_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project locust-k8s-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the locust.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: locusttest-editor-role +rules: +- apiGroups: + - locust.io + resources: + - locusttests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - locust.io + resources: + - locusttests/status + verbs: + - get diff --git a/config/rbac/locusttest_viewer_role.yaml b/config/rbac/locusttest_viewer_role.yaml new file mode 100644 index 00000000..2f37c61d --- /dev/null +++ b/config/rbac/locusttest_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project locust-k8s-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to locust.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: locusttest-viewer-role +rules: +- apiGroups: + - locust.io + resources: + - locusttests + verbs: + - get + - list + - watch +- apiGroups: + - locust.io + resources: + - locusttests/status + verbs: + - get diff --git a/config/rbac/metrics_auth_role.yaml b/config/rbac/metrics_auth_role.yaml new file mode 100644 index 00000000..32d2e4ec --- /dev/null +++ b/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/metrics_auth_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 00000000..e775d67f --- /dev/null +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/metrics_reader_role.yaml b/config/rbac/metrics_reader_role.yaml new file mode 100644 index 00000000..51a75db4 --- /dev/null +++ b/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 00000000..ffd549c1 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: 
+ name: manager-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - locust.io + resources: + - locusttests + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - locust.io + resources: + - locusttests/finalizers + verbs: + - update +- apiGroups: + - locust.io + resources: + - locusttests/status + verbs: + - get + - patch + - update diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 00000000..2c3dfa5d --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 00000000..85b1fe13 --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: locust-k8s-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 00000000..24095aa8 --- /dev/null +++ b/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- locust_v2_locusttest.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git 
a/config/samples/locust_v2_locusttest.yaml b/config/samples/locust_v2_locusttest.yaml new file mode 100644 index 00000000..a5e0c44e --- /dev/null +++ b/config/samples/locust_v2_locusttest.yaml @@ -0,0 +1,44 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: sample-test + namespace: default +spec: + image: locustio/locust:2.43.1 + imagePullPolicy: IfNotPresent + + master: + command: "--locustfile /lotest/src/locustfile.py" + autostart: true + autoquit: + enabled: true + timeout: 60 + labels: + role: master + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "1000m" + memory: "1Gi" + + worker: + command: "--locustfile /lotest/src/locustfile.py" + replicas: 5 + labels: + role: worker + resources: + requests: + cpu: "250m" + memory: "256Mi" + limits: + cpu: "500m" + memory: "512Mi" + + testFiles: + configMapRef: my-locust-tests + + scheduling: + nodeSelector: + workload-type: testing diff --git a/config/samples/locust_v2_locusttest_with_env.yaml b/config/samples/locust_v2_locusttest_with_env.yaml new file mode 100644 index 00000000..31c3b249 --- /dev/null +++ b/config/samples/locust_v2_locusttest_with_env.yaml @@ -0,0 +1,41 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: load-test-with-env + namespace: default +spec: + image: locustio/locust:2.20.0 + imagePullPolicy: IfNotPresent + + master: + command: "--locustfile /lotest/src/locustfile.py" + + worker: + command: "--locustfile /lotest/src/locustfile.py" + replicas: 2 + + testFiles: + configMapRef: locust-scripts + + env: + # Inject all keys from a ConfigMap as env vars with prefix + configMapRefs: + - name: app-config + prefix: "APP_" + # Inject all keys from a Secret as env vars + secretRefs: + - name: api-credentials + # Individual environment variables + variables: + - name: TARGET_HOST + value: "https://api.example.com" + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: app-config + key: log-level + # Mount secrets as files + secretMounts: + - name: 
tls-certs + mountPath: /etc/locust/certs + readOnly: true diff --git a/config/samples/locust_v2_locusttest_with_otel.yaml b/config/samples/locust_v2_locusttest_with_otel.yaml new file mode 100644 index 00000000..6737eaa9 --- /dev/null +++ b/config/samples/locust_v2_locusttest_with_otel.yaml @@ -0,0 +1,38 @@ +# Example LocustTest with OpenTelemetry configuration +# This sample demonstrates native Locust OTel integration +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: locusttest-with-otel + namespace: default +spec: + image: "locustio/locust:2.32.0" + imagePullPolicy: IfNotPresent + + master: + command: "--locustfile /lotest/src/locustfile.py --host https://example.com" + + worker: + command: "--locustfile /lotest/src/locustfile.py --host https://example.com" + replicas: 3 + + testFiles: + configMapRef: locust-scripts + + # OpenTelemetry configuration + # When enabled, this replaces the Prometheus metrics exporter sidecar + # with native Locust OTel integration + observability: + openTelemetry: + enabled: true + # OTel collector endpoint (required when enabled) + endpoint: "otel-collector.monitoring:4317" + # Protocol: "grpc" (default) or "http/protobuf" + protocol: grpc + # Skip TLS verification (useful for development) + insecure: true + # Additional OTel SDK configuration + extraEnvVars: + OTEL_RESOURCE_ATTRIBUTES: "service.name=locust-load-test,service.version=1.0.0" + OTEL_TRACES_SAMPLER: "parentbased_traceidratio" + OTEL_TRACES_SAMPLER_ARG: "0.1" diff --git a/config/samples/locust_v2_locusttest_with_volumes.yaml b/config/samples/locust_v2_locusttest_with_volumes.yaml new file mode 100644 index 00000000..5b245106 --- /dev/null +++ b/config/samples/locust_v2_locusttest_with_volumes.yaml @@ -0,0 +1,42 @@ +# Example LocustTest with user-defined volumes +# Demonstrates volume mounting with target selection (master/worker/both) +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: load-test-with-volumes + namespace: default +spec: + image: 
locustio/locust:2.20.0 + + master: + command: "--locustfile /lotest/src/locustfile.py" + + worker: + command: "--locustfile /lotest/src/locustfile.py" + replicas: 2 + + testFiles: + configMapRef: locust-scripts + + # User-defined volumes (all Kubernetes volume types supported) + volumes: + - name: test-results + emptyDir: {} + - name: shared-data + emptyDir: {} + - name: custom-certs + secret: + secretName: tls-certificates + + # Volume mounts with target selection + volumeMounts: + - name: test-results + mountPath: /results + target: master # Only master writes results + - name: shared-data + mountPath: /shared + target: both # All pods share this volume + - name: custom-certs + mountPath: /etc/ssl/custom + readOnly: true + target: worker # Only workers need certs for HTTPS testing diff --git a/config/samples/locusttest_v2_production.yaml b/config/samples/locusttest_v2_production.yaml new file mode 100644 index 00000000..ed323b94 --- /dev/null +++ b/config/samples/locusttest_v2_production.yaml @@ -0,0 +1,150 @@ +# ============================================================================== +# PRODUCTION-READY LOCUSTTEST EXAMPLE +# ============================================================================== +# This example demonstrates best practices for running Locust load tests in +# production environments, including resource management, high availability, +# and automatic cleanup. +# +# Copy and adapt this as a starting point for your production workloads. 
+# ============================================================================== + +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: production-load-test + namespace: default + labels: + app.kubernetes.io/name: locust-load-test + app.kubernetes.io/part-of: performance-testing + environment: production + annotations: + description: "Production-ready load test with resource limits and anti-affinity" + +spec: + # ============================================================================== + # IMAGE CONFIGURATION + # ============================================================================== + # Use a specific version tag (not :latest) for reproducibility + image: locustio/locust:2.43.1 + imagePullPolicy: IfNotPresent + + # ============================================================================== + # MASTER CONFIGURATION + # ============================================================================== + # The master node orchestrates the test and aggregates results from workers + master: + command: "--locustfile /lotest/src/locustfile.py" + + # Autostart: Begin test immediately when all workers connect + # Production tests often use autostart=true for CI/CD pipelines + autostart: true + + # Autoquit: Automatically terminate after test completion + # Prevents resource waste and enables clean CI/CD integration + # The 60s timeout allows time to collect final metrics before shutdown + autoquit: + enabled: true + timeout: 60 + + # Resource limits: Critical for production stability + # Master handles aggregation and UI, needs moderate resources + # Requests: Guaranteed allocation (scheduler requirement) + # Limits: Maximum allowed (prevents runaway resource consumption) + resources: + requests: + cpu: "500m" # 0.5 CPU cores guaranteed + memory: "512Mi" # 512 MiB guaranteed + limits: + cpu: "1000m" # Max 1 CPU core + memory: "1Gi" # Max 1 GiB + + labels: + role: master + component: load-test-orchestrator + + annotations: + prometheus.io/scrape: 
"true" + prometheus.io/port: "8089" + + # ============================================================================== + # WORKER CONFIGURATION + # ============================================================================== + # Workers generate the actual load against your target system + worker: + command: "--locustfile /lotest/src/locustfile.py" + + # Replicas: Scale based on target load + # 10 workers can typically generate 5,000-10,000 RPS depending on test complexity + # Increase for higher load requirements + replicas: 10 + + # Resource limits: Size based on test complexity + # Workers need CPU for request generation and memory for client connections + # These values are suitable for typical HTTP load tests + resources: + requests: + cpu: "250m" # 0.25 CPU cores per worker + memory: "256Mi" # 256 MiB per worker + limits: + cpu: "500m" # Max 0.5 CPU per worker + memory: "512Mi" # Max 512 MiB per worker + + labels: + role: worker + component: load-generator + + annotations: + # Workers don't expose metrics individually (master aggregates) + prometheus.io/scrape: "false" + + # ============================================================================== + # TEST FILES + # ============================================================================== + # Mount your locustfile and any dependencies from a ConfigMap + # Create the ConfigMap separately: kubectl create configmap my-locust-tests --from-file=locustfile.py + testFiles: + configMapRef: my-locust-tests + + # ============================================================================== + # SCHEDULING CONFIGURATION + # ============================================================================== + # High availability: Spread workers across nodes to survive node failures + scheduling: + # Anti-affinity: Prefer to schedule workers on different nodes + # This improves load distribution and resilience + # Uses preferredDuringSchedulingIgnoredDuringExecution (soft constraint) + # to avoid blocking 
deployment if insufficient nodes exist + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + # Match workers from this same LocustTest + labelSelector: + matchLabels: + performance-test-name: production-load-test + role: worker + # Spread across nodes (not zones, as single-zone testing is common) + topologyKey: kubernetes.io/hostname + + # Optional: Target specific node pools for load generation + # Uncomment and adjust based on your cluster setup + # nodeSelector: + # workload-type: load-testing + + # Optional: Tolerate taints on dedicated load-testing nodes + # tolerations: + # - key: "workload-type" + # operator: "Equal" + # value: "load-testing" + # effect: "NoSchedule" + + # ============================================================================== + # NOTES ON WHAT'S NOT INCLUDED + # ============================================================================== + # This example intentionally omits: + # - secretMounts: Keep secrets out of example files (security sensitive) + # - observability.openTelemetry: Optional advanced feature, adds complexity + # + # Add these sections to your actual CRs based on your needs. + # See the API reference docs for full configuration options. 
diff --git a/config/scorecard/bases/config.yaml b/config/scorecard/bases/config.yaml new file mode 100644 index 00000000..c7704784 --- /dev/null +++ b/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml new file mode 100644 index 00000000..54e8aa50 --- /dev/null +++ b/config/scorecard/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +- bases/config.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +patches: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +# +kubebuilder:scaffold:patches diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml new file mode 100644 index 00000000..0b45f2fe --- /dev/null +++ b/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: basic + test: basic-check-spec-test diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml new file mode 100644 index 00000000..8cc12589 --- /dev/null +++ b/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: 
quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.42.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000..36d4cc6e --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- manifests.yaml +- service.yaml diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000..e04d1d6d --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-locust-io-v2-locusttest + failurePolicy: Fail + name: vlocusttest-v2.kb.io + rules: + - apiGroups: + - locust.io + apiVersions: + - v2 + operations: + - CREATE + - UPDATE + resources: + - locusttests + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000..56767793 --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - 
port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/docs/advanced_topics.md b/docs/advanced_topics.md index d9ceb0d0..9c27e191 100644 --- a/docs/advanced_topics.md +++ b/docs/advanced_topics.md @@ -1,237 +1,31 @@ --- -title: Advanced topics -description: Advanced configuration and integration topics for experienced users +title: Advanced Topics +description: Content reorganized into focused How-To Guides tags: - advanced - configuration - - kafka - - aws msk - - technical + - how-to --- -# Advanced topics +# Advanced Topics -Basic configuration is not always enough to satisfy the performance test needs, for example when needing to work with Kafka and MSK. Below is a collection of topics of an advanced nature. This list will be keep growing as the tool matures more and more. +!!! info "Content reorganized" + These topics have been reorganized into focused How-To Guides for easier discovery and navigation. Find what you need using the mapping table below. -## :material-apache-kafka: Kafka & AWS MSK configuration +## Find what you need -Generally speaking, the usage of Kafka in a _locustfile_ is identical to how it would be used anywhere else within the cloud context. Thus, no special setup is needed for the purposes of performance testing with the _Operator_. -That being said, if an organization is using kafka in production, chances are that authenticated kafka is being used. One of the main providers of such managed service is _AWS_ in the form of _MSK_. For that end, the _Operator_ have an _out-of-the-box_ support for MSK. 
+| Topic | New Location | +|-------|-------------| +| Kafka & AWS MSK | [Configure Kafka integration](how-to-guides/configuration/configure-kafka.md) | +| Node Affinity | [Use node affinity](how-to-guides/scaling/use-node-affinity.md) | +| Tolerations | [Configure tolerations](how-to-guides/scaling/configure-tolerations.md) | +| Node Selector | [Use node selector](how-to-guides/scaling/use-node-selector.md) | +| Resource Management | [Configure resources](how-to-guides/configuration/configure-resources.md) | +| Private Registry | [Use private registry](how-to-guides/configuration/use-private-registry.md) | +| Automatic Cleanup | [Configure TTL](how-to-guides/configuration/configure-ttl.md) | +| OpenTelemetry | [Configure OpenTelemetry](how-to-guides/observability/configure-opentelemetry.md) | +| Secret Injection | [Inject secrets](how-to-guides/security/inject-secrets.md) | +| Volume Mounting | [Mount volumes](how-to-guides/configuration/mount-volumes.md) | +| Separate Resource Specs | [API Reference](api_reference.md) | -To enable performance testing with _MSK_, a central/global Kafka user can be created by the "cloud admin" or "the team" responsible for the _Operator_ deployment within the organization. The _Operator_ can then be easily configured to inject the configuration of that user as environment variables in all generated resources. Those variables can be used by the test to establish authentication with the kafka broker. - -| Variable Name | Description | -|:---------------------------------|:---------------------------------------------------------------------------------| -| `KAFKA_BOOTSTRAP_SERVERS` | Kafka bootstrap servers | -| `KAFKA_SECURITY_ENABLED` | - | -| `KAFKA_SECURITY_PROTOCOL_CONFIG` | Security protocol. Options: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL` | -| `KAFKA_SASL_MECHANISM` | Authentication mechanism. 
Options: `PLAINTEXT`, `SCRAM-SHA-256`, `SCRAM-SHA-512` | -| `KAFKA_USERNAME` | The username used to authenticate Kafka clients with the Kafka server | -| `KAFKA_PASSWORD` | The password used to authenticate Kafka clients with the Kafka server | - ---- - -## :material-server-network: Dedicated Kubernetes Nodes - -To run test resources on dedicated _Kubernetes_ node(s), the _Operator_ support deploying resources with **_Affinity_** and **_Taint Tolerations_**. - -### :material-vector-link: Affinity - -This allows generated resources to have specific _Affinity_ options. - -!!! Note - - The _Custom Resource Definition Spec_ is designed with modularity and expandability in mind. This means that although a specific set of _Kubernetes Affinity_ options are supported today, extending this support based on need is a streamlined and easy processes. If additonal support is needed, don't hesitate to open a [feature request](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues). - -#### Affinity Options - -The specification for affinity is defined as follows - -=== ":octicons-file-code-16: `affinity-spec.yaml`" - - ```yaml - apiVersion: locust.io/v1 - ... - spec: - ... - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution - : - ... - ... - ``` - -##### Node Affinity - -This optional section causes generated pods to declare specific _Node Affinity_ so _Kubernetes scheduler_ becomes aware of this requirement. - -The implementation from the _Custom Resource_ perspective is strongly influenced by Kubernetes native definition -of [node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). However, the implementation is -on purpose slightly simplified in order to allow users to have easier time working with affinity. - -The `nodeAffinity` section supports declaring node affinity under `requiredDuringSchedulingIgnoredDuringExecution`. 
Meaning that any -declared affinity labels **must** be present on nodes in order for resources to be deployed on them. - -**Example**: - -In the below example, generated pods will declare 3 **required** labels (keys and values) to be present on nodes before they are scheduled. - -=== ":octicons-file-code-16: `node-affinity-example.yaml`" - - ```yaml - apiVersion: locust.io/v1 - ... - spec: - ... - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeAffinityLabel1: locust-cloud-tests - nodeAffinityLabel2: performance-nodes - nodeAffinityLabel3: high-memory - ... - ... - ``` - -### :material-vector-difference: Taint Tolerations - -This allows generated resources to have specific _Taint Tolerations_ options. - -#### Toleration Options - -The specification for tolerations is defined as follows - -=== ":octicons-file-code-16: `taint-tolerations-spec.yaml`" - - ```yaml - apiVersion: locust.io/v1 - ... - spec: - ... - tolerations: - - key: - operator: <"Exists", "Equal"> - value: #(1)! - effect: <"NoSchedule", "PreferNoSchedule", "NoExecute"> - ... - ``` - - 1. Optional when `operator` value is set to `Exists`. - -=== ":octicons-file-code-16: **Example**" - ```yaml - apiVersion: locust.io/v1 - ... - spec: - ... - tolerations: - - key: taint-A - operator: Equal - value: ssd - effect: NoSchedule - - key: taint-B - operator: Exists - effect: NoExecute - ``` - - -## Resource Management - -The operator allows for fine-grained control over the resource requests and limits for the Locust master and worker pods. This is useful for ensuring that your load tests have the resources they need, and for preventing them from consuming too many resources on your cluster. - -Configuration is done via the `application.yml` file or through Helm values. 
The following properties are available: - -- `locust.operator.resource.pod-mem-request` -- `locust.operator.resource.pod-cpu-request` -- `locust.operator.resource.pod-ephemeral-storage-request` -- `locust.operator.resource.pod-mem-limit` -- `locust.operator.resource.pod-cpu-limit` -- `locust.operator.resource.pod-ephemeral-storage-limit` - -### :material-cpu-64-bit: Disabling CPU Limits - -In some scenarios, particularly during performance-sensitive tests, you may want to disable CPU limits to prevent throttling. This can be achieved by setting the `cpuLimit` property to a blank string in your Helm values. - -=== ":octicons-file-code-16: **Helm Values Example**" - - ```yaml - config: - loadGenerationPods: - resource: - cpuLimit: "" # (1)! - ``` - - 1. Setting the CPU limit to an empty string disables it. - -!!! Note - When the CPU limit is disabled, the pod is allowed to use as much CPU as is available on the node. This can be useful for maximizing performance, but it can also lead to resource contention if not managed carefully. - ---- - -## :material-docker: Usage of a private image registry - -Images from a private image registry can be used through various methods as described in the [kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry), one of those methods depends on setting `imagePullSecrets` for pods. This is supported in the operator by simply setting the `imagePullSecrets` option in the deployed custom resource. For example: - -```yaml title="locusttest-pull-secret-cr.yaml" -apiVersion: locust.io/v1 -... -spec: - image: ghcr.io/mycompany/locust:latest # (1)! - imagePullSecrets: # (2)! - - gcr-secret - ... -``` - -1. Specify which Locust image to use for both master and worker containers. -2. [Optional] Specify an existing pull secret to use for master and worker pods. 
- -### :material-sync: Image pull policy - -Kubernetes uses the image tag and pull policy to control when kubelet attempts to download (pull) a container image. The image pull policy can be defined through the `imagePullPolicy` option, as explained in the [kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). When using the operator, the `imagePullPolicy` option can be directly configured in the custom resource. For example: - -```yaml title="locusttest-pull-policy-cr.yaml" -apiVersion: locust.io/v1 -... -spec: - image: ghcr.io/mycompany/locust:latest # (1)! - imagePullPolicy: Always # (2)! - ... -``` - -1. Specify which Locust image to use for both master and worker containers. -2. [Optional] Specify the pull policy to use for containers defined within master and worker containers. Supported options include `Always`, `IfNotPresent` and `Never`. - -## :material-auto-fix: Automatic Cleanup for Finished Master and Worker Jobs - -Once load tests finish, master and worker jobs remain available in Kubernetes. -You can set up a time-to-live (TTL) value in the operator's Helm chart, so that -kubernetes jobs are eligible for cascading removal once the TTL expires. This means -that Master and Worker jobs and their dependent objects (e.g., pods) will be deleted. - -Note that setting up a TTL will not delete `LocustTest` or `ConfigMap` resources. - -To set a TTL value, override the key `ttlSecondsAfterFinished` in `values.yaml`: - -=== ":octicons-file-code-16: `values.yaml`" - ```yaml - ... - config: - loadGenerationJobs: - # Either leave empty or use an empty string to avoid setting this option - ttlSecondsAfterFinished: 3600 # (1)! - ... - ``` - - 1. Time in seconds to keep the job after it finishes. - -You can also use Helm's CLI arguments: `helm install ... --set config.loadGenerationJobs.ttlSecondsAfterFinished=0`. 
- -Read more about the `ttlSecondsAfterFinished` parameter in Kubernetes's [official documentation](https://kubernetes.io/docs/concepts/workloads/controllers/ttlafterfinished/). - -### Kubernetes Support for `ttlSecondsAfterFinished` - -Support for parameter `ttlSecondsAfterFinished` was added in Kubernetes v1.12. -In case you're deploying the locust operator to a Kubernetes cluster that does not -support `ttlSecondsAfterFinished`, you may leave the Helm key empty or use an empty -string. In this case, job definitions will not include the parameter. +Browse all guides: [How-To Guides](how-to-guides/index.md) diff --git a/docs/api_reference.md b/docs/api_reference.md new file mode 100644 index 00000000..ae88fa95 --- /dev/null +++ b/docs/api_reference.md @@ -0,0 +1,478 @@ +--- +title: API Reference +description: Complete API reference for LocustTest custom resources +tags: + - api + - reference + - crd + - specification +--- + +# API Reference + +This document provides a complete reference for the LocustTest Custom Resource Definition (CRD). + +## Overview + +| Property | Value | +|----------|-------| +| **Group** | `locust.io` | +| **Kind** | `LocustTest` | +| **Versions** | v2 (recommended), v1 (deprecated) | +| **Short Name** | `lotest` | +| **Scope** | Namespaced | + +--- + +## LocustTest v2 (Recommended) + +The v2 API provides a cleaner, grouped configuration structure with new features. 
+ +### Spec Fields + +#### Root Fields + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `image` | string | **Yes** | - | Container image for Locust pods (e.g., `locustio/locust:2.20.0`) | +| `imagePullPolicy` | string | No | `IfNotPresent` | Image pull policy: `Always`, `IfNotPresent`, `Never` | +| `imagePullSecrets` | []LocalObjectReference | No | - | Secrets for pulling from private registries (specify as `- name: secret-name`) | +| `master` | [MasterSpec](#masterspec) | **Yes** | - | Master pod configuration | +| `worker` | [WorkerSpec](#workerspec) | **Yes** | - | Worker pod configuration | +| `testFiles` | [TestFilesConfig](#testfilesconfig) | No | - | ConfigMap references for test files | +| `scheduling` | [SchedulingConfig](#schedulingconfig) | No | - | Affinity, tolerations, nodeSelector | +| `env` | [EnvConfig](#envconfig) | No | - | Environment variable injection | +| `volumes` | []corev1.Volume | No | - | Additional volumes to mount | +| `volumeMounts` | [][TargetedVolumeMount](#targetedvolumemount) | No | - | Volume mounts with target filtering | +| `observability` | [ObservabilityConfig](#observabilityconfig) | No | - | OpenTelemetry configuration | + +#### MasterSpec + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `command` | string | **Yes** | - | Locust command seed (e.g., `--locustfile /lotest/src/test.py --host https://example.com`) | +| `resources` | corev1.ResourceRequirements | No | From operator config | CPU/memory requests and limits | +| `labels` | map[string]string | No | - | Additional labels for master pod | +| `annotations` | map[string]string | No | - | Additional annotations for master pod | +| `autostart` | bool | No | `true` | Start test automatically when workers connect | +| `autoquit` | [AutoquitConfig](#autoquitconfig) | No | `{enabled: true, timeout: 60}` | Auto-quit behavior after test completion | +| 
`extraArgs` | []string | No | - | Additional command-line arguments | + +#### WorkerSpec + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `command` | string | **Yes** | - | Locust command seed (e.g., `--locustfile /lotest/src/test.py`) | +| `replicas` | int32 | **Yes** | - | Number of worker replicas (1-500) | +| `resources` | corev1.ResourceRequirements | No | From operator config | CPU/memory requests and limits | +| `labels` | map[string]string | No | - | Additional labels for worker pods | +| `annotations` | map[string]string | No | - | Additional annotations for worker pods | +| `extraArgs` | []string | No | - | Additional command-line arguments | + +#### AutoquitConfig + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `enabled` | bool | No | `true` | Enable auto-quit after test completion | +| `timeout` | int32 | No | `60` | Seconds to wait before quitting after test ends | + +#### TestFilesConfig + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `configMapRef` | string | No | - | ConfigMap containing test files | +| `libConfigMapRef` | string | No | - | ConfigMap containing library files | +| `srcMountPath` | string | No | `/lotest/src` | Mount path for test files | +| `libMountPath` | string | No | `/opt/locust/lib` | Mount path for library files | + +#### SchedulingConfig + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `affinity` | corev1.Affinity | No | - | Standard Kubernetes affinity rules | +| `tolerations` | []corev1.Toleration | No | - | Standard Kubernetes tolerations | +| `nodeSelector` | map[string]string | No | - | Node selector labels | + +#### EnvConfig + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `configMapRefs` | 
[][ConfigMapEnvSource](#configmapenvsource) | No | - | ConfigMaps to inject as environment variables | +| `secretRefs` | [][SecretEnvSource](#secretenvsource) | No | - | Secrets to inject as environment variables | +| `variables` | []corev1.EnvVar | No | - | Individual environment variables | +| `secretMounts` | [][SecretMount](#secretmount) | No | - | Secrets to mount as files | + +#### ConfigMapEnvSource + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `name` | string | **Yes** | - | ConfigMap name | +| `prefix` | string | No | - | Prefix to add to all keys (e.g., `APP_`) | + +#### SecretEnvSource + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `name` | string | **Yes** | - | Secret name | +| `prefix` | string | No | - | Prefix to add to all keys | + +#### SecretMount + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `name` | string | **Yes** | - | Secret name | +| `mountPath` | string | **Yes** | - | Path to mount the secret | +| `readOnly` | bool | No | `true` | Mount as read-only | + +#### TargetedVolumeMount + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `name` | string | **Yes** | - | Volume name (must match a volume in `volumes`) | +| `mountPath` | string | **Yes** | - | Path to mount the volume | +| `subPath` | string | No | - | Sub-path within the volume | +| `readOnly` | bool | No | `false` | Mount as read-only | +| `target` | string | No | `both` | Target pods: `master`, `worker`, or `both` | + +#### ObservabilityConfig + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `openTelemetry` | [OpenTelemetryConfig](#opentelemetryconfig) | No | - | OpenTelemetry configuration | + +#### OpenTelemetryConfig + +| Field | Type | Required | Default | 
Description | +|-------|------|----------|---------|-------------| +| `enabled` | bool | No | `false` | Enable OpenTelemetry integration | +| `endpoint` | string | Required if enabled | - | OTel collector endpoint (e.g., `otel-collector:4317`) | +| `protocol` | string | No | `grpc` | Protocol: `grpc` or `http/protobuf` | +| `insecure` | bool | No | `false` | Use insecure connection | +| `extraEnvVars` | map[string]string | No | - | Additional OTel environment variables | + +### Status Fields + +| Field | Type | Description | +|-------|------|-------------| +| `phase` | string | Current lifecycle phase: `Pending`, `Running`, `Succeeded`, `Failed` | +| `observedGeneration` | int64 | Most recent generation observed by the controller | +| `expectedWorkers` | int32 | Number of expected worker replicas (from spec) | +| `connectedWorkers` | int32 | Approximate number of connected workers (from Job.Status.Active) | +| `startTime` | metav1.Time | When the test transitioned to Running | +| `completionTime` | metav1.Time | When the test reached Succeeded or Failed | +| `conditions` | []metav1.Condition | Standard Kubernetes conditions (see below) | + +!!! note + `connectedWorkers` is an approximation derived from the worker Job's active pod count. It may briefly lag behind actual Locust worker connections. + +#### Phase Lifecycle + +```mermaid +stateDiagram-v2 + [*] --> Pending: CR Created + Pending --> Running: Master Job active + Running --> Succeeded: Master Job completed + Running --> Failed: Master Job failed + Pending --> Failed: Pod health check failed (after grace period) + Running --> Failed: Pod health check failed (after grace period) +``` + +| Phase | Meaning | What to do | +|-------|---------|------------| +| `Pending` | Resources are being created (Service, master Job, worker Job). Initial state after CR creation. Also set during recovery after external resource deletion. | Wait for resources to be scheduled. Check events if stuck. 
| +| `Running` | Master Job has at least one active pod. Test execution is in progress. `startTime` is set on this transition. | Monitor worker connections and test progress. | +| `Succeeded` | Master Job completed successfully (exit code 0). `completionTime` is set. | Collect results. CR can be deleted or kept for records. | +| `Failed` | Master Job failed, or pod health checks detected persistent failures after the 2-minute grace period. `completionTime` is set. | Check pod logs and events for failure details. Delete and recreate to retry. | + +The operator waits 2 minutes after pod creation before reporting pod health failures. This prevents false alarms during normal startup activities like image pulling, volume mounting, and scheduling. + +#### Condition Types + +**Ready** + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `ResourcesCreated` | All resources (Service, Jobs) created successfully | +| `False` | `ResourcesCreating` | Resources are being created | +| `False` | `ResourcesFailed` | Test failed, resources in error state | + +**WorkersConnected** + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `AllWorkersConnected` | All expected workers have active pods | +| `False` | `WaitingForWorkers` | Initial state, waiting for worker pods | +| `False` | `WorkersMissing` | Some workers not yet active (shows N/M count) | + +**TestCompleted** + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `TestSucceeded` | Test completed successfully | +| `True` | `TestFailed` | Test completed with failure | +| `False` | `TestInProgress` | Test has not finished | + +**PodsHealthy** + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `PodsHealthy` | All pods running normally | +| `True` | `PodsStarting` | Within 2-minute grace period (not yet checking) | +| `False` | `ImagePullError` | One or more pods cannot pull container image | +| `False` | `ConfigurationError` | ConfigMap 
or Secret not found | +| `False` | `SchedulingError` | Pod cannot be scheduled (node affinity, resources) | +| `False` | `CrashLoopBackOff` | Container repeatedly crashing | +| `False` | `InitializationError` | Init container failed | + +**SpecDrifted** + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `SpecChangeIgnored` | CR spec was modified after creation. Changes are ignored. Delete and recreate to apply. | + +!!! info + The `SpecDrifted` condition only appears when a user edits the CR spec after initial creation. It serves as a reminder that tests are immutable. + +#### Checking Status + +```bash +# Quick status overview +kubectl get locusttest my-test + +# Detailed status with conditions +kubectl get locusttest my-test -o jsonpath='{.status}' | jq . + +# Watch phase changes in real-time +kubectl get locusttest my-test -w + +# Check specific condition +kubectl get locusttest my-test -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' + +# Check worker connection progress +kubectl get locusttest my-test -o jsonpath='{.status.connectedWorkers}/{.status.expectedWorkers}' +``` + +#### CI/CD Integration + +Use `kubectl wait` to integrate LocustTest into CI/CD pipelines. The operator's status conditions follow standard Kubernetes conventions, making them compatible with any tool that supports `kubectl wait`. 
+ +**GitHub Actions example:** + +```yaml +name: Load Test +on: + workflow_dispatch: + +jobs: + load-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Apply test + run: kubectl apply -f locusttest.yaml + + - name: Wait for test completion + run: | + kubectl wait locusttest/my-test \ + --for=jsonpath='{.status.phase}'=Succeeded \ + --timeout=30m + + - name: Check result + if: failure() + run: | + echo "Test failed or timed out" + kubectl describe locusttest my-test + kubectl logs -l performance-test-name=my-test --tail=50 + + - name: Cleanup + if: always() + run: kubectl delete locusttest my-test --ignore-not-found +``` + +**Generic shell script example:** + +```bash +#!/bin/bash +set -e + +# Apply test +kubectl apply -f locusttest.yaml + +# Wait for completion (either Succeeded or Failed) +echo "Waiting for test to complete..." +while true; do + PHASE=$(kubectl get locusttest my-test -o jsonpath='{.status.phase}' 2>/dev/null) + case "$PHASE" in + Succeeded) + echo "Test passed!" + exit 0 + ;; + Failed) + echo "Test failed!" + kubectl describe locusttest my-test + exit 1 + ;; + *) + echo "Phase: $PHASE - waiting..." 
+ sleep 10 + ;; + esac +done +``` + +### Complete v2 Example + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: comprehensive-test +spec: + image: locustio/locust:2.20.0 + imagePullPolicy: IfNotPresent + + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com --users 1000 --spawn-rate 50 --run-time 10m" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + labels: + role: master + autostart: true + autoquit: + enabled: true + timeout: 120 + + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + labels: + role: worker + + testFiles: + configMapRef: my-test-scripts + libConfigMapRef: my-lib-files + + scheduling: + nodeSelector: + node-type: performance + tolerations: + - key: "dedicated" + operator: "Equal" + value: "performance" + effect: "NoSchedule" + + env: + secretRefs: + - name: api-credentials + prefix: "API_" + configMapRefs: + - name: app-config + variables: + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: test-data + persistentVolumeClaim: + claimName: test-data-pvc + + volumeMounts: + - name: test-data + mountPath: /data + target: both + + observability: + openTelemetry: + enabled: true + endpoint: "otel-collector.monitoring:4317" + protocol: "grpc" + extraEnvVars: + OTEL_SERVICE_NAME: "load-test" + OTEL_RESOURCE_ATTRIBUTES: "environment=staging,team=platform" +``` + +--- + +## LocustTest v1 (Deprecated) + +!!! warning "Deprecated" + The v1 API is deprecated and will be removed in v3.0. Use v2 for new deployments. See the [Migration Guide](migration.md) for upgrade instructions. 
+ +### Spec Fields (v1) + +| Field | Type | Required | Default | Description | +|-------|------|----------|---------|-------------| +| `masterCommandSeed` | string | **Yes** | - | Command seed for master pod | +| `workerCommandSeed` | string | **Yes** | - | Command seed for worker pods | +| `workerReplicas` | int32 | **Yes** | - | Number of worker replicas (1-500) | +| `image` | string | **Yes** | - | Container image | +| `imagePullPolicy` | string | No | `IfNotPresent` | Image pull policy | +| `imagePullSecrets` | []string | No | - | Pull secrets | +| `configMap` | string | No | - | ConfigMap for test files | +| `libConfigMap` | string | No | - | ConfigMap for library files | +| `labels` | PodLabels | No | - | Labels with `master` and `worker` maps | +| `annotations` | PodAnnotations | No | - | Annotations with `master` and `worker` maps | +| `affinity` | LocustTestAffinity | No | - | Custom affinity structure | +| `tolerations` | []LocustTestToleration | No | - | Custom toleration structure | + +### v1 Example + +```yaml +apiVersion: locust.io/v1 +kind: LocustTest +metadata: + name: basic-test +spec: + image: locustio/locust:2.20.0 + masterCommandSeed: "--locustfile /lotest/src/test.py --host https://example.com" + workerCommandSeed: "--locustfile /lotest/src/test.py" + workerReplicas: 3 + configMap: test-scripts +``` + +--- + +## Kubectl Commands + +```bash +# List all LocustTests +kubectl get locusttests +kubectl get lotest # short name + +# Describe a LocustTest +kubectl describe locusttest + +# Watch status changes +kubectl get locusttest -w + +# Delete a LocustTest +kubectl delete locusttest +``` + +--- + +## Printer Columns + +When listing LocustTests, the following columns are displayed: + +| Column | Description | +|--------|-------------| +| NAME | Resource name | +| PHASE | Current phase (Pending/Running/Succeeded/Failed) | +| WORKERS | Requested worker count | +| CONNECTED | Connected worker count | +| IMAGE | Container image (priority column) | +| AGE 
| Time since creation | diff --git a/docs/assets/javascripts/schema-org.js b/docs/assets/javascripts/schema-org.js new file mode 100644 index 00000000..0294b08b --- /dev/null +++ b/docs/assets/javascripts/schema-org.js @@ -0,0 +1,28 @@ +document.addEventListener('DOMContentLoaded', function() { + var script = document.createElement('script'); + script.type = 'application/ld+json'; + script.textContent = JSON.stringify({ + "@context": "https://schema.org", + "@type": "SoftwareApplication", + "name": "Locust Kubernetes Operator", + "description": "Production-ready Kubernetes operator for Locust distributed load testing. Automate performance testing with cloud-native CI/CD integration, OpenTelemetry observability, and horizontal scaling.", + "applicationCategory": "DeveloperApplication", + "applicationSubCategory": "Performance Testing", + "operatingSystem": "Kubernetes", + "offers": { + "@type": "Offer", + "price": "0", + "priceCurrency": "USD" + }, + "author": { + "@type": "Person", + "name": "Abdelrhman Hamouda", + "url": "https://github.com/AbdelrhmanHamouda" + }, + "codeRepository": "https://github.com/AbdelrhmanHamouda/locust-k8s-operator", + "programmingLanguage": "Go", + "license": "https://opensource.org/licenses/Apache-2.0", + "keywords": ["kubernetes", "locust", "load testing", "performance testing", "operator", "cloud-native", "distributed testing"] + }); + document.head.appendChild(script); +}); diff --git a/docs/assets/stylesheets/extra.css b/docs/assets/stylesheets/extra.css index 0e39274e..dea77d0c 100644 --- a/docs/assets/stylesheets/extra.css +++ b/docs/assets/stylesheets/extra.css @@ -1,3 +1,43 @@ +/* ================================ + TYPOGRAPHY & READABILITY + ================================ */ + +/* CSS Custom Properties for consistent typography scale */ +:root { + --body-line-height: 1.7; + --code-line-height: 1.5; + --heading-line-height: 1.3; + --code-font-size: 14px; + --code-block-padding: 1.25rem; +} + +/* Body text line-height */ 
+.md-typeset p, +.md-typeset li, +.md-typeset td { + line-height: var(--body-line-height); +} + +/* Heading line-height (tighter for visual hierarchy) */ +.md-typeset h1, +.md-typeset h2, +.md-typeset h3, +.md-typeset h4 { + line-height: var(--heading-line-height); +} + +/* Code block improvements */ +.md-typeset pre > code { + font-size: var(--code-font-size); + line-height: var(--code-line-height); + padding: var(--code-block-padding); +} + +/* Inline code sizing to match */ +.md-typeset code { + font-size: 0.85em; +} + /* Modern hero section with gradient background and enhanced styling */ .md-content .tx-hero { display: flex !important; @@ -642,15 +682,6 @@ html { filter: drop-shadow(0 4px 12px rgba(0,0,0,0.15)) !important; } -/* Enhanced section styling */ -.md-content h2 { - margin-top: 2rem !important; - margin-bottom: 1rem !important; - font-size: 2.2rem !important; - font-weight: 600 !important; - line-height: 1.2 !important; -} - /* Improved image styling throughout docs */ .md-content img:not(.twemoji):not(.badge) { border-radius: 12px !important; @@ -671,19 +702,6 @@ html { margin: 1.5rem 0 !important; } -/* Better typography hierarchy */ -.md-content h3 { - font-size: 1.4rem !important; - font-weight: 600 !important; - margin-bottom: 1rem !important; - color: var(--md-default-fg-color) !important; -} - -.md-content p { - line-height: 1.7 !important; - margin-bottom: 1rem !important; -} - /* ================================ CUSTOM ANNOUNCEMENT SYSTEM ================================ */ @@ -956,3 +974,52 @@ html { font-size: 1.25rem; } } + +/* Glowing border animation for v2 announcement */ +.admonition.success.glowing-border { + position: relative; + overflow: hidden; + border: 1px solid transparent !important; + background-clip: padding-box !important; + z-index: 0; +} + +.admonition.success.glowing-border::before { + content: ''; + position: absolute; + top: -50%; + left: -50%; + width: 200%; + height: 200%; + background: conic-gradient( + from 0deg, 
+ transparent 0deg, + var(--md-primary-fg-color) 45deg, + var(--md-accent-fg-color) 135deg, + #00bcd4 225deg, + var(--md-primary-fg-color) 315deg, + transparent 360deg + ); + animation: borderRotate 8s linear infinite; + z-index: -2; +} + +.admonition.success.glowing-border::after { + content: ''; + position: absolute; + inset: 1.5px; + background: var(--md-default-bg-color); + z-index: -1; + border-radius: inherit; +} + +@keyframes borderRotate { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +/* Ensure content stays readable on top */ +.admonition.success.glowing-border > * { + position: relative; + z-index: 1; +} diff --git a/docs/comparison.md b/docs/comparison.md new file mode 100644 index 00000000..5017076c --- /dev/null +++ b/docs/comparison.md @@ -0,0 +1,161 @@ +--- +title: Comparison - Locust on Kubernetes +description: Compare the Locust Kubernetes Operator with alternatives for running Locust load tests on Kubernetes. Includes official Locust operator, k6 operator, and manual deployment. Feature comparison, performance benchmarks, decision guide, and migration paths. +--- + +# Comparison: Locust on Kubernetes + +When running Locust load tests on Kubernetes, you have four main approaches to choose from: + +1. **Locust Kubernetes Operator** (this project) - Full lifecycle management via Custom Resource Definition (CRD) +2. **Official Locust Operator** (locustio/k8s-operator) - Locust team operator +3. **k6 Operator** (Grafana) - Distributed k6 testing on Kubernetes +4. **Manual Deployment** - Raw Kubernetes manifests (Deployments, Services, ConfigMaps) + +This page helps you evaluate which approach fits your use case, with an objective feature comparison, performance benchmarks, decision guide, and migration paths. 
+ +## Feature Comparison + +| Feature | This Operator | Official Operator | k6 Operator | Manual Deploy | +|---------|:-------------:|:-----------------:|:-----------:|:-------------:| +| Declarative CRD API | βœ“ | βœ“ | βœ“ | βœ— | +| Automated lifecycle | βœ“ | βœ“ | βœ“ | βœ— | +| Immutable test guarantee | βœ“ | βœ— | βœ— | βœ— | +| Validation webhooks | βœ“ | Not documented | βœ“ | βœ— | +| CI/CD integration (autoQuit) | βœ“ | Not documented | βœ“ (cloud) | Manual | +| OpenTelemetry native | βœ“ | βœ— | βœ— | βœ— | +| Secret injection (envFrom) | βœ“ | Not documented | βœ“ | Manual | +| Volume mounting | βœ“ | βœ“ (ConfigMap) | βœ“ | βœ“ | +| Horizontal worker scaling | βœ“ | βœ“ | βœ“ | βœ“ | +| Resource governance | βœ“ (operator + CR) | Not documented | βœ“ | βœ“ | +| Status monitoring (conditions) | βœ“ | Not documented | βœ“ | βœ— | +| Pod health detection | βœ“ | βœ— | βœ— | βœ— | +| Leader election (HA) | βœ“ | Not documented | βœ“ | N/A | +| Helm chart | βœ“ | βœ“ | βœ“ | βœ— | +| API versions supported | v1 + v2 (conversion) | Single version | Single version | N/A | +| Documentation pages | 20+ | 1 | Extensive | N/A | + +**Note:** "Not documented" indicates features that may exist but are not described in the official documentation. The Official Locust Operator is maintained by the Locust core team. + +## Why Choose This Operator + +!!! 
success "Battle-Tested Reliability"
+
+    - **Battle-Tested** - Production use on AWS EKS since 2022
+    - **Comprehensive documentation** - Complete coverage of getting started, API reference, architecture, security, FAQ, and migration guides
+    - **Go-native performance** - Sub-second startup time, 75 MB container image, 64 MB memory footprint
+    - **Feature-rich capabilities** - OpenTelemetry integration, validation webhooks, pod health monitoring, immutable test guarantee
+    - **Active development** - Continuous improvement with community feedback and contributions
+
+## Performance Benchmarks
+
+All metrics measured from production deployment on AWS EKS. Container images measured via `docker images`, memory usage via Kubernetes metrics-server, startup time as duration to pod Ready state.
+
+### Container Image Size
+
+| Metric | This Operator (Go) | Previous Version (Java) |
+|--------|:------------------:|:-----------------------:|
+| Image size | 75 MB | 325 MB |
+| Reduction | 77% smaller | — |
+
+**Source:** `docker images` output
+
+### Runtime Performance
+
+| Metric | This Operator (Go) | Previous Version (Java) |
+|--------|:------------------:|:-----------------------:|
+| Memory usage (idle) | 64 MB | 256 MB |
+| Startup time | < 1 second | ~60 seconds |
+
+**Source:** Kubernetes metrics-server and pod Ready state timing
+
+**Methodology:** Measurements from production deployment on AWS EKS. Container images measured via `docker images`, memory via Kubernetes metrics-server, startup time as duration to pod Ready state.
+
+**Note:** Performance data for the Official Locust Operator and k6 Operator is not published by their maintainers.
+
+## Decision Guide
+
+!!! tip "Choose This Operator when..."
+ + - Running Locust tests in CI/CD pipelines regularly + - Need automated test lifecycle management (create, run, cleanup) + - Want immutability guarantees (no mid-test changes) + - Require OpenTelemetry observability + - Multiple teams sharing a cluster need governance and isolation + - Need pod health monitoring and status conditions + - Want validation webhooks to catch configuration errors before deployment + +!!! info "Choose k6 Operator when..." + + - Using k6 (not Locust) for load testing + - Need Grafana Cloud integration for observability + - Prefer k6 scripting language and ecosystem + - Part of the Grafana observability stack + +!!! info "Choose Manual Deployment when..." + + - Learning Locust on Kubernetes for the first time + - Need maximum flexibility and customization + - Running a one-off test in a development environment + - Want to understand the underlying Kubernetes primitives + - Have specific requirements not covered by existing solutions + +## Migration Paths + +### From Manual Deployment to Operator + +If you're currently using raw Kubernetes manifests (Deployments, Services, ConfigMaps), migrating to the operator is straightforward: + +1. Keep your existing ConfigMap with test scripts +2. Create a LocustTest CR that references your ConfigMap +3. 
Deploy the CR - the operator handles the rest + +**Example:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: my-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-existing-configmap # Reference your existing ConfigMap + master: + command: "--locustfile /lotest/src/test.py --host https://example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 +``` + +[:octicons-arrow-right-24: Get started with the operator](getting_started/index.md) + +### From Helm Chart to Operator + +If you're using the official Locust Helm chart, you can map your Helm values to LocustTest CR fields: + +- Helm `image` β†’ CR `spec.image` +- Helm `master.args` β†’ CR `spec.master.command` +- Helm `worker.replicas` β†’ CR `spec.worker.replicas` +- Helm `locust_locustfile_configmap` β†’ CR `spec.testFiles.configMapRef` +- Helm `locust_lib_configmap` β†’ CR `spec.testFiles.libConfigMapRef` + +The operator provides additional capabilities like automated cleanup, validation webhooks, and OpenTelemetry integration that aren't available in the Helm chart. + +[:octicons-arrow-right-24: See detailed field mapping in the migration guide](migration.md#field-mapping-reference) + +### From v1 Operator to v2 Operator + +If you're already using v1 of the Locust Kubernetes Operator, migration to v2 is seamless: + +- **Backward compatibility**: v1 CRs continue to work via automatic conversion webhook +- **New features**: Access OpenTelemetry, secret injection, volume mounting, separate resource specs +- **Performance**: 75% smaller memory footprint, sub-second startup time + +[:octicons-arrow-right-24: Complete v1-to-v2 migration guide](migration.md) + +## Ready to Get Started? + +The Locust Kubernetes Operator provides comprehensive lifecycle management for running Locust tests on Kubernetes, with features designed for CI/CD pipelines and production environments. 
+ +[:octicons-arrow-right-24: Get started in 5 minutes](getting_started/index.md){ .md-button .md-button--primary } diff --git a/docs/contribute.md b/docs/contribute.md index 3a3d33a7..263990e8 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -33,13 +33,24 @@ There are several ways you can contribute to the Locust K8s Operator project: The project is **_actively_** maintained and is under continuous development and improvement. If you have any request or want to chat, kindly open a ticket. If you wish to contribute code and/or ideas, please review the development documentation below. +## Technology Stack + +The operator is built with **Go** using the [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) framework. Key technologies: + +- **Language**: Go 1.24+ +- **Framework**: controller-runtime / Operator SDK +- **Testing**: envtest, Ginkgo, Kind +- **Build**: Make, Docker +- **Deployment**: Helm, Kustomize + ## Development Documentation For developers contributing to the Locust K8s Operator project, we provide detailed documentation on various development aspects: - [Local Development Guide](local-development.md): Setting up your development environment -- [Integration Testing Guide](integration-testing.md): Running and creating integration tests +- [Testing Guide](integration-testing.md): Running unit, integration, and E2E tests - [Pull Request Process](pull-request-process.md): Guidelines for submitting code changes +- [How It Works](how_does_it_work.md): Architecture overview You can also refer to the comprehensive [CONTRIBUTING.MD][contributing-url] file in the GitHub repository for more information. 
diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 00000000..6cb84cfe --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,253 @@ +--- +title: Frequently Asked Questions +description: Common questions and answers about the Locust Kubernetes Operator +tags: + - faq + - troubleshooting + - guide +--- + +# Frequently Asked Questions + +This page answers the most common questions about operating the Locust Kubernetes Operator in production. For step-by-step tutorials, see [Getting Started](getting_started/index.md). For advanced configuration, see [How-To Guides](how-to-guides/index.md). + +## Test Lifecycle + +### Why can't I update a running test? + +Tests are **immutable by design**. Once a LocustTest CR is created, the operator ignores all changes to the `spec` field and sets a `SpecDrifted` condition to indicate drift was detected. + +This ensures predictable behavior β€” each test run uses exactly the configuration it was created with, with no mid-flight configuration changes. See [How Does It Work - Immutable Tests](how_does_it_work.md#immutable-tests) for the design rationale. + +To change test parameters, use the delete-and-recreate pattern: + +```bash +kubectl delete locusttest my-test +# Edit your YAML with desired changes +kubectl apply -f locusttest.yaml +``` + +### How do I change test parameters? + +Delete the LocustTest CR, edit your YAML file with the desired changes, and recreate it: + +```bash +kubectl delete locusttest my-test +# Edit locusttest.yaml (change image, replicas, commands, etc.) +kubectl apply -f locusttest.yaml +``` + +The operator will create new Jobs with the updated configuration. Previous test results remain in your monitoring system (if using OpenTelemetry or metrics export). + +### What happens if I edit a LocustTest CR after creation? + +The operator detects spec changes but **ignores** them. 
It sets a `SpecDrifted` condition on the CR to indicate the spec has been modified: + +```bash +kubectl get locusttest my-test -o jsonpath='{.status.conditions[?(@.type=="SpecDrifted")]}' +``` + +The test continues running with its original configuration. To apply changes, delete and recreate the CR. + +### How do I run the same test multiple times? + +Delete and recreate the CR with the same YAML: + +```bash +kubectl delete locusttest my-test +kubectl apply -f locusttest.yaml # Same file +``` + +Or use unique names with a suffix to keep test history: + +```bash +kubectl apply -f locusttest-run-01.yaml +# Later... +kubectl apply -f locusttest-run-02.yaml +``` + +## Scaling + +### Can I scale workers during a running test? + +No, due to immutability. The worker replica count (`worker.replicas`) is set at test creation time and cannot be changed while the test runs. + +To run with different worker counts: + +```bash +kubectl delete locusttest my-test +# Edit YAML to update worker.replicas +kubectl apply -f locusttest.yaml +``` + +Note: Locust's web UI shows real-time user distribution across connected workers regardless of the replica count. + +### What's the maximum number of workers? + +The CRD enforces a maximum of **500 workers** per LocustTest. This limit prevents accidental resource exhaustion. + +For larger scales: + +- Run multiple LocustTest CRs against the same target (each test independently generates load) +- Use fewer workers with more users per worker (adjust `--users` and `--spawn-rate` in `master.command`) + +### How do I size worker resources? 
+
+Resource requirements depend on test complexity:
+
+| Test Type | CPU per Worker | Memory per Worker | Notes |
+|-----------|---------------|-------------------|-------|
+| Light HTTP tests | 250m | 128Mi | Simple GET/POST requests |
+| Medium complexity | 500m | 256Mi | JSON parsing, simple logic |
+| Heavy tests | 1000m | 512Mi-1Gi | Complex business logic, large payloads |
+
+Start conservatively and observe resource usage via `kubectl top pods`. See [Advanced Topics - Resource Management](how-to-guides/configuration/configure-resources.md) for detailed sizing guidance.
+
+!!! tip "Resource Precedence"
+    The operator applies resources in order of specificity: (1) CR spec resources (highest), (2) Helm role-specific resources (`masterResources`/`workerResources`), (3) Helm unified resources (`locustPods.resources`).
+
+## Debugging
+
+### My test is stuck in Pending phase
+
+Check in this order:
+
+1. **Check operator events**: `kubectl describe locusttest <test-name>` — look for errors in the Events section
+2. **Check pod status**: `kubectl get pods -l performance-test-name=<test-name>` — look for scheduling errors or image pull failures
+3. **Check PodsHealthy condition**: `kubectl get locusttest <test-name> -o jsonpath='{.status.conditions[?(@.type=="PodsHealthy")]}'` — the operator reports pod issues here
+4. **Check ConfigMap exists**: If using `testFiles.configMapRef`, ensure the ConfigMap exists: `kubectl get configmap <configmap-name>`
+
+The operator has a 2-minute grace period before reporting pod failures, allowing time for image pulls and startup.
+
+### My test shows Failed phase
+
+Check the failure reason:
+
+1. **Check conditions**: `kubectl describe locusttest <test-name>` — the Status section shows why it failed
+2. **Check master logs**: `kubectl logs <test-name>-master-<pod-id>` — Locust errors appear here
+3. 
**Common causes**:
+    - **Locustfile syntax error**: Python errors in your test script
+    - **Target host unreachable**: Network connectivity issues
+    - **ConfigMap not found**: Missing test files
+    - **Image pull failure**: Invalid image name or missing pull secrets
+
+### Workers show 0/N connected
+
+The `connectedWorkers` field is an approximation from `Job.Status.Active`. Workers need time to start, pull images, and connect to the master.
+
+Check worker connectivity:
+
+1. **Verify worker pods are running**: `kubectl get pods -l performance-test-pod-name=<test-name>-worker`
+2. **Verify master service exists**: `kubectl get svc <test-name>-master`
+3. **Check worker logs**: `kubectl logs <test-name>-worker-<pod-id>` — workers should show "Connected to master"
+4. **Verify network connectivity**: Workers connect to the master on port 5557
+
+Workers typically connect within 30-60 seconds after pod startup.
+
+### How do I access the Locust web UI?
+
+Port-forward to the master service:
+
+```bash
+kubectl port-forward svc/<test-name>-master 8089:8089
+```
+
+Then visit [http://localhost:8089](http://localhost:8089) in your browser.
+
+!!! note "Autostart Behavior"
+    If `autostart: true` (default), the test starts automatically and the web UI shows the running test. Set `autostart: false` to control test start from the web UI.
+
+### ConfigMap not found error
+
+The operator detects missing ConfigMaps via pod health monitoring and reports the issue in the `PodsHealthy` condition.
+
+You can create the ConfigMap **before or after** the LocustTest CR:
+
+```bash
+# Create ConfigMap from local files
+kubectl create configmap my-test-scripts --from-file=test.py=./test.py
+
+# If LocustTest already exists, the operator detects recovery automatically
+```
+
+The operator monitors pod status every 30 seconds and updates conditions when ConfigMaps become available.
+
+## Migration
+
+### Can I use v1 and v2 CRs at the same time?
+
+Yes, with the conversion webhook enabled. 
The operator automatically converts v1 CRs to v2 internally, allowing both versions to coexist. + +v1 CRs continue to work with their existing configuration. See [Migration Guide](migration.md) for conversion details. + +### Do I need to recreate existing v1 tests? + +No, existing v1 tests continue to work. However, new features (OpenTelemetry integration, environment variable injection, volume mounts) require the v2 API. + +Migrate when you need v2-only features or when convenient. See [Migration Guide](migration.md) for the conversion process. + +## Configuration + +### How do I pass extra CLI arguments to Locust? + +Use `master.extraArgs` and `worker.extraArgs` in the v2 API. These are appended after the command seed and operator-managed flags: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +spec: + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + extraArgs: + - "--loglevel" + - "DEBUG" + worker: + command: "--locustfile /lotest/src/test.py" + extraArgs: + - "--loglevel" + - "DEBUG" + replicas: 5 +``` + +!!! warning "Reserved Flags" + The operator manages these flags automatically: `--master`, `--worker`, `--master-host`, `--master-port`, `--expect-workers`, `--autostart`, `--autoquit`. Do not set them manually. + +### What resource precedence applies? + +The operator applies resources in this order (first non-empty value wins): + +1. **CR spec resources** (highest precedence): Set in `LocustTest.spec.master.resources` or `LocustTest.spec.worker.resources` +2. **Helm role-specific resources**: Set in `values.yaml` as `locustPods.masterResources` or `locustPods.workerResources` +3. **Helm unified resources**: Set in `values.yaml` as `locustPods.resources` + +This allows global defaults with role-specific overrides and per-test customization. + +## Observability + +### Should I use OpenTelemetry or the metrics sidecar? 
+ +**Use OpenTelemetry for new deployments.** It provides traces and metrics without requiring a sidecar container, reducing resource overhead. + +The metrics sidecar is maintained for legacy compatibility. Use it only if: + +- Your monitoring stack doesn't support OTLP +- You have existing dashboards built on the CSV metrics format + +See [Advanced Topics - OpenTelemetry Integration](how-to-guides/observability/configure-opentelemetry.md) for configuration details. + +### How do I monitor test progress programmatically? + +Use the LocustTest status conditions for automation: + +```bash +# Check if test is ready +kubectl get locusttest my-test -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' + +# Get current phase +kubectl get locusttest my-test -o jsonpath='{.status.phase}' + +# Get worker count +kubectl get locusttest my-test -o jsonpath='{.status.connectedWorkers}/{.status.expectedWorkers}' +``` + +See [API Reference - Status Fields](api_reference.md#status-fields) for all available status information. diff --git a/docs/features.md b/docs/features.md index a68c19a8..f92ef94b 100644 --- a/docs/features.md +++ b/docs/features.md @@ -11,31 +11,41 @@ tags: # Features +Everything the Locust Kubernetes Operator can do. Click any feature to learn how. +
-- :material-cloud-check: **Cloud Native & Kubernetes Integration** +- :material-cloud-check: **Cloud Native & Kubernetes Integration** --- Leverage the full power of Kubernetes for distributed performance testing. The operator is designed to be cloud-native, enabling seamless deployment and scaling on any Kubernetes cluster. -- :material-robot-happy: **Automation & CI/CD** + [:octicons-arrow-right-24: How it works](how_does_it_work.md) + +- :material-robot-happy: **Automation & CI/CD** --- Integrate performance testing directly into your CI/CD pipelines. Automate the deployment, execution, and teardown of your Locust tests for continuous performance validation. -- :material-shield-check: **Governance & Resource Management** + [:octicons-arrow-right-24: CI/CD integration tutorial](tutorials/ci-cd-integration.md) + +- :material-shield-check: **Governance & Resource Management** --- Maintain control over how resources are deployed and used. Configure resource requests and limits for Locust master and worker pods, and even disable CPU limits for performance-sensitive tests. -- :material-chart-bar: **Observability & Monitoring** + [:octicons-arrow-right-24: Configure resources](how-to-guides/configuration/configure-resources.md) + +- :material-chart-bar: **Observability & Monitoring** --- - Gain insights into test results and infrastructure usage. The operator supports Prometheus metrics out-of-the-box, allowing you to build rich monitoring dashboards. + Gain insights into test results and infrastructure usage. The operator supports Prometheus metrics out-of-the-box and native OpenTelemetry integration. + + [:octicons-arrow-right-24: Configure OpenTelemetry](how-to-guides/observability/configure-opentelemetry.md) Β· [:octicons-arrow-right-24: Metrics reference](metrics_and_dashboards.md) - :material-scale-balance: **Cost Optimization** @@ -43,18 +53,24 @@ tags: Optimize cloud costs by deploying resources only when needed and for as long as needed. 
The operator's automatic cleanup feature ensures that resources are terminated after a test run. + [:octicons-arrow-right-24: Configure TTL](how-to-guides/configuration/configure-ttl.md) + - :material-layers-triple: **Test Isolation & Parallelism** --- Run multiple tests in parallel with guaranteed isolation. Each test runs in its own set of resources, preventing any cross-test interference. + [:octicons-arrow-right-24: How it works](how_does_it_work.md) + - :material-docker: **Private Image Registry Support** --- Use images from private registries for your Locust tests. The operator supports `imagePullSecrets` and configurable `imagePullPolicy`. + [:octicons-arrow-right-24: Use private registry](how-to-guides/configuration/use-private-registry.md) + - :material-folder-multiple: **Lib ConfigMap Support** --- @@ -67,11 +83,54 @@ tags: Control where your Locust pods are scheduled using Kubernetes affinity and taint tolerations. This allows you to run tests on dedicated nodes or in specific availability zones. + [:octicons-arrow-right-24: Use node affinity](how-to-guides/scaling/use-node-affinity.md) Β· [:octicons-arrow-right-24: Configure tolerations](how-to-guides/scaling/configure-tolerations.md) Β· [:octicons-arrow-right-24: Use node selector](how-to-guides/scaling/use-node-selector.md) + - :material-apache-kafka: **Kafka & AWS MSK Integration** --- Seamlessly integrate with Kafka and AWS MSK for performance testing of event-driven architectures. The operator provides out-of-the-box support for authenticated Kafka. -
+ [:octicons-arrow-right-24: Configure Kafka](how-to-guides/configuration/configure-kafka.md) + +- :material-chart-timeline: **Native OpenTelemetry Support** + + --- + + Export traces and metrics directly from Locust using native OpenTelemetry integration. No sidecar requiredβ€”configure endpoints, protocols, and custom attributes directly in your CR. + + [:octicons-arrow-right-24: Configure OpenTelemetry](how-to-guides/observability/configure-opentelemetry.md) + +- :material-key-variant: **Secret & ConfigMap Injection** + + --- + Securely inject credentials, API keys, and configuration from Kubernetes Secrets and ConfigMaps. Supports environment variables and file mounts with automatic prefix handling. + + [:octicons-arrow-right-24: Inject secrets](how-to-guides/security/inject-secrets.md) + +- :material-harddisk: **Flexible Volume Mounting** + + --- + + Mount test data, certificates, and configuration files from PersistentVolumes, ConfigMaps, or Secrets. Target specific components (master, worker, or both) with fine-grained control. + + [:octicons-arrow-right-24: Mount volumes](how-to-guides/configuration/mount-volumes.md) + +- :material-tune-vertical: **Separate Resource Specs** + + --- + + Configure resources, labels, and annotations independently for master and worker pods. Optimize each component based on its specific needs. + + [:octicons-arrow-right-24: API Reference](api_reference.md) + +- :material-list-status: **Enhanced Status Tracking** + + --- + + Monitor test progress with rich status information including phase (Pending, Running, Succeeded, Failed), Kubernetes conditions, and worker connection status. 
+ + [:octicons-arrow-right-24: Monitor test status](how-to-guides/observability/monitor-test-status.md) Β· [:octicons-arrow-right-24: API Reference](api_reference.md#status-fields) + + diff --git a/docs/getting_started.md b/docs/getting_started.md deleted file mode 100644 index ea101865..00000000 --- a/docs/getting_started.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Getting Started -description: How to get started using Locust Kubernetes operator -tags: - - tutorial - - getting started - - setup - - guide - - quickstart ---- - -# Getting started - -Only few simple steps are needed to get a test up and running in the cluster. The following is a step-by-step guide on how to achieve this. - -### :material-language-python: Step 1: Write a valid Locust test script - -For this example, we will be using the following script - -```python title="demo_test.py" -from locust import HttpUser, task - -class User(HttpUser): # (1)! - @task #(2)! - def get_employees(self) -> None: - """Get a list of employees.""" - self.client.get("/api/v1/employees") #(3)! -``` - -1. Class representing `users` that will be simulated by Locust. -2. One or more `task` that each simulated `user` will be performing. -3. HTTP call to a specific endpoint. - -!!! note - - To be able to run performance tests effectivly, an understanding of _Locust_ which is the underline load generation tool is required. All tests must be valid _locust_ tests. - - _Locust_ provide a very good and detail rich documentation that can be [found here](https://docs.locust.io/en/stable/quickstart.html). - -### :material-file-document-outline: Step 2: Write a valid custom resource for _LocustTest_ CRD - -A simple _custom resource_ for the previous test can be something like the following example; - -> To streamline this step, [_intensive-brew_](https://abdelrhmanhamouda.github.io/intensive-brew/) should be used. It is a simple cli tool that converts a declarative yaml into a compatible LocustTest kubernetes custom resource. 
- -```yaml title="locusttest-cr.yaml" -apiVersion: locust.io/v1 # (1)! -kind: LocustTest # (2)! -metadata: - name: demo.test # (3)! -spec: - image: locustio/locust:latest # (4)! - masterCommandSeed: # (5)! - --locustfile /lotest/src/demo_test.py - --host https://dummy.restapiexample.com - --users 100 - --spawn-rate 3 - --run-time 3m - workerCommandSeed: --locustfile /lotest/src/demo_test.py # (6)! - workerReplicas: 3 # (7)! - configMap: demo-test-map # (8)! - libConfigMap: demo-lib-map # (9)! -``` - -1. API version based on the deployed _LocustTest_ CRD. -2. Resource kind. -3. The name field used by the operator to infer the names of test generated resources. While this value is insignificant to the Operator - itself, it is important to keep a good convention here since it helps in tracking resources across the cluster when needed. -4. Image to use for the load generation pods -5. Seed command for the _master_ node. The _Operator_ will append to this seed command/s all operational parameters needed for the _master_ - to perform its job e.g. ports, rebalancing settings, timeouts, etc... -6. Seed command for the _worker_ node. The _Operator_ will append to this seed command/s all operational parameters needed for the _worker_ - to perform its job e.g. ports, master node url, master node ports, etc... -7. The amount of _worker_ nodes to spawn in the cluster. -8. [Optional] Name of _configMap_ to mount into the pod -9. [Optional] Name of _configMap_ containing lib directory files to mount at `/opt/locust/lib` - -#### Other options - -##### Labels and annotations - -You can add labels and annotations to generated Pods. For example: - -```yaml title="locusttest-cr.yaml" -apiVersion: locust.io/v1 -... -spec: - image: locustio/locust:latest - labels: # (1)! - master: - locust.io/role: "master" - myapp.com/testId: "abc-123" - myapp.com/tenantId: "xyz-789" - worker: - locust.io/role: "worker" - annotations: # (2)! 
- master: - myapp.com/threads: "1000" - myapp.com/version: "2.1.0" - worker: - myapp.com/version: "2.1.0" - ... -``` - -1. [Optional] Labels are attached to both master and worker pods. They can later be used to identify pods belonging to a particular execution context. This is useful, for example, when tests are deployed programmatically. A launcher application can query the Kubernetes API for specific resources. -2. [Optional] Annotations too are attached to master and worker pods. They can be used to include additional context about a test. For example, configuration parameters of the software system being tested. - -Both labels and annotations can be added to the Prometheus configuration, so that metrics are associated with the appropriate information, such as the test and tenant ids. You can read more about this in the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) site. - -### :material-rocket-launch-outline: Step 3: Deploy _Locust k8s Operator_ in the cluster. - -The recommended way to install the _Operator_ is by using the official HELM chart. Documentation on how to perform that -is [available here](helm_deploy.md). - -### :material-cogs: Step 4: Deploy test as a configMap - -For the purposes of this example, the `demo_test.py` test previously demonstrated will be deployed into the cluster as a _configMap_ that -the _Operator_ will mount to the load generation pods. -To deploy the test as a configMap, run the bellow command following this -template `kubectl create configmap --from-file `: - -- `kubectl create configmap demo-test-map --from-file demo_test.py` - -#### :material-folder-multiple: Step 4.1: Deploy lib files as a configMap (Optional) - -!!! info "What are lib files and why use this feature?" - **Lib files** are Python modules and libraries that your Locust tests depend on. 
When your tests require custom helper functions, utilities, or shared code that should be available across multiple test files, this feature allows you to package and deploy them alongside your tests. - -##### How it works - -The Locust Kubernetes Operator provides a mechanism to deploy your custom Python libraries as a ConfigMap, which will then be mounted to the `/opt/locust/lib` directory inside all Locust pods (both master and worker). This allows your test scripts to import and use these libraries. - -For example, if you have the following structure: - -``` -project/ -β”œβ”€β”€ my_test.py # Your main Locust test file -└── lib/ - β”œβ”€β”€ helpers.py # Helper functions - β”œβ”€β”€ utils.py # Utility functions - └── models.py # Data models -``` - -Your test might import these libraries like this: - -```python -# in my_test.py -from lib.helpers import some_helper_function -from lib.utils import format_data -``` - -To make these imports work when your test runs in Kubernetes, you need to: - -1. Deploy your lib files as a ConfigMap -2. Reference this ConfigMap in your LocustTest custom resource - -##### Step-by-step instructions - -**1. Create a ConfigMap from your lib directory:** - -You can deploy all library files from a directory: - -```bash -# Deploy all files from the lib/ directory as a ConfigMap -kubectl create configmap demo-lib-map --from-file=lib/ -``` - -Alternatively, you can create it from individual files: - -```bash -# Deploy specific files as a ConfigMap -kubectl create configmap demo-lib-map --from-file=lib/helpers.py --from-file=lib/utils.py -``` - -**2. Reference the lib ConfigMap in your LocustTest custom resource:** - -```yaml -apiVersion: locust.io/v1 -kind: LocustTest -metadata: - name: example-locusttest -spec: - masterConfig: - replicas: 1 - workerConfig: - replicas: 2 - configMap: demo-test-map # Your test script ConfigMap - libConfigMap: demo-lib-map # Your lib files ConfigMap -``` - -!!! 
tip "Organizing your code" - This feature is especially useful when: - - 1. You have complex test scenarios that benefit from modular code - 2. You want to share code between multiple Locust tests - 3. You need to keep your test scripts clean by separating implementation details - -!!! note "Fresh cluster resources" - - Fresh cluster resources are allocated for each running test, meaning that tests **DO NOT** have any cross impact on each other. - -### :material-play-circle-outline: Step 5: Start the test by deploying the _LocustTest_ custom resource. - -Deploying a _custom resource_, signals to the _Operator_ the desire to start a test and thus the _Operator_ starts creating and scheduling -all needed resources. -To do that, deploy the custom resource following this template `kubectl apply -f .yaml`: - -- `kubectl apply -f locusttest-cr.yaml` - -#### Step 5.1: Check cluster for running resources - -At this point, it is possible to check the cluster and all required resources will be running based on the passed configuration in the -custom resource. - -The Operator will create the following resources in the cluster for each valid custom resource: - -- A kubernetes _service_ for the _master_ node so it is reachable by other _worker_ nodes. -- A kubernetes _Job_ to manage the _master_ node. -- A kubernetes _Job_ to manage the _worker_ node. - -### :material-delete-outline: Step 6: Clear resources after test run - -In order to remove the cluster resources after a test run, simply remove the custom resource and the _Operator_ will react to this event by -cleaning the cluster of all **related** resources. 
-To delete a resource, run the below command following this template `kubectl delete -f .yaml`: - -- `kubectl delete -f locusttest-cr.yaml` diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md new file mode 100644 index 00000000..cd63b1a0 --- /dev/null +++ b/docs/getting_started/index.md @@ -0,0 +1,119 @@ +--- +title: Quick Start +description: Get your first distributed load test running on Kubernetes in 5 minutes +tags: + - quickstart + - tutorial + - getting started +--- + +# Quick Start (5 minutes) + +Get your first distributed load test running on Kubernetes. + +## Prerequisites + +- Kubernetes cluster (any: Minikube, Kind, GKE, EKS, AKS) +- kubectl configured +- Helm 3 installed + +## 1. Install the operator + +```bash +# Add the Helm repository +helm repo add locust-k8s-operator https://abdelrhmanhamouda.github.io/locust-k8s-operator/ +helm repo update + +# Install the operator into a dedicated namespace +helm install locust-operator locust-k8s-operator/locust-k8s-operator \ + --namespace locust-system \ + --create-namespace +``` + +*Installs the operator into a dedicated namespace. Takes ~30 seconds.* + +## 2. Create a test script + +```bash +cat > demo_test.py << 'EOF' +from locust import HttpUser, task + +class DemoUser(HttpUser): + @task # Define a task that users will execute + def get_homepage(self): + # Simple test that requests the homepage repeatedly + self.client.get("/") +EOF +``` + +*Simple test that requests the homepage repeatedly.* + +## 3. Deploy the test as ConfigMap + +```bash +# Make your test script available to Kubernetes pods +kubectl create configmap demo-test --from-file=demo_test.py +``` + +*Makes your test script available to Kubernetes pods.* + +## 4. Run the load test + +```bash +kubectl apply -f - <5000 RPS) +- Benchmarking scenarios where you need maximum performance +- Tests with bursty traffic patterns + +**Risk:** Pods can consume all available CPU on the node, potentially affecting other workloads. 
Use with [node affinity](../scaling/use-node-affinity.md) to isolate tests on dedicated nodes. + +## Resource sizing guidelines + +**Master pod:** + +- CPU: 100-500m (master coordinates, doesn't generate load) +- Memory: 256-512Mi (depends on test complexity and UI usage) +- Usually 1 replica + +**Worker pod:** + +- CPU: 500-1000m per worker (depends on test script complexity) +- Memory: 512Mi-1Gi per worker (depends on data handling) +- Scale workers based on user count (see [Scale worker replicas](../scaling/scale-workers.md)) + +**Example sizing for 1000 users:** + +```yaml +master: + resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "512Mi" + cpu: "500m" + +worker: + replicas: 20 # ~50 users per worker + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + # CPU limit omitted for performance +``` + +## Verify resource configuration + +Check actual resource specs on running pods: + +```bash +# Get master pod name +MASTER_POD=$(kubectl get pod -l performance-test-pod-name=resource-optimized-test-master -o jsonpath='{.items[0].metadata.name}') + +# Verify resource configuration +kubectl describe pod $MASTER_POD | grep -A 10 "Limits:\|Requests:" +``` + +Expected output: + +``` +Limits: + memory: 512Mi +Requests: + cpu: 100m + memory: 256Mi +``` + +## Monitor resource usage + +Check actual resource consumption: + +```bash +# Real-time resource usage +kubectl top pod -l performance-test-name=resource-optimized-test +``` + +If pods consistently hit memory limits, they'll be OOMKilled. If they hit CPU limits, they'll be throttled (slower performance). 
+ +## What's next + +- **[Scale worker replicas](../scaling/scale-workers.md)** β€” Calculate worker count for high-load scenarios +- **[Use node affinity](../scaling/use-node-affinity.md)** β€” Run resource-intensive tests on dedicated nodes +- **[Configure tolerations](../scaling/configure-tolerations.md)** β€” Schedule tests on high-performance node pools diff --git a/docs/how-to-guides/configuration/configure-ttl.md b/docs/how-to-guides/configuration/configure-ttl.md new file mode 100644 index 00000000..deccc8d4 --- /dev/null +++ b/docs/how-to-guides/configuration/configure-ttl.md @@ -0,0 +1,217 @@ +--- +title: Configure automatic cleanup with TTL +description: Automatically remove finished jobs and pods after tests complete +tags: + - configuration + - cleanup + - ttl + - automation +--- + +# Configure automatic cleanup with TTL + +Automatically clean up finished master and worker jobs and their pods after tests complete using Kubernetes TTL (time-to-live). + +## Prerequisites + +- Kubernetes 1.12+ (TTL controller support) +- Locust Kubernetes Operator installed + +## What gets cleaned up + +When TTL is configured: + +- βœ“ **Cleaned up:** Master and worker Jobs and their Pods +- βœ— **Kept:** LocustTest CR, ConfigMaps, Secrets, Services + +This allows you to review test results (via `kubectl get locusttest`) while automatically removing resource-consuming pods. + +## Set TTL via Helm values + +Configure TTL for all tests during operator installation: + +```yaml +# values.yaml +locustPods: + ttlSecondsAfterFinished: 3600 # Clean up after 1 hour +``` + +Install or upgrade the operator: + +```bash +helm upgrade --install locust-operator locust-k8s-operator/locust-k8s-operator \ + --namespace locust-system \ + -f values.yaml +``` + +All LocustTest resources will inherit this TTL value. 
+ +## Set TTL via CLI + +Override TTL at installation time: + +```bash +helm install locust-operator locust-k8s-operator/locust-k8s-operator \ + --namespace locust-system \ + --set locustPods.ttlSecondsAfterFinished=7200 # 2 hours +``` + +## Common TTL values + +| Value | Duration | Use case | +|-------|----------|----------| +| `0` | Immediate | Use with caution -- risk of race condition with log collection | +| `300` | 5 minutes | Quick tests where results are exported immediately | +| `3600` | 1 hour | Standard tests with manual result review | +| `7200` | 2 hours | Long tests with delayed result analysis | +| `86400` | 24 hours | Tests requiring extensive post-analysis | +| `""` (empty) | Never | Development or when using external cleanup | + +## Disable TTL + +To disable automatic cleanup: + +```yaml +# values.yaml +locustPods: + ttlSecondsAfterFinished: "" # Empty string disables TTL +``` + +Or omit the field entirely: + +```yaml +# values.yaml +locustPods: + # ttlSecondsAfterFinished not set - no TTL +``` + +Without TTL, jobs and pods persist until manually deleted. 
+ +## Verify TTL configuration + +Check that TTL is set on created jobs: + +```bash +# Run a test +kubectl apply -f locusttest.yaml + +# Check master job TTL +kubectl get job my-test-master -o yaml | grep ttlSecondsAfterFinished +``` + +Expected output: + +```yaml +ttlSecondsAfterFinished: 3600 +``` + +## Watch automatic cleanup + +Monitor cleanup in action: + +```bash +# List all jobs with timestamps +kubectl get jobs -o wide --watch + +# After TTL expires, jobs transition to deleted +``` + +Verify cleanup occurred: + +```bash +# Jobs should be gone after TTL +kubectl get jobs -l performance-test-name=my-test + +# Pods should also be gone +kubectl get pods -l performance-test-name=my-test + +# But CR still exists +kubectl get locusttest my-test +``` + +## Example: CI/CD with automatic cleanup + +For CI/CD pipelines, use a short TTL to allow time for log collection: + +```yaml +# values.yaml +locustPods: + ttlSecondsAfterFinished: 60 # Clean up 60 seconds after completion +``` + +!!! warning "Avoid `ttlSecondsAfterFinished: 0` in CI/CD" + Setting TTL to `0` creates a race condition: Kubernetes may delete the job and its + pods before your pipeline can collect logs with `kubectl logs`. Use at least `60` + seconds to give your pipeline time to retrieve results. 
+ +In your pipeline: + +```bash +# Run the test +kubectl apply -f locusttest.yaml + +# Wait for completion +kubectl wait --for=jsonpath='{.status.phase}'=Succeeded \ + locusttest/ci-test --timeout=10m + +# Collect results within the 60-second TTL window +kubectl logs job/ci-test-master > results.log + +# Jobs and pods will be cleaned up after the TTL expires +# CR persists for historical tracking +``` + +## Example: Development with manual cleanup + +During development, disable TTL to inspect pods: + +```yaml +# values.yaml +locustPods: + ttlSecondsAfterFinished: "" # Disable automatic cleanup +``` + +Clean up manually when done: + +```bash +# Delete just the test +kubectl delete locusttest my-test + +# Or delete all test resources +kubectl delete locusttest --all +``` + +## Backward compatibility + +The operator maintains backward compatibility with the old configuration path: + +```yaml +# Old path (still supported) +config: + loadGenerationJobs: + ttlSecondsAfterFinished: 3600 + +# New path (recommended) +locustPods: + ttlSecondsAfterFinished: 3600 +``` + +Helper functions in the Helm chart ensure both paths work. Use the new path for future configurations. + +## How TTL works + +Kubernetes TTL controller monitors finished jobs: + +1. Test completes (phase: Succeeded or Failed) +2. Job transitions to finished state +3. TTL countdown starts +4. After TTL seconds, controller deletes job +5. Cascading deletion removes dependent pods + +**Important:** TTL countdown starts when the job finishes, not when it starts. 
+ +## What's next + +- **[Scale worker replicas](../scaling/scale-workers.md)** β€” Size tests appropriately to minimize wasted resources +- **[Configure resources](configure-resources.md)** β€” Set resource limits to prevent cluster exhaustion +- **[Configure OpenTelemetry](../observability/configure-opentelemetry.md)** β€” Export metrics before cleanup diff --git a/docs/how-to-guides/configuration/mount-volumes.md b/docs/how-to-guides/configuration/mount-volumes.md new file mode 100644 index 00000000..82aa5753 --- /dev/null +++ b/docs/how-to-guides/configuration/mount-volumes.md @@ -0,0 +1,332 @@ +--- +title: Mount volumes to test pods +description: Attach data, certificates, or configuration files from various sources +tags: + - configuration + - volumes + - storage +--- + +# Mount volumes to test pods + +Mount test data, TLS certificates, or configuration files into Locust pods from PersistentVolumes, ConfigMaps, Secrets, or EmptyDir. + +!!! info "v2 API only" + Volume mounting is only available in the v2 API. 
+ +## Prerequisites + +- Locust Kubernetes Operator v2.0+ installed +- Volume source created (PVC, ConfigMap, or Secret) + +## Mount a PersistentVolumeClaim + +Use a PVC to share large test data files across pods: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: pvc-volume-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + volumes: # Define the volume + - name: test-data + persistentVolumeClaim: + claimName: test-data-pvc # Must exist in same namespace + volumeMounts: # Mount into pods + - name: test-data + mountPath: /data # Access files at /data in containers + target: both # Mount to both master and worker pods +``` + +Create the PVC first: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-data-pvc +spec: + accessModes: + - ReadOnlyMany # Multiple pods can read + resources: + requests: + storage: 10Gi +``` + +!!! warning "StorageClass compatibility" + Not all StorageClasses support `ReadOnlyMany` (ROX) access mode. Check your cluster's StorageClass documentation to confirm ROX support before using this access mode. + +Apply both: + +```bash +kubectl apply -f pvc.yaml +kubectl apply -f locusttest-pvc.yaml +``` + +## Mount a ConfigMap + +Mount configuration files from a ConfigMap: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: configmap-volume-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + volumes: + - name: config-files + configMap: + name: app-config # ConfigMap containing config files + volumeMounts: + - name: config-files + mountPath: /config # Files appear at /config/key1, /config/key2, etc. 
+ target: both +``` + +Create the ConfigMap: + +```bash +kubectl create configmap app-config \ + --from-file=config.json \ + --from-file=settings.yaml +``` + +Your test script can read files: + +```python +import json + +# Read config from mounted volume +with open('/config/config.json') as f: + config = json.load(f) +``` + +## Mount a Secret + +Mount TLS certificates or API keys as files: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: secret-volume-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + volumes: + - name: tls-certs + secret: + secretName: tls-secret + volumeMounts: + - name: tls-certs + mountPath: /certs + readOnly: true # Best practice for secrets + target: both +``` + +Create the secret: + +```bash +kubectl create secret generic tls-secret \ + --from-file=tls.crt=cert.pem \ + --from-file=tls.key=key.pem +``` + +Use in test: + +```python +import requests + +# Use client certificates +response = requests.get( + 'https://api.example.com', + cert=('/certs/tls.crt', '/certs/tls.key') +) +``` + +## Use EmptyDir for temporary storage + +Create temporary storage available within a pod (shared between containers in the same pod, but not across pods): + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: emptydir-volume-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + volumes: + - name: cache + emptyDir: {} # Created when pod starts, deleted when pod stops + volumeMounts: + - name: cache + mountPath: /tmp/cache + target: worker # Only workers need cache +``` + +**Use cases for EmptyDir:** + +- Temporary file processing +- Download cache +- 
Scratch space for generated data + +**Note:** EmptyDir is pod-specific. Each worker pod has its own EmptyDir, not shared across pods. + +## Target specific pod types + +Control which pods receive the volume mount: + +```yaml +volumeMounts: + - name: test-data + mountPath: /data + target: master # Options: master, worker, both (default) +``` + +| Target | Master | Worker | Use case | +|--------|--------|--------|----------| +| `master` | βœ“ | βœ— | Master-only processing or UI data | +| `worker` | βœ— | βœ“ | Worker-specific data or libraries | +| `both` (default) | βœ“ | βœ“ | Shared test data or configuration | + +**Example with different targets:** + +```yaml +volumes: + - name: shared-data + persistentVolumeClaim: + claimName: shared-pvc + - name: worker-cache + emptyDir: {} + +volumeMounts: + - name: shared-data + mountPath: /data + target: both # Both master and workers read test data + - name: worker-cache + mountPath: /cache + target: worker # Only workers need cache space +``` + +## Reserved mount paths + +The following paths are reserved and cannot be used for volume mounts: + +| Path | Purpose | Customizable | +|------|---------|--------------| +| `/lotest/src` | Test script mount point | Yes, via `testFiles.srcMountPath` | +| `/opt/locust/lib` | Library mount point | Yes, via `testFiles.libMountPath` | + +If you customize these paths, the custom paths become reserved instead. + +## Reserved volume names + +The following volume name patterns are reserved: + +| Pattern | Purpose | +|---------|---------| +| `-master` | Master ConfigMap volume | +| `-worker` | Worker ConfigMap volume | +| `locust-lib` | Library ConfigMap volume | +| `secret-*` | Secret volumes from `env.secretMounts` | + +Choose different names for your volumes to avoid conflicts. 
+ +## Verify volume mount + +Check that volumes are mounted correctly: + +```bash +# Get a worker pod name +WORKER_POD=$(kubectl get pod -l performance-test-pod-name=<test-name>-worker -o jsonpath='{.items[0].metadata.name}') + +# Check mount exists +kubectl exec $WORKER_POD -- ls -la /data + +# Verify file contents +kubectl exec $WORKER_POD -- cat /data/test-file.json +``` + +## Full example with multiple volumes + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: multi-volume-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + volumes: + # Large test data on PVC + - name: test-data + persistentVolumeClaim: + claimName: test-data-pvc + # TLS certificates from secret + - name: tls-certs + secret: + secretName: api-certs + # Configuration from ConfigMap + - name: app-config + configMap: + name: test-config + # Temporary cache per worker + - name: cache + emptyDir: {} + volumeMounts: + - name: test-data + mountPath: /data + target: both # All pods read test data + - name: tls-certs + mountPath: /certs + readOnly: true + target: both # All pods use same certs + - name: app-config + mountPath: /config + target: both # All pods read config + - name: cache + mountPath: /tmp/cache + target: worker # Only workers use cache +``` + +## What's next + +- **[Inject secrets](../security/inject-secrets.md)** — Pass credentials as environment variables instead of files +- **[Use private registry](use-private-registry.md)** — Pull custom images with volume-specific tools +- **[Configure resources](configure-resources.md)** — Ensure pods have enough resources for I/O operations diff --git a/docs/how-to-guides/configuration/use-private-registry.md new file mode 100644 index 00000000..e35a40f0 --- /dev/null +++ 
b/docs/how-to-guides/configuration/use-private-registry.md @@ -0,0 +1,229 @@ +--- +title: Use a private image registry +description: Pull Locust images from private registries with authentication +tags: + - configuration + - images + - security +--- + +# Use a private image registry + +Pull custom Locust images from private container registries like Docker Hub, GitHub Container Registry, or AWS ECR. + +## Prerequisites + +- Registry credentials (username/password or access token) +- Custom Locust image pushed to private registry + +## Create image pull secret + +Store registry credentials in a Kubernetes secret: + +```bash +kubectl create secret docker-registry my-registry-secret \ + --docker-server=ghcr.io \ + --docker-username=myusername \ + --docker-password=ghp_myPersonalAccessToken +``` + +**For specific registries:** + +=== "GitHub Container Registry" + + ```bash + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username=myusername \ + --docker-password=ghp_myPersonalAccessToken + ``` + +=== "Docker Hub" + + ```bash + kubectl create secret docker-registry dockerhub-secret \ + --docker-server=docker.io \ + --docker-username=myusername \ + --docker-password=myAccessToken + ``` + +=== "AWS ECR" + + ```bash + # Get ECR login token (expires after 12 hours) + ECR_TOKEN=$(aws ecr get-login-password --region <region>) + kubectl create secret docker-registry ecr-secret \ + --docker-server=<aws_account_id>.dkr.ecr.<region>.amazonaws.com \ + --docker-username=AWS \ + --docker-password="${ECR_TOKEN}" + ``` + +=== "Google Container Registry" + + ```bash + # Use JSON key file + kubectl create secret docker-registry gcr-secret \ + --docker-server=gcr.io \ + --docker-username=_json_key \ + --docker-password="$(cat key.json)" + ``` + +!!! note "Namespace" + + The image pull secret must be created in the same namespace as the LocustTest CR that references it. 
+ +Verify the secret exists: + +```bash +kubectl get secret my-registry-secret +``` + +## Reference secret in LocustTest + +Add `imagePullSecrets` to your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: private-registry-test +spec: + image: ghcr.io/mycompany/locust-custom:v1.2.3 # Private image + imagePullSecrets: # Reference the secret + - name: my-registry-secret + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 +``` + +Apply the CR: + +```bash +kubectl apply -f locusttest-private.yaml +``` + +## Configure image pull policy + +Control when Kubernetes pulls the image: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: pull-policy-test +spec: + image: ghcr.io/mycompany/locust-custom:latest + imagePullPolicy: Always # Pull image every time + imagePullSecrets: + - name: my-registry-secret + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 +``` + +**Pull policy options:** + +| Policy | Behavior | When to use | +|--------|----------|-------------| +| `Always` | Pull image on every pod creation | Development with `:latest` tag or frequently updated images | +| `IfNotPresent` | Pull only if not cached locally | Stable versioned images (default for non-`:latest` tags) | +| `Never` | Never pull, use cached image only | Pre-loaded images or air-gapped environments | + +**Recommendation:** Use `Always` with `:latest` tags. Use `IfNotPresent` or omit (default) with version tags like `v1.2.3`. 
+ +## Verify image pull + +Check that pods successfully pulled the image: + +```bash +# Get pod status +kubectl get pods -l performance-test-name=private-registry-test + +# Check image field +kubectl get pod -l performance-test-pod-name=private-registry-test-master -o jsonpath='{.items[0].spec.containers[0].image}' +``` + +Expected output: + +``` +ghcr.io/mycompany/locust-custom:v1.2.3 +``` + +Verify pull policy: + +```bash +kubectl get pod -l performance-test-pod-name=private-registry-test-master -o jsonpath='{.items[0].spec.containers[0].imagePullPolicy}' +``` + +## Troubleshoot ImagePullBackOff + +If pods fail with `ImagePullBackOff`: + +```bash +# Check pod events +kubectl describe pod -l performance-test-name=private-registry-test | grep -A 10 "Events:" +``` + +**Common issues:** + +**Authentication failed:** + +``` +Failed to pull image: unauthorized: authentication required +``` + +Fix: Verify secret credentials are correct. Recreate the secret if needed. + +**Image not found:** + +``` +Failed to pull image: manifest unknown: manifest unknown +``` + +Fix: Verify image name, tag, and registry URL. 
Check the image exists: + +```bash +# For GHCR or any Docker-compatible registry +docker pull ghcr.io/mycompany/locust-custom:v1.2.3 + +# For AWS ECR +aws ecr describe-images --repository-name locust-custom --region us-east-1 +``` + +**Wrong secret referenced:** + +``` +Couldn't find key .dockerconfigjson in Secret +``` + +Fix: Verify secret name in `imagePullSecrets` matches the created secret: + +```bash +kubectl get secrets | grep registry +``` + +**Network policy blocking registry:** + +``` +Failed to pull image: dial tcp: i/o timeout +``` + +Fix: Check network policies allow egress to the registry: + +```bash +kubectl get networkpolicies +``` + +## What's next + +- **[Mount volumes](mount-volumes.md)** — Add test data or certificates to pods +- **[Inject secrets](../security/inject-secrets.md)** — Pass API keys and credentials as environment variables +- **[Configure resources](configure-resources.md)** — Set CPU and memory limits for custom images diff --git a/docs/how-to-guides/index.md b/docs/how-to-guides/index.md new file mode 100644 index 00000000..c534e7d3 --- /dev/null +++ b/docs/how-to-guides/index.md @@ -0,0 +1,47 @@ +--- +title: How-To Guides +description: Task-oriented recipes for specific goals +tags: + - how-to + - guides + - recipes +--- + +# How-To Guides + +Task-oriented recipes for specific goals. Each guide walks you through a complete solution from start to finish. 
+ +## Configuration + +Set up and configure your load tests: + +- **[Configure resource limits and requests](configuration/configure-resources.md)** β€” Control CPU and memory allocation for master and worker pods +- **[Use a private image registry](configuration/use-private-registry.md)** β€” Pull Locust images from private registries with authentication +- **[Mount volumes to test pods](configuration/mount-volumes.md)** β€” Attach data, certificates, or configuration files from various sources +- **[Configure Kafka and AWS MSK integration](configuration/configure-kafka.md)** β€” Set up authenticated Kafka access for event-driven testing +- **[Configure automatic cleanup with TTL](configuration/configure-ttl.md)** β€” Automatically remove finished jobs and pods after tests complete + +## Observability + +Monitor and measure test performance: + +- **[Configure OpenTelemetry integration](observability/configure-opentelemetry.md)** β€” Export traces and metrics using native OTel support +- **[Monitor test status and health](observability/monitor-test-status.md)** β€” Track test progress, phase transitions, conditions, and pod health +- **[Set up Prometheus monitoring](../metrics_and_dashboards.md)** β€” Collect and visualize test metrics with Prometheus and Grafana + +## Scaling + +Scale tests for high load and optimize resource placement: + +- **[Scale worker replicas for high load](scaling/scale-workers.md)** β€” Size worker replicas based on simulated user count +- **[Use node affinity for dedicated test nodes](scaling/use-node-affinity.md)** β€” Target specific nodes using labels and affinity rules +- **[Configure tolerations for tainted nodes](scaling/configure-tolerations.md)** β€” Schedule pods on nodes with taints +- **[Use node selector for simple node targeting](scaling/use-node-selector.md)** β€” Target nodes using simple label matching + +## Security + +Secure your tests and manage credentials: + +- **[Inject secrets into test 
pods](security/inject-secrets.md)** β€” Use Kubernetes secrets for API keys, tokens, and credentials +- **[Configure pod security settings](security/configure-pod-security.md)** β€” Set security contexts, RBAC, and network policies for test pods +- **[Secure container registry access](configuration/use-private-registry.md)** β€” Authenticate with private container registries diff --git a/docs/how-to-guides/observability/configure-opentelemetry.md b/docs/how-to-guides/observability/configure-opentelemetry.md new file mode 100644 index 00000000..4fde591b --- /dev/null +++ b/docs/how-to-guides/observability/configure-opentelemetry.md @@ -0,0 +1,207 @@ +--- +title: Configure OpenTelemetry integration +description: Enable native OpenTelemetry support for metrics and traces export from Locust tests +tags: + - observability + - opentelemetry + - metrics + - traces + - monitoring +--- + +# Configure OpenTelemetry integration + +Native OpenTelemetry support in v2 eliminates the need for a metrics exporter sidecar. Your Locust tests can export metrics and traces directly to an OTel Collector. + +## Prerequisites + +You need an OpenTelemetry Collector deployed in your cluster. The collector receives telemetry data from Locust and forwards it to your observability backend (Prometheus, Jaeger, Tempo, etc.). 
+ +## Step 1: Verify OTel Collector endpoint connectivity + +Determine the correct endpoint for your OTel Collector: + +| Scenario | Endpoint Format | Example | +|----------|-----------------|---------| +| Same namespace | `http://<service-name>:<port>` | `http://otel-collector:4317` | +| Different namespace | `http://<service-name>.<namespace>:<port>` | `http://otel-collector.monitoring:4317` | +| External collector | `https://<hostname>:<port>` | `https://otel.example.com:4317` | + +**Test connectivity** from a debug pod: + +```bash +kubectl run debug --image=busybox --rm -it -- nc -zv otel-collector.monitoring 4317 +``` + +If the connection succeeds, you'll see: `otel-collector.monitoring (10.0.1.5:4317) open` + +## Step 2: Configure in LocustTest CR + +Add the `observability.openTelemetry` block to your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: otel-enabled-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + observability: + openTelemetry: + enabled: true # Enable OTel integration + endpoint: "http://otel-collector.monitoring:4317" # OTel Collector endpoint + protocol: "grpc" # Use gRPC (or "http/protobuf") + insecure: false # Use TLS (set true for dev/testing) + extraEnvVars: + OTEL_SERVICE_NAME: "my-load-test" # Service name in traces + OTEL_RESOURCE_ATTRIBUTES: "environment=staging,team=platform" # Resource attributes +``` + +**Configuration fields explained:** + +- `enabled`: Set to `true` to activate OpenTelemetry integration +- `endpoint`: OTel Collector URL (scheme://hostname:port). Include the scheme (`http://` or `https://`) for compatibility across OTel SDK versions. 
+- `protocol`: Transport protocol + - `grpc` (recommended, default): Use gRPC transport + - `http/protobuf`: Use HTTP/protobuf transport +- `insecure`: TLS configuration + - `false` (default): Use TLS for secure communication + - `true`: Skip TLS verification (development/testing only) + + !!! note + TLS behavior primarily depends on the endpoint scheme (`http://` vs `https://`). The `OTEL_EXPORTER_OTLP_INSECURE` environment variable is set by the operator but may not be recognized by all OTel SDK implementations (e.g., Python). Use `http://` endpoints for non-TLS connections. +- `extraEnvVars`: Additional OpenTelemetry environment variables + - `OTEL_SERVICE_NAME`: Identifier for this test in traces + - `OTEL_RESOURCE_ATTRIBUTES`: Metadata tags (key=value pairs, comma-separated) + +## Step 3: Deploy and verify + +Apply your LocustTest CR: + +```bash +kubectl apply -f locusttest.yaml +``` + +**Check that OTel environment variables were injected:** + +```bash +kubectl get pod -l performance-test-name=otel-enabled-test -o yaml | grep OTEL_ +``` + +**Expected environment variables:** + +| Variable | Value | Purpose | +|----------|-------|---------| +| `OTEL_TRACES_EXPORTER` | `otlp` | Enable OTLP trace export | +| `OTEL_METRICS_EXPORTER` | `otlp` | Enable OTLP metrics export | +| `OTEL_EXPORTER_OTLP_ENDPOINT` | Your endpoint | Collector address | +| `OTEL_EXPORTER_OTLP_PROTOCOL` | `grpc` or `http/protobuf` | Transport protocol | +| `OTEL_EXPORTER_OTLP_INSECURE` | `true` (if set) | Skip TLS verification | +| `OTEL_SERVICE_NAME` | Your service name | Service identifier | +| `OTEL_RESOURCE_ATTRIBUTES` | Your attributes | Resource metadata | + +## Step 4: Query traces and metrics + +Once your test is running, telemetry flows to your OTel Collector and downstream backends. + +**Prometheus metrics** (if OTel Collector exports to Prometheus): + +!!! note + The exact metric names depend on your OTel Collector pipeline configuration and Locust's OTel instrumentation. 
The examples below assume the Collector exports to Prometheus with default naming. + +```promql +# Request rate by service +rate(locust_requests_total{service_name="my-load-test"}[1m]) + +# Average response time +avg(locust_response_time_seconds{service_name="my-load-test"}) +``` + +**Jaeger/Tempo traces** (if OTel Collector exports to tracing backend): + +Filter by: +- Service name: `my-load-test` +- Resource attributes: `environment=staging`, `team=platform` + +Look for: +- Request spans showing HTTP calls +- Duration metrics for performance analysis +- Error traces for debugging failures + +## Troubleshooting + +### No traces appearing in backend + +**Check Locust logs for OTel errors:** + +```bash +kubectl logs job/otel-enabled-test-master | grep -i otel +``` + +**Common issues:** + +| Problem | Symptom | Solution | +|---------|---------|----------| +| Wrong endpoint | Connection refused | Verify endpoint with `nc -zv` test | +| TLS mismatch | TLS handshake errors | Set `insecure: true` for testing, or fix TLS certificates | +| Collector not receiving OTLP | No error in logs but no data | Check collector logs and verify protocol matches | +| Network policy blocking | Connection timeouts | Ensure NetworkPolicy allows egress to collector | + +**Check OTel Collector logs:** + +```bash +kubectl logs -n monitoring deployment/otel-collector | grep -i error +``` + +### Performance impact + +OpenTelemetry adds overhead to your test execution: + +- **Overhead:** Generally minimal overhead, varying with telemetry volume, sampling rate, and collector proximity. 
+- **Network overhead:** Depends on telemetry volume and sampling + +**Recommendations:** + +- **Use sampling** for high-volume tests: + ```yaml + extraEnvVars: + OTEL_TRACES_SAMPLER: "traceidratio" + OTEL_TRACES_SAMPLER_ARG: "0.1" # Sample 10% of traces + ``` +- **Adjust collector resources** if experiencing backpressure +- **Monitor test pods** for resource saturation when OTel is enabled + +## OTel vs Metrics Sidecar comparison + +| Aspect | OpenTelemetry | Metrics Sidecar | +|--------|---------------|-----------------| +| **Traces** | Yes | No | +| **Metrics** | Yes | Yes | +| **Additional containers** | None | 1 sidecar per master pod | +| **Setup complexity** | Requires OTel Collector | Works with Prometheus directly | +| **Resource overhead** | Generally minimal, varies with config | Additional sidecar container | +| **Recommended for** | New deployments, distributed tracing needs | Legacy compatibility, Prometheus-only stacks | +| **v2 API** | Yes | Yes (default when OTel disabled) | +| **v1 API** | No | Yes | + +**When OpenTelemetry is enabled:** + +- The `--otel` flag is automatically added to Locust commands +- The metrics exporter sidecar is NOT deployed +- The metrics port (9646) is excluded from the Service + +**When to use each:** + +- **Use OpenTelemetry** if you need traces, want to reduce container count, or are building a new observability stack +- **Use Metrics Sidecar** if you only need Prometheus metrics, have existing Prometheus infrastructure, or need v1 API compatibility + +## Related guides + +- [Monitor test status and health](monitor-test-status.md) β€” Check test phase, conditions, and pod health +- [Configure resources](../configuration/configure-resources.md) β€” Adjust resource limits for OTel overhead +- [Metrics & Dashboards](../../metrics_and_dashboards.md) β€” Complete observability reference diff --git a/docs/how-to-guides/observability/monitor-test-status.md b/docs/how-to-guides/observability/monitor-test-status.md new file 
mode 100644 index 00000000..b91b2192 --- /dev/null +++ b/docs/how-to-guides/observability/monitor-test-status.md @@ -0,0 +1,362 @@ +--- +title: Monitor test status and health +description: Track test progress, phase transitions, conditions, and pod health using kubectl +tags: + - observability + - monitoring + - status + - health checks + - ci/cd +--- + +# Monitor test status and health + +The operator reports test status through standard Kubernetes status fields and conditions. This guide shows you how to monitor test execution, detect failures, and integrate with CI/CD pipelines. + +## How the operator reports test status + +The operator updates `.status` on your LocustTest CR throughout its lifecycle: + +- **Phase:** Current state (Pending β†’ Running β†’ Succeeded/Failed) +- **Conditions:** Detailed health indicators (Ready, WorkersConnected, PodsHealthy, etc.) +- **Worker counts:** Expected vs connected workers +- **Timestamps:** Start time and completion time + +## Watch test progress + +**Monitor phase changes in real-time:** + +```bash +kubectl get locusttest my-test -w +``` + +**Expected output:** + +``` +NAME PHASE WORKERS CONNECTED AGE +my-test Pending 5 2s +my-test Pending 5 0 5s +my-test Running 5 3 12s +my-test Running 5 5 18s +my-test Succeeded 5 5 5m32s +``` + +**Output columns explained:** + +| Column | Description | +|--------|-------------| +| NAME | LocustTest resource name | +| PHASE | Current lifecycle phase | +| WORKERS | Requested worker count (from spec) | +| CONNECTED | Active worker pods (approximation from Job status) | +| AGE | Time since CR creation | + +### Phase progression + +Tests move through these phases: + +```mermaid +stateDiagram-v2 + [*] --> Pending: CR created + Pending --> Running: Master pod active + Running --> Succeeded: Test completed (exit 0) + Running --> Failed: Master failed or pods unhealthy + Running --> Failed: Pod health check failed (after grace period) + Pending --> Failed: Pod health check failed +``` + +| 
Phase | Meaning | What to do | +|-------|---------|------------| +| **Pending** | Resources creating (Service, Jobs), pods scheduling | Wait for resources to schedule. Check events if stuck >2 minutes. | +| **Running** | Master pod active, test executing | Monitor worker connections and test progress. | +| **Succeeded** | Master job completed successfully (exit code 0) | Collect results. CR can be deleted or kept for records. | +| **Failed** | Master job failed or pods unhealthy (after 2-minute grace period) | Check pod logs and events. Delete and recreate to retry. | + +!!! note "Grace period" + The operator waits 2 minutes after pod creation before reporting pod health failures. This prevents false alarms during image pulls and startup. + +## Check status conditions + +Conditions provide detailed health information beyond the phase. + +**View all conditions:** + +```bash +kubectl get locusttest my-test -o jsonpath='{.status.conditions}' | jq . +``` + +**Example output:** + +```json +[ + { + "type": "Ready", + "status": "True", + "reason": "ResourcesCreated", + "message": "All resources created successfully" + }, + { + "type": "WorkersConnected", + "status": "True", + "reason": "AllWorkersConnected", + "message": "5/5 workers connected" + }, + { + "type": "PodsHealthy", + "status": "True", + "reason": "PodsHealthy", + "message": "All pods running normally" + } +] +``` + +### Key condition types + +#### Ready + +Indicates whether test resources were created successfully. + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `ResourcesCreated` | All resources (Service, Jobs) created successfully | +| `False` | `ResourcesCreating` | Resources are being created | +| `False` | `ResourcesFailed` | Test failed, resources in error state | + +#### WorkersConnected + +Tracks worker connection progress. 
+ +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `AllWorkersConnected` | All expected workers have active pods | +| `False` | `WaitingForWorkers` | Initial state, waiting for worker pods | +| `False` | `WorkersMissing` | Some workers not yet active (message shows N/M count) | + +!!! note + `connectedWorkers` is an approximation from Job.Status.Active. It may briefly lag behind actual Locust master connections. + +#### PodsHealthy + +Detects pod-level failures (crashes, scheduling issues, image pull errors). + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `PodsHealthy` | All pods running normally | +| `True` | `PodsStarting` | Within 2-minute grace period (not yet checking health) | +| `False` | `ImagePullError` | One or more pods cannot pull container image | +| `False` | `ConfigurationError` | ConfigMap or Secret referenced in CR not found | +| `False` | `SchedulingError` | Pod cannot be scheduled (node affinity, insufficient resources) | +| `False` | `CrashLoopBackOff` | Container repeatedly crashing | +| `False` | `InitializationError` | Init container failed | + +**Check a specific condition:** + +```bash +kubectl get locusttest my-test -o jsonpath='{.status.conditions[?(@.type=="PodsHealthy")]}' +``` + +#### TestCompleted + +Indicates whether the test has finished and the outcome. + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `TestSucceeded` | Test completed successfully (master exited with code 0) | +| `True` | `TestFailed` | Test completed with failure | +| `False` | `TestInProgress` | Test is still running | + +#### SpecDrifted + +Appears when the CR spec is edited after creation, once the test has moved past the Pending phase. + +| Status | Reason | Meaning | +|--------|--------|---------| +| `True` | `SpecChangeIgnored` | Spec was modified after creation. Changes ignored. Delete and recreate to apply. 
| + +## Detect pod failures + +When `PodsHealthy=False`, the operator detected a problem with test pods. + +**Get condition details:** + +```bash +kubectl describe locusttest my-test +``` + +Look for the `PodsHealthy` condition in the Status section. The message field explains what failed. + +**Failure message format:** + +Messages follow the pattern: `{FailureType}: {N} pod(s) affected [{pod-names}]: {error-detail}` + +**Example failure messages:** + +- `ImagePullError: 1 pod(s) affected [my-test-master-abc12]: ErrImagePull` +- `ConfigurationError: 3 pod(s) affected [my-test-worker-def34, my-test-worker-ghi56, my-test-worker-jkl78]: Secret "api-creds" not found` +- `SchedulingError: 2 pod(s) affected [my-test-worker-mno90, my-test-worker-pqr12]: 0/3 nodes available: insufficient cpu` +- `CrashLoopBackOff: 1 pod(s) affected [my-test-master-stu34]: CrashLoopBackOff` + +**View pod states directly:** + +The operator applies two label selectors to test pods: + +| Label | Selects | Example | +|-------|---------|---------| +| `performance-test-name=<test-name>` | All pods (master + workers) | `kubectl get pods -l performance-test-name=my-test` | +| `performance-test-pod-name=<test-name>-<role>` | Specific role (master or worker) | `kubectl get pods -l performance-test-pod-name=my-test-worker` | + +```bash +kubectl get pods -l performance-test-name=my-test +``` + +**Check pod logs for errors:** + +```bash +# Master logs +kubectl logs job/my-test-master + +# Worker logs (first worker pod) +kubectl logs job/my-test-worker --max-log-requests=1 +``` + +### Common failure scenarios + +| Symptom | Likely cause | How to investigate | +|---------|--------------|-------------------| +| Phase stuck in `Pending` | Pods not scheduling | `kubectl describe pod` for scheduling errors | +| `PodsHealthy=False` with `ImagePullError` | Wrong image name or missing imagePullSecret | Check image name in spec, verify secret exists | +| `PodsHealthy=False` with `ConfigurationError` | Missing ConfigMap or Secret | Verify 
referenced resources exist: `kubectl get configmap,secret` | +| Phase transitions to `Failed` immediately | Master pod crashed on startup | Check master logs for Python errors in locustfile | +| Workers never connect | Network policy or firewall | Verify workers can reach master service on port 5557 | + +## CI/CD integration + +Use `kubectl wait` to block until test completion. The operator follows Kubernetes condition conventions, making it compatible with standard CI/CD tools. + +### GitHub Actions example + +```yaml +name: Load Test +on: + workflow_dispatch: + +jobs: + load-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Apply test + run: kubectl apply -f locusttest.yaml + + - name: Wait for test completion + run: | + kubectl wait locusttest/my-test \ + --for=jsonpath='{.status.phase}'=Succeeded \ + --timeout=30m + + - name: Check result + if: failure() + run: | + echo "Test failed or timed out" + kubectl describe locusttest my-test + kubectl logs -l performance-test-name=my-test --tail=50 + + - name: Cleanup + if: always() + run: kubectl delete locusttest my-test --ignore-not-found +``` + +### Generic shell script + +```bash +#!/bin/bash +set -e + +# Apply test +kubectl apply -f locusttest.yaml + +# Wait for completion (Succeeded or Failed) +echo "Waiting for test to complete..." +while true; do + PHASE=$(kubectl get locusttest my-test -o jsonpath='{.status.phase}' 2>/dev/null) + case "$PHASE" in + Succeeded) + echo "Test passed!" + exit 0 + ;; + Failed) + echo "Test failed!" + kubectl describe locusttest my-test + kubectl logs job/my-test-master --tail=50 + exit 1 + ;; + Pending|Running) + echo "Phase: $PHASE - waiting..." 
+ sleep 10 + ;; + *) + echo "Unknown phase: $PHASE" + sleep 10 + ;; + esac +done +``` + +**Wait patterns:** + +```bash +# Wait for specific phase +kubectl wait locusttest/my-test --for=jsonpath='{.status.phase}'=Succeeded --timeout=30m + +# Wait for condition +kubectl wait locusttest/my-test --for=condition=Ready --timeout=5m + +# Check if test completed (success or failure) +PHASE=$(kubectl get locusttest my-test -o jsonpath='{.status.phase}') +if [ "$PHASE" = "Succeeded" ]; then + echo "Test passed" +elif [ "$PHASE" = "Failed" ]; then + echo "Test failed" + exit 1 +fi +``` + +## Check worker connection progress + +Monitor how many workers have connected to the master: + +```bash +kubectl get locusttest my-test -o jsonpath='{.status.connectedWorkers}/{.status.expectedWorkers}' +``` + +**Example output:** `5/5` (all workers connected) + +**View WorkersConnected condition:** + +```bash +kubectl get locusttest my-test -o jsonpath='{.status.conditions[?(@.type=="WorkersConnected")]}' +``` + +If workers aren't connecting: + +1. **Check worker pod status:** + ```bash + kubectl get pods -l performance-test-pod-name=my-test-worker + ``` + +2. **Verify master service exists:** + ```bash + kubectl get service my-test-master + ``` + +3. 
**Check worker logs for connection errors:** + ```bash + kubectl logs job/my-test-worker --max-log-requests=1 | grep -i connect + ``` + +## Related guides + +- [Configure OpenTelemetry integration](configure-opentelemetry.md) β€” Export metrics and traces from tests +- [API Reference - Status Fields](../../api_reference.md#status-fields) β€” Complete status field documentation +- [Metrics & Dashboards](../../metrics_and_dashboards.md) β€” Monitor test metrics with Prometheus diff --git a/docs/how-to-guides/scaling/configure-tolerations.md b/docs/how-to-guides/scaling/configure-tolerations.md new file mode 100644 index 00000000..e2049e78 --- /dev/null +++ b/docs/how-to-guides/scaling/configure-tolerations.md @@ -0,0 +1,339 @@ +--- +title: Configure tolerations for tainted nodes +description: Schedule pods on nodes with taints +tags: + - scaling + - scheduling + - tolerations + - taints +--- + +# Configure tolerations for tainted nodes + +Schedule Locust pods on tainted nodes using tolerations, enabling dedicated node pools and preventing other workloads from using test infrastructure. + +## Prerequisites + +- Locust Kubernetes Operator installed +- Access to taint cluster nodes +- **Toleration injection enabled** -- The operator must have the `ENABLE_TAINT_TOLERATIONS_CR_INJECTION` environment variable set to `"true"`. The Helm chart enables this by default via `locustPods.tolerationsInjection: true`. + +## When to use tolerations + +**Common use cases:** + +- **Dedicated node pools:** Reserve nodes exclusively for load testing +- **High-performance nodes:** Prevent regular workloads from consuming resources +- **Spot instances:** Allow tests on spot/preemptible nodes with taints +- **Specialized hardware:** Schedule on GPU or high-memory nodes + +**How taints and tolerations work:** + +1. **Taint nodes:** Mark nodes as special-purpose (e.g., "dedicated=load-testing:NoSchedule") +2. 
**Add tolerations:** Pods with matching tolerations can be scheduled on tainted nodes +3. **Result:** Only pods with tolerations use the tainted nodes + +## Taint your nodes + +Add taints to nodes you want to dedicate for testing: + +```bash +# Taint a node for load testing +kubectl taint nodes node-1 dedicated=load-testing:NoSchedule + +# Taint multiple nodes +kubectl taint nodes node-2 dedicated=load-testing:NoSchedule +kubectl taint nodes node-3 dedicated=load-testing:NoSchedule + +# Verify taints +kubectl describe node node-1 | grep Taints +``` + +**Taint effects:** + +| Effect | Behavior | +|--------|----------| +| `NoSchedule` | New pods without toleration won't be scheduled | +| `PreferNoSchedule` | Scheduler tries to avoid placing pods here (soft) | +| `NoExecute` | Existing pods without toleration are evicted | + +**Recommendation:** Use `NoSchedule` for dedicated test nodes. + +## Configure tolerations + +Add `scheduling.tolerations` to your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: toleration-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + tolerations: + - key: dedicated + operator: Equal + value: load-testing + effect: NoSchedule +``` + +Apply the configuration: + +```bash +kubectl apply -f locusttest-toleration.yaml +``` + +## Toleration operators + +**Equal operator:** Exact match required + +```yaml +tolerations: + - key: dedicated + operator: Equal + value: load-testing # Must match exactly + effect: NoSchedule +``` + +Matches taint: `dedicated=load-testing:NoSchedule` + +**Exists operator:** Key must exist, value doesn't matter + +```yaml +tolerations: + - key: dedicated + operator: Exists # Any value for key "dedicated" + effect: NoSchedule +``` + +Matches taints: 
`dedicated=load-testing:NoSchedule`, `dedicated=anything:NoSchedule` + +## Multiple tolerations + +Tolerate multiple taints: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: multi-toleration-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + tolerations: + - key: dedicated + operator: Equal + value: load-testing + effect: NoSchedule + - key: spot-instance + operator: Exists + effect: NoSchedule + - key: high-performance + operator: Equal + value: "true" + effect: PreferNoSchedule +``` + +Pods can be scheduled on nodes with any of these taints. + +## Example: Dedicated node pool + +Complete setup for dedicated load testing nodes: + +**1. Taint the nodes:** + +```bash +kubectl taint nodes node-pool-load-1 workload=load-testing:NoSchedule +kubectl taint nodes node-pool-load-2 workload=load-testing:NoSchedule +kubectl taint nodes node-pool-load-3 workload=load-testing:NoSchedule +``` + +**2. Label the nodes (for affinity):** + +```bash +kubectl label nodes node-pool-load-1 workload-type=load-testing +kubectl label nodes node-pool-load-2 workload-type=load-testing +kubectl label nodes node-pool-load-3 workload-type=load-testing +``` + +**3. 
Create LocustTest with affinity + tolerations:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: dedicated-pool-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 15 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - load-testing # Target labeled nodes + tolerations: + - key: workload + operator: Equal + value: load-testing + effect: NoSchedule # Tolerate the taint +``` + +**Result:** Pods only run on dedicated nodes, and only these pods can use those nodes. + +## Example: Spot instances + +Run tests on cost-optimized spot/preemptible instances: + +**1. Taint spot nodes:** + +```bash +# Cloud providers often add this taint automatically +kubectl taint nodes spot-node-1 cloud.google.com/gke-preemptible=true:NoSchedule +``` + +**2. Tolerate spot node taints:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: spot-instance-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 20 + scheduling: + tolerations: + - key: cloud.google.com/gke-preemptible # GKE spot instances + operator: Exists + effect: NoSchedule + - key: eks.amazonaws.com/capacityType # EKS does NOT add a spot taint by default + operator: Equal # (unlike GKE). You must configure a custom + value: SPOT # taint on your EKS managed/self-managed + effect: NoSchedule # node groups for this toleration to apply. 
+ +## NoExecute effect + +`NoExecute` evicts running pods without toleration: + +```bash +# Taint with NoExecute +kubectl taint nodes node-1 maintenance=scheduled:NoExecute +``` + +Pods without toleration are immediately evicted. Use for: + +- Scheduled node maintenance +- Emergency capacity reclaim +- Node pool draining + +**Toleration with grace period:** + +```yaml +tolerations: + - key: maintenance + operator: Equal + value: scheduled + effect: NoExecute + tolerationSeconds: 300 # Pod survives 5 minutes, then evicted +``` + +## Verify tolerations and node placement + +Check that pods are scheduled on tainted nodes: + +```bash +# Show pod placement +kubectl get pods -l performance-test-name=toleration-test -o wide + +# Check node taints +kubectl describe node | grep Taints + +# Verify pod tolerations +kubectl get pod <pod-name> -o jsonpath='{.spec.tolerations}' +``` + +## Troubleshoot scheduling failures + +If pods remain `Pending`: + +```bash +kubectl describe pod <pod-name> | grep -A 10 "Events:" +``` + +**Common issues:** + +**Missing toleration:** + +``` +Warning FailedScheduling 0/3 nodes are available: 3 node(s) had taint {dedicated: load-testing} +``` + +Fix: Add matching toleration to the CR. + +**Typo in taint key or value:** + +```bash +# Check actual taint +kubectl describe node node-1 | grep Taints + +# Output: Taints: dedicated=load-testing:NoSchedule +``` + +Ensure toleration matches exactly (case-sensitive). + +**Wrong effect:** + +Taint: `dedicated=load-testing:NoSchedule` +Toleration: `effect: PreferNoSchedule` (mismatch) + +Fix: Match the effect in toleration to the taint. 
+ +## Remove taints + +When no longer needed: + +```bash +# Remove specific taint +kubectl taint nodes node-1 dedicated=load-testing:NoSchedule- + +# Note the trailing minus (-) to remove +``` + +## What's next + +- **[Use node affinity](use-node-affinity.md)** β€” Target specific nodes (often used together) +- **[Use node selector](use-node-selector.md)** β€” Simpler alternative without taints +- **[Scale worker replicas](scale-workers.md)** β€” Calculate capacity for dedicated pools diff --git a/docs/how-to-guides/scaling/scale-workers.md b/docs/how-to-guides/scaling/scale-workers.md new file mode 100644 index 00000000..80fec61a --- /dev/null +++ b/docs/how-to-guides/scaling/scale-workers.md @@ -0,0 +1,277 @@ +--- +title: Scale worker replicas for high load +description: Size worker replicas based on simulated user count and throughput +tags: + - scaling + - workers + - performance +--- + +# Scale worker replicas for high load + +Calculate and configure worker replicas to handle your target user count and request throughput. + +## Prerequisites + +- Locust Kubernetes Operator installed +- Basic understanding of distributed load testing + +## How worker replicas affect throughput + +Each worker pod generates load independently. More workers = more throughput capacity. 
+ +**Key factors:** + +- **User count:** Each worker can efficiently handle 50-100 simulated users (depends on test complexity) +- **Request rate:** CPU-intensive tests (complex parsing, encryption) need more workers +- **Memory usage:** Tests with large payloads or state need more memory per worker + +## Calculate worker count + +**Formula:** + +``` +workers = ceil(total_users / users_per_worker) +``` + +**Default rule of thumb:** 50 users per worker + +**Examples:** + +| Target Users | Users/Worker | Workers Needed | +|--------------|--------------|----------------| +| 100 | 50 | 2 | +| 500 | 50 | 10 | +| 1000 | 50 | 20 | +| 5000 | 50 | 100 | + +**Adjust users per worker based on test complexity:** + +- **Simple tests** (basic HTTP GET): 100 users/worker +- **Standard tests** (REST API with JSON): 50 users/worker +- **Complex tests** (heavy parsing, encryption, large payloads): 25 users/worker + +## Configure worker replicas + +Set worker count in your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: scaled-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: | + --locustfile /lotest/src/test.py + --host https://api.example.com + --users 1000 # Total simulated users + --spawn-rate 50 # Users to add per second + --run-time 10m + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 20 # 1000 users / 50 users per worker = 20 workers +``` + +!!! note "Replica validation range" + The operator accepts 1 to 500 worker replicas. 
+ +Apply the configuration: + +```bash +kubectl apply -f locusttest-scaled.yaml +``` + +## Example: 1000 users test + +Complete configuration for 1000 concurrent users: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: high-load-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: high-load-test-script + master: + command: | + --locustfile /lotest/src/test.py + --host https://api.example.com + --users 1000 + --spawn-rate 50 + --run-time 15m + resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "512Mi" + cpu: "500m" + worker: + replicas: 20 # 1000 users at 50 users/worker + command: "--locustfile /lotest/src/test.py" + resources: + requests: + memory: "512Mi" # More memory for load generation + cpu: "500m" + limits: + memory: "1Gi" + # CPU limit omitted for maximum performance +``` + +## Resource implications + +Each worker consumes cluster resources: + +**Per-worker resource baseline:** + +- CPU: 500m request, no limit (for performance) +- Memory: 512Mi-1Gi (depends on test data) + +**Total resources for 20 workers:** + +- CPU: 10 cores requested (20 workers Γ— 500m) +- Memory: 10-20Gi (20 workers Γ— 512Mi-1Gi) + +**Planning checklist:** + +- [ ] Cluster has enough capacity for all workers +- [ ] Consider using [node affinity](use-node-affinity.md) to target specific node pools +- [ ] Configure [resource limits](../configuration/configure-resources.md) appropriately +- [ ] Use [tolerations](configure-tolerations.md) if running on dedicated nodes + +## Monitor connected workers + +Verify that all workers connect successfully: + +```bash +# Watch test status +kubectl get locusttest high-load-test -w + +# Check status field +kubectl get locusttest high-load-test -o jsonpath='{.status}' +``` + +Look for: + +```json +{ + "phase": "Running", + "expectedWorkers": 20, + "connectedWorkers": 20, + "conditions": [ ... 
] +} +``` + +**If `connectedWorkers` < `expectedWorkers`:** + +```bash +# List worker pods +kubectl get pods -l performance-test-pod-name=high-load-test-worker + +# Check for pending or failed pods +kubectl get pods -l performance-test-pod-name=high-load-test-worker | grep -v Running + +# Describe problematic pods +kubectl describe pod +``` + +Common issues: + +- Insufficient cluster capacity (pending pods) +- Image pull failures +- Resource quota exceeded +- Node selector or affinity constraints not satisfied + +## View worker pod distribution + +Check which nodes are running workers: + +```bash +kubectl get pods -l performance-test-pod-name=high-load-test-worker -o wide +``` + +Output shows pod-to-node distribution: + +``` +NAME NODE STATUS +high-load-test-worker-0 node-pool-1-a Running +high-load-test-worker-1 node-pool-1-b Running +high-load-test-worker-2 node-pool-1-c Running +... +``` + +**Best practice:** Distribute workers across multiple nodes for resilience and better resource utilization. + +## Scaling considerations + +**Spawn rate:** + +Match spawn rate to worker count and network capacity: + +``` +recommended_spawn_rate = workers Γ— 5-10 users/second +``` + +For 20 workers: 100-200 users/second spawn rate is reasonable. + +**Example:** + +```yaml +master: + command: | + --users 1000 + --spawn-rate 100 # 20 workers Γ— 5 users/sec/worker +``` + +Too high spawn rate overwhelms workers during ramp-up. Too low takes too long to reach target. + +**Network bandwidth:** + +High worker counts can saturate network: + +- 20 workers Γ— 100 RPS = 2000 total RPS +- At 10KB per request = 20MB/s bandwidth + +Ensure cluster networking can handle aggregate throughput. + +**Master capacity:** + +Master coordinates all workers. 
Very high worker counts (>50) may require increased master resources: + +```yaml +master: + resources: + requests: + memory: "512Mi" # Increased from 256Mi + cpu: "500m" # Increased from 200m + limits: + memory: "1Gi" + cpu: "1000m" +``` + +## Adjusting worker count + +Editing the CR spec of a running test does **not** live-update it. To change the worker count, delete the existing test and re-apply the updated manifest: + +```bash +# Delete the running test +kubectl delete locusttest my-locust-test + +# Edit spec.worker.replicas in your manifest, then apply: +kubectl apply -f my-locust-test.yaml +``` + +!!! note "No live scaling" + The operator does not support in-place updates. You must `kubectl delete` the running LocustTest and then `kubectl apply` the updated manifest to change worker replicas or any other spec field. + +## What's next + +- **[Configure resources](../configuration/configure-resources.md)** β€” Set appropriate CPU and memory for workers +- **[Use node affinity](use-node-affinity.md)** β€” Target high-performance nodes for workers +- **[Configure tolerations](configure-tolerations.md)** β€” Run workers on dedicated node pools diff --git a/docs/how-to-guides/scaling/use-node-affinity.md b/docs/how-to-guides/scaling/use-node-affinity.md new file mode 100644 index 00000000..a162f488 --- /dev/null +++ b/docs/how-to-guides/scaling/use-node-affinity.md @@ -0,0 +1,326 @@ +--- +title: Use node affinity for dedicated test nodes +description: Target specific nodes using labels and affinity rules +tags: + - scaling + - scheduling + - node affinity +--- + +# Use node affinity for dedicated test nodes + +Schedule Locust pods on specific nodes using node affinity, enabling dedicated test infrastructure or zone isolation. 
+ +## Prerequisites + +- Locust Kubernetes Operator installed +- Access to label cluster nodes +- Affinity injection enabled via the `ENABLE_AFFINITY_CR_INJECTION` environment variable on the operator (Helm default: `locustPods.affinityInjection: true`) + +## When to use node affinity + +**Common use cases:** + +- **Dedicated nodes:** Run load tests on nodes reserved for testing +- **High-performance nodes:** Target nodes with faster CPUs or more memory +- **Zone isolation:** Keep tests in specific availability zones +- **Cost optimization:** Use spot instances or lower-cost node pools for testing + +**Node affinity vs node selector:** + +- **Node selector:** Simple label matching (use this for basic needs) +- **Node affinity:** Complex rules with OR logic, soft preferences, multiple conditions + +Use node affinity when you need the flexibility. Use [node selector](use-node-selector.md) for simplicity. + +## Label your nodes + +Add labels to nodes where you want to run tests: + +```bash +# Label nodes for load testing +kubectl label nodes node-1 workload-type=load-testing +kubectl label nodes node-2 workload-type=load-testing +kubectl label nodes node-3 workload-type=load-testing + +# Verify labels +kubectl get nodes --show-labels | grep workload-type +``` + +**Example labels:** + +```bash +# By workload type +kubectl label nodes node-1 workload-type=load-testing + +# By performance tier +kubectl label nodes node-2 performance-tier=high + +# By environment +kubectl label nodes node-3 environment=testing + +# By instance type (AWS) +kubectl label nodes node-4 node.kubernetes.io/instance-type=c5.2xlarge +``` + +## Configure node affinity + +Add `scheduling.affinity.nodeAffinity` to your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: affinity-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + 
command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: # Hard requirement + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - load-testing # Only schedule on nodes with this label +``` + +Apply the configuration: + +```bash +kubectl apply -f locusttest-affinity.yaml +``` + +## Multiple label requirements + +Require multiple labels on nodes (AND logic): + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: multi-label-affinity +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type # Must be load-testing + operator: In + values: + - load-testing + - key: performance-tier # AND must be high-performance + operator: In + values: + - high + - key: environment # AND must be in testing env + operator: In + values: + - testing +``` + +All conditions must be true for a node to be selected. 
+ +## Example: AWS instance type targeting + +Target specific EC2 instance types: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: aws-instance-affinity +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 20 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node.kubernetes.io/instance-type + operator: In + values: + - c5.2xlarge # Compute-optimized instances + - c5.4xlarge +``` + +## Example: Zone isolation + +Keep tests in specific availability zones: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: zone-affinity-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a # Only use nodes in zone 1a +``` + +## Verify node placement + +Check that pods are scheduled on the correct nodes: + +```bash +# Show pod-to-node mapping +kubectl get pods -l performance-test-name=affinity-test -o wide + +# Check specific labels on nodes where pods are running +kubectl get nodes -l workload-type=load-testing +``` + +Expected output showing pods scheduled only on labeled nodes: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +affinity-test-master-abc123 1/1 Running 0 45s 10.244.1.12 node-1 +affinity-test-worker-0-def456 1/1 Running 0 42s 10.244.1.13 node-1 +affinity-test-worker-1-ghi789 1/1 Running 0 42s 10.244.2.8 node-2 +affinity-test-worker-2-jkl012 1/1 Running 0 42s 10.244.3.5 
node-3 +``` + +Cross-reference the NODE column against your labeled nodes: + +```bash +kubectl get nodes -l workload-type=load-testing +``` + +## Troubleshoot scheduling failures + +If pods remain in `Pending` state: + +```bash +# Check pod events +kubectl describe pod | grep -A 10 "Events:" +``` + +**Common issue:** + +``` +Warning FailedScheduling No nodes are available that match all of the following predicates: NodeAffinity (3) +``` + +**Causes:** + +1. **No nodes with matching labels:** + + ```bash + # Check labeled nodes exist + kubectl get nodes -l workload-type=load-testing + ``` + + Fix: Label at least one node. + +2. **Insufficient capacity on labeled nodes:** + + ```bash + # Check node resources + kubectl describe nodes -l workload-type=load-testing | grep -A 5 "Allocated resources" + ``` + + Fix: Add more nodes with the label or reduce resource requests. + +3. **Typo in label key or value:** + + Verify label spelling matches exactly: + + ```bash + kubectl get nodes --show-labels | grep workload + ``` + +!!! tip "podAffinity and podAntiAffinity" + + The `scheduling.affinity` field also supports `podAffinity` and `podAntiAffinity` for + inter-pod scheduling rules. See the [production deployment sample](../../samples/production-deployment.md) + for a worked example. 
+ +## Combine with tolerations + +Often used together for dedicated node pools: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: affinity-toleration-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - load-testing + tolerations: + - key: dedicated + operator: Equal + value: load-testing + effect: NoSchedule +``` + +!!! note "Feature flags" + + Tolerations injection requires the `ENABLE_TAINT_TOLERATIONS_CR_INJECTION` environment variable + to be enabled on the operator. + +See [Configure tolerations](configure-tolerations.md) for details. + +## What's next + +- **[Configure tolerations](configure-tolerations.md)** β€” Schedule on tainted nodes (often used together) +- **[Use node selector](use-node-selector.md)** β€” Simpler alternative for basic label matching +- **[Scale worker replicas](scale-workers.md)** β€” Calculate worker count for dedicated nodes diff --git a/docs/how-to-guides/scaling/use-node-selector.md b/docs/how-to-guides/scaling/use-node-selector.md new file mode 100644 index 00000000..4065a9a9 --- /dev/null +++ b/docs/how-to-guides/scaling/use-node-selector.md @@ -0,0 +1,318 @@ +--- +title: Use node selector for simple node targeting +description: Target nodes using simple label matching +tags: + - scaling + - scheduling + - node selector +--- + +# Use node selector for simple node targeting + +Target specific nodes using simple label matching with node selector, the easiest way to control pod placement. 
+ +## Prerequisites + +- Locust Kubernetes Operator installed +- Access to label cluster nodes + +## When to use node selector + +**Use node selector when:** + +- You need simple label matching (key=value) +- All conditions are AND (all labels must match) +- You want the simplest configuration + +**Use node affinity when:** + +- You need OR logic (match any of multiple labels) +- You need soft preferences (preferred but not required) +- You need complex expressions (In, NotIn, Exists, DoesNotExist) + +See [Use node affinity](use-node-affinity.md) for advanced scenarios. + +## Label your nodes + +Add labels to nodes: + +```bash +# Label for SSD storage +kubectl label nodes node-1 disktype=ssd + +# Label for performance environment +kubectl label nodes node-1 environment=performance + +# Label multiple nodes +kubectl label nodes node-2 disktype=ssd environment=performance +kubectl label nodes node-3 disktype=ssd environment=performance + +# Verify labels +kubectl get nodes --show-labels | grep disktype +``` + +## Configure node selector + +Add `scheduling.nodeSelector` to your LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: nodeselector-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + scheduling: + nodeSelector: + disktype: ssd # Only schedule on nodes with this label +``` + +Apply the configuration: + +```bash +kubectl apply -f locusttest-nodeselector.yaml +``` + +## Multiple labels (AND logic) + +Require multiple labels on nodes: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: multi-label-selector +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" 
+ replicas: 10 + scheduling: + nodeSelector: + disktype: ssd # Must have SSD + environment: performance # AND must be performance environment +``` + +Nodes must have **both** labels to be selected. + +## Example: High-performance nodes + +Target high-performance node pool: + +**1. Label your high-performance nodes:** + +```bash +kubectl label nodes perf-node-1 performance-tier=high +kubectl label nodes perf-node-2 performance-tier=high +kubectl label nodes perf-node-3 performance-tier=high +``` + +**2. Configure test to use labeled nodes:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: high-perf-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: performance-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 20 + scheduling: + nodeSelector: + performance-tier: high # Only high-performance nodes +``` + +## Example: AWS instance type targeting + +Target specific EC2 instance types: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: aws-instance-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + nodeSelector: + node.kubernetes.io/instance-type: c5.2xlarge # Compute-optimized +``` + +**Note:** This only matches one instance type. For multiple types, use [node affinity](use-node-affinity.md) with `In` operator. 
+ +## Example: Zone-specific deployment + +Keep tests in a specific availability zone: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: zone-specific-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + nodeSelector: + topology.kubernetes.io/zone: us-east-1a # Only zone 1a +``` + +## Verify node placement + +Check that pods are scheduled on the correct nodes: + +```bash +# Show pod-to-node mapping +kubectl get pods -l performance-test-name= -o wide + +# Check labels on nodes where pods are running +NODE=$(kubectl get pod -l performance-test-pod-name=-master -o jsonpath='{.items[0].spec.nodeName}') +kubectl get node $NODE --show-labels | grep disktype +``` + +Expected: All pods running on nodes with matching labels. + +## Troubleshoot scheduling failures + +If pods remain `Pending`: + +```bash +kubectl describe pod | grep -A 10 "Events:" +``` + +**Common issue:** + +``` +Warning FailedScheduling 0/5 nodes are available: 5 node(s) didn't match Pod's node affinity/selector +``` + +**Causes:** + +1. **No nodes with matching labels:** + + ```bash + # Check if any nodes have the label + kubectl get nodes -l disktype=ssd + ``` + + Fix: Label at least one node. + +2. **Typo in label key or value:** + + ```bash + # Check actual labels + kubectl get nodes --show-labels | grep disktype + ``` + + Ensure spelling and case match exactly. + +3. **Insufficient capacity on labeled nodes:** + + ```bash + # Check node resources + kubectl describe node -l disktype=ssd | grep -A 5 "Allocated resources" + ``` + + Fix: Add more labeled nodes or reduce resource requests. 
+ +## Compare with node affinity + +**Node selector:** + +```yaml +scheduling: + nodeSelector: + disktype: ssd +``` + +**Equivalent node affinity:** + +```yaml +scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: disktype + operator: In + values: + - ssd +``` + +Node selector is simpler. Node affinity is more powerful. + +!!! note "Feature flag" + + Node affinity injection requires the `ENABLE_AFFINITY_CR_INJECTION` environment variable + to be enabled on the operator (Helm default: `locustPods.affinityInjection: true`). + +## Combine with other scheduling + +Node selector works with tolerations: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: selector-toleration-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: my-test + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 10 + scheduling: + nodeSelector: + disktype: ssd # Simple label matching + tolerations: + - key: dedicated + operator: Equal + value: load-testing + effect: NoSchedule # Tolerate taint on SSD nodes +``` + +!!! note "Feature flag" + + Tolerations injection requires the helm value. + +See [Configure tolerations](configure-tolerations.md) for details. 
+ +## What's next + +- **[Use node affinity](use-node-affinity.md)** β€” Complex scheduling with OR logic and preferences +- **[Configure tolerations](configure-tolerations.md)** β€” Schedule on tainted nodes +- **[Scale worker replicas](scale-workers.md)** β€” Calculate capacity for labeled nodes diff --git a/docs/how-to-guides/security/configure-pod-security.md b/docs/how-to-guides/security/configure-pod-security.md new file mode 100644 index 00000000..2a7e1f27 --- /dev/null +++ b/docs/how-to-guides/security/configure-pod-security.md @@ -0,0 +1,343 @@ +--- +title: Configure pod security settings +description: Understand and configure security contexts, RBAC, and network policies for operator and test pods +tags: + - security + - pod security + - rbac + - network policy + - hardening +--- + +# Configure pod security settings + +The operator applies security settings to all test pods by default. This guide explains the default security posture, RBAC requirements, and network isolation options. + +## Default security context + +The operator automatically applies a security context to all Locust test pods (master and worker). The operator meets the **baseline** profile because it does not use any restricted fields (hostNetwork, hostPID, privileged, etc.). The seccomp RuntimeDefault profile is an additional hardening measure toward **restricted** profile compliance. + +### Security settings applied + +```yaml +# Applied to all test pods by default +securityContext: + seccompProfile: + type: RuntimeDefault # Use runtime's default seccomp profile +``` + +**Why this default:** + +- **seccompProfile: RuntimeDefault** β€” Uses the container runtime's default seccomp profile to restrict system calls. + +!!! note "Non-root execution" + The official Locust image (`locustio/locust`) runs as a non-root user by default (UID 1000), but the operator does not explicitly set `runAsNonRoot: true` on the pod security context. 
If you require enforced non-root execution, see the [restricted profile section](#pod-security-standards-compliance) below. + +### Why NOT readOnlyRootFilesystem + +The operator does NOT set `readOnlyRootFilesystem: true` because: + +- Locust needs to write to `/tmp` for temporary files +- Python pip may need cache directories for plugin installation +- The locustfile may write temporary data during test execution + +If your test doesn't require write access, you can customize the security context (see below). + +## Customizing security context + +The v2 API does **not** expose `securityContext` fields on the LocustTest CR. The test pod security context is hardcoded in the operator (see `internal/resources/job.go`). There is no way to customize it per-test via the CR. + +The `podSecurityContext` and `containerSecurityContext` Helm values apply to the **operator deployment only**, not to test pods. To change the test pod security context, you would need to modify the operator source code. + +## RBAC best practices + +### Operator RBAC + +The operator's service account needs permissions to manage LocustTest resources and create test infrastructure. + +**What the operator needs:** + +| Resource | Verbs | Purpose | +|----------|-------|---------| +| `locusttests` | get, list, watch, update, patch | Watch CRs and reconcile state | +| `locusttests/status` | get, update, patch | Report test status | +| `locusttests/finalizers` | update | Manage deletion lifecycle | +| `configmaps` | get, list, watch | Read test files and library code | +| `secrets` | get, list, watch | Read credentials for env injection | +| `services` | get, list, watch, create, delete | Master service for worker communication | +| `pods` | get, list, watch | Monitor pod health for status reporting | +| `events` | create, patch | Report status changes and errors | +| `jobs` | get, list, watch, create, delete | Master and worker pods (immutable pattern) | + +!!! 
note "Read-only Secret access" + The operator **never creates or modifies** ConfigMaps or Secrets. It only reads them to populate environment variables and volume mounts in test pods. + +**ClusterRole vs Role:** + +The operator supports two RBAC modes (configured via Helm): + +| Mode | Scope | Use case | +|------|-------|----------| +| ClusterRole (default) | All namespaces | Multi-tenant platform, centralized operator | +| Role | Single namespace | Security-sensitive environments, namespace isolation | + +Configure in Helm values: + +```yaml +# values.yaml +k8s: + clusterRole: + enabled: false # Restrict to operator namespace only +``` + +### Test pod RBAC + +Test pods run as non-root and do **not** get elevated privileges. Test pods use the namespace's default service account. Kubernetes mounts its token automatically. If your cluster does not restrict default service account permissions, consider setting `automountServiceAccountToken: false` on the default service account. + +!!! warning "Least privilege" + Only grant the minimum permissions your test needs. Avoid `cluster-admin` or broad wildcard permissions. + +### User RBAC for test creators + +Users who create and manage LocustTest CRs need different permissions than the operator. 
+ +**Minimal test creator role:** + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: locusttest-creator + namespace: performance-testing +rules: + # Create and manage LocustTest CRs + - apiGroups: ["locust.io"] + resources: ["locusttests"] + verbs: ["get", "list", "watch", "create", "delete"] + + # Create ConfigMaps for test files + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "update", "delete"] + + # View pods for debugging + - apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] + + # View events for status monitoring + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch"] +``` + +**Verify user permissions:** + +```bash +# Check if user can create LocustTest +kubectl auth can-i create locusttests --as jane.doe + +# Check if user can read secrets (should be "no") +kubectl auth can-i get secrets --as jane.doe +``` + +## Network isolation + +Use NetworkPolicies to restrict traffic to/from test pods. 
+ +### Allow only necessary traffic + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: locust-test-isolation + namespace: performance-testing +spec: + podSelector: + matchLabels: + performance-test-name: my-test # Apply to specific test + policyTypes: + - Ingress + - Egress + + ingress: + # Allow communication between pods in the same test + - from: + - podSelector: + matchLabels: + performance-test-name: my-test + ports: + - port: 5557 # Worker -> Master (communication) + protocol: TCP + - port: 5558 # Worker -> Master (data) + protocol: TCP + + egress: + # Allow worker -> master communication + - to: + - podSelector: + matchLabels: + performance-test-name: my-test + ports: + - port: 5557 # Worker -> Master (communication) + protocol: TCP + - port: 5558 # Worker -> Master (data) + protocol: TCP + + # Allow DNS resolution + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + ports: + - port: 53 + protocol: UDP + + # Allow traffic to target system under test + - to: + - podSelector: {} # All pods (adjust as needed) + ports: + - port: 80 + protocol: TCP + - port: 443 + protocol: TCP + + # Allow traffic to OTel Collector (if using OpenTelemetry) + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + - podSelector: + matchLabels: + app: otel-collector + ports: + - port: 4317 # OTLP gRPC + protocol: TCP +``` + +**What this policy allows:** + +- **Ingress:** Only communication between pods in the same test (master ↔ workers) +- **Egress:** DNS, target system (HTTP/HTTPS), OTel Collector + +**What this policy blocks:** + +- Cross-test communication +- External egress except explicitly allowed +- Ingress from outside the test + +### Verification + +**Check if NetworkPolicy is active:** + +```bash +kubectl get networkpolicy -n performance-testing +``` + +**Test connectivity from a worker pod:** + +```bash +# Get a worker pod +POD=$(kubectl get pods -l 
performance-test-pod-name=my-test-worker -o jsonpath='{.items[0].metadata.name}') + +# Test target system connectivity +kubectl exec $POD -- curl -I https://api.example.com + +# Test master connectivity +kubectl exec $POD -- nc -zv my-test-master 5557 + +# Test blocked traffic (should timeout or fail) +kubectl exec $POD -- curl -I https://blocked-host.com --max-time 5 +``` + +### NetworkPolicy best practices + +1. **Start with allow-all, then restrict:** Test your application first, then add NetworkPolicies gradually. + +2. **Allow DNS:** Always allow egress to `kube-system` namespace port 53 for DNS resolution. + +3. **Test-specific policies:** Use `performance-test-name` label to isolate individual tests. + +4. **Monitor denied traffic:** Use a CNI that logs dropped packets (Calico, Cilium) to identify blocked traffic. + +5. **Document exceptions:** If you must allow broad egress, document why in the NetworkPolicy annotations. + +## Verification + +### Check pod security context + +```bash +# Get pod security context +kubectl get pod -l performance-test-name=my-test -o jsonpath='{.items[0].spec.securityContext}' | jq . +``` + +**Expected output:** + +```json +{ + "seccompProfile": { + "type": "RuntimeDefault" + } +} +``` + +### Verify non-root execution + +```bash +# Check which user the pod runs as +POD=$(kubectl get pods -l performance-test-pod-name=my-test-master -o jsonpath='{.items[0].metadata.name}') +kubectl exec $POD -- id +``` + +**Expected output:** + +``` +uid=1000(locust) gid=1000(locust) groups=1000(locust) +``` + +If you see `uid=0(root)`, the pod is running as root (violation of security policy). 
+ +### Verify RBAC permissions + +```bash +# Check operator service account permissions +# Replace and with your installation's values +kubectl auth can-i --list --as=system:serviceaccount:: + +# Check if test pod has Kubernetes API access (should be "no" by default) +kubectl exec $POD -- curl -k https://kubernetes.default.svc +``` + +Expected: Connection refused or authentication error (test pods should NOT have API access by default). + +## Pod Security Standards compliance + +The operator's default security settings meet these Pod Security Standards profiles: + +| Profile | Compliant | Notes | +|---------|-----------|-------| +| **Baseline** | Yes | No restricted fields used (hostNetwork, hostPID, privileged, etc.) | +| **Restricted** | Partial | Missing: `runAsNonRoot`, `allowPrivilegeEscalation=false`, `capabilities drop ALL`. Seccomp RuntimeDefault is present as a hardening measure. | +| **Privileged** | Yes | No restrictions | + +**To meet "restricted" profile:** + +The following settings would need to be added to the test pod security context. 
Since the test pod security context is hardcoded in the operator (`internal/resources/job.go`), this requires modifying the operator source code: + +```yaml +securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL +``` + +## Related guides + +- [Inject secrets and configuration](inject-secrets.md) β€” Manage credentials for test pods +- [Security Best Practices](../../security.md) β€” Complete security guide (RBAC, secrets, external integrations) +- [API Reference](../../api_reference.md) β€” LocustTest CR specification diff --git a/docs/how-to-guides/security/inject-secrets.md b/docs/how-to-guides/security/inject-secrets.md new file mode 100644 index 00000000..ab2bc8e8 --- /dev/null +++ b/docs/how-to-guides/security/inject-secrets.md @@ -0,0 +1,364 @@ +--- +title: Inject secrets and configuration into test pods +description: Inject credentials and configuration into Locust test pods using ConfigMaps, Secrets, and environment variables +tags: + - security + - secrets + - environment variables + - configuration +--- + +# Inject secrets and configuration into test pods + +Inject credentials and configuration into Locust test pods without hardcoding them in test files. The operator provides four methods for injecting data. + +## Method 1: ConfigMap environment variables + +Inject all keys from a ConfigMap as environment variables with an optional prefix. 
+ +**Create a ConfigMap:** + +```bash +kubectl create configmap app-config \ + --from-literal=TARGET_HOST=https://api.example.com \ + --from-literal=LOG_LEVEL=INFO \ + --from-literal=TIMEOUT=30 +``` + +**Reference in LocustTest CR:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: configmap-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + env: + configMapRefs: + - name: app-config # ConfigMap name + prefix: "APP_" # Prefix for all keys (optional) +``` + +**Result:** ConfigMap keys become environment variables with the prefix: +- `TARGET_HOST` β†’ `APP_TARGET_HOST` +- `LOG_LEVEL` β†’ `APP_LOG_LEVEL` +- `TIMEOUT` β†’ `APP_TIMEOUT` + +**Access in your locustfile:** + +```python +import os + +target_host = os.getenv('APP_TARGET_HOST') +log_level = os.getenv('APP_LOG_LEVEL', 'INFO') +timeout = int(os.getenv('APP_TIMEOUT', '30')) +``` + +## Method 2: Secret environment variables + +Inject all keys from a Secret as environment variables with an optional prefix. + +**Create a Secret:** + +```bash +kubectl create secret generic api-credentials \ + --from-literal=API_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... \ + --from-literal=API_KEY=sk_live_51H8... 
\ + --from-literal=DB_PASSWORD=secure-password-here +``` + +**Reference in LocustTest CR:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: secret-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + env: + secretRefs: + - name: api-credentials # Secret name + # No prefix specified -- keys are used as-is +``` + +**Result:** Secret keys become environment variables: +- `API_TOKEN` β†’ `API_TOKEN` +- `API_KEY` β†’ `API_KEY` +- `DB_PASSWORD` β†’ `DB_PASSWORD` + +**Access in your locustfile:** + +```python +import os + +api_token = os.getenv('API_TOKEN') +api_key = os.getenv('API_KEY') +db_password = os.getenv('DB_PASSWORD') +``` + +!!! warning "Secret values in pod specs" + Kubernetes injects Secret values as environment variables. They're visible in pod specs. Use RBAC to restrict access to pod definitions. + +## Method 3: Individual variables + +Define individual environment variables with literal values or references to ConfigMap/Secret keys. This gives you fine-grained control over which keys to inject. 
+ +**Create sources:** + +```bash +kubectl create configmap app-settings --from-literal=api-url=https://api.example.com +kubectl create secret generic auth --from-literal=token=secret-token-here +``` + +**Reference in LocustTest CR:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: individual-vars-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + env: + variables: + # Literal value + - name: ENVIRONMENT + value: "staging" + + # Reference to Secret key + - name: API_TOKEN + valueFrom: + secretKeyRef: + name: auth # Secret name + key: token # Key within Secret + + # Reference to ConfigMap key + - name: API_URL + valueFrom: + configMapKeyRef: + name: app-settings # ConfigMap name + key: api-url # Key within ConfigMap +``` + +**Result:** Three environment variables are injected: +- `ENVIRONMENT=staging` (literal value) +- `API_TOKEN=secret-token-here` (from Secret) +- `API_URL=https://api.example.com` (from ConfigMap) + +**Use cases:** +- Mix literal values with secrets/configs +- Select specific keys from ConfigMaps/Secrets +- Set defaults with fallback to secrets for sensitive values + +## Method 4: Secret file mounts + +Mount secrets as files in the container filesystem. This is useful for: +- TLS certificates +- Credential files (JSON key files, kubeconfig, etc.) 
+- Configuration files that must be read from disk + +**Create a Secret from files:** + +```bash +kubectl create secret generic tls-certs \ + --from-file=ca.crt=path/to/ca.crt \ + --from-file=client.crt=path/to/client.crt \ + --from-file=client.key=path/to/client.key +``` + +**Reference in LocustTest CR:** + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: file-mount-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 3 + env: + secretMounts: + - name: tls-certs # Secret name + mountPath: /etc/locust/certs # Mount path in container + readOnly: true # Mount as read-only (recommended) +``` + +**Result:** Secret keys become files at mount path: +- `/etc/locust/certs/ca.crt` +- `/etc/locust/certs/client.crt` +- `/etc/locust/certs/client.key` + +**Access in your locustfile:** + +```python +import ssl + +# Create SSL context with mounted certificates +ssl_context = ssl.create_default_context(cafile='/etc/locust/certs/ca.crt') +ssl_context.load_cert_chain( + certfile='/etc/locust/certs/client.crt', + keyfile='/etc/locust/certs/client.key' +) + +# Use in HTTP client +# (implementation depends on your HTTP library) +``` + +### Reserved paths + +The following paths are reserved and cannot be used for secret mounts: + +| Path | Purpose | Customizable via | +|------|---------|------------------| +| `/lotest/src/` | Test script mount point | `testFiles.srcMountPath` | +| `/opt/locust/lib` | Library mount point | `testFiles.libMountPath` | + +When `testFiles` is configured, only the paths actually in use are reserved. When not configured, both default paths are reserved. + +If you customize `srcMountPath` or `libMountPath`, those custom paths become reserved instead. 
+ +## Combined example + +Use multiple injection methods together: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: combined-injection +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + env: + # Method 1: ConfigMap environment variables + configMapRefs: + - name: app-config + prefix: "APP_" + + # Method 2: Secret environment variables + secretRefs: + - name: api-credentials + # No prefix specified -- keys are used as-is + + # Method 3: Individual variables + variables: + - name: ENVIRONMENT + value: "production" + - name: REGION + value: "us-west-2" + - name: SPECIAL_TOKEN + valueFrom: + secretKeyRef: + name: special-auth + key: token + + # Method 4: Secret file mounts + secretMounts: + - name: tls-certs + mountPath: /etc/locust/certs + readOnly: true + - name: service-account-key + mountPath: /etc/locust/keys + readOnly: true +``` + +**Result:** +- All keys from `app-config` ConfigMap with `APP_` prefix +- All keys from `api-credentials` Secret (no prefix) +- Literal values: `ENVIRONMENT`, `REGION` +- Individual secret reference: `SPECIAL_TOKEN` +- Files mounted at `/etc/locust/certs/` and `/etc/locust/keys/` + +## Verification + +### Check environment variables + +Verify that environment variables were injected into test pods: + +```bash +# Get a pod name +POD=$(kubectl get pods -l performance-test-name=combined-injection -o jsonpath='{.items[0].metadata.name}') + +# Check all environment variables +kubectl exec $POD -- printenv | sort + +# Check specific prefix +kubectl exec $POD -- printenv | grep "APP_" + +# Check specific variable +kubectl exec $POD -- printenv API_TOKEN +``` + +### Check file mounts + +Verify that secret files were mounted: + +```bash +# List files in mount path +kubectl exec $POD -- ls -la /etc/locust/certs/ + +# Read file content (use with caution for sensitive data) 
+kubectl exec $POD -- cat /etc/locust/certs/ca.crt +``` + +### Troubleshooting + +| Problem | Symptom | Solution | +|---------|---------|----------| +| Pod stuck in `ContainerCreating` or shows `CreateContainerConfigError` | ConfigurationError condition | Verify ConfigMap/Secret exists: `kubectl get configmap,secret` | +| Environment variable missing | Variable not in `printenv` output | Check spelling of ConfigMap/Secret name and key | +| File mount empty | Directory exists but no files | Verify Secret exists and has data: `kubectl get secret -o yaml` | +| Permission denied reading file | `cat` fails with permission error | Check `readOnly: true` and Secret file permissions | + +**Check PodsHealthy condition:** + +```bash +kubectl get locusttest combined-injection -o jsonpath='{.status.conditions[?(@.type=="PodsHealthy")]}' +``` + +If `status=False` with reason `ConfigurationError`, the error message shows which ConfigMap or Secret is missing. + +## Security best practices + +1. **Use Secrets for sensitive data:** Never use ConfigMaps for passwords, tokens, or keys. + +2. **Use RBAC to restrict Secret access:** Limit who can read Secrets in your namespace. Users should not have direct Secret access -- only the operator's service account needs it. + +3. **Rotate secrets regularly:** See [Security Best Practices - Secret Rotation](../../security.md#secret-rotation) for the rotation process. + +4. **Use External Secrets Operator:** For production, sync secrets from external vaults (AWS Secrets Manager, HashiCorp Vault, etc.). See [Security Best Practices - External Secrets](../../security.md#external-secrets-integration). + +5. **Prefer file mounts for certificates:** Mount TLS certificates as files instead of environment variables (harder to accidentally log). + +6. **Use read-only mounts:** Always set `readOnly: true` for secret mounts to prevent accidental modification. 
+ +## Related guides + +- [Mount volumes](../configuration/mount-volumes.md) β€” Mount non-secret volumes (PVCs, ConfigMaps, emptyDir) +- [Security Best Practices](../../security.md) β€” RBAC, secret rotation, external secrets integration +- [API Reference - EnvConfig](../../api_reference.md#envconfig) β€” Complete env configuration reference diff --git a/docs/how_does_it_work.md b/docs/how_does_it_work.md index 41665498..94905263 100644 --- a/docs/how_does_it_work.md +++ b/docs/how_does_it_work.md @@ -1,24 +1,223 @@ --- -title: How does it work -description: A high-level overview of the operator's workflow. +title: How Does It Work +description: A high-level overview of the operator's architecture and workflow. --- -# How does it work +# How Does It Work -To run a performance test, basic configuration is provided through a simple and intuitive kubernetes custom resource. Once deployed the -_Operator_ does all the heavy work of creating and scheduling the resources while making sure that all created load generation pods can -effectively communicate with each other. +To run a performance test, basic configuration is provided through a simple and intuitive Kubernetes custom resource. Once deployed, the _Operator_ does all the heavy work of creating and scheduling the resources while making sure that all created load generation pods can effectively communicate with each other. -To handle the challenge of delivering test script/s from local environment to the cluster and in turn to the deployed _locust_ pods, -the _Operator_ support dynamic volume mounting from a configMaps source. This is indicated by a simple optional configuration. Meaning, if -the configuration is present, the volume is mounted, and if it is not, no volume is mounted. +## Demo Since a "_Picture Is Worth a Thousand Words_", here is a gif! 
![Short demo for how the operator works](assets/images/operatorDemo.gif "Operator Demo") -## Steps performed in demo +### Steps performed in demo -- :material-file-code-outline: Test configmap created in cluster. -- :material-file-document-edit-outline: LocustTest CR deployed into the cluster. -- :material-robot-outline: The _Operator_ creating, configuring and scheduling test resources on CR creation event. -- :material-delete-sweep-outline: The _Operator_ cleaning up test resources after test CR has been removed event. \ No newline at end of file +- :material-file-code-outline: Test ConfigMap created in cluster +- :material-file-document-edit-outline: LocustTest CR deployed into the cluster +- :material-robot-outline: The _Operator_ creating, configuring and scheduling test resources on CR creation event +- :material-delete-sweep-outline: The _Operator_ cleaning up test resources after test CR has been removed + +## Architecture Overview + +The Locust K8s Operator is built using **Go** with the [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) framework, following the standard Kubernetes operator pattern. + +When you create a LocustTest CR, the controller picks it up and orchestrates the creation of all necessary resources. 
Here's how the pieces fit together: + +```mermaid +graph TB + User[User creates LocustTest CR] -->|applies| CR[LocustTest Custom Resource] + CR -->|watches| Controller[Operator Controller] + Controller -->|creates & owns| Service[Master Service] + Controller -->|creates & owns| MasterJob[Master Job] + Controller -->|creates & owns| WorkerJob[Worker Job] + MasterJob -->|creates| MasterPod[Master Pod] + WorkerJob -->|creates| WorkerPods[Worker Pods 1..N] + Controller -->|watches| Jobs[Job Status Changes] + Controller -->|watches| Pods[Pod Events] + Controller -->|updates| Status[Status Subresource] + Status -->|reflects| Phase[Phase: Pending β†’ Running β†’ Succeeded/Failed] + Status -->|tracks| Conditions[Conditions: Ready, PodsHealthy, etc.] +``` + +The controller maintains full ownership of created resources through [owner references](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/), ensuring automatic cleanup when the LocustTest CR is deleted. + +## Reconciliation Loop + +The operator follows an event-driven reconciliation pattern. 
Reconciliation is triggered by: + +- **LocustTest CR events**: create, update, or delete operations +- **Owned Job status changes**: when the master or worker Job completes or fails +- **Pod state changes**: when pods enter CrashLoopBackOff, fail to schedule, or encounter errors + +The controller implements a phase-based state machine to track test lifecycle: + +```mermaid +stateDiagram-v2 + [*] --> Pending: CR created + Pending --> Running: Resources created successfully + Running --> Succeeded: Master Job completes (exit 0) + Running --> Failed: Master Job fails OR pods unhealthy + Pending --> Failed: Resource creation error + Failed --> Pending: External deletion triggers recovery + Succeeded --> [*] + Failed --> [*] + + note right of Pending + Creates master Service + Creates master Job + Creates worker Job + end note + + note right of Running + Monitors Job completion + Tracks pod health + Updates worker counts + end note +``` + +### What Happens in Each Phase + +**Pending** β€” The controller creates three core resources: + +- A master Service (headless, for worker-to-master communication) +- A master Job (single pod running Locust master) +- A worker Job (N pods running Locust workers) + +All resources have owner references pointing to the LocustTest CR. Once creation succeeds, the phase transitions to Running. + +**Running** β€” The controller monitors: + +- Job completion status (success or failure) +- Pod health across all master and worker pods +- Worker connection counts (approximate, from Job status) + +**Succeeded/Failed** β€” Terminal states. The test has completed or encountered unrecoverable errors. Resources remain until CR deletion. + +### Status Updates are Conflict-Safe + +The controller uses a **retry-on-conflict** pattern for all status updates. If two reconcile loops try to update status simultaneously (e.g., from a Job event and a Pod event), the controller automatically retries with the latest resource version. 
This prevents status overwrites and ensures eventual consistency. + +### Self-Healing Behavior + +If external tools delete the Service or Jobs while the test is Running, the controller detects the missing resources and transitions back to Pending. On the next reconcile, it recreates everything from scratch. This self-healing ensures tests can recover from accidental `kubectl delete` operations. + +## Validation Webhooks + +Before a LocustTest CR reaches the controller, it passes through a **ValidatingWebhookConfiguration** that intercepts create and update requests. The webhook validates: + +!!! tip "What Gets Validated" + - **CR name length**: Ensures generated resource names (like `{name}-worker`) stay under the 63-character Kubernetes limit + - **Secret mount path conflicts**: Prevents users from mounting secrets into reserved paths like `/lotest/src` (where test files live) + - **Volume name conflicts**: Blocks use of reserved volume names like `secret-*` or `locust-lib` + - **OpenTelemetry configuration**: When OTel is enabled, the webhook enforces that `endpoint` is provided + +The webhook catches misconfigurations **before** they hit the controller, giving users immediate feedback with clear error messages. This design prevents invalid CRs from cluttering the cluster or causing cryptic pod errors. + +## Pod Health Monitoring + +The controller doesn't just watch Jobs β€” it actively monitors **pod health** to surface issues early. + +### How Pod Watching Works + +Jobs create Pods, so the ownership chain is: `LocustTest β†’ Job β†’ Pod`. Since Pods aren't directly owned by the LocustTest, we use a **custom mapping function** in the controller setup: + +```go +Watches(&corev1.Pod{}, + handler.EnqueueRequestsFromMapFunc(r.mapPodToLocustTest), +) +``` + +This function walks the owner chain: when a Pod event occurs, it finds the owning Job, then finds the LocustTest that owns the Job, and triggers a reconcile on that LocustTest. 
+ +### Grace Period for Startup + +Pods take time to start β€” scheduling, image pulls, volume mounts β€” so the controller applies a **2-minute grace period** after the oldest pod is created. During this window, pod failures are ignored to avoid false positives during normal startup. + +After the grace period expires, the controller analyzes all pods for: + +- **CrashLoopBackOff**: Container repeatedly crashing +- **ImagePullBackOff**: Can't pull the specified image +- **CreateContainerConfigError**: Missing ConfigMap or invalid volume mounts +- **Scheduling errors**: No nodes available, insufficient resources, etc. + +When unhealthy pods are detected, the controller adds a condition to the LocustTest status with the failure reason and affected pod names. For ConfigMap errors, it even extracts the missing ConfigMap name and suggests creating it. + +### Self-Healing from External Deletion + +If a user or automation deletes the master Service or any Job while the test is Running, the controller detects the missing resource during the next reconcile. It immediately transitions the LocustTest back to Pending and recreates all resources from scratch. This ensures tests can recover from accidental deletions without manual intervention. + +## Leader Election & High Availability + +When running multiple replicas of the operator (recommended for production), **leader election** ensures only one instance actively reconciles resources at a time. + +### How It Works + +The operator uses Kubernetes **Lease objects** for leader election. 
When `--leader-elect` is enabled (the default in the Helm chart), all replicas compete for leadership: + +- One instance acquires the lease and becomes the **active leader** +- Other replicas become **standby followers**, ready to take over +- If the leader pod crashes or is evicted, a standby acquires the lease within seconds +- The new leader picks up reconciliation seamlessly using the LocustTest status + +### Why This Matters + +Leader election prevents: + +- **Split-brain scenarios**: Multiple controllers trying to create the same resources +- **Resource conflicts**: Two controllers racing to update status +- **Operator downtime**: If one replica fails, another takes over instantly + +The default Helm deployment runs **2 replicas with leader election enabled**, providing high availability without resource waste. + +!!! info "Leader Election in Development" + Local development typically runs with `--leader-elect=false` for simplicity. Multi-replica setups with leader election are primarily for production resilience. + +## Key Design Decisions + +### Immutable Tests + +Tests are **immutable by design**. Once a LocustTest CR is created, updates to its `spec` are **ignored** by the operator. The operator sets a `SpecDrifted` condition on the CR to indicate when spec changes have been detected but not applied. + +To change test parameters (image, commands, replicas, etc.), **delete and recreate** the CR: + +```bash +# Delete the existing test +kubectl delete locusttest + +# Edit your CR YAML with the desired changes, then re-apply +kubectl apply -f locusttest-cr.yaml +``` + +This design ensures: + +- **Predictable behavior** β€” each test run uses exactly the configuration it was created with +- **Clean test isolation** β€” no mid-flight configuration drift +- **Simple lifecycle** β€” create, run, observe, delete + +### Owner References + +All created resources (Jobs, Services) have owner references pointing to the LocustTest CR. 
This enables: + +- Automatic garbage collection on CR deletion +- Clear resource ownership in `kubectl get` +- No orphaned resources + +### Status Tracking + +The operator maintains rich status information: + +```yaml +status: + phase: Running + expectedWorkers: 5 + connectedWorkers: 5 + startTime: "2026-01-15T10:00:00Z" + conditions: + - type: Ready + status: "True" + lastTransitionTime: "2026-01-15T10:00:05Z" + reason: AllWorkersConnected + message: "All 5 workers connected to master" +``` diff --git a/docs/index.md b/docs/index.md index 0148429b..2aefdeaa 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,6 +1,8 @@ --- title: Locust Kubernetes Operator description: Enable performance testing for the modern era! Utilize the full power of Locust in the cloud with a fully automated, cloud-native approach. +hide: + - navigation --- # Performance testing that simply works @@ -12,14 +14,44 @@ description: Enable performance testing for the modern era! Utilize the full pow +## Find Your Path + +
+ +- :material-scale-balance:{ .lg .middle } **Evaluating Solutions?** + + --- + + Compare the Locust Kubernetes Operator with alternatives in under 30 seconds. + + [:octicons-arrow-right-24: Compare alternatives](comparison.md) + +- :material-rocket-launch:{ .lg .middle } **Ready to Start?** + + --- + + Deploy your first distributed load test on Kubernetes in 5 minutes with our step-by-step guide. + + [:octicons-arrow-right-24: Quick start guide](getting_started/index.md) + +- :material-book-open-variant:{ .lg .middle } **Need API Details?** + + --- + + Jump straight to the complete API field reference, resource configuration, and status lifecycle documentation. + + [:octicons-arrow-right-24: API Reference](api_reference.md) + +
+ +## πŸš€ Experience the Power of v2.0 { .text-center } + +
+ +- :material-language-go:{ .lg .middle } __Rebuilt in Go__ + + --- + + Experience **60x faster startup times** and a **4x smaller memory footprint**. The entire operator has been rewritten in Go for maximum efficiency and reliability. + + [:octicons-arrow-right-24: Read the migration guide](migration.md) + +- :material-eye-check:{ .lg .middle } __OpenTelemetry__ + + --- + + Gain deep visibility with built-in tracing and metrics. No sidecars requiredβ€”just pure, cloud-native observability. + + [:octicons-arrow-right-24: Learn more](how-to-guides/observability/configure-opentelemetry.md) + +- :material-key-variant:{ .lg .middle } __Secret Injection__ + + --- + + Securely manage your test credentials with native Kubernetes Secret and ConfigMap injection directly into your test pods. + + [:octicons-arrow-right-24: Learn more](how-to-guides/security/inject-secrets.md) + +- :material-harddisk:{ .lg .middle } __Volume Mounting__ + + --- + + Mount any storage volume to your master and worker pods for flexible test data and configuration management. + + [:octicons-arrow-right-24: Learn more](how-to-guides/configuration/mount-volumes.md) + +
+ ## Build for cloud-native performance testing { .text-center } @@ -122,24 +192,3 @@ performance testing. Scale up or down based on your needs. -[//]: # (Pipeline status badge) -[pipeline-status]: https://github.com/AbdelrhmanHamouda/locust-k8s-operator/actions/workflows/ci.yaml/badge.svg?branch=master -[pipeline-status-url]: https://github.com/AbdelrhmanHamouda/locust-k8s-operator/actions/workflows/ci.yaml - -[//]: # (Code coverage badge) -[code-coverage]: https://app.codacy.com/project/badge/Grade/70b76e69dbde4a9ebfd36ad5ccf6de78 -[code-coverage-url]: https://www.codacy.com/gh/AbdelrhmanHamouda/locust-k8s-operator/dashboard?utm_source=github.com&utm_medium=referral&utm_content=AbdelrhmanHamouda/locust-k8s-operator&utm_campaign=Badge_Grade - -[//]: # (Code quality badge) -[code-quality]: https://app.codacy.com/project/badge/Coverage/70b76e69dbde4a9ebfd36ad5ccf6de78 -[code-quality-url]: https://www.codacy.com/gh/AbdelrhmanHamouda/locust-k8s-operator/dashboard?utm_source=github.com&utm_medium=referral&utm_content=AbdelrhmanHamouda/locust-k8s-operator&utm_campaign=Badge_Coverage - -[//]: # (common urls) -[contributing-url]: https://github.com/AbdelrhmanHamouda/locust-k8s-operator/blob/master/CONTRIBUTING.md -[issues-url]: https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues -[LocustTest]:https://github.com/AbdelrhmanHamouda/locust-k8s-operator/tree/master/kube/crd/locust-test-crd.yaml -[cr-example]: https://github.com/AbdelrhmanHamouda/locust-k8s-operator/tree/master/kube/sample-cr/locust-test-cr.yaml - -[//]: # (Docker badge) -[docker-url]: https://hub.docker.com/r/lotest/locust-k8s-operator -[docker-pulls]:https://img.shields.io/docker/pulls/lotest/locust-k8s-operator?style=flat&logo=docker&logoColor=green&label=Image%20Pulls&color=green&link=https%3A%2F%2Fhub.docker.com%2Fr%2Flotest%2Flocust-k8s-operator \ No newline at end of file diff --git a/docs/integration-testing.md b/docs/integration-testing.md index a58f2992..ea7cb3be 100644 --- 
a/docs/integration-testing.md +++ b/docs/integration-testing.md @@ -1,252 +1,184 @@ -# Integration Testing Guide +# Testing Guide -This document describes the comprehensive integration testing setup for the Locust K8s Operator, which validates the complete end-to-end functionality beyond unit tests. +This document describes the comprehensive testing setup for the Locust K8s Operator, covering unit tests, integration tests (envtest), and end-to-end tests. ## Overview -The integration test suite performs the following workflow: -1. **Build** - Creates the operator Docker image -2. **Package** - Packages the Helm chart -3. **Deploy** - Spins up a K8s cluster (K3s) and installs the operator -4. **Test** - Deploys a LocustTest CR and validates operator behavior -5. **Validate** - Ensures Locust master/workers are running correctly -6. **Cleanup** - Removes all resources and tears down the environment +The operator uses a multi-layered testing strategy: -## Architecture +| Test Type | Framework | Scope | Speed | +|-----------|-----------|-------|-------| +| **Unit Tests** | Go testing | Individual functions | Fast (~seconds) | +| **Integration Tests** | envtest | Controller + API Server | Medium (~30s) | +| **E2E Tests** | Ginkgo + Kind | Full cluster deployment | Slow (~5-10min) | -### Test Framework -- **Testing Framework**: JUnit 5 with Testcontainers -- **Kubernetes Cluster**: K3s via Testcontainers for local development, KinD in CI environment -- **Build System**: Gradle with custom integration test source set -- **Container Management**: Docker with Jib plugin for image building +## Test Structure -### Test Structure ``` -src/integrationTest/ -β”œβ”€β”€ java/com/locust/operator/ -β”‚ └── LocustOperatorIntegrationTest.java # Main integration test -└── resources/ - └── application-test.yml # Test configuration +locust-k8s-operator/ +β”œβ”€β”€ api/ +β”‚ β”œβ”€β”€ v1/ +β”‚ β”‚ └── *_test.go # v1 API tests +β”‚ └── v2/ +β”‚ β”œβ”€β”€ *_test.go # v2 API tests +β”‚ └── 
locusttest_webhook_test.go # Webhook validation tests +β”œβ”€β”€ internal/ +β”‚ β”œβ”€β”€ config/ +β”‚ β”‚ └── config_test.go # Configuration tests +β”‚ β”œβ”€β”€ controller/ +β”‚ β”‚ β”œβ”€β”€ suite_test.go # envtest setup +β”‚ β”‚ β”œβ”€β”€ locusttest_controller_test.go # Unit tests +β”‚ β”‚ └── integration_test.go # Integration tests +β”‚ └── resources/ +β”‚ β”œβ”€β”€ job_test.go # Job builder tests +β”‚ β”œβ”€β”€ service_test.go # Service builder tests +β”‚ β”œβ”€β”€ labels_test.go # Label builder tests +β”‚ β”œβ”€β”€ env_test.go # Environment builder tests +β”‚ └── command_test.go # Command builder tests +└── test/ + └── e2e/ + β”œβ”€β”€ e2e_suite_test.go # E2E test setup + └── e2e_test.go # E2E test scenarios ``` ## Prerequisites -### Local Development -- **Docker**: Running Docker daemon -- **Java 21**: Required for building the operator -- **Helm 3.x**: For chart packaging and installation -- **Gradle**: Uses project's gradle wrapper +- **Go 1.24+**: Required for running tests +- **Docker**: Required for E2E tests (Kind) +- **Kind**: Required for E2E tests -### CI/CD (GitHub Actions) -- Uses Ubuntu latest runner -- Automatically installs all dependencies -- Runs on PR and push to main branch +## Running Tests -## Running Integration Tests +### Unit & Integration Tests (envtest) + +The primary test command runs both unit tests and integration tests using envtest: -### Option 1: Using the Integration Test Script (Recommended) ```bash -# Make script executable (first time only) -chmod +x scripts/run-integration-test.sh +# Run all tests with coverage +make test + +# Run tests with verbose output +go test ./... -v + +# Run specific package tests +go test ./internal/resources/... -v +go test ./internal/controller/... -v +go test ./api/v2/... -v + +# Run specific test by name +go test ./internal/controller/... 
-v -run TestReconcile -# Run integration tests -./scripts/run-integration-test.sh +# Generate coverage report +make test +go tool cover -html=cover.out -o coverage.html ``` -The script performs several helpful functions: -- Checks prerequisites (Docker, Helm, Java) -- Cleans up previous runs and Docker resources -- Runs the integration tests with proper error handling -- Provides detailed error reporting and logs -- Shows test results and report locations +### E2E Tests (Kind) + +End-to-end tests run against a real Kubernetes cluster using Kind: -### Option 2: Using Gradle Directly ```bash -# Run integration tests -./gradlew integrationTest -PrunIntegrationTests +# Run E2E tests (creates Kind cluster automatically) +make test-e2e -# Run with verbose output -./gradlew integrationTest -PrunIntegrationTests --info +# Run E2E tests with verbose output +KIND_CLUSTER=locust-test go test ./test/e2e/ -v -ginkgo.v -# Run specific test class -./gradlew integrationTest -PrunIntegrationTests --tests="LocustOperatorIntegrationTest" +# Cleanup E2E test cluster +make cleanup-test-e2e ``` -### Option 3: In CI/CD -Integration tests run automatically in GitHub Actions: -- On pull requests to `main` or `master` -- On pushes to `main` or `master` -- Can be triggered manually via `workflow_dispatch` - -## Test Scenarios - -### Test 1: Operator Deployment -- Creates operator namespace -- Installs operator via Helm chart -- Validates operator deployment is ready -- Verifies operator pod is running - -### Test 2: LocustTest Deployment -- Creates test namespace and ConfigMap with simple Locust script -- Deploys LocustTest custom resource -- Validates master and worker deployments are created -- Ensures all pods reach Running state - -### Test 3: LocustTest Execution -- Verifies Locust master web interface starts -- Checks master logs for successful initialization -- Validates workers connect to master -- Confirms test environment is functional - -### Test 4: Cleanup -- Deletes LocustTest 
custom resource -- Verifies all managed resources are cleaned up -- Uninstalls operator -- Validates complete cleanup - -## Configuration - -### Integration Test Configuration -Located in `gradle/integration-test.gradle`: -- Defines separate source set for integration tests -- Configures dependencies (Testcontainers, Kubernetes client, etc.) -- Sets up test reporting and timeouts -- Links to main build pipeline - -### Test Application Configuration -Located in `src/integrationTest/resources/application-test.yml`: -- Configures logging levels for test visibility -- Sets timeouts for different test phases -- Defines resource locations and image names - -### CI Configuration -Located in `.github/workflows/integration-test.yml`: -- GitHub Actions workflow for automated testing -- Includes caching for Gradle and Docker layers -- Uploads test results as artifacts -- Uses **KinD (Kubernetes in Docker)** cluster with custom configuration in `.github/kind-config.yaml` -- Uses Helm 3.12.0 for chart installation - -## Sample LocustTest Resource - -The integration test creates this sample LocustTest CR: - -```yaml -apiVersion: locust.io/v1 -kind: LocustTest -metadata: - name: integration-test - namespace: locust-tests -spec: - masterConfig: - replicas: 1 - image: locustio/locust:2.15.1 - resources: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "256Mi" - cpu: "200m" - workerConfig: - replicas: 2 - image: locustio/locust:2.15.1 - resources: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "256Mi" - cpu: "200m" - configMap: locust-test-scripts +### CI Pipeline + +All tests run automatically in GitHub Actions: + +```bash +# Run the same checks as CI locally +make ci + +# This runs: +# - make lint (golangci-lint) +# - make test (unit + integration tests) ``` -## Test Reports and Artifacts +## Test Fixtures -### Local Testing -- **HTML Report**: `build/reports/integration-tests/index.html` -- **JUnit XML**: `build/test-results/integration-test/` -- 
**Logs**: `/tmp/locust-integration-test-{timestamp}.log` +Test fixtures and sample data are located in: -### CI Testing -- Test results uploaded as GitHub Actions artifacts -- Available for download from the Actions run page -- Includes both HTML reports and raw XML results +- `internal/testdata/` - Test fixtures for unit tests +- `config/samples/` - Sample CRs for integration/E2E tests ## Troubleshooting ### Common Issues -#### Docker Permission Errors +#### envtest Binary Issues ```bash -# On Linux, ensure user is in docker group -sudo usermod -aG docker $USER -# Then logout and login again +# Re-download envtest binaries +make setup-envtest + +# Verify binaries are installed +ls bin/k8s/ ``` -#### K3s Container Startup Issues -- Ensure Docker has enough resources (4GB+ RAM recommended) -- Check Docker daemon is running: `docker info` -- Verify no conflicting containers: `docker ps -a` +#### Test Timeouts +```bash +# Increase timeout for slow systems +go test ./... -v -timeout 10m +``` -#### Helm Chart Packaging Failures -- Ensure Helm is installed: `helm version` -- Check chart syntax: `helm lint charts/locust-k8s-operator` -- Verify chart dependencies: `helm dependency list charts/locust-k8s-operator` +#### Kind Cluster Issues +```bash +# Check if cluster exists +kind get clusters -#### Integration Test Timeouts -- Tests have generous timeouts but may need adjustment for slower systems -- Modify timeouts in `application-test.yml` if needed -- Check system resources during test execution +# Delete and recreate +kind delete cluster --name locust-k8s-operator-test-e2e +make test-e2e +``` ### Debug Mode -Enable debug logging by setting: -```yaml -logger: - levels: - com.locust: DEBUG - org.testcontainers: DEBUG + +Run tests with verbose logging: +```bash +# Verbose test output +go test ./internal/controller/... -v -ginkgo.v + +# With debug logs from controller +go test ./internal/controller/... 
-v -args -zap-log-level=debug ``` -### Manual Debugging -If tests fail, you can manually inspect the K3s cluster: -1. The test creates temporary kubeconfig files -2. Look for log messages indicating kubeconfig location -3. Use `kubectl` with the temporary kubeconfig to inspect cluster state - -## Performance Considerations - -### Resource Requirements -- **Memory**: ~4GB available RAM recommended -- **CPU**: 2+ cores for reasonable performance -- **Disk**: ~10GB for images and temporary files -- **Network**: Internet access for pulling images - -### Execution Time -- Full test suite: ~10-15 minutes -- Individual test phases: - - Cluster startup: ~2-3 minutes - - Image building: ~3-5 minutes - - Deployment validation: ~2-3 minutes - - Test execution: ~2-3 minutes - - Cleanup: ~1-2 minutes - -## Future Enhancements - -### Planned Improvements -- [ ] Multi-scenario testing (different LocustTest configurations) -- [ ] Performance benchmarking integration -- [ ] Integration with libConfigMap feature testing -- [ ] Cross-platform testing (ARM64 support) -- [ ] Parallel test execution for faster CI - -### Extension Points -- Add custom test scenarios in separate test classes -- Extend with custom Kubernetes resources validation -- Integrate with monitoring and observability testing -- Add chaos engineering tests for resilience validation +## Writing New Tests + +### Guidelines + +1. **Unit tests**: Test pure functions in isolation +2. **Integration tests**: Test controller behavior with envtest +3. **E2E tests**: Test user-facing scenarios in real cluster + +### Test Naming Conventions + +```go +// Unit tests: Test_ +func TestBuildMasterJob_WithEnvConfig(t *testing.T) {} + +// Integration tests: Describe/Context/It +Describe("LocustTest Controller", func() { + Context("When creating a LocustTest", func() { + It("Should create master Job", func() {}) + }) +}) +``` + +### Adding Integration Tests + +1. Add test to `internal/controller/integration_test.go` +2. 
Use `k8sClient` for Kubernetes operations +3. Use `Eventually` for async assertions +4. Clean up resources in `AfterEach` ## Related Documentation -- [How It Works](how_does_it_work.md) - Operator architecture overview -- [Contributing](contribute.md) - Development guidelines -- [LibConfigMap Feature](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/blob/master/LIBCONFIGMAP_FEATURE_IMPLEMENTATION.md) - Feature implementation details + +- [Local Development](local-development.md) - Development setup +- [Contributing](contribute.md) - Contribution guidelines +- [Pull Request Process](pull-request-process.md) - PR workflow diff --git a/docs/local-development.md b/docs/local-development.md index 5efb10ed..50efe9a7 100644 --- a/docs/local-development.md +++ b/docs/local-development.md @@ -4,8 +4,15 @@ This guide describes the setup and workflow for local development on the Locust ## Development Setup -
-Initial Setup +### Prerequisites + +- **Go 1.24+**: Required for building the operator +- **Docker**: Running Docker daemon for building images +- **kubectl**: Kubernetes CLI for cluster interaction +- **Kind** or **Minikube**: Local Kubernetes cluster for testing +- **Helm 3.x**: For chart packaging and installation + +### Initial Setup 1. Clone the repository: ```bash @@ -13,103 +20,174 @@ This guide describes the setup and workflow for local development on the Locust cd locust-k8s-operator ``` -2. Install [pre-commit](https://pre-commit.com/) and set up the git hooks: +2. Install dependencies and tools: ```bash - pre-commit install --install-hooks - pre-commit install --hook-type commit-msg + # Download Go dependencies + make tidy + + # Install development tools (controller-gen, envtest, etc.) + make controller-gen + make envtest + make kustomize ``` -
## Development Guidelines - This project follows the [Conventional Commits](https://www.conventionalcommits.org/) standard to automate [Semantic Versioning](https://semver.org/) and [Keep A Changelog](https://keepachangelog.com/) with [Commitizen](https://github.com/commitizen-tools/commitizen). -- All code should include appropriate tests. See the [integration testing guide](integration-testing.md) for details on the integration test setup. +- All code should include appropriate tests. See the [integration testing guide](integration-testing.md) for details on the test setup. -## Local Testing with Minikube and Helm +## Common Development Commands -For local development and testing, you can use Minikube to create a local Kubernetes cluster. This allows you to test the operator and your changes in an environment that closely resembles a production setup. +The project uses a `Makefile` for common development tasks. Run `make help` to see all available targets. -### Prerequisites +### Build & Test -- [Minikube](https://minikube.sigs.k8s.io/docs/start/) -- [Helm](https://helm.sh/docs/intro/install/) +```bash +# Build the operator binary +make build -### Steps +# Run all tests (unit + integration via envtest) +make test + +# Run linter +make lint + +# Run linter with auto-fix +make lint-fix + +# Run all CI checks locally +make ci +``` + +### Code Generation + +```bash +# Generate CRDs, RBAC, and webhook manifests +make manifests -1. **Start Minikube** +# Generate DeepCopy implementations +make generate + +# Format code +make fmt + +# Run go vet +make vet +``` + +### Running Locally + +```bash +# Run the operator locally against your current kubeconfig cluster +make run + +# Install CRDs into the cluster +make install + +# Uninstall CRDs from the cluster +make uninstall +``` + +## Local Testing with Kind + +For local development and testing, Kind (Kubernetes in Docker) is the recommended approach. + +### Steps - Start a local Kubernetes cluster using Minikube: +1. 
**Create a Kind Cluster** ```bash - minikube start + kind create cluster --name locust-dev ``` 2. **Build and Load the Docker Image** - If you've made changes to the operator's source code, you'll need to build a new Docker image and load it into your Minikube cluster. This project uses the Jib Gradle plugin to build images directly, so you don't need a `Dockerfile`. + ```bash + # Build the Docker image + make docker-build IMG=locust-k8s-operator:dev + + # Load the image into Kind + kind load docker-image locust-k8s-operator:dev --name locust-dev + ``` - First, build the image to your local Docker daemon: +3. **Deploy the Operator** + + Option A: Using kustomize (for development): ```bash - ./gradlew jibDockerBuild + # Deploy CRDs and operator + make deploy IMG=locust-k8s-operator:dev ``` - Next, load the image into Minikube's internal registry: + Option B: Using Helm (for production-like testing): ```bash - minikube image load locust-k8s-operator:latest + # Package the Helm chart + helm package ../charts/locust-k8s-operator + + # Install with local image + helm install locust-operator locust-k8s-operator-*.tgz \ + --set image.repository=locust-k8s-operator \ + --set image.tag=dev \ + --set image.pullPolicy=IfNotPresent ``` -3. **Package the Helm Chart** +4. **Verify the Deployment** - Package the Helm chart to create a distributable `.tgz` file. + !!! note "Development vs Production Namespaces" + The `make deploy` command generates a namespace based on your project name. For production deployments, use the `locust-system` namespace as documented in the [Helm Deployment Guide](helm_deploy.md). ```bash - helm package ./charts/locust-k8s-operator - ``` + # Check pods in the generated namespace + kubectl get pods -A | grep locust -4. **Install the Operator with Helm** - - Install the Helm chart on your Minikube cluster. The command below overrides the default image settings to use the one you just built and loaded. 
+ # Follow operator logs + kubectl logs -f -n <namespace> deployment/<operator-deployment-name> + ``` - You can use a `values.yaml` file to override other settings. +5. **Test with a Sample CR** - ```yaml - # values.yaml (optional) - # Example: Set resource requests and limits for the operator pod - config: - loadGenerationPods: - resource: - cpuRequest: 250m - memRequest: 128Mi - ephemeralRequest: 300M - cpuLimit: 1000m - memLimit: 1024Mi - ephemeralLimit: 50M + ```bash + # Create a test ConfigMap with a simple Locust script + kubectl create configmap locust-test --from-literal=locustfile.py=' + from locust import HttpUser, task + class TestUser(HttpUser): + @task + def hello(self): + self.client.get("/") + ' + + # Apply a sample LocustTest CR + kubectl apply -f config/samples/locust_v2_locusttest.yaml - # To leave a resource unbound, Leave the limit empty - # This is useful when you don't want to set a specific limit. - # example: - # config: - # loadGenerationPods: - # resource: - # cpuLimit: "" - # memLimit: "" - # ephemeralLimit: "" + # Watch the resources + kubectl get locusttests,jobs,pods -w ``` - Install the chart using the following command. The `-f values.yaml` flag is optional. +6. **Cleanup** ```bash - helm install locust-operator locust-k8s-operator-*.tgz -f values.yaml \ - --set image.repository=locust-k8s-operator \ - --set image.tag=latest \ - --set image.pullPolicy=IfNotPresent + # Remove the operator + make undeploy + + # Delete the Kind cluster + kind delete cluster --name locust-dev ``` - This will deploy the operator to your Minikube cluster using the settings defined in your `values.yaml` file. - ## Writing Documentation All documentation is located under the `docs/` directory. The documentation is hosted on [GitHub Pages](https://abdelrhmanhamouda.github.io/locust-k8s-operator/) and updated automatically with each release.
To manage and build the documentation, the project uses [MkDocs](https://www.mkdocs.org/) & [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) framework. +### Preview Documentation Locally + +```bash +# Install MkDocs (if not installed) +pip install mkdocs mkdocs-material + +# Serve documentation locally +mkdocs serve + +# Build documentation +mkdocs build --strict +``` + During development, the **_CI_** workflow will build the documentation as part of the validation. diff --git a/docs/metrics_and_dashboards.md b/docs/metrics_and_dashboards.md index 537620a2..0c718984 100644 --- a/docs/metrics_and_dashboards.md +++ b/docs/metrics_and_dashboards.md @@ -11,54 +11,310 @@ tags: # Metrics & Dashboards -The Locust Kubernetes Operator is designed with observability in mind, providing out-of-the-box support for Prometheus metrics. This allows you to gain deep insights into your performance tests and the operator's behavior. -## :material-export: Prometheus Metrics Exporter +## :material-chart-timeline: OpenTelemetry Metrics & Traces -By default, the operator deploys a [Prometheus metrics exporter](https://github.com/ContainerSolutions/locust_exporter) alongside each Locust master and worker pod. This exporter collects detailed metrics from the Locust instances and exposes them in a format that Prometheus can scrape. +!!! info "New in v2.0" + Native OpenTelemetry support is available in the v2 API. -### :material-key-variant: Key Metrics +### Native OpenTelemetry Support -Some of the key metrics you can monitor include: +Locust 2.x includes native OpenTelemetry support, which the operator can configure automatically. This provides both metrics and distributed tracing without requiring the metrics exporter sidecar. -- `locust_requests_total`: The total number of requests made. -- `locust_requests_failed_total`: The total number of failed requests. -- `locust_response_time_seconds`: The response time of requests. 
-- `locust_users`: The number of simulated users. +### Configuring OTel -### :material-tune: Configuration +Enable OpenTelemetry in your LocustTest CR: -To enable Prometheus to scrape these metrics, you'll need to configure a scrape job in your `prometheus.yml` file. Here's an example configuration: +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: otel-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + observability: + openTelemetry: + enabled: true + endpoint: "http://otel-collector.monitoring:4317" + protocol: "grpc" +``` + +See [Advanced Topics - OpenTelemetry](how-to-guides/observability/configure-opentelemetry.md) for detailed configuration options. + +### OTel Collector Setup + +For a complete observability setup, deploy an OTel Collector. Example configuration: + +```yaml +# otel-collector-config.yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +exporters: + prometheus: + endpoint: 0.0.0.0:8889 + otlphttp: + endpoint: http://jaeger-collector:4318 + tls: + insecure: true + +service: + pipelines: + metrics: + receivers: [otlp] + exporters: [prometheus] + traces: + receivers: [otlp] + exporters: [otlphttp] +``` + +!!! tip + The Helm chart includes an optional OTel Collector deployment. Enable it with `otelCollector.enabled: true`. + +--- + +## :material-robot-outline: Operator Metrics + +The Go operator can expose controller-runtime metrics (disabled by default). 
When enabled, metrics are served on the configured port (default: 8080): + +| Metric | Description | +|--------|-------------| +| `controller_runtime_reconcile_total` | Total reconciliations | +| `controller_runtime_reconcile_errors_total` | Reconciliation errors | +| `controller_runtime_reconcile_time_seconds` | Reconciliation duration | +| `workqueue_depth` | Current queue depth | +| `workqueue_adds_total` | Items added to queue | + +These metrics can be scraped by Prometheus using the standard `/metrics` endpoint on the operator pod. + +### Enabling Operator Metrics + +Enable metrics in your Helm values: + +```yaml +metrics: + enabled: true +``` + +Then configure Prometheus to scrape the operator: ```yaml scrape_configs: - - job_name: 'locust' + - job_name: 'locust-operator' kubernetes_sd_configs: - role: pod relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: locust-k8s-operator + - source_labels: [__address__] action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 + regex: ([^:]+)(?::\d+)? + replacement: $1:8080 target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) ``` -## :material-view-dashboard-outline: Grafana Dashboards +### ServiceMonitor (Prometheus Operator) -Once you have your metrics flowing into Prometheus, you can create powerful and informative dashboards in Grafana to visualize your test results. You can build panels to track key performance indicators (KPIs) such as response times, request rates, and error rates. 
+If using the Prometheus Operator, create a ServiceMonitor to automatically discover and scrape operator metrics: -There are also community-built Grafana dashboards available for Locust that you can adapt for your needs. +=== "HTTP Metrics" -## :material-robot-outline: Operator Metrics + ```yaml + apiVersion: monitoring.coreos.com/v1 + kind: ServiceMonitor + metadata: + name: locust-operator-metrics + namespace: locust-operator-system + labels: + app.kubernetes.io/name: locust-k8s-operator + spec: + selector: + matchLabels: + app.kubernetes.io/name: locust-k8s-operator + endpoints: + - port: metrics + path: /metrics + interval: 30s + ``` + + Ensure Helm values have: + ```yaml + metrics: + enabled: true + secure: false # HTTP metrics + ``` + +=== "HTTPS Metrics (Recommended)" + + ```yaml + apiVersion: monitoring.coreos.com/v1 + kind: ServiceMonitor + metadata: + name: locust-operator-metrics + namespace: locust-operator-system + labels: + app.kubernetes.io/name: locust-k8s-operator + spec: + selector: + matchLabels: + app.kubernetes.io/name: locust-k8s-operator + endpoints: + - port: metrics + path: /metrics + interval: 30s + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true # For development only + ``` + + Ensure Helm values have: + ```yaml + metrics: + enabled: true + secure: true # Enable HTTPS metrics (default: false) + ``` + +!!! warning "Production TLS" + For production, use cert-manager to manage TLS certificates instead of `insecureSkipVerify: true`. See the operator's `config/prometheus/` directory for examples. 
+ +### Operator Metrics Queries + +Useful PromQL queries for operator monitoring: + +```promql +# Reconciliation rate (per second) +rate(controller_runtime_reconcile_total[5m]) + +# Reconciliation error rate +rate(controller_runtime_reconcile_errors_total[5m]) + +# Average reconciliation duration +rate(controller_runtime_reconcile_time_seconds_sum[5m]) + / rate(controller_runtime_reconcile_time_seconds_count[5m]) + +# Current workqueue depth +workqueue_depth + +# Queue processing rate +rate(workqueue_adds_total[5m]) +``` + +--- + +## :material-chart-box-outline: Locust Test Metrics + +!!! warning "Two Metrics Approaches - Choose One" + The operator provides **two mutually exclusive** methods for collecting Locust test metrics: + + **1. Prometheus Exporter Sidecar** (default, v1 & v2 API) + + - Uses `containersol/locust_exporter` sidecar on port 9646 + - Exposes Prometheus-formatted metrics + - Works with Prometheus scraping + - Documented in this section below + + **2. Native OpenTelemetry** (v2 API only) + + - Locust exports directly via OTLP protocol + - No sidecar container needed + - Metrics sent to OTel Collector + - See [OpenTelemetry section above](#opentelemetry-metrics-traces) + + **When OTel is enabled, the exporter sidecar is NOT deployed.** All Prometheus exporter documentation below only applies to **non-OTel mode**. + +### Metrics Exporter Sidecar (Non-OTel Mode) + +When OpenTelemetry is **not** enabled, the operator automatically injects a Prometheus metrics exporter sidecar into the Locust master pod. This exporter scrapes Locust's built-in stats endpoint and exposes metrics in Prometheus format. + +**What the Operator Creates Automatically**: + +1. **Metrics Exporter Sidecar Container**: + - Image: `containersol/locust_exporter:v0.5.0` + - Port: 9646 + - Path: `/metrics` + +2. **Kubernetes Service**: `<test-name>-master` + - Includes metrics port 9646 + - Provides stable DNS endpoint + +3.
**Pod Annotations** (for Prometheus auto-discovery): + ```yaml + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "9646" + ``` + +**No manual setup required** - the operator handles everything. + +### Available Locust Metrics + +The exporter provides these key metrics from Locust: + +| Metric | Type | Description | +|--------|------|-------------| +| `locust_requests_total` | Counter | Total number of requests | +| `locust_requests_current_rps` | Gauge | Current requests per second | +| `locust_requests_current_fail_per_sec` | Gauge | Current failures per second | +| `locust_requests_avg_response_time` | Gauge | Average response time (ms) | +| `locust_requests_min_response_time` | Gauge | Minimum response time (ms) | +| `locust_requests_max_response_time` | Gauge | Maximum response time (ms) | +| `locust_requests_avg_content_length` | Gauge | Average response size (bytes) | +| `locust_users` | Gauge | Current number of simulated users | +| `locust_errors` | Counter | Total errors by type | + +For the complete list, see the [locust_exporter documentation](https://github.com/ContainerSolutions/locust_exporter). + +### Locust Metrics Queries + +Useful PromQL queries for load test monitoring: + +```promql +# Total request rate across all endpoints +sum(rate(locust_requests_total[1m])) + +# Error rate +sum(rate(locust_errors[1m])) + +# Average response time +avg(locust_requests_avg_response_time) + +# Max response time (locust_exporter exposes gauges, not histograms) +max(locust_requests_max_response_time) + +# Current active users +sum(locust_users) + +# Request rate by endpoint +sum(rate(locust_requests_total[1m])) by (name, method) + +# Error percentage +100 * sum(rate(locust_errors[1m])) + / sum(rate(locust_requests_total[1m])) +``` + +### Integration Examples + +The metrics are automatically exposed by the operator-created Service and pod annotations. 
Simply configure your monitoring tools to discover them: + +**Prometheus**: Configure Kubernetes service discovery to scrape pods with `prometheus.io/scrape: "true"` annotation. The operator adds these annotations automatically - no manual configuration of individual tests needed. + +**Grafana**: Connect to your Prometheus datasource and create dashboards using the PromQL queries above. Import panels from existing [Locust dashboard examples](https://grafana.com/grafana/dashboards/?search=locust). + +**NewRelic**: Deploy a Prometheus agent configured to scrape Kubernetes pods with `prometheus.io/scrape: true` and forward metrics to NewRelic. See [Issue #118](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues/118) for production deployment patterns. -In addition to the Locust-specific metrics, the operator itself exposes a set of metrics through Micronaut's metrics module. These metrics provide insights into the operator's health and performance, including JVM metrics, uptime, and more. You can find these metrics by scraping the operator's pod on the `/health` endpoint. +**DataDog**: Configure the DataDog agent's Prometheus integration to auto-discover and scrape pods with `prometheus.io/*` annotations. The DataDog agent automatically finds operator-created test pods. +!!! tip "Production Deployment" + For large-scale deployments, see [Issue #118](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues/118) which documents production patterns used with thousands of tests in NewRelic and DataDog environments. diff --git a/docs/migration.md b/docs/migration.md new file mode 100644 index 00000000..910bae23 --- /dev/null +++ b/docs/migration.md @@ -0,0 +1,407 @@ +--- +title: Migration Guide +description: Guide for migrating from v1 to v2 of the Locust Kubernetes Operator +tags: + - migration + - upgrade + - v2 + - guide +--- + +# Migration Guide: v1 to v2 + +This guide helps existing users of the Locust Kubernetes Operator upgrade from v1 to v2. 
The v2 release is a complete rewrite in Go, bringing significant performance improvements and new features. + + +## Overview + +### Why We Rewrote in Go + +The v2 operator was rewritten from Java to Go for several key reasons: + +| Aspect | Java (v1) | Go (v2) | +|--------|-----------|---------| +| **Memory footprint** | ~256MB | ~64MB | +| **Startup time** | ~60 seconds | <1 second | +| **Framework** | Java Operator SDK | Operator SDK / controller-runtime | +| **Ecosystem alignment** | Minority | Majority of K8s operators | + +### What Changes for Users + +- **API Version:** New `locust.io/v2` API with grouped configuration +- **Backward Compatibility:** v1 CRs continue to work via automatic conversion +- **New Features:** OpenTelemetry, secret injection, volume mounting, separate resource specs +- **Helm Chart:** Updated values structure (backward compatible) + +### Compatibility Guarantees + +- **v1 API:** Fully supported via conversion webhook (deprecated, will be removed in v3) +- **Existing CRs:** Work without modification +- **Helm Values:** Backward compatibility shims for common settings + +--- + +## Before You Begin + +### Prerequisites + +- Kubernetes 1.25+ +- Helm 3.x +- cert-manager v1.14+ (required for conversion webhook) + +### Backup Recommendations + +Before upgrading, back up your existing resources: + +```bash +# Export all LocustTest CRs +kubectl get locusttests -A -o yaml > locusttests-backup.yaml + +# Export operator Helm values +helm get values locust-operator -n <namespace> > values-backup.yaml +``` + +!!! danger "Critical: Webhook Required for v1 API Compatibility" + If you have existing v1 `LocustTest` CRs, the conversion webhook is **required** for them to continue working after upgrading to v2. Without it, v1 CRs will fail CRD schema validation. + + You **must**: + + 1. Install [cert-manager](https://cert-manager.io/docs/installation/) before upgrading + 2. Enable the webhook during upgrade: `--set webhook.enabled=true` + 3.
Verify the webhook is running after upgrade + + If you only use v2 CRs (or are starting fresh), the webhook is optional. + +--- + +## Step 1: Update Helm Chart + +### Upgrade Command + +```bash +# Update Helm repository +helm repo update locust-k8s-operator + +# Upgrade to v2 (with webhook for v1 CR compatibility) +helm upgrade locust-operator locust-k8s-operator/locust-k8s-operator \ + --namespace locust-system \ + --version 2.0.0 \ + --set webhook.enabled=true + +# If you don't need v1 API compatibility, you can omit --set webhook.enabled=true +``` + +!!! note "CRD Upgrade" + Helm automatically upgrades the CRD when using `helm upgrade`. The v2 CRD includes conversion webhook configuration when webhooks are enabled, allowing the API server to convert between v1 and v2 formats transparently. + +### New Helm Values + +The v2 chart introduces a cleaner structure. Key changes: + +| Old Path (v1) | New Path (v2) | Notes | +|---------------|---------------|-------| +| `config.loadGenerationPods.resource.cpuRequest` | `locustPods.resources.requests.cpu` | Backward compatible | +| `config.loadGenerationPods.resource.memLimit` | `locustPods.resources.limits.memory` | Backward compatible | +| `config.loadGenerationPods.affinity.enableCrInjection` | `locustPods.affinityInjection` | Backward compatible | +| `micronaut.*` | N/A | Removed (Java-specific) | +| `appPort` | N/A | Fixed at 8081 | +| N/A | `webhook.enabled` | New: Enable conversion webhook | +| N/A | `leaderElection.enabled` | New: Enable leader election | + +### Operator Resource Defaults + +The Go operator controller requires significantly fewer resources than the Java version: + +```yaml +resources: + limits: + memory: 256Mi + cpu: 500m + requests: + memory: 64Mi + cpu: 10m +``` + +--- + +## Step 2: Verify Existing CRs + +The conversion webhook automatically converts v1 CRs to v2 format when stored. 
Verify your existing CRs work: + +```bash +# List all LocustTests +kubectl get locusttests -A + +# Check a specific CR +kubectl describe locusttest <name> +``` + +### Verify Conversion + +You can read a v1 CR as v2 to verify conversion: + +```bash +# Read as v2 (even if created as v1) +kubectl get locusttest <name> -o yaml | grep "apiVersion:" +# Should show: apiVersion: locust.io/v2 +``` + +!!! warning "Deprecation Warning" + When using the v1 API, you'll see a deprecation warning in kubectl output. This is expected and indicates the conversion webhook is working. + +--- + +## Step 3: Migrate CRs to v2 Format (Recommended) + +While v1 CRs continue to work, migrating to v2 format is recommended to access new features. + +### Field Mapping Reference + +| v1 Field | v2 Field | Notes | +|----------|----------|-------| +| `masterCommandSeed` | `master.command` | Direct mapping | +| `workerCommandSeed` | `worker.command` | Direct mapping | +| `workerReplicas` | `worker.replicas` | Direct mapping | +| `image` | `image` | No change | +| `imagePullPolicy` | `imagePullPolicy` | No change | +| `imagePullSecrets` | `imagePullSecrets` | No change | +| `configMap` | `testFiles.configMapRef` | Grouped under testFiles | +| `libConfigMap` | `testFiles.libConfigMapRef` | Grouped under testFiles | +| `labels.master` | `master.labels` | Grouped under master | +| `labels.worker` | `worker.labels` | Grouped under worker | +| `annotations.master` | `master.annotations` | Grouped under master | +| `annotations.worker` | `worker.annotations` | Grouped under worker | +| `affinity.nodeAffinity` | `scheduling.affinity` | Uses native K8s Affinity ⚠️[^1] | +| `tolerations` | `scheduling.tolerations` | Uses native K8s Tolerations | +| N/A | `master.resources` | New: Separate resource specs for master | +| N/A | `worker.resources` | New: Separate resource specs for worker | +| N/A | `master.extraArgs` | New: Additional CLI arguments for master | +| N/A | `worker.extraArgs` | New: Additional CLI arguments for
worker | +| N/A | `master.autostart` | Auto-added during conversion (default: true) | +| N/A | `master.autoquit` | Auto-added during conversion (enabled: true, timeout: 60s) | + +[^1]: **Affinity Conversion Note**: When converting v2 β†’ v1, complex affinity rules may be simplified. Only `NodeSelectorOpIn` operators are preserved, and only the first value from multi-value expressions is kept. Pod affinity/anti-affinity and preferred scheduling rules are not preserved in v1. + +### Example Transformation + +=== "v1 Format (Deprecated)" + + ```yaml + apiVersion: locust.io/v1 + kind: LocustTest + metadata: + name: example-test + spec: + image: locustio/locust:2.20.0 + masterCommandSeed: --locustfile /lotest/src/test.py --host https://example.com + workerCommandSeed: --locustfile /lotest/src/test.py + workerReplicas: 5 + configMap: test-scripts + labels: + master: + team: platform + worker: + team: platform + ``` + +=== "v2 Format" + + ```yaml + apiVersion: locust.io/v2 + kind: LocustTest + metadata: + name: example-test + spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://example.com" + labels: + team: platform + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + labels: + team: platform + testFiles: + configMapRef: test-scripts + ``` + +### Lossy Conversion Details + +!!! 
warning "V2-Only Fields Not Preserved in V1" + When reading v2 CRs as v1 (or during rollback to v1), the following v2-exclusive fields **will be lost**: + + **Master/Worker Configuration:** + + - `master.resources` - Separate resource specs for master pod + - `worker.resources` - Separate resource specs for worker pod + - `master.extraArgs` - Additional CLI arguments for master + - `worker.extraArgs` - Additional CLI arguments for worker + - `master.autostart` - Autostart configuration + - `master.autoquit` - Autoquit configuration + + **Test Files:** + + - `testFiles.srcMountPath` - Custom mount path for test files + - `testFiles.libMountPath` - Custom mount path for library files + + **Scheduling:** + + - `scheduling.nodeSelector` - Node selector (v1 only supports nodeAffinity) + - Complex affinity rules (see warning above) + + **Environment & Secrets:** + + - `env.configMapRefs` - ConfigMap environment injection + - `env.secretRefs` - Secret environment injection + - `env.variables` - Individual environment variables + - `env.secretMounts` - Secret file mounts + + **Volumes:** + + - `volumes` - Volume definitions + - `volumeMounts` - Volume mounts with target selection + + **Observability:** + + - `observability.openTelemetry` - OpenTelemetry configuration + + **Status:** + + - All `status` subresource fields (v1 has no status implementation) + + **Recommendation**: Before rolling back from v2 to v1, backup your v2 CRs to preserve this configuration. 
+ +--- + +## Step 4: Leverage New Features + +After migrating to v2, you can use new features: + +### OpenTelemetry Support + +```yaml +spec: + observability: + openTelemetry: + enabled: true + endpoint: "otel-collector.monitoring:4317" + protocol: "grpc" +``` + +[:octicons-arrow-right-24: Learn more about OpenTelemetry](how-to-guides/observability/configure-opentelemetry.md) + +### Secret & ConfigMap Injection + +```yaml +spec: + env: + secretRefs: + - name: api-credentials + prefix: "API_" + configMapRefs: + - name: app-config + variables: + - name: TARGET_HOST + value: "https://api.example.com" +``` + +[:octicons-arrow-right-24: Learn more about Environment Injection](how-to-guides/security/inject-secrets.md) + +### Volume Mounting + +```yaml +spec: + volumes: + - name: test-data + persistentVolumeClaim: + claimName: test-data-pvc + volumeMounts: + - name: test-data + mountPath: /data + target: both # master, worker, or both +``` + +[:octicons-arrow-right-24: Learn more about Volume Mounting](how-to-guides/configuration/mount-volumes.md) + +### Separate Resource Specs + +```yaml +spec: + master: + resources: + requests: + memory: "256Mi" + cpu: "100m" + worker: + resources: + requests: + memory: "512Mi" + cpu: "500m" +``` + +[:octicons-arrow-right-24: Learn more about Separate Resources](api_reference.md) + +--- + +## Troubleshooting + +### Common Issues + +#### Conversion Webhook Not Working + +**Symptom:** v1 CRs fail with schema validation errors + +**Solution:** Ensure cert-manager is installed and the webhook is enabled: + +```bash +# Check cert-manager +kubectl get pods -n cert-manager + +# Enable webhook in Helm +helm upgrade locust-operator locust-k8s-operator/locust-k8s-operator \ + --set webhook.enabled=true +``` + +#### Resources Not Created + +**Symptom:** LocustTest CR created but no Jobs/Services appear + +**Solution:** Check operator logs: + +```bash +kubectl logs -n locust-system -l app.kubernetes.io/name=locust-k8s-operator +``` + +#### Status 
Not Updating + +**Symptom:** LocustTest status remains empty + +**Solution:** Verify RBAC permissions include `locusttests/status`: + +```bash +kubectl auth can-i update locusttests/status --as=system:serviceaccount:locust-system:locust-operator +``` + +### How to Get Help + +- [GitHub Issues](https://github.com/AbdelrhmanHamouda/locust-k8s-operator/issues) + +--- + +## Rollback Procedure + +If you need to revert to v1: + +```bash +# Rollback Helm release +helm rollback locust-operator -n locust-system + +# Or reinstall v1 +helm install locust-operator locust-k8s-operator/locust-k8s-operator \ + --version 1.1.1 \ + -f values-backup.yaml +``` + +!!! note + After rollback, v2-specific fields in CRs will be lost. Ensure you have backups of any v2-only configurations. diff --git a/docs/pull-request-process.md b/docs/pull-request-process.md index e0e4d84e..33c845bd 100644 --- a/docs/pull-request-process.md +++ b/docs/pull-request-process.md @@ -8,7 +8,7 @@ This document outlines the process for submitting pull requests to the Locust K8 2. **Follow Coding Conventions**: Ensure your code follows the project's coding standards and conventions. -3. **Write Tests**: All new features or bug fixes should be covered by appropriate tests. See the [integration testing guide](integration-testing.md) for details on integration testing. +3. **Write Tests**: All new features or bug fixes should be covered by appropriate tests. See the [testing guide](integration-testing.md) for details on the testing setup. ## Pull Request Workflow @@ -19,41 +19,81 @@ This document outlines the process for submitting pull requests to the Locust K8 git checkout -b feature/your-feature-name ``` -3. **Make Your Changes**: Implement your changes, following the project's coding standards. +3. **Make Your Changes**: Implement your changes, following Go coding standards. 4. **Commit Your Changes**: Use the [Conventional Commits](https://www.conventionalcommits.org/) standard for commit messages. 
This is important as the commit messages directly influence the content of the CHANGELOG.md and version increments. Examples of good commit messages: ``` - feat: add support for Locust worker autoscaling - fix: correct container resource allocation - docs: update installation instructions + feat: add support for OpenTelemetry metrics export + fix: correct volume mount path validation + docs: update API reference for v2 fields + refactor: simplify resource builder functions + test: add integration tests for env injection ``` -5. **Run Tests Locally**: Run both unit and integration tests to ensure your changes don't break existing functionality: +5. **Run Tests Locally**: Run tests and linting to ensure your changes don't break existing functionality: ```bash - # Run unit tests - ./gradlew test + cd locust-k8s-operator - # Run integration tests - ./scripts/run-integration-test.sh + # Run all CI checks (lint + tests) + make ci + + # Or run individually: + make lint # Run linter + make test # Run unit + integration tests + make test-e2e # Run E2E tests (requires Docker) + ``` + +6. **Generate Manifests**: If you modified API types, regenerate manifests: + ```bash + make generate # Generate DeepCopy methods + make manifests # Generate CRDs, RBAC, webhooks ``` -6. **Submit Your Pull Request**: Push your branch to your fork and submit a pull request to the main repository. +7. **Submit Your Pull Request**: Push your branch to your fork and submit a pull request to the main repository. ## Pull Request Requirements -1. **Clean Build Dependencies**: Ensure any install or build dependencies are removed before the final build. 
+### Code Quality + +- [ ] Code follows Go conventions and project style +- [ ] No linting errors (`make lint` passes) +- [ ] All tests pass (`make test` passes) +- [ ] New code has appropriate test coverage (β‰₯80% for new packages) + +### Documentation + +- [ ] API changes are reflected in `docs/api_reference.md` +- [ ] New features are documented in `docs/features.md` or `docs/advanced_topics.md` +- [ ] Breaking changes are noted in the PR description +- [ ] Helm chart updates include `docs/helm_deploy.md` changes -2. **Documentation**: Update the documentation with details of changes to interfaces, configuration options, or other important aspects. +### Commit Messages -3. **Commit Messages**: Ensure commit messages follow the Conventional Commits standard. This is critical for automated changelog generation and semantic versioning. +- [ ] Follow Conventional Commits standard +- [ ] Each commit represents a logical unit of change +- [ ] Commit messages are clear and descriptive -4. **Tests**: - - Write clean and well-structured tests. - - Ensure your changes don't cause regressions. - - All changes (within reason) should be covered by tests. - - Update existing tests if your changes represent breaking changes. +### Tests + +- [ ] Unit tests for new/modified functions +- [ ] Integration tests for controller behavior changes +- [ ] Existing tests updated if behavior changes +- [ ] No test regressions + +## CI Pipeline Checks + +The following checks run automatically on each PR: + +| Check | Description | Command | +|---------------|-------------------------------|------------------| +| **Lint** | golangci-lint static analysis | `make lint` | +| **Test** | Unit + integration tests | `make test` | +| **Build** | Binary compilation | `make build` | +| **Manifests** | CRD/RBAC generation | `make manifests` | + +All checks must pass before a PR can be merged. 
## Review Process @@ -63,11 +103,16 @@ This document outlines the process for submitting pull requests to the Locust K8 3. **Feedback**: Maintainers may request changes or improvements to your PR. -4. **Merge**: Once approved, a maintainer will merge your PR. +4. **Merge**: Once approved and CI passes, a maintainer will merge your PR. ## After Your PR is Merged -1. **Update Your Fork**: Keep your fork up to date with the main repository. +1. **Update Your Fork**: Keep your fork up to date with the main repository: + ```bash + git checkout master + git pull upstream master + git push origin master + ``` 2. **Celebrate**: Thank you for contributing to the Locust K8s Operator project! Your efforts help make the project better for everyone. diff --git a/docs/roadmap.md b/docs/roadmap.md deleted file mode 100644 index 180d1a8e..00000000 --- a/docs/roadmap.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Roadmap -description: Planned features for Locust Kubernetes Operator. ---- - -# Roadmap - -The following is a list of planned features and improvements for the Locust Kubernetes Operator. This list is not exhaustive and may change over time. - -- :material-chart-line: **Enhanced Observability**: Provide out-of-the-box Grafana dashboard examples and more detailed Prometheus configuration guides to make monitoring even easier. - -- :material-send-clock-outline: **Event-Driven Actions**: Integrate with notification systems like Microsoft Teams or Slack to send alerts on test completion, failure, or other significant events. - -- :material-speedometer: **Advanced Benchmarking**: Investigate the feasibility of incorporating external metrics into test results. This would allow for more sophisticated pass/fail criteria, such as assessing the performance of a Kafka-based service by its consumer lag. - -- :material-update: **Dynamic Updates**: Add support for updating a `LocustTest` custom resource while a test is running. 
This would allow for dynamically adjusting test parameters without restarting the test. - -- :material-web: **Web UI/Dashboard**: Explore the possibility of creating a simple web UI or dashboard for managing and monitoring tests directly through the operator. diff --git a/docs/security.md b/docs/security.md new file mode 100644 index 00000000..eb545e58 --- /dev/null +++ b/docs/security.md @@ -0,0 +1,394 @@ +--- +title: Security Best Practices +description: RBAC configuration, secret management, and security hardening for the Locust Kubernetes Operator +tags: + - security + - rbac + - secrets + - guide +--- + +# Security Best Practices + +This guide covers security best practices for deploying and operating the Locust Kubernetes Operator in production environments. It provides practical examples for RBAC configuration, secret management, and security hardening. + +## Operator RBAC Permissions + +### What the Operator Needs + +The operator follows least-privilege principles. It requires specific permissions to manage LocustTest resources and create load test infrastructure. + +| Resource | Verbs | Purpose | +|----------|-------|---------| +| `locusttests` | get, list, watch, update, patch | Watch CRs and reconcile state | +| `locusttests/status` | get, update, patch | Report test status | +| `locusttests/finalizers` | update | Manage deletion lifecycle | +| `configmaps` | get, list, watch | Read test files and library code | +| `secrets` | get, list, watch | Read credentials for env injection | +| `services` | get, list, watch, create, delete | Master service for worker communication | +| `pods` | get, list, watch | Monitor pod health for status reporting | +| `events` | create, patch | Report status changes and errors | +| `jobs` | get, list, watch, create, delete | Master and worker pods (immutable pattern) | +| `leases` | get, list, watch, create, update, patch | Leader election (only when HA enabled) | + +!!! 
note "Read-Only Secret Access" + The operator **never creates or modifies** ConfigMaps or Secrets. It only reads them to populate environment variables and volume mounts in test pods. Users manage Secret creation and rotation. + +### Namespace-Scoped vs Cluster-Scoped + +The operator supports two RBAC modes: + +**ClusterRole** (`k8s.clusterRole.enabled: true`, default) + +- Operator manages LocustTest CRs in **all namespaces** +- Use when multiple teams share one operator deployment +- Typical for platform teams managing centralized performance testing + +**Role** (`k8s.clusterRole.enabled: false`) + +- Operator limited to its **deployment namespace** +- Use for single-tenant deployments or strict namespace isolation +- Typical for security-sensitive environments + +Configure the mode in Helm values: + +```yaml +# values.yaml +k8s: + clusterRole: + enabled: false # Restrict to operator namespace only +``` + +### User RBAC for Test Creators + +Users who create and manage LocustTest CRs need different permissions than the operator itself. 
Here are minimal RBAC examples: + +**Test Creator Role** (create and manage performance tests): + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: locusttest-creator + namespace: performance-testing +rules: + # Create and manage LocustTest CRs + - apiGroups: ["locust.io"] + resources: ["locusttests"] + verbs: ["get", "list", "watch", "create", "delete"] + # Create ConfigMaps for test files + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "update", "delete"] + # View pods for debugging + - apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] + # View events for status monitoring + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: locusttest-creator-binding + namespace: performance-testing +subjects: + - kind: User + name: jane.doe + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: locusttest-creator + apiGroup: rbac.authorization.k8s.io +``` + +**Test Viewer Role** (read-only access for monitoring): + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: locusttest-viewer + namespace: performance-testing +rules: + # View LocustTest CRs + - apiGroups: ["locust.io"] + resources: ["locusttests"] + verbs: ["get", "list", "watch"] + # View pods and logs + - apiGroups: [""] + resources: ["pods", "pods/log", "events"] + verbs: ["get", "list", "watch"] +``` + +Users with this role can monitor test status and view logs but cannot create or modify tests. 
+ +## Secret Management + +### Injecting Secrets into Tests + +The operator provides three approaches for injecting secrets into Locust test pods: + +| Method | Best For | Configuration | +|--------|----------|---------------| +| **Secret environment variables** (`env.secretRefs`) | API keys, tokens, passwords | Mounts all keys from a Secret as environment variables | +| **Secret file mounts** (`env.secretMounts`) | Certificates, key files, config files | Mounts Secret keys as files in the container filesystem | +| **Individual secret references** (`env.variables[].valueFrom.secretKeyRef`) | Specific keys from a secret | Fine-grained control over which keys to inject | + +**Quick Example** (secret environment variables): + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + env: + secretRefs: + - name: api-credentials # All keys become env vars +``` + +See [Advanced Topics - Environment Variables](how-to-guides/security/inject-secrets.md) for detailed examples of all three approaches. + +### Secret Rotation + +Because tests are immutable, running tests continue to use the secret values they started with. Secret rotation requires recreating the test. + +**Rotation Process:** + +1. **Update the Secret** in Kubernetes with new credentials: + ```bash + kubectl create secret generic api-credentials \ + --from-literal=API_TOKEN=new-token-value \ + --dry-run=client -o yaml | kubectl apply -f - + ``` + +2. **Delete the LocustTest CR**: + ```bash + kubectl delete locusttest my-test + ``` + +3. **Recreate the LocustTest CR** β€” new pods pick up updated secret values: + ```bash + kubectl apply -f locusttest.yaml + ``` + +!!! 
tip "Scheduled Rotation" + For automated secret rotation, integrate with external secrets management tools (see next section) that synchronize secrets on a schedule. + +### External Secrets Integration + +The operator works seamlessly with [External Secrets Operator](https://external-secrets.io/) for automatic secret synchronization from external secrets managers (AWS Secrets Manager, HashiCorp Vault, Google Secret Manager, Azure Key Vault, etc.). + +**Example** (AWS Secrets Manager integration): + +```yaml +# ExternalSecret syncs from AWS Secrets Manager to a K8s Secret +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: load-test-credentials + namespace: performance-testing +spec: + refreshInterval: 1h # Sync every hour + secretStoreRef: + name: aws-secretsmanager + kind: ClusterSecretStore + target: + name: load-test-credentials # K8s Secret name + data: + - secretKey: API_TOKEN + remoteRef: + key: /perf-testing/api-token + - secretKey: DB_PASSWORD + remoteRef: + key: /perf-testing/db-password +--- +# LocustTest references the synced Secret +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: my-test +spec: + image: locustio/locust:2.20.0 + master: + command: "--locustfile /lotest/src/test.py --host https://api.example.com" + worker: + command: "--locustfile /lotest/src/test.py" + replicas: 5 + env: + secretRefs: + - name: load-test-credentials # Uses synced Secret +``` + +!!! tip "Secret Source Agnostic" + The operator doesn't care how Secrets are created. You can use External Secrets Operator, Sealed Secrets, Vault Agent, manual `kubectl create secret`, or any other method. 
+ +## Pod Security + +### Operator Pod Security + +The operator runs with a hardened security context by default, meeting Kubernetes Pod Security Standards **"restricted"** profile: + +```yaml +# From values.yaml (default configuration) +securityContext: + runAsNonRoot: true + runAsUser: 65532 # Non-root user + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault +``` + +These settings are enabled by default in the Helm chart. No additional configuration is required. + +### Test Pod Security + +Test pods run the user-provided Locust image. Security depends on the image you use. + +**Recommendations:** + +- Use the official `locustio/locust` image or build a hardened variant +- Avoid running test containers as root +- Set resource limits to prevent resource exhaustion: + ```yaml + master: + resources: + limits: + cpu: 2000m + memory: 2Gi + worker: + resources: + limits: + cpu: 1000m + memory: 1Gi + ``` + +Test pods inherit the default security context from Helm values (`locustPods.securityContext`). Override per-test if needed. 
+ +## Network Security + +### Master-Worker Communication + +Master and worker pods communicate internally within the cluster: + +- **Port 5557**: Master listens for worker connections (internal only) +- **Port 8089**: Web UI on master pod (use port-forward for access) + +For production use: + +- **Do not expose port 8089 externally** β€” use `kubectl port-forward` for temporary access +- If using NetworkPolicies, ensure master and worker pods can communicate + +### NetworkPolicy Example + +Restrict pod communication to within the same test: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: locust-internal + namespace: performance-testing +spec: + podSelector: + matchLabels: + performance-test-name: my-test + policyTypes: + - Ingress + - Egress + ingress: + # Allow communication between pods in the same test + - from: + - podSelector: + matchLabels: + performance-test-name: my-test + ports: + - port: 5557 # Worker -> Master + - port: 8089 # Web UI (optional) + egress: + # Allow all egress (tests need to reach target systems) + - {} +``` + +!!! note "Egress Requirements" + Test pods need egress access to reach the target system under test. The example above allows unrestricted egress. Restrict further if required by your security policies. + +### Service Mesh Compatibility + +The operator is compatible with service mesh solutions (Istio, Linkerd). 
However: + +- Master-worker communication on port 5557 must work within the mesh +- Ensure sidecar injection doesn't break pod startup (adjust readiness probes if needed) +- Test traffic to external targets may require egress configuration in the mesh + +## Image Security + +### Using Private Registries + +If Locust images are in a private registry, configure image pull secrets: + +**Helm Configuration:** + +```yaml +# values.yaml +image: + pullSecrets: + - name: my-registry-secret +``` + +**Create the pull secret:** + +```bash +kubectl create secret docker-registry my-registry-secret \ + --docker-server=registry.example.com \ + --docker-username=user \ + --docker-password=pass \ + --docker-email=user@example.com \ + -n performance-testing +``` + +### Image Scanning + +Scan Locust images for vulnerabilities before use: + +```bash +# Example with Trivy +trivy image locustio/locust:2.20.0 +``` + +Build custom hardened images if the official image doesn't meet security requirements. + +## Audit and Compliance + +### Operator Audit Logging + +Kubernetes audit logs capture all operator actions. 
Enable audit logging at the cluster level to track: + +- LocustTest CR creation/deletion +- Job creation by the operator +- Secret access attempts + +### Compliance Considerations + +- **PCI-DSS**: Ensure Secrets are encrypted at rest (etcd encryption) +- **SOC 2**: Log all operator actions via audit logs +- **GDPR**: Avoid storing personal data in LocustTest CRs or test results + +## Additional Resources + +- [Getting Started](getting_started/index.md) β€” Initial setup and first test +- [How-To Guides](how-to-guides/index.md) β€” Environment variables, volumes, resource management +- [API Reference](api_reference.md) β€” Complete CR specification +- [Kubernetes RBAC Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +- [External Secrets Operator](https://external-secrets.io/) diff --git a/docs/tutorials/ci-cd-integration.md b/docs/tutorials/ci-cd-integration.md new file mode 100644 index 00000000..c894000c --- /dev/null +++ b/docs/tutorials/ci-cd-integration.md @@ -0,0 +1,271 @@ +--- +title: CI/CD Integration (15 minutes) +description: Automate load tests in GitHub Actions pipelines +tags: + - tutorial + - ci-cd + - automation + - github-actions +--- + +# CI/CD Integration (15 minutes) + +Automate your load tests to run on every deployment or on a schedule. + +## What you'll learn + +- How to run load tests in CI/CD pipelines +- How to create unique test runs per pipeline execution +- How to collect and store test results +- How to fail a pipeline on performance regression + +## Prerequisites + +- Completed the [Your First Load Test](first-load-test.md) tutorial +- A Kubernetes cluster accessible from CI (kubeconfig or service account) +- GitHub repository (for GitHub Actions example) + +## The scenario + +You want weekly load tests against your staging environment, plus on-demand tests before releases. Tests should fail the pipeline if error rate exceeds 1%. 
+ +## Step 1: Prepare the test script + +We'll reuse the `ecommerce_test.py` from Tutorial 1. Store it in your repository: + +``` +your-repo/ +β”œβ”€β”€ .github/ +β”‚ └── workflows/ +β”‚ └── load-test.yaml +└── tests/ + └── locust/ + └── ecommerce_test.py +``` + +The test script should be checked into your repository at `tests/locust/ecommerce_test.py` (same content from Tutorial 1). This ensures version control and consistency across pipeline runs. + +## Step 2: Create the GitHub Actions workflow + +Create `.github/workflows/load-test.yaml`: + +```yaml +name: Weekly Load Test + +on: + schedule: + - cron: '0 2 * * 1' # Every Monday at 2 AM UTC + workflow_dispatch: # Allow manual trigger from GitHub UI + +jobs: + load-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up kubectl + uses: azure/setup-kubectl@v3 + with: + version: 'v1.28.0' + + - name: Configure kubeconfig + run: | + # Create .kube directory + mkdir -p $HOME/.kube + # Write kubeconfig from GitHub secret + echo "${{ secrets.KUBECONFIG }}" > $HOME/.kube/config + # Verify connectivity + kubectl cluster-info + + - name: Create/update test ConfigMap + run: | + # Use --dry-run + kubectl apply for idempotency + kubectl create configmap ecommerce-test \ + --from-file=tests/locust/ecommerce_test.py \ + --dry-run=client -o yaml | kubectl apply -f - + + - name: Deploy LocustTest with unique name + run: | + # Generate unique test name with timestamp + TEST_NAME="ecommerce-ci-$(date +%Y%m%d-%H%M%S)" + + kubectl apply -f - <<EOF + apiVersion: locust.io/v2 + kind: LocustTest + metadata: + name: ${TEST_NAME} + spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: ecommerce-test + master: + command: "--locustfile /lotest/src/ecommerce_test.py --host https://httpbin.org --users 100 --spawn-rate 10 --run-time 5m --headless" + worker: + command: "--locustfile /lotest/src/ecommerce_test.py" + replicas: 3 + EOF + + # Make the test name available to later steps + echo "TEST_NAME=${TEST_NAME}" >> $GITHUB_ENV + + - name: Wait for test completion + run: | + # Timeout accounts for pod scheduling + 5m run time + autoquit grace period + TIMEOUT=600 # 10 minutes + ELAPSED=0 + while true; do + PHASE=$(kubectl get locusttest ${TEST_NAME} -o jsonpath='{.status.phase}') + case "$PHASE" in + Succeeded) echo "Test passed"; break ;; + Failed) echo "Test failed"; exit 1 ;; + *) echo "Phase: $PHASE -- waiting..."; sleep 10 ;; + esac + 
ELAPSED=$((ELAPSED + 10)) + if [ $ELAPSED -ge $TIMEOUT ]; then + echo "Timed out after ${TIMEOUT}s"; exit 1 + fi + done + + - name: Collect test results + if: always() # Run even if test fails + run: | + # Get master pod logs (job/ selector works because master always has exactly 1 pod) + kubectl logs job/${TEST_NAME}-master > results.log + + # Get test status YAML + kubectl get locusttest ${TEST_NAME} -o yaml > test-status.yaml + + # Display summary + echo "=== Test Summary ===" + kubectl get locusttest ${TEST_NAME} + + - name: Check for performance regression + run: | + # Extract final statistics from master logs + # NOTE: This grep regex is version-specific to Locust's log format. + # For more robust failure detection, consider using --exit-code-on-error + # in the Locust command, which makes Locust exit with code 1 on errors. + FAILURE_RATE=$(kubectl logs job/${TEST_NAME}-master | \ + grep -oP 'Total.*Failures.*\K[\d.]+%' | tail -1 | sed 's/%//') + + echo "Failure rate: ${FAILURE_RATE}%" + + # Fail pipeline if error rate > 1% + if (( $(echo "$FAILURE_RATE > 1.0" | bc -l) )); then + echo "ERROR: Failure rate ${FAILURE_RATE}% exceeds threshold of 1%" + exit 1 + fi + + echo "βœ“ Performance acceptable: ${FAILURE_RATE}% failures" + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: load-test-results-${{ env.TEST_NAME }} + path: | + results.log + test-status.yaml + retention-days: 30 + + - name: Cleanup test resources + if: always() + run: kubectl delete locusttest ${TEST_NAME} --ignore-not-found +``` + +**Key workflow features:** + +- **Scheduled execution**: `cron: '0 2 * * 1'` runs every Monday at 2 AM +- **Manual trigger**: `workflow_dispatch` allows on-demand runs from GitHub UI +- **Unique test names**: `$(date +%Y%m%d-%H%M%S)` prevents name conflicts +- **Idempotent ConfigMap**: `--dry-run=client -o yaml | kubectl apply` updates existing ConfigMap +- **Result collection**: Logs and YAML saved as GitHub artifacts 
+- **Regression detection**: Pipeline fails if error rate exceeds 1% + +## Step 3: Configure GitHub secrets + +Your workflow needs a kubeconfig to access the cluster. Add it as a GitHub secret: + +1. Copy your kubeconfig content: + ```bash + cat ~/.kube/config + ``` + +2. In GitHub: Go to **Settings** β†’ **Secrets and variables** β†’ **Actions** β†’ **New repository secret** + +3. Create secret `KUBECONFIG` and paste the raw kubeconfig YAML content (GitHub secrets handle arbitrary text, no encoding needed) + +**Security note:** For production, use a service account with minimal permissions instead of full admin kubeconfig. + +## Step 4: Run and verify + +### Trigger the workflow manually + +1. Go to **Actions** tab in GitHub +2. Select **Weekly Load Test** workflow +3. Click **Run workflow** β†’ **Run workflow** + +### Monitor execution + +Watch the workflow run in real-time. Check each step's output: + +- βœ“ ConfigMap created/updated +- βœ“ LocustTest deployed with unique name +- βœ“ Test completed successfully +- βœ“ Performance within acceptable limits + +### Check artifacts + +After completion (or failure), download artifacts: + +1. Click on the workflow run +2. Scroll to **Artifacts** section +3. Download `load-test-results-ecommerce-ci-YYYYMMDD-HHMMSS.zip` + +The artifact contains: +- `results.log` β€” Full Locust master output with statistics +- `test-status.yaml` β€” Complete LocustTest CR status + +## Step 5: Make tests fail on regression + +The workflow already includes regression detection in the "Check for performance regression" step. It: + +1. **Extracts error rate** from master logs using `grep` +2. **Compares to threshold** (1% in this example) +3. 
**Fails pipeline** with `exit 1` if threshold exceeded + +**Customizing thresholds:** + +```bash +# Fail on error rate > 1% +if (( $(echo "$FAILURE_RATE > 1.0" | bc -l) )); then + exit 1 +fi + +# Or fail on response time > 500ms +AVG_RESPONSE=$(kubectl logs job/${TEST_NAME}-master | \ + grep -oP 'Average response time.*\K[\d.]+' | tail -1) +if (( $(echo "$AVG_RESPONSE > 500" | bc -l) )); then + exit 1 +fi +``` + +## What you learned + +βœ“ How to run Kubernetes-based load tests in CI/CD pipelines +βœ“ How to create unique test names for traceability +βœ“ How to collect and store test results as artifacts +βœ“ How to fail pipelines on performance regression +βœ“ How to configure scheduled and manual test execution + +## Next steps + +- [Production Deployment](production-deployment.md) β€” Configure production-grade load tests +- [Configure resources](../how-to-guides/configuration/configure-resources.md) β€” Optimize pod resource allocation +- [Set up OpenTelemetry](../how-to-guides/observability/configure-opentelemetry.md) β€” Export metrics for long-term analysis diff --git a/docs/tutorials/first-load-test.md b/docs/tutorials/first-load-test.md new file mode 100644 index 00000000..131962c6 --- /dev/null +++ b/docs/tutorials/first-load-test.md @@ -0,0 +1,208 @@ +--- +title: Your First Load Test +description: Learn how distributed load testing works by building a realistic test from scratch +tags: + - tutorial + - distributed testing + - load testing + - beginners +--- + +# Your First Load Test (10 minutes) + +Learn how distributed load testing works by building a realistic test from scratch. You'll create an e-commerce scenario that simulates 100 users browsing products and viewing details. 
+ +## What you'll learn + +- How Locust master and worker pods communicate and distribute load +- How to write realistic test scripts with multiple tasks and weighted behavior +- How to configure test parameters for meaningful results +- How to monitor test progress and interpret statistics + +## Prerequisites + +- Completed the [Quick Start](../getting_started/index.md) guide +- Basic understanding of HTTP and REST APIs +- Kubernetes cluster with the operator installed + +## The scenario + +You're testing an e-commerce API before a big sale. You need to verify it can handle 100 simultaneous users over 5 minutes, with users primarily browsing products (75% of traffic) and occasionally viewing product details (25% of traffic). + +This simulates realistic user behavior - most users browse, fewer drill into specific items. + +## Step 1: Write the test script + +Create a test script that simulates realistic shopping behavior: + +```python +cat > ecommerce_test.py << 'EOF' +from locust import HttpUser, task, between + +class ShopperUser(HttpUser): + # Wait 1-3 seconds between tasks to simulate realistic user pacing + wait_time = between(1, 3) + + @task(3) # This task runs 3x more often (75% of requests) + def browse_products(self): + """Browse the product catalog.""" + # The name parameter helps identify requests in statistics + self.client.get( + "/anything/products", + name="GET /products" + ) + + @task(1) # This task runs 1x as often (25% of requests) + def view_product_detail(self): + """View details for a specific product.""" + # Simulate viewing product ID 42 + self.client.get( + "/anything/products/42", + name="GET /products/:id" + ) +EOF +``` + +### What's happening here + +- **`HttpUser`**: Base class for simulating HTTP clients. Each instance represents one user. +- **`wait_time = between(1, 3)`**: Adds realistic pauses between requests. Real users don't hammer APIs continuously. +- **`@task(3)` and `@task(1)`**: Task weights control distribution. 
Weight 3 means "run 3x as often as weight 1", giving us 75%/25% split. +- **`name` parameter**: Groups similar URLs (like `/products/42`, `/products/99`) into one statistic row. Without this, you'd see hundreds of separate rows. + +We're using `https://httpbin.org/anything` as a mock API - it accepts any request and returns 200, perfect for learning without deploying a real e-commerce backend. + +## Step 2: Deploy the test + +First, create the ConfigMap: + +```bash +kubectl create configmap ecommerce-test --from-file=ecommerce_test.py +``` + +Now create the LocustTest resource: + +```bash +kubectl apply -f - < searching > viewing > purchasing) +- **Dynamic data** β€” Product IDs vary per user (prevents cache hits) +- **Wait time** β€” `between(1, 3)` simulates realistic user pacing +- **Logging** β€” Helps debug test issues in production + +## Step 2: Size resources appropriately + +### Why resource sizing matters + +Load tests need consistent performance to generate reliable results. Without resource limits: + +- **Worker pods compete** for CPU with other workloads β†’ inconsistent request rates +- **Memory exhaustion** can crash pods mid-test β†’ incomplete results +- **Cluster instability** affects production services + +### Sizing guidelines + +**Master pod** (coordinator): +- **Memory**: 512Mi request, 1Gi limit β€” handles test coordination and statistics +- **CPU**: 500m request, 1000m limit β€” moderate processing needs + +**Worker pods** (load generators): +- **Memory**: 256Mi request, 512Mi limit β€” per-worker estimate: ~50 users +- **CPU**: 250m request, **no limit** β€” maximizes request generation throughput +- **Replica count**: Total users Γ· 50 = worker count (e.g., 1000 users = 20 workers) + +### Resource configuration example + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: resource-sized-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: production-test + master: + command: | + --locustfile 
/lotest/src/production_test.py + --host https://api.staging.example.com + --users 1000 + --spawn-rate 50 + --run-time 30m + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + worker: + command: "--locustfile /lotest/src/production_test.py" + replicas: 20 # 1000 users Γ· 50 users/worker = 20 workers + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + # CPU limit intentionally omitted for maximum performance +``` + +**Why omit CPU limit on workers?** CPU limits can throttle request generation, reducing test accuracy. Workers with only CPU requests get maximum available CPU while still being schedulable. + +## Step 3: Isolate on dedicated nodes + +### Why dedicated nodes prevent interference + +Running load tests on shared nodes can: + +- **Throttle production workloads** β€” high CPU usage from workers affects critical services +- **Skew test results** β€” resource contention from other pods creates inconsistent performance +- **Violate policies** β€” some clusters prohibit non-production workloads on production nodes + +### Label nodes for load testing + +```bash +# Identify nodes for load testing (e.g., separate node pool) +kubectl get nodes + +# Label dedicated node(s) +kubectl label nodes worker-node-1 workload-type=load-testing +kubectl label nodes worker-node-2 workload-type=load-testing +kubectl label nodes worker-node-3 workload-type=load-testing +``` + +### Configure node affinity + +Add node affinity to ensure pods only run on labeled nodes: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: isolated-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: production-test + master: + command: | + --locustfile /lotest/src/production_test.py + --host https://api.staging.example.com + --users 1000 + --spawn-rate 50 + --run-time 30m + worker: + command: "--locustfile /lotest/src/production_test.py" + replicas: 20 + scheduling: + affinity: + 
nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - load-testing # Only schedule on labeled nodes +``` + +### Add tolerations for tainted nodes + +If your dedicated nodes have taints (prevents accidental scheduling), add tolerations: + +```yaml +spec: + scheduling: + affinity: + # ... (node affinity from above) + tolerations: + - key: "workload-type" + operator: "Equal" + value: "load-testing" + effect: "NoSchedule" +``` + +**Verification:** + +```bash +# Check where master and worker pods are scheduled +kubectl get pods -l performance-test-name=isolated-test -o wide + +# You should see NODE column showing only your labeled nodes +``` + +## Step 4: Enable OpenTelemetry + +### Why native OpenTelemetry beats sidecars + +The v2 operator includes native OpenTelemetry support, eliminating the need for sidecar containers: + +- **Lower overhead** β€” no extra containers per pod +- **Simpler configuration** β€” environment variables injected automatically +- **Better performance** β€” direct export from Locust to collector + +### Configure OpenTelemetry export + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: otel-enabled-test +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: production-test + master: + command: | + --locustfile /lotest/src/production_test.py + --host https://api.staging.example.com + --users 1000 + --spawn-rate 50 + --run-time 30m + worker: + command: "--locustfile /lotest/src/production_test.py" + replicas: 20 + observability: + openTelemetry: + enabled: true + endpoint: "otel-collector.monitoring:4317" # Your OTel Collector endpoint + protocol: "grpc" # or "http/protobuf" + # TLS is the default; set insecure: true only for development without TLS + extraEnvVars: + OTEL_SERVICE_NAME: "production-load-test" + OTEL_RESOURCE_ATTRIBUTES: "environment=staging,team=platform,test.type=load" +``` + +**Configuration 
details:** + +- **`endpoint`** β€” OpenTelemetry Collector gRPC endpoint (format: `host:port`) +- **`protocol`** β€” `grpc` (default) or `http/protobuf` +- **`insecure`** β€” TLS is the default; set `true` only for development without TLS +- **`extraEnvVars`** β€” Custom attributes for trace/metric filtering + +### Verify OpenTelemetry injection + +```bash +# Check environment variables in master pod +kubectl get pod -l performance-test-pod-name=otel-enabled-test-master \ + -o yaml | grep OTEL_ + +# Expected output: +# OTEL_TRACES_EXPORTER: otlp +# OTEL_METRICS_EXPORTER: otlp +# OTEL_EXPORTER_OTLP_ENDPOINT: otel-collector.monitoring:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL: grpc +# OTEL_SERVICE_NAME: production-load-test +``` + +## Step 5: Deploy the complete production test + +Combining all previous steps, here's the full production-ready LocustTest CR: + +```yaml +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: production-load-test + namespace: load-testing +spec: + image: locustio/locust:2.20.0 + testFiles: + configMapRef: production-test + master: + command: | + --locustfile /lotest/src/production_test.py + --host https://api.staging.example.com + --users 1000 + --spawn-rate 50 + --run-time 30m + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + worker: + command: "--locustfile /lotest/src/production_test.py" + replicas: 20 # 1000 users Γ· 50 users per worker + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + # CPU limit omitted for maximum worker performance + scheduling: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - load-testing + tolerations: + - key: "workload-type" + operator: "Equal" + value: "load-testing" + effect: "NoSchedule" + observability: + openTelemetry: + enabled: true + endpoint: "otel-collector.monitoring:4317" + protocol: 
"grpc" + extraEnvVars: + OTEL_SERVICE_NAME: "production-load-test" + OTEL_RESOURCE_ATTRIBUTES: "environment=staging,team=platform" +``` + +### Deploy the test + +```bash +# Create ConfigMap from enhanced test script +kubectl create configmap production-test \ + --from-file=production_test.py \ + --namespace load-testing + +# Apply the LocustTest CR +kubectl apply -f production-load-test.yaml +``` + +## Step 6: Monitor and verify + +### Watch test progression + +```bash +# Monitor test status (watch mode) +kubectl get locusttest production-load-test -n load-testing -w + +# Expected progression: +# NAME PHASE WORKERS CONNECTED AGE +# production-load-test Pending 20 0 5s +# production-load-test Running 20 20 45s +# production-load-test Succeeded 20 20 31m +``` + +### Check status conditions + +```bash +# View detailed status conditions +kubectl get locusttest production-load-test -n load-testing \ + -o jsonpath='{.status.conditions[*]}' | jq + +# Expected conditions: +# { +# "type": "PodsHealthy", +# "status": "True", +# "reason": "PodsHealthy", +# "message": "All pods are healthy" +# } +``` + +### Verify worker health + +```bash +# Check all worker pods are running +kubectl get pods -l performance-test-pod-name=production-load-test-worker \ + -n load-testing + +# Expected: 20 pods in Running state +``` + +### Verify OpenTelemetry traces + +If OpenTelemetry is configured, check your observability backend: + +**Prometheus (metrics):** +```promql +# Query Locust request metrics (illustrative) +locust_requests_total{service_name="production-load-test"} + +# Query response time metrics (illustrative) +locust_request_duration_seconds{service_name="production-load-test"} +``` + +!!! note "Metric names are illustrative" + Actual metric names depend on your OpenTelemetry/Prometheus setup and exporter configuration. Check your OTel Collector and Prometheus documentation for the exact names available in your environment. 
+ +**Jaeger/Tempo (traces):** + +Filter by `service.name=production-load-test` to see: + +- Individual request spans +- Request duration distribution +- Error traces + +### Access real-time Locust UI + +```bash +# Port-forward to master pod +kubectl port-forward -n load-testing \ + job/production-load-test-master 8089:8089 + +# Open http://localhost:8089 in browser +``` + +The UI shows: + +- Live request statistics (RPS, response times, failures) +- Charts showing performance trends over time +- Worker connection status +- Test phase and remaining duration + +## What you learned + +βœ“ How to size master and worker resources for production workloads +βœ“ How to isolate load tests on dedicated nodes using affinity and tolerations +βœ“ How to export traces and metrics to OpenTelemetry collectors +βœ“ How to scale worker replicas based on simulated user count +βœ“ How to monitor test health through status conditions +βœ“ How to deploy complete production-grade load tests + +## Next steps + +- [Configure resources](../how-to-guides/configuration/configure-resources.md) β€” Deep dive into resource optimization +- [Configure OpenTelemetry](../how-to-guides/observability/configure-opentelemetry.md) β€” Advanced observability setup +- [Use node affinity](../how-to-guides/scaling/use-node-affinity.md) β€” More scheduling strategies +- [API Reference](../api_reference.md) β€” Explore all configuration options +- [Security best practices](../security.md) β€” Secure your load testing infrastructure diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..3ebb41bc --- /dev/null +++ b/go.mod @@ -0,0 +1,99 @@ +module github.com/AbdelrhmanHamouda/locust-k8s-operator + +go 1.24.0 + +require ( + github.com/go-logr/logr v1.4.2 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.1 + github.com/stretchr/testify v1.11.1 + go.uber.org/zap v1.27.0 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/client-go v0.33.0 + k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738 + sigs.k8s.io/controller-runtime v0.21.0 +) + +require ( + cel.dev/expr v0.19.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + 
github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.26.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..7e299c50 --- /dev/null +++ b/go.sum @@ -0,0 +1,254 @@ +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 
h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 
h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod 
h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 
h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod 
h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 
h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/gradle.properties b/gradle.properties 
deleted file mode 100644 index e17f1c83..00000000 --- a/gradle.properties +++ /dev/null @@ -1,6 +0,0 @@ -micronautVersion=4.8.3 -org.gradle.parallel=true -org.gradle.caching=true -# To avoid deprecated Gradle 7 default behavior for "Toolchain Download Repositories" -# More info: https://docs.gradle.org/7.6.1/userguide/toolchains.html#sub:adoptium_and_adopt_open_jdk -org.gradle.java.installations.auto-download=true \ No newline at end of file diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle deleted file mode 100644 index a2653610..00000000 --- a/gradle/dependencies.gradle +++ /dev/null @@ -1,55 +0,0 @@ -ext { - // Logging - logbackVersion = '1.2.11' - - // Metrics - micrometerPrometheusVersion = '4.2.1' - - // Testing - mockitoVersion = '4.8.0' - assertjVersion = '3.23.1' - systemLambdaVersion = '1.2.1' - k8sClient = '7.3.1' // Matching version from JOSDK - - // Operator SDK - javaOperatorVersion = '5.1.1' -} - -dependencies { - annotationProcessor("org.projectlombok:lombok") - annotationProcessor("io.micronaut:micronaut-http-validation") - annotationProcessor("io.micronaut.validation:micronaut-validation-processor") - annotationProcessor("io.micronaut:micronaut-inject-java") - annotationProcessor ("io.javaoperatorsdk:operator-framework:$javaOperatorVersion") - - implementation("io.micronaut:micronaut-inject") - implementation ("io.micronaut:micronaut-http-client") - implementation ("io.micronaut:micronaut-jackson-databind") - implementation("io.micronaut:micronaut-management") - implementation("io.micronaut.validation:micronaut-validation") - implementation("io.micronaut.micrometer:micronaut-micrometer-core") - implementation("io.micronaut.micrometer:micronaut-micrometer-registry-prometheus:$micrometerPrometheusVersion") - implementation("jakarta.annotation:jakarta.annotation-api") - implementation("io.javaoperatorsdk:operator-framework:$javaOperatorVersion") - - compileOnly("org.projectlombok:lombok") - 
runtimeOnly("ch.qos.logback:logback-classic:$logbackVersion") - runtimeOnly("org.yaml:snakeyaml") - - // ### Test - testAnnotationProcessor("io.micronaut:micronaut-inject-java") - testAnnotationProcessor("org.projectlombok:lombok") - - testImplementation("org.mockito:mockito-core:$mockitoVersion") - testImplementation("org.mockito:mockito-junit-jupiter:$mockitoVersion") - testImplementation("org.assertj:assertj-core:$assertjVersion") - testImplementation("org.junit.jupiter:junit-jupiter-api") - testImplementation("org.junit.jupiter:junit-jupiter-params") - testImplementation("io.micronaut.test:micronaut-test-junit5") - testImplementation("org.junit.jupiter:junit-jupiter-engine") - testImplementation("com.github.stefanbirkner:system-lambda:$systemLambdaVersion") - testImplementation("io.fabric8:kubernetes-server-mock:$k8sClient") - testImplementation ("io.fabric8:kube-api-test:$k8sClient") - - testCompileOnly("org.projectlombok:lombok") -} diff --git a/gradle/integration-test.gradle b/gradle/integration-test.gradle deleted file mode 100644 index 9db08423..00000000 --- a/gradle/integration-test.gradle +++ /dev/null @@ -1,100 +0,0 @@ -// Integration Test Configuration -sourceSets { - integrationTest { - java.srcDirs = ['src/integrationTest/java'] - resources.srcDirs = ['src/integrationTest/resources'] - compileClasspath += main.output + test.output - runtimeClasspath += main.output + test.output - } -} - -configurations { - integrationTestImplementation.extendsFrom testImplementation - integrationTestRuntimeOnly.extendsFrom testRuntimeOnly -} - -dependencies { - integrationTestImplementation 'org.testcontainers:testcontainers:1.19.0' - integrationTestImplementation 'org.testcontainers:k3s:1.19.0' - integrationTestImplementation 'io.fabric8:kubernetes-client:6.8.1' - integrationTestImplementation 'org.awaitility:awaitility:4.2.0' - integrationTestImplementation 'org.yaml:snakeyaml:2.0' - integrationTestImplementation 'org.apache.commons:commons-compress:1.23.0' - 
integrationTestImplementation 'commons-io:commons-io:2.11.0' -} - -// Function to detect correct Docker socket path (same as jib.gradle) -def getDockerHostForIntegrationTest() { - // Check if DOCKER_HOST is already set - def dockerHost = System.getenv('DOCKER_HOST') - if (dockerHost) { - return dockerHost - } - - // For macOS with Docker Desktop, check the user-specific socket - def userDockerSocket = System.getProperty('user.home') + '/.docker/run/docker.sock' - if (new File(userDockerSocket).exists()) { - return 'unix://' + userDockerSocket - } - - // Default Docker socket - def defaultSocket = '/var/run/docker.sock' - if (new File(defaultSocket).exists()) { - return 'unix://' + defaultSocket - } - - // Fallback to default - return 'unix:///var/run/docker.sock' -} - -task integrationTest(type: Test) { - group = 'verification' - description = 'Runs integration tests' - testClassesDirs = sourceSets.integrationTest.output.classesDirs - classpath = sourceSets.integrationTest.runtimeClasspath - - useJUnitPlatform() - - // Set test timeouts - systemProperty 'junit.jupiter.execution.timeout.default', '15m' - - // Pass environment variables to tests with proper Docker socket detection - environment 'DOCKER_HOST', getDockerHostForIntegrationTest() - - // Configure Testcontainers to use default Docker socket for volume mounting - // This fixes the issue where Docker Desktop's user-specific socket can't be mounted as volume - environment 'TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE', '/var/run/docker.sock' - - // Disable caching of test results to ensure fresh runs each time - outputs.upToDateWhen { false } - - // Always run all tests, don't skip any based on previous runs - options { - // Ensure all tests run every time - testLogging { - events "passed", "skipped", "failed" - exceptionFormat "full" - showStandardStreams = true - } - } - - // Configure test reporting - reports { - html.outputLocation = file("$buildDir/reports/integration-tests") - junitXml.outputLocation = 
file("$buildDir/test-results/integration-test") - } - - // Ensure clean state by running these tasks first - dependsOn clean, assemble, classes, jibDockerBuild - - // Force the task to run even if it's considered up-to-date - outputs.upToDateWhen { false } - - // Only run if explicitly requested - onlyIf { project.hasProperty('runIntegrationTests') || gradle.startParameter.taskNames.contains('integrationTest') } -} - -// Make check depend on integration tests in CI -if (System.getenv('CI') == 'true') { - check.dependsOn integrationTest -} diff --git a/gradle/jacoco.gradle b/gradle/jacoco.gradle deleted file mode 100644 index c2919a99..00000000 --- a/gradle/jacoco.gradle +++ /dev/null @@ -1,10 +0,0 @@ -def allTestCoverageFile = "$buildDir/jacoco/merged/test.exec" - -jacocoTestReport { - executionData file(allTestCoverageFile) - - reports { - xml.required = true - html.required = true - } -} diff --git a/gradle/jib.gradle b/gradle/jib.gradle deleted file mode 100644 index 03869b14..00000000 --- a/gradle/jib.gradle +++ /dev/null @@ -1,64 +0,0 @@ -def matchedTags = project.version =~ /(\d+)\.(\d+)\.(\d+)/ -def fullVersion = project.version -def majorVersion = matchedTags[0][1] -def majorMinorVersion = majorVersion + "." 
+ matchedTags[0][2] - -// Function to detect Docker socket path -def getDockerHost() { - // Check if DOCKER_HOST is already set - def dockerHost = System.getenv('DOCKER_HOST') - if (dockerHost) { - println "Using existing DOCKER_HOST: ${dockerHost}" - return dockerHost - } - - // For macOS with Docker Desktop, check the user-specific socket - def userDockerSocket = System.getProperty('user.home') + '/.docker/run/docker.sock' - if (new File(userDockerSocket).exists()) { - def dockerEndpoint = 'unix://' + userDockerSocket - println "Detected Docker Desktop socket: ${dockerEndpoint}" - return dockerEndpoint - } - - // Default Docker socket - def defaultSocket = '/var/run/docker.sock' - if (new File(defaultSocket).exists()) { - def dockerEndpoint = 'unix://' + defaultSocket - println "Using default Docker socket: ${dockerEndpoint}" - return dockerEndpoint - } - - println "No Docker socket found, using default configuration" - return null -} - -// Get the Docker host to use -def dockerHostEndpoint = getDockerHost() - -jib { - // Configure Docker daemon connection - dockerClient { - if (dockerHostEndpoint) { - environment = ['DOCKER_HOST': dockerHostEndpoint] - } - } - - from { - image = 'eclipse-temurin:21-jre' - } - - to { - image = project.name - // Tag image with - // - Full version - // - Major Version - // - Major.Minor version - // - "latest" - tags = [fullVersion, majorVersion, majorMinorVersion, 'latest'] - } - - // User current time for image creation - container { - creationTime = "USE_CURRENT_TIMESTAMP" - } -} diff --git a/gradle/spotless.gradle b/gradle/spotless.gradle deleted file mode 100644 index 5bd5f4aa..00000000 --- a/gradle/spotless.gradle +++ /dev/null @@ -1,24 +0,0 @@ -ext { - DISABLE_SPOTLESS = System.getenv("DISABLE_SPOTLESS") ?: "false" -} - -// We only want to run it on local dev env -if (DISABLE_SPOTLESS == "false") { - spotless { - java { - // Disable as it is causing failure with JDK 17 due to access of internal APIs - // more info: 
https://github.com/google/google-java-format/issues/612 - //removeUnusedImports() - endWithNewline() - indentWithSpaces(4) - } - format "misc", { - target "**/*.gradle", "**/.gitignore" - - trimTrailingWhitespace() - indentWithSpaces() - endWithNewline() - } - compileJava.dependsOn "spotlessApply" - } -} diff --git a/gradle/testing.gradle b/gradle/testing.gradle deleted file mode 100644 index 19b80e55..00000000 --- a/gradle/testing.gradle +++ /dev/null @@ -1,34 +0,0 @@ -import org.gradle.api.tasks.testing.logging.TestExceptionFormat -import org.gradle.api.tasks.testing.logging.TestLogEvent - -tasks.withType(Test) { - useJUnitPlatform() - - testLogging { - events TestLogEvent.FAILED, TestLogEvent.SKIPPED, TestLogEvent.STANDARD_ERROR - exceptionFormat TestExceptionFormat.FULL - showExceptions true - showCauses true - showStackTraces true - } - - // --add-opens needed to get around JDK 17 restrictions with system-lambda - jvmArgs "-XX:TieredStopAtLevel=1", "--add-opens=java.base/java.util=ALL-UNNAMED" - - beforeTest { descriptor -> - logger.lifecycle("Running test: {}.{}", descriptor.className, descriptor.name) - } -} - -test { - useJUnitPlatform() -} - - - -test.classpath += configurations.developmentOnly - -tasks.withType(JavaExec) { - classpath += configurations.developmentOnly - jvmArgs( '-XX:TieredStopAtLevel=1', '-Dcom.sun.management.jmxremote') -} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar deleted file mode 100644 index 249e5832..00000000 Binary files a/gradle/wrapper/gradle-wrapper.jar and /dev/null differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index 0b55a3bd..00000000 --- a/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,5 +0,0 @@ -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.2-bin.zip -zipStoreBase=GRADLE_USER_HOME 
-zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew deleted file mode 100755 index a69d9cb6..00000000 --- a/gradlew +++ /dev/null @@ -1,240 +0,0 @@ -#!/bin/sh - -# -# Copyright Β© 2015-2021 the original authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -############################################################################## -# -# Gradle start up script for POSIX generated by Gradle. -# -# Important for running: -# -# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is -# noncompliant, but you have some other compliant shell such as ksh or -# bash, then to run this script, type that shell name before the whole -# command line, like: -# -# ksh Gradle -# -# Busybox and similar reduced shells will NOT work, because this script -# requires all of these POSIX shell features: -# * functions; -# * expansions Β«$varΒ», Β«${var}Β», Β«${var:-default}Β», Β«${var+SET}Β», -# Β«${var#prefix}Β», Β«${var%suffix}Β», and Β«$( cmd )Β»; -# * compound commands having a testable exit status, especially Β«caseΒ»; -# * various built-in commands including Β«commandΒ», Β«setΒ», and Β«ulimitΒ». -# -# Important for patching: -# -# (2) This script targets any POSIX shell, so it avoids extensions provided -# by Bash, Ksh, etc; in particular arrays are avoided. 
-# -# The "traditional" practice of packing multiple parameters into a -# space-separated string is a well documented source of bugs and security -# problems, so this is (mostly) avoided, by progressively accumulating -# options in "$@", and eventually passing that to Java. -# -# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, -# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; -# see the in-line comments for details. -# -# There are tweaks for specific operating systems such as AIX, CygWin, -# Darwin, MinGW, and NonStop. -# -# (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt -# within the Gradle project. -# -# You can find Gradle at https://github.com/gradle/gradle/. -# -############################################################################## - -# Attempt to set APP_HOME - -# Resolve links: $0 may be a link -app_path=$0 - -# Need this for daisy-chained symlinks. -while - APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path - [ -h "$app_path" ] -do - ls=$( ls -ld "$app_path" ) - link=${ls#*' -> '} - case $link in #( - /*) app_path=$link ;; #( - *) app_path=$APP_HOME$link ;; - esac -done - -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" -APP_BASE_NAME=${0##*/} - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD=maximum - -warn () { - echo "$*" -} >&2 - -die () { - echo - echo "$*" - echo - exit 1 -} >&2 - -# OS specific support (must be 'true' or 'false'). 
-cygwin=false -msys=false -darwin=false -nonstop=false -case "$( uname )" in #( - CYGWIN* ) cygwin=true ;; #( - Darwin* ) darwin=true ;; #( - MSYS* | MINGW* ) msys=true ;; #( - NONSTOP* ) nonstop=true ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD=$JAVA_HOME/jre/sh/java - else - JAVACMD=$JAVA_HOME/bin/java - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then - case $MAX_FD in #( - max*) - MAX_FD=$( ulimit -H -n ) || - warn "Could not query maximum file descriptor limit" - esac - case $MAX_FD in #( - '' | soft) :;; #( - *) - ulimit -n "$MAX_FD" || - warn "Could not set maximum file descriptor limit to $MAX_FD" - esac -fi - -# Collect all arguments for the java command, stacking in reverse order: -# * args from the command line -# * the main class name -# * -classpath -# * -D...appname settings -# * --module-path (only if needed) -# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
- -# For Cygwin or MSYS, switch paths to Windows format before running java -if "$cygwin" || "$msys" ; then - APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) - CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) - - JAVACMD=$( cygpath --unix "$JAVACMD" ) - - # Now convert the arguments - kludge to limit ourselves to /bin/sh - for arg do - if - case $arg in #( - -*) false ;; # don't mess with options #( - /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath - [ -e "$t" ] ;; #( - *) false ;; - esac - then - arg=$( cygpath --path --ignore --mixed "$arg" ) - fi - # Roll the args list around exactly as many times as the number of - # args, so each arg winds up back in the position where it started, but - # possibly modified. - # - # NB: a `for` loop captures its iteration list before it begins, so - # changing the positional parameters here affects neither the number of - # iterations, nor the values presented in `arg`. - shift # remove old arg - set -- "$@" "$arg" # push replacement arg - done -fi - -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. - -set -- \ - "-Dorg.gradle.appname=$APP_BASE_NAME" \ - -classpath "$CLASSPATH" \ - org.gradle.wrapper.GradleWrapperMain \ - "$@" - -# Stop when "xargs" is not available. -if ! command -v xargs >/dev/null 2>&1 -then - die "xargs is not available" -fi - -# Use "xargs" to parse quoted args. -# -# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
-# -# In Bash we could simply go: -# -# readarray ARGS < <( xargs -n1 <<<"$var" ) && -# set -- "${ARGS[@]}" "$@" -# -# but POSIX shell has neither arrays nor command substitution, so instead we -# post-process each arg (as a line of input to sed) to backslash-escape any -# character that might be a shell metacharacter, then use eval to reverse -# that process (while maintaining the separation between arguments), and wrap -# the whole thing up as a single "set" statement. -# -# This will of course break if any of these variables contains a newline or -# an unmatched quote. -# - -eval "set -- $( - printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | - xargs -n1 | - sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | - tr '\n' ' ' - )" '"$@"' - -exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat deleted file mode 100644 index 53a6b238..00000000 --- a/gradlew.bat +++ /dev/null @@ -1,91 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%"=="" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%"=="" set DIRNAME=. 
-set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if %ERRORLEVEL% equ 0 goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* - -:end -@rem End local scope for the variables with windows NT shell -if %ERRORLEVEL% equ 0 goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -set EXIT_CODE=%ERRORLEVEL% -if %EXIT_CODE% equ 0 set EXIT_CODE=1 -if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% -exit /b %EXIT_CODE% - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 00000000..97867981 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2026. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/internal/config/.gitkeep b/internal/config/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 00000000..f744c72e --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,252 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" +) + +// OperatorConfig holds all operator configuration loaded from environment variables. +type OperatorConfig struct { + // Job configuration + // TTLSecondsAfterFinished specifies how long a Job should exist after completion. + // nil means use Kubernetes default (don't set the field). 
+ TTLSecondsAfterFinished *int32 + + // Pod resource configuration for Locust containers + PodCPURequest string + PodMemRequest string + PodEphemeralStorageRequest string + PodCPULimit string + PodMemLimit string + PodEphemeralStorageLimit string + + // Role-specific pod resource configuration for master/worker Locust containers. + // Empty string means "use unified Pod* resource values above". + // Set via Helm masterResources/workerResources values. + MasterCPURequest string + MasterMemRequest string + MasterEphemeralStorageRequest string + MasterCPULimit string + MasterMemLimit string + MasterEphemeralStorageLimit string + WorkerCPURequest string + WorkerMemRequest string + WorkerEphemeralStorageRequest string + WorkerCPULimit string + WorkerMemLimit string + WorkerEphemeralStorageLimit string + + // Metrics exporter sidecar configuration + MetricsExporterImage string + MetricsExporterPort int32 + MetricsExporterPullPolicy string + MetricsExporterCPURequest string + MetricsExporterMemRequest string + MetricsExporterEphemeralStorageRequest string + MetricsExporterCPULimit string + MetricsExporterMemLimit string + MetricsExporterEphemeralStorageLimit string + + // Kafka configuration for optional Kafka integration + KafkaBootstrapServers string + KafkaSecurityEnabled bool + KafkaSecurityProtocol string + KafkaUsername string + KafkaPassword string + KafkaSaslMechanism string + KafkaSaslJaasConfig string + + // Feature flags + // EnableAffinityCRInjection enables injecting affinity rules from CR spec + EnableAffinityCRInjection bool + // EnableTolerationsCRInjection enables injecting tolerations from CR spec + EnableTolerationsCRInjection bool +} + +// LoadConfig loads operator configuration from environment variables. +// Default values match those in the Java operator's application.yml. +// Returns error if any resource values are invalid Kubernetes quantities. 
+func LoadConfig() (*OperatorConfig, error) {
+	cfg := &OperatorConfig{
+		// Job configuration
+		// nil (env var unset) means the Job's ttlSecondsAfterFinished field is
+		// left to the Kubernetes default rather than being set explicitly.
+		TTLSecondsAfterFinished: getEnvInt32Ptr("JOB_TTL_SECONDS_AFTER_FINISHED"),
+
+		// Pod resource configuration
+		PodCPURequest:              getEnv("POD_CPU_REQUEST", "250m"),
+		PodMemRequest:              getEnv("POD_MEM_REQUEST", "128Mi"),
+		PodEphemeralStorageRequest: getEnv("POD_EPHEMERAL_REQUEST", "30M"),
+		PodCPULimit:                getEnv("POD_CPU_LIMIT", "1000m"),
+		PodMemLimit:                getEnv("POD_MEM_LIMIT", "1024Mi"),
+		PodEphemeralStorageLimit:   getEnv("POD_EPHEMERAL_LIMIT", "50M"),
+
+		// Role-specific pod resources (empty = use unified Pod* values above)
+		MasterCPURequest:              getEnv("MASTER_POD_CPU_REQUEST", ""),
+		MasterMemRequest:              getEnv("MASTER_POD_MEM_REQUEST", ""),
+		MasterEphemeralStorageRequest: getEnv("MASTER_POD_EPHEMERAL_REQUEST", ""),
+		MasterCPULimit:                getEnv("MASTER_POD_CPU_LIMIT", ""),
+		MasterMemLimit:                getEnv("MASTER_POD_MEM_LIMIT", ""),
+		MasterEphemeralStorageLimit:   getEnv("MASTER_POD_EPHEMERAL_LIMIT", ""),
+		WorkerCPURequest:              getEnv("WORKER_POD_CPU_REQUEST", ""),
+		WorkerMemRequest:              getEnv("WORKER_POD_MEM_REQUEST", ""),
+		WorkerEphemeralStorageRequest: getEnv("WORKER_POD_EPHEMERAL_REQUEST", ""),
+		WorkerCPULimit:                getEnv("WORKER_POD_CPU_LIMIT", ""),
+		WorkerMemLimit:                getEnv("WORKER_POD_MEM_LIMIT", ""),
+		WorkerEphemeralStorageLimit:   getEnv("WORKER_POD_EPHEMERAL_LIMIT", ""),
+
+		// Metrics exporter configuration
+		MetricsExporterImage:                   getEnv("METRICS_EXPORTER_IMAGE", "containersol/locust_exporter:v0.5.0"),
+		MetricsExporterPort:                    getEnvInt32("METRICS_EXPORTER_PORT", 9646),
+		MetricsExporterPullPolicy:              getEnv("METRICS_EXPORTER_IMAGE_PULL_POLICY", "Always"),
+		MetricsExporterCPURequest:              getEnv("METRICS_EXPORTER_CPU_REQUEST", "250m"),
+		MetricsExporterMemRequest:              getEnv("METRICS_EXPORTER_MEM_REQUEST", "128Mi"),
+		MetricsExporterEphemeralStorageRequest: getEnv("METRICS_EXPORTER_EPHEMERAL_REQUEST", "30M"),
+		MetricsExporterCPULimit:                getEnv("METRICS_EXPORTER_CPU_LIMIT", "1000m"),
+		MetricsExporterMemLimit:                getEnv("METRICS_EXPORTER_MEM_LIMIT", "1024Mi"),
+		MetricsExporterEphemeralStorageLimit:   getEnv("METRICS_EXPORTER_EPHEMERAL_LIMIT", "50M"),
+
+		// Kafka configuration
+		// Credentials default to "" so nothing sensitive is baked into defaults.
+		KafkaBootstrapServers: getEnv("KAFKA_BOOTSTRAP_SERVERS", "localhost:9092"),
+		KafkaSecurityEnabled:  getEnvBool("KAFKA_SECURITY_ENABLED", false),
+		KafkaSecurityProtocol: getEnv("KAFKA_SECURITY_PROTOCOL_CONFIG", "SASL_PLAINTEXT"),
+		KafkaUsername:         getEnv("KAFKA_USERNAME", ""),
+		KafkaPassword:         getEnv("KAFKA_PASSWORD", ""),
+		KafkaSaslMechanism:    getEnv("KAFKA_SASL_MECHANISM", "SCRAM-SHA-512"),
+		KafkaSaslJaasConfig:   getEnv("KAFKA_SASL_JAAS_CONFIG", ""),
+
+		// Feature flags
+		EnableAffinityCRInjection:    getEnvBool("ENABLE_AFFINITY_CR_INJECTION", false),
+		EnableTolerationsCRInjection: getEnvBool("ENABLE_TAINT_TOLERATIONS_CR_INJECTION", false),
+	}
+
+	// Validate all resource quantities at startup
+	// Fail fast: a malformed resource env var aborts operator startup here rather
+	// than surfacing later as Jobs rejected by the API server at test-creation time.
+	if err := validateResourceQuantities(cfg); err != nil {
+		return nil, fmt.Errorf("invalid operator configuration: %w", err)
+	}
+
+	return cfg, nil
+}
+
+// validateResourceQuantities validates all resource quantity strings in config.
+func validateResourceQuantities(cfg *OperatorConfig) error { + quantities := map[string]string{ + "POD_CPU_REQUEST": cfg.PodCPURequest, + "POD_MEM_REQUEST": cfg.PodMemRequest, + "POD_EPHEMERAL_REQUEST": cfg.PodEphemeralStorageRequest, + "POD_CPU_LIMIT": cfg.PodCPULimit, + "POD_MEM_LIMIT": cfg.PodMemLimit, + "POD_EPHEMERAL_LIMIT": cfg.PodEphemeralStorageLimit, + "MASTER_POD_CPU_REQUEST": cfg.MasterCPURequest, + "MASTER_POD_MEM_REQUEST": cfg.MasterMemRequest, + "MASTER_POD_EPHEMERAL_REQUEST": cfg.MasterEphemeralStorageRequest, + "MASTER_POD_CPU_LIMIT": cfg.MasterCPULimit, + "MASTER_POD_MEM_LIMIT": cfg.MasterMemLimit, + "MASTER_POD_EPHEMERAL_LIMIT": cfg.MasterEphemeralStorageLimit, + "WORKER_POD_CPU_REQUEST": cfg.WorkerCPURequest, + "WORKER_POD_MEM_REQUEST": cfg.WorkerMemRequest, + "WORKER_POD_EPHEMERAL_REQUEST": cfg.WorkerEphemeralStorageRequest, + "WORKER_POD_CPU_LIMIT": cfg.WorkerCPULimit, + "WORKER_POD_MEM_LIMIT": cfg.WorkerMemLimit, + "WORKER_POD_EPHEMERAL_LIMIT": cfg.WorkerEphemeralStorageLimit, + "METRICS_EXPORTER_CPU_REQUEST": cfg.MetricsExporterCPURequest, + "METRICS_EXPORTER_MEM_REQUEST": cfg.MetricsExporterMemRequest, + "METRICS_EXPORTER_EPHEMERAL_REQUEST": cfg.MetricsExporterEphemeralStorageRequest, + "METRICS_EXPORTER_CPU_LIMIT": cfg.MetricsExporterCPULimit, + "METRICS_EXPORTER_MEM_LIMIT": cfg.MetricsExporterMemLimit, + "METRICS_EXPORTER_EPHEMERAL_LIMIT": cfg.MetricsExporterEphemeralStorageLimit, + } + + var errs []error + for name, value := range quantities { + if value == "" { + continue // Empty string means "not set", which is valid + } + if _, err := resource.ParseQuantity(value); err != nil { + errs = append(errs, fmt.Errorf("invalid value for %s: %q is not a valid Kubernetes quantity", name, value)) + } + } + + if len(errs) > 0 { + return fmt.Errorf("resource validation failed:\n%s", formatErrors(errs)) + } + + return nil +} + +// formatErrors formats a slice of errors for display. 
+func formatErrors(errs []error) string {
+	// Render each error as an indented bullet; the caller prepends its own header.
+	msgs := make([]string, 0, len(errs))
+	for _, err := range errs {
+		msgs = append(msgs, " - "+err.Error())
+	}
+	return strings.Join(msgs, "\n")
+}
+
+// getEnv returns the value of an environment variable or a default value if not set.
+// Note: a variable that is set but empty is treated the same as unset.
+func getEnv(key, defaultValue string) string {
+	if v := os.Getenv(key); v != "" {
+		return v
+	}
+	return defaultValue
+}
+
+// getEnvBool returns the boolean value of an environment variable or a default value if not set.
+// Accepts any form strconv.ParseBool does (e.g. "1", "true", "TRUE"); invalid values
+// log a warning and fall back to the default rather than aborting startup.
+func getEnvBool(key string, defaultValue bool) bool {
+	if v := os.Getenv(key); v != "" {
+		b, err := strconv.ParseBool(v)
+		if err != nil {
+			log.Printf("WARNING: env var %s has unparseable boolean value %q, using default %v", key, v, defaultValue)
+			return defaultValue
+		}
+		return b
+	}
+	return defaultValue
+}
+
+// getEnvInt32 returns the int32 value of an environment variable or a default value if not set.
+// Values outside the int32 range fail ParseInt's 32-bit bound and fall back to the default.
+func getEnvInt32(key string, defaultValue int32) int32 {
+	if v := os.Getenv(key); v != "" {
+		i, err := strconv.ParseInt(v, 10, 32)
+		if err != nil {
+			log.Printf("WARNING: env var %s has unparseable int32 value %q, using default %d", key, v, defaultValue)
+			return defaultValue
+		}
+		return int32(i)
+	}
+	return defaultValue
+}
+
+// getEnvInt32Ptr returns a pointer to an int32 value of an environment variable, or nil if not set.
+// This is used for optional fields where nil indicates "not configured" vs 0.
+// Unlike getEnvInt32, an unparseable value yields nil (treated as unset), not a default.
+func getEnvInt32Ptr(key string) *int32 {
+	if v := os.Getenv(key); v != "" {
+		i, err := strconv.ParseInt(v, 10, 32)
+		if err != nil {
+			log.Printf("WARNING: env var %s has unparseable int32 value %q, ignoring", key, v)
+			return nil
+		}
+		val := int32(i)
+		return &val
+	}
+	return nil
+}
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
new file mode 100644
index 00000000..b7c7415e
--- /dev/null
+++ b/internal/config/config_test.go
@@ -0,0 +1,493 @@
+/*
+Copyright 2026.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "bytes" + "log" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// captureLogOutput captures log output during test execution. +func captureLogOutput(t *testing.T, fn func()) string { + t.Helper() + var buf bytes.Buffer + log.SetOutput(&buf) + defer log.SetOutput(os.Stderr) + fn() + return buf.String() +} + +func TestLoadConfig_DefaultValues(t *testing.T) { + // Clear any existing env vars that might interfere + envVars := []string{ + "JOB_TTL_SECONDS_AFTER_FINISHED", + "POD_CPU_REQUEST", + "POD_MEM_REQUEST", + "POD_EPHEMERAL_REQUEST", + "POD_CPU_LIMIT", + "POD_MEM_LIMIT", + "POD_EPHEMERAL_LIMIT", + "METRICS_EXPORTER_IMAGE", + "METRICS_EXPORTER_PORT", + "METRICS_EXPORTER_IMAGE_PULL_POLICY", + "METRICS_EXPORTER_CPU_REQUEST", + "METRICS_EXPORTER_MEM_REQUEST", + "METRICS_EXPORTER_EPHEMERAL_REQUEST", + "METRICS_EXPORTER_CPU_LIMIT", + "METRICS_EXPORTER_MEM_LIMIT", + "METRICS_EXPORTER_EPHEMERAL_LIMIT", + "KAFKA_BOOTSTRAP_SERVERS", + "KAFKA_SECURITY_ENABLED", + "KAFKA_SECURITY_PROTOCOL_CONFIG", + "KAFKA_USERNAME", + "KAFKA_PASSWORD", + "KAFKA_SASL_MECHANISM", + "KAFKA_SASL_JAAS_CONFIG", + "ENABLE_AFFINITY_CR_INJECTION", + "ENABLE_TAINT_TOLERATIONS_CR_INJECTION", + } + for _, env := range envVars { + _ = os.Unsetenv(env) + } + + cfg, err := LoadConfig() + require.NoError(t, err) + + // Job configuration + assert.Nil(t, cfg.TTLSecondsAfterFinished, "TTL 
should be nil when not set") + + // Pod resources - match Java application.yml defaults + assert.Equal(t, "250m", cfg.PodCPURequest) + assert.Equal(t, "128Mi", cfg.PodMemRequest) + assert.Equal(t, "30M", cfg.PodEphemeralStorageRequest) + assert.Equal(t, "1000m", cfg.PodCPULimit) + assert.Equal(t, "1024Mi", cfg.PodMemLimit) + assert.Equal(t, "50M", cfg.PodEphemeralStorageLimit) + + // Metrics exporter - match Java application.yml defaults + assert.Equal(t, "containersol/locust_exporter:v0.5.0", cfg.MetricsExporterImage) + assert.Equal(t, int32(9646), cfg.MetricsExporterPort) + assert.Equal(t, "Always", cfg.MetricsExporterPullPolicy) + assert.Equal(t, "250m", cfg.MetricsExporterCPURequest) + assert.Equal(t, "128Mi", cfg.MetricsExporterMemRequest) + assert.Equal(t, "30M", cfg.MetricsExporterEphemeralStorageRequest) + assert.Equal(t, "1000m", cfg.MetricsExporterCPULimit) + assert.Equal(t, "1024Mi", cfg.MetricsExporterMemLimit) + assert.Equal(t, "50M", cfg.MetricsExporterEphemeralStorageLimit) + + // Kafka configuration - match Java application.yml defaults + assert.Equal(t, "localhost:9092", cfg.KafkaBootstrapServers) + assert.False(t, cfg.KafkaSecurityEnabled) + assert.Equal(t, "SASL_PLAINTEXT", cfg.KafkaSecurityProtocol) + assert.Equal(t, "", cfg.KafkaUsername) // Empty for security + assert.Equal(t, "", cfg.KafkaPassword) // Empty for security + assert.Equal(t, "SCRAM-SHA-512", cfg.KafkaSaslMechanism) + assert.Equal(t, "", cfg.KafkaSaslJaasConfig) // Empty for security + + // Feature flags + assert.False(t, cfg.EnableAffinityCRInjection) + assert.False(t, cfg.EnableTolerationsCRInjection) +} + +func TestLoadConfig_EnvironmentOverrides(t *testing.T) { + // Set up test env vars + t.Setenv("JOB_TTL_SECONDS_AFTER_FINISHED", "300") + t.Setenv("POD_CPU_REQUEST", "500m") + t.Setenv("POD_MEM_REQUEST", "256Mi") + t.Setenv("POD_EPHEMERAL_REQUEST", "100M") + t.Setenv("POD_CPU_LIMIT", "2000m") + t.Setenv("POD_MEM_LIMIT", "2048Mi") + t.Setenv("POD_EPHEMERAL_LIMIT", "200M") + 
t.Setenv("METRICS_EXPORTER_IMAGE", "custom/exporter:v1.0.0") + t.Setenv("METRICS_EXPORTER_PORT", "9000") + t.Setenv("METRICS_EXPORTER_IMAGE_PULL_POLICY", "IfNotPresent") + t.Setenv("ENABLE_AFFINITY_CR_INJECTION", "true") + t.Setenv("ENABLE_TAINT_TOLERATIONS_CR_INJECTION", "true") + + cfg, err := LoadConfig() + require.NoError(t, err) + + // Verify overrides + require.NotNil(t, cfg.TTLSecondsAfterFinished) + assert.Equal(t, int32(300), *cfg.TTLSecondsAfterFinished) + assert.Equal(t, "500m", cfg.PodCPURequest) + assert.Equal(t, "256Mi", cfg.PodMemRequest) + assert.Equal(t, "100M", cfg.PodEphemeralStorageRequest) + assert.Equal(t, "2000m", cfg.PodCPULimit) + assert.Equal(t, "2048Mi", cfg.PodMemLimit) + assert.Equal(t, "200M", cfg.PodEphemeralStorageLimit) + assert.Equal(t, "custom/exporter:v1.0.0", cfg.MetricsExporterImage) + assert.Equal(t, int32(9000), cfg.MetricsExporterPort) + assert.Equal(t, "IfNotPresent", cfg.MetricsExporterPullPolicy) + assert.True(t, cfg.EnableAffinityCRInjection) + assert.True(t, cfg.EnableTolerationsCRInjection) +} + +func TestLoadConfig_TTLSecondsAfterFinished_ZeroValue(t *testing.T) { + t.Setenv("JOB_TTL_SECONDS_AFTER_FINISHED", "0") + + cfg, err := LoadConfig() + require.NoError(t, err) + + // TTL of 0 should be a valid value (immediate cleanup) + require.NotNil(t, cfg.TTLSecondsAfterFinished) + assert.Equal(t, int32(0), *cfg.TTLSecondsAfterFinished) +} + +func TestLoadConfig_KafkaConfiguration(t *testing.T) { + t.Setenv("KAFKA_BOOTSTRAP_SERVERS", "kafka.example.com:9092") + t.Setenv("KAFKA_SECURITY_ENABLED", "true") + t.Setenv("KAFKA_SECURITY_PROTOCOL_CONFIG", "SASL_SSL") + t.Setenv("KAFKA_USERNAME", "user") + t.Setenv("KAFKA_PASSWORD", "secret") + t.Setenv("KAFKA_SASL_MECHANISM", "PLAIN") + t.Setenv("KAFKA_SASL_JAAS_CONFIG", "org.apache.kafka.common.security.plain.PlainLoginModule required;") + + cfg, err := LoadConfig() + require.NoError(t, err) + + assert.Equal(t, "kafka.example.com:9092", cfg.KafkaBootstrapServers) + assert.True(t, 
cfg.KafkaSecurityEnabled) + assert.Equal(t, "SASL_SSL", cfg.KafkaSecurityProtocol) + assert.Equal(t, "user", cfg.KafkaUsername) + assert.Equal(t, "secret", cfg.KafkaPassword) + assert.Equal(t, "PLAIN", cfg.KafkaSaslMechanism) + assert.Equal(t, "org.apache.kafka.common.security.plain.PlainLoginModule required;", cfg.KafkaSaslJaasConfig) +} + +func TestLoadConfig_MetricsExporterConfiguration(t *testing.T) { + t.Setenv("METRICS_EXPORTER_IMAGE", "myregistry/locust-exporter:v2.0.0") + t.Setenv("METRICS_EXPORTER_PORT", "8080") + t.Setenv("METRICS_EXPORTER_IMAGE_PULL_POLICY", "Never") + t.Setenv("METRICS_EXPORTER_CPU_REQUEST", "100m") + t.Setenv("METRICS_EXPORTER_MEM_REQUEST", "64Mi") + t.Setenv("METRICS_EXPORTER_EPHEMERAL_REQUEST", "10M") + t.Setenv("METRICS_EXPORTER_CPU_LIMIT", "500m") + t.Setenv("METRICS_EXPORTER_MEM_LIMIT", "512Mi") + t.Setenv("METRICS_EXPORTER_EPHEMERAL_LIMIT", "100M") + + cfg, err := LoadConfig() + require.NoError(t, err) + + assert.Equal(t, "myregistry/locust-exporter:v2.0.0", cfg.MetricsExporterImage) + assert.Equal(t, int32(8080), cfg.MetricsExporterPort) + assert.Equal(t, "Never", cfg.MetricsExporterPullPolicy) + assert.Equal(t, "100m", cfg.MetricsExporterCPURequest) + assert.Equal(t, "64Mi", cfg.MetricsExporterMemRequest) + assert.Equal(t, "10M", cfg.MetricsExporterEphemeralStorageRequest) + assert.Equal(t, "500m", cfg.MetricsExporterCPULimit) + assert.Equal(t, "512Mi", cfg.MetricsExporterMemLimit) + assert.Equal(t, "100M", cfg.MetricsExporterEphemeralStorageLimit) +} + +func TestGetEnv(t *testing.T) { + t.Setenv("TEST_VAR", "value") + + assert.Equal(t, "value", getEnv("TEST_VAR", "default")) + assert.Equal(t, "default", getEnv("NONEXISTENT_VAR", "default")) +} + +func TestGetEnv_EmptyValue(t *testing.T) { + // Empty string should return default + t.Setenv("TEST_EMPTY", "") + assert.Equal(t, "default", getEnv("TEST_EMPTY", "default")) +} + +func TestGetEnvBool(t *testing.T) { + tests := []struct { + name string + envValue string + setEnv bool + 
defaultValue bool + expected bool + }{ + {"true string", "true", true, false, true}, + {"false string", "false", true, true, false}, + {"1 value", "1", true, false, true}, + {"0 value", "0", true, true, false}, + {"TRUE uppercase", "TRUE", true, false, true}, + {"FALSE uppercase", "FALSE", true, true, false}, + {"invalid value returns default true", "invalid", true, true, true}, + {"invalid value returns default false", "invalid", true, false, false}, + {"unset uses default true", "", false, true, true}, + {"unset uses default false", "", false, false, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setEnv { + t.Setenv("TEST_BOOL", tt.envValue) + } else { + _ = os.Unsetenv("TEST_BOOL") + } + result := getEnvBool("TEST_BOOL", tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetEnvInt32(t *testing.T) { + t.Run("valid integer", func(t *testing.T) { + t.Setenv("TEST_INT", "42") + assert.Equal(t, int32(42), getEnvInt32("TEST_INT", 0)) + }) + + t.Run("unset returns default", func(t *testing.T) { + _ = os.Unsetenv("TEST_INT") + assert.Equal(t, int32(100), getEnvInt32("TEST_INT", 100)) + }) + + t.Run("invalid value returns default", func(t *testing.T) { + t.Setenv("TEST_INT", "invalid") + assert.Equal(t, int32(100), getEnvInt32("TEST_INT", 100)) + }) + + t.Run("negative value", func(t *testing.T) { + t.Setenv("TEST_INT", "-10") + assert.Equal(t, int32(-10), getEnvInt32("TEST_INT", 0)) + }) + + t.Run("zero value", func(t *testing.T) { + t.Setenv("TEST_INT", "0") + assert.Equal(t, int32(0), getEnvInt32("TEST_INT", 100)) + }) +} + +func TestGetEnvInt32Ptr(t *testing.T) { + t.Run("not set returns nil", func(t *testing.T) { + _ = os.Unsetenv("TEST_PTR") + result := getEnvInt32Ptr("TEST_PTR") + assert.Nil(t, result) + }) + + t.Run("valid value returns pointer", func(t *testing.T) { + t.Setenv("TEST_PTR", "42") + result := getEnvInt32Ptr("TEST_PTR") + require.NotNil(t, result) + assert.Equal(t, int32(42), 
*result) + }) + + t.Run("zero value returns pointer to zero", func(t *testing.T) { + t.Setenv("TEST_PTR", "0") + result := getEnvInt32Ptr("TEST_PTR") + require.NotNil(t, result) + assert.Equal(t, int32(0), *result) + }) + + t.Run("invalid value returns nil", func(t *testing.T) { + t.Setenv("TEST_PTR", "invalid") + result := getEnvInt32Ptr("TEST_PTR") + assert.Nil(t, result) + }) + + t.Run("empty string returns nil", func(t *testing.T) { + t.Setenv("TEST_PTR", "") + result := getEnvInt32Ptr("TEST_PTR") + assert.Nil(t, result) + }) + + t.Run("negative value returns pointer", func(t *testing.T) { + t.Setenv("TEST_PTR", "-5") + result := getEnvInt32Ptr("TEST_PTR") + require.NotNil(t, result) + assert.Equal(t, int32(-5), *result) + }) +} + +// TestLoadConfig_ValidResourceQuantities tests that valid resource quantities pass validation +func TestLoadConfig_ValidResourceQuantities(t *testing.T) { + t.Setenv("POD_CPU_REQUEST", "500m") + t.Setenv("POD_MEM_REQUEST", "256Mi") + t.Setenv("POD_CPU_LIMIT", "2000m") + + cfg, err := LoadConfig() + + require.NoError(t, err) + assert.Equal(t, "500m", cfg.PodCPURequest) + assert.Equal(t, "256Mi", cfg.PodMemRequest) + assert.Equal(t, "2000m", cfg.PodCPULimit) +} + +// TestLoadConfig_InvalidResourceQuantity tests that invalid resource quantities return error +func TestLoadConfig_InvalidResourceQuantity(t *testing.T) { + t.Setenv("POD_CPU_REQUEST", "garbage") + + cfg, err := LoadConfig() + + assert.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "POD_CPU_REQUEST") + assert.Contains(t, err.Error(), "garbage") +} + +// TestLoadConfig_MultipleInvalidResources tests that multiple invalid values are all reported +func TestLoadConfig_MultipleInvalidResources(t *testing.T) { + t.Setenv("POD_CPU_REQUEST", "invalid-cpu") + t.Setenv("POD_MEM_LIMIT", "bad-memory") + t.Setenv("METRICS_EXPORTER_CPU_REQUEST", "wrong") + + cfg, err := LoadConfig() + + assert.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), 
"POD_CPU_REQUEST") + assert.Contains(t, err.Error(), "POD_MEM_LIMIT") + assert.Contains(t, err.Error(), "METRICS_EXPORTER_CPU_REQUEST") +} + +// TestLoadConfig_EmptyResourceStrings tests that empty strings are treated as optional (not validated) +func TestLoadConfig_EmptyResourceStrings(t *testing.T) { + // Clear all resource env vars to get empty defaults if any + envVars := []string{ + "POD_CPU_REQUEST", + "POD_MEM_REQUEST", + "POD_EPHEMERAL_REQUEST", + "POD_CPU_LIMIT", + "POD_MEM_LIMIT", + "POD_EPHEMERAL_LIMIT", + "MASTER_POD_CPU_REQUEST", + "MASTER_POD_MEM_REQUEST", + "WORKER_POD_CPU_REQUEST", + "WORKER_POD_MEM_REQUEST", + } + for _, env := range envVars { + _ = os.Unsetenv(env) + } + + // Set master/worker role-specific to empty explicitly + t.Setenv("MASTER_POD_CPU_REQUEST", "") + t.Setenv("WORKER_POD_CPU_REQUEST", "") + + cfg, err := LoadConfig() + + // Should succeed - empty strings are valid (mean "not set") + require.NoError(t, err) + require.NotNil(t, cfg) +} + +// TestLoadConfig_RoleSpecificResourceDefaults tests that role-specific resource fields default to empty +func TestLoadConfig_RoleSpecificResourceDefaults(t *testing.T) { + // Clear all MASTER_POD_* and WORKER_POD_* env vars + envVars := []string{ + "MASTER_POD_CPU_REQUEST", + "MASTER_POD_MEM_REQUEST", + "MASTER_POD_EPHEMERAL_REQUEST", + "MASTER_POD_CPU_LIMIT", + "MASTER_POD_MEM_LIMIT", + "MASTER_POD_EPHEMERAL_LIMIT", + "WORKER_POD_CPU_REQUEST", + "WORKER_POD_MEM_REQUEST", + "WORKER_POD_EPHEMERAL_REQUEST", + "WORKER_POD_CPU_LIMIT", + "WORKER_POD_MEM_LIMIT", + "WORKER_POD_EPHEMERAL_LIMIT", + } + for _, env := range envVars { + _ = os.Unsetenv(env) + } + + cfg, err := LoadConfig() + + require.NoError(t, err) + require.NotNil(t, cfg) + + // Assert all 12 role-specific fields default to empty string + assert.Equal(t, "", cfg.MasterCPURequest) + assert.Equal(t, "", cfg.MasterMemRequest) + assert.Equal(t, "", cfg.MasterEphemeralStorageRequest) + assert.Equal(t, "", cfg.MasterCPULimit) + assert.Equal(t, 
"", cfg.MasterMemLimit) + assert.Equal(t, "", cfg.MasterEphemeralStorageLimit) + assert.Equal(t, "", cfg.WorkerCPURequest) + assert.Equal(t, "", cfg.WorkerMemRequest) + assert.Equal(t, "", cfg.WorkerEphemeralStorageRequest) + assert.Equal(t, "", cfg.WorkerCPULimit) + assert.Equal(t, "", cfg.WorkerMemLimit) + assert.Equal(t, "", cfg.WorkerEphemeralStorageLimit) +} + +// TestLoadConfig_RoleSpecificResourceOverrides tests that role-specific env vars are loaded correctly +func TestLoadConfig_RoleSpecificResourceOverrides(t *testing.T) { + // Set some role-specific env vars + t.Setenv("MASTER_POD_CPU_REQUEST", "500m") + t.Setenv("MASTER_POD_MEM_REQUEST", "512Mi") + t.Setenv("WORKER_POD_CPU_LIMIT", "2000m") + + cfg, err := LoadConfig() + + require.NoError(t, err) + require.NotNil(t, cfg) + + // Assert fields populated correctly + assert.Equal(t, "500m", cfg.MasterCPURequest) + assert.Equal(t, "512Mi", cfg.MasterMemRequest) + assert.Equal(t, "2000m", cfg.WorkerCPULimit) + + // Assert unset fields remain empty string + assert.Equal(t, "", cfg.MasterEphemeralStorageRequest) + assert.Equal(t, "", cfg.MasterCPULimit) + assert.Equal(t, "", cfg.WorkerCPURequest) +} + +// TestLoadConfig_InvalidRoleSpecificResource tests that invalid role-specific quantities return error +func TestLoadConfig_InvalidRoleSpecificResource(t *testing.T) { + t.Setenv("MASTER_POD_CPU_REQUEST", "garbage") + + cfg, err := LoadConfig() + + assert.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "MASTER_POD_CPU_REQUEST") +} + +func TestGetEnvBool_WarnsOnInvalidValue(t *testing.T) { + t.Setenv("TEST_BOOL_WARN", "notabool") + output := captureLogOutput(t, func() { + result := getEnvBool("TEST_BOOL_WARN", true) + assert.True(t, result, "Should return default on invalid value") + }) + assert.Contains(t, output, "WARNING") + assert.Contains(t, output, "TEST_BOOL_WARN") + assert.Contains(t, output, "notabool") +} + +func TestGetEnvInt32_WarnsOnInvalidValue(t *testing.T) { + 
t.Setenv("TEST_INT_WARN", "notanumber") + output := captureLogOutput(t, func() { + result := getEnvInt32("TEST_INT_WARN", 42) + assert.Equal(t, int32(42), result, "Should return default on invalid value") + }) + assert.Contains(t, output, "WARNING") + assert.Contains(t, output, "TEST_INT_WARN") + assert.Contains(t, output, "notanumber") +} + +func TestGetEnvInt32Ptr_WarnsOnInvalidValue(t *testing.T) { + t.Setenv("TEST_PTR_WARN", "invalid") + output := captureLogOutput(t, func() { + result := getEnvInt32Ptr("TEST_PTR_WARN") + assert.Nil(t, result, "Should return nil on invalid value") + }) + assert.Contains(t, output, "WARNING") + assert.Contains(t, output, "TEST_PTR_WARN") + assert.Contains(t, output, "invalid") +} diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go new file mode 100644 index 00000000..b3e878f6 --- /dev/null +++ b/internal/controller/integration_test.go @@ -0,0 +1,850 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" +) + +var _ = Describe("LocustTest Controller Integration", func() { + var ( + testNamespace string + testCounter int + ) + + BeforeEach(func() { + testCounter++ + testNamespace = fmt.Sprintf("test-ns-%d", testCounter) + + // Create namespace for test isolation + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, ns)).To(Succeed()) + }) + + AfterEach(func() { + // Cleanup namespace (cascades to all resources) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Delete(ctx, ns)).To(Succeed()) + }) + + // Helper to create a standard test LocustTest + createLocustTest := func(name string) *locustv2.LocustTest { + return &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + TestFiles: &locustv2.TestFilesConfig{ + ConfigMapRef: "test-configmap", + }, + }, + } + } + + // ==================== CREATE FLOW TESTS ==================== + Describe("Create Flow", func() { + It("should create master Service when LocustTest is created", func() { + lt := createLocustTest("create-service-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for Service to be created + svcKey := types.NamespacedName{ + Name: "create-service-test-master", + Namespace: testNamespace, + } + createdSvc := &corev1.Service{} + + Eventually(func() error { + return k8sClient.Get(ctx, svcKey, createdSvc) + }, timeout, 
interval).Should(Succeed()) + + // Verify Service properties + // Service selector uses performance-test-pod-name label + Expect(createdSvc.Spec.Selector).To(HaveKeyWithValue("performance-test-pod-name", "create-service-test-master")) + // Service has 3 ports: 5557, 5558, metrics (WebUI 8089 is excluded) + Expect(createdSvc.Spec.Ports).To(HaveLen(3)) + }) + + It("should create master Job when LocustTest is created", func() { + lt := createLocustTest("create-master-job-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + jobKey := types.NamespacedName{ + Name: "create-master-job-test-master", + Namespace: testNamespace, + } + createdJob := &batchv1.Job{} + + Eventually(func() error { + return k8sClient.Get(ctx, jobKey, createdJob) + }, timeout, interval).Should(Succeed()) + + // Verify master Job properties + Expect(*createdJob.Spec.Parallelism).To(Equal(int32(1))) + // Completions is not explicitly set in the builder (nil means 1 by default) + Expect(createdJob.Spec.Template.Spec.Containers).To(HaveLen(2)) // locust + metrics + }) + + It("should create worker Job when LocustTest is created", func() { + lt := createLocustTest("create-worker-job-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + jobKey := types.NamespacedName{ + Name: "create-worker-job-test-worker", + Namespace: testNamespace, + } + createdJob := &batchv1.Job{} + + Eventually(func() error { + return k8sClient.Get(ctx, jobKey, createdJob) + }, timeout, interval).Should(Succeed()) + + // Verify worker Job properties + Expect(*createdJob.Spec.Parallelism).To(Equal(int32(3))) // WorkerReplicas + Expect(createdJob.Spec.Completions).To(BeNil()) // Nil for workers + }) + + It("should set owner references on created resources", func() { + lt := createLocustTest("owner-ref-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Get the created LocustTest to get its UID + createdLT := &locustv2.LocustTest{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ 
+ Name: "owner-ref-test", Namespace: testNamespace, + }, createdLT) + }, timeout, interval).Should(Succeed()) + + // Check Service owner reference + svc := &corev1.Service{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "owner-ref-test-master", Namespace: testNamespace, + }, svc) + }, timeout, interval).Should(Succeed()) + + Expect(svc.OwnerReferences).To(HaveLen(1)) + Expect(svc.OwnerReferences[0].Name).To(Equal("owner-ref-test")) + Expect(svc.OwnerReferences[0].Kind).To(Equal("LocustTest")) + Expect(svc.OwnerReferences[0].UID).To(Equal(createdLT.UID)) + + // Check master Job owner reference + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "owner-ref-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + Expect(masterJob.OwnerReferences).To(HaveLen(1)) + Expect(masterJob.OwnerReferences[0].Name).To(Equal("owner-ref-test")) + + // Check worker Job owner reference + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "owner-ref-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(workerJob.OwnerReferences).To(HaveLen(1)) + Expect(workerJob.OwnerReferences[0].Name).To(Equal("owner-ref-test")) + }) + + It("should create all resources with correct labels", func() { + lt := createLocustTest("labels-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Verify Service labels + svc := &corev1.Service{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "labels-test-master", Namespace: testNamespace, + }, svc) + }, timeout, interval).Should(Succeed()) + + // Service doesn't have labels set in BuildMasterService - verify it was created + Expect(svc.Name).To(Equal("labels-test-master")) + + // Verify Job labels + job := &batchv1.Job{} + Eventually(func() error { + 
return k8sClient.Get(ctx, types.NamespacedName{ + Name: "labels-test-master", Namespace: testNamespace, + }, job) + }, timeout, interval).Should(Succeed()) + + // Job template has labels, verify pod template labels + Expect(job.Spec.Template.Labels).To(HaveKeyWithValue("performance-test-pod-name", "labels-test-master")) + Expect(job.Spec.Template.Labels).To(HaveKeyWithValue("managed-by", "locust-k8s-operator")) + }) + }) + + // ==================== CREATE FLOW EDGE CASES ==================== + Describe("Create Flow - Edge Cases", func() { + It("should handle LocustTest with custom labels", func() { + lt := createLocustTest("custom-labels-test") + lt.Spec.Master.Labels = map[string]string{ + "custom-label": "master-value", + } + lt.Spec.Worker.Labels = map[string]string{ + "custom-label": "worker-value", + } + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Verify custom labels on master Job + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "custom-labels-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + Expect(masterJob.Spec.Template.Labels).To(HaveKeyWithValue("custom-label", "master-value")) + + // Verify custom labels on worker Job + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "custom-labels-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(workerJob.Spec.Template.Labels).To(HaveKeyWithValue("custom-label", "worker-value")) + }) + + It("should handle LocustTest with custom annotations", func() { + lt := createLocustTest("custom-annotations-test") + lt.Spec.Master.Annotations = map[string]string{ + "custom-annotation": "master-value", + } + lt.Spec.Worker.Annotations = map[string]string{ + "custom-annotation": "worker-value", + } + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Verify custom annotations on 
master Job + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "custom-annotations-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + Expect(masterJob.Spec.Template.Annotations).To(HaveKeyWithValue("custom-annotation", "master-value")) + + // Verify custom annotations on worker Job + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "custom-annotations-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(workerJob.Spec.Template.Annotations).To(HaveKeyWithValue("custom-annotation", "worker-value")) + }) + + It("should handle LocustTest with affinity configuration", func() { + lt := createLocustTest("affinity-test") + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"performance"}, + }, + }, + }, + }, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "affinity-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + // Verify affinity is set (feature flag may disable injection) + // The affinity structure depends on EnableAffinityCRInjection config + // Just verify the Job was created successfully + Expect(masterJob.Name).To(Equal("affinity-test-master")) + }) + + It("should handle LocustTest with tolerations", func() { + lt := createLocustTest("tolerations-test") + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Tolerations: 
[]corev1.Toleration{ + { + Key: "dedicated", + Operator: corev1.TolerationOpEqual, + Value: "performance", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + } + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "tolerations-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + // Verify Job was created (tolerations depend on EnableTolerationsCRInjection) + Expect(masterJob.Name).To(Equal("tolerations-test-master")) + }) + + It("should handle LocustTest with single worker", func() { + lt := createLocustTest("single-worker-test") + lt.Spec.Worker.Replicas = 1 + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "single-worker-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(*workerJob.Spec.Parallelism).To(Equal(int32(1))) + }) + + It("should handle LocustTest with maximum workers", func() { + lt := createLocustTest("max-workers-test") + lt.Spec.Worker.Replicas = 500 // Maximum allowed + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "max-workers-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(*workerJob.Spec.Parallelism).To(Equal(int32(500))) + }) + + It("should handle LocustTest with imagePullSecrets", func() { + lt := createLocustTest("pull-secrets-test") + lt.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: "my-registry-secret"}, + {Name: "another-secret"}, + } + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: 
"pull-secrets-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + Expect(masterJob.Spec.Template.Spec.ImagePullSecrets).To(HaveLen(2)) + Expect(masterJob.Spec.Template.Spec.ImagePullSecrets[0].Name).To(Equal("my-registry-secret")) + Expect(masterJob.Spec.Template.Spec.ImagePullSecrets[1].Name).To(Equal("another-secret")) + }) + }) + + // ==================== UPDATE NO-OP TESTS ==================== + Describe("Update NO-OP Flow", func() { + It("should NOT create new resources when CR spec is updated", func() { + lt := createLocustTest("update-noop-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for initial resources to be created + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "update-noop-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + originalUID := masterJob.UID + originalResourceVersion := masterJob.ResourceVersion + + // Update the CR spec - retry on conflict + Eventually(func() error { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "update-noop-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return err + } + updatedLT.Spec.Worker.Replicas = 10 // Change worker count + return k8sClient.Update(ctx, updatedLT) + }, timeout, interval).Should(Succeed()) + + // Wait a bit for potential reconciliation + Consistently(func() string { + job := &batchv1.Job{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "update-noop-test-master", Namespace: testNamespace, + }, job) + if err != nil { + return "" + } + return string(job.UID) + }, timeout/2, interval).Should(Equal(string(originalUID))) + + // Verify resource version unchanged (no modifications) + finalJob := &batchv1.Job{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: "update-noop-test-master", Namespace: testNamespace, + }, 
finalJob)).To(Succeed()) + + Expect(finalJob.ResourceVersion).To(Equal(originalResourceVersion)) + }) + + It("should NOT modify worker Job when workerReplicas is changed", func() { + lt := createLocustTest("worker-update-noop-test") + lt.Spec.Worker.Replicas = 5 + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for worker Job + workerJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "worker-update-noop-test-worker", Namespace: testNamespace, + }, workerJob) + }, timeout, interval).Should(Succeed()) + + Expect(*workerJob.Spec.Parallelism).To(Equal(int32(5))) + originalUID := workerJob.UID + + // Wait for status to be initialized + Eventually(func() string { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "worker-update-noop-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return "" + } + return string(updatedLT.Status.Phase) + }, timeout, interval).Should(Equal(string(locustv2.PhaseRunning))) + + // Update workerReplicas using Eventually to handle concurrent status updates + Eventually(func() error { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "worker-update-noop-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return err + } + updatedLT.Spec.Worker.Replicas = 20 + return k8sClient.Update(ctx, updatedLT) + }, timeout, interval).Should(Succeed()) + + // Worker Job should remain unchanged + Consistently(func() int32 { + job := &batchv1.Job{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "worker-update-noop-test-worker", Namespace: testNamespace, + }, job) + if err != nil { + return -1 + } + return *job.Spec.Parallelism + }, timeout/2, interval).Should(Equal(int32(5))) // Still 5, not 20 + + // UID should be the same (same Job, not recreated) + finalJob := &batchv1.Job{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: 
"worker-update-noop-test-worker", Namespace: testNamespace, + }, finalJob)).To(Succeed()) + + Expect(finalJob.UID).To(Equal(originalUID)) + }) + + It("should NOT modify master Job when masterCommandSeed is changed", func() { + lt := createLocustTest("master-cmd-update-noop-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for master Job + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "master-cmd-update-noop-test-master", Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + originalUID := masterJob.UID + + // Wait for status to be initialized + Eventually(func() string { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "master-cmd-update-noop-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return "" + } + return string(updatedLT.Status.Phase) + }, timeout, interval).Should(Equal(string(locustv2.PhaseRunning))) + + // Update masterCommand using Eventually to handle concurrent status updates + Eventually(func() error { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "master-cmd-update-noop-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return err + } + updatedLT.Spec.Master.Command = "locust -f /lotest/src/new_test.py" + return k8sClient.Update(ctx, updatedLT) + }, timeout, interval).Should(Succeed()) + + // Master Job UID should remain unchanged + Consistently(func() types.UID { + job := &batchv1.Job{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "master-cmd-update-noop-test-master", Namespace: testNamespace, + }, job) + if err != nil { + return "" + } + return job.UID + }, timeout/2, interval).Should(Equal(originalUID)) + }) + }) + + // ==================== DELETE FLOW TESTS ==================== + Describe("Delete Flow", func() { + // Note: envtest does not have a garbage collection controller, so we 
cannot + // test automatic cascade deletion. Instead, we verify owner references are + // correctly set (tested in Create Flow) and that the CR can be deleted. + It("should delete LocustTest CR successfully", func() { + lt := createLocustTest("delete-cr-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for resources to be created + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "delete-cr-test-master", Namespace: testNamespace, + }, &batchv1.Job{}) + }, timeout, interval).Should(Succeed()) + + // Delete the LocustTest CR + Expect(k8sClient.Delete(ctx, lt)).To(Succeed()) + + // Verify CR is deleted + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "delete-cr-test", Namespace: testNamespace, + }, &locustv2.LocustTest{}) + return err != nil // Should be NotFound + }, timeout, interval).Should(BeTrue()) + }) + + It("should handle deletion of non-existent LocustTest gracefully", func() { + // This tests that the reconciler handles NotFound errors + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nonexistent-test", + Namespace: testNamespace, + }, + } + + // Attempting to delete non-existent resource should not cause issues + err := k8sClient.Delete(ctx, lt) + // The error should indicate not found, which is fine + Expect(err).To(HaveOccurred()) // NotFound is expected + }) + }) + + // ==================== POD HEALTH MONITORING TESTS ==================== + Describe("Pod Health Monitoring", func() { + It("should detect pod failures and update PodsHealthy condition", func() { + lt := createLocustTest("pod-health-crashloop-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for the master Job to be created + masterJob := &batchv1.Job{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "pod-health-crashloop-test-master", + Namespace: testNamespace, + }, masterJob) + }, timeout, interval).Should(Succeed()) + + // 
Wait for LocustTest status.phase to become Running + Eventually(func() string { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "pod-health-crashloop-test", + Namespace: testNamespace, + }, updatedLT); err != nil { + return "" + } + return string(updatedLT.Status.Phase) + }, timeout, interval).Should(Equal(string(locustv2.PhaseRunning))) + + // Create a pod that simulates a CrashLoopBackOff failure + // In envtest, we can't override CreationTimestamp, so we'll verify the + // controller watches pods and updates conditions (initially in grace period) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-health-crashloop-test-master-pod", + Namespace: testNamespace, + Labels: map[string]string{ + "performance-test-name": "pod-health-crashloop-test", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: masterJob.Name, + UID: masterJob.UID, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "locust", + Image: "locustio/locust:latest", + }, + }, + }, + } + + // Create the pod + Expect(k8sClient.Create(ctx, pod)).To(Succeed()) + + // Update pod status to CrashLoopBackOff + Eventually(func() error { + createdPod := &corev1.Pod{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "pod-health-crashloop-test-master-pod", + Namespace: testNamespace, + }, createdPod); err != nil { + return err + } + + createdPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Name: "locust", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "CrashLoopBackOff", + Message: "Back-off restarting failed container", + }, + }, + Ready: false, + }, + } + createdPod.Status.Phase = corev1.PodFailed + + return k8sClient.Status().Update(ctx, createdPod) + }, timeout, interval).Should(Succeed()) + + // Wait for the LocustTest status conditions to include PodsHealthy + // During the 2-minute grace period, it 
will be True with PodsStarting reason + Eventually(func() bool { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "pod-health-crashloop-test", + Namespace: testNamespace, + }, updatedLT); err != nil { + return false + } + + // Find the PodsHealthy condition + for _, condition := range updatedLT.Status.Conditions { + if condition.Type == locustv2.ConditionTypePodsHealthy { + // Verify condition exists with PodsStarting (in grace period) + // This proves the controller watches pods and updates status + return condition.Reason == locustv2.ReasonPodsStarting + } + } + return false + }, timeout, interval).Should(BeTrue()) + + // Additional assertion: verify the condition details + finalLT := &locustv2.LocustTest{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: "pod-health-crashloop-test", + Namespace: testNamespace, + }, finalLT)).To(Succeed()) + + var podsHealthyCondition *metav1.Condition + for i := range finalLT.Status.Conditions { + if finalLT.Status.Conditions[i].Type == locustv2.ConditionTypePodsHealthy { + podsHealthyCondition = &finalLT.Status.Conditions[i] + break + } + } + + // Verify the PodsHealthy condition exists + Expect(podsHealthyCondition).NotTo(BeNil()) + // During grace period it should be True with PodsStarting reason + Expect(podsHealthyCondition.Status).To(Equal(metav1.ConditionTrue)) + Expect(podsHealthyCondition.Reason).To(Equal(locustv2.ReasonPodsStarting)) + // This validates that the controller detected the pod with CrashLoopBackOff + // and applied the grace period logic (which would become False after 2 minutes) + }) + }) + + // ==================== ERROR HANDLING TESTS ==================== + Describe("Error Handling", func() { + It("should handle idempotent resource creation", func() { + lt := createLocustTest("idempotent-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Wait for resources and status to be updated + Eventually(func() error { + return 
k8sClient.Get(ctx, types.NamespacedName{ + Name: "idempotent-test-master", Namespace: testNamespace, + }, &batchv1.Job{}) + }, timeout, interval).Should(Succeed()) + + // Wait for status to be initialized + Eventually(func() string { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "idempotent-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return "" + } + return string(updatedLT.Status.Phase) + }, timeout, interval).Should(Equal(string(locustv2.PhaseRunning))) + + // Manually trigger another reconciliation by adding an annotation + // Use Eventually to handle concurrent status updates + Eventually(func() error { + updatedLT := &locustv2.LocustTest{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "idempotent-test", Namespace: testNamespace, + }, updatedLT); err != nil { + return err + } + + if updatedLT.Annotations == nil { + updatedLT.Annotations = make(map[string]string) + } + updatedLT.Annotations["test-trigger"] = "reconcile" + return k8sClient.Update(ctx, updatedLT) + }, timeout, interval).Should(Succeed()) + + // Should not fail - resources already exist + Consistently(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "idempotent-test-master", Namespace: testNamespace, + }, &batchv1.Job{}) + }, timeout/2, interval).Should(Succeed()) + }) + + It("should create resources in different namespaces independently", func() { + // Create second namespace + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace + "-2", + }, + } + Expect(k8sClient.Create(ctx, ns2)).To(Succeed()) + defer func() { + _ = k8sClient.Delete(ctx, ns2) + }() + + // Create LocustTest in both namespaces with same name + lt1 := createLocustTest("same-name-test") + Expect(k8sClient.Create(ctx, lt1)).To(Succeed()) + + lt2 := createLocustTest("same-name-test") + lt2.Namespace = testNamespace + "-2" + Expect(k8sClient.Create(ctx, lt2)).To(Succeed()) + + // Both should 
create their resources independently + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "same-name-test-master", Namespace: testNamespace, + }, &batchv1.Job{}) + }, timeout, interval).Should(Succeed()) + + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "same-name-test-master", Namespace: testNamespace + "-2", + }, &batchv1.Job{}) + }, timeout, interval).Should(Succeed()) + }) + + It("should handle rapid create/delete cycles", func() { + // Create and immediately delete + lt := createLocustTest("rapid-cycle-test") + Expect(k8sClient.Create(ctx, lt)).To(Succeed()) + + // Delete immediately without waiting for resources + Expect(k8sClient.Delete(ctx, lt)).To(Succeed()) + + // Verify CR is deleted + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "rapid-cycle-test", Namespace: testNamespace, + }, &locustv2.LocustTest{}) + return err != nil + }, timeout, interval).Should(BeTrue()) + + // Create again with same name - should work + lt2 := createLocustTest("rapid-cycle-test") + Expect(k8sClient.Create(ctx, lt2)).To(Succeed()) + + // Should be able to create resources + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{ + Name: "rapid-cycle-test-master", Namespace: testNamespace, + }, &batchv1.Job{}) + }, timeout, interval).Should(Succeed()) + }) + }) +}) diff --git a/internal/controller/locusttest_controller.go b/internal/controller/locusttest_controller.go new file mode 100644 index 00000000..6751e858 --- /dev/null +++ b/internal/controller/locusttest_controller.go @@ -0,0 +1,470 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/resources" +) + +const finalizerName = "locust.io/cleanup" + +// LocustTestReconciler reconciles a LocustTest object +type LocustTestReconciler struct { + client.Client + Scheme *runtime.Scheme + Config *config.OperatorConfig + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=locust.io,resources=locusttests,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=locust.io,resources=locusttests/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=locust.io,resources=locusttests/finalizers,verbs=update +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;delete +// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;delete +// 
+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// Reconcile handles LocustTest CR events. +// On creation: Creates master Service, master Job, and worker Job. +// On update: NO-OP by design (tests are immutable). +// On deletion: Finalizer emits log + Event, then cleanup via owner references. +func (r *LocustTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + // Fetch the LocustTest CR + locustTest := &locustv2.LocustTest{} + if err := r.Get(ctx, req.NamespacedName, locustTest); err != nil { + if apierrors.IsNotFound(err) { + // CR deleted - nothing to do (cleanup via owner references) + log.V(1).Info("LocustTest not found, likely deleted") + return ctrl.Result{}, nil + } + log.Error(err, "Failed to fetch LocustTest") + return ctrl.Result{}, err + } + + // Handle deletion: finalizer ensures visible logs and events + if !locustTest.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(locustTest, finalizerName) { + log.Info("LocustTest deleted, cleaning up resources via owner references", + "name", locustTest.Name, + "namespace", locustTest.Namespace) + r.Recorder.Event(locustTest, corev1.EventTypeNormal, "Deleting", + "LocustTest and owned resources being cleaned up") + controllerutil.RemoveFinalizer(locustTest, finalizerName) + if err := r.Update(ctx, locustTest); err != nil { + log.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer: %w", err) + } + } + return ctrl.Result{}, nil + } + + // Add finalizer on first reconcile if not present + if !controllerutil.ContainsFinalizer(locustTest, finalizerName) { + controllerutil.AddFinalizer(locustTest, finalizerName) + if err := r.Update(ctx, locustTest); err != nil { + log.Error(err, "Failed to add finalizer") + return ctrl.Result{}, fmt.Errorf("failed to add finalizer: %w", err) + } + if err := 
r.Get(ctx, req.NamespacedName, locustTest); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to re-fetch after adding finalizer: %w", err) + } + } + + // Initialize status on first reconcile + if locustTest.Status.Phase == "" { + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, req.NamespacedName, locustTest); err != nil { + return err + } + r.initializeStatus(locustTest) + return r.Status().Update(ctx, locustTest) + }); err != nil { + log.Error(err, "Failed to initialize status") + return ctrl.Result{}, fmt.Errorf("failed to initialize status: %w", err) + } + // Re-fetch after status update to get the latest resource version + if err := r.Get(ctx, req.NamespacedName, locustTest); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to re-fetch LocustTest after status update: %w", err) + } + } + + // If resources already exist (Phase is Running or terminal), check Job status + // This handles reconciles triggered by Job status changes + if locustTest.Status.Phase == locustv2.PhaseRunning || + locustTest.Status.Phase == locustv2.PhaseSucceeded || + locustTest.Status.Phase == locustv2.PhaseFailed { + return r.reconcileStatus(ctx, locustTest) + } + + // Phase == Pending: create resources + // Log informational message if this is a spec update (generation > 1) + // The phase-based state machine handles this correctly β€” Pending always creates + if locustTest.Generation > 1 { + log.V(1).Info("Update operations on LocustTest are not supported by design", + "name", locustTest.Name, + "namespace", locustTest.Namespace) + } + + // On initial creation or pending (Phase is Pending) + log.Info("LocustTest created", + "name", locustTest.Name, + "namespace", locustTest.Namespace) + + // Log detailed CR information (debug level) + var configMapRef string + if locustTest.Spec.TestFiles != nil { + configMapRef = locustTest.Spec.TestFiles.ConfigMapRef + } + log.V(1).Info("Custom resource information", + "image", 
locustTest.Spec.Image, + "masterCommand", locustTest.Spec.Master.Command, + "workerCommand", locustTest.Spec.Worker.Command, + "workerReplicas", locustTest.Spec.Worker.Replicas, + "configMap", configMapRef) + + // Create resources + return r.createResources(ctx, locustTest) +} + +// createResources creates the master Service, master Job, and worker Job. +// Resources are created with owner references for automatic garbage collection. +func (r *LocustTestReconciler) createResources(ctx context.Context, lt *locustv2.LocustTest) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + // Build resources using resource builders from Phase 3 + masterService := resources.BuildMasterService(lt, r.Config) + masterJob := resources.BuildMasterJob(lt, r.Config, log) + workerJob := resources.BuildWorkerJob(lt, r.Config, log) + + // Create master Service + if err := r.createResource(ctx, lt, masterService, "Service"); err != nil { + return ctrl.Result{}, err + } + log.V(1).Info("Master Service reconciled", "name", masterService.Name) + + // Create master Job + if err := r.createResource(ctx, lt, masterJob, "Job"); err != nil { + return ctrl.Result{}, err + } + log.V(1).Info("Master Job reconciled", "name", masterJob.Name) + + // Create worker Job + if err := r.createResource(ctx, lt, workerJob, "Job"); err != nil { + return ctrl.Result{}, err + } + log.V(1).Info("Worker Job reconciled", "name", workerJob.Name) + + log.Info("All resources created successfully", + "locustTest", lt.Name, + "masterService", masterService.Name, + "masterJob", masterJob.Name, + "workerJob", workerJob.Name) + + // Update status after successful resource creation (with conflict retry) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, client.ObjectKeyFromObject(lt), lt); err != nil { + return err + } + lt.Status.Phase = locustv2.PhaseRunning + lt.Status.ObservedGeneration = lt.Generation + if lt.Status.StartTime == nil { + now := metav1.Now() + 
lt.Status.StartTime = &now + } + r.setReady(lt, true, locustv2.ReasonResourcesCreated, "All resources created") + return r.Status().Update(ctx, lt) + }); err != nil { + log.Error(err, "Failed to update status after resource creation") + return ctrl.Result{}, fmt.Errorf("failed to update status after resource creation: %w", err) + } + + return ctrl.Result{}, nil +} + +// createResource creates a Kubernetes resource with owner reference set. +// If the resource already exists, it logs and returns success (idempotent). +func (r *LocustTestReconciler) createResource(ctx context.Context, lt *locustv2.LocustTest, obj client.Object, kind string) error { + log := logf.FromContext(ctx) + + // Set owner reference for automatic garbage collection + if err := controllerutil.SetControllerReference(lt, obj, r.Scheme); err != nil { + log.Error(err, "Failed to set owner reference", + "kind", kind, + "name", obj.GetName()) + return err + } + + // Create the resource + if err := r.Create(ctx, obj); err != nil { + if apierrors.IsAlreadyExists(err) { + // Resource already exists - this is fine (idempotent) + log.V(1).Info("Resource already exists", + "kind", kind, + "name", obj.GetName()) + return nil + } + log.Error(err, "Failed to create resource", + "kind", kind, + "name", obj.GetName()) + return err + } + + // Record event for successful creation + r.Recorder.Event(lt, corev1.EventTypeNormal, "Created", + fmt.Sprintf("Created %s %s", kind, obj.GetName())) + + log.Info("Created resource", + "kind", kind, + "name", obj.GetName(), + "namespace", obj.GetNamespace()) + + return nil +} + +// reconcileStatus updates the LocustTest status based on owned Job states. +// Called when resources already exist and we need to track Job completion. 
+func (r *LocustTestReconciler) reconcileStatus(ctx context.Context, lt *locustv2.LocustTest) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + // Check for externally deleted Service + masterServiceName := lt.Name + "-master" + masterService := &corev1.Service{} + if err := r.Get(ctx, client.ObjectKey{Name: masterServiceName, Namespace: lt.Namespace}, masterService); err != nil { + if apierrors.IsNotFound(err) { + // Service was externally deleted β€” transition to Pending for recovery + log.Info("Master Service externally deleted, transitioning to Pending for recovery", + "service", masterServiceName) + r.Recorder.Event(lt, corev1.EventTypeWarning, "ResourceDeleted", + fmt.Sprintf("Master Service %s was deleted externally, will attempt recreation", masterServiceName)) + + // Reset to Pending to trigger resource recreation on next reconcile + log.Info("Attempting to update status to Pending after external deletion", + "currentPhase", lt.Status.Phase, + "generation", lt.Generation, + "observedGeneration", lt.Status.ObservedGeneration) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, client.ObjectKeyFromObject(lt), lt); err != nil { + log.Error(err, "Failed to re-fetch LocustTest during status update retry") + return err + } + log.V(1).Info("Re-fetched LocustTest for status update", + "resourceVersion", lt.ResourceVersion, + "phase", lt.Status.Phase) + lt.Status.Phase = locustv2.PhasePending + lt.Status.ObservedGeneration = lt.Generation + r.setReady(lt, false, locustv2.ReasonResourcesCreating, "Recreating externally deleted resources") + return r.Status().Update(ctx, lt) + }); err != nil { + log.Error(err, "Failed to update status after detecting Service deletion", + "service", masterServiceName, + "retryAttempts", "exhausted") + // Still requeue to retry the entire reconciliation + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + log.Info("Successfully updated status to Pending, will recreate 
Service", + "service", masterServiceName) + return ctrl.Result{RequeueAfter: time.Second}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to get master Service: %w", err) + } + + // Don't update if already in terminal state (unless resources are missing β€” handled above) + if lt.Status.Phase == locustv2.PhaseSucceeded || lt.Status.Phase == locustv2.PhaseFailed { + return ctrl.Result{}, nil + } + + // Fetch master Job to determine status + masterJob := &batchv1.Job{} + masterJobName := lt.Name + "-master" + if err := r.Get(ctx, client.ObjectKey{Name: masterJobName, Namespace: lt.Namespace}, masterJob); err != nil { + if apierrors.IsNotFound(err) { + // Job was externally deleted β€” same recovery pattern + log.Info("Master Job externally deleted, transitioning to Pending for recovery", + "job", masterJobName) + r.Recorder.Event(lt, corev1.EventTypeWarning, "ResourceDeleted", + fmt.Sprintf("Master Job %s was deleted externally, will attempt recreation", masterJobName)) + + log.Info("Attempting to update status to Pending after external deletion", + "currentPhase", lt.Status.Phase, + "generation", lt.Generation, + "observedGeneration", lt.Status.ObservedGeneration) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, client.ObjectKeyFromObject(lt), lt); err != nil { + log.Error(err, "Failed to re-fetch LocustTest during status update retry") + return err + } + log.V(1).Info("Re-fetched LocustTest for status update", + "resourceVersion", lt.ResourceVersion, + "phase", lt.Status.Phase) + lt.Status.Phase = locustv2.PhasePending + lt.Status.ObservedGeneration = lt.Generation + r.setReady(lt, false, locustv2.ReasonResourcesCreating, "Recreating externally deleted resources") + return r.Status().Update(ctx, lt) + }); err != nil { + log.Error(err, "Failed to update status after detecting master Job deletion", + "job", masterJobName, + "retryAttempts", "exhausted") + // Still requeue to retry the entire reconciliation + return 
ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + log.Info("Successfully updated status to Pending, will recreate master Job", + "job", masterJobName) + return ctrl.Result{RequeueAfter: time.Second}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to get master Job: %w", err) + } + + // Fetch worker Job for worker count + workerJob := &batchv1.Job{} + workerJobName := lt.Name + "-worker" + if err := r.Get(ctx, client.ObjectKey{Name: workerJobName, Namespace: lt.Namespace}, workerJob); err != nil { + if apierrors.IsNotFound(err) { + // Worker Job externally deleted β€” recovery + log.Info("Worker Job externally deleted, transitioning to Pending for recovery", + "job", workerJobName) + r.Recorder.Event(lt, corev1.EventTypeWarning, "ResourceDeleted", + fmt.Sprintf("Worker Job %s was deleted externally, will attempt recreation", workerJobName)) + + log.Info("Attempting to update status to Pending after external deletion", + "currentPhase", lt.Status.Phase, + "generation", lt.Generation, + "observedGeneration", lt.Status.ObservedGeneration) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, client.ObjectKeyFromObject(lt), lt); err != nil { + log.Error(err, "Failed to re-fetch LocustTest during status update retry") + return err + } + log.V(1).Info("Re-fetched LocustTest for status update", + "resourceVersion", lt.ResourceVersion, + "phase", lt.Status.Phase) + lt.Status.Phase = locustv2.PhasePending + lt.Status.ObservedGeneration = lt.Generation + r.setReady(lt, false, locustv2.ReasonResourcesCreating, "Recreating externally deleted resources") + return r.Status().Update(ctx, lt) + }); err != nil { + log.Error(err, "Failed to update status after detecting worker Job deletion", + "job", workerJobName, + "retryAttempts", "exhausted") + // Still requeue to retry the entire reconciliation + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + log.Info("Successfully updated status to Pending, will recreate worker Job", 
+ "job", workerJobName) + return ctrl.Result{RequeueAfter: time.Second}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to get worker Job: %w", err) + } + + // Check pod health before updating status from Jobs + podHealthStatus, requeueAfter := r.checkPodHealth(ctx, lt) + + // Update status from Jobs (pass pod health to update logic) + if err := r.updateStatusFromJobs(ctx, lt, masterJob, workerJob, podHealthStatus); err != nil { + log.Error(err, "Failed to update status from Jobs") + return ctrl.Result{}, fmt.Errorf("failed to update status from Jobs: %w", err) + } + + // Requeue if pods are in grace period + if requeueAfter > 0 { + return ctrl.Result{RequeueAfter: requeueAfter}, nil + } + + return ctrl.Result{}, nil +} + +// mapPodToLocustTest maps a Pod event to the owning LocustTest reconcile request. +// Pods are owned by Jobs (Podβ†’Job), and Jobs are owned by LocustTests (Jobβ†’LocustTest). +// This function traverses the two-level owner reference chain: Pod β†’ Job β†’ LocustTest. 
+func (r *LocustTestReconciler) mapPodToLocustTest(ctx context.Context, obj client.Object) []reconcile.Request { + log := logf.FromContext(ctx) + + // Step 1: Find the owning Job from the pod's owner references + var jobName string + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind == "Job" && ref.APIVersion == "batch/v1" { + jobName = ref.Name + break + } + } + if jobName == "" { + return nil + } + + // Step 2: Fetch the Job to find the owning LocustTest + job := &batchv1.Job{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: obj.GetNamespace(), + Name: jobName, + }, job); err != nil { + log.V(1).Info("Failed to fetch Job for pod mapping", "pod", obj.GetName(), "job", jobName, "error", err) + return nil + } + + // Step 3: Find the LocustTest owner from the Job's owner references + for _, ref := range job.GetOwnerReferences() { + if ref.Kind == "LocustTest" { + log.V(1).Info("Mapped pod to LocustTest", "pod", obj.GetName(), "job", jobName, "locustTest", ref.Name) + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: ref.Name, + }, + }, + } + } + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *LocustTestReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&locustv2.LocustTest{}). + Owns(&batchv1.Job{}). // Watch owned Jobs for status updates + Owns(&corev1.Service{}). // Watch owned Services + Watches( // Watch pods via custom mapping (pods are owned by Jobs, not LocustTest) + &corev1.Pod{}, + handler.EnqueueRequestsFromMapFunc(r.mapPodToLocustTest), + ). + Named("locusttest"). + Complete(r) +} diff --git a/internal/controller/locusttest_controller_test.go b/internal/controller/locusttest_controller_test.go new file mode 100644 index 00000000..22b5801a --- /dev/null +++ b/internal/controller/locusttest_controller_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2026. 

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2"
)

// Integration-style spec for the LocustTest controller: a CR is created in
// "default", and the suite asserts the manager materializes the master
// Service, master Job and worker Job, and drives the phase to Running.
// NOTE(review): k8sClient, timeout and interval are assumed to come from the
// suite setup (suite_test.go, not shown here) — confirm.
var _ = Describe("LocustTest Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		// Key of the CR under test; all owned resources derive their names
		// from resourceName ("-master" / "-worker" suffixes).
		typeNamespacedName := types.NamespacedName{
			Name:      resourceName,
			Namespace: "default",
		}
		locusttest := &locustv2.LocustTest{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind LocustTest")
			// Create the CR only if it does not already exist.
			err := k8sClient.Get(ctx, typeNamespacedName, locusttest)
			if err != nil && errors.IsNotFound(err) {
				resource := &locustv2.LocustTest{
					ObjectMeta: metav1.ObjectMeta{
						Name:      resourceName,
						Namespace: "default",
					},
					Spec: locustv2.LocustTestSpec{
						Image: "locustio/locust:latest",
						Master: locustv2.MasterSpec{
							Command: "--locustfile /lotest/src/test.py --host https://example.com",
						},
						Worker: locustv2.WorkerSpec{
							Command:  "--locustfile /lotest/src/test.py",
							Replicas: 1,
						},
					},
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			resource := &locustv2.LocustTest{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance LocustTest")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
			// Wait for deletion to be observed before the next spec runs.
			Eventually(func() bool {
				err := k8sClient.Get(ctx, typeNamespacedName, &locustv2.LocustTest{})
				return errors.IsNotFound(err)
			}, timeout, interval).Should(BeTrue())
		})
		It("should successfully reconcile the resource", func() {
			By("Waiting for the manager to reconcile and create resources")

			// Wait for master Service to be created by the manager
			svc := &corev1.Service{}
			Eventually(func() error {
				return k8sClient.Get(ctx, types.NamespacedName{
					Name:      resourceName + "-master",
					Namespace: "default",
				}, svc)
			}, timeout, interval).Should(Succeed())

			// Wait for master Job to be created
			masterJob := &batchv1.Job{}
			Eventually(func() error {
				return k8sClient.Get(ctx, types.NamespacedName{
					Name:      resourceName + "-master",
					Namespace: "default",
				}, masterJob)
			}, timeout, interval).Should(Succeed())

			// Wait for worker Job to be created
			workerJob := &batchv1.Job{}
			Eventually(func() error {
				return k8sClient.Get(ctx, types.NamespacedName{
					Name:      resourceName + "-worker",
					Namespace: "default",
				}, workerJob)
			}, timeout, interval).Should(Succeed())

			// Verify status was updated
			lt := &locustv2.LocustTest{}
			Eventually(func() string {
				err := k8sClient.Get(ctx, typeNamespacedName, lt)
				if err != nil {
					return ""
				}
				return string(lt.Status.Phase)
			}, timeout, interval).Should(Equal(string(locustv2.PhaseRunning)))
		})
	})
})
diff --git a/internal/controller/locusttest_controller_unit_test.go b/internal/controller/locusttest_controller_unit_test.go
new file mode 100644
index 00000000..539f7de0
--- /dev/null
+++ b/internal/controller/locusttest_controller_unit_test.go
@@ -0,0 +1,1605 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2"
	"github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config"
)

// newTestScheme creates a scheme with all required types registered.
// Registration errors are ignored because these AddToScheme calls cannot fail
// for well-formed builtin/CRD types in a test context.
func newTestScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	_ = locustv2.AddToScheme(scheme)
	_ = batchv1.AddToScheme(scheme)
	_ = corev1.AddToScheme(scheme)
	return scheme
}

// newTestReconciler creates a reconciler with a fake client for testing.
// objs are seeded into the fake client's tracker. The LocustTest status
// subresource is enabled so Status().Update() behaves like a real API server.
// The returned FakeRecorder (buffer size 10) lets tests assert emitted events.
func newTestReconciler(objs ...client.Object) (*LocustTestReconciler, *record.FakeRecorder) {
	scheme := newTestScheme()
	fakeClient := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(objs...).
		WithStatusSubresource(&locustv2.LocustTest{}).
		Build()
	recorder := record.NewFakeRecorder(10)

	return &LocustTestReconciler{
		Client:   fakeClient,
		Scheme:   scheme,
		Config:   newTestOperatorConfig(),
		Recorder: recorder,
	}, recorder
}

// newTestOperatorConfig creates a test operator configuration.
// Values mirror typical Helm-chart defaults: pod resources, metrics-exporter
// sidecar settings, and Kafka connection parameters (security disabled).
func newTestOperatorConfig() *config.OperatorConfig {
	return &config.OperatorConfig{
		PodCPURequest:              "250m",
		PodMemRequest:              "128Mi",
		PodEphemeralStorageRequest: "30M",
		PodCPULimit:                "1000m",
		PodMemLimit:                "1024Mi",
		PodEphemeralStorageLimit:   "50M",

		MetricsExporterImage:                   "containersol/locust_exporter:v0.5.0",
		MetricsExporterPort:                    9646,
		MetricsExporterPullPolicy:              "Always",
		MetricsExporterCPURequest:              "250m",
		MetricsExporterMemRequest:              "128Mi",
		MetricsExporterEphemeralStorageRequest: "30M",
		MetricsExporterCPULimit:                "1000m",
		MetricsExporterMemLimit:                "1024Mi",
		MetricsExporterEphemeralStorageLimit:   "50M",

		KafkaBootstrapServers: "localhost:9092",
		KafkaSecurityEnabled:  false,
	}
}

// newTestLocustTestCR creates a test LocustTest CR.
+func newTestLocustTestCR(name, namespace string) *locustv2.LocustTest { + return &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + UID: "test-uid-12345", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + TestFiles: &locustv2.TestFilesConfig{ + ConfigMapRef: "test-configmap", + }, + }, + } +} + +func TestReconcile_NotFound(t *testing.T) { + reconciler, _ := newTestReconciler() + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "nonexistent", + Namespace: "default", + }, + }) + + assert.NoError(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_CreateResources(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, recorder := newTestReconciler(lt) + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + + assert.NoError(t, err) + assert.Equal(t, ctrl.Result{}, result) + + // Verify Service created + svc := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, svc) + assert.NoError(t, err) + assert.Equal(t, "my-test-master", svc.Name) + + // Verify master Job created + masterJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterJob) + assert.NoError(t, err) + + // Verify worker Job created + workerJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-worker", + Namespace: "default", + }, workerJob) + assert.NoError(t, err) + + // Verify events 
recorded (3 resources created) + eventCount := 0 + for { + select { + case event := <-recorder.Events: + assert.Contains(t, event, "Created") + eventCount++ + default: + goto done + } + } +done: + assert.Equal(t, 3, eventCount, "Expected 3 creation events") +} + +func TestReconcile_PendingPhaseCreatesResourcesRegardlessOfGeneration(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + lt.Generation = 2 // Simulates an update while still in Pending phase + reconciler, _ := newTestReconciler(lt) + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + + assert.NoError(t, err) + assert.Equal(t, ctrl.Result{}, result) + + // Verify resources ARE created (fixes operator-restart edge case) + svc := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, svc) + assert.NoError(t, err, "Service should be created even with generation > 1 when phase is Pending") + + // Verify master Job created + masterJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterJob) + assert.NoError(t, err, "Master Job should be created") +} + +func TestReconcile_OwnerReferences(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify owner reference on Service + svc := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, svc) + require.NoError(t, err) + + require.Len(t, svc.OwnerReferences, 1) + assert.Equal(t, "my-test", svc.OwnerReferences[0].Name) + 
assert.Equal(t, "LocustTest", svc.OwnerReferences[0].Kind) + + // Verify owner reference on master Job + masterJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterJob) + require.NoError(t, err) + require.Len(t, masterJob.OwnerReferences, 1) + assert.Equal(t, "my-test", masterJob.OwnerReferences[0].Name) + + // Verify owner reference on worker Job + workerJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "my-test-worker", + Namespace: "default", + }, workerJob) + require.NoError(t, err) + require.Len(t, workerJob.OwnerReferences, 1) + assert.Equal(t, "my-test", workerJob.OwnerReferences[0].Name) +} + +func TestReconcile_IdempotentCreate(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, _ := newTestReconciler(lt) + + // First reconcile - creates resources + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Second reconcile with same generation should handle existing resources gracefully + _, err = reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + assert.NoError(t, err, "Should not error even if resources exist") +} + +func TestReconcile_WithDifferentGenerations(t *testing.T) { + tests := []struct { + name string + generation int64 + }{ + { + name: "generation 1 pending creates resources", + generation: 1, + }, + { + name: "generation 2 pending creates resources", + generation: 2, + }, + { + name: "generation 10 pending creates resources", + generation: 10, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lt := newTestLocustTestCR("test-gen", "default") + lt.Generation = tt.generation + // Phase defaults to empty string 
(Pending) from newTestLocustTestCR + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-gen", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // All pending CRs create resources regardless of generation + svc := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "test-gen-master", + Namespace: "default", + }, svc) + assert.NoError(t, err, "Service should be created for Pending phase regardless of generation") + }) + } +} + +func TestReconcile_VerifyServiceConfiguration(t *testing.T) { + lt := newTestLocustTestCR("svc-test", "default") + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "svc-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + svc := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "svc-test-master", + Namespace: "default", + }, svc) + require.NoError(t, err) + + // Service type defaults to ClusterIP in K8s (empty string in fake client is OK) + // The actual K8s API will default empty to ClusterIP + + // Verify service has correct selector + assert.Equal(t, "svc-test-master", svc.Spec.Selector["performance-test-pod-name"]) + + // Verify service has ports (3 ports: 5557, 5558, metrics) + assert.Len(t, svc.Spec.Ports, 3) +} + +func TestReconcile_VerifyMasterJobConfiguration(t *testing.T) { + lt := newTestLocustTestCR("job-test", "default") + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "job-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + job := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "job-test-master", + Namespace: "default", + }, 
job) + require.NoError(t, err) + + // Verify master parallelism is 1 + require.NotNil(t, job.Spec.Parallelism) + assert.Equal(t, int32(1), *job.Spec.Parallelism) + + // Verify master has 2 containers (locust + metrics exporter) + assert.Len(t, job.Spec.Template.Spec.Containers, 2) + + // Verify RestartPolicy is Never + assert.Equal(t, corev1.RestartPolicyNever, job.Spec.Template.Spec.RestartPolicy) +} + +func TestReconcile_VerifyWorkerJobConfiguration(t *testing.T) { + lt := newTestLocustTestCR("worker-test", "default") + lt.Spec.Worker.Replicas = 5 + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "worker-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + job := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "worker-test-worker", + Namespace: "default", + }, job) + require.NoError(t, err) + + // Verify worker parallelism equals WorkerReplicas + require.NotNil(t, job.Spec.Parallelism) + assert.Equal(t, int32(5), *job.Spec.Parallelism) + + // Verify worker has 1 container (no metrics exporter) + assert.Len(t, job.Spec.Template.Spec.Containers, 1) +} + +func TestReconcile_EventRecording(t *testing.T) { + lt := newTestLocustTestCR("event-test", "default") + reconciler, recorder := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "event-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Collect all events + var events []string + for { + select { + case event := <-recorder.Events: + events = append(events, event) + default: + goto done + } + } +done: + + // Should have 3 events: Service, Master Job, Worker Job + assert.Len(t, events, 3) + + // Verify event content + serviceEventFound := false + masterJobEventFound := false + workerJobEventFound := false + + for _, event := range events { 
+ if strings.Contains(event, "Service") && strings.Contains(event, "event-test-master") { + serviceEventFound = true + } + if strings.Contains(event, "Job") && strings.Contains(event, "event-test-master") { + masterJobEventFound = true + } + if strings.Contains(event, "Job") && strings.Contains(event, "event-test-worker") { + workerJobEventFound = true + } + } + + assert.True(t, serviceEventFound, "Service creation event should be recorded") + assert.True(t, masterJobEventFound, "Master Job creation event should be recorded") + assert.True(t, workerJobEventFound, "Worker Job creation event should be recorded") +} + +func TestReconcile_WithCustomLabels(t *testing.T) { + lt := newTestLocustTestCR("label-test", "default") + lt.Spec.Master.Labels = map[string]string{ + "custom-label": "master-value", + } + lt.Spec.Worker.Labels = map[string]string{ + "custom-label": "worker-value", + } + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "label-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify master job has custom label + masterJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "label-test-master", + Namespace: "default", + }, masterJob) + require.NoError(t, err) + assert.Equal(t, "master-value", masterJob.Spec.Template.Labels["custom-label"]) + + // Verify worker job has custom label + workerJob := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "label-test-worker", + Namespace: "default", + }, workerJob) + require.NoError(t, err) + assert.Equal(t, "worker-value", workerJob.Spec.Template.Labels["custom-label"]) +} + +func TestReconcile_WithImagePullSecrets(t *testing.T) { + lt := newTestLocustTestCR("secret-test", "default") + lt.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: "my-registry-secret"}, + } + reconciler, _ := 
newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "secret-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + job := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "secret-test-master", + Namespace: "default", + }, job) + require.NoError(t, err) + + require.Len(t, job.Spec.Template.Spec.ImagePullSecrets, 1) + assert.Equal(t, "my-registry-secret", job.Spec.Template.Spec.ImagePullSecrets[0].Name) +} + +func TestReconcile_WithLibConfigMap(t *testing.T) { + lt := newTestLocustTestCR("lib-test", "default") + lt.Spec.TestFiles.LibConfigMapRef = "locust-lib" + reconciler, _ := newTestReconciler(lt) + + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "lib-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + job := &batchv1.Job{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "lib-test-master", + Namespace: "default", + }, job) + require.NoError(t, err) + + // Should have 2 volumes: configmap and lib + assert.Len(t, job.Spec.Template.Spec.Volumes, 2) + + // Find lib volume (LibVolumeName constant is "locust-lib") + var libVolumeFound bool + for _, v := range job.Spec.Template.Spec.Volumes { + if v.Name == "locust-lib" { + libVolumeFound = true + assert.Equal(t, "locust-lib", v.ConfigMap.Name) + } + } + assert.True(t, libVolumeFound, "Lib volume should exist") +} + +func TestReconcile_MultipleNamespaces(t *testing.T) { + lt1 := newTestLocustTestCR("test1", "namespace-a") + lt2 := newTestLocustTestCR("test2", "namespace-b") + reconciler, _ := newTestReconciler(lt1, lt2) + + // Reconcile first CR + _, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test1", + Namespace: "namespace-a", + }, + }) + require.NoError(t, err) + + // Reconcile second CR + _, 
err = reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test2", + Namespace: "namespace-b", + }, + }) + require.NoError(t, err) + + // Verify resources in namespace-a + svc1 := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "test1-master", + Namespace: "namespace-a", + }, svc1) + assert.NoError(t, err) + + // Verify resources in namespace-b + svc2 := &corev1.Service{} + err = reconciler.Get(context.Background(), types.NamespacedName{ + Name: "test2-master", + Namespace: "namespace-b", + }, svc2) + assert.NoError(t, err) +} + +// errorClient is a fake client that returns errors for testing error paths. +type errorClient struct { + client.Client + getError error + createError error +} + +func (e *errorClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if e.getError != nil { + return e.getError + } + return e.Client.Get(ctx, key, obj, opts...) +} + +func (e *errorClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if e.createError != nil { + return e.createError + } + return e.Client.Create(ctx, obj, opts...) 
+} + +func TestReconcile_GetError(t *testing.T) { + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + recorder := record.NewFakeRecorder(10) + + // Wrap with error client that returns an error on Get + errClient := &errorClient{ + Client: fakeClient, + getError: apierrors.NewInternalError(fmt.Errorf("internal server error")), + } + + reconciler := &LocustTestReconciler{ + Client: errClient, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: "default", + }, + }) + + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_CreateServiceError(t *testing.T) { + lt := newTestLocustTestCR("error-test", "default") + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(lt). + Build() + recorder := record.NewFakeRecorder(10) + + // Wrap with error client that returns an error on Create + errClient := &errorClient{ + Client: fakeClient, + createError: apierrors.NewInternalError(fmt.Errorf("failed to create")), + } + + reconciler := &LocustTestReconciler{ + Client: errClient, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "error-test", + Namespace: "default", + }, + }) + + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +// sequentialErrorClient returns error only after N successful creates +type sequentialErrorClient struct { + client.Client + createCount int + errorAfterCreate int + createError error +} + +func (s *sequentialErrorClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + s.createCount++ + if s.createCount > s.errorAfterCreate { + return s.createError + } + 
return s.Client.Create(ctx, obj, opts...) +} + +func TestReconcile_CreateMasterJobError(t *testing.T) { + lt := newTestLocustTestCR("job-error-test", "default") + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(lt). + Build() + recorder := record.NewFakeRecorder(10) + + // Error after first create (Service succeeds, Master Job fails) + errClient := &sequentialErrorClient{ + Client: fakeClient, + errorAfterCreate: 1, + createError: apierrors.NewInternalError(fmt.Errorf("failed to create job")), + } + + reconciler := &LocustTestReconciler{ + Client: errClient, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "job-error-test", + Namespace: "default", + }, + }) + + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_CreateWorkerJobError(t *testing.T) { + lt := newTestLocustTestCR("worker-error-test", "default") + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(lt). 
+ Build() + recorder := record.NewFakeRecorder(10) + + // Error after second create (Service and Master Job succeed, Worker Job fails) + errClient := &sequentialErrorClient{ + Client: fakeClient, + errorAfterCreate: 2, + createError: apierrors.NewInternalError(fmt.Errorf("failed to create worker job")), + } + + reconciler := &LocustTestReconciler{ + Client: errClient, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "worker-error-test", + Namespace: "default", + }, + }) + + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestCreateResource_SetControllerReferenceError(t *testing.T) { + // Create a LocustTest without a UID - this causes SetControllerReference to fail + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-uid-test", + Namespace: "default", + Generation: 1, + // UID intentionally not set + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 1, + }, + TestFiles: &locustv2.TestFilesConfig{ + ConfigMapRef: "test-configmap", + }, + }, + } + + // Use a scheme without LocustTest to cause SetControllerReference to fail + badScheme := runtime.NewScheme() + _ = batchv1.AddToScheme(badScheme) + _ = corev1.AddToScheme(badScheme) + // Note: NOT adding locustv2 to scheme + + fakeClient := fake.NewClientBuilder(). + WithScheme(badScheme). 
+ Build() + recorder := record.NewFakeRecorder(10) + + reconciler := &LocustTestReconciler{ + Client: fakeClient, + Scheme: badScheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-svc", + Namespace: "default", + }, + } + + err := reconciler.createResource(context.Background(), lt, svc, "Service") + assert.Error(t, err) +} + +func TestReconcile_ExternalDeletion_MasterService(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, recorder := newTestReconciler(lt) + ctx := context.Background() + + // First reconcile - creates resources and transitions to Running + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Drain creation events from first reconcile + for i := 0; i < 3; i++ { + select { + case <-recorder.Events: + // Drain creation events (Service, Master Job, Worker Job) + default: + } + } + + // Refetch CR to get updated status + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase) + + // Manually delete the master Service + masterService := &corev1.Service{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterService) + require.NoError(t, err) + err = reconciler.Delete(ctx, masterService) + require.NoError(t, err) + + // Second reconcile - should detect deletion and reset to Pending + result, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + assert.Greater(t, result.RequeueAfter, time.Duration(0), "Should requeue after detecting deletion") + + // Check that Warning event was recorded + select { + case event := 
<-recorder.Events: + assert.Contains(t, event, "Warning") + assert.Contains(t, event, "ResourceDeleted") + assert.Contains(t, event, "Service") + assert.Contains(t, event, "my-test-master") + default: + t.Fatal("Expected Warning event for external deletion") + } + + // Refetch CR to check Phase was reset + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhasePending, lt.Status.Phase, "Phase should be reset to Pending") + + // Third reconcile - should recreate the Service (self-healing) + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify Service exists again + masterService = &corev1.Service{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterService) + assert.NoError(t, err, "Service should be recreated") + + // Refetch CR to check Phase transitioned back to Running + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase, "Phase should be Running after recreation") +} + +func TestReconcile_ExternalDeletion_MasterJob(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, recorder := newTestReconciler(lt) + ctx := context.Background() + + // First reconcile - creates resources and transitions to Running + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Drain creation events from first reconcile + for i := 0; i < 3; i++ { + select { + case <-recorder.Events: + // Drain creation events (Service, Master Job, Worker Job) + default: + } + } + + // Refetch CR to get updated status + err = 
reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase) + + // Manually delete the master Job + masterJob := &batchv1.Job{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterJob) + require.NoError(t, err) + err = reconciler.Delete(ctx, masterJob) + require.NoError(t, err) + + // Second reconcile - should detect deletion and reset to Pending + result, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + assert.Greater(t, result.RequeueAfter, time.Duration(0), "Should requeue after detecting deletion") + + // Check that Warning event was recorded + select { + case event := <-recorder.Events: + assert.Contains(t, event, "Warning") + assert.Contains(t, event, "ResourceDeleted") + assert.Contains(t, event, "Job") + assert.Contains(t, event, "my-test-master") + default: + t.Fatal("Expected Warning event for external deletion") + } + + // Refetch CR to check Phase was reset + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhasePending, lt.Status.Phase, "Phase should be reset to Pending") + + // Third reconcile - should recreate the Job (self-healing) + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify Job exists again + masterJob = &batchv1.Job{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-master", + Namespace: "default", + }, masterJob) + assert.NoError(t, err, "Master Job should be recreated") + + // Refetch CR to check Phase transitioned back to Running + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", 
+ Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase, "Phase should be Running after recreation") +} + +func TestReconcile_ExternalDeletion_WorkerJob(t *testing.T) { + lt := newTestLocustTestCR("my-test", "default") + reconciler, recorder := newTestReconciler(lt) + ctx := context.Background() + + // First reconcile - creates resources and transitions to Running + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Drain creation events from first reconcile + for i := 0; i < 3; i++ { + select { + case <-recorder.Events: + // Drain creation events (Service, Master Job, Worker Job) + default: + } + } + + // Refetch CR to get updated status + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase) + + // Manually delete the worker Job + workerJob := &batchv1.Job{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-worker", + Namespace: "default", + }, workerJob) + require.NoError(t, err) + err = reconciler.Delete(ctx, workerJob) + require.NoError(t, err) + + // Second reconcile - should detect deletion and reset to Pending + result, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + assert.Greater(t, result.RequeueAfter, time.Duration(0), "Should requeue after detecting deletion") + + // Check that Warning event was recorded + select { + case event := <-recorder.Events: + assert.Contains(t, event, "Warning") + assert.Contains(t, event, "ResourceDeleted") + assert.Contains(t, event, "Job") + assert.Contains(t, event, "my-test-worker") + default: + t.Fatal("Expected Warning event for external deletion") + } + + // Refetch CR to 
check Phase was reset + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhasePending, lt.Status.Phase, "Phase should be reset to Pending") + + // Third reconcile - should recreate the Job (self-healing) + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify Job exists again + workerJob = &batchv1.Job{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test-worker", + Namespace: "default", + }, workerJob) + assert.NoError(t, err, "Worker Job should be recreated") + + // Refetch CR to check Phase transitioned back to Running + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "my-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase, "Phase should be Running after recreation") +} + +// conflictOnUpdateClient wraps a client.Client and returns 409 Conflict errors +// on the first N Status().Update() calls, then delegates to the real client. 
+type conflictOnUpdateClient struct { + client.Client + conflictCount int // number of conflicts to return before succeeding + updateCalls int // tracks total Status().Update() calls +} + +func (c *conflictOnUpdateClient) Status() client.SubResourceWriter { + return &conflictStatusWriter{ + SubResourceWriter: c.Client.Status(), + parent: c, + } +} + +type conflictStatusWriter struct { + client.SubResourceWriter + parent *conflictOnUpdateClient +} + +func (w *conflictStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + w.parent.updateCalls++ + if w.parent.updateCalls <= w.parent.conflictCount { + return apierrors.NewConflict( + schema.GroupResource{Group: "locust.io", Resource: "locusttests"}, + obj.GetName(), + fmt.Errorf("the object has been modified"), + ) + } + return w.SubResourceWriter.Update(ctx, obj, opts...) +} + +func TestCreateResources_RetryOnConflict(t *testing.T) { + lt := newTestLocustTestCR("conflict-test", "default") + // Pre-set Phase to Pending so initializeStatus is skipped + lt.Status.Phase = locustv2.PhasePending + lt.Status.ExpectedWorkers = lt.Spec.Worker.Replicas + + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(lt). + WithStatusSubresource(&locustv2.LocustTest{}). 
+ Build() + recorder := record.NewFakeRecorder(10) + + cc := &conflictOnUpdateClient{ + Client: fakeClient, + conflictCount: 1, // fail once, succeed on second attempt + } + + reconciler := &LocustTestReconciler{ + Client: cc, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "conflict-test", + Namespace: "default", + }, + }) + require.NoError(t, err) + assert.Equal(t, ctrl.Result{}, result) + assert.Equal(t, 2, cc.updateCalls, "Expected 1 conflict + 1 successful update") + + // Verify status was correctly set despite the conflict + err = cc.Get(context.Background(), types.NamespacedName{ + Name: "conflict-test", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase) +} + +func TestReconcile_ExternalDeletion_RetryOnConflict(t *testing.T) { + lt := newTestLocustTestCR("conflict-del", "default") + // Pre-set to Running phase with resources "already created" + lt.Status.Phase = locustv2.PhaseRunning + lt.Status.ExpectedWorkers = lt.Spec.Worker.Replicas + lt.Status.ObservedGeneration = lt.Generation + + // Create master Job and worker Job, but NOT master Service (simulates external deletion) + masterJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "conflict-del-master", + Namespace: "default", + }, + } + workerJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "conflict-del-worker", + Namespace: "default", + }, + } + + scheme := newTestScheme() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(lt, masterJob, workerJob). + WithStatusSubresource(&locustv2.LocustTest{}). 
+ Build() + recorder := record.NewFakeRecorder(10) + + cc := &conflictOnUpdateClient{ + Client: fakeClient, + conflictCount: 1, // fail once, succeed on second attempt + } + + reconciler := &LocustTestReconciler{ + Client: cc, + Scheme: scheme, + Config: newTestOperatorConfig(), + Recorder: recorder, + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "conflict-del", + Namespace: "default", + }, + }) + require.NoError(t, err) + assert.Greater(t, result.RequeueAfter, time.Duration(0), "Should requeue after detecting deletion") + assert.Equal(t, 2, cc.updateCalls, "Expected 1 conflict + 1 successful update") + + // Verify phase was reset to Pending despite the conflict + err = cc.Get(context.Background(), types.NamespacedName{ + Name: "conflict-del", + Namespace: "default", + }, lt) + require.NoError(t, err) + assert.Equal(t, locustv2.PhasePending, lt.Status.Phase) +} + +func TestReconcile_FinalizerAddedOnFirstReconcile(t *testing.T) { + lt := newTestLocustTestCR("finalizer-add", "default") + reconciler, _ := newTestReconciler(lt) + ctx := context.Background() + + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "finalizer-add", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Refetch CR and verify finalizer is present + updated := &locustv2.LocustTest{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "finalizer-add", + Namespace: "default", + }, updated) + require.NoError(t, err) + assert.Contains(t, updated.Finalizers, finalizerName, "Finalizer should be added on first reconcile") +} + +func TestReconcile_FinalizerDeletion(t *testing.T) { + lt := newTestLocustTestCR("finalizer-del", "default") + reconciler, recorder := newTestReconciler(lt) + ctx := context.Background() + + // First reconcile β€” adds finalizer and creates resources + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: 
types.NamespacedName{ + Name: "finalizer-del", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Drain creation events (3 resource Created events) + for i := 0; i < 3; i++ { + select { + case <-recorder.Events: + default: + } + } + + // Delete the CR (sets DeletionTimestamp, finalizer keeps it alive) + updated := &locustv2.LocustTest{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "finalizer-del", + Namespace: "default", + }, updated) + require.NoError(t, err) + err = reconciler.Delete(ctx, updated) + require.NoError(t, err) + + // Second reconcile β€” should remove finalizer and emit Deleting event + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "finalizer-del", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify "Deleting" event was emitted + deletingEventFound := false + for { + select { + case event := <-recorder.Events: + if strings.Contains(event, "Deleting") { + deletingEventFound = true + } + default: + goto done + } + } +done: + assert.True(t, deletingEventFound, "Expected 'Deleting' event after finalizer removal") + + // Verify the CR is gone (finalizer removed allows deletion) + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "finalizer-del", + Namespace: "default", + }, &locustv2.LocustTest{}) + assert.True(t, apierrors.IsNotFound(err), "CR should be deleted after finalizer removal") +} + +func TestReconcile_FinalizerIdempotent(t *testing.T) { + lt := newTestLocustTestCR("finalizer-idem", "default") + reconciler, _ := newTestReconciler(lt) + ctx := context.Background() + + // First reconcile β€” adds finalizer + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "finalizer-idem", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Second reconcile β€” should NOT add a duplicate finalizer + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: 
"finalizer-idem", + Namespace: "default", + }, + }) + require.NoError(t, err) + + // Verify finalizer appears exactly once + updated := &locustv2.LocustTest{} + err = reconciler.Get(ctx, types.NamespacedName{ + Name: "finalizer-idem", + Namespace: "default", + }, updated) + require.NoError(t, err) + + finalizerCount := 0 + for _, f := range updated.Finalizers { + if f == finalizerName { + finalizerCount++ + } + } + assert.Equal(t, 1, finalizerCount, "Finalizer should appear exactly once") +} + +func TestMapPodToLocustTest(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + job *batchv1.Job + expectedRequests int + expectedName string + expectedNamespace string + }{ + { + name: "pod with valid owner chain maps to LocustTest", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-master-abc123", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: "test-master", + UID: "job-uid-123", + }, + }, + }, + }, + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-master", + Namespace: "default", + UID: "job-uid-123", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "locust.io/v2", + Kind: "LocustTest", + Name: "test", + UID: "locusttest-uid-123", + }, + }, + }, + }, + expectedRequests: 1, + expectedName: "test", + expectedNamespace: "default", + }, + { + name: "pod without owner references returns empty", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "orphan-pod", + Namespace: "default", + }, + }, + job: nil, + expectedRequests: 0, + }, + { + name: "pod owned by non-Job resource returns empty", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "replicaset-pod", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "my-replicaset", + UID: "rs-uid-123", + }, + }, + }, + }, + job: nil, + expectedRequests: 0, + }, + { + name: "pod owned by Job not owned 
by LocustTest returns empty", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "standalone-job-pod", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: "standalone-job", + UID: "job-uid-456", + }, + }, + }, + }, + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "standalone-job", + Namespace: "default", + UID: "job-uid-456", + // No owner references + }, + }, + expectedRequests: 0, + }, + { + name: "pod owned by deleted Job returns empty", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleted-job-pod", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: "deleted-job", + UID: "job-uid-789", + }, + }, + }, + }, + job: nil, // Job doesn't exist in fake client + expectedRequests: 0, + }, + { + name: "pod in different namespace maps correctly", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-pod", + Namespace: "custom-ns", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "Job", + Name: "custom-job", + UID: "job-uid-custom", + }, + }, + }, + }, + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-job", + Namespace: "custom-ns", + UID: "job-uid-custom", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "locust.io/v2", + Kind: "LocustTest", + Name: "custom-test", + UID: "locusttest-uid-custom", + }, + }, + }, + }, + expectedRequests: 1, + expectedName: "custom-test", + expectedNamespace: "custom-ns", + }, + { + name: "multiple owner references finds Job correctly", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-owner-pod", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "some-replicaset", + UID: "rs-uid-999", + }, + { + APIVersion: "batch/v1", + Kind: "Job", + Name: "multi-owner-job", + UID: "job-uid-multi", + }, + }, 
+ }, + }, + job: &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-owner-job", + Namespace: "default", + UID: "job-uid-multi", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "locust.io/v2", + Kind: "LocustTest", + Name: "multi-test", + UID: "locusttest-uid-multi", + }, + }, + }, + }, + expectedRequests: 1, + expectedName: "multi-test", + expectedNamespace: "default", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := newTestScheme() + + // Build fake client with Job if provided + builder := fake.NewClientBuilder().WithScheme(scheme) + if tt.job != nil { + builder = builder.WithObjects(tt.job) + } + fakeClient := builder.Build() + + reconciler := &LocustTestReconciler{ + Client: fakeClient, + Scheme: scheme, + Config: newTestOperatorConfig(), + } + + ctx := context.Background() + requests := reconciler.mapPodToLocustTest(ctx, tt.pod) + + if tt.expectedRequests == 0 { + assert.Empty(t, requests, "Expected no reconcile requests") + } else { + require.Len(t, requests, tt.expectedRequests) + assert.Equal(t, tt.expectedName, requests[0].Name) + assert.Equal(t, tt.expectedNamespace, requests[0].Namespace) + } + }) + } +} diff --git a/internal/controller/pod_health.go b/internal/controller/pod_health.go new file mode 100644 index 00000000..7294c32d --- /dev/null +++ b/internal/controller/pod_health.go @@ -0,0 +1,321 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" +) + +const ( + // podStartupGracePeriod is the time to wait before reporting pod failures. + // This prevents false positives during normal startup (scheduling, image pull, volume mount). + podStartupGracePeriod = 2 * time.Minute +) + +// PodHealthStatus represents the aggregated health status of all pods for a LocustTest. +type PodHealthStatus struct { + Healthy bool + Reason string + Message string + FailedPods []PodFailureInfo + InGracePeriod bool +} + +// PodFailureInfo contains details about a failed pod. +type PodFailureInfo struct { + Name string + FailureType string + ErrorMessage string +} + +// checkPodHealth analyzes all pods owned by the LocustTest and returns their health status. +// Returns PodHealthStatus and optional requeue duration (for grace period handling). 
func (r *LocustTestReconciler) checkPodHealth(ctx context.Context, lt *locustv2.LocustTest) (PodHealthStatus, time.Duration) {
	log := logf.FromContext(ctx)

	// List all pods owned by this LocustTest.
	// NOTE(review): assumes the job builder stamps pods with the
	// "performance-test-name" label — confirm against resource creation code.
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{
		client.InNamespace(lt.Namespace),
		client.MatchingLabels{
			"performance-test-name": lt.Name,
		},
	}

	if err := r.List(ctx, podList, listOpts...); err != nil {
		log.Error(err, "Failed to list pods for health check")
		// Return healthy status to avoid blocking on transient errors;
		// the next reconcile will retry the list.
		return PodHealthStatus{
			Healthy: true,
			Reason:  locustv2.ReasonPodsHealthy,
			Message: "Pod health check pending",
		}, 0
	}

	// No pods yet - this is normal during initial creation
	if len(podList.Items) == 0 {
		return PodHealthStatus{
			Healthy: true,
			Reason:  locustv2.ReasonPodsStarting,
			Message: "Waiting for pods to be created",
		}, 0
	}

	// Check if we're still in the grace period (measured from the oldest pod,
	// so newly restarted pods do not reset the window).
	oldestPodCreation := findOldestPodCreationTime(podList.Items)
	gracePeriodRemaining := podStartupGracePeriod - time.Since(oldestPodCreation)

	if gracePeriodRemaining > 0 {
		log.V(1).Info("Pods in startup grace period", "remaining", gracePeriodRemaining)
		// Second return value asks the caller to requeue exactly when the
		// grace period expires.
		return PodHealthStatus{
			Healthy:       true,
			Reason:        locustv2.ReasonPodsStarting,
			Message:       "Pods are starting up",
			InGracePeriod: true,
		}, gracePeriodRemaining
	}

	// Analyze each pod for failures
	var failedPods []PodFailureInfo
	for _, pod := range podList.Items {
		// &pod is safe here: analyzePodFailure does not retain the pointer.
		if failure := analyzePodFailure(&pod, lt); failure != nil {
			failedPods = append(failedPods, *failure)
		}
	}

	// If no failures, all pods are healthy
	if len(failedPods) == 0 {
		return PodHealthStatus{
			Healthy: true,
			Reason:  locustv2.ReasonPodsHealthy,
			Message: "All pods are healthy",
		}, 0
	}

	// Categorize and prioritize failures
	failureType, message := buildFailureMessage(failedPods)

	return PodHealthStatus{
		Healthy:    false,
		Reason:     failureType,
		Message:    message,
		FailedPods: failedPods,
	}, 0
}

// analyzePodFailure examines a single pod and returns failure info if the pod is unhealthy.
// Returns nil if the pod is healthy.
// Checks are ordered by lifecycle: scheduling first, then init containers,
// then main containers — the first failure found wins.
func analyzePodFailure(pod *corev1.Pod, lt *locustv2.LocustTest) *PodFailureInfo {
	// Check pod conditions for scheduling failures
	for _, condition := range pod.Status.Conditions {
		if condition.Type == corev1.PodScheduled && condition.Status == corev1.ConditionFalse {
			return &PodFailureInfo{
				Name:         pod.Name,
				FailureType:  locustv2.ReasonPodSchedulingError,
				ErrorMessage: condition.Message,
			}
		}
	}

	// Check init containers
	for _, initStatus := range pod.Status.InitContainerStatuses {
		if failure := analyzeContainerStatus(pod.Name, initStatus, true, lt); failure != nil {
			return failure
		}
	}

	// Check main containers
	for _, containerStatus := range pod.Status.ContainerStatuses {
		if failure := analyzeContainerStatus(pod.Name, containerStatus, false, lt); failure != nil {
			return failure
		}
	}

	// Pod is healthy
	return nil
}

// analyzeContainerStatus checks a container status for failures.
+func analyzeContainerStatus(podName string, status corev1.ContainerStatus, isInitContainer bool, lt *locustv2.LocustTest) *PodFailureInfo { + // Check waiting state + if status.State.Waiting != nil { + waiting := status.State.Waiting + reason := waiting.Reason + message := waiting.Message + + switch { + case reason == "CreateContainerConfigError": + // Extract ConfigMap name if this is a config error + enhancedMsg := extractConfigMapError(message, lt) + return &PodFailureInfo{ + Name: podName, + FailureType: locustv2.ReasonPodConfigError, + ErrorMessage: enhancedMsg, + } + + case reason == "ImagePullBackOff" || reason == "ErrImagePull" || strings.Contains(reason, "ImagePull"): + return &PodFailureInfo{ + Name: podName, + FailureType: locustv2.ReasonPodImagePullError, + ErrorMessage: message, + } + + case reason == "CrashLoopBackOff": + return &PodFailureInfo{ + Name: podName, + FailureType: locustv2.ReasonPodCrashLoop, + ErrorMessage: message, + } + } + } + + // Check terminated state (for init containers or recently failed containers) + if status.State.Terminated != nil { + terminated := status.State.Terminated + if terminated.ExitCode != 0 { + failureType := locustv2.ReasonPodInitError + if !isInitContainer { + failureType = locustv2.ReasonPodCrashLoop + } + return &PodFailureInfo{ + Name: podName, + FailureType: failureType, + ErrorMessage: fmt.Sprintf("Container %s exited with code %d: %s", status.Name, terminated.ExitCode, terminated.Reason), + } + } + } + + return nil +} + +// extractConfigMapError enhances ConfigMap error messages with the expected ConfigMap name from spec. +func extractConfigMapError(errorMsg string, lt *locustv2.LocustTest) string { + // Try to extract ConfigMap name from error message + // Common patterns: + // - "configmap \"name\" not found" + // - "couldn't find key ... in ConfigMap ..." 
+ configMapRegex := regexp.MustCompile(`[Cc]onfig[Mm]ap\s+"([^"]+)"`) + matches := configMapRegex.FindStringSubmatch(errorMsg) + + var expectedConfigMap string + if lt.Spec.TestFiles != nil { + if lt.Spec.TestFiles.ConfigMapRef != "" { + expectedConfigMap = lt.Spec.TestFiles.ConfigMapRef + } else if lt.Spec.TestFiles.LibConfigMapRef != "" { + expectedConfigMap = lt.Spec.TestFiles.LibConfigMapRef + } + } + + if len(matches) > 1 { + // ConfigMap name found in error + foundName := matches[1] + if expectedConfigMap != "" && foundName == expectedConfigMap { + return fmt.Sprintf("ConfigMap not found (expected: %s). %s", expectedConfigMap, errorMsg) + } + return errorMsg + } + + // Generic config error - add expected ConfigMap if known + if expectedConfigMap != "" { + return fmt.Sprintf("ConfigMap not found (expected: %s). %s", expectedConfigMap, errorMsg) + } + + return errorMsg +} + +// buildFailureMessage creates a user-friendly message from pod failures. +// Returns failure type (reason) and formatted message. 
+func buildFailureMessage(failures []PodFailureInfo) (string, string) { + if len(failures) == 0 { + return locustv2.ReasonPodsHealthy, "All pods are healthy" + } + + // Group failures by type + failuresByType := make(map[string][]PodFailureInfo) + for _, f := range failures { + failuresByType[f.FailureType] = append(failuresByType[f.FailureType], f) + } + + // Prioritize failure types (most critical first) + priorityOrder := []string{ + locustv2.ReasonPodConfigError, + locustv2.ReasonPodImagePullError, + locustv2.ReasonPodSchedulingError, + locustv2.ReasonPodCrashLoop, + locustv2.ReasonPodInitError, + } + + var primaryType string + var primaryFailures []PodFailureInfo + + for _, failureType := range priorityOrder { + if pods, exists := failuresByType[failureType]; exists { + primaryType = failureType + primaryFailures = pods + break + } + } + + // Build message for primary failure type + podNames := make([]string, len(primaryFailures)) + for i, f := range primaryFailures { + podNames[i] = f.Name + } + + // Get first error message as example + exampleError := primaryFailures[0].ErrorMessage + + message := fmt.Sprintf("%s: %d pod(s) affected [%s]: %s", + primaryType, + len(primaryFailures), + strings.Join(podNames, ", "), + exampleError, + ) + + // Add recovery hint for config errors + if primaryType == locustv2.ReasonPodConfigError { + message += ". Create the ConfigMap and the pods will restart automatically." + } + + return primaryType, message +} + +// findOldestPodCreationTime returns the creation time of the oldest pod in the list. 
func findOldestPodCreationTime(pods []corev1.Pod) time.Time {
	// Defensive: checkPodHealth only calls this with a non-empty list.
	// Returning "now" keeps the empty case harmless — the grace-period
	// computation upstream simply sees a freshly started window.
	if len(pods) == 0 {
		return time.Now()
	}

	oldest := pods[0].CreationTimestamp.Time
	for _, pod := range pods[1:] {
		if pod.CreationTimestamp.Time.Before(oldest) {
			oldest = pod.CreationTimestamp.Time
		}
	}

	return oldest
}
diff --git a/internal/controller/status.go b/internal/controller/status.go
new file mode 100644
index 00000000..28212384
--- /dev/null
+++ b/internal/controller/status.go
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2"
)

// initializeStatus sets initial status values for a new LocustTest.
+func (r *LocustTestReconciler) initializeStatus(lt *locustv2.LocustTest) { + lt.Status.Phase = locustv2.PhasePending + lt.Status.ExpectedWorkers = lt.Spec.Worker.Replicas + lt.Status.ConnectedWorkers = 0 + + r.setReady(lt, false, locustv2.ReasonResourcesCreating, "Creating resources") + r.setCondition(lt, locustv2.ConditionTypeWorkersConnected, + metav1.ConditionFalse, locustv2.ReasonWaitingForWorkers, + "Waiting for workers to connect") + r.setCondition(lt, locustv2.ConditionTypeTestCompleted, + metav1.ConditionFalse, locustv2.ReasonTestInProgress, + "Test has not started") + r.setCondition(lt, locustv2.ConditionTypePodsHealthy, + metav1.ConditionTrue, locustv2.ReasonPodsStarting, + "Waiting for pods to start") +} + +// setCondition sets a condition on the LocustTest status. +// Uses the standard meta.SetStatusCondition helper for proper handling. +func (r *LocustTestReconciler) setCondition( + lt *locustv2.LocustTest, + condType string, + status metav1.ConditionStatus, + reason string, + message string, +) { + meta.SetStatusCondition(<.Status.Conditions, metav1.Condition{ + Type: condType, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: metav1.Now(), + ObservedGeneration: lt.Generation, + }) +} + +// setReady is a convenience wrapper for setting the Ready condition. +func (r *LocustTestReconciler) setReady(lt *locustv2.LocustTest, ready bool, reason, message string) { + status := metav1.ConditionFalse + if ready { + status = metav1.ConditionTrue + } + r.setCondition(lt, locustv2.ConditionTypeReady, status, reason, message) +} + +// updateStatusFromJobs derives status from the current state of owned Jobs. 
+func (r *LocustTestReconciler) updateStatusFromJobs(
+	ctx context.Context,
+	lt *locustv2.LocustTest,
+	masterJob *batchv1.Job,
+	workerJob *batchv1.Job,
+	podHealth PodHealthStatus,
+) error {
+	log := logf.FromContext(ctx)
+
+	// Determine phase from master Job status
+	newPhase := derivePhaseFromJob(masterJob)
+
+	// If pods unhealthy and grace period expired, mark as Failed
+	// BUT: Don't override terminal states (Succeeded/Failed from Job completion)
+	if !podHealth.Healthy && !podHealth.InGracePeriod {
+		if newPhase != locustv2.PhaseSucceeded && newPhase != locustv2.PhaseFailed {
+			newPhase = locustv2.PhaseFailed
+		}
+	}
+
+	// Update phase if changed and emit events
+	if lt.Status.Phase != newPhase {
+		oldPhase := lt.Status.Phase
+		lt.Status.Phase = newPhase
+
+		// Emit event for significant transitions (CORE-26)
+		switch newPhase {
+		case locustv2.PhaseRunning:
+			r.Recorder.Event(lt, corev1.EventTypeNormal, "TestStarted", "Load test execution started")
+		case locustv2.PhaseSucceeded:
+			r.Recorder.Event(lt, corev1.EventTypeNormal, "TestCompleted", "Load test completed successfully")
+		case locustv2.PhaseFailed:
+			r.Recorder.Event(lt, corev1.EventTypeWarning, "TestFailed", "Load test execution failed")
+		case locustv2.PhasePending:
+			// No event for Pending - it's the initial state or recovery state
+		}
+
+		// Set timestamps
+		// StartTime is only set once (first transition into Running).
+		if newPhase == locustv2.PhaseRunning && lt.Status.StartTime == nil {
+			now := metav1.Now()
+			lt.Status.StartTime = &now
+		}
+
+		if newPhase == locustv2.PhaseSucceeded || newPhase == locustv2.PhaseFailed {
+			now := metav1.Now()
+			lt.Status.CompletionTime = &now
+
+			// Update TestCompleted condition
+			if newPhase == locustv2.PhaseSucceeded {
+				r.setCondition(lt, locustv2.ConditionTypeTestCompleted,
+					metav1.ConditionTrue, locustv2.ReasonTestSucceeded,
+					"Test completed successfully")
+			} else {
+				r.setCondition(lt, locustv2.ConditionTypeTestCompleted,
+					metav1.ConditionTrue, locustv2.ReasonTestFailed,
+					"Test failed")
+				r.setReady(lt, false, locustv2.ReasonResourcesFailed, "Test failed")
+			}
+		}
+
+		log.Info("Phase transition", "from", string(oldPhase), "to", string(newPhase), "locustTest", lt.Name)
+	}
+
+	// Update worker connection status (approximation from worker Job)
+	// NOTE(review): Job.Status.Active counts scheduled worker pods, not actual
+	// master connections — confirm this is an acceptable proxy for "connected".
+	if workerJob != nil {
+		lt.Status.ConnectedWorkers = workerJob.Status.Active
+
+		if lt.Status.ConnectedWorkers >= lt.Status.ExpectedWorkers {
+			r.setCondition(lt, locustv2.ConditionTypeWorkersConnected,
+				metav1.ConditionTrue, locustv2.ReasonAllWorkersConnected,
+				fmt.Sprintf("%d/%d workers connected",
+					lt.Status.ConnectedWorkers, lt.Status.ExpectedWorkers))
+		} else {
+			r.setCondition(lt, locustv2.ConditionTypeWorkersConnected,
+				metav1.ConditionFalse, locustv2.ReasonWorkersMissing,
+				fmt.Sprintf("%d/%d workers connected",
+					lt.Status.ConnectedWorkers, lt.Status.ExpectedWorkers))
+		}
+	}
+
+	// Update PodsHealthy condition
+	if podHealth.Healthy {
+		r.setCondition(lt, locustv2.ConditionTypePodsHealthy,
+			metav1.ConditionTrue, podHealth.Reason, podHealth.Message)
+	} else {
+		r.setCondition(lt, locustv2.ConditionTypePodsHealthy,
+			metav1.ConditionFalse, podHealth.Reason, podHealth.Message)
+
+		// Emit warning event
+		r.Recorder.Event(lt, corev1.EventTypeWarning, "PodFailure", podHealth.Message)
+
+		// Log for operator visibility
+		log.Info("Pod health check failed",
+			"reason", podHealth.Reason,
+			"failedPods", len(podHealth.FailedPods))
+	}
+
+	// Update ObservedGeneration (CORE-25)
+	lt.Status.ObservedGeneration = lt.Generation
+
+	// Set SpecDrifted condition when spec was modified on an immutable test (STAB-03)
+	if lt.Generation > 1 && lt.Status.Phase != locustv2.PhasePending {
+		r.setCondition(lt, locustv2.ConditionTypeSpecDrifted,
+			metav1.ConditionTrue, locustv2.ReasonSpecChangeIgnored,
+			"Spec changes after creation are ignored. Delete and recreate the CR to apply changes.")
+	}
+
+	// Save computed status before retry loop — status is derived from Jobs, not from the CR
+	desiredStatus := lt.Status
+
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		// Re-read the latest CR revision, then re-apply the derived status on top
+		// of it so a 409 Conflict from a concurrent writer is resolved by retrying.
+		if err := r.Get(ctx, client.ObjectKeyFromObject(lt), lt); err != nil {
+			return err
+		}
+		lt.Status = desiredStatus
+		return r.Status().Update(ctx, lt)
+	})
+	if err != nil {
+		return fmt.Errorf("failed to update status from Jobs: %w", err)
+	}
+	return nil
+}
+
+// derivePhaseFromJob determines the LocustTest phase from Job status.
+// Completion conditions take precedence over the active-pod count; a Job that
+// is both Complete and Failed (checked in that order) maps to Succeeded.
+func derivePhaseFromJob(job *batchv1.Job) locustv2.Phase {
+	if job == nil {
+		return locustv2.PhasePending
+	}
+
+	// Check for completion conditions
+	for _, condition := range job.Status.Conditions {
+		if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue {
+			return locustv2.PhaseSucceeded
+		}
+		if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue {
+			return locustv2.PhaseFailed
+		}
+	}
+
+	// Check if running
+	if job.Status.Active > 0 {
+		return locustv2.PhaseRunning
+	}
+
+	return locustv2.PhasePending
+}
diff --git a/internal/controller/status_test.go b/internal/controller/status_test.go
new file mode 100644
index 00000000..4d95ad4d
--- /dev/null
+++ b/internal/controller/status_test.go
@@ -0,0 +1,1041 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/record"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2"
+)
+
+// healthyPodStatus returns a default healthy PodHealthStatus for tests that don't test pod health.
+func healthyPodStatus() PodHealthStatus {
+	return PodHealthStatus{Healthy: true, Reason: locustv2.ReasonPodsHealthy, Message: "All pods are healthy"}
+}
+
+// TestInitializeStatus verifies initializeStatus seeds the phase, worker
+// counters, and all four initial conditions on a fresh LocustTest.
+func TestInitializeStatus(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 5,
+			},
+		},
+	}
+
+	reconciler := &LocustTestReconciler{}
+	reconciler.initializeStatus(lt)
+
+	assert.Equal(t, locustv2.PhasePending, lt.Status.Phase)
+	assert.Equal(t, int32(5), lt.Status.ExpectedWorkers)
+	assert.Equal(t, int32(0), lt.Status.ConnectedWorkers)
+	assert.Nil(t, lt.Status.StartTime)
+	assert.Nil(t, lt.Status.CompletionTime)
+
+	// Verify conditions are set
+	require.Len(t, lt.Status.Conditions, 4)
+
+	// Check Ready condition
+	readyCondition := findCondition(lt.Status.Conditions, locustv2.ConditionTypeReady)
+	require.NotNil(t, readyCondition)
+	assert.Equal(t, metav1.ConditionFalse, readyCondition.Status)
+	assert.Equal(t, locustv2.ReasonResourcesCreating, readyCondition.Reason)
+
+	// Check WorkersConnected condition
+	workersCondition := findCondition(lt.Status.Conditions, locustv2.ConditionTypeWorkersConnected)
+	require.NotNil(t, workersCondition)
+	assert.Equal(t, metav1.ConditionFalse, workersCondition.Status)
+	assert.Equal(t, locustv2.ReasonWaitingForWorkers, workersCondition.Reason)
+
+	// Check TestCompleted condition
+	completedCondition := findCondition(lt.Status.Conditions, locustv2.ConditionTypeTestCompleted)
+	require.NotNil(t, completedCondition)
+	assert.Equal(t, metav1.ConditionFalse, completedCondition.Status)
+	assert.Equal(t, locustv2.ReasonTestInProgress, completedCondition.Reason)
+
+	// Check PodsHealthy condition
+	podsHealthyCondition := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCondition)
+	assert.Equal(t, metav1.ConditionTrue, podsHealthyCondition.Status)
+	assert.Equal(t, locustv2.ReasonPodsStarting, podsHealthyCondition.Reason)
+}
+
+// TestSetCondition verifies setCondition inserts a new condition and then
+// updates the same condition in place rather than appending a duplicate.
+func TestSetCondition(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+	}
+
+	reconciler := &LocustTestReconciler{}
+
+	// Set initial condition
+	reconciler.setCondition(lt, locustv2.ConditionTypeReady,
+		metav1.ConditionFalse, locustv2.ReasonResourcesCreating, "Creating resources")
+
+	require.Len(t, lt.Status.Conditions, 1)
+	assert.Equal(t, locustv2.ConditionTypeReady, lt.Status.Conditions[0].Type)
+	assert.Equal(t, metav1.ConditionFalse, lt.Status.Conditions[0].Status)
+	assert.Equal(t, locustv2.ReasonResourcesCreating, lt.Status.Conditions[0].Reason)
+	assert.Equal(t, int64(1), lt.Status.Conditions[0].ObservedGeneration)
+
+	// Update the same condition
+	reconciler.setCondition(lt, locustv2.ConditionTypeReady,
+		metav1.ConditionTrue, locustv2.ReasonResourcesCreated, "All resources created")
+
+	require.Len(t, lt.Status.Conditions, 1) // Should still be 1, not 2
+	assert.Equal(t, metav1.ConditionTrue, lt.Status.Conditions[0].Status)
+	assert.Equal(t, locustv2.ReasonResourcesCreated, lt.Status.Conditions[0].Reason)
+}
+
+// TestSetReady verifies setReady maps the boolean flag onto the Ready
+// condition's ConditionTrue/ConditionFalse status.
+func TestSetReady(t *testing.T) {
+	tests := []struct {
+		name           string
+		ready          bool
+		reason         string
+		message        string
+		expectedStatus metav1.ConditionStatus
+	}{
+		{
+			name:           "set ready true",
+			ready:          true,
+			reason:         locustv2.ReasonResourcesCreated,
+			message:        "All resources created",
+			expectedStatus: metav1.ConditionTrue,
+		},
+		{
+			name:           "set ready false",
+			ready:          false,
+			reason:         locustv2.ReasonResourcesFailed,
+			message:        "Failed to create resources",
+			expectedStatus: metav1.ConditionFalse,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			lt := &locustv2.LocustTest{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test",
+					Namespace:  "default",
+					Generation: 1,
+				},
+			}
+
+			reconciler := &LocustTestReconciler{}
+			reconciler.setReady(lt, tt.ready, tt.reason, tt.message)
+
+			condition := findCondition(lt.Status.Conditions, locustv2.ConditionTypeReady)
+			require.NotNil(t, condition)
+			assert.Equal(t, tt.expectedStatus, condition.Status)
+			assert.Equal(t, tt.reason, condition.Reason)
+			assert.Equal(t, tt.message, condition.Message)
+		})
+	}
+}
+
+// TestDerivePhaseFromJob covers the Job-to-Phase mapping for nil, idle,
+// active, complete, and failed Jobs.
+func TestDerivePhaseFromJob(t *testing.T) {
+	tests := []struct {
+		name          string
+		job           *batchv1.Job
+		expectedPhase locustv2.Phase
+	}{
+		{
+			name:          "nil job returns Pending",
+			job:           nil,
+			expectedPhase: locustv2.PhasePending,
+		},
+		{
+			name: "job with no conditions returns Pending",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{},
+			},
+			expectedPhase: locustv2.PhasePending,
+		},
+		{
+			name: "job with active pods returns Running",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 1,
+				},
+			},
+			expectedPhase: locustv2.PhaseRunning,
+		},
+		{
+			name: "completed job returns Succeeded",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobComplete,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedPhase: locustv2.PhaseSucceeded,
+		},
+		{
+			name: "failed job returns Failed",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobFailed,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedPhase: locustv2.PhaseFailed,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			phase := derivePhaseFromJob(tt.job)
+			assert.Equal(t, tt.expectedPhase, phase)
+		})
+	}
+}
+
+// findCondition finds a condition by type in a slice of conditions.
+func findCondition(conditions []metav1.Condition, condType string) *metav1.Condition {
+	for i := range conditions {
+		if conditions[i].Type == condType {
+			return &conditions[i]
+		}
+	}
+	return nil
+}
+
+// TestUpdateStatusFromJobs_FullStateMachine tests all phase transitions.
+func TestUpdateStatusFromJobs_FullStateMachine(t *testing.T) {
+	tests := []struct {
+		name              string
+		initialPhase      locustv2.Phase
+		masterJob         *batchv1.Job
+		workerJob         *batchv1.Job
+		expectedPhase     locustv2.Phase
+		expectedStartTime bool
+		expectedComplete  bool
+		expectedWorkers   int32
+	}{
+		{
+			name:              "no jobs - stays Pending",
+			initialPhase:      locustv2.PhasePending,
+			masterJob:         nil,
+			workerJob:         nil,
+			expectedPhase:     locustv2.PhasePending,
+			expectedStartTime: false,
+			expectedComplete:  false,
+			expectedWorkers:   0,
+		},
+		{
+			name:         "active master job - transitions to Running",
+			initialPhase: locustv2.PhasePending,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 1,
+				},
+			},
+			workerJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 3,
+				},
+			},
+			expectedPhase:     locustv2.PhaseRunning,
+			expectedStartTime: true,
+			expectedComplete:  false,
+			expectedWorkers:   3,
+		},
+		{
+			name:         "completed master job - transitions to Succeeded",
+			initialPhase: locustv2.PhaseRunning,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobComplete,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			workerJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 3,
+				},
+			},
+			expectedPhase:     locustv2.PhaseSucceeded,
+			expectedStartTime: false, // already set
+			expectedComplete:  true,
+			expectedWorkers:   3,
+		},
+		{
+			name:         "failed master job - transitions to Failed",
+			initialPhase: locustv2.PhaseRunning,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobFailed,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			workerJob:         nil,
+			expectedPhase:     locustv2.PhaseFailed,
+			expectedStartTime: false,
+			expectedComplete:  true,
+			expectedWorkers:   0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create test LocustTest
+			lt := &locustv2.LocustTest{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test",
+					Namespace:  "default",
+					Generation: 2,
+				},
+				Spec: locustv2.LocustTestSpec{
+					Worker: locustv2.WorkerSpec{
+						Replicas: 5,
+					},
+				},
+				Status: locustv2.LocustTestStatus{
+					Phase:           tt.initialPhase,
+					ExpectedWorkers: 5,
+				},
+			}
+
+			reconciler, _ := newTestReconciler(lt)
+			ctx := context.Background()
+
+			// Call updateStatusFromJobs
+			err := reconciler.updateStatusFromJobs(ctx, lt, tt.masterJob, tt.workerJob, healthyPodStatus())
+			require.NoError(t, err)
+
+			// Verify phase
+			assert.Equal(t, tt.expectedPhase, lt.Status.Phase)
+
+			// Verify ObservedGeneration
+			assert.Equal(t, int64(2), lt.Status.ObservedGeneration)
+
+			// Verify StartTime
+			if tt.expectedStartTime {
+				assert.NotNil(t, lt.Status.StartTime)
+			}
+
+			// Verify CompletionTime and TestCompleted condition
+			if tt.expectedComplete {
+				assert.NotNil(t, lt.Status.CompletionTime)
+				completedCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypeTestCompleted)
+				require.NotNil(t, completedCond)
+				assert.Equal(t, metav1.ConditionTrue, completedCond.Status)
+				if tt.expectedPhase == locustv2.PhaseSucceeded {
+					assert.Equal(t, locustv2.ReasonTestSucceeded, completedCond.Reason)
+				} else {
+					assert.Equal(t, locustv2.ReasonTestFailed, completedCond.Reason)
+				}
+			}
+
+			// Verify worker connection count
+			assert.Equal(t, tt.expectedWorkers, lt.Status.ConnectedWorkers)
+		})
+	}
+}
+
+// TestUpdateStatusFromJobs_PhaseTransitionEvents verifies events are emitted on phase changes.
+func TestUpdateStatusFromJobs_PhaseTransitionEvents(t *testing.T) {
+	tests := []struct {
+		name          string
+		initialPhase  locustv2.Phase
+		masterJob     *batchv1.Job
+		expectedEvent string
+	}{
+		{
+			name:         "Pending to Running emits TestStarted",
+			initialPhase: locustv2.PhasePending,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 1,
+				},
+			},
+			expectedEvent: "TestStarted",
+		},
+		{
+			name:         "Running to Succeeded emits TestCompleted",
+			initialPhase: locustv2.PhaseRunning,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobComplete,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedEvent: "TestCompleted",
+		},
+		{
+			name:         "Running to Failed emits TestFailed",
+			initialPhase: locustv2.PhaseRunning,
+			masterJob: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobFailed,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedEvent: "TestFailed",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			lt := &locustv2.LocustTest{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test",
+					Namespace:  "default",
+					Generation: 1,
+				},
+				Spec: locustv2.LocustTestSpec{
+					Worker: locustv2.WorkerSpec{
+						Replicas: 3,
+					},
+				},
+				Status: locustv2.LocustTestStatus{
+					Phase:           tt.initialPhase,
+					ExpectedWorkers: 3,
+				},
+			}
+
+			reconciler, recorder := newTestReconciler(lt)
+			ctx := context.Background()
+
+			// Call updateStatusFromJobs
+			err := reconciler.updateStatusFromJobs(ctx, lt, tt.masterJob, nil, healthyPodStatus())
+			require.NoError(t, err)
+
+			// Verify event was emitted
+			// The fake recorder delivers events on a channel; a non-blocking
+			// receive detects whether one was emitted.
+			select {
+			case event := <-recorder.Events:
+				assert.Contains(t, event, tt.expectedEvent)
+			default:
+				t.Errorf("Expected event %s but none was emitted", tt.expectedEvent)
+			}
+		})
+	}
+}
+
+// TestUpdateStatusFromJobs_NoEventOnSamePhase verifies no event is emitted when phase doesn't change.
+func TestUpdateStatusFromJobs_NoEventOnSamePhase(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 3,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 1, // Still running
+		},
+	}
+
+	reconciler, recorder := newTestReconciler(lt)
+	ctx := context.Background()
+
+	// Call updateStatusFromJobs
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, healthyPodStatus())
+	require.NoError(t, err)
+
+	// Verify no event was emitted
+	select {
+	case event := <-recorder.Events:
+		t.Errorf("Expected no event but got: %s", event)
+	default:
+		// Correct - no event emitted
+	}
+
+	// Phase should still be Running
+	assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase)
+}
+
+// TestUpdateStatusFromJobs_ObservedGeneration verifies ObservedGeneration is always updated.
+func TestUpdateStatusFromJobs_ObservedGeneration(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 5, // Higher generation
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 3,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:              locustv2.PhaseRunning,
+			ObservedGeneration: 3, // Lower observed generation
+			ExpectedWorkers:    3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 1,
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	// Call updateStatusFromJobs
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, healthyPodStatus())
+	require.NoError(t, err)
+
+	// Verify ObservedGeneration was updated to match Generation
+	assert.Equal(t, lt.Generation, lt.Status.ObservedGeneration)
+}
+
+// TestDerivePhaseFromJob_TypeSafety verifies typed Phase return.
+func TestDerivePhaseFromJob_TypeSafety(t *testing.T) {
+	tests := []struct {
+		name          string
+		job           *batchv1.Job
+		expectedPhase locustv2.Phase
+	}{
+		{
+			name:          "nil job",
+			job:           nil,
+			expectedPhase: locustv2.PhasePending,
+		},
+		{
+			name: "both Complete and Failed conditions - Complete wins",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Conditions: []batchv1.JobCondition{
+						{
+							Type:   batchv1.JobComplete,
+							Status: corev1.ConditionTrue,
+						},
+						{
+							Type:   batchv1.JobFailed,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedPhase: locustv2.PhaseSucceeded,
+		},
+		{
+			name: "active job",
+			job: &batchv1.Job{
+				Status: batchv1.JobStatus{
+					Active: 1,
+				},
+			},
+			expectedPhase: locustv2.PhaseRunning,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			phase := derivePhaseFromJob(tt.job)
+			assert.Equal(t, tt.expectedPhase, phase)
+
+			// Verify it's the typed Phase type
+			var _ locustv2.Phase = phase //nolint:staticcheck
+		})
+	}
+}
+
+// TestUpdateStatusFromJobs_WorkersConnectedCondition verifies WorkersConnected condition.
+func TestUpdateStatusFromJobs_WorkersConnectedCondition(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 5,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 5,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 1,
+		},
+	}
+
+	workerJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 5, // All workers connected
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	// Call updateStatusFromJobs
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, workerJob, healthyPodStatus())
+	require.NoError(t, err)
+
+	// Verify WorkersConnected condition
+	workersCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypeWorkersConnected)
+	require.NotNil(t, workersCond)
+	assert.Equal(t, metav1.ConditionTrue, workersCond.Status)
+	assert.Equal(t, locustv2.ReasonAllWorkersConnected, workersCond.Reason)
+	assert.Contains(t, workersCond.Message, "5/5 workers connected")
+}
+
+// TestUpdateStatusFromJobs_SpecDriftedCondition verifies SpecDrifted condition is set when Generation > 1.
+func TestUpdateStatusFromJobs_SpecDriftedCondition(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 3, // Simulating spec edits
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 5,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:              locustv2.PhaseRunning,
+			ObservedGeneration: 1,
+			ExpectedWorkers:    5,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 1,
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	// Call updateStatusFromJobs
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, healthyPodStatus())
+	require.NoError(t, err)
+
+	// Verify SpecDrifted condition exists with ConditionTrue
+	specDriftedCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypeSpecDrifted)
+	require.NotNil(t, specDriftedCond)
+	assert.Equal(t, metav1.ConditionTrue, specDriftedCond.Status)
+	assert.Equal(t, locustv2.ReasonSpecChangeIgnored, specDriftedCond.Reason)
+	assert.Contains(t, specDriftedCond.Message, "Delete and recreate")
+}
+
+// TestUpdateStatusFromJobs_NoSpecDriftedOnGeneration1 verifies SpecDrifted condition is NOT set when Generation == 1.
+func TestUpdateStatusFromJobs_NoSpecDriftedOnGeneration1(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1, // No spec edits
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 5,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:              locustv2.PhaseRunning,
+			ObservedGeneration: 1,
+			ExpectedWorkers:    5,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Active: 1,
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	// Call updateStatusFromJobs
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, healthyPodStatus())
+	require.NoError(t, err)
+
+	// Verify SpecDrifted condition does NOT exist
+	specDriftedCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypeSpecDrifted)
+	assert.Nil(t, specDriftedCond)
+}
+
+// TestUpdateStatusFromJobs_RetryOnConflict verifies that updateStatusFromJobs retries on 409 Conflict.
+func TestUpdateStatusFromJobs_RetryOnConflict(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{
+				Replicas: 3,
+			},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	scheme := newTestScheme()
+	fakeClient := fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(lt).
+		WithStatusSubresource(&locustv2.LocustTest{}).
+		Build()
+	recorder := record.NewFakeRecorder(10)
+
+	// Wrap the fake client so the first N status updates return 409 Conflict.
+	cc := &conflictOnUpdateClient{
+		Client:        fakeClient,
+		conflictCount: 2, // fail twice, succeed on third attempt
+	}
+
+	reconciler := &LocustTestReconciler{
+		Client:   cc,
+		Scheme:   scheme,
+		Config:   newTestOperatorConfig(),
+		Recorder: recorder,
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{Active: 1},
+	}
+
+	ctx := context.Background()
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, healthyPodStatus())
+	require.NoError(t, err)
+
+	assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase)
+	assert.Equal(t, 3, cc.updateCalls, "Expected 2 conflicts + 1 successful update")
+}
+
+// TestUpdateStatusFromJobs_PodHealthUnhealthy_TransitionsToFailed verifies an
+// expired-grace-period pod failure forces the Failed phase and emits both
+// TestFailed and PodFailure events.
+func TestUpdateStatusFromJobs_PodHealthUnhealthy_TransitionsToFailed(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{Replicas: 3},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{Active: 1}, // Job still running
+	}
+
+	unhealthyStatus := PodHealthStatus{
+		Healthy: false,
+		Reason:  locustv2.ReasonPodConfigError,
+		Message: "ConfigurationError: 1 pod(s) affected [test-master-abc]: ConfigMap not found (expected: my-configmap)",
+		FailedPods: []PodFailureInfo{
+			{Name: "test-master-abc", FailureType: locustv2.ReasonPodConfigError, ErrorMessage: "ConfigMap not found"},
+		},
+	}
+
+	reconciler, recorder := newTestReconciler(lt)
+	ctx := context.Background()
+
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, unhealthyStatus)
+	require.NoError(t, err)
+
+	// Phase should transition to Failed
+	assert.Equal(t, locustv2.PhaseFailed, lt.Status.Phase)
+
+	// PodsHealthy condition should be False
+	podsHealthyCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCond)
+	assert.Equal(t, metav1.ConditionFalse, podsHealthyCond.Status)
+	assert.Equal(t, locustv2.ReasonPodConfigError, podsHealthyCond.Reason)
+	assert.Contains(t, podsHealthyCond.Message, "ConfigMap not found")
+
+	// Two events should be emitted: TestFailed and PodFailure
+	var events []string
+	for i := 0; i < 2; i++ {
+		select {
+		case event := <-recorder.Events:
+			events = append(events, event)
+		default:
+		}
+	}
+	require.Len(t, events, 2, "Expected 2 events: TestFailed and PodFailure")
+
+	// Check that both events were emitted
+	hasTestFailed := false
+	hasPodFailure := false
+	for _, event := range events {
+		if strings.Contains(event, "TestFailed") {
+			hasTestFailed = true
+		}
+		if strings.Contains(event, "PodFailure") {
+			hasPodFailure = true
+		}
+	}
+	assert.True(t, hasTestFailed, "Expected TestFailed event")
+	assert.True(t, hasPodFailure, "Expected PodFailure event")
+}
+
+// TestUpdateStatusFromJobs_PodHealthUnhealthy_DoesNotOverrideTerminalState
+// verifies a completed Job keeps the Succeeded phase even when pods report
+// unhealthy afterwards.
+func TestUpdateStatusFromJobs_PodHealthUnhealthy_DoesNotOverrideTerminalState(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{Replicas: 3},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	// Master Job has completed successfully
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{
+			Conditions: []batchv1.JobCondition{
+				{
+					Type:   batchv1.JobComplete,
+					Status: corev1.ConditionTrue,
+				},
+			},
+		},
+	}
+
+	// Pods are unhealthy (e.g., worker pods terminating after completion)
+	unhealthyStatus := PodHealthStatus{
+		Healthy: false,
+		Reason:  locustv2.ReasonPodCrashLoop,
+		Message: "CrashLoopBackOff: 1 pod(s) affected [test-worker-xyz]: Container failed",
+		FailedPods: []PodFailureInfo{
+			{Name: "test-worker-xyz", FailureType: locustv2.ReasonPodCrashLoop, ErrorMessage: "Container failed"},
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, unhealthyStatus)
+	require.NoError(t, err)
+
+	// Phase should be Succeeded (not Failed), as terminal state takes precedence
+	assert.Equal(t, locustv2.PhaseSucceeded, lt.Status.Phase)
+
+	// PodsHealthy condition should still be set as informational
+	podsHealthyCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCond)
+	assert.Equal(t, metav1.ConditionFalse, podsHealthyCond.Status)
+	assert.Equal(t, locustv2.ReasonPodCrashLoop, podsHealthyCond.Reason)
+}
+
+// TestUpdateStatusFromJobs_PodHealthImagePullError verifies image-pull
+// failures fail the test and surface the affected pods in the condition.
+func TestUpdateStatusFromJobs_PodHealthImagePullError(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{Replicas: 3},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{Active: 1},
+	}
+
+	unhealthyStatus := PodHealthStatus{
+		Healthy: false,
+		Reason:  locustv2.ReasonPodImagePullError,
+		Message: "ImagePullError: 2 pod(s) affected [test-worker-1, test-worker-2]: Failed to pull image locustio/locust:nonexistent",
+		FailedPods: []PodFailureInfo{
+			{Name: "test-worker-1", FailureType: locustv2.ReasonPodImagePullError, ErrorMessage: "Failed to pull image"},
+			{Name: "test-worker-2", FailureType: locustv2.ReasonPodImagePullError, ErrorMessage: "Failed to pull image"},
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, unhealthyStatus)
+	require.NoError(t, err)
+
+	// Phase should transition to Failed
+	assert.Equal(t, locustv2.PhaseFailed, lt.Status.Phase)
+
+	// PodsHealthy condition should have correct reason
+	podsHealthyCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCond)
+	assert.Equal(t, metav1.ConditionFalse, podsHealthyCond.Status)
+	assert.Equal(t, locustv2.ReasonPodImagePullError, podsHealthyCond.Reason)
+	assert.Contains(t, podsHealthyCond.Message, "ImagePullError")
+	assert.Contains(t, podsHealthyCond.Message, "test-worker-1")
+}
+
+// TestUpdateStatusFromJobs_PodHealthCrashLoop verifies a crash-looping pod
+// fails the test with the CrashLoop reason.
+func TestUpdateStatusFromJobs_PodHealthCrashLoop(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{Replicas: 3},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{Active: 1},
+	}
+
+	unhealthyStatus := PodHealthStatus{
+		Healthy: false,
+		Reason:  locustv2.ReasonPodCrashLoop,
+		Message: "CrashLoopBackOff: 1 pod(s) affected [test-master-xyz]: Container exited with code 1",
+		FailedPods: []PodFailureInfo{
+			{Name: "test-master-xyz", FailureType: locustv2.ReasonPodCrashLoop, ErrorMessage: "Container exited with code 1"},
+		},
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, unhealthyStatus)
+	require.NoError(t, err)
+
+	// Phase should transition to Failed
+	assert.Equal(t, locustv2.PhaseFailed, lt.Status.Phase)
+
+	// PodsHealthy condition should have correct reason
+	podsHealthyCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCond)
+	assert.Equal(t, metav1.ConditionFalse, podsHealthyCond.Status)
+	assert.Equal(t, locustv2.ReasonPodCrashLoop, podsHealthyCond.Reason)
+	assert.Contains(t, podsHealthyCond.Message, "CrashLoopBackOff")
+}
+
+// TestUpdateStatusFromJobs_PodHealthInGracePeriod verifies pods still inside
+// the startup grace period keep the test Running with a healthy condition.
+func TestUpdateStatusFromJobs_PodHealthInGracePeriod(t *testing.T) {
+	lt := &locustv2.LocustTest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test",
+			Namespace:  "default",
+			Generation: 1,
+		},
+		Spec: locustv2.LocustTestSpec{
+			Worker: locustv2.WorkerSpec{Replicas: 3},
+		},
+		Status: locustv2.LocustTestStatus{
+			Phase:           locustv2.PhaseRunning,
+			ExpectedWorkers: 3,
+		},
+	}
+
+	masterJob := &batchv1.Job{
+		Status: batchv1.JobStatus{Active: 1},
+	}
+
+	// Pods in grace period - still healthy
+	gracePeriodStatus := PodHealthStatus{
+		Healthy:       true,
+		InGracePeriod: true,
+		Reason:        locustv2.ReasonPodsStarting,
+		Message:       "Pods are starting up",
+	}
+
+	reconciler, _ := newTestReconciler(lt)
+	ctx := context.Background()
+
+	err := reconciler.updateStatusFromJobs(ctx, lt, masterJob, nil, gracePeriodStatus)
+	require.NoError(t, err)
+
+	// Phase should stay Running
+	assert.Equal(t, locustv2.PhaseRunning, lt.Status.Phase)
+
+	// PodsHealthy condition should be True with starting reason
+	podsHealthyCond := findCondition(lt.Status.Conditions, locustv2.ConditionTypePodsHealthy)
+	require.NotNil(t, podsHealthyCond)
+	assert.Equal(t, metav1.ConditionTrue, podsHealthyCond.Status)
+	assert.Equal(t, locustv2.ReasonPodsStarting, podsHealthyCond.Reason)
+	assert.Contains(t, podsHealthyCond.Message, "starting")
+}
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
new file mode 100644
index 00000000..283f5a40
--- /dev/null
+++ b/internal/controller/suite_test.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + locustv1 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v1" + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +const ( + timeout = time.Second * 10 + interval = time.Millisecond * 250 +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Integration Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + // Use the main CRD with v2 as storage version. + // The controller now uses v2 API directly, so no conversion is needed. 
+ testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } + + var err error + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + // Register schemes + err = locustv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = locustv2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = batchv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = corev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + // Create manager + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", // Disable metrics for tests + }, + }) + Expect(err).NotTo(HaveOccurred()) + + // Setup reconciler with manager + operatorConfig, err := config.LoadConfig() + Expect(err).NotTo(HaveOccurred()) + err = (&LocustTestReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Config: operatorConfig, + Recorder: k8sManager.GetEventRecorderFor("locust-controller"), + }).SetupWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + // Start manager in background goroutine + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to run manager") + }() + + // Create direct client for test assertions + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +// 
getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. +func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/internal/resources/.gitkeep b/internal/resources/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/internal/resources/command.go b/internal/resources/command.go new file mode 100644 index 00000000..a8de7082 --- /dev/null +++ b/internal/resources/command.go @@ -0,0 +1,141 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "strings" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/go-logr/logr" +) + +// operatorManagedFlags is the registry of flags managed by the operator. +// Users should not override these in extraArgs, but if they do, their value takes precedence. +var operatorManagedFlags = map[string]bool{ + "--master": true, + "--worker": true, + "--master-port": true, + "--master-host": true, + "--expect-workers": true, + "--autostart": true, + "--autoquit": true, + "--otel": true, + "--enable-rebalancing": true, + "--only-summary": true, +} + +// detectFlagConflicts checks if extraArgs contain operator-managed flags. +// Returns a slice of conflicting arguments. +func detectFlagConflicts(extraArgs []string) []string { + var conflicts []string + for _, arg := range extraArgs { + // Check if arg matches a known operator-managed flag + // Handle both "--flag=value" and "--flag value" forms + for flag := range operatorManagedFlags { + if arg == flag || strings.HasPrefix(arg, flag+"=") { + conflicts = append(conflicts, arg) + break + } + } + } + return conflicts +} + +// BuildMasterCommand constructs the command arguments for the master node. +// Uses MasterSpec configuration and appends extraArgs after operator-managed flags. +func BuildMasterCommand(masterSpec *locustv2.MasterSpec, workerReplicas int32, otelEnabled bool, logger logr.Logger) []string { + var cmdParts []string + // Split command seed into individual args at append time + cmdParts = append(cmdParts, strings.Fields(masterSpec.Command)...) 
+ + // Add --otel flag if enabled (must come before other flags) + if otelEnabled { + cmdParts = append(cmdParts, "--otel") + } + + cmdParts = append(cmdParts, + "--master", + fmt.Sprintf("--master-port=%d", MasterPort), + fmt.Sprintf("--expect-workers=%d", workerReplicas), + ) + + // Add --autostart if enabled (default: true) + if masterSpec.Autostart == nil || *masterSpec.Autostart { + cmdParts = append(cmdParts, "--autostart") + } + + // Add --autoquit if enabled (default: enabled with 60s timeout) + if masterSpec.Autoquit == nil || masterSpec.Autoquit.Enabled { + timeout := int32(60) // default + if masterSpec.Autoquit != nil && masterSpec.Autoquit.Timeout >= 0 { + timeout = masterSpec.Autoquit.Timeout + } + cmdParts = append(cmdParts, "--autoquit", fmt.Sprintf("%d", timeout)) + } + + cmdParts = append(cmdParts, + "--enable-rebalancing", + "--only-summary", + ) + + // Append extraArgs after operator-managed flags (user flags take precedence via POSIX last-occurrence-wins) + if len(masterSpec.ExtraArgs) > 0 { + conflicts := detectFlagConflicts(masterSpec.ExtraArgs) + if len(conflicts) > 0 { + logger.Info("User-provided extraArgs override operator-managed flags", + "conflicts", conflicts, + "behavior", "user value takes precedence") + } + cmdParts = append(cmdParts, masterSpec.ExtraArgs...) + } + + return cmdParts +} + +// BuildWorkerCommand constructs the command arguments for worker nodes. +// Template: "{seed} [--otel] --worker --master-port=5557 --master-host={master-name} [extraArgs...]" +func BuildWorkerCommand(commandSeed string, masterHost string, otelEnabled bool, extraArgs []string, logger logr.Logger) []string { + var cmdParts []string + // Split command seed into individual args at append time + cmdParts = append(cmdParts, strings.Fields(commandSeed)...) 
+ + // Add --otel flag if enabled (must come before other flags) + if otelEnabled { + cmdParts = append(cmdParts, "--otel") + } + + cmdParts = append(cmdParts, + "--worker", + fmt.Sprintf("--master-port=%d", MasterPort), + fmt.Sprintf("--master-host=%s", masterHost), + ) + + // Append extraArgs after operator-managed flags (user flags take precedence via POSIX last-occurrence-wins) + if len(extraArgs) > 0 { + conflicts := detectFlagConflicts(extraArgs) + if len(conflicts) > 0 { + logger.Info("User-provided extraArgs override operator-managed flags", + "mode", "worker", + "conflicts", conflicts, + "behavior", "user value takes precedence") + } + cmdParts = append(cmdParts, extraArgs...) + } + + return cmdParts +} diff --git a/internal/resources/command_test.go b/internal/resources/command_test.go new file mode 100644 index 00000000..84538ffb --- /dev/null +++ b/internal/resources/command_test.go @@ -0,0 +1,457 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "strings" + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +const testCommandSeed = "locust -f /lotest/src/test.py" +const testMasterHost = "my-test-master" + +// Helper to create a default MasterSpec for testing +func testMasterSpec() *locustv2.MasterSpec { + return &locustv2.MasterSpec{ + Command: testCommandSeed, + Autostart: ptr.To(true), + Autoquit: &locustv2.AutoquitConfig{Enabled: true, Timeout: 60}, + } +} + +func TestBuildMasterCommand(t *testing.T) { + workerReplicas := int32(5) + masterSpec := testMasterSpec() + + cmd := BuildMasterCommand(masterSpec, workerReplicas, false, logr.Discard()) + + // Verify all expected flags are present + assert.Contains(t, cmd, "locust") + assert.Contains(t, cmd, "-f") + assert.Contains(t, cmd, "/lotest/src/test.py") + assert.Contains(t, cmd, "--master") + assert.Contains(t, cmd, "--master-port=5557") + assert.Contains(t, cmd, "--expect-workers=5") + assert.Contains(t, cmd, "--autostart") + assert.Contains(t, cmd, "--autoquit") + assert.Contains(t, cmd, "60") + assert.Contains(t, cmd, "--enable-rebalancing") + assert.Contains(t, cmd, "--only-summary") +} + +func TestBuildMasterCommand_SplitsCorrectly(t *testing.T) { + masterSpec := &locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + Autostart: ptr.To(true), + } + workerReplicas := int32(3) + + cmd := BuildMasterCommand(masterSpec, workerReplicas, false, logr.Discard()) + + // strings.Fields handles multiple spaces correctly + assert.Equal(t, "locust", cmd[0]) + assert.Equal(t, "-f", cmd[1]) + assert.Equal(t, "/lotest/src/test.py", cmd[2]) +} + +func TestBuildWorkerCommand(t *testing.T) { + masterHost := testMasterHost + + cmd := BuildWorkerCommand(testCommandSeed, masterHost, false, nil, logr.Discard()) + + // Verify all expected flags are present + assert.Contains(t, cmd, "locust") + 
assert.Contains(t, cmd, "-f") + assert.Contains(t, cmd, "/lotest/src/test.py") + assert.Contains(t, cmd, "--worker") + assert.Contains(t, cmd, "--master-port=5557") + assert.Contains(t, cmd, "--master-host="+testMasterHost) +} + +func TestBuildWorkerCommand_MasterHostCorrect(t *testing.T) { + masterHost := "team-a-load-test-master" + + cmd := BuildWorkerCommand(testCommandSeed, masterHost, false, nil, logr.Discard()) + + // Find the master-host flag + found := false + for _, arg := range cmd { + if arg == "--master-host=team-a-load-test-master" { + found = true + break + } + } + assert.True(t, found, "master-host flag should contain the correct master host") +} + +// ===== OTel Flag Tests ===== + +func TestBuildMasterCommand_OTelDisabled(t *testing.T) { + workerReplicas := int32(3) + masterSpec := testMasterSpec() + + cmd := BuildMasterCommand(masterSpec, workerReplicas, false, logr.Discard()) + + // --otel flag should NOT be present + assert.NotContains(t, cmd, "--otel") +} + +func TestBuildMasterCommand_OTelEnabled(t *testing.T) { + workerReplicas := int32(3) + masterSpec := testMasterSpec() + + cmd := BuildMasterCommand(masterSpec, workerReplicas, true, logr.Discard()) + + // --otel flag should be present + assert.Contains(t, cmd, "--otel") +} + +func TestBuildMasterCommand_OTelFlagPosition(t *testing.T) { + workerReplicas := int32(3) + masterSpec := testMasterSpec() + + cmd := BuildMasterCommand(masterSpec, workerReplicas, true, logr.Discard()) + + // Find positions of --otel and --master + otelIndex := -1 + masterIndex := -1 + for i, arg := range cmd { + if arg == "--otel" { + otelIndex = i + } + if arg == "--master" { + masterIndex = i + } + } + + // --otel should appear before --master + assert.NotEqual(t, -1, otelIndex, "--otel flag should be present") + assert.NotEqual(t, -1, masterIndex, "--master flag should be present") + assert.Less(t, otelIndex, masterIndex, "--otel should appear before --master") +} + +// ===== Autostart/Autoquit Tests ===== + +func 
TestBuildMasterCommand_AutostartDisabled(t *testing.T) { + masterSpec := &locustv2.MasterSpec{ + Command: testCommandSeed, + Autostart: ptr.To(false), + } + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + assert.NotContains(t, cmd, "--autostart") +} + +func TestBuildMasterCommand_AutostartDefault(t *testing.T) { + // When Autostart is nil, default to true + masterSpec := &locustv2.MasterSpec{ + Command: testCommandSeed, + } + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + assert.Contains(t, cmd, "--autostart") +} + +func TestBuildMasterCommand_AutoquitDisabled(t *testing.T) { + masterSpec := &locustv2.MasterSpec{ + Command: testCommandSeed, + Autostart: ptr.To(true), + Autoquit: &locustv2.AutoquitConfig{Enabled: false}, + } + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + assert.NotContains(t, cmd, "--autoquit") +} + +func TestBuildMasterCommand_AutoquitCustomTimeout(t *testing.T) { + masterSpec := &locustv2.MasterSpec{ + Command: testCommandSeed, + Autostart: ptr.To(true), + Autoquit: &locustv2.AutoquitConfig{Enabled: true, Timeout: 120}, + } + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + assert.Contains(t, cmd, "--autoquit") + assert.Contains(t, cmd, "120") + assert.NotContains(t, cmd, "60") +} + +func TestBuildMasterCommand_AutoquitDefault(t *testing.T) { + // When Autoquit is nil, default to enabled with 60s timeout + masterSpec := &locustv2.MasterSpec{ + Command: testCommandSeed, + } + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + assert.Contains(t, cmd, "--autoquit") + assert.Contains(t, cmd, "60") +} + +func TestBuildWorkerCommand_OTelDisabled(t *testing.T) { + masterHost := testMasterHost + + cmd := BuildWorkerCommand(testCommandSeed, masterHost, false, nil, logr.Discard()) + + // --otel flag should NOT be present + assert.NotContains(t, cmd, "--otel") +} + +func TestBuildWorkerCommand_OTelEnabled(t *testing.T) { + masterHost := 
testMasterHost + + cmd := BuildWorkerCommand(testCommandSeed, masterHost, true, nil, logr.Discard()) + + // --otel flag should be present + assert.Contains(t, cmd, "--otel") +} + +func TestBuildWorkerCommand_OTelFlagPosition(t *testing.T) { + masterHost := testMasterHost + + cmd := BuildWorkerCommand(testCommandSeed, masterHost, true, nil, logr.Discard()) + + // Find positions of --otel and --worker + otelIndex := -1 + workerIndex := -1 + for i, arg := range cmd { + if arg == "--otel" { + otelIndex = i + } + if arg == "--worker" { + workerIndex = i + } + } + + // --otel should appear before --worker + assert.NotEqual(t, -1, otelIndex, "--otel flag should be present") + assert.NotEqual(t, -1, workerIndex, "--worker flag should be present") + assert.Less(t, otelIndex, workerIndex, "--otel should appear before --worker") +} + +// ===== ExtraArgs Tests ===== + +func TestBuildMasterCommand_WithExtraArgs(t *testing.T) { + masterSpec := testMasterSpec() + masterSpec.ExtraArgs = []string{"--csv=results", "--users=100"} + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + // ExtraArgs should be present in command + assert.Contains(t, cmd, "--csv=results") + assert.Contains(t, cmd, "--users=100") + + // ExtraArgs should come after operator-managed flags + onlySummaryIndex := -1 + csvIndex := -1 + for i, arg := range cmd { + if arg == "--only-summary" { + onlySummaryIndex = i + } + if arg == "--csv=results" { + csvIndex = i + } + } + assert.Greater(t, csvIndex, onlySummaryIndex, "extraArgs should come after --only-summary") +} + +func TestBuildMasterCommand_WithExtraArgsNil(t *testing.T) { + masterSpec := testMasterSpec() + masterSpec.ExtraArgs = nil + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + // Command should be identical to behavior without extraArgs + assert.Contains(t, cmd, "--master") + assert.Contains(t, cmd, "--only-summary") +} + +func TestBuildMasterCommand_WithExtraArgsEmpty(t *testing.T) { + masterSpec := 
testMasterSpec() + masterSpec.ExtraArgs = []string{} + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + // Command should be identical to behavior without extraArgs + assert.Contains(t, cmd, "--master") + assert.Contains(t, cmd, "--only-summary") +} + +func TestBuildMasterCommand_WithConflictingExtraArgs(t *testing.T) { + masterSpec := testMasterSpec() + masterSpec.ExtraArgs = []string{"--master-port=9999"} + + cmd := BuildMasterCommand(masterSpec, 3, false, logr.Discard()) + + // Command should contain both operator flag and user flag + // User flag comes last, so it wins per POSIX behavior + assert.Contains(t, cmd, "--master-port=5557") + assert.Contains(t, cmd, "--master-port=9999") + + // Find indices to verify order + operatorIndex := -1 + userIndex := -1 + for i, arg := range cmd { + if arg == "--master-port=5557" { + operatorIndex = i + } + if arg == "--master-port=9999" { + userIndex = i + } + } + assert.Greater(t, userIndex, operatorIndex, "user's flag should come after operator's flag") +} + +func TestBuildWorkerCommand_WithExtraArgs(t *testing.T) { + extraArgs := []string{"--csv=results"} + + cmd := BuildWorkerCommand(testCommandSeed, testMasterHost, false, extraArgs, logr.Discard()) + + // ExtraArgs should be present in command + assert.Contains(t, cmd, "--csv=results") + + // ExtraArgs should come after operator-managed flags + masterHostFlag := "--master-host=" + testMasterHost + masterHostIndex := -1 + csvIndex := -1 + for i, arg := range cmd { + if arg == masterHostFlag { + masterHostIndex = i + } + if arg == "--csv=results" { + csvIndex = i + } + } + assert.Greater(t, csvIndex, masterHostIndex, "extraArgs should come after --master-host") +} + +func TestBuildWorkerCommand_WithConflictingExtraArgs(t *testing.T) { + extraArgs := []string{"--worker", "--master-host=evil"} + + cmd := BuildWorkerCommand(testCommandSeed, testMasterHost, false, extraArgs, logr.Discard()) + + // Command should contain both operator flags and user flags 
+ assert.Contains(t, cmd, "--worker") + assert.Contains(t, cmd, "--master-host="+testMasterHost) + assert.Contains(t, cmd, "--master-host=evil") +} + +func TestDetectFlagConflicts_WithConflict(t *testing.T) { + extraArgs := []string{"--master-port=9999"} + + conflicts := detectFlagConflicts(extraArgs) + + assert.Equal(t, []string{"--master-port=9999"}, conflicts) +} + +func TestDetectFlagConflicts_WithoutConflict(t *testing.T) { + extraArgs := []string{"--csv=results"} + + conflicts := detectFlagConflicts(extraArgs) + + assert.Empty(t, conflicts) +} + +func TestDetectFlagConflicts_WithOtelConflict(t *testing.T) { + extraArgs := []string{"--otel"} + + conflicts := detectFlagConflicts(extraArgs) + + assert.Equal(t, []string{"--otel"}, conflicts) +} + +func TestDetectFlagConflicts_MultipleConflicts(t *testing.T) { + extraArgs := []string{"--master-port=9999", "--csv=results", "--worker"} + + conflicts := detectFlagConflicts(extraArgs) + + assert.Len(t, conflicts, 2) + assert.Contains(t, conflicts, "--master-port=9999") + assert.Contains(t, conflicts, "--worker") +} + +// TestBuildMasterCommand_LogsWarningOnConflict verifies that conflict warnings are logged +func TestBuildMasterCommand_LogsWarningOnConflict(t *testing.T) { + // Use a test logger that captures log output + var logBuffer strings.Builder + logger := logr.New(&testLogSink{writer: &logBuffer}) + + masterSpec := testMasterSpec() + masterSpec.ExtraArgs = []string{"--master-port=9999"} + + BuildMasterCommand(masterSpec, 3, false, logger) + + // Verify that warning was logged (exact message match not required, just presence) + logOutput := logBuffer.String() + assert.Contains(t, logOutput, "extraArgs override operator-managed flags", "should log conflict warning") +} + +func TestBuildWorkerCommand_LogsWarningOnConflict(t *testing.T) { + var logBuffer strings.Builder + logger := logr.New(&testLogSink{writer: &logBuffer}) + + extraArgs := []string{"--worker"} + + BuildWorkerCommand(testCommandSeed, 
testMasterHost, false, extraArgs, logger) + + logOutput := logBuffer.String() + assert.Contains(t, logOutput, "extraArgs override operator-managed flags", "should log conflict warning") +} + +// testLogSink is a simple logr.LogSink implementation for testing +type testLogSink struct { + writer *strings.Builder +} + +func (t *testLogSink) Init(info logr.RuntimeInfo) {} + +func (t *testLogSink) Enabled(level int) bool { + return true +} + +func (t *testLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + t.writer.WriteString(msg) + for i := 0; i < len(keysAndValues); i += 2 { + if i+1 < len(keysAndValues) { + t.writer.WriteString(" ") + t.writer.WriteString(keysAndValues[i].(string)) + t.writer.WriteString("=") + } + } +} + +func (t *testLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + t.writer.WriteString("ERROR: ") + t.writer.WriteString(msg) +} + +func (t *testLogSink) WithValues(keysAndValues ...interface{}) logr.LogSink { + return t +} + +func (t *testLogSink) WithName(name string) logr.LogSink { + return t +} diff --git a/internal/resources/constants.go b/internal/resources/constants.go new file mode 100644 index 00000000..2ea59fd2 --- /dev/null +++ b/internal/resources/constants.go @@ -0,0 +1,123 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// Port constants matching Java Constants.java +const ( + // MasterPort is the port for master-worker communication. 
+ MasterPort = 5557 + // MasterBindPort is the secondary port for master binding. + MasterBindPort = 5558 + // WebUIPort is the Locust web UI port. + WebUIPort = 8089 + // WorkerPort is the port exposed by worker nodes. + WorkerPort = 8080 + // DefaultMetricsExporterPort is the default port for the Prometheus metrics exporter. + DefaultMetricsExporterPort = 9646 +) + +// Mount path constants +const ( + // DefaultMountPath is the default path where ConfigMap is mounted. + DefaultMountPath = "/lotest/src" + // LibMountPath is the path where the lib ConfigMap is mounted. + LibMountPath = "/opt/locust/lib" +) + +// Label constants +const ( + // LabelTestName is the label key for the performance test name. + LabelTestName = "performance-test-name" + // LabelPodName is the label key for the pod name (used as service selector). + LabelPodName = "performance-test-pod-name" + // LabelManagedBy is the label key indicating the managing operator. + LabelManagedBy = "managed-by" + // ManagedByValue is the value for the managed-by label. + ManagedByValue = "locust-k8s-operator" + // LabelApp is the app label key. + LabelApp = "app" +) + +// Prometheus annotation constants +const ( + // AnnotationPrometheusScrape enables Prometheus scraping. + AnnotationPrometheusScrape = "prometheus.io/scrape" + // AnnotationPrometheusPath specifies the metrics endpoint path. + AnnotationPrometheusPath = "prometheus.io/path" + // AnnotationPrometheusPort specifies the metrics port. + AnnotationPrometheusPort = "prometheus.io/port" + // MetricsEndpointPath is the path for the metrics endpoint. + MetricsEndpointPath = "/metrics" +) + +// Job constants +const ( + // BackoffLimit is the number of retries before marking a job as failed. + BackoffLimit = 0 + // MasterReplicaCount is the fixed replica count for master (always 1). + MasterReplicaCount = 1 +) + +// Container constants +const ( + // MetricsExporterContainerName is the name of the metrics exporter sidecar. 
+ MetricsExporterContainerName = "locust-metrics-exporter" + // LibVolumeName is the name of the lib volume. + LibVolumeName = "locust-lib" +) + +// Exporter environment variable constants +const ( + // ExporterURIEnvVar is the environment variable for the exporter URI. + ExporterURIEnvVar = "LOCUST_EXPORTER_URI" + // ExporterPortEnvVar is the environment variable for the exporter listen address. + ExporterPortEnvVar = "LOCUST_EXPORTER_WEB_LISTEN_ADDRESS" +) + +// Kafka environment variable constants +const ( + // EnvKafkaBootstrapServers is the Kafka bootstrap servers env var name. + EnvKafkaBootstrapServers = "KAFKA_BOOTSTRAP_SERVERS" + // EnvKafkaSecurityEnabled is the Kafka security enabled env var name. + EnvKafkaSecurityEnabled = "KAFKA_SECURITY_ENABLED" + // EnvKafkaSecurityProtocol is the Kafka security protocol env var name. + EnvKafkaSecurityProtocol = "KAFKA_SECURITY_PROTOCOL_CONFIG" + // EnvKafkaSaslMechanism is the Kafka SASL mechanism env var name. + EnvKafkaSaslMechanism = "KAFKA_SASL_MECHANISM" + // EnvKafkaSaslJaasConfig is the Kafka SASL JAAS config env var name. + EnvKafkaSaslJaasConfig = "KAFKA_SASL_JAAS_CONFIG" + // EnvKafkaUsername is the Kafka username env var name. + EnvKafkaUsername = "KAFKA_USERNAME" + // EnvKafkaPassword is the Kafka password env var name. + EnvKafkaPassword = "KAFKA_PASSWORD" +) + +// Service constants +const ( + // ProtocolTCP is the TCP protocol string. + ProtocolTCP = "TCP" + // PortNamePrefix is the prefix for port names. + PortNamePrefix = "port" + // MetricsPortName is the name for the metrics port. + MetricsPortName = "prometheus-metrics" +) + +// Node affinity constants +const ( + // DefaultNodeMatchExpressionOperator is the default operator for node selector requirements. + DefaultNodeMatchExpressionOperator = "In" +) diff --git a/internal/resources/env.go b/internal/resources/env.go new file mode 100644 index 00000000..2693f35a --- /dev/null +++ b/internal/resources/env.go @@ -0,0 +1,154 @@ +/* +Copyright 2026. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "strconv" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + corev1 "k8s.io/api/core/v1" +) + +// BuildEnvFrom creates EnvFromSource entries from ConfigMap and Secret refs. +// Returns envFrom slice for container spec. +func BuildEnvFrom(lt *locustv2.LocustTest) []corev1.EnvFromSource { + if lt.Spec.Env == nil { + return nil + } + + // Return nil if there are no refs to process + if len(lt.Spec.Env.ConfigMapRefs) == 0 && len(lt.Spec.Env.SecretRefs) == 0 { + return nil + } + + envFrom := make([]corev1.EnvFromSource, 0, len(lt.Spec.Env.ConfigMapRefs)+len(lt.Spec.Env.SecretRefs)) + + // Process ConfigMapRefs + for _, cmRef := range lt.Spec.Env.ConfigMapRefs { + envFrom = append(envFrom, corev1.EnvFromSource{ + Prefix: cmRef.Prefix, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cmRef.Name, + }, + }, + }) + } + + // Process SecretRefs + for _, secretRef := range lt.Spec.Env.SecretRefs { + envFrom = append(envFrom, corev1.EnvFromSource{ + Prefix: secretRef.Prefix, + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretRef.Name, + }, + }, + }) + } + + return envFrom +} + +// BuildUserEnvVars creates EnvVar entries from the variables list. +// These are appended to the existing Kafka env vars. 
+func BuildUserEnvVars(lt *locustv2.LocustTest) []corev1.EnvVar { + if lt.Spec.Env == nil || len(lt.Spec.Env.Variables) == 0 { + return nil + } + + // Return a copy to avoid mutating the original + result := make([]corev1.EnvVar, len(lt.Spec.Env.Variables)) + copy(result, lt.Spec.Env.Variables) + return result +} + +// BuildKafkaEnvVars creates the Kafka environment variables for the Locust container. +func BuildKafkaEnvVars(cfg *config.OperatorConfig) []corev1.EnvVar { + return []corev1.EnvVar{ + {Name: EnvKafkaBootstrapServers, Value: cfg.KafkaBootstrapServers}, + {Name: EnvKafkaSecurityEnabled, Value: strconv.FormatBool(cfg.KafkaSecurityEnabled)}, + {Name: EnvKafkaSecurityProtocol, Value: cfg.KafkaSecurityProtocol}, + {Name: EnvKafkaSaslMechanism, Value: cfg.KafkaSaslMechanism}, + {Name: EnvKafkaSaslJaasConfig, Value: cfg.KafkaSaslJaasConfig}, + {Name: EnvKafkaUsername, Value: cfg.KafkaUsername}, + {Name: EnvKafkaPassword, Value: cfg.KafkaPassword}, + } +} + +// BuildEnvVars combines Kafka env vars, OTel env vars, and user-defined env vars. +func BuildEnvVars(lt *locustv2.LocustTest, cfg *config.OperatorConfig) []corev1.EnvVar { + // Start with Kafka env vars (existing behavior) + envVars := BuildKafkaEnvVars(cfg) + + // Add OTel environment variables if enabled + otelEnvVars := BuildOTelEnvVars(lt) + if len(otelEnvVars) > 0 { + envVars = append(envVars, otelEnvVars...) + } + + // Append user-defined variables + userVars := BuildUserEnvVars(lt) + if len(userVars) > 0 { + envVars = append(envVars, userVars...) + } + + return envVars +} + +// BuildSecretVolumes creates Volume entries for secret mounts. 
+func BuildSecretVolumes(lt *locustv2.LocustTest) []corev1.Volume { + if lt.Spec.Env == nil || len(lt.Spec.Env.SecretMounts) == 0 { + return nil + } + + volumes := make([]corev1.Volume, 0, len(lt.Spec.Env.SecretMounts)) + for _, sm := range lt.Spec.Env.SecretMounts { + volumes = append(volumes, corev1.Volume{ + Name: SecretVolumeName(sm.Name), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sm.Name, + }, + }, + }) + } + return volumes +} + +// BuildSecretVolumeMounts creates VolumeMount entries for secret mounts. +func BuildSecretVolumeMounts(lt *locustv2.LocustTest) []corev1.VolumeMount { + if lt.Spec.Env == nil || len(lt.Spec.Env.SecretMounts) == 0 { + return nil + } + + mounts := make([]corev1.VolumeMount, 0, len(lt.Spec.Env.SecretMounts)) + for _, sm := range lt.Spec.Env.SecretMounts { + mounts = append(mounts, corev1.VolumeMount{ + Name: SecretVolumeName(sm.Name), + MountPath: sm.MountPath, + ReadOnly: sm.ReadOnly, + }) + } + return mounts +} + +// SecretVolumeName generates a unique volume name for a secret mount. +func SecretVolumeName(secretName string) string { + return "secret-" + secretName +} diff --git a/internal/resources/env_test.go b/internal/resources/env_test.go new file mode 100644 index 00000000..a1cd7808 --- /dev/null +++ b/internal/resources/env_test.go @@ -0,0 +1,706 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestBuildEnvFrom_NilEnvConfig(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: nil, + }, + } + + result := BuildEnvFrom(lt) + assert.Nil(t, result) +} + +func TestBuildEnvFrom_EmptyEnvConfig(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{}, + }, + } + + result := BuildEnvFrom(lt) + assert.Nil(t, result) +} + +func TestBuildEnvFrom_ConfigMapRefs(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "app-config"}, + }, + }, + }, + } + + result := BuildEnvFrom(lt) + + assert.Len(t, result, 1) + assert.NotNil(t, result[0].ConfigMapRef) + assert.Nil(t, result[0].SecretRef) + assert.Equal(t, "app-config", result[0].ConfigMapRef.Name) + assert.Empty(t, result[0].Prefix) +} + +func TestBuildEnvFrom_ConfigMapRefs_WithPrefix(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "app-config", Prefix: "APP_"}, + }, + }, + }, + } + + result := BuildEnvFrom(lt) + + assert.Len(t, result, 1) + assert.Equal(t, "APP_", result[0].Prefix) + assert.Equal(t, "app-config", result[0].ConfigMapRef.Name) +} + +func TestBuildEnvFrom_SecretRefs(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretRefs: []locustv2.SecretEnvSource{ + {Name: "api-credentials"}, + }, + }, + }, + } + + result := BuildEnvFrom(lt) + + assert.Len(t, result, 1) + assert.Nil(t, result[0].ConfigMapRef) + assert.NotNil(t, 
result[0].SecretRef) + assert.Equal(t, "api-credentials", result[0].SecretRef.Name) + assert.Empty(t, result[0].Prefix) +} + +func TestBuildEnvFrom_SecretRefs_WithPrefix(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretRefs: []locustv2.SecretEnvSource{ + {Name: "api-credentials", Prefix: "SECRET_"}, + }, + }, + }, + } + + result := BuildEnvFrom(lt) + + assert.Len(t, result, 1) + assert.Equal(t, "SECRET_", result[0].Prefix) + assert.Equal(t, "api-credentials", result[0].SecretRef.Name) +} + +func TestBuildEnvFrom_Multiple(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "config1", Prefix: "CFG1_"}, + {Name: "config2", Prefix: "CFG2_"}, + }, + SecretRefs: []locustv2.SecretEnvSource{ + {Name: "secret1"}, + {Name: "secret2", Prefix: "SEC_"}, + }, + }, + }, + } + + result := BuildEnvFrom(lt) + + assert.Len(t, result, 4) + + // ConfigMaps come first + assert.Equal(t, "config1", result[0].ConfigMapRef.Name) + assert.Equal(t, "CFG1_", result[0].Prefix) + assert.Equal(t, "config2", result[1].ConfigMapRef.Name) + assert.Equal(t, "CFG2_", result[1].Prefix) + + // Secrets come after + assert.Equal(t, "secret1", result[2].SecretRef.Name) + assert.Empty(t, result[2].Prefix) + assert.Equal(t, "secret2", result[3].SecretRef.Name) + assert.Equal(t, "SEC_", result[3].Prefix) +} + +func TestBuildUserEnvVars_Nil(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: nil, + }, + } + + result := BuildUserEnvVars(lt) + assert.Nil(t, result) +} + +func TestBuildUserEnvVars_Empty(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{}, + }, + }, + } + + result := BuildUserEnvVars(lt) + assert.Nil(t, result) +} + +func TestBuildUserEnvVars_DirectValues(t *testing.T) { + lt := &locustv2.LocustTest{ + 
Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "TARGET_HOST", Value: "https://example.com"}, + {Name: "LOG_LEVEL", Value: "DEBUG"}, + }, + }, + }, + } + + result := BuildUserEnvVars(lt) + + assert.Len(t, result, 2) + assert.Equal(t, "TARGET_HOST", result[0].Name) + assert.Equal(t, "https://example.com", result[0].Value) + assert.Equal(t, "LOG_LEVEL", result[1].Name) + assert.Equal(t, "DEBUG", result[1].Value) +} + +func TestBuildUserEnvVars_ValueFrom(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + { + Name: "API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "api-secret", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + } + + result := BuildUserEnvVars(lt) + + assert.Len(t, result, 1) + assert.Equal(t, "API_KEY", result[0].Name) + assert.NotNil(t, result[0].ValueFrom) + assert.NotNil(t, result[0].ValueFrom.SecretKeyRef) + assert.Equal(t, "api-secret", result[0].ValueFrom.SecretKeyRef.Name) + assert.Equal(t, "key", result[0].ValueFrom.SecretKeyRef.Key) +} + +func TestBuildUserEnvVars_ReturnsCopy(t *testing.T) { + original := []corev1.EnvVar{ + {Name: "KEY", Value: "value"}, + } + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: original, + }, + }, + } + + result := BuildUserEnvVars(lt) + + // Modify the result + result[0].Value = "modified" + + // Original should be unchanged + assert.Equal(t, "value", original[0].Value) +} + +func TestBuildEnvVars_OnlyKafka(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: nil, + }, + } + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + KafkaSecurityEnabled: false, + } + + result := BuildEnvVars(lt, cfg) + + // Should have 7 Kafka env vars + assert.Len(t, result, 7) + assert.Equal(t, 
EnvKafkaBootstrapServers, result[0].Name) + assert.Equal(t, "kafka:9092", result[0].Value) +} + +func TestBuildEnvVars_Combined(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + }, + }, + } + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + } + + result := BuildEnvVars(lt, cfg) + + // 7 Kafka vars + 1 user var + assert.Len(t, result, 8) + + // Kafka vars come first + assert.Equal(t, EnvKafkaBootstrapServers, result[0].Name) + + // User var comes last + assert.Equal(t, "USER_VAR", result[7].Name) + assert.Equal(t, "user-value", result[7].Value) +} + +func TestBuildSecretVolumes_Nil(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: nil, + }, + } + + result := BuildSecretVolumes(lt) + assert.Nil(t, result) +} + +func TestBuildSecretVolumes_Empty(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{}, + }, + }, + } + + result := BuildSecretVolumes(lt) + assert.Nil(t, result) +} + +func TestBuildSecretVolumes_Single(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/certs"}, + }, + }, + }, + } + + result := BuildSecretVolumes(lt) + + assert.Len(t, result, 1) + assert.Equal(t, "secret-tls-certs", result[0].Name) + assert.NotNil(t, result[0].Secret) + assert.Equal(t, "tls-certs", result[0].Secret.SecretName) +} + +func TestBuildSecretVolumes_Multiple(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/certs"}, + {Name: "ssh-keys", MountPath: "/root/.ssh"}, + }, + }, + }, + } + + result := BuildSecretVolumes(lt) + + 
assert.Len(t, result, 2) + assert.Equal(t, "secret-tls-certs", result[0].Name) + assert.Equal(t, "tls-certs", result[0].Secret.SecretName) + assert.Equal(t, "secret-ssh-keys", result[1].Name) + assert.Equal(t, "ssh-keys", result[1].Secret.SecretName) +} + +func TestBuildSecretVolumeMounts_Nil(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: nil, + }, + } + + result := BuildSecretVolumeMounts(lt) + assert.Nil(t, result) +} + +func TestBuildSecretVolumeMounts_Empty(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{}, + }, + }, + } + + result := BuildSecretVolumeMounts(lt) + assert.Nil(t, result) +} + +func TestBuildSecretVolumeMounts_Single(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/certs", ReadOnly: true}, + }, + }, + }, + } + + result := BuildSecretVolumeMounts(lt) + + assert.Len(t, result, 1) + assert.Equal(t, "secret-tls-certs", result[0].Name) + assert.Equal(t, "/etc/certs", result[0].MountPath) + assert.True(t, result[0].ReadOnly) +} + +func TestBuildSecretVolumeMounts_ReadOnly(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "secret1", MountPath: "/path1", ReadOnly: true}, + {Name: "secret2", MountPath: "/path2", ReadOnly: false}, + }, + }, + }, + } + + result := BuildSecretVolumeMounts(lt) + + assert.Len(t, result, 2) + assert.True(t, result[0].ReadOnly) + assert.False(t, result[1].ReadOnly) +} + +func TestSecretVolumeName(t *testing.T) { + tests := []struct { + secretName string + expected string + }{ + {"tls-certs", "secret-tls-certs"}, + {"api-keys", "secret-api-keys"}, + {"my.secret", "secret-my.secret"}, + } + + for _, tt := range tests { + t.Run(tt.secretName, func(t 
*testing.T) { + result := SecretVolumeName(tt.secretName) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestBuildEnvFrom_IntegrationWithFullSpec(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-load", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:2.20.0", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 5, + }, + Env: &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "app-config", Prefix: "APP_"}, + }, + SecretRefs: []locustv2.SecretEnvSource{ + {Name: "api-credentials"}, + }, + Variables: []corev1.EnvVar{ + {Name: "TARGET_HOST", Value: "https://api.example.com"}, + }, + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/locust/certs", ReadOnly: true}, + }, + }, + }, + } + + // Test all builders work together + envFrom := BuildEnvFrom(lt) + assert.Len(t, envFrom, 2) + + userVars := BuildUserEnvVars(lt) + assert.Len(t, userVars, 1) + + volumes := BuildSecretVolumes(lt) + assert.Len(t, volumes, 1) + + mounts := BuildSecretVolumeMounts(lt) + assert.Len(t, mounts, 1) +} + +// ===== OTel Integration Tests ===== + +func TestBuildEnvVars_WithOTel_Enabled(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lt", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:2.32.0", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 3, + }, + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + }, + Observability: &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector.monitoring:4317", 
+ Protocol: "grpc", + Insecure: true, + ExtraEnvVars: map[string]string{ + "OTEL_RESOURCE_ATTRIBUTES": "service.name=locust-test", + }, + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + KafkaSecurityEnabled: false, + } + + result := BuildEnvVars(lt, cfg) + + // Should have: 7 Kafka + 5 OTel (traces, metrics, endpoint, protocol, insecure) + 1 extra OTel + 1 user = 14 + assert.Len(t, result, 14) + + // Convert to map for easier assertions + envMap := make(map[string]string) + for _, ev := range result { + envMap[ev.Name] = ev.Value + } + + // Verify Kafka env vars present + assert.Equal(t, "kafka:9092", envMap[EnvKafkaBootstrapServers]) + + // Verify OTel env vars present and correct + assert.Equal(t, "otlp", envMap["OTEL_TRACES_EXPORTER"]) + assert.Equal(t, "otlp", envMap["OTEL_METRICS_EXPORTER"]) + assert.Equal(t, "otel-collector.monitoring:4317", envMap["OTEL_EXPORTER_OTLP_ENDPOINT"]) + assert.Equal(t, "grpc", envMap["OTEL_EXPORTER_OTLP_PROTOCOL"]) + assert.Equal(t, "true", envMap["OTEL_EXPORTER_OTLP_INSECURE"]) + assert.Equal(t, "service.name=locust-test", envMap["OTEL_RESOURCE_ATTRIBUTES"]) + + // Verify user env vars present + assert.Equal(t, "user-value", envMap["USER_VAR"]) +} + +func TestBuildEnvVars_WithOTel_Disabled(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + }, + Observability: &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: false, + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + } + + result := BuildEnvVars(lt, cfg) + + // Should have: 7 Kafka + 1 user = 8 (no OTel vars) + assert.Len(t, result, 8) + + // Convert to map for easier assertions + envMap := make(map[string]string) + for _, ev := range result { + envMap[ev.Name] = ev.Value + } + + // Verify no OTel env vars present + _, 
hasOTelTraces := envMap["OTEL_TRACES_EXPORTER"] + _, hasOTelMetrics := envMap["OTEL_METRICS_EXPORTER"] + _, hasOTelEndpoint := envMap["OTEL_EXPORTER_OTLP_ENDPOINT"] + + assert.False(t, hasOTelTraces, "OTEL_TRACES_EXPORTER should not be present when OTel disabled") + assert.False(t, hasOTelMetrics, "OTEL_METRICS_EXPORTER should not be present when OTel disabled") + assert.False(t, hasOTelEndpoint, "OTEL_EXPORTER_OTLP_ENDPOINT should not be present when OTel disabled") + + // Verify Kafka and user vars still present + assert.Equal(t, "kafka:9092", envMap[EnvKafkaBootstrapServers]) + assert.Equal(t, "user-value", envMap["USER_VAR"]) +} + +func TestBuildEnvVars_WithOTel_NoObservabilityConfig(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + }, + // No Observability config at all + }, + } + + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + } + + result := BuildEnvVars(lt, cfg) + + // Should have: 7 Kafka + 1 user = 8 (no OTel vars) + assert.Len(t, result, 8) + + // Convert to map for easier assertions + envMap := make(map[string]string) + for _, ev := range result { + envMap[ev.Name] = ev.Value + } + + // Verify no OTel env vars present + _, hasOTelTraces := envMap["OTEL_TRACES_EXPORTER"] + assert.False(t, hasOTelTraces, "OTEL_TRACES_EXPORTER should not be present when no Observability config") +} + +func TestBuildEnvVars_OTel_HTTPProtocol(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Observability: &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "http://jaeger:4318/v1/traces", + Protocol: "http/protobuf", + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + } + + result := BuildEnvVars(lt, cfg) + + // Find the protocol env var + envMap := make(map[string]string) + for _, ev := 
range result { + envMap[ev.Name] = ev.Value + } + + // Verify HTTP protocol is set correctly + assert.Equal(t, "http/protobuf", envMap["OTEL_EXPORTER_OTLP_PROTOCOL"]) + assert.Equal(t, "http://jaeger:4318/v1/traces", envMap["OTEL_EXPORTER_OTLP_ENDPOINT"]) +} + +func TestBuildEnvVars_OTel_EnvVarOrder(t *testing.T) { + lt := &locustv2.LocustTest{ + Spec: locustv2.LocustTestSpec{ + Env: &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + }, + Observability: &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + KafkaBootstrapServers: "kafka:9092", + } + + result := BuildEnvVars(lt, cfg) + + // Verify order: Kafka vars first, then OTel vars, then user vars last + // First 7 should be Kafka + assert.Equal(t, EnvKafkaBootstrapServers, result[0].Name) + + // OTel vars come after Kafka (at indices 7-10: traces, metrics, endpoint, protocol) + otelVarNames := []string{} + for i := 7; i <= 10; i++ { + otelVarNames = append(otelVarNames, result[i].Name) + } + assert.Contains(t, otelVarNames, "OTEL_TRACES_EXPORTER") + assert.Contains(t, otelVarNames, "OTEL_METRICS_EXPORTER") + assert.Contains(t, otelVarNames, "OTEL_EXPORTER_OTLP_ENDPOINT") + assert.Contains(t, otelVarNames, "OTEL_EXPORTER_OTLP_PROTOCOL") + + // User var should be last + assert.Equal(t, "USER_VAR", result[len(result)-1].Name) +} diff --git a/internal/resources/job.go b/internal/resources/job.go new file mode 100644 index 00000000..1925d00a --- /dev/null +++ b/internal/resources/job.go @@ -0,0 +1,449 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+	"fmt"
+
+	locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2"
+	"github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config"
+	"github.com/go-logr/logr"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BuildMasterJob creates a Kubernetes Job for the Locust master node.
+func BuildMasterJob(lt *locustv2.LocustTest, cfg *config.OperatorConfig, logger logr.Logger) *batchv1.Job {
+	nodeName := NodeName(lt.Name, Master)
+	otelEnabled := IsOTelEnabled(lt)
+	command := BuildMasterCommand(&lt.Spec.Master, lt.Spec.Worker.Replicas, otelEnabled, logger)
+
+	return buildJob(lt, cfg, Master, nodeName, command)
+}
+
+// BuildWorkerJob creates a Kubernetes Job for the Locust worker nodes.
+func BuildWorkerJob(lt *locustv2.LocustTest, cfg *config.OperatorConfig, logger logr.Logger) *batchv1.Job {
+	nodeName := NodeName(lt.Name, Worker)
+	masterHost := NodeName(lt.Name, Master)
+	otelEnabled := IsOTelEnabled(lt)
+	command := BuildWorkerCommand(lt.Spec.Worker.Command, masterHost, otelEnabled, lt.Spec.Worker.ExtraArgs, logger)
+
+	return buildJob(lt, cfg, Worker, nodeName, command)
+}
+
+// buildJob is the internal function that constructs a Job for either master or worker.
+func buildJob(lt *locustv2.LocustTest, cfg *config.OperatorConfig, mode OperationalMode, nodeName string, command []string) *batchv1.Job {
+	labels := BuildLabels(lt, mode)
+	annotations := BuildAnnotations(lt, mode, cfg)
+
+	// Determine parallelism based on mode
+	var parallelism int32
+	if mode == Master {
+		parallelism = MasterReplicaCount
+	} else {
+		parallelism = lt.Spec.Worker.Replicas
+	}
+
+	// Determine ports based on mode
+	var ports []corev1.ContainerPort
+	if mode == Master {
+		ports = MasterPorts()
+	} else {
+		ports = WorkerPorts()
+	}
+
+	// Build containers
+	containers := []corev1.Container{
+		buildLocustContainer(lt, nodeName, command, ports, cfg, mode),
+	}
+
+	// Master gets the metrics exporter sidecar ONLY if OTel is disabled
+	if mode == Master && !IsOTelEnabled(lt) {
+		containers = append(containers, buildMetricsExporterContainer(cfg))
+	}
+
+	backoffLimit := int32(BackoffLimit)
+
+	job := &batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      nodeName,
+			Namespace: lt.Namespace,
+		},
+		Spec: batchv1.JobSpec{
+			TTLSecondsAfterFinished: cfg.TTLSecondsAfterFinished,
+			Parallelism:             &parallelism,
+			BackoffLimit:            &backoffLimit,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:      labels,
+					Annotations: annotations,
+				},
+				Spec: corev1.PodSpec{
+					RestartPolicy:    corev1.RestartPolicyNever,
+					ImagePullSecrets: buildImagePullSecrets(lt),
+					Containers:       containers,
+					Volumes:          buildVolumes(lt, nodeName, mode),
+					Affinity:         buildAffinity(lt, cfg),
+					Tolerations:      buildTolerations(lt, cfg),
+					NodeSelector:     buildNodeSelector(lt),
+					SecurityContext: &corev1.PodSecurityContext{
+						SeccompProfile: &corev1.SeccompProfile{
+							Type: corev1.SeccompProfileTypeRuntimeDefault,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	return job
+}
+
+// buildLocustContainer creates the main Locust container.
+func buildLocustContainer(lt *locustv2.LocustTest, name string, command []string, ports []corev1.ContainerPort, cfg *config.OperatorConfig, mode OperationalMode) corev1.Container { + container := corev1.Container{ + Name: name, + Image: lt.Spec.Image, + ImagePullPolicy: lt.Spec.ImagePullPolicy, + Args: command, + Ports: ports, + Resources: buildResourceRequirementsWithPrecedence(lt, cfg, mode), + Env: BuildEnvVars(lt, cfg), + EnvFrom: BuildEnvFrom(lt), + VolumeMounts: buildVolumeMounts(lt, name, mode), + } + + // Default to IfNotPresent if not specified + if container.ImagePullPolicy == "" { + container.ImagePullPolicy = corev1.PullIfNotPresent + } + + return container +} + +// buildMetricsExporterContainer creates the Prometheus metrics exporter sidecar container. +func buildMetricsExporterContainer(cfg *config.OperatorConfig) corev1.Container { + return corev1.Container{ + Name: MetricsExporterContainerName, + Image: cfg.MetricsExporterImage, + ImagePullPolicy: corev1.PullPolicy(cfg.MetricsExporterPullPolicy), + Ports: []corev1.ContainerPort{ + {ContainerPort: cfg.MetricsExporterPort}, + }, + Resources: buildResourceRequirements(cfg, true), + Env: []corev1.EnvVar{ + { + Name: ExporterURIEnvVar, + Value: fmt.Sprintf("http://localhost:%d", WebUIPort), + }, + { + Name: ExporterPortEnvVar, + Value: fmt.Sprintf(":%d", cfg.MetricsExporterPort), + }, + }, + } +} + +// buildImagePullSecrets creates LocalObjectReferences for image pull secrets. +func buildImagePullSecrets(lt *locustv2.LocustTest) []corev1.LocalObjectReference { + return lt.Spec.ImagePullSecrets +} + +// buildVolumes creates the volumes for ConfigMap, LibConfigMap, Secrets, and user volumes. 
+func buildVolumes(lt *locustv2.LocustTest, nodeName string, mode OperationalMode) []corev1.Volume { + var volumes []corev1.Volume + + // Get ConfigMap refs from v2 TestFiles config + var configMapRef, libConfigMapRef string + if lt.Spec.TestFiles != nil { + configMapRef = lt.Spec.TestFiles.ConfigMapRef + libConfigMapRef = lt.Spec.TestFiles.LibConfigMapRef + } + + if configMapRef != "" { + volumes = append(volumes, corev1.Volume{ + Name: nodeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMapRef, + }, + }, + }, + }) + } + + if libConfigMapRef != "" { + volumes = append(volumes, corev1.Volume{ + Name: LibVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: libConfigMapRef, + }, + }, + }, + }) + } + + // Add secret volumes from env.secretMounts + secretVolumes := BuildSecretVolumes(lt) + if len(secretVolumes) > 0 { + volumes = append(volumes, secretVolumes...) + } + + // Add user-defined volumes (filtered by target) + userVolumes := BuildUserVolumes(lt, mode) + if len(userVolumes) > 0 { + volumes = append(volumes, userVolumes...) + } + + return volumes +} + +// buildVolumeMounts creates the volume mounts for ConfigMap, LibConfigMap, Secrets, and user mounts. 
+func buildVolumeMounts(lt *locustv2.LocustTest, nodeName string, mode OperationalMode) []corev1.VolumeMount { + var mounts []corev1.VolumeMount + + // Get ConfigMap refs and mount paths from v2 TestFiles config + var configMapRef, libConfigMapRef string + srcMountPath := DefaultMountPath + libMountPath := LibMountPath + if lt.Spec.TestFiles != nil { + configMapRef = lt.Spec.TestFiles.ConfigMapRef + libConfigMapRef = lt.Spec.TestFiles.LibConfigMapRef + if lt.Spec.TestFiles.SrcMountPath != "" { + srcMountPath = lt.Spec.TestFiles.SrcMountPath + } + if lt.Spec.TestFiles.LibMountPath != "" { + libMountPath = lt.Spec.TestFiles.LibMountPath + } + } + + if configMapRef != "" { + mounts = append(mounts, corev1.VolumeMount{ + Name: nodeName, + MountPath: srcMountPath, + ReadOnly: false, + }) + } + + if libConfigMapRef != "" { + mounts = append(mounts, corev1.VolumeMount{ + Name: LibVolumeName, + MountPath: libMountPath, + ReadOnly: false, + }) + } + + // Add secret mounts from env.secretMounts + secretMounts := BuildSecretVolumeMounts(lt) + if len(secretMounts) > 0 { + mounts = append(mounts, secretMounts...) + } + + // Add user-defined volume mounts (filtered by target) + userMounts := BuildUserVolumeMounts(lt, mode) + if len(userMounts) > 0 { + mounts = append(mounts, userMounts...) + } + + return mounts +} + +// buildResourceRequirements creates resource requirements for containers. +// isMetricsExporter determines whether to use metrics exporter or locust container resources. 
+func buildResourceRequirements(cfg *config.OperatorConfig, isMetricsExporter bool) corev1.ResourceRequirements {
+	var requests, limits corev1.ResourceList
+
+	if isMetricsExporter {
+		requests = buildResourceList(
+			cfg.MetricsExporterCPURequest,
+			cfg.MetricsExporterMemRequest,
+			cfg.MetricsExporterEphemeralStorageRequest,
+		)
+		limits = buildResourceList(
+			cfg.MetricsExporterCPULimit,
+			cfg.MetricsExporterMemLimit,
+			cfg.MetricsExporterEphemeralStorageLimit,
+		)
+	} else {
+		requests = buildResourceList(
+			cfg.PodCPURequest,
+			cfg.PodMemRequest,
+			cfg.PodEphemeralStorageRequest,
+		)
+		limits = buildResourceList(
+			cfg.PodCPULimit,
+			cfg.PodMemLimit,
+			cfg.PodEphemeralStorageLimit,
+		)
+	}
+
+	return corev1.ResourceRequirements{
+		Requests: requests,
+		Limits:   limits,
+	}
+}
+
+// buildResourceRequirementsWithPrecedence implements resource precedence chain:
+// Level 1: CR-level resources (complete override, same as native K8s)
+// Level 2: Role-specific operator config (from Helm masterResources/workerResources)
+// Level 3: Unified operator defaults (from Helm resources)
+// CR resources are a COMPLETE OVERRIDE (not partial merge).
+// Role-specific resources use FIELD-LEVEL FALLBACK to unified defaults.
+func buildResourceRequirementsWithPrecedence(
+	lt *locustv2.LocustTest,
+	cfg *config.OperatorConfig,
+	mode OperationalMode,
+) corev1.ResourceRequirements {
+	// Level 1: CR-level resources (highest precedence)
+	// CR resources are a COMPLETE OVERRIDE (not partial merge) — same as native K8s
+	var crResources *corev1.ResourceRequirements
+	switch mode {
+	case Master:
+		crResources = &lt.Spec.Master.Resources
+	case Worker:
+		crResources = &lt.Spec.Worker.Resources
+	}
+
+	if hasResourcesSpecified(crResources) {
+		return *crResources // Complete override, return as-is
+	}
+
+	// Level 2: Role-specific operator config (from Helm masterResources/workerResources)
+	// Each field independently falls through to Level 3 if empty.
+	// This builds a resource spec where non-empty role-specific fields override
+	// unified fields, and empty role-specific fields fall through to unified.
+	var cpuReq, memReq, ephReq, cpuLim, memLim, ephLim string
+	if mode == Master {
+		cpuReq = cfg.MasterCPURequest
+		memReq = cfg.MasterMemRequest
+		ephReq = cfg.MasterEphemeralStorageRequest
+		cpuLim = cfg.MasterCPULimit
+		memLim = cfg.MasterMemLimit
+		ephLim = cfg.MasterEphemeralStorageLimit
+	} else {
+		cpuReq = cfg.WorkerCPURequest
+		memReq = cfg.WorkerMemRequest
+		ephReq = cfg.WorkerEphemeralStorageRequest
+		cpuLim = cfg.WorkerCPULimit
+		memLim = cfg.WorkerMemLimit
+		ephLim = cfg.WorkerEphemeralStorageLimit
+	}
+
+	// Field-level fallback: empty role-specific → unified
+	if cpuReq == "" {
+		cpuReq = cfg.PodCPURequest
+	}
+	if memReq == "" {
+		memReq = cfg.PodMemRequest
+	}
+	if ephReq == "" {
+		ephReq = cfg.PodEphemeralStorageRequest
+	}
+	if cpuLim == "" {
+		cpuLim = cfg.PodCPULimit
+	}
+	if memLim == "" {
+		memLim = cfg.PodMemLimit
+	}
+	if ephLim == "" {
+		ephLim = cfg.PodEphemeralStorageLimit
+	}
+
+	return corev1.ResourceRequirements{
+		Requests: buildResourceList(cpuReq, memReq, ephReq),
+		Limits:   buildResourceList(cpuLim, memLim, ephLim),
+	}
+}
+
+// hasResourcesSpecified checks if ResourceRequirements has any non-empty fields.
+// This distinguishes "user set resources to empty" vs "user didn't set resources at all".
+func hasResourcesSpecified(r *corev1.ResourceRequirements) bool {
+	if r == nil {
+		return false
+	}
+	return len(r.Requests) > 0 || len(r.Limits) > 0
+}
+
+// buildResourceList creates a ResourceList from CPU, memory, and ephemeral storage strings.
+// Empty strings are skipped (not added to the resource list).
+// Safe parsing is used (errors ignored) because values are pre-validated at operator startup.
+func buildResourceList(cpu, memory, ephemeral string) corev1.ResourceList { + resources := corev1.ResourceList{} + + if cpu != "" { + // Safe: Already validated at startup in LoadConfig + q, _ := resource.ParseQuantity(cpu) + resources[corev1.ResourceCPU] = q + } + if memory != "" { + q, _ := resource.ParseQuantity(memory) + resources[corev1.ResourceMemory] = q + } + if ephemeral != "" { + q, _ := resource.ParseQuantity(ephemeral) + resources[corev1.ResourceEphemeralStorage] = q + } + + return resources +} + +// buildAffinity creates the pod affinity configuration from the CR spec. +// Returns nil if affinity injection is disabled or no affinity is specified. +func buildAffinity(lt *locustv2.LocustTest, cfg *config.OperatorConfig) *corev1.Affinity { + if !cfg.EnableAffinityCRInjection { + return nil + } + + if lt.Spec.Scheduling == nil || lt.Spec.Scheduling.Affinity == nil { + return nil + } + + // v2 uses standard corev1.Affinity directly + return lt.Spec.Scheduling.Affinity +} + +// buildTolerations creates pod tolerations from the CR spec. +// Returns nil if toleration injection is disabled or no tolerations are specified. +func buildTolerations(lt *locustv2.LocustTest, cfg *config.OperatorConfig) []corev1.Toleration { + if !cfg.EnableTolerationsCRInjection { + return nil + } + + if lt.Spec.Scheduling == nil || lt.Spec.Scheduling.Tolerations == nil { + return nil + } + + // v2 uses standard corev1.Toleration directly + return lt.Spec.Scheduling.Tolerations +} + +// buildNodeSelector creates pod node selector from the CR spec. +// Returns nil if no node selector is specified. +// Note: Map iteration order is non-deterministic in Go, but the Kubernetes +// API server handles consistent serialization of label/selector maps. 
+func buildNodeSelector(lt *locustv2.LocustTest) map[string]string { + if lt.Spec.Scheduling == nil || len(lt.Spec.Scheduling.NodeSelector) == 0 { + return nil + } + + return lt.Spec.Scheduling.NodeSelector +} diff --git a/internal/resources/job_test.go b/internal/resources/job_test.go new file mode 100644 index 00000000..42c1b9f3 --- /dev/null +++ b/internal/resources/job_test.go @@ -0,0 +1,1315 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const secretTLSCertsVolumeName = "secret-tls-certs" + +func newTestLocustTest() *locustv2.LocustTest { + return &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + ImagePullPolicy: corev1.PullAlways, + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + TestFiles: &locustv2.TestFilesConfig{ + ConfigMapRef: "my-test-configmap", + }, + }, + } +} + +func newTestConfig() 
*config.OperatorConfig { + return &config.OperatorConfig{ + PodCPURequest: "250m", + PodMemRequest: "128Mi", + PodEphemeralStorageRequest: "30M", + PodCPULimit: "1000m", + PodMemLimit: "1024Mi", + PodEphemeralStorageLimit: "50M", + + MetricsExporterImage: "containersol/locust_exporter:v0.5.0", + MetricsExporterPort: 9646, + MetricsExporterPullPolicy: "Always", + MetricsExporterCPURequest: "250m", + MetricsExporterMemRequest: "128Mi", + MetricsExporterEphemeralStorageRequest: "30M", + MetricsExporterCPULimit: "1000m", + MetricsExporterMemLimit: "1024Mi", + MetricsExporterEphemeralStorageLimit: "50M", + + KafkaBootstrapServers: "localhost:9092", + KafkaSecurityEnabled: false, + KafkaSecurityProtocol: "SASL_PLAINTEXT", + KafkaSaslMechanism: "SCRAM-SHA-512", + } +} + +func TestBuildMasterJob(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job) + assert.Equal(t, "my-test-master", job.Name) + assert.Equal(t, "default", job.Namespace) +} + +func TestBuildMasterJob_Metadata(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Equal(t, "my-test-master", job.Name) + assert.Equal(t, "default", job.Namespace) +} + +func TestBuildMasterJob_Parallelism(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job.Spec.Parallelism) + assert.Equal(t, int32(1), *job.Spec.Parallelism, "Master parallelism should always be 1") +} + +func TestBuildMasterJob_Containers(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 2, "Master should have 2 containers (locust + metrics exporter)") + + // Find container names + containerNames := make([]string, len(containers)) + for i, c := range 
containers { + containerNames[i] = c.Name + } + assert.Contains(t, containerNames, "my-test-master") + assert.Contains(t, containerNames, MetricsExporterContainerName) +} + +func TestBuildMasterJob_WithTTL(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + ttl := int32(3600) + cfg.TTLSecondsAfterFinished = &ttl + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job.Spec.TTLSecondsAfterFinished) + assert.Equal(t, int32(3600), *job.Spec.TTLSecondsAfterFinished) +} + +func TestBuildMasterJob_WithImagePullSecrets(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: "my-registry-secret"}, + {Name: "another-secret"}, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + secrets := job.Spec.Template.Spec.ImagePullSecrets + assert.Len(t, secrets, 2) + assert.Equal(t, "my-registry-secret", secrets[0].Name) + assert.Equal(t, "another-secret", secrets[1].Name) +} + +func TestBuildMasterJob_WithLibConfigMap(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.TestFiles.LibConfigMapRef = "my-lib-configmap" + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + volumes := job.Spec.Template.Spec.Volumes + assert.Len(t, volumes, 2, "Should have 2 volumes (configmap + lib)") + + // Check lib volume exists + var libVolumeFound bool + for _, v := range volumes { + if v.Name == LibVolumeName { + libVolumeFound = true + assert.Equal(t, "my-lib-configmap", v.ConfigMap.Name) + } + } + assert.True(t, libVolumeFound, "Lib volume should exist") + + // Check volume mounts + container := job.Spec.Template.Spec.Containers[0] + var libMountFound bool + for _, m := range container.VolumeMounts { + if m.Name == LibVolumeName { + libMountFound = true + assert.Equal(t, LibMountPath, m.MountPath) + } + } + assert.True(t, libMountFound, "Lib volume mount should exist") +} + +func TestBuildMasterJob_Labels(t *testing.T) { + lt := 
newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + labels := job.Spec.Template.Labels + assert.Equal(t, "my-test", labels[LabelApp]) + assert.Equal(t, "my-test-master", labels[LabelPodName]) + assert.Equal(t, ManagedByValue, labels[LabelManagedBy]) +} + +func TestBuildMasterJob_Annotations(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + annotations := job.Spec.Template.Annotations + assert.Equal(t, "true", annotations[AnnotationPrometheusScrape]) + assert.Equal(t, MetricsEndpointPath, annotations[AnnotationPrometheusPath]) + assert.Equal(t, "9646", annotations[AnnotationPrometheusPort]) +} + +func TestBuildWorkerJob(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job) + assert.Equal(t, "my-test-worker", job.Name) + assert.Equal(t, "default", job.Namespace) +} + +func TestBuildWorkerJob_Parallelism(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Worker.Replicas = 5 + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job.Spec.Parallelism) + assert.Equal(t, int32(5), *job.Spec.Parallelism, "Worker parallelism should equal Worker.Replicas") +} + +func TestBuildWorkerJob_Containers(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 1, "Worker should have 1 container only") + assert.Equal(t, "my-test-worker", containers[0].Name) +} + +func TestBuildWorkerJob_NoPrometheusAnnotations(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + annotations := job.Spec.Template.Annotations + assert.Empty(t, annotations[AnnotationPrometheusScrape]) + assert.Empty(t, 
annotations[AnnotationPrometheusPath]) + assert.Empty(t, annotations[AnnotationPrometheusPort]) +} + +func TestBuildResourceRequirements(t *testing.T) { + cfg := newTestConfig() + + resources := buildResourceRequirements(cfg, false) + + assert.Equal(t, "250m", resources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", resources.Requests.Memory().String()) + assert.Equal(t, "1", resources.Limits.Cpu().String()) + assert.Equal(t, "1Gi", resources.Limits.Memory().String()) +} + +func TestBuildResourceRequirements_MetricsExporter(t *testing.T) { + cfg := newTestConfig() + + resources := buildResourceRequirements(cfg, true) + + assert.Equal(t, "250m", resources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", resources.Requests.Memory().String()) +} + +func TestBuildAffinity_Disabled(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"performance"}, + }, + }, + }, + }, + }, + }, + }, + } + cfg := newTestConfig() + cfg.EnableAffinityCRInjection = false + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Nil(t, job.Spec.Template.Spec.Affinity, "Affinity should be nil when feature flag is disabled") +} + +func TestBuildAffinity_Enabled(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-type", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"performance"}, + }, + }, + }, + }, + }, + }, 
+ }, + } + cfg := newTestConfig() + cfg.EnableAffinityCRInjection = true + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job.Spec.Template.Spec.Affinity) + require.NotNil(t, job.Spec.Template.Spec.Affinity.NodeAffinity) + require.NotNil(t, job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) + + terms := job.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms + require.Len(t, terms, 1) + require.Len(t, terms[0].MatchExpressions, 1) + assert.Equal(t, "node-type", terms[0].MatchExpressions[0].Key) + assert.Equal(t, corev1.NodeSelectorOpIn, terms[0].MatchExpressions[0].Operator) + assert.Contains(t, terms[0].MatchExpressions[0].Values, "performance") +} + +func TestBuildTolerations_Disabled(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Tolerations: []corev1.Toleration{ + { + Key: "dedicated", + Operator: corev1.TolerationOpEqual, + Value: "performance", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + } + cfg := newTestConfig() + cfg.EnableTolerationsCRInjection = false + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Nil(t, job.Spec.Template.Spec.Tolerations, "Tolerations should be nil when feature flag is disabled") +} + +func TestBuildTolerations_Enabled(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Tolerations: []corev1.Toleration{ + { + Key: "dedicated", + Operator: corev1.TolerationOpEqual, + Value: "performance", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + } + cfg := newTestConfig() + cfg.EnableTolerationsCRInjection = true + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.Len(t, job.Spec.Template.Spec.Tolerations, 1) + assert.Equal(t, "dedicated", job.Spec.Template.Spec.Tolerations[0].Key) + assert.Equal(t, corev1.TolerationOpEqual, job.Spec.Template.Spec.Tolerations[0].Operator) + assert.Equal(t, 
"performance", job.Spec.Template.Spec.Tolerations[0].Value) + assert.Equal(t, corev1.TaintEffectNoSchedule, job.Spec.Template.Spec.Tolerations[0].Effect) +} + +func TestBuildTolerations_ExistsOperator(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Tolerations: []corev1.Toleration{ + { + Key: "node.kubernetes.io/not-ready", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + }, + }, + } + cfg := newTestConfig() + cfg.EnableTolerationsCRInjection = true + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.Len(t, job.Spec.Template.Spec.Tolerations, 1) + assert.Equal(t, corev1.TolerationOpExists, job.Spec.Template.Spec.Tolerations[0].Operator) + assert.Empty(t, job.Spec.Template.Spec.Tolerations[0].Value, "Value should be empty for Exists operator") +} + +func TestBuildMasterJob_EmptyImagePullPolicy(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.ImagePullPolicy = "" // Empty should default to IfNotPresent + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + assert.Equal(t, corev1.PullIfNotPresent, container.ImagePullPolicy) +} + +func TestBuildMasterJob_NoConfigMap(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.TestFiles = nil // No test files config + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Empty(t, job.Spec.Template.Spec.Volumes) + assert.Empty(t, job.Spec.Template.Spec.Containers[0].VolumeMounts) +} + +func TestBuildMasterJob_KafkaEnvVars(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + cfg.KafkaSecurityEnabled = true + cfg.KafkaBootstrapServers = "kafka.example.com:9092" + cfg.KafkaSecurityProtocol = "SASL_SSL" + cfg.KafkaUsername = "user" + cfg.KafkaPassword = "secret" + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, env 
:= range container.Env { + envMap[env.Name] = env.Value + } + + assert.Equal(t, "kafka.example.com:9092", envMap["KAFKA_BOOTSTRAP_SERVERS"]) + assert.Equal(t, "SASL_SSL", envMap["KAFKA_SECURITY_PROTOCOL_CONFIG"]) + assert.Equal(t, "user", envMap["KAFKA_USERNAME"]) + assert.Equal(t, "secret", envMap["KAFKA_PASSWORD"]) +} + +func TestBuildAffinity_NilScheduling(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = nil + cfg := newTestConfig() + cfg.EnableAffinityCRInjection = true + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Nil(t, job.Spec.Template.Spec.Affinity) +} + +func TestBuildAffinity_NilAffinity(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Scheduling = &locustv2.SchedulingConfig{ + Affinity: nil, + } + cfg := newTestConfig() + cfg.EnableAffinityCRInjection = true + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + assert.Nil(t, job.Spec.Template.Spec.Affinity) +} + +func TestBuildMasterJob_Completions(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Master job should not have Completions set (nil means run to completion) + assert.Nil(t, job.Spec.Completions) +} + +func TestBuildMasterJob_BackoffLimit(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + require.NotNil(t, job.Spec.BackoffLimit) + assert.Equal(t, int32(0), *job.Spec.BackoffLimit) +} + +func TestBuildMasterJob_WithEnvConfigMapRef(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "app-config", Prefix: "APP_"}, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + require.Len(t, container.EnvFrom, 1) + assert.NotNil(t, container.EnvFrom[0].ConfigMapRef) + assert.Equal(t, "app-config", container.EnvFrom[0].ConfigMapRef.Name) + 
assert.Equal(t, "APP_", container.EnvFrom[0].Prefix) +} + +func TestBuildMasterJob_WithEnvSecretRef(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + SecretRefs: []locustv2.SecretEnvSource{ + {Name: "api-credentials"}, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + require.Len(t, container.EnvFrom, 1) + assert.NotNil(t, container.EnvFrom[0].SecretRef) + assert.Equal(t, "api-credentials", container.EnvFrom[0].SecretRef.Name) +} + +func TestBuildMasterJob_WithEnvVariables(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "TARGET_HOST", Value: "https://example.com"}, + {Name: "LOG_LEVEL", Value: "DEBUG"}, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + + // User vars should be present + assert.Equal(t, "https://example.com", envMap["TARGET_HOST"]) + assert.Equal(t, "DEBUG", envMap["LOG_LEVEL"]) + + // Kafka vars should still be present + assert.Contains(t, envMap, "KAFKA_BOOTSTRAP_SERVERS") +} + +func TestBuildMasterJob_WithSecretMount(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/locust/certs", ReadOnly: true}, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Check volume exists + var secretVolumeFound bool + for _, v := range job.Spec.Template.Spec.Volumes { + if v.Name == secretTLSCertsVolumeName { + secretVolumeFound = true + assert.NotNil(t, v.Secret) + assert.Equal(t, "tls-certs", v.Secret.SecretName) + } + } + assert.True(t, secretVolumeFound, "Secret volume should exist") + + // Check volume mount exists + 
container := job.Spec.Template.Spec.Containers[0] + var secretMountFound bool + for _, m := range container.VolumeMounts { + if m.Name == secretTLSCertsVolumeName { + secretMountFound = true + assert.Equal(t, "/etc/locust/certs", m.MountPath) + assert.True(t, m.ReadOnly) + } + } + assert.True(t, secretMountFound, "Secret volume mount should exist") +} + +func TestBuildMasterJob_EnvCombinesKafkaAndUser(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + Variables: []corev1.EnvVar{ + {Name: "USER_VAR", Value: "user-value"}, + }, + } + cfg := newTestConfig() + cfg.KafkaBootstrapServers = "kafka:9092" + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + + // Should have 7 Kafka vars + 1 user var = 8 total + assert.Len(t, container.Env, 8) + + // Kafka vars come first + assert.Equal(t, "KAFKA_BOOTSTRAP_SERVERS", container.Env[0].Name) + assert.Equal(t, "kafka:9092", container.Env[0].Value) + + // User var comes last + assert.Equal(t, "USER_VAR", container.Env[7].Name) + assert.Equal(t, "user-value", container.Env[7].Value) +} + +func TestBuildWorkerJob_WithEnvConfig(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + ConfigMapRefs: []locustv2.ConfigMapEnvSource{ + {Name: "app-config"}, + }, + Variables: []corev1.EnvVar{ + {Name: "TARGET_HOST", Value: "https://example.com"}, + }, + SecretMounts: []locustv2.SecretMount{ + {Name: "tls-certs", MountPath: "/etc/certs"}, + }, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + + // EnvFrom should have ConfigMapRef + require.Len(t, container.EnvFrom, 1) + assert.Equal(t, "app-config", container.EnvFrom[0].ConfigMapRef.Name) + + // Env should have Kafka + user vars + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + assert.Equal(t, "https://example.com", 
envMap["TARGET_HOST"]) + assert.Contains(t, envMap, "KAFKA_BOOTSTRAP_SERVERS") + + // Secret mount should exist + var secretMountFound bool + for _, m := range container.VolumeMounts { + if m.Name == secretTLSCertsVolumeName { + secretMountFound = true + } + } + assert.True(t, secretMountFound) +} + +// ============================================ +// User Volume Tests +// ============================================ + +func TestBuildMasterJob_WithUserVolumes(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Volumes = []corev1.Volume{ + {Name: "results", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "shared", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + } + lt.Spec.VolumeMounts = []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared", MountPath: "/shared"}, Target: "both"}, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Check volumes + volumeNames := make(map[string]bool) + for _, v := range job.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.True(t, volumeNames["results"], "results volume should be in master") + assert.True(t, volumeNames["shared"], "shared volume should be in master") + + // Check mounts + container := job.Spec.Template.Spec.Containers[0] + mountPaths := make(map[string]bool) + for _, m := range container.VolumeMounts { + mountPaths[m.MountPath] = true + } + assert.True(t, mountPaths["/results"], "results mount should be in master") + assert.True(t, mountPaths["/shared"], "shared mount should be in master") +} + +func TestBuildWorkerJob_WithUserVolumes(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Volumes = []corev1.Volume{ + {Name: "results", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "shared", VolumeSource: 
corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "certs", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + } + lt.Spec.VolumeMounts = []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared", MountPath: "/shared"}, Target: "both"}, + {VolumeMount: corev1.VolumeMount{Name: "certs", MountPath: "/certs"}, Target: "worker"}, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + // Check volumes - worker should NOT have results + volumeNames := make(map[string]bool) + for _, v := range job.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.False(t, volumeNames["results"], "results volume should NOT be in worker") + assert.True(t, volumeNames["shared"], "shared volume should be in worker") + assert.True(t, volumeNames["certs"], "certs volume should be in worker") + + // Check mounts + container := job.Spec.Template.Spec.Containers[0] + mountPaths := make(map[string]bool) + for _, m := range container.VolumeMounts { + mountPaths[m.MountPath] = true + } + assert.False(t, mountPaths["/results"], "results mount should NOT be in worker") + assert.True(t, mountPaths["/shared"], "shared mount should be in worker") + assert.True(t, mountPaths["/certs"], "certs mount should be in worker") +} + +func TestBuildMasterJob_WithUserVolumeMounts_TargetWorker(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Volumes = []corev1.Volume{ + {Name: "worker-only", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + } + lt.Spec.VolumeMounts = []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "worker-only", MountPath: "/worker-data"}, Target: "worker"}, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Master should NOT have worker-only volume + volumeNames := make(map[string]bool) + 
for _, v := range job.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.False(t, volumeNames["worker-only"], "worker-only volume should NOT be in master") + + // Master should NOT have worker-only mount + container := job.Spec.Template.Spec.Containers[0] + mountPaths := make(map[string]bool) + for _, m := range container.VolumeMounts { + mountPaths[m.MountPath] = true + } + assert.False(t, mountPaths["/worker-data"], "worker-only mount should NOT be in master") +} + +func TestBuildJob_UserVolumesWithSecretVolumes(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Env = &locustv2.EnvConfig{ + SecretMounts: []locustv2.SecretMount{ + {Name: "api-keys", MountPath: "/etc/api-keys"}, + }, + } + lt.Spec.Volumes = []corev1.Volume{ + {Name: "user-data", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + } + lt.Spec.VolumeMounts = []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "user-data", MountPath: "/data"}, Target: "both"}, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Both secret and user volumes should exist + volumeNames := make(map[string]bool) + for _, v := range job.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.True(t, volumeNames["secret-api-keys"], "secret volume should exist") + assert.True(t, volumeNames["user-data"], "user volume should exist") + + // Both mounts should exist + container := job.Spec.Template.Spec.Containers[0] + mountPaths := make(map[string]bool) + for _, m := range container.VolumeMounts { + mountPaths[m.MountPath] = true + } + assert.True(t, mountPaths["/etc/api-keys"], "secret mount should exist") + assert.True(t, mountPaths["/data"], "user mount should exist") +} + +// ============================================ +// OTel Support Tests +// ============================================ + +func TestBuildMasterJob_OTelDisabled_HasSidecar(t *testing.T) { + lt := newTestLocustTest() + // No OTel config 
= disabled + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 2, "Master should have 2 containers (locust + metrics exporter) when OTel disabled") + + containerNames := make([]string, len(containers)) + for i, c := range containers { + containerNames[i] = c.Name + } + assert.Contains(t, containerNames, MetricsExporterContainerName) +} + +func TestBuildMasterJob_OTelEnabled_NoSidecar(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 1, "Master should have 1 container only when OTel enabled") + assert.Equal(t, "my-test-master", containers[0].Name) +} + +func TestBuildMasterJob_NoObservability_HasSidecar(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = nil + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 2, "Master should have 2 containers when observability is nil") +} + +func TestBuildWorkerJob_OTelEnabled_NoSidecar(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + containers := job.Spec.Template.Spec.Containers + assert.Len(t, containers, 1, "Worker should always have 1 container") +} + +func TestBuildMasterJob_OTelEnabled_HasEnvVars(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: 
true, + Endpoint: "otel-collector.monitoring:4317", + Protocol: "grpc", + Insecure: true, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + + assert.Equal(t, "otlp", envMap["OTEL_TRACES_EXPORTER"]) + assert.Equal(t, "otlp", envMap["OTEL_METRICS_EXPORTER"]) + assert.Equal(t, "otel-collector.monitoring:4317", envMap["OTEL_EXPORTER_OTLP_ENDPOINT"]) + assert.Equal(t, "grpc", envMap["OTEL_EXPORTER_OTLP_PROTOCOL"]) + assert.Equal(t, "true", envMap["OTEL_EXPORTER_OTLP_INSECURE"]) +} + +func TestBuildWorkerJob_OTelEnabled_HasEnvVars(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + + assert.Equal(t, "otlp", envMap["OTEL_TRACES_EXPORTER"]) + assert.Equal(t, "otel-collector:4317", envMap["OTEL_EXPORTER_OTLP_ENDPOINT"]) +} + +func TestBuildMasterJob_OTelEnabled_CommandHasFlag(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + assert.Contains(t, container.Args, "--otel", "Command should include --otel flag") +} + +func TestBuildWorkerJob_OTelEnabled_CommandHasFlag(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: 
&locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + assert.Contains(t, container.Args, "--otel", "Command should include --otel flag") +} + +func TestBuildMasterJob_OTelDisabled_CommandNoFlag(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = nil + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + assert.NotContains(t, container.Args, "--otel", "Command should NOT include --otel flag when disabled") +} + +func TestBuildMasterJob_OTelEnabled_ExtraEnvVars(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + ExtraEnvVars: map[string]string{ + "OTEL_RESOURCE_ATTRIBUTES": "service.name=locust-load-test", + }, + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + + assert.Equal(t, "service.name=locust-load-test", envMap["OTEL_RESOURCE_ATTRIBUTES"]) +} + +// ============================================ +// Integration Tests - ExtraArgs and Resource Precedence +// ============================================ + +func TestBuildMasterJob_WithExtraArgs(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Master.ExtraArgs = []string{"--csv=results", "--users=100"} + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + args := container.Args + + // Verify extraArgs are present + assert.Contains(t, args, "--csv=results") + assert.Contains(t, args, "--users=100") + + // Verify position: extraArgs 
should appear AFTER --only-summary + onlySummaryIndex := -1 + csvIndex := -1 + for i, arg := range args { + if arg == "--only-summary" { + onlySummaryIndex = i + } + if arg == "--csv=results" { + csvIndex = i + } + } + assert.Greater(t, csvIndex, onlySummaryIndex, "extraArgs should appear after operator-managed flags") +} + +func TestBuildWorkerJob_WithExtraArgs(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Worker.ExtraArgs = []string{"--csv=results"} + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + args := container.Args + + // Verify extraArgs are present + assert.Contains(t, args, "--csv=results") +} + +func TestBuildMasterJob_ExtraArgsNil(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Master.ExtraArgs = nil + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + args := container.Args + + // Verify command output matches current behavior + assert.Contains(t, args, "locust") + assert.Contains(t, args, "--master") + assert.Contains(t, args, "--only-summary") + assert.NotContains(t, args, "--csv=results") +} + +func TestBuildMasterJob_WithCRResources(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Master.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("500m"), + corev1.ResourceMemory: mustParseQuantity("256Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("2000m"), + corev1.ResourceMemory: mustParseQuantity("2Gi"), + }, + } + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Verify CR resources are used exactly (complete override) + assert.Equal(t, "500m", resources.Requests.Cpu().String()) + assert.Equal(t, "256Mi", resources.Requests.Memory().String()) + assert.Equal(t, 
"2", resources.Limits.Cpu().String()) + assert.Equal(t, "2Gi", resources.Limits.Memory().String()) + + // Verify ephemeral storage NOT present (CR didn't specify it) + _, hasEphemeralRequest := resources.Requests[corev1.ResourceEphemeralStorage] + _, hasEphemeralLimit := resources.Limits[corev1.ResourceEphemeralStorage] + assert.False(t, hasEphemeralRequest, "CR resources should not include operator defaults for unspecified fields") + assert.False(t, hasEphemeralLimit, "CR resources should not include operator defaults for unspecified fields") +} + +func TestBuildWorkerJob_WithCRResources(t *testing.T) { + lt := newTestLocustTest() + lt.Spec.Worker.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("250m"), + corev1.ResourceMemory: mustParseQuantity("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("1000m"), + corev1.ResourceMemory: mustParseQuantity("1Gi"), + }, + } + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Verify worker CR resources are used + assert.Equal(t, "250m", resources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", resources.Requests.Memory().String()) + assert.Equal(t, "1", resources.Limits.Cpu().String()) + assert.Equal(t, "1Gi", resources.Limits.Memory().String()) +} + +func TestBuildMasterJob_NoCRResources_UsesDefaults(t *testing.T) { + lt := newTestLocustTest() + // Leave Resources empty (default) + lt.Spec.Master.Resources = corev1.ResourceRequirements{} + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Verify operator defaults are used + assert.Equal(t, "250m", resources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", resources.Requests.Memory().String()) + assert.Equal(t, "30M", 
resources.Requests.StorageEphemeral().String()) + assert.Equal(t, "1", resources.Limits.Cpu().String()) + assert.Equal(t, "1Gi", resources.Limits.Memory().String()) + assert.Equal(t, "50M", resources.Limits.StorageEphemeral().String()) +} + +func TestBuildMasterJob_CRResources_WorkerUnaffected(t *testing.T) { + lt := newTestLocustTest() + // Set master resources only + lt.Spec.Master.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("500m"), + corev1.ResourceMemory: mustParseQuantity("256Mi"), + }, + } + // Leave worker resources empty + lt.Spec.Worker.Resources = corev1.ResourceRequirements{} + cfg := newTestConfig() + + masterJob := BuildMasterJob(lt, cfg, logr.Discard()) + workerJob := BuildWorkerJob(lt, cfg, logr.Discard()) + + masterContainer := masterJob.Spec.Template.Spec.Containers[0] + workerContainer := workerJob.Spec.Template.Spec.Containers[0] + + // Master uses CR resources + assert.Equal(t, "500m", masterContainer.Resources.Requests.Cpu().String()) + assert.Equal(t, "256Mi", masterContainer.Resources.Requests.Memory().String()) + + // Worker uses operator defaults (independent) + assert.Equal(t, "250m", workerContainer.Resources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", workerContainer.Resources.Requests.Memory().String()) +} + +// Helper function for tests +func mustParseQuantity(s string) resource.Quantity { + q, err := resource.ParseQuantity(s) + if err != nil { + panic(err) + } + return q +} + +// ============================================ +// Integration Tests - Helm Role-Specific Resources and 3-Level Precedence +// ============================================ + +func TestBuildMasterJob_WithHelmMasterResources(t *testing.T) { + lt := newTestLocustTest() + // NO CR resources (empty ResourceRequirements) + lt.Spec.Master.Resources = corev1.ResourceRequirements{} + + cfg := newTestConfig() + // Set role-specific fields for master + cfg.MasterCPURequest = "500m" + 
cfg.MasterMemRequest = "512Mi" + cfg.MasterEphemeralStorageRequest = "" // Empty - should fall through to unified + cfg.MasterCPULimit = "2000m" + cfg.MasterMemLimit = "2Gi" + cfg.MasterEphemeralStorageLimit = "" // Empty - should fall through to unified + // Unified fields remain at default values (250m, 128Mi, 30M, 1000m, 1024Mi, 50M) + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Assert container resources use master-specific values + assert.Equal(t, "500m", resources.Requests.Cpu().String()) + assert.Equal(t, "512Mi", resources.Requests.Memory().String()) + assert.Equal(t, "2", resources.Limits.Cpu().String()) + assert.Equal(t, "2Gi", resources.Limits.Memory().String()) + + // Assert ephemeral storage falls back to unified values (master-specific ephemeral is empty) + assert.Equal(t, "30M", resources.Requests.StorageEphemeral().String()) + assert.Equal(t, "50M", resources.Limits.StorageEphemeral().String()) +} + +func TestBuildWorkerJob_WithHelmWorkerResources(t *testing.T) { + lt := newTestLocustTest() + // NO CR resources + lt.Spec.Worker.Resources = corev1.ResourceRequirements{} + + cfg := newTestConfig() + // Set role-specific fields for worker + cfg.WorkerCPURequest = "300m" + cfg.WorkerMemRequest = "256Mi" + cfg.WorkerCPULimit = "1500m" + cfg.WorkerMemLimit = "1536Mi" + // Unified fields remain at default values + + workerJob := BuildWorkerJob(lt, cfg, logr.Discard()) + masterJob := BuildMasterJob(lt, cfg, logr.Discard()) + + workerContainer := workerJob.Spec.Template.Spec.Containers[0] + workerResources := workerContainer.Resources + + // Verify worker-specific resources used + assert.Equal(t, "300m", workerResources.Requests.Cpu().String()) + assert.Equal(t, "256Mi", workerResources.Requests.Memory().String()) + assert.Equal(t, "1500m", workerResources.Limits.Cpu().String()) + assert.Equal(t, "1536Mi", workerResources.Limits.Memory().String()) + + // 
Build master job with same config, verify master uses unified (NOT worker values) + masterContainer := masterJob.Spec.Template.Spec.Containers[0] + masterResources := masterContainer.Resources + + // Master should use unified defaults (not worker-specific) + assert.Equal(t, "250m", masterResources.Requests.Cpu().String()) + assert.Equal(t, "128Mi", masterResources.Requests.Memory().String()) +} + +func TestBuildMasterJob_CROverridesHelmRoleSpecific(t *testing.T) { + lt := newTestLocustTest() + // Set CR resources + lt.Spec.Master.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("1000m"), + corev1.ResourceMemory: mustParseQuantity("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustParseQuantity("3000m"), + corev1.ResourceMemory: mustParseQuantity("3Gi"), + }, + } + + cfg := newTestConfig() + // Set master-specific config fields + cfg.MasterCPURequest = "500m" + cfg.MasterMemRequest = "512Mi" + cfg.MasterCPULimit = "2000m" + cfg.MasterMemLimit = "2Gi" + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Assert CR resources win (Level 1 > Level 2) + assert.Equal(t, "1", resources.Requests.Cpu().String()) + assert.Equal(t, "1Gi", resources.Requests.Memory().String()) + assert.Equal(t, "3", resources.Limits.Cpu().String()) + assert.Equal(t, "3Gi", resources.Limits.Memory().String()) +} + +func TestBuildMasterJob_HelmRoleSpecific_PrecedenceOverUnified(t *testing.T) { + lt := newTestLocustTest() + // NO CR resources + lt.Spec.Master.Resources = corev1.ResourceRequirements{} + + cfg := newTestConfig() + // Set master-specific: only CPU request + cfg.MasterCPURequest = "500m" + cfg.MasterMemRequest = "" // Empty - should fall through to unified + // Unified: PodCPURequest="250m", PodMemRequest="128Mi" + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + container := 
job.Spec.Template.Spec.Containers[0] + resources := container.Resources + + // Assert CPU is "500m" (from master-specific) + assert.Equal(t, "500m", resources.Requests.Cpu().String()) + // Assert memory is "128Mi" (falls through to unified) + assert.Equal(t, "128Mi", resources.Requests.Memory().String()) + // Assert ephemeral is "30M" (falls through to unified) + assert.Equal(t, "30M", resources.Requests.StorageEphemeral().String()) +} + +// ============================================ +// SecurityContext Tests +// ============================================ + +func TestBuildMasterJob_HasSecurityContext(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildMasterJob(lt, cfg, logr.Discard()) + + // Verify SecurityContext is set + require.NotNil(t, job.Spec.Template.Spec.SecurityContext, "SecurityContext should be set") + + // Verify SeccompProfile is RuntimeDefault + require.NotNil(t, job.Spec.Template.Spec.SecurityContext.SeccompProfile, "SeccompProfile should be set") + assert.Equal(t, corev1.SeccompProfileTypeRuntimeDefault, job.Spec.Template.Spec.SecurityContext.SeccompProfile.Type, "SeccompProfile should be RuntimeDefault") +} + +func TestBuildWorkerJob_HasSecurityContext(t *testing.T) { + lt := newTestLocustTest() + cfg := newTestConfig() + + job := BuildWorkerJob(lt, cfg, logr.Discard()) + + // Verify SecurityContext is set + require.NotNil(t, job.Spec.Template.Spec.SecurityContext, "SecurityContext should be set") + + // Verify SeccompProfile is RuntimeDefault + require.NotNil(t, job.Spec.Template.Spec.SecurityContext.SeccompProfile, "SeccompProfile should be set") + assert.Equal(t, corev1.SeccompProfileTypeRuntimeDefault, job.Spec.Template.Spec.SecurityContext.SeccompProfile.Type, "SeccompProfile should be RuntimeDefault") +} diff --git a/internal/resources/labels.go b/internal/resources/labels.go new file mode 100644 index 00000000..7fd3eb78 --- /dev/null +++ b/internal/resources/labels.go @@ -0,0 +1,103 @@ +/* +Copyright 
2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + "strings" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" +) + +// NodeName constructs the node name from the CR name and operational mode. +// Format: "{cr-name}-{mode}" with dots replaced by dashes. +// Example: "team-a.load-test" -> "team-a-load-test-master" +func NodeName(crName string, mode OperationalMode) string { + name := fmt.Sprintf("%s-%s", crName, mode.String()) + return strings.ReplaceAll(name, ".", "-") +} + +// BuildLabels constructs the labels for a pod based on the LocustTest CR and mode. +// Includes required labels and merges user-defined labels from the CR spec. +func BuildLabels(lt *locustv2.LocustTest, mode OperationalMode) map[string]string { + nodeName := NodeName(lt.Name, mode) + + labels := map[string]string{ + LabelApp: lt.Name, + LabelPodName: nodeName, + LabelManagedBy: ManagedByValue, + LabelTestName: lt.Name, + } + + // Merge user-defined labels, protecting operator-critical labels + for k, v := range getUserLabels(lt, mode) { + if k == LabelPodName || k == LabelManagedBy { + continue + } + labels[k] = v + } + + return labels +} + +// getUserLabels extracts user-defined labels from the CR spec for the given mode. 
+func getUserLabels(lt *locustv2.LocustTest, mode OperationalMode) map[string]string { + switch mode { + case Master: + return lt.Spec.Master.Labels + case Worker: + return lt.Spec.Worker.Labels + default: + return nil + } +} + +// BuildAnnotations constructs the annotations for a pod based on the LocustTest CR and mode. +// Master pods include Prometheus scrape annotations; worker pods do not. +// When OTel is enabled, Prometheus annotations are suppressed (Locust exports natively via OTLP). +// Merges user-defined annotations from the CR spec. +func BuildAnnotations(lt *locustv2.LocustTest, mode OperationalMode, cfg *config.OperatorConfig) map[string]string { + annotations := make(map[string]string) + + // Master pods get Prometheus annotations ONLY if OTel is disabled + // When OTel is enabled, Locust exports metrics natively via OTLP β€” no sidecar or scrape annotations needed + if mode == Master && !IsOTelEnabled(lt) { + annotations[AnnotationPrometheusScrape] = "true" + annotations[AnnotationPrometheusPath] = MetricsEndpointPath + annotations[AnnotationPrometheusPort] = fmt.Sprintf("%d", cfg.MetricsExporterPort) + } + + // Merge user-defined annotations + for k, v := range getUserAnnotations(lt, mode) { + annotations[k] = v + } + + return annotations +} + +// getUserAnnotations extracts user-defined annotations from the CR spec for the given mode. +func getUserAnnotations(lt *locustv2.LocustTest, mode OperationalMode) map[string]string { + switch mode { + case Master: + return lt.Spec.Master.Annotations + case Worker: + return lt.Spec.Worker.Annotations + default: + return nil + } +} diff --git a/internal/resources/labels_test.go b/internal/resources/labels_test.go new file mode 100644 index 00000000..09277f99 --- /dev/null +++ b/internal/resources/labels_test.go @@ -0,0 +1,330 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNodeName(t *testing.T) { + tests := []struct { + name string + crName string + mode OperationalMode + expected string + }{ + { + name: "basic master name", + crName: "my-test", + mode: Master, + expected: "my-test-master", + }, + { + name: "basic worker name", + crName: "my-test", + mode: Worker, + expected: "my-test-worker", + }, + { + name: "dots replaced with dashes", + crName: "team-a.load-test", + mode: Master, + expected: "team-a-load-test-master", + }, + { + name: "multiple dots replaced", + crName: "a.b.c.test", + mode: Worker, + expected: "a-b-c-test-worker", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := NodeName(tt.crName, tt.mode) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestBuildLabels(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } + + labels := BuildLabels(lt, Master) + + assert.Equal(t, "my-test", labels[LabelApp]) + assert.Equal(t, "my-test-master", labels[LabelPodName]) + 
assert.Equal(t, ManagedByValue, labels[LabelManagedBy]) + assert.Equal(t, "my-test", labels[LabelTestName]) +} + +func TestBuildLabels_WithUserLabels(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + Labels: map[string]string{ + "custom-label": "master-value", + "team": "platform", + }, + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + Labels: map[string]string{ + "custom-label": "worker-value", + }, + }, + }, + } + + masterLabels := BuildLabels(lt, Master) + assert.Equal(t, "master-value", masterLabels["custom-label"]) + assert.Equal(t, "platform", masterLabels["team"]) + + workerLabels := BuildLabels(lt, Worker) + assert.Equal(t, "worker-value", workerLabels["custom-label"]) + assert.Empty(t, workerLabels["team"]) +} + +func TestBuildAnnotations_Master_PrometheusWhenOTelDisabled(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + annotations := BuildAnnotations(lt, Master, cfg) + + assert.Equal(t, "true", annotations[AnnotationPrometheusScrape]) + assert.Equal(t, MetricsEndpointPath, annotations[AnnotationPrometheusPath]) + assert.Equal(t, "9646", annotations[AnnotationPrometheusPort]) +} + +func TestBuildAnnotations_Master_NoPrometheusWhenOTelEnabled(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ 
+ Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + Observability: &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "http://otel-collector:4317", + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + annotations := BuildAnnotations(lt, Master, cfg) + + // When OTel is enabled, Prometheus annotations should NOT be present + assert.Empty(t, annotations[AnnotationPrometheusScrape]) + assert.Empty(t, annotations[AnnotationPrometheusPath]) + assert.Empty(t, annotations[AnnotationPrometheusPort]) +} + +func TestBuildAnnotations_Worker(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + annotations := BuildAnnotations(lt, Worker, cfg) + + // Worker should NOT have Prometheus annotations + assert.Empty(t, annotations[AnnotationPrometheusScrape]) + assert.Empty(t, annotations[AnnotationPrometheusPath]) + assert.Empty(t, annotations[AnnotationPrometheusPort]) +} + +func TestBuildAnnotations_WithUserAnnotations(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + Annotations: map[string]string{ + "custom-annotation": "master-value", + }, + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + 
Replicas: 3, + Annotations: map[string]string{ + "custom-annotation": "worker-value", + }, + }, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + masterAnnotations := BuildAnnotations(lt, Master, cfg) + assert.Equal(t, "master-value", masterAnnotations["custom-annotation"]) + // Should still have Prometheus annotations + assert.Equal(t, "true", masterAnnotations[AnnotationPrometheusScrape]) + + workerAnnotations := BuildAnnotations(lt, Worker, cfg) + assert.Equal(t, "worker-value", workerAnnotations["custom-annotation"]) +} + +func TestBuildLabels_NilLabelsSpec(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + Labels: nil, + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } + + labels := BuildLabels(lt, Master) + + // Should have base labels even when user labels are nil + assert.Equal(t, "my-test", labels[LabelApp]) + assert.Equal(t, ManagedByValue, labels[LabelManagedBy]) +} + +func TestBuildAnnotations_NilAnnotationsSpec(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + Annotations: nil, + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + annotations := BuildAnnotations(lt, Master, cfg) + + // Should still have Prometheus annotations for master + assert.Equal(t, "true", annotations[AnnotationPrometheusScrape]) +} + +func TestWorkerPortInts(t *testing.T) { + ports := WorkerPortInts() + + assert.Contains(t, ports, 
int32(WorkerPort)) + assert.Len(t, ports, 1) +} + +func TestMasterPortInts(t *testing.T) { + ports := MasterPortInts() + + assert.Contains(t, ports, int32(MasterPort)) + assert.Contains(t, ports, int32(MasterBindPort)) + assert.Contains(t, ports, int32(WebUIPort)) + assert.Len(t, ports, 3) +} diff --git a/internal/resources/otel.go b/internal/resources/otel.go new file mode 100644 index 00000000..e46dffc7 --- /dev/null +++ b/internal/resources/otel.go @@ -0,0 +1,125 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "sort" + "strconv" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + corev1 "k8s.io/api/core/v1" +) + +// OTel environment variable names +const ( + EnvOTelTracesExporter = "OTEL_TRACES_EXPORTER" + EnvOTelMetricsExporter = "OTEL_METRICS_EXPORTER" + EnvOTelExporterEndpoint = "OTEL_EXPORTER_OTLP_ENDPOINT" + EnvOTelExporterProtocol = "OTEL_EXPORTER_OTLP_PROTOCOL" + EnvOTelExporterInsecure = "OTEL_EXPORTER_OTLP_INSECURE" +) + +// Default OTel values +const ( + OTelExporterOTLP = "otlp" + OTelProtocolGRPC = "grpc" + OTelProtocolHTTP = "http/protobuf" +) + +// IsOTelEnabled returns true if OpenTelemetry is enabled in the spec. 
+func IsOTelEnabled(lt *locustv2.LocustTest) bool { + if lt.Spec.Observability == nil { + return false + } + if lt.Spec.Observability.OpenTelemetry == nil { + return false + } + return lt.Spec.Observability.OpenTelemetry.Enabled +} + +// GetOTelConfig returns the OpenTelemetry configuration, or nil if not configured. +func GetOTelConfig(lt *locustv2.LocustTest) *locustv2.OpenTelemetryConfig { + if lt.Spec.Observability == nil { + return nil + } + return lt.Spec.Observability.OpenTelemetry +} + +// BuildOTelEnvVars creates environment variables for OpenTelemetry configuration. +// Returns nil if OTel is not enabled. +func BuildOTelEnvVars(lt *locustv2.LocustTest) []corev1.EnvVar { + if !IsOTelEnabled(lt) { + return nil + } + + otelCfg := GetOTelConfig(lt) + if otelCfg == nil { + return nil + } + + var envVars []corev1.EnvVar + + // Core OTel exporter configuration + envVars = append(envVars, + corev1.EnvVar{Name: EnvOTelTracesExporter, Value: OTelExporterOTLP}, + corev1.EnvVar{Name: EnvOTelMetricsExporter, Value: OTelExporterOTLP}, + ) + + // Endpoint (required when enabled - validated by webhook) + if otelCfg.Endpoint != "" { + envVars = append(envVars, corev1.EnvVar{ + Name: EnvOTelExporterEndpoint, + Value: otelCfg.Endpoint, + }) + } + + // Protocol (default: grpc) + protocol := otelCfg.Protocol + if protocol == "" { + protocol = OTelProtocolGRPC + } + envVars = append(envVars, corev1.EnvVar{ + Name: EnvOTelExporterProtocol, + Value: protocol, + }) + + // Insecure flag (only set if true) + if otelCfg.Insecure { + envVars = append(envVars, corev1.EnvVar{ + Name: EnvOTelExporterInsecure, + Value: strconv.FormatBool(true), + }) + } + + // Extra environment variables from spec (sorted for deterministic order) + if len(otelCfg.ExtraEnvVars) > 0 { + keys := make([]string, 0, len(otelCfg.ExtraEnvVars)) + for key := range otelCfg.ExtraEnvVars { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + envVars = append(envVars, corev1.EnvVar{ + 
Name: key, + Value: otelCfg.ExtraEnvVars[key], + }) + } + } + + return envVars +} diff --git a/internal/resources/otel_test.go b/internal/resources/otel_test.go new file mode 100644 index 00000000..31083c0e --- /dev/null +++ b/internal/resources/otel_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Helper to create a minimal LocustTest for OTel testing +func newOTelTestLocustTest() *locustv2.LocustTest { + return &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lt", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:2.32.0", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/locustfile.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/locustfile.py", + Replicas: 3, + }, + }, + } +} + +// ===== IsOTelEnabled Tests ===== + +func TestIsOTelEnabled_NilObservability(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = nil + + assert.False(t, IsOTelEnabled(lt)) +} + +func TestIsOTelEnabled_NilOpenTelemetry(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: nil, + } + + assert.False(t, IsOTelEnabled(lt)) +} + +func 
TestIsOTelEnabled_Disabled(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: false, + }, + } + + assert.False(t, IsOTelEnabled(lt)) +} + +func TestIsOTelEnabled_Enabled(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + + assert.True(t, IsOTelEnabled(lt)) +} + +// ===== GetOTelConfig Tests ===== + +func TestGetOTelConfig_NilObservability(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = nil + + assert.Nil(t, GetOTelConfig(lt)) +} + +func TestGetOTelConfig_NilOpenTelemetry(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: nil, + } + + assert.Nil(t, GetOTelConfig(lt)) +} + +func TestGetOTelConfig_HasConfig(t *testing.T) { + lt := newOTelTestLocustTest() + expectedConfig := &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + Protocol: "grpc", + } + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: expectedConfig, + } + + result := GetOTelConfig(lt) + assert.NotNil(t, result) + assert.Equal(t, expectedConfig, result) +} + +// ===== BuildOTelEnvVars Tests ===== + +func TestBuildOTelEnvVars_Disabled(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: false, + }, + } + + envVars := BuildOTelEnvVars(lt) + assert.Nil(t, envVars) +} + +func TestBuildOTelEnvVars_NilObservability(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = nil + + envVars := BuildOTelEnvVars(lt) + assert.Nil(t, envVars) +} + +func TestBuildOTelEnvVars_EnabledMinimal(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = 
&locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + + envVars := BuildOTelEnvVars(lt) + assert.NotNil(t, envVars) + + // Should have: traces exporter, metrics exporter, endpoint, protocol (default grpc) + assert.Len(t, envVars, 4) + + envMap := envVarsToMap(envVars) + assert.Equal(t, "otlp", envMap[EnvOTelTracesExporter]) + assert.Equal(t, "otlp", envMap[EnvOTelMetricsExporter]) + assert.Equal(t, "otel-collector:4317", envMap[EnvOTelExporterEndpoint]) + assert.Equal(t, "grpc", envMap[EnvOTelExporterProtocol]) +} + +func TestBuildOTelEnvVars_FullConfig(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector.monitoring:4317", + Protocol: "grpc", + Insecure: true, + }, + } + + envVars := BuildOTelEnvVars(lt) + assert.NotNil(t, envVars) + + // Should have: traces exporter, metrics exporter, endpoint, protocol, insecure + assert.Len(t, envVars, 5) + + envMap := envVarsToMap(envVars) + assert.Equal(t, "otlp", envMap[EnvOTelTracesExporter]) + assert.Equal(t, "otlp", envMap[EnvOTelMetricsExporter]) + assert.Equal(t, "otel-collector.monitoring:4317", envMap[EnvOTelExporterEndpoint]) + assert.Equal(t, "grpc", envMap[EnvOTelExporterProtocol]) + assert.Equal(t, "true", envMap[EnvOTelExporterInsecure]) +} + +func TestBuildOTelEnvVars_ExtraEnvVars(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + ExtraEnvVars: map[string]string{ + "OTEL_RESOURCE_ATTRIBUTES": "service.name=locust-load-test", + "OTEL_TRACES_SAMPLER": "parentbased_traceidratio", + "OTEL_TRACES_SAMPLER_ARG": "0.1", + }, + }, + } + + envVars := BuildOTelEnvVars(lt) + assert.NotNil(t, envVars) + + // Should have: traces exporter, 
metrics exporter, endpoint, protocol (default) + 3 extra + assert.Len(t, envVars, 7) + + envMap := envVarsToMap(envVars) + assert.Equal(t, "service.name=locust-load-test", envMap["OTEL_RESOURCE_ATTRIBUTES"]) + assert.Equal(t, "parentbased_traceidratio", envMap["OTEL_TRACES_SAMPLER"]) + assert.Equal(t, "0.1", envMap["OTEL_TRACES_SAMPLER_ARG"]) +} + +func TestBuildOTelEnvVars_DefaultProtocol(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + Protocol: "", // Empty - should default to grpc + }, + } + + envVars := BuildOTelEnvVars(lt) + envMap := envVarsToMap(envVars) + assert.Equal(t, "grpc", envMap[EnvOTelExporterProtocol]) +} + +func TestBuildOTelEnvVars_HTTPProtocol(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4318", + Protocol: "http/protobuf", + }, + } + + envVars := BuildOTelEnvVars(lt) + envMap := envVarsToMap(envVars) + assert.Equal(t, "http/protobuf", envMap[EnvOTelExporterProtocol]) +} + +func TestBuildOTelEnvVars_InsecureFalse(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + Insecure: false, + }, + } + + envVars := BuildOTelEnvVars(lt) + envMap := envVarsToMap(envVars) + + // Insecure env var should NOT be present when false + _, exists := envMap[EnvOTelExporterInsecure] + assert.False(t, exists, "OTEL_EXPORTER_OTLP_INSECURE should not be set when Insecure is false") +} + +func TestBuildOTelEnvVars_ExtraEnvVarsSortedOrder(t *testing.T) { + lt := newOTelTestLocustTest() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + 
Endpoint: "otel-collector:4317", + ExtraEnvVars: map[string]string{ + "ZEBRA_VAR": "z", + "APPLE_VAR": "a", + "MANGO_VAR": "m", + }, + }, + } + + envVars := BuildOTelEnvVars(lt) + + // Extra env vars should appear after the core ones, in sorted order + // Core: OTEL_TRACES_EXPORTER, OTEL_METRICS_EXPORTER, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_PROTOCOL + // Extra (sorted): APPLE_VAR, MANGO_VAR, ZEBRA_VAR + assert.Len(t, envVars, 7) + assert.Equal(t, "APPLE_VAR", envVars[4].Name) + assert.Equal(t, "MANGO_VAR", envVars[5].Name) + assert.Equal(t, "ZEBRA_VAR", envVars[6].Name) +} + +// Helper to convert env vars slice to map for easier assertions +func envVarsToMap(envVars []corev1.EnvVar) map[string]string { + result := make(map[string]string) + for _, ev := range envVars { + result[ev.Name] = ev.Value + } + return result +} diff --git a/internal/resources/ports.go b/internal/resources/ports.go new file mode 100644 index 00000000..adb234fa --- /dev/null +++ b/internal/resources/ports.go @@ -0,0 +1,49 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" +) + +// MasterPorts returns the container ports for the master node. 
+// Ports: 5557 (master), 5558 (bind), 8089 (web UI) +func MasterPorts() []corev1.ContainerPort { + return []corev1.ContainerPort{ + {ContainerPort: MasterPort}, + {ContainerPort: MasterBindPort}, + {ContainerPort: WebUIPort}, + } +} + +// WorkerPorts returns the container ports for worker nodes. +// Ports: 8080 (worker) +func WorkerPorts() []corev1.ContainerPort { + return []corev1.ContainerPort{ + {ContainerPort: WorkerPort}, + } +} + +// MasterPortInts returns the master ports as int32 slice (for service creation). +func MasterPortInts() []int32 { + return []int32{MasterPort, MasterBindPort, WebUIPort} +} + +// WorkerPortInts returns the worker ports as int32 slice. +func WorkerPortInts() []int32 { + return []int32{WorkerPort} +} diff --git a/internal/resources/service.go b/internal/resources/service.go new file mode 100644 index 00000000..789e9e85 --- /dev/null +++ b/internal/resources/service.go @@ -0,0 +1,72 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BuildMasterService creates a Kubernetes Service for the Locust master node. +// The service exposes ports 5557 (master), 5558 (bind), and — only when OpenTelemetry is disabled — the metrics exporter port. +// Port 8089 (web UI) is NOT exposed via the service. 
+func BuildMasterService(lt *locustv2.LocustTest, cfg *config.OperatorConfig) *corev1.Service { + nodeName := NodeName(lt.Name, Master) + + // Build service ports - exclude WebUIPort (8089) + // Pre-allocate: 2 master ports (excluding WebUI) + 1 metrics port = 3 + servicePorts := make([]corev1.ServicePort, 0, 3) + + for _, port := range MasterPortInts() { + // Skip WebUI port - it's not exposed via service + if port == WebUIPort { + continue + } + + servicePorts = append(servicePorts, corev1.ServicePort{ + Name: fmt.Sprintf("%s%d", PortNamePrefix, port), + Protocol: corev1.ProtocolTCP, + Port: port, + }) + } + + // Add metrics port ONLY if OTel is disabled (sidecar will be deployed) + if !IsOTelEnabled(lt) { + servicePorts = append(servicePorts, corev1.ServicePort{ + Name: MetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: cfg.MetricsExporterPort, + }) + } + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: lt.Namespace, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + LabelPodName: nodeName, + }, + Ports: servicePorts, + }, + } +} diff --git a/internal/resources/service_test.go b/internal/resources/service_test.go new file mode 100644 index 00000000..f0324ec8 --- /dev/null +++ b/internal/resources/service_test.go @@ -0,0 +1,232 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/AbdelrhmanHamouda/locust-k8s-operator/internal/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func newTestLocustTestForService() *locustv2.LocustTest { + return &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test", + Namespace: "default", + }, + Spec: locustv2.LocustTestSpec{ + Image: "locustio/locust:latest", + Master: locustv2.MasterSpec{ + Command: "locust -f /lotest/src/test.py", + }, + Worker: locustv2.WorkerSpec{ + Command: "locust -f /lotest/src/test.py", + Replicas: 3, + }, + }, + } +} + +func TestBuildMasterService(t *testing.T) { + lt := newTestLocustTestForService() + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + require.NotNil(t, svc) + assert.Equal(t, "my-test-master", svc.Name) + assert.Equal(t, "default", svc.Namespace) +} + +func TestBuildMasterService_Ports(t *testing.T) { + lt := newTestLocustTestForService() + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + // Should have 3 ports: 5557, 5558, and metrics (9646) + // WebUI port 8089 should NOT be included + assert.Len(t, svc.Spec.Ports, 3) + + portNumbers := make([]int32, len(svc.Spec.Ports)) + for i, p := range svc.Spec.Ports { + portNumbers[i] = p.Port + } + + assert.Contains(t, portNumbers, int32(MasterPort)) + assert.Contains(t, portNumbers, int32(MasterBindPort)) + assert.Contains(t, portNumbers, int32(9646)) +} + +func TestBuildMasterService_NoWebUIPort(t *testing.T) { + lt := newTestLocustTestForService() + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + // WebUI port 8089 should NOT be exposed + for _, p := range svc.Spec.Ports { + 
assert.NotEqual(t, int32(WebUIPort), p.Port, "WebUI port 8089 should NOT be exposed via service") + } +} + +func TestBuildMasterService_CustomMetricsPort(t *testing.T) { + lt := newTestLocustTestForService() + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9999, + } + + svc := BuildMasterService(lt, cfg) + + // Find the metrics port + var metricsPort *corev1.ServicePort + for i := range svc.Spec.Ports { + if svc.Spec.Ports[i].Name == MetricsPortName { + metricsPort = &svc.Spec.Ports[i] + break + } + } + + require.NotNil(t, metricsPort, "Metrics port should exist") + assert.Equal(t, int32(9999), metricsPort.Port) +} + +func TestBuildMasterService_Selector(t *testing.T) { + lt := newTestLocustTestForService() + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + assert.Equal(t, "my-test-master", svc.Spec.Selector[LabelPodName]) +} + +// ============================================ +// OTel Support Tests +// ============================================ + +func TestBuildMasterService_OTelDisabled_HasMetricsPort(t *testing.T) { + lt := newTestLocustTestForService() + // No OTel config = disabled + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + // Should have 3 ports: 5557, 5558, and metrics (9646) + assert.Len(t, svc.Spec.Ports, 3) + + var metricsPortFound bool + for _, p := range svc.Spec.Ports { + if p.Name == MetricsPortName { + metricsPortFound = true + assert.Equal(t, int32(9646), p.Port) + } + } + assert.True(t, metricsPortFound, "Metrics port should be present when OTel is disabled") +} + +func TestBuildMasterService_OTelEnabled_NoMetricsPort(t *testing.T) { + lt := newTestLocustTestForService() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: true, + Endpoint: "otel-collector:4317", + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := 
BuildMasterService(lt, cfg) + + // Should have 2 ports: 5557, 5558 (no metrics) + assert.Len(t, svc.Spec.Ports, 2) + + for _, p := range svc.Spec.Ports { + assert.NotEqual(t, MetricsPortName, p.Name, "Metrics port should NOT be present when OTel is enabled") + assert.NotEqual(t, int32(9646), p.Port, "Metrics port 9646 should NOT be present when OTel is enabled") + } +} + +func TestBuildMasterService_NoObservability_HasMetricsPort(t *testing.T) { + lt := newTestLocustTestForService() + lt.Spec.Observability = nil + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + // Should have 3 ports when observability is nil + assert.Len(t, svc.Spec.Ports, 3) + + var metricsPortFound bool + for _, p := range svc.Spec.Ports { + if p.Name == MetricsPortName { + metricsPortFound = true + } + } + assert.True(t, metricsPortFound, "Metrics port should be present when observability is nil") +} + +func TestBuildMasterService_OTelEnabledFalse_HasMetricsPort(t *testing.T) { + lt := newTestLocustTestForService() + lt.Spec.Observability = &locustv2.ObservabilityConfig{ + OpenTelemetry: &locustv2.OpenTelemetryConfig{ + Enabled: false, + }, + } + + cfg := &config.OperatorConfig{ + MetricsExporterPort: 9646, + } + + svc := BuildMasterService(lt, cfg) + + // Should have 3 ports when OTel is explicitly disabled + assert.Len(t, svc.Spec.Ports, 3) + + var metricsPortFound bool + for _, p := range svc.Spec.Ports { + if p.Name == MetricsPortName { + metricsPortFound = true + } + } + assert.True(t, metricsPortFound, "Metrics port should be present when OTel is explicitly disabled") +} diff --git a/internal/resources/types.go b/internal/resources/types.go new file mode 100644 index 00000000..98e0925f --- /dev/null +++ b/internal/resources/types.go @@ -0,0 +1,32 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// OperationalMode represents the mode of operation for a Locust node. +type OperationalMode string + +const ( + // Master represents the master node mode. + Master OperationalMode = "master" + // Worker represents the worker node mode. + Worker OperationalMode = "worker" +) + +// String returns the string representation of the OperationalMode. +func (m OperationalMode) String() string { + return string(m) +} diff --git a/internal/resources/volumes.go b/internal/resources/volumes.go new file mode 100644 index 00000000..7bceb9c7 --- /dev/null +++ b/internal/resources/volumes.go @@ -0,0 +1,90 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + corev1 "k8s.io/api/core/v1" +) + +// Target constants for volume mount filtering. +const ( + TargetMaster = "master" + TargetWorker = "worker" + TargetBoth = "both" +) + +// BuildUserVolumes returns user-defined volumes filtered for the given mode. 
+// Only volumes that have at least one mount targeting this mode are included. +func BuildUserVolumes(lt *locustv2.LocustTest, mode OperationalMode) []corev1.Volume { + if len(lt.Spec.Volumes) == 0 { + return nil + } + + var volumes []corev1.Volume + for _, vol := range lt.Spec.Volumes { + if shouldIncludeVolume(vol.Name, lt.Spec.VolumeMounts, mode) { + volumes = append(volumes, vol) + } + } + return volumes +} + +// BuildUserVolumeMounts returns user-defined volume mounts filtered for the given mode. +func BuildUserVolumeMounts(lt *locustv2.LocustTest, mode OperationalMode) []corev1.VolumeMount { + if len(lt.Spec.VolumeMounts) == 0 { + return nil + } + + var mounts []corev1.VolumeMount + for _, tvm := range lt.Spec.VolumeMounts { + if shouldApplyMount(tvm, mode) { + // Convert TargetedVolumeMount to VolumeMount (strip Target field) + mounts = append(mounts, tvm.VolumeMount) + } + } + return mounts +} + +// shouldApplyMount checks if a mount applies to the given operational mode. +func shouldApplyMount(mount locustv2.TargetedVolumeMount, mode OperationalMode) bool { + target := mount.Target + if target == "" { + target = TargetBoth + } + + switch target { + case TargetBoth: + return true + case TargetMaster: + return mode == Master + case TargetWorker: + return mode == Worker + default: + return false + } +} + +// shouldIncludeVolume checks if a volume has any mounts for the given mode. +func shouldIncludeVolume(volumeName string, mounts []locustv2.TargetedVolumeMount, mode OperationalMode) bool { + for _, mount := range mounts { + if mount.Name == volumeName && shouldApplyMount(mount, mode) { + return true + } + } + return false +} diff --git a/internal/resources/volumes_test.go b/internal/resources/volumes_test.go new file mode 100644 index 00000000..fbed7c7e --- /dev/null +++ b/internal/resources/volumes_test.go @@ -0,0 +1,339 @@ +/* +Copyright 2026. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestBuildUserVolumes_Empty(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: nil, + }, + } + + result := BuildUserVolumes(lt, Master) + assert.Nil(t, result) + + result = BuildUserVolumes(lt, Worker) + assert.Nil(t, result) +} + +func TestBuildUserVolumes_AllTargetBoth(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "vol1", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "vol2", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "vol1", MountPath: "/data1"}, Target: "both"}, + {VolumeMount: corev1.VolumeMount{Name: "vol2", MountPath: "/data2"}, Target: "both"}, + }, + }, + } + + // Both volumes should be included for master + result := BuildUserVolumes(lt, Master) + assert.Len(t, result, 2) + assert.Equal(t, "vol1", result[0].Name) + assert.Equal(t, "vol2", result[1].Name) + + // Both volumes should 
be included for worker + result = BuildUserVolumes(lt, Worker) + assert.Len(t, result, 2) +} + +func TestBuildUserVolumes_MasterOnly(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "results", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + }, + }, + } + + // Volume should be included for master + result := BuildUserVolumes(lt, Master) + assert.Len(t, result, 1) + assert.Equal(t, "results", result[0].Name) + + // Volume should NOT be included for worker + result = BuildUserVolumes(lt, Worker) + assert.Nil(t, result) +} + +func TestBuildUserVolumes_WorkerOnly(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "certs", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "certs", MountPath: "/certs"}, Target: "worker"}, + }, + }, + } + + // Volume should NOT be included for master + result := BuildUserVolumes(lt, Master) + assert.Nil(t, result) + + // Volume should be included for worker + result = BuildUserVolumes(lt, Worker) + assert.Len(t, result, 1) + assert.Equal(t, "certs", result[0].Name) +} + +func TestBuildUserVolumes_Mixed(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "results", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "shared", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: 
"certs", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared", MountPath: "/shared"}, Target: "both"}, + {VolumeMount: corev1.VolumeMount{Name: "certs", MountPath: "/certs"}, Target: "worker"}, + }, + }, + } + + // Master should get results and shared + result := BuildUserVolumes(lt, Master) + assert.Len(t, result, 2) + names := []string{result[0].Name, result[1].Name} + assert.Contains(t, names, "results") + assert.Contains(t, names, "shared") + + // Worker should get shared and certs + result = BuildUserVolumes(lt, Worker) + assert.Len(t, result, 2) + names = []string{result[0].Name, result[1].Name} + assert.Contains(t, names, "shared") + assert.Contains(t, names, "certs") +} + +func TestBuildUserVolumes_VolumeWithNoMatchingMount(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + Volumes: []corev1.Volume{ + {Name: "orphan", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "used", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "used", MountPath: "/used"}, Target: "both"}, + }, + }, + } + + // Only "used" should be included, "orphan" has no mount + result := BuildUserVolumes(lt, Master) + assert.Len(t, result, 1) + assert.Equal(t, "used", result[0].Name) +} + +func TestBuildUserVolumeMounts_Empty(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + VolumeMounts: nil, + }, + } + + result := BuildUserVolumeMounts(lt, Master) + assert.Nil(t, result) + + result = BuildUserVolumeMounts(lt, Worker) + 
assert.Nil(t, result) +} + +func TestBuildUserVolumeMounts_MasterMode(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared", MountPath: "/shared"}, Target: "both"}, + {VolumeMount: corev1.VolumeMount{Name: "certs", MountPath: "/certs"}, Target: "worker"}, + }, + }, + } + + result := BuildUserVolumeMounts(lt, Master) + assert.Len(t, result, 2) + paths := []string{result[0].MountPath, result[1].MountPath} + assert.Contains(t, paths, "/results") + assert.Contains(t, paths, "/shared") +} + +func TestBuildUserVolumeMounts_WorkerMode(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "results", MountPath: "/results"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "shared", MountPath: "/shared"}, Target: "both"}, + {VolumeMount: corev1.VolumeMount{Name: "certs", MountPath: "/certs"}, Target: "worker"}, + }, + }, + } + + result := BuildUserVolumeMounts(lt, Worker) + assert.Len(t, result, 2) + paths := []string{result[0].MountPath, result[1].MountPath} + assert.Contains(t, paths, "/shared") + assert.Contains(t, paths, "/certs") +} + +func TestBuildUserVolumeMounts_DefaultTarget(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + VolumeMounts: []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "data", MountPath: "/data"}, Target: ""}, // Empty = both + }, + }, + } + + // Should be included for both modes + result := BuildUserVolumeMounts(lt, Master) + assert.Len(t, result, 1) + 
assert.Equal(t, "/data", result[0].MountPath) + + result = BuildUserVolumeMounts(lt, Worker) + assert.Len(t, result, 1) + assert.Equal(t, "/data", result[0].MountPath) +} + +func TestBuildUserVolumeMounts_ConvertsToVolumeMount(t *testing.T) { + lt := &locustv2.LocustTest{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: locustv2.LocustTestSpec{ + VolumeMounts: []locustv2.TargetedVolumeMount{ + { + VolumeMount: corev1.VolumeMount{ + Name: "data", + MountPath: "/data", + ReadOnly: true, + SubPath: "subdir", + }, + Target: "both", + }, + }, + }, + } + + result := BuildUserVolumeMounts(lt, Master) + assert.Len(t, result, 1) + assert.Equal(t, "data", result[0].Name) + assert.Equal(t, "/data", result[0].MountPath) + assert.True(t, result[0].ReadOnly) + assert.Equal(t, "subdir", result[0].SubPath) +} + +func TestShouldApplyMount_BothTarget(t *testing.T) { + mount := locustv2.TargetedVolumeMount{ + VolumeMount: corev1.VolumeMount{Name: "test", MountPath: "/test"}, + Target: "both", + } + + assert.True(t, shouldApplyMount(mount, Master)) + assert.True(t, shouldApplyMount(mount, Worker)) +} + +func TestShouldApplyMount_MasterTarget(t *testing.T) { + mount := locustv2.TargetedVolumeMount{ + VolumeMount: corev1.VolumeMount{Name: "test", MountPath: "/test"}, + Target: "master", + } + + assert.True(t, shouldApplyMount(mount, Master)) + assert.False(t, shouldApplyMount(mount, Worker)) +} + +func TestShouldApplyMount_WorkerTarget(t *testing.T) { + mount := locustv2.TargetedVolumeMount{ + VolumeMount: corev1.VolumeMount{Name: "test", MountPath: "/test"}, + Target: "worker", + } + + assert.False(t, shouldApplyMount(mount, Master)) + assert.True(t, shouldApplyMount(mount, Worker)) +} + +func TestShouldApplyMount_EmptyTarget(t *testing.T) { + mount := locustv2.TargetedVolumeMount{ + VolumeMount: corev1.VolumeMount{Name: "test", MountPath: "/test"}, + Target: "", // Empty defaults to "both" + } + + assert.True(t, shouldApplyMount(mount, Master)) + 
assert.True(t, shouldApplyMount(mount, Worker)) +} + +func TestShouldApplyMount_InvalidTarget(t *testing.T) { + mount := locustv2.TargetedVolumeMount{ + VolumeMount: corev1.VolumeMount{Name: "test", MountPath: "/test"}, + Target: "invalid", + } + + assert.False(t, shouldApplyMount(mount, Master)) + assert.False(t, shouldApplyMount(mount, Worker)) +} + +func TestShouldIncludeVolume_HasMatchingMount(t *testing.T) { + mounts := []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "vol1", MountPath: "/vol1"}, Target: "master"}, + {VolumeMount: corev1.VolumeMount{Name: "vol2", MountPath: "/vol2"}, Target: "both"}, + } + + // vol1 matches master + assert.True(t, shouldIncludeVolume("vol1", mounts, Master)) + assert.False(t, shouldIncludeVolume("vol1", mounts, Worker)) + + // vol2 matches both + assert.True(t, shouldIncludeVolume("vol2", mounts, Master)) + assert.True(t, shouldIncludeVolume("vol2", mounts, Worker)) +} + +func TestShouldIncludeVolume_NoMatchingMount(t *testing.T) { + mounts := []locustv2.TargetedVolumeMount{ + {VolumeMount: corev1.VolumeMount{Name: "vol1", MountPath: "/vol1"}, Target: "master"}, + } + + // vol2 has no mount + assert.False(t, shouldIncludeVolume("vol2", mounts, Master)) + assert.False(t, shouldIncludeVolume("vol2", mounts, Worker)) +} diff --git a/internal/testdata/fixtures.go b/internal/testdata/fixtures.go new file mode 100644 index 00000000..ea383621 --- /dev/null +++ b/internal/testdata/fixtures.go @@ -0,0 +1,84 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testdata provides test fixtures and helpers for unit tests. +package testdata + +import ( + "encoding/json" + "os" + "path/filepath" + "runtime" + + locustv1 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v1" + locustv2 "github.com/AbdelrhmanHamouda/locust-k8s-operator/api/v2" +) + +// LoadLocustTest loads a v1 LocustTest from a JSON fixture file. +func LoadLocustTest(filename string) (*locustv1.LocustTest, error) { + _, currentFile, _, _ := runtime.Caller(0) + testdataDir := filepath.Dir(currentFile) + + data, err := os.ReadFile(filepath.Join(testdataDir, filename)) + if err != nil { + return nil, err + } + + var lt locustv1.LocustTest + if err := json.Unmarshal(data, &lt); err != nil { + return nil, err + } + + return &lt, nil +} + +// MustLoadLocustTest loads a v1 LocustTest from a JSON fixture file and panics on error. +// Useful in tests where fixture loading should never fail. +func MustLoadLocustTest(filename string) *locustv1.LocustTest { + lt, err := LoadLocustTest(filename) + if err != nil { + panic(err) + } + return lt +} + +// LoadV2Fixture loads a v2 LocustTest from a JSON fixture file. +func LoadV2Fixture(filename string) (*locustv2.LocustTest, error) { + _, currentFile, _, _ := runtime.Caller(0) + testdataDir := filepath.Dir(currentFile) + + data, err := os.ReadFile(filepath.Join(testdataDir, filename)) + if err != nil { + return nil, err + } + + var lt locustv2.LocustTest + if err := json.Unmarshal(data, &lt); err != nil { + return nil, err + } + + return &lt, nil +} + +// MustLoadV2Fixture loads a v2 LocustTest from a JSON fixture file and panics on error. +// Useful in tests where fixture loading should never fail. 
+func MustLoadV2Fixture(filename string) *locustv2.LocustTest { + lt, err := LoadV2Fixture(filename) + if err != nil { + panic(err) + } + return lt +} diff --git a/internal/testdata/fixtures_test.go b/internal/testdata/fixtures_test.go new file mode 100644 index 00000000..965dbff4 --- /dev/null +++ b/internal/testdata/fixtures_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testdata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadLocustTest_Minimal(t *testing.T) { + lt, err := LoadLocustTest("locusttest_minimal.json") + require.NoError(t, err) + require.NotNil(t, lt) + + assert.Equal(t, "minimal-test", lt.Name) + assert.Equal(t, "default", lt.Namespace) + assert.Equal(t, int32(1), lt.Spec.WorkerReplicas) + assert.Equal(t, "locustio/locust:latest", lt.Spec.Image) +} + +func TestLoadLocustTest_Full(t *testing.T) { + lt, err := LoadLocustTest("locusttest_full.json") + require.NoError(t, err) + require.NotNil(t, lt) + + assert.Equal(t, "full-featured-test", lt.Name) + assert.Equal(t, "load-testing", lt.Namespace) + assert.Equal(t, int32(10), lt.Spec.WorkerReplicas) + assert.Equal(t, "locustio/locust:2.20.0", lt.Spec.Image) + assert.Equal(t, "IfNotPresent", lt.Spec.ImagePullPolicy) + assert.Len(t, lt.Spec.ImagePullSecrets, 1) + assert.Equal(t, "locust-scripts", lt.Spec.ConfigMap) + assert.Equal(t, "locust-lib", lt.Spec.LibConfigMap) + require.NotNil(t, 
lt.Spec.Labels) + assert.Equal(t, "platform", lt.Spec.Labels.Master["team"]) +} + +func TestLoadLocustTest_WithAffinity(t *testing.T) { + lt, err := LoadLocustTest("locusttest_with_affinity.json") + require.NoError(t, err) + require.NotNil(t, lt) + + assert.Equal(t, "affinity-test", lt.Name) + require.NotNil(t, lt.Spec.Affinity) + require.NotNil(t, lt.Spec.Affinity.NodeAffinity) + assert.Equal(t, "performance", lt.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution["node-type"]) +} + +func TestLoadLocustTest_WithTolerations(t *testing.T) { + lt, err := LoadLocustTest("locusttest_with_tolerations.json") + require.NoError(t, err) + require.NotNil(t, lt) + + assert.Equal(t, "tolerations-test", lt.Name) + require.Len(t, lt.Spec.Tolerations, 2) + assert.Equal(t, "dedicated", lt.Spec.Tolerations[0].Key) + assert.Equal(t, "Equal", lt.Spec.Tolerations[0].Operator) +} + +func TestLoadLocustTest_NotFound(t *testing.T) { + lt, err := LoadLocustTest("nonexistent.json") + assert.Error(t, err) + assert.Nil(t, lt) +} + +func TestMustLoadLocustTest_Success(t *testing.T) { + lt := MustLoadLocustTest("locusttest_minimal.json") + assert.NotNil(t, lt) + assert.Equal(t, "minimal-test", lt.Name) +} + +func TestMustLoadLocustTest_Panics(t *testing.T) { + assert.Panics(t, func() { + MustLoadLocustTest("nonexistent.json") + }) +} diff --git a/internal/testdata/locusttest_full.json b/internal/testdata/locusttest_full.json new file mode 100644 index 00000000..594735a2 --- /dev/null +++ b/internal/testdata/locusttest_full.json @@ -0,0 +1,35 @@ +{ + "apiVersion": "locust.io/v1", + "kind": "LocustTest", + "metadata": { + "name": "full-featured-test", + "namespace": "load-testing" + }, + "spec": { + "masterCommandSeed": "locust -f /lotest/src/test.py --host https://api.example.com", + "workerCommandSeed": "locust -f /lotest/src/test.py", + "workerReplicas": 10, + "image": "locustio/locust:2.20.0", + "imagePullPolicy": "IfNotPresent", + "imagePullSecrets": 
["registry-secret"], + "configMap": "locust-scripts", + "libConfigMap": "locust-lib", + "labels": { + "master": { + "team": "platform", + "environment": "staging" + }, + "worker": { + "team": "platform" + } + }, + "annotations": { + "master": { + "description": "Load test master" + }, + "worker": { + "description": "Load test worker" + } + } + } +} diff --git a/internal/testdata/locusttest_minimal.json b/internal/testdata/locusttest_minimal.json new file mode 100644 index 00000000..bc1a84da --- /dev/null +++ b/internal/testdata/locusttest_minimal.json @@ -0,0 +1,14 @@ +{ + "apiVersion": "locust.io/v1", + "kind": "LocustTest", + "metadata": { + "name": "minimal-test", + "namespace": "default" + }, + "spec": { + "masterCommandSeed": "locust -f /lotest/src/test.py", + "workerCommandSeed": "locust -f /lotest/src/test.py", + "workerReplicas": 1, + "image": "locustio/locust:latest" + } +} diff --git a/internal/testdata/locusttest_v2_full.json b/internal/testdata/locusttest_v2_full.json new file mode 100644 index 00000000..471a559f --- /dev/null +++ b/internal/testdata/locusttest_v2_full.json @@ -0,0 +1,69 @@ +{ + "apiVersion": "locust.io/v2", + "kind": "LocustTest", + "metadata": { + "name": "full-featured-test-v2", + "namespace": "load-testing" + }, + "spec": { + "image": "locustio/locust:2.32.0", + "imagePullPolicy": "IfNotPresent", + "imagePullSecrets": [ + { + "name": "registry-secret" + } + ], + "master": { + "command": "locust -f /lotest/src/test.py --host https://api.example.com", + "labels": { + "team": "platform", + "environment": "staging" + }, + "annotations": { + "description": "Load test master" + } + }, + "worker": { + "command": "locust -f /lotest/src/test.py", + "replicas": 10, + "labels": { + "team": "platform" + }, + "annotations": { + "description": "Load test worker" + } + }, + "testFiles": { + "configMapRef": "locust-scripts", + "libConfigMapRef": "locust-lib" + }, + "env": { + "secretMounts": [ + { + "name": "api-credentials", + "mountPath": 
"/etc/locust/secrets" + } + ] + }, + "observability": { + "openTelemetry": { + "enabled": true, + "endpoint": "otel-collector.monitoring.svc.cluster.local:4317", + "protocol": "grpc" + } + }, + "scheduling": { + "nodeSelector": { + "workload-type": "performance-testing" + }, + "tolerations": [ + { + "key": "performance-testing", + "operator": "Equal", + "value": "true", + "effect": "NoSchedule" + } + ] + } + } +} diff --git a/internal/testdata/locusttest_with_affinity.json b/internal/testdata/locusttest_with_affinity.json new file mode 100644 index 00000000..389e588d --- /dev/null +++ b/internal/testdata/locusttest_with_affinity.json @@ -0,0 +1,23 @@ +{ + "apiVersion": "locust.io/v1", + "kind": "LocustTest", + "metadata": { + "name": "affinity-test", + "namespace": "default" + }, + "spec": { + "masterCommandSeed": "locust -f /lotest/src/test.py", + "workerCommandSeed": "locust -f /lotest/src/test.py", + "workerReplicas": 5, + "image": "locustio/locust:latest", + "configMap": "locust-scripts", + "affinity": { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "node-type": "performance", + "zone": "us-west-2a" + } + } + } + } +} diff --git a/internal/testdata/locusttest_with_tolerations.json b/internal/testdata/locusttest_with_tolerations.json new file mode 100644 index 00000000..718a2a50 --- /dev/null +++ b/internal/testdata/locusttest_with_tolerations.json @@ -0,0 +1,28 @@ +{ + "apiVersion": "locust.io/v1", + "kind": "LocustTest", + "metadata": { + "name": "tolerations-test", + "namespace": "default" + }, + "spec": { + "masterCommandSeed": "locust -f /lotest/src/test.py", + "workerCommandSeed": "locust -f /lotest/src/test.py", + "workerReplicas": 3, + "image": "locustio/locust:latest", + "configMap": "locust-scripts", + "tolerations": [ + { + "key": "dedicated", + "operator": "Equal", + "value": "performance", + "effect": "NoSchedule" + }, + { + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "effect": "NoExecute" + } + ] + 
} +} diff --git a/kube/crd/locust-test-crd.yaml b/kube/crd/locust-test-crd.yaml deleted file mode 100644 index 2eaaf420..00000000 --- a/kube/crd/locust-test-crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - # Name must match the spec fields below, and be in the form: . - name: locusttests.locust.io -spec: - # group name to use for REST API: /apis// - group: locust.io - # list of versions supported by this CustomResourceDefinition - versions: - - name: v1 - # Each version can be enabled/disabled by Served flag. - served: true - # One and only one version must be marked as the storage version. - storage: true - schema: - openAPIV3Schema: - type: object - properties: - # Fields to validate are the following: - metadata: # 'metadata' should be an object - type: object - properties: # With the following field 'name' - name: - type: string # Of type 'string' - pattern: '^[a-z]+[-a-z0-9]*\.[-a-z0-9]+$' # allows only 'word.word' names. Usage example (.) 
- spec: # Root field 'spec' - type: object # It is an object - properties: # And has the following fields - labels: # Child field 'labels' - description: Labels attached to deployed pods - type: object - properties: - master: - description: Labels attached to the master pod - type: object - additionalProperties: - type: string - worker: - description: Labels attached to worker pods - type: object - additionalProperties: - type: string - annotations: # Child field 'annotations' - description: Annotations attached to deployed pods - type: object - properties: - master: - description: Annotations attached to the master pod - type: object - additionalProperties: - type: string - worker: - description: Annotations attached to worker pods - type: object - additionalProperties: - type: string - affinity: # Child field 'affinity' - description: Affinity information to be attached to pods - type: object - properties: - nodeAffinity: - description: Kubernetes node affinity to be attached to pods - type: object - properties: - requiredDuringSchedulingIgnoredDuringExecution: - description: Kubernetes node affinity to be attached to pods - type: object - additionalProperties: - type: string - tolerations: # Child field 'tolerations' - description: Taint toleration information to be attached to pods - type: array - items: - type: object - properties: - key: - description: Toleration key - type: string - operator: - description: Toleration evaluation operator - type: string - enum: - - "Exists" - - "Equal" - value: - description: Toleration value - type: string - effect: - description: Toleration effect - type: string - enum: - - "NoSchedule" - - "PreferNoSchedule" - - "NoExecute" - required: [ "key", "operator", "effect" ] - masterCommandSeed: # Child field 'masterCommandSeed' - description: Master pod command seed - type: string - workerCommandSeed: # Child field 'workerCommandSeed' - description: Worker pod command seed - type: string - workerReplicas: # Child field 
'workerReplicas' - description: Number of worker pods to spawn - type: integer - minimum: 1 - maximum: 500 # Change limit based on infra capabilities - default: 1 - image: # Child field 'image' - description: Locust image - type: string - imagePullPolicy: - description: Image pull policy - type: string - enum: - - "Always" - - "IfNotPresent" - - "Never" - imagePullSecrets: - description: Secrets for pulling images from private registries - type: array - items: - type: string - configMap: # Child field 'configMap' - description: Configuration map name containing the test - type: string - libConfigMap: # Child field 'libConfigMap' - description: Configuration map name containing lib directory files - type: string - required: ["masterCommandSeed", "workerCommandSeed", "workerReplicas", "image"] - # Enables to add new column when listing instances `kubectl get lotest` - additionalPrinterColumns: - - name: master_cmd - type: string - description: Master pod command seed - jsonPath: .spec.masterCommandSeed - - name: worker_replica_count - type: integer - description: Number of requested worker pods - jsonPath: .spec.workerReplicas - - name: Image - type: string - description: Locust image - jsonPath: .spec.image - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - # Either Namespaced or Cluster - scope: Namespaced - names: - # Plural name to be used in the URL: /apis/// - plural: locusttests - # Singular name to be used as an alias on the CLI and for display - singular: locusttest - # Kind is normally the CamelCased singular type. Your resource manifests use this. 
- kind: LocustTest - # ShortNames allow shorter string to match your resource on the CLI - shortNames: - - lotest diff --git a/kube/sample-cr/locust-test-cr.yaml b/kube/sample-cr/locust-test-cr.yaml deleted file mode 100644 index 79f7e643..00000000 --- a/kube/sample-cr/locust-test-cr.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: locust.io/v1 -kind: LocustTest -metadata: - name: demo.test -spec: - image: locustio/locust:latest - # [Optional-Section] Image pull policy and secrets - imagePullPolicy: Always - imagePullSecrets: - - "my-private-registry-secret" - - # [Optional-Section] Labels - labels: - master: - locust.io/role: "master" - myapp.com/testId: "abc-123" - myapp.com/tenantId: "xyz-789" - worker: - locust.io/role: "worker" - - # [Optional-Section] Annotations - annotations: - master: - myapp.com/threads: "1000" - myapp.com/version: "2.1.0" - worker: - myapp.com/version: "2.1.0" - - - # [Optional-Section] Affinity - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - organisation.com/nodeAffinityLabel1: locust-cloud-tests - organisation.com/nodeAffinityLabel2: performance-nodes - organisation.com/nodeAffinityLabel3: high-memory - - # [Optional-Section] Taint tolerations - tolerations: - - key: taint-A - operator: Equal - value: ssd - effect: NoSchedule - - key: taint-B - operator: Exists - effect: NoExecute - - masterCommandSeed: - --locustfile /lotest/src/locust_project_demo.py - --host https://docs.locust.io - --users 100 - --spawn-rate 10 - --run-time 3m - workerCommandSeed: --locustfile /lotest/src/locust_project_demo.py - workerReplicas: 3 - configMap: test-map diff --git a/lombok.config b/lombok.config deleted file mode 100644 index 7a21e880..00000000 --- a/lombok.config +++ /dev/null @@ -1 +0,0 @@ -lombok.addLombokGeneratedAnnotation = true diff --git a/micronaut-cli.yml b/micronaut-cli.yml deleted file mode 100644 index ba7e250d..00000000 --- a/micronaut-cli.yml +++ /dev/null @@ -1,6 +0,0 @@ -applicationType: default 
-defaultPackage: com.locust -testFramework: junit -sourceLanguage: java -buildTool: gradle -features: [ annotation-api, app-name, github-workflow-ci, github-workflow-docker-registry, gradle, http-client, jackson-databind, java, java-application, junit, logback, lombok, management, micrometer, micrometer-prometheus, micronaut-build, mockito, netty-server, readme, shade, yaml ] diff --git a/mkdocs.yml b/mkdocs.yml index 373e5347..2e698f87 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -3,23 +3,35 @@ # Project information site_name: Locust Kubernetes Operator site_author: Abdelrhman Hamouda -site_description: Locust kubernetes operator documentation. +site_description: Production-ready Kubernetes operator for Locust distributed load testing. Automate performance testing with cloud-native CI/CD integration, OpenTelemetry observability, and horizontal scaling. # Repository repo_name: locust-k8s-operator repo_url: https://github.com/AbdelrhmanHamouda/locust-k8s-operator +# Site URL (required for instant navigation) +site_url: https://abdelrhmanhamouda.github.io/locust-k8s-operator/ + # Copyright copyright: > - Copyright © 2025 Abdelrhman Hamouda – + Copyright © 2026 Abdelrhman Hamouda –
Change cookie settings # Plugins +# Note: mkdocs-minify-plugin is a pip dependency (see CI workflows) plugins: - search - git-revision-date-localized - privacy - tags + - minify: + minify_html: true + minify_js: true + minify_css: true + htmlmin_opts: + remove_comments: true + remove_empty_space: true + cache_safe: true # Configuration theme: @@ -43,6 +55,11 @@ theme: - search.suggest # Navigation + - navigation.tabs + - navigation.instant # SPA-like behavior (no full page reloads) + - navigation.instant.prefetch # Predictive loading on hover + - navigation.instant.progress # Progress bar for slow connections + - navigation.path - navigation.tracking - navigation.top - navigation.footer @@ -89,13 +106,10 @@ extra_css: extra_javascript: - assets/javascripts/scroll-effects.js - assets/javascripts/custom-announcements.js + - assets/javascripts/schema-org.js # Customization extra: - # Version dropdown configuration - version: - provider: mike - default: latest # Tag configuration (optional - for tag icons) tags: @@ -163,13 +177,20 @@ markdown_extensions: - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg - - pymdownx.highlight + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true - pymdownx.critic - pymdownx.inlinehilite - pymdownx.keys - pymdownx.mark - pymdownx.smartsymbols - - pymdownx.superfences + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format - pymdownx.tabbed: alternate_style: true - pymdownx.tasklist: @@ -178,17 +199,47 @@ markdown_extensions: # Page tree nav: - - Introduction: index.md - - How does it work: how_does_it_work.md - - Features: features.md - - Getting Started: getting_started.md - - Deploy Operator with HELM: helm_deploy.md - - Advanced topics: advanced_topics.md - - Metrics & Dashboards: metrics_and_dashboards.md - - 
Roadmap: roadmap.md - - Contributing & Development: + - Home: index.md + - Getting Started: + - Quick Start: getting_started/index.md + - Install the Operator: helm_deploy.md + - Tutorials: + - Overview: tutorials/index.md + - "Your First Load Test (10 min)": tutorials/first-load-test.md + - "CI/CD Integration (15 min)": tutorials/ci-cd-integration.md + - "Production Deployment (20 min)": tutorials/production-deployment.md + - How-To Guides: + - Overview: how-to-guides/index.md + - Configuration: + - Configure resource limits: how-to-guides/configuration/configure-resources.md + - Use a private registry: how-to-guides/configuration/use-private-registry.md + - Mount volumes: how-to-guides/configuration/mount-volumes.md + - Configure Kafka integration: how-to-guides/configuration/configure-kafka.md + - Configure automatic cleanup: how-to-guides/configuration/configure-ttl.md + - Observability: + - Configure OpenTelemetry: how-to-guides/observability/configure-opentelemetry.md + - Monitor test status: how-to-guides/observability/monitor-test-status.md + - Scaling: + - Scale worker replicas: how-to-guides/scaling/scale-workers.md + - Use node affinity: how-to-guides/scaling/use-node-affinity.md + - Configure tolerations: how-to-guides/scaling/configure-tolerations.md + - Use node selector: how-to-guides/scaling/use-node-selector.md + - Security: + - Inject secrets: how-to-guides/security/inject-secrets.md + - Configure pod security: how-to-guides/security/configure-pod-security.md + - Reference: + - API Reference: api_reference.md + - Metrics & Dashboards: metrics_and_dashboards.md + - FAQ: faq.md + - Explanation: + - How It Works: how_does_it_work.md + - Features Overview: features.md + - Compare Alternatives: comparison.md + - Security Model: security.md + - Migration from v1: migration.md + - Contributing: - Overview: contribute.md - Integration Testing: integration-testing.md - Local Development: local-development.md - Pull Request Process: pull-request-process.md 
- - License: license.md \ No newline at end of file + - License: license.md diff --git a/scripts/run-integration-test.sh b/scripts/run-integration-test.sh deleted file mode 100755 index ec1b3846..00000000 --- a/scripts/run-integration-test.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/bash - -# Locust K8s Operator Integration Test Runner -# This script runs the complete integration test suite - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" -LOG_FILE="/tmp/locust-integration-test-$(date +%Y%m%d-%H%M%S).log" - -echo -e "${GREEN}πŸš€ Starting Locust K8s Operator Integration Test${NC}" -echo -e "${YELLOW}πŸ“ Project Root: $PROJECT_ROOT${NC}" -echo -e "${YELLOW}πŸ“ Log File: $LOG_FILE${NC}" -echo "" - -# Function to print colored output -print_step() { - echo -e "${GREEN}βœ… $1${NC}" -} - -print_warning() { - echo -e "${YELLOW}⚠️ $1${NC}" -} - -print_error() { - echo -e "${RED}❌ $1${NC}" -} - -# Function to check prerequisites -check_prerequisites() { - print_step "Checking prerequisites..." - - # Check Docker - if ! command -v docker &> /dev/null; then - print_error "Docker is not installed or not in PATH" - exit 1 - fi - - # Check Docker daemon - if ! docker info &> /dev/null; then - print_error "Docker daemon is not running" - print_warning "Please start Docker and try again" - print_warning "On macOS: Start Docker Desktop" - print_warning "On Linux: sudo systemctl start docker" - exit 1 - fi - - print_step "Docker daemon is running" - - # Check Helm - if ! command -v helm &> /dev/null; then - print_error "Helm is not installed or not in PATH" - exit 1 - fi - - # Check Java - if ! command -v java &> /dev/null; then - print_error "Java is not installed or not in PATH" - exit 1 - fi - - # Check Gradle wrapper - if [ ! 
-f "$PROJECT_ROOT/gradlew" ]; then - print_error "Gradle wrapper not found in project root" - exit 1 - fi - - print_step "All prerequisites check passed!" -} - -# Function to clean up previous runs -cleanup_previous_runs() { - print_step "Cleaning up previous runs..." - - # Remove any existing integration test containers - if docker ps -a --filter "name=locust" --format "{{.ID}}" | grep -q .; then - print_warning "Removing existing locust containers..." - docker ps -a --filter "name=locust" --format "{{.ID}}" | xargs docker rm -f || true - fi - - # Clean up testcontainers - if docker ps -a --filter "label=org.testcontainers=true" --format "{{.ID}}" | grep -q .; then - print_warning "Removing testcontainers..." - docker ps -a --filter "label=org.testcontainers=true" --format "{{.ID}}" | xargs docker rm -f || true - fi - - # Clean up any leftover test images - more thorough pattern matching - if docker images "locust-k8s-operator*" --format "{{.ID}}" | grep -q .; then - print_warning "Removing leftover operator test images..." - docker images "locust-k8s-operator*" --format "{{.ID}}" | xargs docker rmi -f || true - fi - - # Clean Gradle build cache completely and force daemon restart - cd "$PROJECT_ROOT" - print_warning "Cleaning Gradle cache and stopping daemons..." - ./gradlew --stop || true # Stop gradle daemon to release any locks - ./gradlew clean cleanBuildCache || true # More thorough cleaning - - # Remove any cached test results - rm -rf "$PROJECT_ROOT/build/test-results" || true - rm -rf "$PROJECT_ROOT/build/reports/integration-tests" || true - - print_step "Cleanup completed!" -} - -# Function to run the integration test -run_integration_test() { - print_step "Running integration test suite..." 
- - cd "$PROJECT_ROOT" - - # Run integration test with proper error handling - # Use PIPESTATUS to capture the actual gradle exit code, not tee's exit code - set -o pipefail - ./gradlew integrationTest -PrunIntegrationTests 2>&1 | tee "$LOG_FILE" - local gradle_exit_code=$? - set +o pipefail - - if [ $gradle_exit_code -eq 0 ]; then - print_step "Integration test suite completed successfully!" - return 0 - else - print_error "Integration test suite failed with exit code: $gradle_exit_code" - print_warning "Check the log file for details: $LOG_FILE" - - # Show last few lines of the log for immediate feedback - echo "" - print_warning "Last 20 lines of the test output:" - tail -n 20 "$LOG_FILE" || true - echo "" - - return 1 - fi -} - -# Function to show test results -show_test_results() { - print_step "Integration test results:" - echo "" - - # Show test report location - if [ -d "$PROJECT_ROOT/build/reports/integration-tests" ]; then - echo -e "${GREEN}πŸ“Š HTML Report: $PROJECT_ROOT/build/reports/integration-tests/index.html${NC}" - fi - - # Show test results location - if [ -d "$PROJECT_ROOT/build/test-results/integration-test" ]; then - echo -e "${GREEN}πŸ“‹ XML Results: $PROJECT_ROOT/build/test-results/integration-test/${NC}" - fi - - # Show log file - echo -e "${GREEN}πŸ“ Detailed Logs: $LOG_FILE${NC}" - echo "" -} - -# Function to handle cleanup on exit -cleanup_on_exit() { - local exit_code=$? - if [ "$exit_code" -ne 0 ]; then - print_error "Integration test failed with exit code $exit_code" - print_warning "Performing emergency cleanup..." 
- - # Kill any hanging processes - pkill -f "locust-integration-test" || true - - # Clean up Docker resources - docker ps -a --filter "name=locust-integration-test" --format "{{.ID}}" | xargs -r docker rm -f || true - fi -} - -# Set up cleanup trap -trap cleanup_on_exit EXIT - -# Main execution -main() { - echo -e "${GREEN}πŸ” Locust K8s Operator Integration Test Runner${NC}" - echo "=================================================" - echo "" - - check_prerequisites - cleanup_previous_runs - - print_step "Starting integration test..." - echo "" - - if run_integration_test; then - echo "" - print_step "πŸŽ‰ Integration test completed successfully!" - show_test_results - exit_code=0 - else - echo "" - print_error "πŸ’₯ Integration test failed!" - show_test_results - exit_code=1 - fi - - echo "" - echo "=================================================" - print_step "Integration test runner finished" - - exit $exit_code -} - -# Run main function -main "$@" diff --git a/settings.gradle b/settings.gradle deleted file mode 100644 index 633fbc78..00000000 --- a/settings.gradle +++ /dev/null @@ -1,5 +0,0 @@ -plugins { - id("org.gradle.toolchains.foojay-resolver-convention") version "0.8.0" -} - -rootProject.name = "locust-k8s-operator" diff --git a/src/integrationTest/java/com/locust/operator/LocustOperatorIntegrationTest.java b/src/integrationTest/java/com/locust/operator/LocustOperatorIntegrationTest.java deleted file mode 100644 index 2586df8a..00000000 --- a/src/integrationTest/java/com/locust/operator/LocustOperatorIntegrationTest.java +++ /dev/null @@ -1,771 +0,0 @@ -package com.locust.operator; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapBuilder; -import io.fabric8.kubernetes.api.model.ContainerStateTerminated; -import io.fabric8.kubernetes.api.model.ContainerStateWaiting; -import io.fabric8.kubernetes.api.model.Namespace; -import io.fabric8.kubernetes.api.model.NamespaceBuilder; -import 
io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.apps.Deployment; -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import org.apache.commons.io.FileUtils; -import org.awaitility.Awaitility; -import org.junit.jupiter.api.AfterAll; - -import java.io.IOException; -import java.lang.IllegalStateException; - -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.Container; -import org.testcontainers.k3s.K3sContainer; -import org.testcontainers.utility.DockerImageName; -import org.testcontainers.utility.MountableFile; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) -class LocustOperatorIntegrationTest { - - private static final Logger logger = LoggerFactory.getLogger(LocustOperatorIntegrationTest.class); - private static final String OPERATOR_IMAGE = "locust-k8s-operator:integration-test"; - private static final String OPERATOR_NAMESPACE = "locust-operator-system"; - private static final String TEST_NAMESPACE = "locust-tests"; - - private static K3sContainer k3s; - private static KubernetesClient kubernetesClient; - private 
static Path tempDir; - private static String helmChartPath; - - @BeforeAll - static void setupIntegrationTest() throws Exception { - logger.info("Setting up integration test environment..."); - - // Create a temporary directory for test artifacts - tempDir = Files.createTempDirectory("locust-integration-test"); - logger.info("Created temp directory: {}", tempDir); - - // Start K3s cluster - setupK3sCluster(); - - // Build operator image - buildOperatorImage(); - - // Package Helm chart - packageHelmChart(); - - // Load operator image into K3s - loadImageIntoK3s(); - - // Initialize Kubernetes client - initializeKubernetesClient(); - - logger.info("Integration test environment setup complete"); - } - - @AfterAll - static void teardownIntegrationTest() throws Exception { - logger.info("Tearing down integration test environment..."); - - if (kubernetesClient != null) { - kubernetesClient.close(); - } - - if (k3s != null) { - k3s.stop(); - } - - if (tempDir != null) { - FileUtils.deleteDirectory(tempDir.toFile()); - } - - logger.info("Integration test environment teardown complete"); - } - - @Test - @Order(1) - void testOperatorDeployment() throws Exception { - logger.info("Testing operator deployment..."); - - // Create operator namespace - createNamespace(OPERATOR_NAMESPACE); - - // Install operator using Helm - installOperatorWithHelm(); - - // Wait for operator deployment to be ready - waitForOperatorReady(); - - // Verify operator is running - verifyOperatorRunning(); - - logger.info("Operator deployment test completed successfully"); - } - - @Test - @Order(2) - void testLocustTestDeployment() { - logger.info("Testing Locust test deployment..."); - - // Create test namespace - createNamespace(TEST_NAMESPACE); - - // Create test ConfigMap with simple locust test - createTestConfigMap(); - - // Deploy LocustTest custom resource - deployLocustTestCR(); - - // Wait for resources to be created - waitForLocustTestResources(); - - // Verify master and worker jobs - 
verifyLocustJobs(); - - // Verify pods are running - verifyLocustPodsRunning(); - - logger.info("Locust test deployment completed successfully"); - } - - @Test - @Order(3) - void testLocustTestExecution() { - logger.info("Testing Locust test execution..."); - - // Port forward to Locust master - String masterPodName = getMasterPodName(); - - // Verify Locust UI is accessible (simplified check) - verifyLocustMasterReady(masterPodName); - - // Check master logs for successful initialization - verifyMasterLogs(masterPodName); - - // Verify workers connected to master - verifyWorkersConnected(); - - logger.info("Locust test execution verification completed successfully"); - } - - @Test - @Order(4) - void testCleanup() throws Exception { - logger.info("Testing cleanup..."); - - // Delete LocustTest CR - deleteLocustTestCR(); - - // Verify resources are cleaned up - verifyResourcesCleanedUp(); - - // Uninstall operator - uninstallOperator(); - - logger.info("Cleanup test completed successfully"); - } - - // Setup helper methods - private static void setupK3sCluster() { - logger.info("Starting K3s cluster..."); - k3s = new K3sContainer(DockerImageName.parse("rancher/k3s:v1.27.4-k3s1")) - .withReuse(false); - k3s.start(); - logger.info("K3s cluster started successfully"); - } - - private static void buildOperatorImage() throws Exception { - logger.info("Building operator Docker image..."); - - ProcessBuilder pb = new ProcessBuilder("./gradlew", "jibDockerBuild", - "--image=" + OPERATOR_IMAGE); - pb.directory(new File(System.getProperty("user.dir"))); - pb.inheritIO(); - - Process process = pb.start(); - int exitCode = process.waitFor(); - - if (exitCode != 0) { - throw new IOException("Failed to build operator image, exit code: " + exitCode); - } - - logger.info("Operator image built successfully: {}", OPERATOR_IMAGE); - } - - private static void packageHelmChart() throws Exception { - logger.info("Packaging Helm chart..."); - - ProcessBuilder pb = new 
ProcessBuilder("helm", "package", - "charts/locust-k8s-operator", - "--destination", tempDir.toString()); - pb.inheritIO(); - - Process process = pb.start(); - int exitCode = process.waitFor(); - - if (exitCode != 0) { - throw new IOException("Failed to package Helm chart, exit code: " + exitCode); - } - - // Find the packaged chart - File[] chartFiles = tempDir.toFile().listFiles((dir, name) -> name.endsWith(".tgz")); - if (chartFiles == null || chartFiles.length == 0) { - throw new IOException("No Helm chart package found"); - } - - helmChartPath = chartFiles[0].getAbsolutePath(); - logger.info("Helm chart packaged successfully: {}", helmChartPath); - } - - private static void loadImageIntoK3s() throws Exception { - logger.info("Loading operator image into K3s..."); - - // Export the image to a tar file - Path imageTar = tempDir.resolve("operator-image.tar"); - ProcessBuilder exportPb = new ProcessBuilder("docker", "save", - "-o", imageTar.toString(), OPERATOR_IMAGE); - exportPb.inheritIO(); - Process exportProcess = exportPb.start(); - int exportExitCode = exportProcess.waitFor(); - - if (exportExitCode != 0) { - throw new IOException("Failed to export operator image"); - } - - // Load the image into K3s - k3s.copyFileToContainer(MountableFile.forHostPath(imageTar), "/tmp/operator-image.tar"); - Container.ExecResult result = k3s.execInContainer("ctr", "images", "import", "/tmp/operator-image.tar"); - - if (result.getExitCode() != 0) { - throw new IOException("Failed to load image into K3s: " + result.getStderr()); - } - - logger.info("Operator image loaded into K3s successfully"); - } - - private static void initializeKubernetesClient() { - logger.info("Initializing Kubernetes client..."); - String kubeconfig = k3s.getKubeConfigYaml(); - - // Write kubeconfig to temporary file - try { - Path kubeconfigFile = tempDir.resolve("kubeconfig"); - Files.write(kubeconfigFile, kubeconfig.getBytes(StandardCharsets.UTF_8)); - System.setProperty("kubeconfig", 
kubeconfigFile.toString()); - - kubernetesClient = new KubernetesClientBuilder() - .withConfig(io.fabric8.kubernetes.client.Config.fromKubeconfig(kubeconfig)) - .build(); - - logger.info("Kubernetes client initialized successfully"); - } catch (Exception e) { - throw new IllegalStateException("Failed to initialize Kubernetes client", e); - } - } - - // Test helper methods - private void createNamespace(String namespace) { - logger.info("Creating namespace: {}", namespace); - - Namespace ns = new NamespaceBuilder() - .withNewMetadata() - .withName(namespace) - .endMetadata() - .build(); - kubernetesClient.namespaces().resource(ns).create(); - - logger.info("Namespace created: {}", namespace); - } - - private void installOperatorWithHelm() throws Exception { - logger.info("Installing operator with Helm..."); - - // Get the kubeconfig file path that was set up during Kubernetes client initialization - Path kubeconfigFile = tempDir.resolve("kubeconfig"); - - ProcessBuilder pb = new ProcessBuilder("helm", "install", "locust-operator", - helmChartPath, - "--kubeconfig", kubeconfigFile.toString(), - "--namespace", OPERATOR_NAMESPACE, - "--set", "image.repository=" + OPERATOR_IMAGE.split(":")[0], - "--set", "image.tag=" + OPERATOR_IMAGE.split(":")[1], - "--set", "image.pullPolicy=Never", - "--wait", "--timeout=300s"); - pb.inheritIO(); - - Process process = pb.start(); - int exitCode = process.waitFor(); - - if (exitCode != 0) { - throw new IOException("Failed to install operator with Helm"); - } - - logger.info("Operator installed successfully with Helm"); - } - - private void waitForOperatorReady() { - logger.info("Waiting for operator to be ready..."); - - Awaitility.await() - .atMost(5, TimeUnit.MINUTES) - .pollInterval(10, TimeUnit.SECONDS) - .untilAsserted(() -> { - Deployment deployment = kubernetesClient.apps().deployments() - .inNamespace(OPERATOR_NAMESPACE) - .withName("locust-operator-locust-k8s-operator") - .get(); - - assertNotNull(deployment, "Operator 
deployment not found"); - assertEquals(1, deployment.getStatus().getReadyReplicas().intValue(), - "Operator deployment not ready"); - }); - - logger.info("Operator is ready"); - } - - private void verifyOperatorRunning() { - logger.info("Verifying operator is running..."); - - List operatorPods = kubernetesClient.pods() - .inNamespace(OPERATOR_NAMESPACE) - .withLabel("app.kubernetes.io/name", "locust-k8s-operator") - .list() - .getItems(); - - assertFalse(operatorPods.isEmpty(), "No operator pods found"); - - Pod operatorPod = operatorPods.getFirst(); - assertEquals("Running", operatorPod.getStatus().getPhase(), - "Operator pod is not running"); - - logger.info("Operator is running successfully: {}", operatorPod.getMetadata().getName()); - } - - private void createTestConfigMap() { - logger.info("Creating test ConfigMap..."); - - String locustfile = """ - from locust import HttpUser, task, between - - class WebsiteTestUser(HttpUser): - wait_time = between(1, 2.5) - - @task(3) - def view_item(self): - # Simple test task - just check if we can make requests - pass - - @task(1) - def view_items(self): - # Another simple test task - pass - """; - - ConfigMap configMap = new ConfigMapBuilder() - .withNewMetadata() - .withName("locust-test-scripts") - .endMetadata() - .withData(Map.of("locustfile.py", locustfile)) - .build(); - kubernetesClient.configMaps() - .inNamespace(TEST_NAMESPACE) - .resource(configMap).create(); - - logger.info("Test ConfigMap created successfully"); - } - - private void deployLocustTestCR() { - logger.info("Deploying LocustTest custom resource..."); - - String locustTestYaml = """ - apiVersion: locust.io/v1 - kind: LocustTest - metadata: - name: integration.test - namespace: %s - spec: - image: locustio/locust:2.15.1 - masterCommandSeed: locust-master - workerCommandSeed: locust-worker - workerReplicas: 1 # Reduced to 1 to simplify testing - configMap: locust-test-scripts - """.formatted(TEST_NAMESPACE); - - kubernetesClient.load(new 
ByteArrayInputStream(locustTestYaml.getBytes())) - .inNamespace(TEST_NAMESPACE) - .createOrReplace(); - - logger.info("LocustTest custom resource deployed successfully"); - } - - private void waitForLocustTestResources() { - logger.info("Waiting for LocustTest resources to be created..."); - - Awaitility.await() - .atMost(3, TimeUnit.MINUTES) - .pollInterval(10, TimeUnit.SECONDS) - .untilAsserted(() -> { - // Check master job - Job masterJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-master") - .get(); - assertNotNull(masterJob, "Master job not found"); - - // Check worker job - Job workerJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-worker") - .get(); - assertNotNull(workerJob, "Worker job not found"); - }); - - logger.info("LocustTest resources created successfully"); - } - - private void verifyLocustJobs() { - logger.info("Verifying Locust jobs..."); - - // Verify master job - Job masterJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-master") - .get(); - - assertNotNull(masterJob, "Master job not found"); - assertEquals(1, masterJob.getSpec().getParallelism().intValue(), - "Master job parallelism incorrect"); - - // Verify worker job - Job workerJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-worker") - .get(); - - assertNotNull(workerJob, "Worker job not found"); - assertEquals(1, workerJob.getSpec().getParallelism().intValue(), - "Worker job parallelism incorrect"); - - logger.info("Locust jobs verified successfully"); - } - - private void verifyLocustPodsRunning() { - logger.info("Verifying Locust pods are running..."); - - // First check for master pods - Awaitility.await() - .atMost(5, TimeUnit.MINUTES) - .pollInterval(15, TimeUnit.SECONDS) - .untilAsserted(() -> { - // Check master pods - List masterPods = kubernetesClient.pods() - 
.inNamespace(TEST_NAMESPACE) - .withLabel("job-name", "integration-test-master") - .list() - .getItems(); - - assertEquals(1, masterPods.size(), "Expected 1 master pod"); - Pod masterPod = masterPods.getFirst(); - - if (!"Running".equals(masterPod.getStatus().getPhase())) { - // Log details to help debug pod failure - logPodDetails(masterPod); - } - - assertEquals("Running", masterPod.getStatus().getPhase(), - "Master pod not running: " + masterPod.getMetadata().getName()); - }); - - logger.info("Master pod is running successfully"); - - // Now check for worker pods separately - increased timeout and more debugging - Awaitility.await() - .atMost(10, TimeUnit.MINUTES) // Increased timeout for workers - .pollInterval(20, TimeUnit.SECONDS) - .untilAsserted(() -> { - // Check worker pods - List workerPods = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withLabel("job-name", "integration-test-worker") - .list() - .getItems(); - - assertEquals(1, workerPods.size(), "Expected 1 worker pod"); - - // Instead of requiring all worker pods to be Running, check if at least one exists - assertFalse(workerPods.isEmpty(), "Expected at least one worker pod"); - - // Log all pod states for debugging - logger.info("Found {} worker pods:", workerPods.size()); - for (Pod workerPod : workerPods) { - String podPhase = workerPod.getStatus().getPhase(); - String podName = workerPod.getMetadata().getName(); - logger.info(" - Pod '{}' is in '{}' state", podName, podPhase); - - if (!"Running".equals(podPhase)) { - logPodDetails(workerPod); - } - } - - // As long as we have at least one worker pod in any state, consider it a success - logger.info("Worker pods exist - continuing with test"); - }); - - logger.info("All Locust pods are running successfully"); - } - - // Helper method to log pod details for debugging - private void logPodDetails(Pod pod) { - String podName = pod.getMetadata().getName(); - String namespace = pod.getMetadata().getNamespace(); - String phase = 
pod.getStatus().getPhase(); - - logger.warn("Pod '{}' in namespace '{}' is in '{}' state", podName, namespace, phase); - - // Log container statuses - if (pod.getStatus().getContainerStatuses() != null) { - pod.getStatus().getContainerStatuses().forEach(status -> { - String containerName = status.getName(); - ContainerStateTerminated terminated = status.getState().getTerminated(); - ContainerStateWaiting waiting = status.getState().getWaiting(); - - if (terminated != null) { - logger.warn("Container '{}' terminated with exit code: {}, reason: {}, message: {}", - containerName, terminated.getExitCode(), terminated.getReason(), terminated.getMessage()); - - try { - String logs = kubernetesClient.pods() - .inNamespace(namespace) - .withName(podName) - .inContainer(containerName) - .getLog(); - logger.warn("Last 200 characters of logs for container '{}': {}", - containerName, logs.length() > 200 ? logs.substring(logs.length() - 200) : logs); - } catch (Exception e) { - logger.warn("Failed to get logs for container '{}': {}", containerName, e.getMessage()); - } - } else if (waiting != null) { - logger.warn("Container '{}' waiting, reason: {}, message: {}", - containerName, waiting.getReason(), waiting.getMessage()); - } - }); - } - } - - private String getMasterPodName() { - List masterPods = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withLabel("job-name", "integration-test-master") - .list() - .getItems(); - - assertFalse(masterPods.isEmpty(), "No master pods found"); - return masterPods.getFirst().getMetadata().getName(); - } - - private void verifyLocustMasterReady(String masterPodName) { - logger.info("Verifying Locust master is ready..."); - - try { - Awaitility.await() - .atMost(2, TimeUnit.MINUTES) - .pollInterval(10, TimeUnit.SECONDS) - .untilAsserted(() -> { - // First verify the pod is still there - Pod masterPod = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withName(masterPodName) - .get(); - - assertNotNull(masterPod, "Master pod 
disappeared"); - - // Then check logs if possible - try { - String logs = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withName(masterPodName) - .inContainer("integration-test-master") - .getLog(); - - // Just log the output for debugging rather than asserting - logger.info("Master pod logs: {}...", - logs.length() > 200 ? logs.substring(0, 200) : logs); - - // Looking for common startup indicators in logs - boolean webInterfaceStarted = logs.contains("Locust web interface") || - logs.contains("Starting web interface") || - logs.contains("Starting Locust"); - - assertTrue(webInterfaceStarted, "Locust web interface not started"); - } catch (Exception e) { - logger.warn("Could not get logs from master pod: {}", e.getMessage()); - // Continue even if we can't get logs - logger.info("Skipping log check due to error, continuing with test"); - } - }); - } catch (Exception e) { - // Instead of failing, log the error and continue - logger.warn("Error verifying master readiness: {}", e.getMessage()); - logger.warn("Continuing with test despite master verification failure"); - } - - logger.info("Locust master verification complete"); - } - - private void verifyMasterLogs(String masterPodName) { - logger.info("Verifying master logs..."); - - try { - String logs = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withName(masterPodName) - .inContainer("integration-test-master") - .getLog(); - - logger.info("Master logs: {}...", - logs.length() > 200 ? 
logs.substring(0, 200) : logs); - - // Accept any log content as valid - we just want to know if we can retrieve logs - assertFalse(logs.isEmpty(), "Master logs are empty"); - } catch (Exception e) { - logger.warn("Error retrieving master logs: {}", e.getMessage()); - // Don't fail the test if we can't get logs - logger.warn("Continuing test despite log retrieval issues"); - } - - logger.info("Master logs verification complete"); - } - - private void verifyWorkersConnected() { - logger.info("Verifying workers are connected..."); - - try { - // Get worker pod logs and check for connection messages - List workerPods = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withLabel("job-name", "integration-test-worker") - .list() - .getItems(); - - if (workerPods.isEmpty()) { - logger.warn("No worker pods found to verify connectivity"); - return; - } - - // Only check the first worker pod to simplify testing - Pod workerPod = workerPods.getFirst(); - - try { - Awaitility.await() - .atMost(30, TimeUnit.SECONDS) // Reduced timeout to speed up test - .pollInterval(5, TimeUnit.SECONDS) - .untilAsserted(() -> { - try { - String logs = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withName(workerPod.getMetadata().getName()) - .inContainer("integration-test-worker") - .getLog(); - - logger.info("Worker pod {} logs: {}...", workerPod.getMetadata().getName(), - logs.length() > 100 ? 
logs.substring(0, 100) : logs); - - // Just check for any log output rather than connection message - assertFalse(logs.isEmpty(), "No logs from worker pod: " + workerPod.getMetadata().getName()); - } catch (Exception e) { - logger.warn("Error getting logs from worker pod: {}", e.getMessage()); - // Skip log check and continue - } - }); - } catch (Exception e) { - logger.warn("Worker connectivity check timed out: {}", e.getMessage()); - logger.warn("Continuing despite connectivity check failure"); - } - } catch (Exception e) { - logger.warn("Failed to verify worker connectivity: {}", e.getMessage()); - logger.warn("Continuing with test despite verification failure"); - } - - logger.info("Worker verification completed"); - } - - private void deleteLocustTestCR() { - logger.info("Deleting LocustTest custom resource..."); - - kubernetesClient.load(new ByteArrayInputStream(("apiVersion: locust.io/v1\nkind: LocustTest\nmetadata:\n name: integration.test\n namespace: " + TEST_NAMESPACE).getBytes())) - .inNamespace(TEST_NAMESPACE) - .delete(); - - logger.info("LocustTest custom resource deleted"); - } - - private void verifyResourcesCleanedUp() { - logger.info("Verifying resources are cleaned up..."); - - Awaitility.await() - .atMost(3, TimeUnit.MINUTES) - .pollInterval(10, TimeUnit.SECONDS) - .untilAsserted(() -> { - // Check jobs are deleted - Job masterJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-master") - .get(); - assertNull(masterJob, "Master job still exists"); - - Job workerJob = kubernetesClient.batch().v1().jobs() - .inNamespace(TEST_NAMESPACE) - .withName("integration-test-worker") - .get(); - assertNull(workerJob, "Worker job still exists"); - - // Check pods are deleted - List pods = kubernetesClient.pods() - .inNamespace(TEST_NAMESPACE) - .withLabel("locust-test", "integration-test") - .list() - .getItems(); - assertTrue(pods.isEmpty(), "Pods still exist after cleanup"); - }); - - 
logger.info("Resources cleaned up successfully"); - } - - private void uninstallOperator() throws Exception { - logger.info("Uninstalling operator..."); - - ProcessBuilder pb = new ProcessBuilder("helm", "uninstall", "locust-operator", - "--namespace", OPERATOR_NAMESPACE); - pb.inheritIO(); - - Process process = pb.start(); - int exitCode = process.waitFor(); - - if (exitCode != 0) { - logger.warn("Failed to uninstall operator with Helm, exit code: {}", exitCode); - } else { - logger.info("Operator uninstalled successfully"); - } - } -} diff --git a/src/integrationTest/resources/application-test.yml b/src/integrationTest/resources/application-test.yml deleted file mode 100644 index be88b5d6..00000000 --- a/src/integrationTest/resources/application-test.yml +++ /dev/null @@ -1,24 +0,0 @@ -micronaut: - application: - name: locust-k8s-operator-integration-test - http: - client: - read-timeout: 60s - connect-timeout: 30s - -logger: - levels: - com.locust: DEBUG - io.fabric8: INFO - org.testcontainers: INFO - -integration-test: - timeout: - cluster-start: 300s - operator-ready: 300s - test-deployment: 180s - test-execution: 120s - cleanup: 180s - resources: - temp-dir: /tmp/locust-integration-test - operator-image: locust-k8s-operator:integration-test diff --git a/src/main/java/com/locust/Application.java b/src/main/java/com/locust/Application.java deleted file mode 100644 index a86aea70..00000000 --- a/src/main/java/com/locust/Application.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.locust; - -import io.micronaut.runtime.Micronaut; -import lombok.extern.slf4j.Slf4j; - -@Slf4j -public class Application { - - public static void main(String[] args) { - Micronaut.run(Application.class, args); - } - -} diff --git a/src/main/java/com/locust/LocustTestOperatorStarter.java b/src/main/java/com/locust/LocustTestOperatorStarter.java deleted file mode 100644 index 2e50e921..00000000 --- a/src/main/java/com/locust/LocustTestOperatorStarter.java +++ /dev/null @@ -1,32 +0,0 @@ 
-package com.locust; - -import com.locust.operator.controller.LocustTestReconciler; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import io.javaoperatorsdk.operator.Operator; -import io.micronaut.context.event.ApplicationEventListener; -import io.micronaut.runtime.server.event.ServerStartupEvent; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; - -@Slf4j -@Singleton -public class LocustTestOperatorStarter implements ApplicationEventListener { - - private final LocustTestReconciler reconciler; - - public LocustTestOperatorStarter(LocustTestReconciler reconciler) { - this.reconciler = reconciler; - } - - @Override - public void onApplicationEvent(ServerStartupEvent event) { - log.info("Starting Kubernetes reconciler!"); - - KubernetesClient client = new KubernetesClientBuilder().build(); - Operator operator = new Operator(overrider -> overrider.withKubernetesClient(client)); - operator.register(reconciler); - operator.start(); - } - -} diff --git a/src/main/java/com/locust/operator/controller/LocustTestReconciler.java b/src/main/java/com/locust/operator/controller/LocustTestReconciler.java deleted file mode 100644 index d8416de5..00000000 --- a/src/main/java/com/locust/operator/controller/LocustTestReconciler.java +++ /dev/null @@ -1,127 +0,0 @@ -package com.locust.operator.controller; - -import com.locust.operator.controller.utils.LoadGenHelpers; -import com.locust.operator.controller.utils.resource.manage.ResourceCreationManager; -import com.locust.operator.controller.utils.resource.manage.ResourceDeletionManager; -import com.locust.operator.customresource.LocustTest; -import io.javaoperatorsdk.operator.api.reconciler.Cleaner; -import io.javaoperatorsdk.operator.api.reconciler.Context; -import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration; -import io.javaoperatorsdk.operator.api.reconciler.DeleteControl; -import 
io.javaoperatorsdk.operator.api.reconciler.Reconciler; -import io.javaoperatorsdk.operator.api.reconciler.UpdateControl; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; - -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperationalMode.WORKER; - -@Slf4j -@Singleton -@ControllerConfiguration -public class LocustTestReconciler implements Reconciler, Cleaner { - - private final LoadGenHelpers loadGenHelpers; - private final ResourceCreationManager creationManager; - private final ResourceDeletionManager deletionManager; - - public LocustTestReconciler(LoadGenHelpers loadGenHelpers, ResourceCreationManager creationManager, - ResourceDeletionManager deletionManager) { - this.loadGenHelpers = loadGenHelpers; - this.creationManager = creationManager; - this.deletionManager = deletionManager; - } - - @Override - public UpdateControl reconcile(LocustTest resource, Context context) { - // * On update >> NOOP - if (resource.getMetadata().getGeneration() > 1) { - // On update will be no op as this use case is not aligned with the use case of this reconciler - log.info("LocustTest updated: {} in namespace: {}.", resource.getMetadata().getName(), resource.getMetadata().getNamespace()); - log.info("Update operations on {} are not currently supported!", resource.getCRDName()); - return UpdateControl.noUpdate(); - } - - // * On add - log.info("LocustTest created: '{}'", resource.getMetadata().getName()); - - log.debug( - """ - Custom resource information:\s - Namespace: '{}' CR name: '{}'\s - Image: '{}'\s - Master command: '{}'\s - Worker command: '{}'\s - Worker replica count:'{}'\s - Annotations: '{}'\s - Labels: '{}'\s - Affinity: '{}'\s - Tolerations: '{}'\s - configMap:'{}'.""", - resource.getMetadata().getNamespace(), - resource.getMetadata().getName(), - resource.getSpec().getImage(), - resource.getSpec().getMasterCommandSeed(), - resource.getSpec().getWorkerCommandSeed(), - 
resource.getSpec().getWorkerReplicas(), - resource.getSpec().getAnnotations(), - resource.getSpec().getLabels(), - resource.getSpec().getAffinity(), - resource.getSpec().getTolerations(), - resource.getSpec().getConfigMap()); - - // * Construct node commands & map to internal dto - // Generate `master` node object - var masterNode = loadGenHelpers.generateLoadGenNodeObject(resource, MASTER); - log.debug("Master node configuration: {}", masterNode); - - // Constructing `worker` node object - var workerNode = loadGenHelpers.generateLoadGenNodeObject(resource, WORKER); - log.debug("Worker node configuration: {}", workerNode); - - // * Deploy load generation resource - // Deploy Master node service - creationManager.createMasterService(masterNode, resource.getMetadata().getNamespace()); - - // Deploy Master job - creationManager.createJob(masterNode, resource.getMetadata().getNamespace(), resource.getMetadata().getName()); - - // Deploy Worker jobs - creationManager.createJob(workerNode, resource.getMetadata().getNamespace(), resource.getMetadata().getName()); - - // TODO update status - return UpdateControl.noUpdate(); - } - - @Override - public DeleteControl cleanup(LocustTest resource, Context context) { - - // * Log custom resource - log.info("LocustTest deleted: {}", resource.getMetadata().getName()); - - log.debug( - "Deleted in namespace: {}, \nCR with name: {}, and generation: {}, \nimage: {}, \nmaster command: {}, \nworker command: {}, \nreplicas: {} \nconfigMap:'{}'.", - resource.getMetadata().getNamespace(), - resource.getMetadata().getName(), - resource.getMetadata().getGeneration(), - resource.getSpec().getImage(), - resource.getSpec().getMasterCommandSeed(), - resource.getSpec().getWorkerCommandSeed(), - resource.getSpec().getWorkerReplicas(), - resource.getSpec().getConfigMap()); - - - // * Delete load generation resource - // Delete Master node service - deletionManager.deleteService(resource, MASTER); - - // Delete Master job - 
deletionManager.deleteJob(resource, MASTER); - - // Delete Worker jobs - deletionManager.deleteJob(resource, WORKER); - - return DeleteControl.defaultDelete(); - } - -} diff --git a/src/main/java/com/locust/operator/controller/config/SysConfig.java b/src/main/java/com/locust/operator/controller/config/SysConfig.java deleted file mode 100644 index bc3844d9..00000000 --- a/src/main/java/com/locust/operator/controller/config/SysConfig.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.locust.operator.controller.config; - -import io.micronaut.context.annotation.Property; -import jakarta.inject.Singleton; -import lombok.Getter; -import lombok.ToString; -import org.apache.commons.lang3.math.NumberUtils; - -@Getter -@ToString -@Singleton -public class SysConfig { - - // * Kafka - @Property(name = "config.load-generation-pods.kafka.bootstrap-servers") - private String kafkaBootstrapServers; - @Property(name = "config.load-generation-pods.kafka.security.enabled") - private boolean kafkaSecurityEnabled; - @Property(name = "config.load-generation-pods.kafka.security.protocol") - private String kafkaSecurityProtocol; - @Property(name = "config.load-generation-pods.kafka.security.username") - private String kafkaUsername; - @Property(name = "config.load-generation-pods.kafka.security.password") - private String kafkaUserPassword; - @Property(name = "config.load-generation-pods.kafka.sasl.mechanism") - private String kafkaSaslMechanism; - @ToString.Exclude - @Property(name = "config.load-generation-pods.kafka.sasl.jaas.config") - private String kafkaSaslJaasConfig; - - // * Generated job characteristics - /** - * We use Object here to prevent automatic conversion from null to 0. - *

- * See {@link #getTtlSecondsAfterFinished()} for understanding how the - * value is converted to an integer. - */ - @Property(name = "config.load-generation-jobs.ttl-seconds-after-finished") - private Object ttlSecondsAfterFinished; - - // * Generated pod characteristics - @Property(name = "config.load-generation-pods.resource.cpu-request") - private String podCpuRequest; - @Property(name = "config.load-generation-pods.resource.mem-request") - private String podMemRequest; - @Property(name = "config.load-generation-pods.resource.ephemeralStorage-request") - private String podEphemeralStorageRequest; - @Property(name = "config.load-generation-pods.resource.cpu-limit") - private String podCpuLimit; - @Property(name = "config.load-generation-pods.resource.mem-limit") - private String podMemLimit; - @Property(name = "config.load-generation-pods.resource.ephemeralStorage-limit") - private String podEphemeralStorageLimit; - - // * Metrics exporter container characteristics - @Property(name = "config.load-generation-pods.metricsExporter.image") - private String metricsExporterImage; - @Property(name = "config.load-generation-pods.metricsExporter.port") - private Integer metricsExporterPort; - @Property(name = "config.load-generation-pods.metricsExporter.pullPolicy") - private String metricsExporterPullPolicy; - @Property(name = "config.load-generation-pods.metricsExporter.resource.cpu-request") - private String metricsExporterCpuRequest; - @Property(name = "config.load-generation-pods.metricsExporter.resource.mem-request") - private String metricsExporterMemRequest; - @Property(name = "config.load-generation-pods.metricsExporter.resource.ephemeralStorage-request") - private String metricsExporterEphemeralStorageRequest; - @Property(name = "config.load-generation-pods.metricsExporter.resource.cpu-limit") - private String metricsExporterCpuLimit; - @Property(name = "config.load-generation-pods.metricsExporter.resource.mem-limit") - private String metricsExporterMemLimit; - 
@Property(name = "config.load-generation-pods.metricsExporter.resource.ephemeralStorage-limit") - private String metricsExporterEphemeralStorageLimit; - - @Property(name = "config.load-generation-pods.affinity.enableCrInjection") - private boolean affinityCrInjectionEnabled; - @Property(name = "config.load-generation-pods.taintTolerations.enableCrInjection") - private boolean tolerationsCrInjectionEnabled; - - /** - * Value configured for setting Kubernetes Jobs' ttlSecondsAfterFinished property. - * This method will try to convert the value to an integer or fail and report invalid values. - * {@code null} or empty strings will result in a {@code null} return. - * - * @return either {@code null} or an integer value greater than or equal to 0 - */ - public Integer getTtlSecondsAfterFinished() { - final String stringValue = String.valueOf(this.ttlSecondsAfterFinished); - - if (NumberUtils.isDigits(stringValue)) { - return Integer.parseInt(stringValue); - } else if (stringValue.isEmpty()) { - return null; - } else { - throw new IllegalArgumentException( - String.format( - "Invalid value '%s' for property ttl-seconds-after-finished", - stringValue - ) - ); - } - } -} diff --git a/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java b/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java deleted file mode 100644 index e75e410d..00000000 --- a/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.locust.operator.controller.dto; - -import com.locust.operator.customresource.internaldto.LocustTestAffinity; -import com.locust.operator.customresource.internaldto.LocustTestToleration; -import lombok.AccessLevel; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.util.List; -import java.util.Map; - -@Data -@Builder -@AllArgsConstructor -@NoArgsConstructor(access = AccessLevel.NONE) -public class 
LoadGenerationNode { - - private String name; - private Map labels; - private Map annotations; - private LocustTestAffinity affinity; - private List tolerations; - private Integer ttlSecondsAfterFinished; - private List command; - private OperationalMode operationalMode; - private String image; - private String imagePullPolicy; - private List imagePullSecrets; - private Integer replicas; - private List ports; - private String configMap; - private String libConfigMap; - -} diff --git a/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java b/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java deleted file mode 100644 index ac1e10f0..00000000 --- a/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.locust.operator.controller.dto; - -import io.fabric8.kubernetes.api.model.ResourceRequirements; -import lombok.AccessLevel; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -@Data -@Builder -@AllArgsConstructor -@NoArgsConstructor(access = AccessLevel.NONE) -public class MetricsExporterContainer { - - private String containerName; - private String containerImage; - private String pullPolicy; - private int exporterPort; - private ResourceRequirements resourceRequirements; - -} diff --git a/src/main/java/com/locust/operator/controller/dto/OperationalMode.java b/src/main/java/com/locust/operator/controller/dto/OperationalMode.java deleted file mode 100644 index f187b730..00000000 --- a/src/main/java/com/locust/operator/controller/dto/OperationalMode.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.locust.operator.controller.dto; - -import lombok.AllArgsConstructor; -import lombok.Getter; - -@AllArgsConstructor -public enum OperationalMode { - - MASTER("master"), - WORKER("worker"); - - @Getter - public final String mode; -} diff --git 
a/src/main/java/com/locust/operator/controller/dto/OperatorType.java b/src/main/java/com/locust/operator/controller/dto/OperatorType.java deleted file mode 100644 index ca96cb03..00000000 --- a/src/main/java/com/locust/operator/controller/dto/OperatorType.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.locust.operator.controller.dto; - -import lombok.AllArgsConstructor; -import lombok.Getter; - -@AllArgsConstructor -public enum OperatorType { - - EXISTS("Exists"), - EQUAL("Equal"); - - @Getter - public final String type; - -} diff --git a/src/main/java/com/locust/operator/controller/utils/Constants.java b/src/main/java/com/locust/operator/controller/utils/Constants.java deleted file mode 100644 index 25093553..00000000 --- a/src/main/java/com/locust/operator/controller/utils/Constants.java +++ /dev/null @@ -1,88 +0,0 @@ -package com.locust.operator.controller.utils; - -import lombok.NoArgsConstructor; - -import java.util.List; - -import static lombok.AccessLevel.PRIVATE; - -@NoArgsConstructor(access = PRIVATE) -public class Constants { - - public static final String NODE_NAME_TEMPLATE = "%s-%s"; - - // Master node constants - public static final int MASTER_NODE_REPLICA_COUNT = 1; - public static final int DEFAULT_WEB_UI_PORT = 8089; - // 8089 -> Web interface - // 5557, 5558 -> Node communication - public static final List MASTER_NODE_PORTS = List.of(5557, 5558, DEFAULT_WEB_UI_PORT); - - public static final Integer WORKER_NODE_PORT = 8080; - // Master node command template: %s -> Team test configuration - public static final String MASTER_CMD_TEMPLATE = "%s " - // Declare `master` operation mode & availability port - + "--master --master-port=%d " - // Number of workers to wait for before starting the test - + "--expect-workers=%d " - // Auto start the test while keeping the UI available - + "--autostart --autoquit 60 " - // Allow to automatically rebalance users if new workers are added or removed during a test run. 
- + "--enable-rebalancing " - // Log only the summary - + "--only-summary "; - - // Worker node constants - // When used, output will be: " --worker --master-port= --master-host=" - public static final String WORKER_CMD_TEMPLATE = "%s --worker --master-port=%d --master-host=%s"; - - // Generic k8s constants - public static final String APP_DEFAULT_LABEL = "performance-test-name"; - public static final String SERVICE_SELECTOR_LABEL = "performance-test-pod-name"; - public static final String MANAGED_BY_LABEL_KEY = "managed-by"; - public static final String MANAGED_BY_LABEL_VALUE = "locust-k8s-operator"; - - // Environment variables names - public static final String KAFKA_BOOTSTRAP_SERVERS = "KAFKA_BOOTSTRAP_SERVERS"; - public static final String KAFKA_SECURITY_ENABLED = "KAFKA_SECURITY_ENABLED"; - public static final String KAFKA_SECURITY_PROTOCOL_CONFIG = "KAFKA_SECURITY_PROTOCOL_CONFIG"; - public static final String KAFKA_SASL_MECHANISM = "KAFKA_SASL_MECHANISM"; - public static final String KAFKA_SASL_JAAS_CONFIG = "KAFKA_SASL_JAAS_CONFIG"; - public static final String KAFKA_USERNAME = "KAFKA_USERNAME"; - public static final String KAFKA_PASSWORD = "KAFKA_PASSWORD"; - - // Service constants - public static final String PORT_DEFAULT_NAME = "port"; - public static final String TCP_PROTOCOL = "TCP"; - public static final String METRICS_PORT_NAME = "prometheus-metrics"; - - // Job constants - public static final String DEFAULT_RESTART_POLICY = "Never"; - public static final int BACKOFF_LIMIT = 0; - public static final String DEFAULT_MOUNT_PATH = "/lotest/src/"; - public static final String LIB_MOUNT_PATH = "/opt/locust/lib"; - public static final String CONTAINER_ARGS_SEPARATOR = " "; - - // Node Affinity constants - public static final String DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR = "In"; - - // Metrics - public static final String PROMETHEUS_IO_SCRAPE = "prometheus.io/scrape"; - public static final String PROMETHEUS_IO_PATH = "prometheus.io/path"; - public static 
final String PROMETHEUS_IO_PORT = "prometheus.io/port"; - public static final String PROMETHEUS_IO_ENDPOINT = "/metrics"; - - // Metrics container - public static final String EXPORTER_CONTAINER_NAME = "locust-metrics-exporter"; - - public static final String EXPORTER_URI_ENV_VAR = "LOCUST_EXPORTER_URI"; - // localhost is used because the exporter container is in the same pod as the master container. - // This means that they share the same network - public static final String EXPORTER_URI_ENV_VAR_VALUE = String.format("http://localhost:%s", DEFAULT_WEB_UI_PORT); - - public static final String EXPORTER_PORT_ENV_VAR = "LOCUST_EXPORTER_WEB_LISTEN_ADDRESS"; - - public static final String DEFAULT_RESOURCE_TARGET = "defaultTarget"; - public static final String METRICS_EXPORTER_RESOURCE_TARGET = "metricsExporter"; - -} diff --git a/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java b/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java deleted file mode 100644 index 2c39f50f..00000000 --- a/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java +++ /dev/null @@ -1,397 +0,0 @@ -package com.locust.operator.controller.utils; - -import com.locust.operator.controller.config.SysConfig; -import com.locust.operator.controller.dto.LoadGenerationNode; -import com.locust.operator.controller.dto.MetricsExporterContainer; -import com.locust.operator.controller.dto.OperationalMode; -import com.locust.operator.customresource.LocustTest; -import com.locust.operator.customresource.internaldto.LocustTestAffinity; -import com.locust.operator.customresource.internaldto.LocustTestToleration; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirements; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import static 
com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperationalMode.WORKER; -import static com.locust.operator.controller.utils.Constants.CONTAINER_ARGS_SEPARATOR; -import static com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET; -import static com.locust.operator.controller.utils.Constants.EXPORTER_CONTAINER_NAME; -import static com.locust.operator.controller.utils.Constants.KAFKA_BOOTSTRAP_SERVERS; -import static com.locust.operator.controller.utils.Constants.KAFKA_PASSWORD; -import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_JAAS_CONFIG; -import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_MECHANISM; -import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_ENABLED; -import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_PROTOCOL_CONFIG; -import static com.locust.operator.controller.utils.Constants.KAFKA_USERNAME; -import static com.locust.operator.controller.utils.Constants.MASTER_CMD_TEMPLATE; -import static com.locust.operator.controller.utils.Constants.MASTER_NODE_PORTS; -import static com.locust.operator.controller.utils.Constants.MASTER_NODE_REPLICA_COUNT; -import static com.locust.operator.controller.utils.Constants.METRICS_EXPORTER_RESOURCE_TARGET; -import static com.locust.operator.controller.utils.Constants.NODE_NAME_TEMPLATE; -import static com.locust.operator.controller.utils.Constants.WORKER_CMD_TEMPLATE; -import static com.locust.operator.controller.utils.Constants.WORKER_NODE_PORT; - -@Slf4j -@Singleton -public class LoadGenHelpers { - - private final SysConfig config; - - public LoadGenHelpers(SysConfig config) { - this.config = config; - } - - /** - * Parse an LocustTest resource and convert it a LoadGenerationNode object after: - Constructing the node operational command based on - * the `mode` parameter - Set the replica count based on the `mode` parameter - * - * @param resource Custom 
resource object - * @param mode Operational mode - * @return Load generation node configuration - */ - public LoadGenerationNode generateLoadGenNodeObject(LocustTest resource, OperationalMode mode) { - - return LoadGenerationNode.builder() - .name(constructNodeName(resource, mode)) - .labels(constructNodeLabels(resource, mode)) - .annotations(constructNodeAnnotations(resource, mode)) - .affinity(getNodeAffinity(resource)) - .tolerations(getPodToleration(resource)) - .ttlSecondsAfterFinished(getTtlSecondsAfterFinished()) - .command(constructNodeCommand(resource, mode)) - .operationalMode(mode) - .image(getNodeImage(resource)) - .imagePullPolicy(getNodeImagePullPolicy(resource)) - .imagePullSecrets(getNodeImagePullSecrets(resource)) - .replicas(getReplicaCount(resource, mode)) - .ports(getNodePorts(resource, mode)) - .configMap(getConfigMap(resource)) - .libConfigMap(getLibConfigMap(resource)) - .build(); - - } - - private Integer getTtlSecondsAfterFinished() { - return this.config.getTtlSecondsAfterFinished(); - } - - private List getPodToleration(LocustTest resource) { - - return config.isTolerationsCrInjectionEnabled() ? resource.getSpec().getTolerations() : null; - - } - - public String getConfigMap(LocustTest resource) { - - return resource.getSpec().getConfigMap(); - - } - - public String getLibConfigMap(LocustTest resource) { - - return resource.getSpec().getLibConfigMap(); - - } - - private String getNodeImage(LocustTest resource) { - - return resource.getSpec().getImage(); - - } - - private String getNodeImagePullPolicy(LocustTest resource) { - return resource.getSpec().getImagePullPolicy(); - } - - private List getNodeImagePullSecrets(LocustTest resource) { - return resource.getSpec().getImagePullSecrets(); - } - - public LocustTestAffinity getNodeAffinity(LocustTest resource) { - - return config.isAffinityCrInjectionEnabled() ? 
resource.getSpec().getAffinity() : null; - - } - - public String constructNodeName(LocustTest customResource, OperationalMode mode) { - - return String - .format(NODE_NAME_TEMPLATE, customResource.getMetadata().getName(), mode.getMode()) - .replace(".", "-"); - - } - - /** - * Constructs the labels to attach to the master and worker pods. - * - * @param customResource The custom resource object - * @param mode The operational mode - * @return A non-null, possibly empty map of labels - */ - public Map constructNodeLabels(final LocustTest customResource, final OperationalMode mode) { - final Map> labels = Optional.ofNullable(customResource.getSpec().getLabels()) - .orElse(new HashMap<>()); - final Map result; - if (mode.equals(MASTER)) { - result = labels.getOrDefault(MASTER.getMode(), new HashMap<>()); - } else { - // Worker - result = labels.getOrDefault(WORKER.getMode(), new HashMap<>()); - } - log.debug("Labels attached to {} pod are {}", mode.getMode(), result); - return result; - } - - /** - * Constructs the annotations to attach to the master and worker pods. 
- * - * @param customResource The custom resource object - * @param mode The operational mode - * @return A non-null, possibly empty map of annotations - */ - public Map constructNodeAnnotations(final LocustTest customResource, final OperationalMode mode) { - final Map> annotations = Optional.ofNullable(customResource.getSpec().getAnnotations()) - .orElse(new HashMap<>()); - final Map result; - if (mode.equals(MASTER)) { - result = annotations.getOrDefault(MASTER.getMode(), new HashMap<>()); - } else { - // Worker - result = annotations.getOrDefault(WORKER.getMode(), new HashMap<>()); - } - log.debug("Annotations attached to {} pod are {}", mode.getMode(), result); - return result; - } - - /** - * Construct node command based on mode of operation - * - * @param customResource Custom resource object - * @param mode Operational mode - * @return Node command - */ - private List constructNodeCommand(LocustTest customResource, OperationalMode mode) { - - String cmd; - - if (mode.equals(MASTER)) { - cmd = String.format(MASTER_CMD_TEMPLATE, - customResource.getSpec().getMasterCommandSeed(), - MASTER_NODE_PORTS.getFirst(), - customResource.getSpec().getWorkerReplicas()); - } else { - // worker - cmd = String.format(WORKER_CMD_TEMPLATE, - customResource.getSpec().getWorkerCommandSeed(), - MASTER_NODE_PORTS.getFirst(), - constructNodeName(customResource, MASTER) - ); - } - - log.debug("Constructed command: {}", cmd); - // Split the command on <\s> to match expected container args - return List.of(cmd.split(CONTAINER_ARGS_SEPARATOR)); - } - - /** - * Get Replica count based on mode of operation - * - * @param customResource Custom resource object - * @param mode Operational mode - * @return Replica count - */ - private int getReplicaCount(LocustTest customResource, OperationalMode mode) { - - Integer replicaCount; - - if (mode.equals(MASTER)) { - replicaCount = MASTER_NODE_REPLICA_COUNT; - } else { - replicaCount = customResource.getSpec().getWorkerReplicas(); - } - - 
log.debug("Replica count for node: {}, with mode: {}, is: {}", customResource.getMetadata().getName(), mode, replicaCount); - return replicaCount; - - } - - private List getNodePorts(LocustTest customResource, OperationalMode mode) { - - List ports; - - if (mode.equals(MASTER)) { - ports = MASTER_NODE_PORTS; - } else { - ports = Collections.singletonList(WORKER_NODE_PORT); - } - - log.debug("Ports list for node: {}, with mode: {}, is: {}", customResource.getMetadata().getName(), mode, ports); - return ports; - - } - - public Map generateContainerEnvironmentMap() { - HashMap environmentMap = new HashMap<>(); - - environmentMap.put(KAFKA_BOOTSTRAP_SERVERS, config.getKafkaBootstrapServers()); - environmentMap.put(KAFKA_SECURITY_ENABLED, String.valueOf(config.isKafkaSecurityEnabled())); - environmentMap.put(KAFKA_SECURITY_PROTOCOL_CONFIG, config.getKafkaSecurityProtocol()); - environmentMap.put(KAFKA_SASL_MECHANISM, config.getKafkaSaslMechanism()); - environmentMap.put(KAFKA_SASL_JAAS_CONFIG, config.getKafkaSaslJaasConfig()); - environmentMap.put(KAFKA_USERNAME, config.getKafkaUsername()); - environmentMap.put(KAFKA_PASSWORD, config.getKafkaUserPassword()); - - return environmentMap; - } - - /** - * Constructs a MetricsExporterContainer using the configuration settings and resource requirements. - * - * @return A MetricsExporterContainer instance configured with the specified settings and resource requirements. 
- */ - public MetricsExporterContainer constructMetricsExporterContainer() { - return new MetricsExporterContainer( - EXPORTER_CONTAINER_NAME, - config.getMetricsExporterImage(), - config.getMetricsExporterPullPolicy(), - config.getMetricsExporterPort(), - this.getResourceRequirements(METRICS_EXPORTER_RESOURCE_TARGET) - - ); - } - - /** - * Get resource request and limit for containers - * - * @return resource requirements - */ - public ResourceRequirements getResourceRequirements(String target) { - - Map resourceRequests; - Map resourceLimits; - - // Default target - if (target.equals(DEFAULT_RESOURCE_TARGET)) { - - resourceRequests = this.getResourceRequests(); - resourceLimits = this.getResourceLimits(); - - // If not default target, then the assumed target is a "Metrics Exporter" container! - // + No need for "else if" in order to avoid unneeded checks and increased complexity - // + in a future implementation if another "target" is introduced, - // + the method should be updated and this comment removed. - } else { - - resourceRequests = this.getMetricsExporterResourceRequests(); - resourceLimits = this.getMetricsExporterResourceLimits(); - - } - - final var resourceRequest = new ResourceRequirements(); - - // Add memory and cpu resource requests - resourceRequest.setRequests(resourceRequests); - - // Add memory and cpu resource limits - resourceRequest.setLimits(resourceLimits); - - return resourceRequest; - - } - - /** - * Get requested resources based on configuration (defaults or HELM). 
- * - * @return the resources request to use - */ - private Map getResourceRequests() { - String memOverride = config.getPodMemRequest(); - String cpuOverride = config.getPodCpuRequest(); - String ephemeralOverride = config.getPodEphemeralStorageRequest(); - - log.debug("Using resource requests - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride); - - return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride); - } - - /** - * Get resource limits based on configuration (defaults or HELM). - * - * @return the resource limits to use - */ - private Map getResourceLimits() { - String memOverride = config.getPodMemLimit(); - String cpuOverride = config.getPodCpuLimit(); - String ephemeralOverride = config.getPodEphemeralStorageLimit(); - - log.debug("Using resource limits - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride); - - return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride); - } - - /** - * Get resources request for Metrics Exporter container. - * - * @return the resource requests to use - */ - private Map getMetricsExporterResourceRequests() { - String memOverride = config.getMetricsExporterMemRequest(); - String cpuOverride = config.getMetricsExporterCpuRequest(); - String ephemeralOverride = config.getMetricsExporterEphemeralStorageRequest(); - - log.debug("Using resource requests for metrics exporter - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, - ephemeralOverride); - - return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride); - } - - /** - * Get resource limits for Metrics Exporter container. 
- * - * @return the resource requests to use - */ - private Map getMetricsExporterResourceLimits() { - String memOverride = config.getMetricsExporterMemLimit(); - String cpuOverride = config.getMetricsExporterCpuLimit(); - String ephemeralOverride = config.getMetricsExporterEphemeralStorageLimit(); - - log.debug("Using resource limits - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride); - - return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride); - } - - /** - * Generates a resource override map based on the provided memory, CPU, and ephemeral storage overrides. - * - * @param memOverride The memory override value to be used for the "memory" resource. - * @param cpuOverride The CPU override value to be used for the "cpu" resource. - * @param ephemeralOverride The ephemeral storage override value to be used for the "ephemeral-storage" resource. This value will be - * applied only if the Kubernetes version supports "ephemeral-storage" requests. - * @return A Map containing resource overrides for memory, CPU, and ephemeral storage. 
- */ - private Map generateResourceOverrideMap(String memOverride, String cpuOverride, String ephemeralOverride) { - Map resourceOverrideMap = new HashMap<>(); - - Optional.ofNullable(memOverride) - .filter(s -> !s.isBlank()) - .ifPresent(override -> resourceOverrideMap.put("memory", new Quantity(override))); - - Optional.ofNullable(cpuOverride) - .filter(s -> !s.isBlank()) - .ifPresent(override -> resourceOverrideMap.put("cpu", new Quantity(override))); - - Optional.ofNullable(ephemeralOverride) - .filter(s -> !s.isBlank()) - .ifPresent(override -> resourceOverrideMap.put("ephemeral-storage", new Quantity(override))); - - return resourceOverrideMap; - } - -} diff --git a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java deleted file mode 100644 index 841a0d04..00000000 --- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java +++ /dev/null @@ -1,587 +0,0 @@ -package com.locust.operator.controller.utils.resource.manage; - -import com.locust.operator.controller.dto.LoadGenerationNode; -import com.locust.operator.controller.dto.MetricsExporterContainer; -import com.locust.operator.controller.utils.LoadGenHelpers; -import io.fabric8.kubernetes.api.model.Affinity; -import io.fabric8.kubernetes.api.model.AffinityBuilder; -import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource; -import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder; -import io.fabric8.kubernetes.api.model.Container; -import io.fabric8.kubernetes.api.model.ContainerBuilder; -import io.fabric8.kubernetes.api.model.ContainerPort; -import io.fabric8.kubernetes.api.model.ContainerPortBuilder; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.EnvVarBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import 
io.fabric8.kubernetes.api.model.LocalObjectReferenceBuilder; -import io.fabric8.kubernetes.api.model.NodeAffinity; -import io.fabric8.kubernetes.api.model.NodeAffinityBuilder; -import io.fabric8.kubernetes.api.model.NodeSelector; -import io.fabric8.kubernetes.api.model.NodeSelectorBuilder; -import io.fabric8.kubernetes.api.model.NodeSelectorRequirement; -import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder; -import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder; -import io.fabric8.kubernetes.api.model.ObjectMeta; -import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; -import io.fabric8.kubernetes.api.model.PodSpec; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; -import io.fabric8.kubernetes.api.model.PodTemplateSpec; -import io.fabric8.kubernetes.api.model.PodTemplateSpecBuilder; -import io.fabric8.kubernetes.api.model.Service; -import io.fabric8.kubernetes.api.model.ServiceBuilder; -import io.fabric8.kubernetes.api.model.Toleration; -import io.fabric8.kubernetes.api.model.TolerationBuilder; -import io.fabric8.kubernetes.api.model.Volume; -import io.fabric8.kubernetes.api.model.VolumeBuilder; -import io.fabric8.kubernetes.api.model.VolumeMount; -import io.fabric8.kubernetes.api.model.VolumeMountBuilder; -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; -import io.fabric8.kubernetes.api.model.batch.v1.JobSpec; -import io.fabric8.kubernetes.api.model.batch.v1.JobSpecBuilder; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import lombok.val; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; - -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperatorType.EQUAL; -import static 
com.locust.operator.controller.utils.Constants.APP_DEFAULT_LABEL; -import static com.locust.operator.controller.utils.Constants.BACKOFF_LIMIT; -import static com.locust.operator.controller.utils.Constants.DEFAULT_MOUNT_PATH; -import static com.locust.operator.controller.utils.Constants.LIB_MOUNT_PATH; -import static com.locust.operator.controller.utils.Constants.DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR; -import static com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET; -import static com.locust.operator.controller.utils.Constants.DEFAULT_RESTART_POLICY; -import static com.locust.operator.controller.utils.Constants.DEFAULT_WEB_UI_PORT; -import static com.locust.operator.controller.utils.Constants.EXPORTER_PORT_ENV_VAR; -import static com.locust.operator.controller.utils.Constants.EXPORTER_URI_ENV_VAR; -import static com.locust.operator.controller.utils.Constants.EXPORTER_URI_ENV_VAR_VALUE; -import static com.locust.operator.controller.utils.Constants.MANAGED_BY_LABEL_KEY; -import static com.locust.operator.controller.utils.Constants.MANAGED_BY_LABEL_VALUE; -import static com.locust.operator.controller.utils.Constants.METRICS_PORT_NAME; -import static com.locust.operator.controller.utils.Constants.PORT_DEFAULT_NAME; -import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_ENDPOINT; -import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_PATH; -import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_PORT; -import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_SCRAPE; -import static com.locust.operator.controller.utils.Constants.SERVICE_SELECTOR_LABEL; -import static com.locust.operator.controller.utils.Constants.TCP_PROTOCOL; - -@Slf4j -@Singleton -public class ResourceCreationHelpers { - - private final LoadGenHelpers loadGenHelpers; - - public ResourceCreationHelpers(LoadGenHelpers loadGenHelpers) { - this.loadGenHelpers = loadGenHelpers; - } - - /** - * Prepare a 
Kubernetes Job. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig Load generation configuration - * @return Job - */ - protected Job prepareJob(LoadGenerationNode nodeConfig, String testName) { - return new JobBuilder() - .withMetadata(prepareJobMetadata(nodeConfig)) - .withSpec(prepareJobSpec(nodeConfig, testName)) - .build(); - } - - /** - * Prepare Kubernetes 'Job > Metadata'. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig Load generation configuration - * @return ObjectMeta - */ - private ObjectMeta prepareJobMetadata(LoadGenerationNode nodeConfig) { - - // * Metadata - ObjectMeta jobMeta = new ObjectMetaBuilder() - .withName(nodeConfig.getName()) - .build(); - - log.debug("Prepared Kubernetes 'Job > Metadata': {}", jobMeta); - - return jobMeta; - - } - - /** - * Prepare Kubernetes 'Job > Spec'. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig Load generation configuration - * @return JobSpec - */ - private JobSpec prepareJobSpec(LoadGenerationNode nodeConfig, String testName) { - - // * Job Spec configuration - JobSpec jobSpec = new JobSpecBuilder() - .withTtlSecondsAfterFinished(nodeConfig.getTtlSecondsAfterFinished()) - - // Pods count - // Setting the `Parallelism` attribute will result in k8s deploying pods to match the requested value - // effectively enabling control over the deployed pod count. - .withParallelism(nodeConfig.getReplicas()) - - // Backoff limit - .withBackoffLimit(BACKOFF_LIMIT) - - // Template - .withTemplate(prepareSpecTemplate(nodeConfig, testName)) - - .build(); - - log.debug("Prepared Kubernetes 'Job > Spec': {}", jobSpec); - - return jobSpec; - - } - - /** - * Prepare Kubernetes 'Job > Spec > Template'. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig Load generation configuration - * @return PodTemplateSpec - */ - private PodTemplateSpec prepareSpecTemplate(LoadGenerationNode nodeConfig, String testName) { - - PodTemplateSpec specTemplate = new PodTemplateSpecBuilder() - .withMetadata(prepareTemplateMetadata(nodeConfig, testName)) - .withSpec(prepareTemplateSpec(nodeConfig)) - .build(); - - log.debug("Prepared Kubernetes 'Job > Spec > Template': {}", specTemplate); - - return specTemplate; - - } - - /** - * Prepare Kubernetes 'Job > Spec > Template > Metadata'. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig The node configuration object. - * @param testName Test name. - * @return PodTemplateSpec. - */ - private ObjectMeta prepareTemplateMetadata(LoadGenerationNode nodeConfig, String testName) { - - ObjectMeta templateMeta = new ObjectMetaBuilder() - // Labels - .addToLabels(APP_DEFAULT_LABEL, testName) - .addToLabels(SERVICE_SELECTOR_LABEL, nodeConfig.getName()) - .addToLabels(MANAGED_BY_LABEL_KEY, MANAGED_BY_LABEL_VALUE) - .addToLabels(nodeConfig.getLabels()) - - // Annotations - // Enable Prometheus endpoint discovery by Prometheus server - .addToAnnotations(PROMETHEUS_IO_SCRAPE, "true") - .addToAnnotations(PROMETHEUS_IO_PATH, PROMETHEUS_IO_ENDPOINT) - .addToAnnotations(PROMETHEUS_IO_PORT, String.valueOf(loadGenHelpers.constructMetricsExporterContainer().getExporterPort())) - .addToAnnotations(nodeConfig.getAnnotations()) - - .build(); - - log.debug("Prepared Kubernetes 'Job > Spec > Template > Metadata': {}", templateMeta); - - return templateMeta; - - } - - /** - * Prepare Kubernetes 'Job > Spec > Template > Spec'. - *

- * Reference: Kubernetes Job Docs - * - * @param nodeConfig Load generation configuration - * @return PodTemplateSpec - */ - private PodSpec prepareTemplateSpec(LoadGenerationNode nodeConfig) { - - PodSpec templateSpec = new PodSpecBuilder() - // images - .withImagePullSecrets(prepareImagePullSecrets(nodeConfig)) - - // Containers - .withContainers(prepareContainerList(nodeConfig)) - .withVolumes(prepareVolumesList(nodeConfig)) - .withAffinity(prepareAffinity(nodeConfig)) - .withTolerations(prepareTolerations(nodeConfig)) - .withRestartPolicy(DEFAULT_RESTART_POLICY) - .build(); - - log.debug("Prepared Kubernetes 'Job > Spec > Template > Spec': {}", templateSpec); - - return templateSpec; - - } - - private List prepareImagePullSecrets(LoadGenerationNode nodeConfig) { - final List references = new ArrayList<>(); - - if (nodeConfig.getImagePullSecrets() != null) { - references.addAll( - nodeConfig.getImagePullSecrets() - .stream() - .map(secretName -> new LocalObjectReferenceBuilder().withName(secretName).build()) - .toList() - ); - } - - log.debug("Prepared image pull secrets: {}", references); - - return references; - } - - private List prepareVolumesList(LoadGenerationNode nodeConfig) { - - List volumeList = new ArrayList<>(); - - if (nodeConfig.getConfigMap() != null) { - volumeList.add(prepareVolume(nodeConfig)); - } - - if (nodeConfig.getLibConfigMap() != null) { - volumeList.add(prepareLibVolume(nodeConfig)); - } - - return volumeList; - - } - - private Affinity prepareAffinity(LoadGenerationNode nodeConfig) { - - // Construct Affinity - var affinityBuilder = new AffinityBuilder(); - - //! Note for future feature extensions: - //! When adding support for more "Affinity" options, the evaluation inside the `if` condition is to be split into several checks. 
- if (nodeConfig.getAffinity() != null && nodeConfig.getAffinity().getNodeAffinity() != null) { - affinityBuilder.withNodeAffinity(prepareNodeAffinity(nodeConfig)); - } - - var affinity = affinityBuilder.build(); - log.debug("Prepared pod affinity: '{}'", affinity); - - return affinity; - - } - - private NodeAffinity prepareNodeAffinity(LoadGenerationNode nodeConfig) { - - var nodeAffinityBuilder = new NodeAffinityBuilder(); - - // Prepare Required during scheduling node selector - var requiredDuringSchedulingNodeSelector = prepareRequiredDuringSchedulingNodeSelector(nodeConfig); - - nodeAffinityBuilder.withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingNodeSelector); - - return nodeAffinityBuilder.build(); - - } - - private NodeSelector prepareRequiredDuringSchedulingNodeSelector(LoadGenerationNode nodeConfig) { - - // Required during scheduling - List matchExpressions = new ArrayList<>(); - - final var requiredDuringScheduling = Optional.ofNullable( - nodeConfig.getAffinity().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution()).orElse(new HashMap<>()); - - requiredDuringScheduling.forEach((requiredAffinityKey, requiredAffinityValue) -> matchExpressions - .add(new NodeSelectorRequirementBuilder().withKey(requiredAffinityKey).withOperator(DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR) - .withValues(requiredAffinityValue).build())); - - var nodeSelectorTerms = new NodeSelectorTermBuilder().withMatchExpressions(matchExpressions).build(); - - return new NodeSelectorBuilder().withNodeSelectorTerms(nodeSelectorTerms).build(); - } - - private List prepareTolerations(LoadGenerationNode nodeConfig) { - - List tolerations = new ArrayList<>(); - - if (nodeConfig.getTolerations() != null) { - - // For each configured node toleration from the Custom Resource, build a toleration object and add it to list - nodeConfig.getTolerations().forEach(nodeToleration -> { - var tolerationBuilder = new TolerationBuilder(); - tolerationBuilder - 
.withKey(nodeToleration.getKey()) - .withOperator(nodeToleration.getOperator()) - .withEffect(nodeToleration.getEffect()); - - if (nodeToleration.getOperator().equals(EQUAL.getType())) { - tolerationBuilder.withValue(nodeToleration.getValue()); - } - - tolerations.add(tolerationBuilder.build()); - }); - } - - log.debug("Prepared pod tolerations: '{}'", tolerations); - return tolerations; - - } - - private static Volume prepareVolume(LoadGenerationNode nodeConfig) { - return new VolumeBuilder() - .withName(nodeConfig.getName()) - .withConfigMap(prepareConfigMapSource(nodeConfig)) - .build(); - } - - private static Volume prepareLibVolume(LoadGenerationNode nodeConfig) { - return new VolumeBuilder() - .withName("lib") - .withConfigMap(prepareLibConfigMapSource(nodeConfig)) - .build(); - } - - private static ConfigMapVolumeSource prepareConfigMapSource(LoadGenerationNode nodeConfig) { - return new ConfigMapVolumeSourceBuilder() - .withName(nodeConfig.getConfigMap()) - .build(); - } - - private static ConfigMapVolumeSource prepareLibConfigMapSource(LoadGenerationNode nodeConfig) { - return new ConfigMapVolumeSourceBuilder() - .withName(nodeConfig.getLibConfigMap()) - .build(); - } - - private List prepareContainerList(LoadGenerationNode nodeConfig) { - - List constantsList = new ArrayList<>(); - - // Load generation container - constantsList.add(prepareLoadGenContainer(nodeConfig)); - - // Inject metrics container only if `master` - if (nodeConfig.getOperationalMode().equals(MASTER)) { - constantsList.add(prepareMetricsExporterContainer(loadGenHelpers.constructMetricsExporterContainer())); - } - - return constantsList; - - } - - /** - * Prepare locust prometheus metrics exporter container. - *

- * Reference for default exporter: locust exporter docs - * - * @param exporterContainer The metrics exporter container - * @return Container - */ - private Container prepareMetricsExporterContainer(final MetricsExporterContainer exporterContainer) { - - HashMap envMap = new HashMap<>(); - - envMap.put(EXPORTER_URI_ENV_VAR, EXPORTER_URI_ENV_VAR_VALUE); - envMap.put(EXPORTER_PORT_ENV_VAR, String.format(":%s", exporterContainer.getExporterPort())); - - Container container = new ContainerBuilder() - - // Name - .withName(exporterContainer.getContainerName()) - - // Image - .withImage(exporterContainer.getContainerImage()) - .withImagePullPolicy(exporterContainer.getPullPolicy()) - - // Resources - .withResources(exporterContainer.getResourceRequirements()) - - // Ports - .withPorts(new ContainerPortBuilder().withContainerPort(exporterContainer.getExporterPort()).build()) - - // Environment - .withEnv(prepareContainerEnvironmentVariables(envMap)) - - .build(); - - log.debug("Prepared Kubernetes metrics exporter container: {}", container); - - return container; - } - - /** - * Prepare a load generation container. - *

- * Reference: Kubernetes containers Docs - * - * @param nodeConfig Load generation configuration - * @return Container - */ - private Container prepareLoadGenContainer(LoadGenerationNode nodeConfig) { - Container container = new ContainerBuilder() - - // Name - .withName(nodeConfig.getName()) - - // Resource config - .withResources(loadGenHelpers.getResourceRequirements(DEFAULT_RESOURCE_TARGET)) - - // Image - .withImage(nodeConfig.getImage()) - .withImagePullPolicy(nodeConfig.getImagePullPolicy()) - - // Ports - .withPorts(prepareContainerPorts(nodeConfig.getPorts())) - - // Environment - .withEnv(prepareContainerEnvironmentVariables(loadGenHelpers.generateContainerEnvironmentMap())) - - // Container command - .withArgs(nodeConfig.getCommand()) - - // Mount configMap as volume - .withVolumeMounts(prepareVolumeMounts(nodeConfig)) - - .build(); - - log.debug("Prepared Kubernetes load generator container: {}", container); - - return container; - } - - private List prepareVolumeMounts(LoadGenerationNode nodeConfig) { - - List mounts = new ArrayList<>(); - - if (nodeConfig.getConfigMap() != null) { - // Prepare main configMap mount - mounts.add(new VolumeMountBuilder() - .withName(nodeConfig.getName()) - .withMountPath(DEFAULT_MOUNT_PATH) - .withReadOnly(false) - .build()); - } - - if (nodeConfig.getLibConfigMap() != null) { - // Prepare lib configMap mount - mounts.add(new VolumeMountBuilder() - .withName("lib") - .withMountPath(LIB_MOUNT_PATH) - .withReadOnly(false) - .build()); - } - - return mounts; - - } - - /** - * Prepare container Environment variable. - *

- * Reference: Kubernetes containers Docs - * - * @param envMap Environment variable map - * @return ContainerPort - */ - private List prepareContainerEnvironmentVariables(Map envMap) { - - List containerEnvVars = envMap - .entrySet() - .stream() - .map(entry -> new EnvVarBuilder() - .withName(entry.getKey()) - .withValue(entry.getValue()) - .build()) - .collect(Collectors.toList()); - - log.debug("Prepared container environment variable list: {}", containerEnvVars); - - return containerEnvVars; - - } - - /** - * Prepare container ports. - *

- * Reference: Kubernetes containers Docs - * - * @param portsList Container port list - * @return ContainerPort - */ - private List prepareContainerPorts(List portsList) { - - List containerPortList = portsList - .stream() - .map(port -> new ContainerPortBuilder().withContainerPort(port).build()) - .collect(Collectors.toList()); - - log.debug("Prepared container ports list: {}", containerPortList); - - return containerPortList; - - } - - protected Service prepareService(LoadGenerationNode nodeConfig) { - - // Initial service configuration - var serviceConfig = new ServiceBuilder() - - // Metadata - .withNewMetadata() - .withName(nodeConfig.getName()) - .endMetadata() - - // Spec - .withNewSpec() - .withSelector(Collections.singletonMap(SERVICE_SELECTOR_LABEL, nodeConfig.getName())); - - // Map ports - nodeConfig.getPorts() - .stream() - .filter(port -> !port.equals(DEFAULT_WEB_UI_PORT)) - .forEach(port -> { - - val portName = PORT_DEFAULT_NAME + port; - - serviceConfig - .addNewPort() - .withName(portName) - .withProtocol(TCP_PROTOCOL) - .withPort(port) - .endPort(); - }); - - // Metrics port - serviceConfig - .addNewPort() - .withName(METRICS_PORT_NAME) - .withProtocol(TCP_PROTOCOL) - .withPort(loadGenHelpers.constructMetricsExporterContainer().getExporterPort()) - .endPort(); - - // Finalize building the service object - var service = serviceConfig.endSpec(); - - return service.build(); - - } - -} diff --git a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java deleted file mode 100644 index 5f947955..00000000 --- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.locust.operator.controller.utils.resource.manage; - -import com.locust.operator.controller.dto.LoadGenerationNode; -import io.fabric8.kubernetes.api.model.Service; -import 
io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.client.ConfigBuilder; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; - -@Slf4j -@Singleton -public class ResourceCreationManager { - - private final ResourceCreationHelpers creationHelper; - - public ResourceCreationManager(ResourceCreationHelpers creationHelper) { - this.creationHelper = creationHelper; - } - - public void createJob(LoadGenerationNode nodeConfig, String namespace, String testName) { - - try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) { - - log.info("Creating Job for: {} in namespace: {}", nodeConfig.getName(), namespace); - - Job job = creationHelper.prepareJob(nodeConfig, testName); - job = client.batch().v1().jobs().inNamespace(namespace).resource(job).serverSideApply(); - - log.info("Created job with name: {}", job.getMetadata().getName()); - log.debug("Created job details: {}", job); - } catch (Exception e) { - - log.error("Exception occurred during Job creation: {}", e.getLocalizedMessage(), e); - - } - - } - - public void createMasterService(LoadGenerationNode nodeConfig, String namespace) { - try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) { - - log.info("Creating service for: {} in namespace: {}", nodeConfig.getName(), namespace); - - Service service = creationHelper.prepareService(nodeConfig); - - service = client.services().inNamespace(namespace).resource(service).create(); - log.info("Created service with name: {}", service.getMetadata().getName()); - log.debug("Created service {}", service); - - } catch (Exception e) { - - log.error("Exception occurred during service creation: {}", e.getLocalizedMessage(), e); - - } - } - -} diff --git 
a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java deleted file mode 100644 index 38c2a0e5..00000000 --- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java +++ /dev/null @@ -1,64 +0,0 @@ -package com.locust.operator.controller.utils.resource.manage; - -import com.locust.operator.controller.dto.OperationalMode; -import com.locust.operator.controller.utils.LoadGenHelpers; -import com.locust.operator.customresource.LocustTest; -import io.fabric8.kubernetes.api.model.StatusDetails; -import io.fabric8.kubernetes.client.ConfigBuilder; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import jakarta.inject.Singleton; -import lombok.extern.slf4j.Slf4j; -import lombok.val; - -import java.util.List; -import java.util.Optional; - -@Slf4j -@Singleton -public class ResourceDeletionManager { - - private final LoadGenHelpers loadGenHelpers; - - public ResourceDeletionManager(LoadGenHelpers loadGenHelpers) { - this.loadGenHelpers = loadGenHelpers; - } - - public Optional> deleteJob(LocustTest crdInstance, OperationalMode mode) { - - try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) { - - val namespace = crdInstance.getMetadata().getNamespace(); - val resourceName = loadGenHelpers.constructNodeName(crdInstance, mode); - log.info("Deleting Job for: {} in namespace: {}", crdInstance.getMetadata().getName(), namespace); - return Optional.ofNullable(client.batch().v1().jobs().inNamespace(namespace).withName(resourceName).delete()); - - } catch (Exception e) { - - log.error("Exception occurred during Job deletion: {}", e.getLocalizedMessage(), e); - return Optional.empty(); - - } - - } - - public Optional> deleteService(LocustTest crdInstance, OperationalMode mode) { - - 
try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) { - - val namespace = crdInstance.getMetadata().getNamespace(); - val resourceName = loadGenHelpers.constructNodeName(crdInstance, mode); - - log.info("Deleting Service for: {} in namespace: {}", crdInstance.getMetadata().getName(), namespace); - return Optional.ofNullable(client.services().inNamespace(namespace).withName(resourceName).delete()); - - } catch (Exception e) { - - log.error("Exception occurred during Service deletion: {}", e.getLocalizedMessage(), e); - return Optional.empty(); - - } - - } - -} diff --git a/src/main/java/com/locust/operator/customresource/LocustTest.java b/src/main/java/com/locust/operator/customresource/LocustTest.java deleted file mode 100644 index f5dd194c..00000000 --- a/src/main/java/com/locust/operator/customresource/LocustTest.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.locust.operator.customresource; - -import io.fabric8.kubernetes.api.model.Namespaced; -import io.fabric8.kubernetes.client.CustomResource; -import io.fabric8.kubernetes.model.annotation.Group; -import io.fabric8.kubernetes.model.annotation.Version; - -import java.io.Serial; - -@Group(LocustTest.GROUP) -@Version(LocustTest.VERSION) -public class LocustTest extends CustomResource implements Namespaced { - - public static final String GROUP = "locust.io"; - public static final String VERSION = "v1"; - - // Used during deserialization to verify that the sender and receiver of a serialized object - // have loaded classes for that object that are compatible with respect to serialization. - // Manually setting this avoids the automatic allocation and thus removes the chance of unexpected failure during runtime. 
- @Serial - private static final long serialVersionUID = 1; - -} diff --git a/src/main/java/com/locust/operator/customresource/LocustTestSpec.java b/src/main/java/com/locust/operator/customresource/LocustTestSpec.java deleted file mode 100644 index 8d12dc46..00000000 --- a/src/main/java/com/locust/operator/customresource/LocustTestSpec.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.locust.operator.customresource; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.JsonDeserializer; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import com.locust.operator.customresource.internaldto.LocustTestAffinity; -import com.locust.operator.customresource.internaldto.LocustTestToleration; -import io.fabric8.kubernetes.api.model.KubernetesResource; -import lombok.Data; - -import java.io.Serial; -import java.util.List; -import java.util.Map; - -// This @JsonDeserialize overrides the deserializer used in KubernetesResource, -// in order to be able to deserialize correctly the fields in the 'spec' field of the json -@JsonDeserialize(using = JsonDeserializer.None.class) -@JsonInclude(Include.NON_NULL) -@Data -public class LocustTestSpec implements KubernetesResource { - - // Used during deserialization to verify that the sender and receiver of a serialized object - // have loaded classes for that object that are compatible with respect to serialization. - // Manually setting this avoids the automatic allocation and thus removes the chance of unexpected failure during runtime. 
- @Serial - private static final long serialVersionUID = 1; - - private Map> labels; - private Map> annotations; - private LocustTestAffinity affinity; - private List tolerations; - - private String masterCommandSeed; - private String workerCommandSeed; - private Integer workerReplicas; - private String configMap; - private String libConfigMap; - private String image; - private String imagePullPolicy; - private List imagePullSecrets; - -} diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java deleted file mode 100644 index ea9d404c..00000000 --- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.locust.operator.customresource.internaldto; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.JsonDeserializer; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import lombok.Data; - -import java.io.Serializable; - -@JsonDeserialize(using = JsonDeserializer.None.class) -@JsonInclude(Include.NON_NULL) -@Data -public class LocustTestAffinity implements Serializable { - - private static final long serialVersionUID = 1; - private LocustTestNodeAffinity nodeAffinity; - -} diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java deleted file mode 100644 index e1bb9038..00000000 --- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java +++ /dev/null @@ -1,21 +0,0 @@ -package com.locust.operator.customresource.internaldto; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import 
com.fasterxml.jackson.databind.JsonDeserializer; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import lombok.Data; - -import java.io.Serializable; -import java.util.Map; - -@JsonDeserialize(using = JsonDeserializer.None.class) -@JsonInclude(Include.NON_NULL) -@Data -public class LocustTestNodeAffinity implements Serializable { - - private static final long serialVersionUID = 1; - - private Map requiredDuringSchedulingIgnoredDuringExecution; - -} diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java deleted file mode 100644 index c25f2a00..00000000 --- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.locust.operator.customresource.internaldto; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import lombok.AllArgsConstructor; -import lombok.Data; -import lombok.NoArgsConstructor; - -import java.io.Serializable; - -@Data -@NoArgsConstructor -@AllArgsConstructor -@JsonInclude(Include.NON_NULL) -public class LocustTestToleration implements Serializable { - - private static final long serialVersionUID = 1; - - private String key; - private String operator; - private String value; - private String effect; - -} diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml deleted file mode 100644 index 562705f9..00000000 --- a/src/main/resources/application.yml +++ /dev/null @@ -1,81 +0,0 @@ -micronaut: - application: - name: locustK8sOperator - server: - port: ${APP_SERVER_PORT:8081} - - metrics: - binders: - web: - enabled: ${METRICS_WEB_ENABLE:true} - jvm: - enabled: ${METRICS_JVM_ENABLE:true} - uptime: - enabled: ${METRICS_UPTIME_ENABLE:true} - processor: - enabled: ${METRICS_PROCESSOR_ENABLE:true} - files: - enabled: 
${METRICS_FILES_ENABLE:true} - logback: - enabled: ${METRICS_LOGBACK_ENABLE:true} - executor: - enabled: ${METRICS_EXECUTOR_ENABLE:true} - kafka: - enabled: ${METRICS_KAFKA_ENABLE:false} - export: - prometheus: - enabled: true - descriptions: true - step: ${METRICS_PROMETHEUS_STEP:`PT30S`} - enabled: true -netty: - default: - allocator: - max-order: 3 - ---- -config: - watcher: - # 1m -> every 1 minute - resyncPeriod: 1m - k8s: - namespace: ${K8S_NAMESPACE:default} - load-generation-jobs: - ttl-seconds-after-finished: ${JOB_TTL_SECONDS_AFTER_FINISHED:} - load-generation-pods: - affinity: - enableCrInjection: ${ENABLE_AFFINITY_CR_INJECTION:false} - taintTolerations: - enableCrInjection: ${ENABLE_TAINT_TOLERATIONS_CR_INJECTION:false} - resource: - cpu-request: ${POD_CPU_REQUEST:`250m`} - mem-request: ${POD_MEM_REQUEST:`128Mi`} - ephemeralStorage-request: ${POD_EPHEMERAL_REQUEST:`30M`} - cpu-limit: ${POD_CPU_LIMIT:`1000m`} - mem-limit: ${POD_MEM_LIMIT:`1024Mi`} - ephemeralStorage-limit: ${POD_EPHEMERAL_LIMIT:`50M`} - metricsExporter: - image: ${METRICS_EXPORTER_IMAGE:`containersol/locust_exporter:v0.5.0`} - port: ${METRICS_EXPORTER_PORT:`9646`} - pullPolicy: ${METRICS_EXPORTER_IMAGE_PULL_POLICY:`Always`} - resource: - cpu-request: ${METRICS_EXPORTER_CPU_REQUEST:`250m`} - mem-request: ${METRICS_EXPORTER_MEM_REQUEST:`128Mi`} - ephemeralStorage-request: ${METRICS_EXPORTER_EPHEMERAL_REQUEST:`30M`} - cpu-limit: ${METRICS_EXPORTER_CPU_LIMIT:`1000m`} - mem-limit: ${METRICS_EXPORTER_MEM_LIMIT:`1024Mi`} - ephemeralStorage-limit: ${METRICS_EXPORTER_EPHEMERAL_LIMIT:`50M`} - - kafka: - bootstrap-servers: ${KAFKA_BOOTSTRAP_SERVERS:`localhost:9092`} - security: - enabled: ${KAFKA_SECURITY_ENABLED:`false`} - protocol: ${KAFKA_SECURITY_PROTOCOL_CONFIG:`SASL_PLAINTEXT`} - username: ${KAFKA_USERNAME:`localKafkaUser`} - password: ${KAFKA_PASSWORD:`localKafkaPassword`} - sasl: - mechanism: ${KAFKA_SASL_MECHANISM:`SCRAM-SHA-512`} - jaas: - config: ${KAFKA_SASL_JAAS_CONFIG:`placeholder`} - 
- diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml deleted file mode 100644 index bbf9e194..00000000 --- a/src/main/resources/logback.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - %green(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1}): %msg%n%throwable - - - - - - - diff --git a/src/test/java/com/locust/OperatorStarterTests.java b/src/test/java/com/locust/OperatorStarterTests.java deleted file mode 100644 index e6c880d4..00000000 --- a/src/test/java/com/locust/OperatorStarterTests.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.locust; - -import com.locust.operator.controller.LocustTestReconciler; -import io.fabric8.kubeapitest.junit.EnableKubeAPIServer; -import io.fabric8.kubeapitest.junit.KubeConfig; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -import static com.locust.operator.controller.TestFixtures.creatKubernetesClient; -import static com.locust.operator.controller.TestFixtures.setupCustomResourceDefinition; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@EnableKubeAPIServer(updateKubeConfigFile = true) -public class OperatorStarterTests { - - @KubeConfig - static String configYaml; - - @Mock - private LocustTestReconciler reconciler; - - @BeforeAll - void setup() { - MockitoAnnotations.openMocks(this); - setupCustomResourceDefinition(creatKubernetesClient(configYaml)); - } - - /** - * Ideally this test should be replaced with a combination of - * {@link io.micronaut.test.extensions.junit5.annotation.MicronautTest @MicronautTest} and a check that "Application.isRunning()" - * returns true. - *

- * The only reason this test was designed this way is that I am not able to find how to inject `k8sServerUrl` variable into the test - * environment before @MicronautTest boots up the Application. If the application trys to boot without `k8sServerUrl` being injected, it - * will throw an Exception. - */ - @Test - @DisplayName("Functional: Check operator startup core") - void operatorStarterCore() { - - // * Setup - var operatorStarter = new LocustTestOperatorStarter(reconciler); - - // * Act & Assert - // Passing "null" to onApplicationEvent(ServerStartupEvent event) is safe since the event is not used by the "operatorStarter" logic. - //executeWithK8sMockServer(k8sServerUrl, () -> operatorStarter.onApplicationEvent(null)); - operatorStarter.onApplicationEvent(null); - - // * Assert - // This test doesn't need an explicit assertion statement since the onApplicationEvent() logic - // will throw an exception if it doesn't manage to start the Operator. - - } - -} diff --git a/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java b/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java deleted file mode 100644 index 1a911b78..00000000 --- a/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java +++ /dev/null @@ -1,194 +0,0 @@ -package com.locust.operator.controller; - -import com.locust.operator.controller.config.SysConfig; -import com.locust.operator.controller.utils.LoadGenHelpers; -import com.locust.operator.controller.utils.resource.manage.ResourceCreationHelpers; -import com.locust.operator.controller.utils.resource.manage.ResourceCreationManager; -import com.locust.operator.controller.utils.resource.manage.ResourceDeletionManager; -import io.fabric8.kubeapitest.junit.EnableKubeAPIServer; -import io.fabric8.kubeapitest.junit.KubeConfig; -import io.fabric8.kubernetes.client.KubernetesClient; -import lombok.extern.slf4j.Slf4j; -import lombok.val; -import org.junit.jupiter.api.AfterEach; -import 
org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.junit.jupiter.MockitoExtension; - -import static com.locust.operator.controller.TestFixtures.DEFAULT_NAMESPACE; -import static com.locust.operator.controller.TestFixtures.creatKubernetesClient; -import static com.locust.operator.controller.TestFixtures.deleteLocustTestCrd; -import static com.locust.operator.controller.TestFixtures.prepareLocustTest; -import static com.locust.operator.controller.TestFixtures.setupCustomResourceDefinition; -import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.SoftAssertions.assertSoftly; - -@Slf4j -@ExtendWith(MockitoExtension.class) -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@EnableKubeAPIServer(updateKubeConfigFile = true) -class LocustTestReconcilerTests { - - @Mock - private SysConfig sysConfig; - - @KubeConfig - static String configYaml; - - private LocustTestReconciler locustTestReconciler; - private KubernetesClient k8sTestClient; - - @BeforeAll - void setupMethodMock() { - - // Mock configuration - MockitoAnnotations.openMocks(this); - var loadGenHelpers = new LoadGenHelpers(sysConfig); - var creationHelper = new ResourceCreationHelpers(loadGenHelpers); - var creationManager = new ResourceCreationManager(creationHelper); - var deletionManager = new ResourceDeletionManager(loadGenHelpers); - locustTestReconciler = new LocustTestReconciler(loadGenHelpers, creationManager, deletionManager); - - // Setup SysConfig mock - setupSysconfigMock(sysConfig); - - // Setup and deploy the CRD - k8sTestClient = creatKubernetesClient(configYaml); - - } - - @BeforeEach - void 
setup() { - // Setup and deploy the CRD - setupCustomResourceDefinition(k8sTestClient); - } - - @AfterEach - void tearDown() throws InterruptedException { - // Clean resources from cluster - deleteLocustTestCrd(k8sTestClient); - // Dirty loop until the CRD is deleted to avoid test failures due to the CRD not being deleted - while (!k8sTestClient.apiextensions().v1().customResourceDefinitions().list().getItems().isEmpty()) { - Thread.sleep(50); - } - } - - @Test - @DisplayName("Functional: Reconcile - onAdd event") - void reconcileOnAddEvent() { - // * Setup - val expectedJobCount = 2; - val expectedServiceCount = 2; - val resourceName = "team.perftest"; - val expectedMasterResourceName = "team-perftest-master"; // Based on the conversion logic - val expectedWorkerResourceName = "team-perftest-worker"; // Based on the conversion logic - val locustTest = prepareLocustTest(resourceName); - - // * Act - // Passing "null" to context is safe as it is not used in the "reconcile()" method - locustTestReconciler.reconcile(locustTest, null); - - // Get All Jobs created - val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list(); - log.debug("Acquired Job list: {}", jobList); - - // Get All Services created - val serviceList = k8sTestClient.services().inNamespace(DEFAULT_NAMESPACE).list(); - log.debug("Acquired Service list: {}", serviceList); - - // * Assert - assertSoftly(softly -> { - // Assert master/worker jobs have been created - softly.assertThat(jobList.getItems().size()).isEqualTo(expectedJobCount); - softly.assertThat(jobList.getItems().get(0).getMetadata().getName()).isEqualTo(expectedMasterResourceName); - softly.assertThat(jobList.getItems().get(1).getMetadata().getName()).isEqualTo(expectedWorkerResourceName); - - // Assert master service have been created - softly.assertThat(serviceList.getItems().size()).isEqualTo(expectedServiceCount); - // checking for the second item as the first service is the default kubernetes service - 
softly.assertThat(serviceList.getItems().get(1).getMetadata().getName()).isEqualTo(expectedMasterResourceName); - }); - - } - - @Test - @DisplayName("Functional: Reconcile - NOOP to onUpdate event") - void reconcileOnUpdateEvent() { - // * Setup - val resourceName = "team.perftest"; - val resourceGeneration = 1L; - val workerReplicaCount = 100; - val locustTest = prepareLocustTest(resourceName, workerReplicaCount, resourceGeneration); - - // Deploy CR - // Passing "null" to context is safe as it is not used in the "reconcile()" method - locustTestReconciler.reconcile(locustTest, null); - - // * Act - // Increase worker count - val updatedResourceGeneration = 2L; - val updatedWorkerReplicaCount = 37; - val updatedLocustTest = prepareLocustTest(resourceName, updatedWorkerReplicaCount, updatedResourceGeneration); - - // Update deployed CR - // Passing "null" to context is safe as it is not used in the "reconcile()" method - locustTestReconciler.reconcile(updatedLocustTest, null); - - // Get All Jobs created - val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - // Assert NOOP on update - assertThat(jobList.getItems().get(1).getSpec().getParallelism()).isEqualTo(workerReplicaCount); - - } - - @Test - @DisplayName("Functional: Reconcile - cleanup onDelete event") - void cleanupOnDeleteEvent() { - // * Setup - val expectedJobCount = 0; - val expectedServiceCount = 1; // 1 Because of the default kubernetes service remaining post deletion - val resourceName = "team.perftest"; - val expectedDefaultServiceName = "kubernetes"; - val locustTest = prepareLocustTest(resourceName); - - // Deploy CR - // Passing "null" to context is safe as it is not used in the "reconcile()" method - locustTestReconciler.reconcile(locustTest, null); - - // * Act - // Delete CR - // Passing "null" to context is safe as it is not used in the "cleanup()" method - 
locustTestReconciler.cleanup(locustTest, null); - - // Get All Jobs created - val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list(); - log.debug("Acquired Job list: {}", jobList); - - // Get All Services created - val serviceList = k8sTestClient.services().inNamespace(DEFAULT_NAMESPACE).list(); - log.debug("Acquired Service list: {}", serviceList); - - // * Assert - assertSoftly(softly -> { - // Assert master/worker jobs have been deleted - softly.assertThat(jobList.getItems().size()).isEqualTo(expectedJobCount); - - // Assert master service have been deleted - softly.assertThat(serviceList.getItems().size()).isEqualTo(expectedServiceCount); - softly.assertThat(serviceList.getItems().get(0).getMetadata().getName()).isEqualTo(expectedDefaultServiceName); - - }); - - } - -} diff --git a/src/test/java/com/locust/operator/controller/TestFixtures.java b/src/test/java/com/locust/operator/controller/TestFixtures.java deleted file mode 100644 index 8e98bcf2..00000000 --- a/src/test/java/com/locust/operator/controller/TestFixtures.java +++ /dev/null @@ -1,187 +0,0 @@ -package com.locust.operator.controller; - -import com.locust.operator.customresource.LocustTest; -import com.locust.operator.customresource.LocustTestSpec; -import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; -import io.fabric8.kubernetes.api.model.StatusDetails; -import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition; -import io.fabric8.kubernetes.client.Config; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.KubernetesClientBuilder; -import lombok.NoArgsConstructor; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import lombok.val; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import 
java.util.Map; - -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperationalMode.WORKER; -import static com.locust.operator.customresource.LocustTest.GROUP; -import static com.locust.operator.customresource.LocustTest.VERSION; -import static lombok.AccessLevel.PRIVATE; - -import static java.nio.charset.StandardCharsets.UTF_8; - -@Slf4j -@NoArgsConstructor(access = PRIVATE) -public class TestFixtures { - - public static final String CRD_FILE_PATH = "charts/locust-k8s-operator/crds/locust-test-crd.yaml"; - public static final String DEFAULT_API_VERSION = GROUP + "/" + VERSION; - public static final String KIND = "LocustTest"; - public static final String DEFAULT_SEED_COMMAND = "--locustfile src/demo.py"; - public static final String DEFAULT_TEST_IMAGE = "xlocust:latest"; - public static final String DEFAULT_IMAGE_PULL_POLICY = "IfNotPresent"; - public static final List DEFAULT_IMAGE_PULL_SECRETS = Collections.emptyList(); - public static final String DEFAULT_TEST_CONFIGMAP = "demo-test-configmap"; - public static final String DEFAULT_NAMESPACE = "default"; - public static final int REPLICAS = 50; - public static final long DEFAULT_CR_GENERATION = 1L; - public static final Map DEFAULT_MASTER_LABELS = Map.of("role", "master"); - public static final Map DEFAULT_WORKER_LABELS = Map.of("role", "worker"); - public static final Map DEFAULT_MASTER_ANNOTATIONS = Map.of("locust.io/role", "master"); - public static final Map DEFAULT_WORKER_ANNOTATIONS = new HashMap<>(); - - @SneakyThrows - public static CustomResourceDefinition prepareCustomResourceDefinition(KubernetesClient k8sClient) { - - return loadCrdFile(Paths.get(CRD_FILE_PATH), k8sClient); - } - - private static CustomResourceDefinition loadCrdFile(Path path, KubernetesClient k8sClient) throws IOException { - - // Purge HELM specific lines from CRD file - ByteArrayInputStream inputStream = removeHelmSpecialLines(path); - - // Load CRD - return 
k8sClient.apiextensions().v1() - .customResourceDefinitions() - .load(inputStream) - .item(); - } - - /** - * Removes HELM condition that is not supported when loading the CRD for the component tests. - * - * @param path Path to the CRD file - * @return Processed file content - */ - private static ByteArrayInputStream removeHelmSpecialLines(Path path) throws IOException { - - StringBuilder stringBuilder = new StringBuilder(); - Files.lines(path).filter(line -> !line.startsWith("{{")) - .map(line -> line + "\n") - .forEach(stringBuilder::append); - return new ByteArrayInputStream(stringBuilder.toString().getBytes(UTF_8)); - } - - public static CustomResourceDefinition createCrd(CustomResourceDefinition crd, KubernetesClient k8sClient) { - return k8sClient.apiextensions().v1().customResourceDefinitions().resource(crd).create(); - } - - public static List deleteLocustTestCrd(KubernetesClient k8sClient) { - - log.debug("Deleting LocustTest CRD instances"); - - val crdClient = k8sClient.apiextensions().v1().customResourceDefinitions().withName("locusttests.locust.io"); - return crdClient.delete(); - } - - public static LocustTest prepareLocustTest(String resourceName) { - - return prepareLocustTest(resourceName, REPLICAS, DEFAULT_CR_GENERATION); - - } - - public static LocustTest prepareLocustTest(String resourceName, Integer replicas, Long generation) { - - var locustTest = new LocustTest(); - - // API version - locustTest.setApiVersion(DEFAULT_API_VERSION); - - // Kind - locustTest.setKind(KIND); - - // Metadata - locustTest.setMetadata(new ObjectMetaBuilder() - .withName(resourceName) - .withNamespace(DEFAULT_NAMESPACE) - .withGeneration(generation) - .build()); - - // Spec - var spec = new LocustTestSpec(); - spec.setMasterCommandSeed(DEFAULT_SEED_COMMAND); - spec.setWorkerCommandSeed(DEFAULT_SEED_COMMAND); - spec.setConfigMap(DEFAULT_TEST_CONFIGMAP); - spec.setImage(DEFAULT_TEST_IMAGE); - spec.setImagePullPolicy(DEFAULT_IMAGE_PULL_POLICY); - 
spec.setImagePullSecrets(DEFAULT_IMAGE_PULL_SECRETS); - spec.setWorkerReplicas(replicas); - - var labels = new HashMap>(); - labels.put(MASTER.getMode(), DEFAULT_MASTER_LABELS); - labels.put(WORKER.getMode(), DEFAULT_WORKER_LABELS); - spec.setLabels(labels); - - var annotations = new HashMap>(); - annotations.put(MASTER.getMode(), DEFAULT_MASTER_ANNOTATIONS); - annotations.put(WORKER.getMode(), DEFAULT_WORKER_ANNOTATIONS); - spec.setAnnotations(annotations); - - locustTest.setSpec(spec); - log.debug("Created resource object:\n{}", locustTest); - - return locustTest; - - } - - /** - * Creates a new instance of KubernetesClient using the provided YAML configuration. - *

- * This method uses the KubernetesClientBuilder to create a new KubernetesClient. The builder is configured with a Config object, which - * is created from the provided YAML configuration using the Config.fromKubeconfig method. - * - * @param configYaml A string representing the Kubernetes configuration in YAML format. - * @return A new instance of KubernetesClient configured according to the provided YAML configuration. - */ - public static KubernetesClient creatKubernetesClient(String configYaml) { - // Instantiate a KubernetesClientBuilder, configure it with the provided YAML configuration - return new KubernetesClientBuilder(). - withConfig(Config.fromKubeconfig(configYaml)) - .build(); - } - - /** - * Prepares and creates a Custom Resource Definition (CRD) in the Kubernetes cluster associated with the provided client. - *

- * This method first prepares a CRD using the prepareCustomResourceDefinition method. It then creates that CRD in the Kubernetes cluster - * using the createCrd method. Both of these methods use the provided KubernetesClient to interact with the Kubernetes API. - * After the CRD is created, it logs the details of the created CRD and returns it. - * - * @param testClient The KubernetesClient to use when interacting with the Kubernetes server API. - * @return The created CustomResourceDefinition. - */ - public static CustomResourceDefinition setupCustomResourceDefinition(KubernetesClient testClient) { - // Prepare and create the Custom Resource Definition - val expectedCrd = prepareCustomResourceDefinition(testClient); - - // Create the Custom Resource Definition - val crd = createCrd(expectedCrd, testClient); - - // Log and return the created CRD - log.debug("Created CRD details: {}", crd); - return crd; - } - -} diff --git a/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java b/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java deleted file mode 100644 index b9cf41e8..00000000 --- a/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java +++ /dev/null @@ -1,86 +0,0 @@ -package com.locust.operator.controller.utils; - -import com.locust.operator.controller.config.SysConfig; -import lombok.val; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import static com.locust.operator.controller.TestFixtures.prepareLocustTest; -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperationalMode.WORKER; -import static 
com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET; -import static com.locust.operator.controller.utils.TestFixtures.assertNodeConfig; -import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@ExtendWith(MockitoExtension.class) -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@MockitoSettings(strictness = Strictness.LENIENT) -public class LoadGenHelpersTests { - - @Test - @DisplayName("Functional: Master node configuration") - void masterLoadConfigGeneration() { - - // * Setup - final SysConfig config = mock(SysConfig.class); - setupSysconfigMock(config); - final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config); - val resourceName = "qe.performanceTest"; - val operationalMode = MASTER; - val locustTest = prepareLocustTest(resourceName); - - // * Act - val generatedNodeConfig = loadGenHelpers.generateLoadGenNodeObject(locustTest, operationalMode); - - // * Assert - assertNodeConfig(locustTest, generatedNodeConfig, operationalMode); - - } - - @Test - @DisplayName("Functional: Worker node configuration") - void workerLoadConfigGeneration() { - - // * Setup - final SysConfig config = mock(SysConfig.class); - setupSysconfigMock(config); - final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config); - val resourceName = "eq.test"; - val operationalMode = WORKER; - val locustTest = prepareLocustTest(resourceName); - - // * Act - val generatedNodeConfig = loadGenHelpers.generateLoadGenNodeObject(locustTest, operationalMode); - - // * Assert - assertNodeConfig(locustTest, generatedNodeConfig, operationalMode); - - } - - @Test - @DisplayName("Functional: Unbound CPU limit configuration") - void unboundCpuLimitConfiguration() { - - // * Setup - final SysConfig config = mock(SysConfig.class); - setupSysconfigMock(config); - when(config.getPodCpuLimit()).thenReturn(""); - 
final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config); - - // * Act - val resourceRequirements = loadGenHelpers.getResourceRequirements(DEFAULT_RESOURCE_TARGET); - - // * Assert - assertFalse(resourceRequirements.getLimits().containsKey("cpu"), "CPU limit should not be set when the config value is blank"); - - } - -} diff --git a/src/test/java/com/locust/operator/controller/utils/TestFixtures.java b/src/test/java/com/locust/operator/controller/utils/TestFixtures.java deleted file mode 100644 index c5bb3242..00000000 --- a/src/test/java/com/locust/operator/controller/utils/TestFixtures.java +++ /dev/null @@ -1,408 +0,0 @@ -package com.locust.operator.controller.utils; - -import com.locust.operator.controller.config.SysConfig; -import com.locust.operator.controller.dto.LoadGenerationNode; -import com.locust.operator.controller.dto.MetricsExporterContainer; -import com.locust.operator.controller.dto.OperationalMode; -import com.locust.operator.customresource.LocustTest; -import com.locust.operator.customresource.internaldto.LocustTestAffinity; -import com.locust.operator.customresource.internaldto.LocustTestNodeAffinity; -import com.locust.operator.customresource.internaldto.LocustTestToleration; -import io.fabric8.kubernetes.api.model.KubernetesResourceList; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.NamespaceBuilder; -import io.fabric8.kubernetes.api.model.PodList; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirements; -import io.fabric8.kubernetes.api.model.ServiceList; -import io.fabric8.kubernetes.api.model.batch.v1.JobList; -import io.fabric8.kubernetes.client.KubernetesClient; -import lombok.NoArgsConstructor; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import lombok.val; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - 
-import static com.github.stefanbirkner.systemlambda.SystemLambda.withEnvironmentVariable; -import static com.locust.operator.controller.TestFixtures.REPLICAS; -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.dto.OperatorType.EQUAL; -import static com.locust.operator.controller.utils.Constants.CONTAINER_ARGS_SEPARATOR; -import static com.locust.operator.controller.utils.Constants.EXPORTER_CONTAINER_NAME; -import static com.locust.operator.controller.utils.Constants.KAFKA_BOOTSTRAP_SERVERS; -import static com.locust.operator.controller.utils.Constants.KAFKA_PASSWORD; -import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_JAAS_CONFIG; -import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_MECHANISM; -import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_ENABLED; -import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_PROTOCOL_CONFIG; -import static com.locust.operator.controller.utils.Constants.KAFKA_USERNAME; -import static lombok.AccessLevel.PRIVATE; -import static org.assertj.core.api.SoftAssertions.assertSoftly; -import static org.mockito.Mockito.when; - -@Slf4j -@NoArgsConstructor(access = PRIVATE) -public class TestFixtures { - - public static final List DEFAULT_MASTER_PORT_LIST = List.of(5557, 5558, 8089); - public static final List DEFAULT_WORKER_PORT_LIST = List.of(8080); - public static final Integer MASTER_REPLICA_COUNT = 1; - public static final String DEFAULT_SEED_COMMAND = "--locustfile src/GQ/src/demo.py"; - public static final String DEFAULT_TEST_IMAGE = "xlocust:latest"; - public static final String DEFAULT_METRICS_IMAGE = "containersol/locust_exporter:v0.5.0"; - public static final int EXPECTED_GENERIC_RESOURCE_COUNT = 1; - public static final int EXPECTED_SERVICE_RESOURCE_COUNT = 2; - public static final String K8S_SERVER_URL_ENV_VAR = "KUBERNETES_MASTER"; - public static final String 
MOCK_KAFKA_BOOTSTRAP_VALUE = "localhost:9092"; - public static final boolean MOCK_SECURITY_VALUE = true; - public static final boolean MOCK_AFFINITY_INJECTION_VALUE = true; - public static final boolean MOCK_TOLERATION_INJECTION_VALUE = true; - public static final String MOCK_SECURITY_PROTOCOL_VALUE = "SASL_PLAINTEXT"; - public static final String MOCK_SASL_MECHANISM_VALUE = "SCRAM-SHA-512"; - public static final String MOCK_SASL_JAAS_CONFIG_VALUE = "placeholder"; - public static final String MOCK_USERNAME = "localKafkaUser"; - public static final String MOCK_PASSWORD = "localKafkaPassword"; - public static final String MOCK_POD_MEM = "1024Mi"; - public static final String MOCK_POD_CPU = "1000m"; - public static final String MOCK_POD_EPHEMERAL_STORAGE = "50M"; - public static final Integer MOCK_POD_PORT = 9646; - public static final Integer MOCK_TTL_SECONDS_AFTER_FINISHED = 60; - public static final Map DEFAULT_MASTER_LABELS = Map.of("role", "master"); - public static final Map DEFAULT_WORKER_LABELS = Map.of("role", "worker"); - public static final Map DEFAULT_MASTER_ANNOTATIONS = Map.of("locust.io/role", "master"); - public static final Map DEFAULT_WORKER_ANNOTATIONS = new HashMap<>(); - - public static void assertNodeConfig(LocustTest customResource, LoadGenerationNode generatedNodeConfig, - OperationalMode mode) { - - String expectedConfigName = customResource.getMetadata().getName().replace('.', '-'); - - Map expectedLabels = mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS; - - Map expectedAnnotations = mode.equals(MASTER) ? DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS; - - Integer expectedReplicas = mode.equals(MASTER) ? MASTER_REPLICA_COUNT : customResource.getSpec().getWorkerReplicas(); - - List expectedPortList = mode.equals(MASTER) ? 
DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST; - - assertSoftly(softly -> { - softly.assertThat(generatedNodeConfig.getName()).contains(expectedConfigName); - softly.assertThat(generatedNodeConfig.getLabels()).isEqualTo(expectedLabels); - softly.assertThat(generatedNodeConfig.getAnnotations()).isEqualTo(expectedAnnotations); - softly.assertThat(generatedNodeConfig.getTtlSecondsAfterFinished()).isEqualTo(MOCK_TTL_SECONDS_AFTER_FINISHED); - softly.assertThat(generatedNodeConfig.getOperationalMode()).isEqualTo(mode); - softly.assertThat(generatedNodeConfig.getPorts()).isEqualTo(expectedPortList); - softly.assertThat(generatedNodeConfig.getReplicas()).isEqualTo(expectedReplicas); - }); - } - - public static LoadGenerationNode prepareNodeConfig(String nodeName, OperationalMode mode) { - var nodeConfig = LoadGenerationNode.builder() - .name(nodeName) - .labels(mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS) - .annotations(mode.equals(MASTER) ? DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS) - .command(List.of(DEFAULT_SEED_COMMAND.split(CONTAINER_ARGS_SEPARATOR))) - .operationalMode(mode) - .image(DEFAULT_TEST_IMAGE) - .replicas(mode.equals(MASTER) ? MASTER_REPLICA_COUNT : REPLICAS) - .ports(mode.equals(MASTER) ? DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST) - .build(); - - log.debug("Created node configuration: {}", nodeConfig); - return nodeConfig; - } - - public static LoadGenerationNode prepareNodeConfigWithTtlSecondsAfterFinished( - String nodeName, OperationalMode mode, Integer ttlSecondsAfterFinished) { - var nodeConfig = LoadGenerationNode.builder() - .name(nodeName) - .labels(mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS) - .annotations(mode.equals(MASTER) ? 
DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS) - .ttlSecondsAfterFinished(ttlSecondsAfterFinished) - .command(List.of(DEFAULT_SEED_COMMAND.split(CONTAINER_ARGS_SEPARATOR))) - .operationalMode(mode) - .image(DEFAULT_TEST_IMAGE) - .replicas(mode.equals(MASTER) ? MASTER_REPLICA_COUNT : REPLICAS) - .ports(mode.equals(MASTER) ? DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST) - .build(); - - log.debug("Created node configuration: {}", nodeConfig); - return nodeConfig; - } - - public static LoadGenerationNode prepareNodeConfigWithNodeAffinity(String nodeName, OperationalMode mode, String affinityKey, - String affinityValue) { - - // Init instances - val nodeAffinity = new LocustTestNodeAffinity(); - val affinity = new LocustTestAffinity(); - val nodeConfig = prepareNodeConfig(nodeName, mode); - - // Set affinity - nodeAffinity.setRequiredDuringSchedulingIgnoredDuringExecution(Map.of(affinityKey, affinityValue)); - affinity.setNodeAffinity(nodeAffinity); - - // Push affinity config to object - nodeConfig.setAffinity(affinity); - log.debug("Created node configuration with nodeAffinity: {}", nodeConfig); - - return nodeConfig; - - } - - public static LoadGenerationNode prepareNodeConfigWithTolerations(String nodeName, OperationalMode mode, - LocustTestToleration toleration) { - - val nodeConfig = prepareNodeConfig(nodeName, mode); - nodeConfig.setTolerations(Collections.singletonList(toleration)); - - return nodeConfig; - - } - - public static LoadGenerationNode prepareNodeConfigWithPullPolicyAndSecrets( - String nodeName, OperationalMode mode, String pullPolicy, List pullSecrets) { - - val nodeConfig = prepareNodeConfig(nodeName, mode); - nodeConfig.setImagePullPolicy(pullPolicy); - nodeConfig.setImagePullSecrets(pullSecrets); - - return nodeConfig; - - } - - public static void assertK8sServiceCreation(String nodeName, ServiceList serviceList) { - assertK8sResourceCreation(nodeName, serviceList, EXPECTED_SERVICE_RESOURCE_COUNT); - } - - public static > void 
assertK8sResourceCreation(String nodeName, T resourceList) { - assertK8sResourceCreation(nodeName, resourceList, EXPECTED_GENERIC_RESOURCE_COUNT); - } - - private static > void assertK8sResourceCreation(String nodeName, T resourceList, - int expectedResourceCount) { - val resourceNamesList = extractNames(resourceList); - log.debug("Acquired resource list: {}", resourceNamesList); - - assertSoftly(softly -> { - softly.assertThat(resourceList.getItems().size()).isEqualTo(expectedResourceCount); - softly.assertThat(resourceNamesList).contains(nodeName); - }); - } - - private static > List extractNames(T resourceList) { - return resourceList.getItems().stream() - .map(item -> item.getMetadata().getName()) - .collect(Collectors.toList()); - } - - public static void createNamespace(KubernetesClient testClient, String namespace) { - - testClient.namespaces() - .resource(new NamespaceBuilder() - .withNewMetadata() - .withName(namespace) - .endMetadata() - .build()) - .serverSideApply(); - } - - public static void assertImagePullData(LoadGenerationNode nodeConfig, PodList podList) { - - podList.getItems().forEach(pod -> { - final List references = pod.getSpec() - .getImagePullSecrets() - .stream() - .map(LocalObjectReference::getName) - .toList(); - - assertSoftly(softly -> softly.assertThat(references).isEqualTo(nodeConfig.getImagePullSecrets())); - - pod.getSpec() - .getContainers() - .forEach(container -> assertSoftly( - softly -> softly.assertThat(container.getImagePullPolicy()).isEqualTo(nodeConfig.getImagePullPolicy()))); - }); - } - - public static void assertK8sTtlSecondsAfterFinished(JobList jobList, Integer ttlSecondsAfterFinished) { - jobList.getItems().forEach(job -> { - val actualTtlSecondsAfterFinished = job.getSpec().getTtlSecondsAfterFinished(); - assertSoftly(softly -> softly.assertThat(actualTtlSecondsAfterFinished).isEqualTo(ttlSecondsAfterFinished)); - }); - } - - public static void assertK8sNodeAffinity(LoadGenerationNode nodeConfig, JobList jobList, 
String k8sNodeLabelKey) { - - jobList.getItems().forEach(job -> { - val nodeSelectorTerms = job.getSpec().getTemplate().getSpec().getAffinity().getNodeAffinity() - .getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms(); - - nodeSelectorTerms.forEach(selectorTerm -> { - val actualSelectorKey = selectorTerm.getMatchExpressions().get(0).getKey(); - val actualSelectorValue = selectorTerm.getMatchExpressions().get(0).getValues().get(0); - val desiredSelectorValue = nodeConfig.getAffinity().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution() - .get(k8sNodeLabelKey); - - assertSoftly(softly -> { - softly.assertThat(actualSelectorKey).isEqualTo(k8sNodeLabelKey); - softly.assertThat(actualSelectorValue).isEqualTo(desiredSelectorValue); - }); - }); - - }); - - } - - public static void assertK8sTolerations(JobList jobList, LocustTestToleration expectedToleration) { - - jobList.getItems().forEach(job -> { - val actualTolerations = job.getSpec().getTemplate().getSpec().getTolerations(); - - assertSoftly(softly -> { - softly.assertThat(actualTolerations.get(0).getKey()).isEqualTo(expectedToleration.getKey()); - softly.assertThat(actualTolerations.get(0).getEffect()).isEqualTo(expectedToleration.getEffect()); - softly.assertThat(actualTolerations.get(0).getOperator()).isEqualTo(expectedToleration.getOperator()); - - if (expectedToleration.getOperator().equals(EQUAL.getType())) { - softly.assertThat(actualTolerations.get(0).getValue()).isEqualTo(expectedToleration.getValue()); - } - }); - - }); - - } - - /** - * Method to run `runnable` methods while injection the "KUBERNETES_MASTER" in the run environment. This is required as the core methods - * uses an internally created k8s client that searches for configuration in a specific order. Injecting the environment variable this - * way allows the internal client to connect to the mock server. 
- * - * @param mockServerUrl Mock server URL - * @param runnable Runnable object to run - */ - @SneakyThrows - public static void executeWithK8sMockServer(String mockServerUrl, Runnable runnable) { - - withEnvironmentVariable(K8S_SERVER_URL_ENV_VAR, mockServerUrl) - .execute(runnable::run); - - } - - public static Map containerEnvironmentMap() { - HashMap environmentMap = new HashMap<>(); - - environmentMap.put(KAFKA_BOOTSTRAP_SERVERS, MOCK_KAFKA_BOOTSTRAP_VALUE); - environmentMap.put(KAFKA_SECURITY_ENABLED, String.valueOf(MOCK_SECURITY_VALUE)); - environmentMap.put(KAFKA_SECURITY_PROTOCOL_CONFIG, MOCK_SECURITY_PROTOCOL_VALUE); - environmentMap.put(KAFKA_SASL_MECHANISM, MOCK_SASL_MECHANISM_VALUE); - environmentMap.put(KAFKA_SASL_JAAS_CONFIG, MOCK_SASL_JAAS_CONFIG_VALUE); - environmentMap.put(KAFKA_USERNAME, MOCK_USERNAME); - environmentMap.put(KAFKA_PASSWORD, MOCK_PASSWORD); - - return environmentMap; - - } - - public static MetricsExporterContainer mockMetricsExporterContainer() { - - // Set Resource overrides - Map resourceOverrideMap = new HashMap<>(); - - resourceOverrideMap.put("memory", new Quantity(MOCK_POD_MEM)); - resourceOverrideMap.put("cpu", new Quantity(MOCK_POD_CPU)); - resourceOverrideMap.put("ephemeral-storage", new Quantity(MOCK_POD_EPHEMERAL_STORAGE)); - - // Construct resource request - final var mockResourceRequest = new ResourceRequirements(); - - mockResourceRequest.setRequests(resourceOverrideMap); - mockResourceRequest.setLimits(resourceOverrideMap); - - return new MetricsExporterContainer( - EXPORTER_CONTAINER_NAME, - "containersol/locust_exporter:v0.5.0", - "Always", - 9646, - mockResourceRequest - - ); - } - - public static void setupSysconfigMock(SysConfig mockedConfInstance) { - - // Kafka - when(mockedConfInstance.getKafkaBootstrapServers()) - .thenReturn(MOCK_KAFKA_BOOTSTRAP_VALUE); - when(mockedConfInstance.isKafkaSecurityEnabled()) - .thenReturn(MOCK_SECURITY_VALUE); - when(mockedConfInstance.getKafkaSecurityProtocol()) - 
.thenReturn(MOCK_SECURITY_PROTOCOL_VALUE); - when(mockedConfInstance.getKafkaUsername()) - .thenReturn(MOCK_USERNAME); - when(mockedConfInstance.getKafkaUserPassword()) - .thenReturn(MOCK_PASSWORD); - when(mockedConfInstance.getKafkaSaslMechanism()) - .thenReturn(MOCK_SASL_MECHANISM_VALUE); - when(mockedConfInstance.getKafkaSaslJaasConfig()) - .thenReturn(MOCK_SASL_JAAS_CONFIG_VALUE); - - // Resource request :: Load generation node - when(mockedConfInstance.getPodMemRequest()) - .thenReturn(MOCK_POD_MEM); - when(mockedConfInstance.getPodCpuRequest()) - .thenReturn(MOCK_POD_CPU); - when(mockedConfInstance.getPodEphemeralStorageRequest()) - .thenReturn(MOCK_POD_EPHEMERAL_STORAGE); - - // Resource request :: Metrics exporter - when(mockedConfInstance.getMetricsExporterMemRequest()) - .thenReturn(MOCK_POD_MEM); - when(mockedConfInstance.getMetricsExporterCpuRequest()) - .thenReturn(MOCK_POD_CPU); - when(mockedConfInstance.getMetricsExporterEphemeralStorageRequest()) - .thenReturn(MOCK_POD_EPHEMERAL_STORAGE); - - // Port binding :: Metrics exporter - when(mockedConfInstance.getMetricsExporterPort()) - .thenReturn(MOCK_POD_PORT); - - // Image :: Metrics exporter - when(mockedConfInstance.getMetricsExporterImage()) - .thenReturn(DEFAULT_METRICS_IMAGE); - - // Job characteristics - when(mockedConfInstance.getTtlSecondsAfterFinished()) - .thenReturn(MOCK_TTL_SECONDS_AFTER_FINISHED); - - // Resource limit :: Load generation node - when(mockedConfInstance.getPodMemLimit()) - .thenReturn(MOCK_POD_MEM); - when(mockedConfInstance.getPodCpuLimit()) - .thenReturn(MOCK_POD_CPU); - when(mockedConfInstance.getPodEphemeralStorageLimit()) - .thenReturn(MOCK_POD_EPHEMERAL_STORAGE); - - // Resource limit :: Metrics exporter - when(mockedConfInstance.getMetricsExporterMemLimit()) - .thenReturn(MOCK_POD_MEM); - when(mockedConfInstance.getMetricsExporterCpuLimit()) - .thenReturn(MOCK_POD_CPU); - when(mockedConfInstance.getMetricsExporterEphemeralStorageLimit()) - 
.thenReturn(MOCK_POD_EPHEMERAL_STORAGE); - - // Affinity - when(mockedConfInstance.isAffinityCrInjectionEnabled()) - .thenReturn(MOCK_AFFINITY_INJECTION_VALUE); - - // Taints Toleration - when(mockedConfInstance.isTolerationsCrInjectionEnabled()) - .thenReturn(MOCK_TOLERATION_INJECTION_VALUE); - } - -} diff --git a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java b/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java deleted file mode 100644 index 8ec4ead3..00000000 --- a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java +++ /dev/null @@ -1,271 +0,0 @@ -package com.locust.operator.controller.utils.resource.manage; - -import com.locust.operator.controller.utils.LoadGenHelpers; -import com.locust.operator.customresource.internaldto.LocustTestToleration; -import io.fabric8.kubeapitest.junit.EnableKubeAPIServer; -import io.fabric8.kubeapitest.junit.KubeConfig; -import io.fabric8.kubernetes.client.KubernetesClient; -import lombok.extern.slf4j.Slf4j; -import lombok.val; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -import java.util.List; - -import static com.locust.operator.controller.TestFixtures.creatKubernetesClient; -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.utils.TestFixtures.assertImagePullData; -import static com.locust.operator.controller.utils.TestFixtures.assertK8sNodeAffinity; -import static com.locust.operator.controller.utils.TestFixtures.assertK8sResourceCreation; -import static com.locust.operator.controller.utils.TestFixtures.assertK8sServiceCreation; -import static com.locust.operator.controller.utils.TestFixtures.assertK8sTolerations; -import 
static com.locust.operator.controller.utils.TestFixtures.assertK8sTtlSecondsAfterFinished; -import static com.locust.operator.controller.utils.TestFixtures.containerEnvironmentMap; -import static com.locust.operator.controller.utils.TestFixtures.createNamespace; -import static com.locust.operator.controller.utils.TestFixtures.mockMetricsExporterContainer; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfig; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithNodeAffinity; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithPullPolicyAndSecrets; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithTolerations; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithTtlSecondsAfterFinished; -import static org.mockito.Mockito.when; - -@Slf4j -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@EnableKubeAPIServer(updateKubeConfigFile = true) -public class ResourceCreationManagerTests { - - @Mock - private LoadGenHelpers loadGenHelpers; - private ResourceCreationManager CreationManager; - - @KubeConfig - static String configYaml; - - KubernetesClient testClient; - - @BeforeAll - void setupMethodMock() { - - MockitoAnnotations.openMocks(this); - var creationHelper = new ResourceCreationHelpers(loadGenHelpers); - CreationManager = new ResourceCreationManager(creationHelper); - when(loadGenHelpers.generateContainerEnvironmentMap()) - .thenReturn(containerEnvironmentMap()); - when(loadGenHelpers.constructMetricsExporterContainer()) - .thenReturn(mockMetricsExporterContainer()); - - testClient = creatKubernetesClient(configYaml); - } - - @Test - @DisplayName("Functional: Create a kubernetes Job") - void createJobTest() { - - // * Setup - val namespace = "default"; - val nodeName = "mnt-demo-test"; - val resourceName = "mnt.demo-test"; - val nodeConfig = prepareNodeConfig(nodeName, MASTER); - - // * Act - 
CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sResourceCreation(nodeName, jobList); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Service") - void createServiceTest() { - - // * Setup - val namespace = "default"; - val nodeName = "act-kafka-test"; - val nodeConfig = prepareNodeConfig(nodeName, MASTER); - - // * Act - CreationManager.createMasterService(nodeConfig, namespace); - - // Get All Services created by the method - val serviceList = testClient.services().inNamespace(namespace).list(); - log.debug("Acquired Service list: {}", serviceList); - - // * Assert - assertK8sServiceCreation(nodeName, serviceList); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with Default TTL Seconds After Finished") - void createJobWithDefaultTtlSecondsAfterFinishedTest() { - - // * Setup - val namespace = "ttl-ns"; - val nodeName = "ttl-demo-test"; - val resourceName = "ttl.demo-test"; - final Integer defaultTtlSecondsAfterFinished = null; - val nodeConfig = prepareNodeConfigWithTtlSecondsAfterFinished(nodeName, MASTER, defaultTtlSecondsAfterFinished); - - // * Act - CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sTtlSecondsAfterFinished(jobList, defaultTtlSecondsAfterFinished); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with TTL Seconds After Finished") - void createJobWithTtlSecondsAfterFinishedTest() { - - // * Setup - val namespace = "ttl-ns"; - val nodeName = "ttl-demo-test"; - val resourceName = "ttl.demo-test"; - val ttlSecondsAfterFinished = Integer.valueOf(120); - val nodeConfig = 
prepareNodeConfigWithTtlSecondsAfterFinished(nodeName, MASTER, ttlSecondsAfterFinished); - - // * Act - CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sTtlSecondsAfterFinished(jobList, ttlSecondsAfterFinished); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with Node Affinity") - void createJobWithNodeAffinityTest() { - - // * Setup - val namespace = "node-affinity"; - val nodeName = "locust-demo-test"; - val resourceName = "locust.demo-test"; - val k8sNodeLabelKey = "organisation.com/nodeLabel"; - val k8sNodeLabelValue = "performance-nodes"; - val nodeConfig = prepareNodeConfigWithNodeAffinity(nodeName, MASTER, k8sNodeLabelKey, k8sNodeLabelValue); - - // Create test namespace - createNamespace(testClient, namespace); - - // * Act - CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sResourceCreation(nodeName, jobList); - assertK8sNodeAffinity(nodeConfig, jobList, k8sNodeLabelKey); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with Tolerations and Toleration Operator set to Equal") - void createJobWithTolerationsAndOperatorEqualTest() { - // * Setup - val namespace = "taint-toleration-equal"; - val nodeName = "locust-demo-test"; - val resourceName = "locust.demo-test"; - - // Toleration - val tolerationKey = "taintA"; - val tolerationEffect = "NoSchedule"; - val tolerationEqualOperator = "Equal"; - val tolerationValue = "dedicatedToPerformance"; - - // Create test namespace - createNamespace(testClient, namespace); - - val toleration = new LocustTestToleration(tolerationKey, tolerationEqualOperator, 
tolerationValue, tolerationEffect); - val nodeConfig = prepareNodeConfigWithTolerations(nodeName, MASTER, toleration); - - // * Act - CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sResourceCreation(nodeName, jobList); - assertK8sTolerations(jobList, toleration); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with Tolerations and Toleration Operator set to Exists") - void createJobWithTolerationsAndOperatorExistsTest() { - // * Setup - val namespace = "taint-toleration-exists"; - val nodeName = "locust-demo-test"; - val resourceName = "locust.demo-test"; - - // Toleration - val tolerationKey = "taintA"; - val tolerationEffect = "NoSchedule"; - val tolerationEqualOperator = "Exists"; - - val toleration = new LocustTestToleration(tolerationKey, tolerationEqualOperator, null, tolerationEffect); - val nodeConfig = prepareNodeConfigWithTolerations(nodeName, MASTER, toleration); - - // Create test namespace - createNamespace(testClient, namespace); - - // * Act - CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertK8sResourceCreation(nodeName, jobList); - assertK8sTolerations(jobList, toleration); - - } - - @Test - @DisplayName("Functional: Create a kubernetes Job with Pod image pull policy and secrets") - void createJobWithPodImagePullPolicyAndSecrets() { - - // * Setup - val namespace = "default"; - val nodeName = "mnt-demo-test"; - val resourceName = "mnt.demo-test"; - val nodeConfig = prepareNodeConfigWithPullPolicyAndSecrets( - nodeName, MASTER, "Always", List.of("my-private-registry-secret", "gcr-cred-secret") - ); - - // * Act - 
CreationManager.createJob(nodeConfig, namespace, resourceName); - - // Get All Pods created by the method - val podList = testClient.pods().inNamespace(namespace).list(); - log.debug("Acquired Pod list: {}", podList); - - // * Assert - assertImagePullData(nodeConfig, podList); - - } - -} diff --git a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java b/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java deleted file mode 100644 index 6d92b228..00000000 --- a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.locust.operator.controller.utils.resource.manage; - -import com.locust.operator.controller.config.SysConfig; -import com.locust.operator.controller.utils.LoadGenHelpers; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient; -import lombok.extern.slf4j.Slf4j; -import lombok.val; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.junit.jupiter.MockitoExtension; - -import java.util.Collections; - -import static com.locust.operator.controller.TestFixtures.prepareLocustTest; -import static com.locust.operator.controller.dto.OperationalMode.MASTER; -import static com.locust.operator.controller.utils.TestFixtures.executeWithK8sMockServer; -import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfig; -import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock; -import static org.assertj.core.api.AssertionsForClassTypes.assertThat; - -@Slf4j 
-@ExtendWith(MockitoExtension.class) -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@EnableKubernetesMockClient(https = false, crud = true) -public class ResourceDeletionManagerTests { - - @Mock - private SysConfig sysConfig; - private ResourceCreationManager creationManager; - private ResourceDeletionManager deletionManager; - - String k8sServerUrl; - KubernetesClient testClient; - - @BeforeAll - void setupMethodMock() { - - MockitoAnnotations.openMocks(this); - var loadGenHelpers = new LoadGenHelpers(sysConfig); - var creationHelper = new ResourceCreationHelpers(loadGenHelpers); - creationManager = new ResourceCreationManager(creationHelper); - deletionManager = new ResourceDeletionManager(loadGenHelpers); - setupSysconfigMock(sysConfig); - - } - - @BeforeEach - void setup() { - k8sServerUrl = testClient.getMasterUrl().toString(); - } - - @Test - @DisplayName("Functional: Delete a kubernetes Job") - void deleteJobTest() { - - // * Setup - val namespace = "default"; - val nodeName = "mnt-demo-test-master"; - val resourceName = "mnt.demo-test"; - val nodeConfig = prepareNodeConfig(nodeName, MASTER); - val locustTest = prepareLocustTest(resourceName); - - // * Act - executeWithK8sMockServer(k8sServerUrl, () -> creationManager.createJob(nodeConfig, namespace, resourceName)); - executeWithK8sMockServer(k8sServerUrl, () -> deletionManager.deleteJob(locustTest, MASTER)); - - // Get All Jobs created by the method - val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list(); - log.debug("Acquired Job list: {}", jobList); - - // * Assert - assertThat(jobList.getItems().size()).isEqualTo(0); - - } - - @Test - @DisplayName("Functional: Check that when Job deletion fails, an empty list is returned.") - void deleteJobFailureReturnEmptyListTest() { - - // * Setup - val resourceName = "mnt.demo-test"; - val locustTest = prepareLocustTest(resourceName); - - // * Act - val deletedJobStatus = deletionManager.deleteJob(locustTest, 
MASTER).orElse(Collections.emptyList()); - - // * Assert - assertThat(deletedJobStatus.isEmpty()).isTrue(); - - } - - @Test - @DisplayName("Functional: Delete a kubernetes Service") - void deleteServiceTest() { - - // * Setup - val namespace = "default"; - val nodeName = "act-kafka-test-master"; - val resourceName = "act.kafka-test"; - val nodeConfig = prepareNodeConfig(nodeName, MASTER); - val locustTest = prepareLocustTest(resourceName); - - // * Act - executeWithK8sMockServer(k8sServerUrl, () -> creationManager.createMasterService(nodeConfig, namespace)); - executeWithK8sMockServer(k8sServerUrl, () -> deletionManager.deleteService(locustTest, MASTER)); - - // Get All Jobs created by the method - val serviceList = testClient.services().inNamespace(namespace).list(); - log.debug("Acquired Deployment list: {}", serviceList); - - // * Assert - assertThat(serviceList.getItems().size()).isEqualTo(0); - - } - - @Test - @DisplayName("Functional: Check that when Service deletion fails, empty list is returned") - void deleteServiceFailureReturnEmptyListTest() { - - // * Setup - val resourceName = "mnt.demo-test"; - val locustTest = prepareLocustTest(resourceName); - - // * Act - val deletedServiceStatus = deletionManager.deleteService(locustTest, MASTER).orElse(Collections.emptyList()); - - // * Assert - assertThat(deletedServiceStatus.isEmpty()).isTrue(); - - } - -} diff --git a/src/test/resources/logback-test.xml b/src/test/resources/logback-test.xml deleted file mode 100644 index 3f27c99f..00000000 --- a/src/test/resources/logback-test.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - false - - - %green(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1}): %msg%n%throwable - - - - - - - - - - - diff --git a/test/e2e/.gitkeep b/test/e2e/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/test/e2e/conversion/configmap.yaml b/test/e2e/conversion/configmap.yaml new file mode 100644 index 00000000..1312ea5c --- /dev/null +++ 
b/test/e2e/conversion/configmap.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: e2e-test-scripts + namespace: default +data: + locustfile.py: | + from locust import HttpUser, task, between + + class E2ETestUser(HttpUser): + wait_time = between(1, 2) + + @task + def hello(self): + self.client.get("/") diff --git a/test/e2e/conversion/run-e2e.sh b/test/e2e/conversion/run-e2e.sh new file mode 100755 index 00000000..e3289ebc --- /dev/null +++ b/test/e2e/conversion/run-e2e.sh @@ -0,0 +1,174 @@ +#!/bin/bash +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" + +echo -e "${YELLOW}=== E2E Conversion Webhook Tests ===${NC}" +echo "Project root: ${PROJECT_ROOT}" + +# Helper functions +pass() { + echo -e "${GREEN}βœ“ PASS:${NC} $1" +} + +fail() { + echo -e "${RED}βœ— FAIL:${NC} $1" + exit 1 +} + +info() { + echo -e "${YELLOW}β†’${NC} $1" +} + +# Verify prerequisites +info "Checking prerequisites..." +command -v kubectl >/dev/null 2>&1 || fail "kubectl not found" +command -v kind >/dev/null 2>&1 || fail "kind not found" + +# Verify cluster is running +kubectl cluster-info >/dev/null 2>&1 || fail "Kubernetes cluster not reachable" +pass "Cluster is reachable" + +# Verify operator is running +info "Checking operator deployment..." +kubectl wait --for=condition=Available deployment/locust-k8s-operator-controller-manager -n locust-k8s-operator-system --timeout=60s || fail "Operator not running" +pass "Operator is running" + +# Verify storage version +info "Verifying v2 is storage version..." 
+STORAGE_VERSION=$(kubectl get crd locusttests.locust.io -o jsonpath='{.spec.versions[?(@.storage==true)].name}') +if [ "$STORAGE_VERSION" != "v2" ]; then + fail "Storage version is '$STORAGE_VERSION', expected 'v2'" +fi +pass "v2 is storage version" + +# Cleanup any previous test resources +info "Cleaning up previous test resources..." +kubectl delete locusttest e2e-test-v1 e2e-test-v2 --ignore-not-found=true 2>/dev/null || true +kubectl delete configmap e2e-test-scripts --ignore-not-found=true 2>/dev/null || true +sleep 2 + +# Create ConfigMap +info "Creating test ConfigMap..." +kubectl apply -f "${SCRIPT_DIR}/configmap.yaml" +pass "ConfigMap created" + +echo "" +echo -e "${YELLOW}=== Test 1: Create v1 CR ===${NC}" +info "Creating v1 LocustTest..." +kubectl apply -f "${SCRIPT_DIR}/v1-cr.yaml" +sleep 3 + +# Verify v1 CR is created and can be read +V1_NAME=$(kubectl get locusttests.v1.locust.io e2e-test-v1 -o jsonpath='{.metadata.name}' 2>/dev/null || echo "") +if [ "$V1_NAME" != "e2e-test-v1" ]; then + fail "v1 CR not created properly" +fi +pass "v1 CR created successfully" + +echo "" +echo -e "${YELLOW}=== Test 2: Read v1 CR as v2 ===${NC}" +info "Reading v1 CR via v2 API..." 
+V2_IMAGE=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.image}' 2>/dev/null || echo "") +V2_WORKER_REPLICAS=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.worker.replicas}' 2>/dev/null || echo "") +V2_MASTER_CMD=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.master.command}' 2>/dev/null || echo "") + +if [ "$V2_IMAGE" != "locustio/locust:2.20.0" ]; then + fail "v2 image mismatch: got '$V2_IMAGE'" +fi +if [ "$V2_WORKER_REPLICAS" != "2" ]; then + fail "v2 worker.replicas mismatch: got '$V2_WORKER_REPLICAS'" +fi +if [ "$V2_MASTER_CMD" != "locust" ]; then + fail "v2 master.command mismatch: got '$V2_MASTER_CMD'" +fi +pass "v1β†’v2 conversion works correctly" + +echo "" +echo -e "${YELLOW}=== Test 3: Create v2 CR ===${NC}" +info "Creating v2 LocustTest..." +kubectl apply -f "${SCRIPT_DIR}/v2-cr.yaml" +sleep 3 + +V2_NAME=$(kubectl get locusttests.v2.locust.io e2e-test-v2 -o jsonpath='{.metadata.name}' 2>/dev/null || echo "") +if [ "$V2_NAME" != "e2e-test-v2" ]; then + fail "v2 CR not created properly" +fi +pass "v2 CR created successfully" + +echo "" +echo -e "${YELLOW}=== Test 4: Read v2 CR as v1 ===${NC}" +info "Reading v2 CR via v1 API..." 
+V1_IMAGE=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.image}' 2>/dev/null || echo "") +V1_WORKER_REPLICAS=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.workerReplicas}' 2>/dev/null || echo "") +V1_MASTER_CMD=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.masterCommandSeed}' 2>/dev/null || echo "") + +if [ "$V1_IMAGE" != "locustio/locust:2.20.0" ]; then + fail "v1 image mismatch: got '$V1_IMAGE'" +fi +if [ "$V1_WORKER_REPLICAS" != "3" ]; then + fail "v1 workerReplicas mismatch: got '$V1_WORKER_REPLICAS'" +fi +if [ "$V1_MASTER_CMD" != "locust" ]; then + fail "v1 masterCommandSeed mismatch: got '$V1_MASTER_CMD'" +fi +pass "v2β†’v1 conversion works correctly" + +echo "" +echo -e "${YELLOW}=== Test 5: Update v1 CR ===${NC}" +info "Updating v1 CR workerReplicas..." +kubectl patch locusttests.v1.locust.io e2e-test-v1 --type=merge -p '{"spec":{"workerReplicas":5}}' +sleep 2 + +# Verify update is reflected in v2 view +V2_UPDATED_REPLICAS=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.worker.replicas}' 2>/dev/null || echo "") +if [ "$V2_UPDATED_REPLICAS" != "5" ]; then + fail "v2 worker.replicas not updated: got '$V2_UPDATED_REPLICAS'" +fi +pass "v1 update reflected in v2 view" + +echo "" +echo -e "${YELLOW}=== Test 6: Verify Reconciler Creates Jobs ===${NC}" +info "Checking if reconciler created Jobs for e2e-test-v2..." 
+sleep 2 + +# Check jobs for e2e-test-v2 (created via v2 API, should have jobs) +MASTER_JOB=$(kubectl get jobs -l performance-test-pod-name=e2e-test-v2-master -o name 2>/dev/null | head -1) +WORKER_JOB=$(kubectl get jobs -l performance-test-pod-name=e2e-test-v2-worker -o name 2>/dev/null | head -1) + +if [ -z "$MASTER_JOB" ]; then + fail "Master Job not created for e2e-test-v2" +fi +if [ -z "$WORKER_JOB" ]; then + fail "Worker Job not created for e2e-test-v2" +fi +pass "Reconciler created Jobs from v2 resources" + +echo "" +echo -e "${YELLOW}=== Test 7: Verify Deprecation Warning ===${NC}" +info "Checking deprecation warning on v1 API..." +DEPRECATION_OUTPUT=$(kubectl get locusttests.v1.locust.io e2e-test-v1 2>&1) +if echo "$DEPRECATION_OUTPUT" | grep -q "deprecated"; then + pass "Deprecation warning shown for v1 API" +else + info "Note: Deprecation warning may not be visible in all kubectl versions" + pass "Deprecation warning test skipped (kubectl version dependent)" +fi + +# Cleanup +echo "" +info "Cleaning up test resources..." +kubectl delete locusttest e2e-test-v1 e2e-test-v2 --ignore-not-found=true 2>/dev/null || true +kubectl delete configmap e2e-test-scripts --ignore-not-found=true 2>/dev/null || true + +echo "" +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN} All E2E Conversion Tests PASSED! 
${NC}" +echo -e "${GREEN}========================================${NC}" diff --git a/test/e2e/conversion/v1-cr.yaml b/test/e2e/conversion/v1-cr.yaml new file mode 100644 index 00000000..64130a1c --- /dev/null +++ b/test/e2e/conversion/v1-cr.yaml @@ -0,0 +1,16 @@ +apiVersion: locust.io/v1 +kind: LocustTest +metadata: + name: e2e-test-v1 + namespace: default +spec: + masterCommandSeed: "locust" + workerCommandSeed: "locust" + workerReplicas: 2 + image: "locustio/locust:2.20.0" + configMap: "e2e-test-scripts" + labels: + master: + app: locust-master + worker: + app: locust-worker diff --git a/test/e2e/conversion/v2-cr.yaml b/test/e2e/conversion/v2-cr.yaml new file mode 100644 index 00000000..b062c627 --- /dev/null +++ b/test/e2e/conversion/v2-cr.yaml @@ -0,0 +1,19 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-v2 + namespace: default +spec: + image: "locustio/locust:2.20.0" + master: + command: "locust" + autostart: true + labels: + app: locust-master + worker: + command: "locust" + replicas: 3 + labels: + app: locust-worker + testFiles: + configMapRef: "e2e-test-scripts" diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 00000000..51575c6d --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +var ( + // Optional Environment Variables: + // - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup. + // These variables are useful if CertManager is already installed, avoiding + // re-installation and conflicts. + skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true" + // isCertManagerAlreadyInstalled will be set true when CertManager CRDs be found on the cluster + isCertManagerAlreadyInstalled = false + + // projectImage is the name of the image which will be build and loaded + // with the code source changes to be tested. + projectImage = "example.com/locust-k8s-operator:v0.0.1" +) + +// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated, +// temporary environment to validate project changes with the purposed to be used in CI jobs. +// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs +// CertManager. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting locust-k8s-operator integration test suite\n") + RunSpecs(t, "e2e suite") +} + +var _ = BeforeSuite(func() { + By("building the manager(Operator) image") + //nolint:gosec,lll // Test code with known safe projectImage + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage)) + _, err := utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image") + + // TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is + // built and available before running the tests. Also, remove the following block. 
+ By("loading the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectImage) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind") + + // The tests-e2e are intended to run on a temporary cluster that is created and destroyed for testing. + // To prevent errors when tests run in environments with CertManager already installed, + // we check for its presence before execution. + // Setup CertManager before the suite if not skipped and if not already installed + if !skipCertManagerInstall { + By("checking if cert manager is installed already") + isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled() + if !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n") + Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager") + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n") + } + } + + By("deploying the operator") + //nolint:gosec,lll // Test code with known safe projectImage + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to deploy the operator") + + By("waiting for the controller-manager deployment to be ready") + err = utils.WaitForControllerReady("locust-k8s-operator-system", "5m") + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Controller-manager deployment not ready") + + By("waiting for the webhook certificate to be ready") + err = utils.WaitForCertificateReady("locust-k8s-operator-system", "locust-k8s-operator-serving-cert", "2m") + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Webhook certificate not ready") + + By("waiting for the webhook service endpoint to be ready") + err = utils.WaitForWebhookReady("locust-k8s-operator-system", "locust-k8s-operator-webhook-service", "2m") + ExpectWithOffset(1, err).NotTo(HaveOccurred(), 
"Webhook service endpoint not ready") +}) + +var _ = AfterSuite(func() { + By("undeploying the operator") + cmd := exec.Command("make", "undeploy") + _, _ = utils.Run(cmd) + + // Teardown CertManager after the suite if not skipped and if it was not already installed + if !skipCertManagerInstall && !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n") + utils.UninstallCertManager() + } +}) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index 00000000..c31c604f --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,352 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +// namespace where the project is deployed in +const namespace = "locust-k8s-operator-system" + +// serviceAccountName created for the project +const serviceAccountName = "locust-k8s-operator-controller-manager" + +// metricsServiceName is the name of the metrics service of the project +const metricsServiceName = "locust-k8s-operator-controller-manager-metrics-service" + +// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data +const metricsRoleBindingName = "locust-k8s-operator-metrics-binding" + +var _ = Describe("Manager", Ordered, func() { + var controllerPodName string + + // Before running the tests, verify the controller is running. + // The actual deployment is handled in BeforeSuite (e2e_suite_test.go). + BeforeAll(func() { + By("verifying the namespace exists") + cmd := exec.Command("kubectl", "get", "ns", namespace) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Namespace should exist from BeforeSuite") + }) + + // After all tests have been executed, clean up test-specific resources. + // The operator undeployment is handled in AfterSuite (e2e_suite_test.go). + AfterAll(func() { + By("cleaning up the curl pod for metrics") + cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace, "--ignore-not-found=true") + _, _ = utils.Run(cmd) + }) + + // After each test, check for failures and collect logs, events, + // and pod descriptions for debugging. 
+ AfterEach(func() { + specReport := CurrentSpecReport() + if specReport.Failed() { + By("Fetching controller manager pod logs") + //nolint:gosec // Test code with validated pod name from test setup + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + controllerLogs, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) + } + + By("Fetching Kubernetes events") + cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") + eventsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) + } + + By("Fetching curl-metrics logs") + cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) + } + + By("Fetching controller manager pod description") + //nolint:gosec // Test code with validated pod name from test setup + cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) + podDescription, err := utils.Run(cmd) + if err == nil { + fmt.Println("Pod description:\n", podDescription) + } else { + fmt.Println("Failed to describe controller pod") + } + + By("Fetching LocustTest CRs") + cmd = exec.Command("kubectl", "get", "locusttest", "-n", namespace, "-o", "yaml") + output, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output) + } + + By("Fetching Jobs") + cmd = exec.Command("kubectl", "get", "jobs", "-n", namespace, "-o", "wide") + output, err = utils.Run(cmd) + if err == nil { + _, _ = 
fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output) + } + + By("Fetching Services") + cmd = exec.Command("kubectl", "get", "services", "-n", namespace) + output, err = utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Services:\n%s", output) + } + } + }) + + SetDefaultEventuallyTimeout(2 * time.Minute) + SetDefaultEventuallyPollingInterval(time.Second) + + Context("Manager", func() { + It("should run successfully", func() { + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get the name of the controller-manager pod + cmd := exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") + podNames := utils.GetNonEmptyLines(podOutput) + g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") + controllerPodName = podNames[0] + g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) + + // Validate the pod's status + //nolint:gosec // Test code with validated pod name from test setup + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") + } + Eventually(verifyControllerUp).Should(Succeed()) + }) + + It("should ensure the metrics endpoint is serving metrics", func() { + By("creating a ClusterRoleBinding for the service account to allow access to metrics") + // Delete existing binding if it exists (cleanup from previous runs) + //nolint:gosec // Test code with validated binding name from test setup + cleanupCmd := 
exec.Command("kubectl", "delete", "clusterrolebinding", metricsRoleBindingName, + "--ignore-not-found", + ) + _, _ = utils.Run(cleanupCmd) // Ignore errors - binding may not exist + + //nolint:gosec // Test code with validated namespace and service account from test setup + cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, + "--clusterrole=locust-k8s-operator-metrics-reader", + fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), + ) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") + + By("validating that the metrics service is available") + cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("getting the service account token") + token, err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("waiting for the metrics endpoint to be ready") + verifyMetricsEndpointReady := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready") + } + Eventually(verifyMetricsEndpointReady).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + //nolint:gosec // Test code with validated pod name from test setup + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("Starting metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted).Should(Succeed()) + + By("creating the curl-metrics pod to access the metrics endpoint") + //nolint:gosec // Test 
code with validated namespace and service account from test setup + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", + "--namespace", namespace, + "--image=curlimages/curl:latest", + "--overrides", + fmt.Sprintf(`{ + "spec": { + "containers": [{ + "name": "curl", + "image": "curlimages/curl:latest", + "command": ["/bin/sh", "-c"], + "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"], + "securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": ["ALL"] + }, + "runAsNonRoot": true, + "runAsUser": 1000, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }], + "serviceAccount": "%s" + } + }`, token, metricsServiceName, namespace, serviceAccountName)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", + "-o", "jsonpath={.status.phase}", + "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + metricsOutput := getMetricsOutput() + Expect(metricsOutput).To(ContainSubstring( + "controller_runtime_reconcile_total", + )) + + By("cleaning up test resources") + // Delete the curl-metrics pod + //nolint:gosec // Test code with validated namespace from test setup + cleanupCmd = exec.Command("kubectl", "delete", "pod", "curl-metrics", + "-n", namespace, + "--ignore-not-found", + ) + _, _ = utils.Run(cleanupCmd) + + // Delete the ClusterRoleBinding + //nolint:gosec // Test code with validated binding name from test setup + cleanupCmd = exec.Command("kubectl", "delete", "clusterrolebinding", metricsRoleBindingName, + "--ignore-not-found", + ) + _, _ = 
utils.Run(cleanupCmd) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. + // Consider applying sample/CR(s) and check their status and/or verifying + // the reconciliation by using the metrics, i.e.: + // metricsOutput := getMetricsOutput() + // Expect(metricsOutput).To(ContainSubstring( + // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, + // strings.ToLower(), + // )) + }) +}) + +// serviceAccountToken returns a token for the specified service account in the given namespace. +// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request +// and parsing the resulting token from the API response. +func serviceAccountToken() (string, error) { + //nolint:gosec // Not hardcoded credentials - this is a K8s API resource kind string + const tokenRequestRawString = `{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenRequest" + }` + + // Temporary file to store the token request + secretName := fmt.Sprintf("%s-token-request", serviceAccountName) + tokenRequestFile := filepath.Join("/tmp", secretName) + err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) + if err != nil { + return "", err + } + + var out string + verifyTokenCreation := func(g Gomega) { + // Execute kubectl command to create the token + //nolint:gosec // Test code with validated namespace and service account from test setup + cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( + "/api/v1/namespaces/%s/serviceaccounts/%s/token", + namespace, + serviceAccountName, + ), "-f", tokenRequestFile) + + output, err := cmd.CombinedOutput() + g.Expect(err).NotTo(HaveOccurred()) + + // Parse the JSON output to extract the token + var token tokenRequest + err = json.Unmarshal(output, &token) + g.Expect(err).NotTo(HaveOccurred()) + + out = token.Status.Token + } + 
Eventually(verifyTokenCreation).Should(Succeed()) + + return out, err +} + +// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. +func getMetricsOutput() string { + By("getting the curl-metrics logs") + cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + return metricsOutput +} + +// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, +// containing only the token field that we need to extract. +type tokenRequest struct { + Status struct { + Token string `json:"token"` + } `json:"status"` +} diff --git a/test/e2e/kind-config.yaml b/test/e2e/kind-config.yaml new file mode 100644 index 00000000..80a2901b --- /dev/null +++ b/test/e2e/kind-config.yaml @@ -0,0 +1,11 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +name: locust-webhook-test +nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" diff --git a/test/e2e/locusttest_e2e_test.go b/test/e2e/locusttest_e2e_test.go new file mode 100644 index 00000000..3aba129d --- /dev/null +++ b/test/e2e/locusttest_e2e_test.go @@ -0,0 +1,237 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "fmt" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +var _ = Describe("LocustTest", Ordered, func() { + const testNamespace = "locust-k8s-operator-system" + var testdataDir string + + BeforeAll(func() { + var err error + testdataDir, err = filepath.Abs("testdata") + Expect(err).NotTo(HaveOccurred()) + + By("applying test ConfigMaps") + _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml")) + Expect(err).NotTo(HaveOccurred(), "Failed to apply test ConfigMap") + + _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "env-configmap.yaml")) + Expect(err).NotTo(HaveOccurred(), "Failed to apply env ConfigMap") + }) + + AfterAll(func() { + By("cleaning up test ConfigMaps") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml")) + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "env-configmap.yaml")) + }) + + AfterEach(func() { + if CurrentSpecReport().Failed() { + By("Fetching LocustTest CRs on failure") + cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml") + output, _ := utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output) + + By("Fetching Jobs on failure") + cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "wide") + output, _ = utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output) + + By("Fetching Services on failure") + cmd = exec.Command("kubectl", "get", "services", "-n", testNamespace) + output, _ = utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "Services:\n%s", output) + } + }) + + Context("v2 API lifecycle", func() { + const crName = "e2e-test-basic" + + AfterAll(func() { + By("cleaning up basic LocustTest CR") + _, _ = 
utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + // Wait for cleanup + Eventually(func() bool { + return !utils.ResourceExists("locusttest", testNamespace, crName) + }, 30*time.Second, time.Second).Should(BeTrue()) + }) + + It("should create master Service on CR creation", func() { + By("applying the basic LocustTest CR") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for master Service") + Eventually(func() bool { + return utils.ResourceExists("service", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should create master Job on CR creation", func() { + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should create worker Job on CR creation", func() { + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-worker") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should set owner references on created resources", func() { + owner, err := utils.GetOwnerReferenceName("job", testNamespace, crName+"-master") + Expect(err).NotTo(HaveOccurred()) + Expect(owner).To(Equal(crName)) + + owner, err = utils.GetOwnerReferenceName("job", testNamespace, crName+"-worker") + Expect(err).NotTo(HaveOccurred()) + Expect(owner).To(Equal(crName)) + + owner, err = utils.GetOwnerReferenceName("service", testNamespace, crName+"-master") + Expect(err).NotTo(HaveOccurred()) + Expect(owner).To(Equal(crName)) + }) + + It("should update status phase", func() { + Eventually(func() string { + phase, _ := utils.GetResourceField("locusttest", testNamespace, crName, ".status.phase") + return phase + }, 60*time.Second, time.Second).Should(Or(Equal("Pending"), Equal("Running"))) + }) + + It("should clean up resources on CR deletion", func() { 
+ By("deleting the LocustTest CR") + _, err := utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("verifying Jobs are deleted") + Eventually(func() bool { + return !utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + + Eventually(func() bool { + return !utils.ResourceExists("job", testNamespace, crName+"-worker") + }, 60*time.Second, time.Second).Should(BeTrue()) + + By("verifying Service is deleted") + Eventually(func() bool { + return !utils.ResourceExists("service", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + }) + + Context("with environment injection", func() { + const crName = "e2e-test-env" + + AfterAll(func() { + By("cleaning up env LocustTest CR") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-env.yaml")) + Eventually(func() bool { + return !utils.ResourceExists("locusttest", testNamespace, crName) + }, 30*time.Second, time.Second).Should(BeTrue()) + }) + + It("should create resources with env configuration", func() { + By("applying LocustTest with env config") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-env.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for master Job") + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should inject ConfigMap env vars via envFrom", func() { + Eventually(func() string { + envFrom, _ := utils.GetJobEnvFrom(testNamespace, crName+"-master", crName+"-master") + return envFrom + }, 30*time.Second, time.Second).Should(ContainSubstring("e2e-env-configmap")) + }) + + It("should inject inline env variables", func() { + Eventually(func() string { + env, _ := utils.GetJobContainerEnv(testNamespace, crName+"-master", 
crName+"-master") + return env + }, 30*time.Second, time.Second).Should(ContainSubstring("E2E_TEST_VAR")) + }) + }) + + Context("with custom volumes", func() { + const crName = "e2e-test-volumes" + + AfterAll(func() { + By("cleaning up volumes LocustTest CR") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-volumes.yaml")) + Eventually(func() bool { + return !utils.ResourceExists("locusttest", testNamespace, crName) + }, 30*time.Second, time.Second).Should(BeTrue()) + }) + + It("should create resources with volume configuration", func() { + By("applying LocustTest with volumes") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-volumes.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for master Job") + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should mount volumes to master pod", func() { + Eventually(func() string { + volumes, _ := utils.GetJobVolumes(testNamespace, crName+"-master") + return volumes + }, 30*time.Second, time.Second).Should(ContainSubstring("test-data")) + + Eventually(func() string { + mounts, _ := utils.GetJobVolumeMounts(testNamespace, crName+"-master", crName+"-master") + return mounts + }, 30*time.Second, time.Second).Should(ContainSubstring("/data")) + }) + + It("should mount volumes to worker pods", func() { + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-worker") + }, 60*time.Second, time.Second).Should(BeTrue()) + + Eventually(func() string { + volumes, _ := utils.GetJobVolumes(testNamespace, crName+"-worker") + return volumes + }, 30*time.Second, time.Second).Should(ContainSubstring("test-data")) + + Eventually(func() string { + mounts, _ := utils.GetJobVolumeMounts(testNamespace, crName+"-worker", crName+"-worker") + return mounts + }, 30*time.Second, 
time.Second).Should(ContainSubstring("/data")) + }) + }) +}) diff --git a/test/e2e/otel_e2e_test.go b/test/e2e/otel_e2e_test.go new file mode 100644 index 00000000..7aae8658 --- /dev/null +++ b/test/e2e/otel_e2e_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +var _ = Describe("OpenTelemetry", Ordered, func() { + const testNamespace = "locust-k8s-operator-system" + const crName = "e2e-test-otel" + var testdataDir string + + BeforeAll(func() { + var err error + testdataDir, err = filepath.Abs("testdata") + Expect(err).NotTo(HaveOccurred()) + + By("ensuring test ConfigMap exists") + _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml")) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterAll(func() { + By("cleaning up OTel LocustTest CR") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-otel.yaml")) + Eventually(func() bool { + return !utils.ResourceExists("locusttest", testNamespace, crName) + }, 30*time.Second, time.Second).Should(BeTrue()) + }) + + AfterEach(func() { + if CurrentSpecReport().Failed() { + By("Fetching LocustTest CRs on failure") + cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml") + 
output, _ := utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output) + + By("Fetching Jobs on failure") + cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "yaml") + output, _ = utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output) + } + }) + + It("should create resources with OTel enabled", func() { + By("applying LocustTest with OTel config") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-otel.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for master Job") + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should add --otel flag when enabled", func() { + Eventually(func() string { + args, _ := utils.GetJobContainerArgs(testNamespace, crName+"-master", crName+"-master") + return args + }, 30*time.Second, time.Second).Should(ContainSubstring("--otel")) + }) + + It("should inject OTEL_* environment variables", func() { + Eventually(func() string { + env, _ := utils.GetJobContainerEnv(testNamespace, crName+"-master", crName+"-master") + return env + }, 30*time.Second, time.Second).Should(ContainSubstring("OTEL_EXPORTER_OTLP_ENDPOINT")) + }) + + It("should NOT deploy metrics sidecar when OTel enabled", func() { + Eventually(func() string { + containers, _ := utils.GetJobContainerNames(testNamespace, crName+"-master") + return containers + }, 30*time.Second, time.Second).ShouldNot(ContainSubstring("metrics-exporter")) + }) + + It("should have only one container (locust) in master pod", func() { + Eventually(func() string { + containers, _ := utils.GetJobContainerNames(testNamespace, crName+"-master") + return containers + }, 30*time.Second, time.Second).Should(Equal(crName + "-master")) + }) + + It("should exclude metrics port from Service when OTel enabled", func() { + Eventually(func() bool { + return utils.ResourceExists("service", 
testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + + ports, err := utils.GetServicePorts(testNamespace, crName+"-master") + Expect(err).NotTo(HaveOccurred()) + Expect(ports).NotTo(ContainSubstring("metrics")) + }) +}) diff --git a/test/e2e/testdata/configmaps/env-configmap.yaml b/test/e2e/testdata/configmaps/env-configmap.yaml new file mode 100644 index 00000000..4db3f271 --- /dev/null +++ b/test/e2e/testdata/configmaps/env-configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: e2e-env-configmap +data: + TARGET_HOST: "http://example.com" + TEST_MODE: "e2e" diff --git a/test/e2e/testdata/configmaps/test-config.yaml b/test/e2e/testdata/configmaps/test-config.yaml new file mode 100644 index 00000000..e7e7b51e --- /dev/null +++ b/test/e2e/testdata/configmaps/test-config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: e2e-test-configmap +data: + locustfile.py: | + from locust import HttpUser, task + + class TestUser(HttpUser): + @task + def hello(self): + self.client.get("/") diff --git a/test/e2e/testdata/v1/locusttest-basic.yaml b/test/e2e/testdata/v1/locusttest-basic.yaml new file mode 100644 index 00000000..15db0f8d --- /dev/null +++ b/test/e2e/testdata/v1/locusttest-basic.yaml @@ -0,0 +1,10 @@ +apiVersion: locust.io/v1 +kind: LocustTest +metadata: + name: e2e-test-v1 +spec: + masterCommandSeed: "-f /lotest/src/locustfile.py" + workerCommandSeed: "-f /lotest/src/locustfile.py" + workerReplicas: 1 + image: locustio/locust:latest + configMap: e2e-test-configmap diff --git a/test/e2e/testdata/v2/locusttest-basic.yaml b/test/e2e/testdata/v2/locusttest-basic.yaml new file mode 100644 index 00000000..768c3f76 --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-basic.yaml @@ -0,0 +1,13 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-basic +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 2 + command: "-f 
/lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap diff --git a/test/e2e/testdata/v2/locusttest-invalid.yaml b/test/e2e/testdata/v2/locusttest-invalid.yaml new file mode 100644 index 00000000..3c40130d --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-invalid.yaml @@ -0,0 +1,13 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-invalid +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 0 + command: "-f /lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap diff --git a/test/e2e/testdata/v2/locusttest-with-env.yaml b/test/e2e/testdata/v2/locusttest-with-env.yaml new file mode 100644 index 00000000..a5c049bf --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-with-env.yaml @@ -0,0 +1,19 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-env +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 1 + command: "-f /lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap + env: + configMapRefs: + - name: e2e-env-configmap + variables: + - name: E2E_TEST_VAR + value: "test-value" diff --git a/test/e2e/testdata/v2/locusttest-with-otel.yaml b/test/e2e/testdata/v2/locusttest-with-otel.yaml new file mode 100644 index 00000000..e6ed73f2 --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-with-otel.yaml @@ -0,0 +1,17 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-otel +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 1 + command: "-f /lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap + observability: + openTelemetry: + enabled: true + endpoint: "http://otel-collector:4317" diff --git a/test/e2e/testdata/v2/locusttest-with-scheduling.yaml b/test/e2e/testdata/v2/locusttest-with-scheduling.yaml new file mode 
100644 index 00000000..7fd7eb01 --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-with-scheduling.yaml @@ -0,0 +1,46 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-scheduling +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 2 + command: "-f /lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap + scheduling: + nodeSelector: + workload-type: performance-testing + tier: compute + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: performance-test-name + operator: In + values: + - e2e-test-scheduling + topologyKey: kubernetes.io/hostname + tolerations: + - key: "performance-testing" + operator: "Equal" + value: "true" + effect: "NoSchedule" + - key: "high-load" + operator: "Exists" + effect: "PreferNoSchedule" diff --git a/test/e2e/testdata/v2/locusttest-with-volumes.yaml b/test/e2e/testdata/v2/locusttest-with-volumes.yaml new file mode 100644 index 00000000..c832c25c --- /dev/null +++ b/test/e2e/testdata/v2/locusttest-with-volumes.yaml @@ -0,0 +1,20 @@ +apiVersion: locust.io/v2 +kind: LocustTest +metadata: + name: e2e-test-volumes +spec: + master: + command: "-f /lotest/src/locustfile.py" + worker: + replicas: 1 + command: "-f /lotest/src/locustfile.py" + image: locustio/locust:latest + testFiles: + configMapRef: e2e-test-configmap + volumes: + - name: test-data + emptyDir: {} + volumeMounts: + - name: test-data + mountPath: /data + target: both diff --git a/test/e2e/v1_compatibility_test.go b/test/e2e/v1_compatibility_test.go new file mode 100644 index 00000000..9d39ad40 --- /dev/null +++ b/test/e2e/v1_compatibility_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2026. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +var _ = Describe("v1 API Compatibility", Ordered, func() { + const testNamespace = "locust-k8s-operator-system" + const crName = "e2e-test-v1" + var testdataDir string + + BeforeAll(func() { + var err error + testdataDir, err = filepath.Abs("testdata") + Expect(err).NotTo(HaveOccurred()) + + By("ensuring test ConfigMap exists") + _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml")) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterAll(func() { + By("cleaning up v1 LocustTest CR") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v1", "locusttest-basic.yaml")) + Eventually(func() bool { + return !utils.ResourceExists("locusttest", testNamespace, crName) + }, 30*time.Second, time.Second).Should(BeTrue()) + }) + + AfterEach(func() { + if CurrentSpecReport().Failed() { + By("Fetching LocustTest CRs on failure") + cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml") + output, _ := utils.Run(cmd) + _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output) + + By("Fetching Jobs on failure") + cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "wide") + output, _ = utils.Run(cmd) + _, _ = 
fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output) + } + }) + + It("should accept v1 LocustTest CR", func() { + By("applying v1 LocustTest CR") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v1", "locusttest-basic.yaml")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should create resources from v1 CR", func() { + By("waiting for master Service") + Eventually(func() bool { + return utils.ResourceExists("service", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + + By("waiting for master Job") + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-master") + }, 60*time.Second, time.Second).Should(BeTrue()) + + By("waiting for worker Job") + Eventually(func() bool { + return utils.ResourceExists("job", testNamespace, crName+"-worker") + }, 60*time.Second, time.Second).Should(BeTrue()) + }) + + It("should allow reading v1 CR as v2", func() { + By("fetching v1 CR using v2 API version") + cmd := exec.Command("kubectl", "get", "locusttest.v2.locust.io", + crName, "-n", testNamespace, "-o", "yaml") + output, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(output).To(ContainSubstring("apiVersion: locust.io/v2")) + }) + + It("should have correct owner references", func() { + owner, err := utils.GetOwnerReferenceName("job", testNamespace, crName+"-master") + Expect(err).NotTo(HaveOccurred()) + Expect(owner).To(Equal(crName)) + }) +}) diff --git a/test/e2e/validation_e2e_test.go b/test/e2e/validation_e2e_test.go new file mode 100644 index 00000000..885117b6 --- /dev/null +++ b/test/e2e/validation_e2e_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils" +) + +var _ = Describe("Validation Webhook", Ordered, func() { + const testNamespace = "locust-k8s-operator-system" + var testdataDir string + + BeforeAll(func() { + var err error + testdataDir, err = filepath.Abs("testdata") + Expect(err).NotTo(HaveOccurred()) + + By("ensuring test ConfigMap exists") + _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml")) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterAll(func() { + By("cleaning up any leftover CRs") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-invalid.yaml")) + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + }) + + It("should reject CR with invalid workerReplicas (0)", func() { + By("applying invalid LocustTest CR with workerReplicas=0") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-invalid.yaml")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Or( + ContainSubstring("minimum"), + ContainSubstring("Invalid value"), + ContainSubstring("spec.worker.replicas"), + )) + }) + + It("should accept valid CR", func() { + By("applying valid LocustTest CR") + _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + Expect(err).NotTo(HaveOccurred()) + + By("verifying CR was created") + 
Eventually(func() bool { + return utils.ResourceExists("locusttest", testNamespace, "e2e-test-basic") + }, 30*time.Second, time.Second).Should(BeTrue()) + + By("cleaning up") + _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml")) + }) +}) diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 00000000..2adfd73a --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,406 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + "time" + + . 
"github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck +) + +const ( + prometheusOperatorVersion = "v0.77.1" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.16.3" + certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err) + } + + return string(output), nil +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. +func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "create", "-f", url) //nolint:gosec // Test code with known safe prometheus URL + _, err := Run(cmd) + return err +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) //nolint:gosec // Test code with known safe prometheus URL + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed +// by verifying the existence of key CRDs related to Prometheus. 
+func IsPrometheusCRDsInstalled() bool { + // List of common Prometheus CRDs + prometheusCRDs := []string{ + "prometheuses.monitoring.coreos.com", + "prometheusrules.monitoring.coreos.com", + "prometheusagents.monitoring.coreos.com", + } + + cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name") + output, err := Run(cmd) + if err != nil { + return false + } + crdList := GetNonEmptyLines(output) + for _, crd := range prometheusCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) //nolint:gosec // Test code with known safe cert-manager URL + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) //nolint:gosec // Test code with known safe cert-manager URL + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. 
+ cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + if _, err := Run(cmd); err != nil { + return err + } + + // Wait for all cert-manager pods to be ready + cmd = exec.Command("kubectl", "wait", "pods", + "--all", + "--for", "condition=Ready", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + if _, err := Run(cmd); err != nil { + return err + } + + // Wait for cert-manager's own webhook to have its CA bundle injected + // This ensures the webhook is fully functional before we try to create Certificate resources + maxRetries := 60 // 60 retries * 2 seconds = 2 minutes + for i := 0; i < maxRetries; i++ { + cmd = exec.Command("kubectl", "-n", "cert-manager", "get", + "validatingwebhookconfigurations", "cert-manager-webhook", + "-o", "jsonpath={.webhooks[0].clientConfig.caBundle}", + ) + output, err := Run(cmd) + if err == nil && len(output) > 0 { + // CA bundle is present, webhook is ready + return nil + } + time.Sleep(2 * time.Second) + } + + return fmt.Errorf("timed out waiting for cert-manager webhook CA bundle to be injected") +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. 
+func IsCertManagerCRDsInstalled() bool { + // List of common Cert Manager CRDs + certManagerCRDs := []string{ + "certificates.cert-manager.io", + "issuers.cert-manager.io", + "clusterissuers.cert-manager.io", + "certificaterequests.cert-manager.io", + "orders.acme.cert-manager.io", + "challenges.acme.cert-manager.io", + } + + // Execute the kubectl command to get all CRDs + cmd := exec.Command("kubectl", "get", "crds") + output, err := Run(cmd) + if err != nil { + return false + } + + // Check if any of the Cert Manager CRDs are present + crdList := GetNonEmptyLines(output) + for _, crd := range certManagerCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// LoadImageToKindClusterWithName loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + cluster := "kind" + if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", name, "--name", cluster} + cmd := exec.Command("kind", kindOptions...) //nolint:gosec // Test code with validated cluster name and image + _, err := Run(cmd) + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. +func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, fmt.Errorf("failed to get current working directory: %w", err) + } + wd = strings.ReplaceAll(wd, "/test/e2e", "") + return wd, nil +} + +// UncommentCode searches for target in the file and remove the comment prefix +// of the target content. 
The target content may span multiple lines. +func UncommentCode(filename, target, prefix string) error { + // false positive + // nolint:gosec + content, err := os.ReadFile(filename) + if err != nil { + return fmt.Errorf("failed to read file %q: %w", filename, err) + } + strContent := string(content) + + idx := strings.Index(strContent, target) + if idx < 0 { + return fmt.Errorf("unable to find the code %q to be uncomment", target) + } + + out := new(bytes.Buffer) + _, err = out.Write(content[:idx]) + if err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + + scanner := bufio.NewScanner(bytes.NewBufferString(target)) + if !scanner.Scan() { + return nil + } + for { + if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + // Avoid writing a newline in case the previous line was the last in target. + if !scanner.Scan() { + break + } + if _, err = out.WriteString("\n"); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + } + + if _, err = out.Write(content[idx+len(target):]); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + + // false positive + // nolint:gosec + if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil { + return fmt.Errorf("failed to write file %q: %w", filename, err) + } + + return nil +} + +// ApplyFromFile applies a Kubernetes resource from a YAML file +func ApplyFromFile(namespace, path string) (string, error) { + cmd := exec.Command("kubectl", "apply", "-f", path, "-n", namespace) + return Run(cmd) +} + +// DeleteFromFile deletes a Kubernetes resource from a YAML file +func DeleteFromFile(namespace, path string) (string, error) { + cmd := exec.Command("kubectl", "delete", "-f", path, "-n", namespace, "--ignore-not-found") + return Run(cmd) +} + +// WaitForResource waits for a resource to exist +func WaitForResource(resourceType, namespace, name string, timeout string) 
error { + cmd := exec.Command("kubectl", "wait", resourceType, name, + "-n", namespace, + "--for=create", + "--timeout", timeout) + _, err := Run(cmd) + return err +} + +// ResourceExists checks if a resource exists +func ResourceExists(resourceType, namespace, name string) bool { + cmd := exec.Command("kubectl", "get", resourceType, name, "-n", namespace) + _, err := Run(cmd) + return err == nil +} + +// GetResourceField retrieves a field from a resource using jsonpath +func GetResourceField(resourceType, namespace, name, jsonpath string) (string, error) { + //nolint:gosec // Test code with validated kubectl parameters + cmd := exec.Command("kubectl", "get", resourceType, name, + "-n", namespace, "-o", fmt.Sprintf("jsonpath={%s}", jsonpath)) + output, err := Run(cmd) + if err != nil { + return "", err + } + return strings.TrimSpace(output), nil +} + +// GetOwnerReferenceName retrieves the owner reference name from a resource +func GetOwnerReferenceName(resourceType, namespace, name string) (string, error) { + return GetResourceField(resourceType, namespace, name, ".metadata.ownerReferences[0].name") +} + +// GetJobContainerEnv retrieves environment variables from a Job's container +func GetJobContainerEnv(namespace, jobName, containerName string) (string, error) { + jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].env[*].name", containerName) + return GetResourceField("job", namespace, jobName, jsonpath) +} + +// GetJobContainerCommand retrieves the command from a Job's container +func GetJobContainerCommand(namespace, jobName, containerName string) (string, error) { + jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].command", containerName) + return GetResourceField("job", namespace, jobName, jsonpath) +} + +// GetJobContainerArgs retrieves the args from a Job's container +func GetJobContainerArgs(namespace, jobName, containerName string) (string, error) { + jsonpath := 
fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].args", containerName) + return GetResourceField("job", namespace, jobName, jsonpath) +} + +// GetJobContainerNames retrieves all container names from a Job +func GetJobContainerNames(namespace, jobName string) (string, error) { + return GetResourceField("job", namespace, jobName, ".spec.template.spec.containers[*].name") +} + +// GetServicePorts retrieves port names from a Service +func GetServicePorts(namespace, serviceName string) (string, error) { + return GetResourceField("service", namespace, serviceName, ".spec.ports[*].name") +} + +// GetJobEnvFrom retrieves envFrom configuration from a Job's container +func GetJobEnvFrom(namespace, jobName, containerName string) (string, error) { + jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].envFrom", containerName) + return GetResourceField("job", namespace, jobName, jsonpath) +} + +// GetJobVolumes retrieves volume names from a Job +func GetJobVolumes(namespace, jobName string) (string, error) { + return GetResourceField("job", namespace, jobName, ".spec.template.spec.volumes[*].name") +} + +// GetJobVolumeMounts retrieves volume mount paths from a Job's container +func GetJobVolumeMounts(namespace, jobName, containerName string) (string, error) { + jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].volumeMounts[*].mountPath", containerName) + return GetResourceField("job", namespace, jobName, jsonpath) +} + +// WaitForControllerReady waits for the controller-manager deployment to be ready +func WaitForControllerReady(namespace string, timeout string) error { + _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for controller-manager deployment to be ready...\n") + cmd := exec.Command("kubectl", "wait", "deployment", + "-l", "control-plane=controller-manager", + "-n", namespace, + "--for=condition=Available", + "--timeout", timeout) + _, err := Run(cmd) + return err +} + +// WaitForWebhookReady waits for the 
webhook service endpoint to be ready +func WaitForWebhookReady(namespace, serviceName string, timeout string) error { + _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for webhook service endpoint to be ready...\n") + cmd := exec.Command("kubectl", "wait", "endpoints", serviceName, + "-n", namespace, + "--for=jsonpath={.subsets[0].addresses[0].ip}", + "--timeout", timeout) + _, err := Run(cmd) + return err +} + +// WaitForCertificateReady waits for the serving certificate to be ready +func WaitForCertificateReady(namespace, certName string, timeout string) error { + _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for certificate %s to be ready...\n", certName) + cmd := exec.Command("kubectl", "wait", "certificate", certName, + "-n", namespace, + "--for=condition=Ready", + "--timeout", timeout) + _, err := Run(cmd) + return err +}